author | orivej <orivej@yandex-team.ru> | 2022-02-10 16:45:01 +0300
---|---|---
committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:45:01 +0300
commit | 2d37894b1b037cf24231090eda8589bbb44fb6fc (patch) |
tree | be835aa92c6248212e705f25388ebafcf84bc7a1 /contrib/tools/cython/Cython |
parent | 718c552901d703c502ccbefdfc3c9028d608b947 (diff) |
download | ydb-2d37894b1b037cf24231090eda8589bbb44fb6fc.tar.gz |
Restoring authorship annotation for <orivej@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/tools/cython/Cython')
233 files changed, 21218 insertions, 21218 deletions
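Judging by the commit message and the symmetric diffstat above, the diff below is authorship-restoration churn: each hunk deletes lines and re-adds them essentially unchanged, modulo trailing whitespace. The first file touched, Cython/Build/Cythonize.py, implements the `cythonize` command-line tool. As orientation for the optparse options rebuilt in that hunk, here is a minimal, hedged sketch of an invocation; the package path is hypothetical:

```python
# Illustrative only: drive the cythonize CLI entry point from Python.
# -X is handled by the parse_directives callback, -3 selects Python 3
# syntax mode, -a requests annotated HTML. 'mypackage' is hypothetical.
from Cython.Build.Cythonize import main

main([
    '-X', 'boundscheck=False,wraparound=False',  # compiler directives
    '-3',                                        # language_level = 3
    '-a',                                        # annotated HTML output
    'mypackage/*.pyx',                           # expanded via extended_iglob
])
```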
diff --git a/contrib/tools/cython/Cython/Build/Cythonize.py b/contrib/tools/cython/Cython/Build/Cythonize.py index caa3cebc0e..c85b6eabab 100644 --- a/contrib/tools/cython/Cython/Build/Cythonize.py +++ b/contrib/tools/cython/Cython/Build/Cythonize.py @@ -21,27 +21,27 @@ except ImportError: class _FakePool(object): def map_async(self, func, args): - try: - from itertools import imap - except ImportError: - imap=map + try: + from itertools import imap + except ImportError: + imap=map for _ in imap(func, args): pass - def close(self): - pass + def close(self): + pass + + def terminate(self): + pass + + def join(self): + pass - def terminate(self): - pass - def join(self): - pass - - def parse_directives(option, name, value, parser): dest = option.dest old_directives = dict(getattr(parser.values, dest, - Options.get_directive_defaults())) + Options.get_directive_defaults())) directives = Options.parse_directive_list( value, relaxed_bool=True, current_settings=old_directives) setattr(parser.values, dest, directives) @@ -60,13 +60,13 @@ def parse_options(option, name, value, parser): setattr(parser.values, dest, options) -def parse_compile_time_env(option, name, value, parser): - dest = option.dest - old_env = dict(getattr(parser.values, dest, {})) - new_env = Options.parse_compile_time_env(value, current_settings=old_env) - setattr(parser.values, dest, new_env) - - +def parse_compile_time_env(option, name, value, parser): + dest = option.dest + old_env = dict(getattr(parser.values, dest, {})) + new_env = Options.parse_compile_time_env(value, current_settings=old_env) + setattr(parser.values, dest, new_env) + + def find_package_base(path): base_dir, package_path = os.path.split(path) while os.path.isfile(os.path.join(base_dir, '__init__.py')): @@ -77,9 +77,9 @@ def find_package_base(path): def cython_compile(path_pattern, options): pool = None - all_paths = map(os.path.abspath, extended_iglob(path_pattern)) + all_paths = map(os.path.abspath, extended_iglob(path_pattern)) try: - for path in all_paths: + for path in all_paths: if options.build_inplace: base_dir = path while not os.path.isdir(base_dir) or is_package_dir(base_dir): @@ -89,7 +89,7 @@ def cython_compile(path_pattern, options): if os.path.isdir(path): # recursively compiling a package - paths = [os.path.join(path, '**', '*.{py,pyx}')] + paths = [os.path.join(path, '**', '*.{py,pyx}')] else: # assume it's a file(-like thing) paths = [path] @@ -100,7 +100,7 @@ def cython_compile(path_pattern, options): exclude_failures=options.keep_going, exclude=options.excludes, compiler_directives=options.directives, - compile_time_env=options.compile_time_env, + compile_time_env=options.compile_time_env, force=options.force, quiet=options.quiet, depfile=options.depfile, @@ -153,26 +153,26 @@ def parse_args(args): from optparse import OptionParser parser = OptionParser(usage='%prog [options] [sources and packages]+') - parser.add_option('-X', '--directive', metavar='NAME=VALUE,...', - dest='directives', default={}, type="str", - action='callback', callback=parse_directives, + parser.add_option('-X', '--directive', metavar='NAME=VALUE,...', + dest='directives', default={}, type="str", + action='callback', callback=parse_directives, help='set a compiler directive') - parser.add_option('-E', '--compile-time-env', metavar='NAME=VALUE,...', - dest='compile_time_env', default={}, type="str", - action='callback', callback=parse_compile_time_env, - help='set a compile time environment variable') - parser.add_option('-s', '--option', metavar='NAME=VALUE', - 
dest='options', default={}, type="str", - action='callback', callback=parse_options, + parser.add_option('-E', '--compile-time-env', metavar='NAME=VALUE,...', + dest='compile_time_env', default={}, type="str", + action='callback', callback=parse_compile_time_env, + help='set a compile time environment variable') + parser.add_option('-s', '--option', metavar='NAME=VALUE', + dest='options', default={}, type="str", + action='callback', callback=parse_options, help='set a cythonize option') - parser.add_option('-2', dest='language_level', action='store_const', const=2, default=None, - help='use Python 2 syntax mode by default') - parser.add_option('-3', dest='language_level', action='store_const', const=3, + parser.add_option('-2', dest='language_level', action='store_const', const=2, default=None, + help='use Python 2 syntax mode by default') + parser.add_option('-3', dest='language_level', action='store_const', const=3, + help='use Python 3 syntax mode by default') + parser.add_option('--3str', dest='language_level', action='store_const', const='3str', help='use Python 3 syntax mode by default') - parser.add_option('--3str', dest='language_level', action='store_const', const='3str', - help='use Python 3 syntax mode by default') - parser.add_option('-a', '--annotate', dest='annotate', action='store_true', - help='generate annotated HTML page for source files') + parser.add_option('-a', '--annotate', dest='annotate', action='store_true', + help='generate annotated HTML page for source files') parser.add_option('-x', '--exclude', metavar='PATTERN', dest='excludes', action='append', default=[], @@ -204,9 +204,9 @@ def parse_args(args): options.build = True if multiprocessing is None: options.parallel = 0 - if options.language_level: - assert options.language_level in (2, 3, '3str') - options.options['language_level'] = options.language_level + if options.language_level: + assert options.language_level in (2, 3, '3str') + options.options['language_level'] = options.language_level return options, args @@ -218,9 +218,9 @@ def main(args=None): Options.error_on_unknown_names = False Options.error_on_uninitialized = False - if options.annotate: - Options.annotate = True - + if options.annotate: + Options.annotate = True + for path in paths: cython_compile(path, options) diff --git a/contrib/tools/cython/Cython/Build/Dependencies.py b/contrib/tools/cython/Cython/Build/Dependencies.py index ac5a3a10c0..7eb55e2607 100644 --- a/contrib/tools/cython/Cython/Build/Dependencies.py +++ b/contrib/tools/cython/Cython/Build/Dependencies.py @@ -1,22 +1,22 @@ -from __future__ import absolute_import, print_function +from __future__ import absolute_import, print_function import cython from .. 
import __version__ -import collections -import contextlib -import hashlib +import collections +import contextlib +import hashlib import os import shutil import subprocess import re, sys, time -import warnings +import warnings from glob import iglob from io import open as io_open from os.path import relpath as _relpath from distutils.extension import Extension from distutils.util import strtobool -import zipfile +import zipfile try: from collections.abc import Iterable @@ -32,24 +32,24 @@ except ImportError: gzip_ext = '' try: - import zlib - zipfile_compression_mode = zipfile.ZIP_DEFLATED -except ImportError: - zipfile_compression_mode = zipfile.ZIP_STORED - -try: + import zlib + zipfile_compression_mode = zipfile.ZIP_DEFLATED +except ImportError: + zipfile_compression_mode = zipfile.ZIP_STORED + +try: import pythran except: - pythran = None + pythran = None from .. import Utils -from ..Utils import (cached_function, cached_method, path_exists, - safe_makedirs, copy_file_to_dir_if_newer, is_package_dir, replace_suffix) +from ..Utils import (cached_function, cached_method, path_exists, + safe_makedirs, copy_file_to_dir_if_newer, is_package_dir, replace_suffix) from ..Compiler.Main import Context, CompilationOptions, default_options join_path = cached_function(os.path.join) -copy_once_if_newer = cached_function(copy_file_to_dir_if_newer) -safe_makedirs_once = cached_function(safe_makedirs) +copy_once_if_newer = cached_function(copy_file_to_dir_if_newer) +safe_makedirs_once = cached_function(safe_makedirs) if sys.version_info[0] < 3: # stupid Py2 distutils enforces str type in list of sources @@ -57,7 +57,7 @@ if sys.version_info[0] < 3: if _fs_encoding is None: _fs_encoding = sys.getdefaultencoding() def encode_filename_in_py2(filename): - if not isinstance(filename, bytes): + if not isinstance(filename, bytes): return filename.encode(_fs_encoding) return filename else: @@ -65,7 +65,7 @@ else: return filename basestring = str - + def _make_relative(file_paths, base=None): if not base: base = os.getcwd() @@ -76,14 +76,14 @@ def _make_relative(file_paths, base=None): def extended_iglob(pattern): - if '{' in pattern: - m = re.match('(.*){([^}]+)}(.*)', pattern) - if m: - before, switch, after = m.groups() - for case in switch.split(','): - for path in extended_iglob(before + case + after): - yield path - return + if '{' in pattern: + m = re.match('(.*){([^}]+)}(.*)', pattern) + if m: + before, switch, after = m.groups() + for case in switch.split(','): + for path in extended_iglob(before + case + after): + yield path + return if '**/' in pattern: seen = set() first, rest = pattern.split('**/', 1) @@ -104,60 +104,60 @@ def extended_iglob(pattern): for path in iglob(pattern): yield path - -def nonempty(it, error_msg="expected non-empty iterator"): - empty = True - for value in it: - empty = False - yield value - if empty: - raise ValueError(error_msg) - - + +def nonempty(it, error_msg="expected non-empty iterator"): + empty = True + for value in it: + empty = False + yield value + if empty: + raise ValueError(error_msg) + + @cached_function def file_hash(filename): - path = os.path.normpath(filename) - prefix = ('%d:%s' % (len(path), path)).encode("UTF-8") + path = os.path.normpath(filename) + prefix = ('%d:%s' % (len(path), path)).encode("UTF-8") m = hashlib.md5(prefix) - with open(path, 'rb') as f: + with open(path, 'rb') as f: data = f.read(65000) while data: m.update(data) data = f.read(65000) return m.hexdigest() - -def update_pythran_extension(ext): - if pythran is None: - raise 
RuntimeError("You first need to install Pythran to use the np_pythran directive.") - try: - pythran_ext = pythran.config.make_extension(python=True) - except TypeError: # older pythran version only - pythran_ext = pythran.config.make_extension() - - ext.include_dirs.extend(pythran_ext['include_dirs']) - ext.extra_compile_args.extend(pythran_ext['extra_compile_args']) - ext.extra_link_args.extend(pythran_ext['extra_link_args']) - ext.define_macros.extend(pythran_ext['define_macros']) - ext.undef_macros.extend(pythran_ext['undef_macros']) - ext.library_dirs.extend(pythran_ext['library_dirs']) - ext.libraries.extend(pythran_ext['libraries']) - ext.language = 'c++' - - # These options are not compatible with the way normal Cython extensions work - for bad_option in ["-fwhole-program", "-fvisibility=hidden"]: - try: - ext.extra_compile_args.remove(bad_option) - except ValueError: - pass - - + +def update_pythran_extension(ext): + if pythran is None: + raise RuntimeError("You first need to install Pythran to use the np_pythran directive.") + try: + pythran_ext = pythran.config.make_extension(python=True) + except TypeError: # older pythran version only + pythran_ext = pythran.config.make_extension() + + ext.include_dirs.extend(pythran_ext['include_dirs']) + ext.extra_compile_args.extend(pythran_ext['extra_compile_args']) + ext.extra_link_args.extend(pythran_ext['extra_link_args']) + ext.define_macros.extend(pythran_ext['define_macros']) + ext.undef_macros.extend(pythran_ext['undef_macros']) + ext.library_dirs.extend(pythran_ext['library_dirs']) + ext.libraries.extend(pythran_ext['libraries']) + ext.language = 'c++' + + # These options are not compatible with the way normal Cython extensions work + for bad_option in ["-fwhole-program", "-fvisibility=hidden"]: + try: + ext.extra_compile_args.remove(bad_option) + except ValueError: + pass + + def parse_list(s): """ - >>> parse_list("") - [] - >>> parse_list("a") - ['a'] + >>> parse_list("") + [] + >>> parse_list("a") + ['a'] >>> parse_list("a b c") ['a', 'b', 'c'] >>> parse_list("[a, b, c]") @@ -167,7 +167,7 @@ def parse_list(s): >>> parse_list('[a, ",a", "a,", ",", ]') ['a', ',a', 'a,', ','] """ - if len(s) >= 2 and s[0] == '[' and s[-1] == ']': + if len(s) >= 2 and s[0] == '[' and s[-1] == ']': s = s[1:-1] delimiter = ',' else: @@ -181,7 +181,7 @@ def parse_list(s): return literal return [unquote(item) for item in s.split(delimiter) if item.strip()] - + transitive_str = object() transitive_list = object() bool_or = object() @@ -204,8 +204,8 @@ distutils_settings = { 'np_pythran': bool_or } - -@cython.locals(start=cython.Py_ssize_t, end=cython.Py_ssize_t) + +@cython.locals(start=cython.Py_ssize_t, end=cython.Py_ssize_t) def line_iter(source): if isinstance(source, basestring): start = 0 @@ -220,30 +220,30 @@ def line_iter(source): for line in source: yield line - + class DistutilsInfo(object): def __init__(self, source=None, exn=None): self.values = {} if source is not None: for line in line_iter(source): - line = line.lstrip() - if not line: - continue - if line[0] != '#': + line = line.lstrip() + if not line: + continue + if line[0] != '#': break - line = line[1:].lstrip() + line = line[1:].lstrip() kind = next((k for k in ("distutils:","cython:") if line.startswith(k)), None) - if kind is not None: + if kind is not None: key, _, value = [s.strip() for s in line[len(kind):].partition('=')] type = distutils_settings.get(key, None) if line.startswith("cython:") and type is None: continue if type in (list, transitive_list): value = 
parse_list(value) if key == 'define_macros': - value = [tuple(macro.split('=', 1)) - if '=' in macro else (macro, None) - for macro in value] + value = [tuple(macro.split('=', 1)) + if '=' in macro else (macro, None) + for macro in value] if type is bool_or: value = strtobool(value) self.values[key] = value @@ -264,13 +264,13 @@ class DistutilsInfo(object): self.values[key] = value elif type is transitive_list: if key in self.values: - # Change a *copy* of the list (Trac #845) - all = self.values[key][:] + # Change a *copy* of the list (Trac #845) + all = self.values[key][:] for v in value: if v not in all: all.append(v) - value = all - self.values[key] = value + value = all + self.values[key] = value elif type is bool_or: self.values[key] = self.values.get(key, False) | value return self @@ -301,14 +301,14 @@ class DistutilsInfo(object): for key, value in self.values.items(): type = distutils_settings[key] if type in [list, transitive_list]: - value = getattr(extension, key) + list(value) - setattr(extension, key, value) - - -@cython.locals(start=cython.Py_ssize_t, q=cython.Py_ssize_t, - single_q=cython.Py_ssize_t, double_q=cython.Py_ssize_t, - hash_mark=cython.Py_ssize_t, end=cython.Py_ssize_t, - k=cython.Py_ssize_t, counter=cython.Py_ssize_t, quote_len=cython.Py_ssize_t) + value = getattr(extension, key) + list(value) + setattr(extension, key, value) + + +@cython.locals(start=cython.Py_ssize_t, q=cython.Py_ssize_t, + single_q=cython.Py_ssize_t, double_q=cython.Py_ssize_t, + hash_mark=cython.Py_ssize_t, end=cython.Py_ssize_t, + k=cython.Py_ssize_t, counter=cython.Py_ssize_t, quote_len=cython.Py_ssize_t) def strip_string_literals(code, prefix='__Pyx_L'): """ Normalizes every string literal to be of the form '__Pyx_Lxxx', @@ -333,8 +333,8 @@ def strip_string_literals(code, prefix='__Pyx_L'): if double_q < q: double_q = code.find('"', q) q = min(single_q, double_q) - if q == -1: - q = max(single_q, double_q) + if q == -1: + q = max(single_q, double_q) # We're done. if q == -1 and hash_mark == -1: @@ -350,8 +350,8 @@ def strip_string_literals(code, prefix='__Pyx_L'): if k % 2 == 0: q += 1 continue - if code[q] == quote_type and ( - quote_len == 1 or (code_len > q + 2 and quote_type == code[q+1] == code[q+2])): + if code[q] == quote_type and ( + quote_len == 1 or (code_len > q + 2 and quote_type == code[q+1] == code[q+2])): counter += 1 label = "%s%s_" % (prefix, counter) literals[label] = code[start+quote_len:q] @@ -396,23 +396,23 @@ def strip_string_literals(code, prefix='__Pyx_L'): return "".join(new_code), literals -# We need to allow spaces to allow for conditional compilation like -# IF ...: -# cimport ... -dependency_regex = re.compile(r"(?:^\s*from +([0-9a-zA-Z_.]+) +cimport)|" - r"(?:^\s*cimport +([0-9a-zA-Z_.]+(?: *, *[0-9a-zA-Z_.]+)*))|" - r"(?:^\s*cdef +extern +from +['\"]([^'\"]+)['\"])|" - r"(?:^\s*include +['\"]([^'\"]+)['\"])", re.M) -dependency_after_from_regex = re.compile( - r"(?:^\s+\(([0-9a-zA-Z_., ]*)\)[#\n])|" - r"(?:^\s+([0-9a-zA-Z_., ]*)[#\n])", - re.M) +# We need to allow spaces to allow for conditional compilation like +# IF ...: +# cimport ... 
+dependency_regex = re.compile(r"(?:^\s*from +([0-9a-zA-Z_.]+) +cimport)|" + r"(?:^\s*cimport +([0-9a-zA-Z_.]+(?: *, *[0-9a-zA-Z_.]+)*))|" + r"(?:^\s*cdef +extern +from +['\"]([^'\"]+)['\"])|" + r"(?:^\s*include +['\"]([^'\"]+)['\"])", re.M) +dependency_after_from_regex = re.compile( + r"(?:^\s+\(([0-9a-zA-Z_., ]*)\)[#\n])|" + r"(?:^\s+([0-9a-zA-Z_., ]*)[#\n])", + re.M) + - def normalize_existing(base_path, rel_paths): return normalize_existing0(os.path.dirname(base_path), tuple(set(rel_paths))) - + @cached_function def normalize_existing0(base_dir, rel_paths): """ @@ -440,7 +440,7 @@ def normalize_existing0(base_dir, rel_paths): normalized.append(rel) return (normalized, needed_base) - + def resolve_depends(depends, include_dirs): include_dirs = tuple(include_dirs) resolved = [] @@ -450,7 +450,7 @@ def resolve_depends(depends, include_dirs): resolved.append(path) return resolved - + @cached_function def resolve_depend(depend, include_dirs): if depend[0] == '<' and depend[-1] == '>': @@ -461,16 +461,16 @@ def resolve_depend(depend, include_dirs): return os.path.normpath(path) return None - + @cached_function def package(filename): dir = os.path.dirname(os.path.abspath(str(filename))) - if dir != filename and is_package_dir(dir): + if dir != filename and is_package_dir(dir): return package(dir) + (os.path.basename(dir),) else: return () - + @cached_function def fully_qualified_name(filename): module = os.path.splitext(os.path.basename(filename))[0] @@ -479,10 +479,10 @@ def fully_qualified_name(filename): @cached_function def parse_dependencies(source_filename): - # Actual parsing is way too slow, so we use regular expressions. + # Actual parsing is way too slow, so we use regular expressions. # The only catch is that we must strip comments and string # literals ahead of time. 
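An aside on the `dependency_regex` restored just above: because full parsing is too slow, dependencies are extracted with these regular expressions after comments and string literals have been stripped. A self-contained sketch of what the four alternatives capture, with the expected output in comments:

```python
# Sketch: the four groups of dependency_regex match 'from X cimport',
# 'cimport X[, Y]', 'cdef extern from "X"' and 'include "X"'.
import re

dependency_regex = re.compile(
    r"(?:^\s*from +([0-9a-zA-Z_.]+) +cimport)|"
    r"(?:^\s*cimport +([0-9a-zA-Z_.]+(?: *, *[0-9a-zA-Z_.]+)*))|"
    r"(?:^\s*cdef +extern +from +['\"]([^'\"]+)['\"])|"
    r"(?:^\s*include +['\"]([^'\"]+)['\"])", re.M)

source = '''\
from libc.math cimport sqrt
cimport numpy, cpython.ref
cdef extern from "header.h":
include "defs.pxi"
'''
for m in dependency_regex.finditer(source):
    print(m.groups())
# ('libc.math', None, None, None)
# (None, 'numpy, cpython.ref', None, None)
# (None, None, 'header.h', None)
# (None, None, None, 'defs.pxi')
```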
- with Utils.open_source_file(source_filename, error_handling='ignore') as fh: + with Utils.open_source_file(source_filename, error_handling='ignore') as fh: source = fh.read() distutils_info = DistutilsInfo(source) source, literals = strip_string_literals(source) @@ -492,19 +492,19 @@ def parse_dependencies(source_filename): cimports = [] includes = [] externs = [] - for m in dependency_regex.finditer(source): - cimport_from, cimport_list, extern, include = m.groups() + for m in dependency_regex.finditer(source): + cimport_from, cimport_list, extern, include = m.groups() if cimport_from: cimports.append(cimport_from) - m_after_from = dependency_after_from_regex.search(source, pos=m.end()) - if m_after_from: - multiline, one_line = m_after_from.groups() - subimports = multiline or one_line - cimports.extend("{0}.{1}".format(cimport_from, s.strip()) - for s in subimports.split(',')) - - elif cimport_list: - cimports.extend(x.strip() for x in cimport_list.split(",")) + m_after_from = dependency_after_from_regex.search(source, pos=m.end()) + if m_after_from: + multiline, one_line = m_after_from.groups() + subimports = multiline or one_line + cimports.extend("{0}.{1}".format(cimport_from, s.strip()) + for s in subimports.split(',')) + + elif cimport_list: + cimports.extend(x.strip() for x in cimport_list.split(",")) elif extern: externs.append(literals[extern]) else: @@ -520,8 +520,8 @@ class DependencyTree(object): self._transitive_cache = {} def parse_dependencies(self, source_filename): - if path_exists(source_filename): - source_filename = os.path.normpath(source_filename) + if path_exists(source_filename): + source_filename = os.path.normpath(source_filename) return parse_dependencies(source_filename) @cached_method @@ -599,8 +599,8 @@ class DependencyTree(object): pxd_list = [filename[:-4] + '.pxd'] else: pxd_list = [] - # Cimports generates all possible combinations package.module - # when imported as from package cimport module. + # Cimports generates all possible combinations package.module + # when imported as from package cimport module. for module in self.cimports(filename): if module[:7] == 'cython.' or module == 'cython': continue @@ -629,32 +629,32 @@ class DependencyTree(object): def newest_dependency(self, filename): return max([self.extract_timestamp(f) for f in self.all_dependencies(filename)]) - def transitive_fingerprint(self, filename, module, compilation_options): - r""" - Return a fingerprint of a cython file that is about to be cythonized. - - Fingerprints are looked up in future compilations. If the fingerprint - is found, the cythonization can be skipped. The fingerprint must - incorporate everything that has an influence on the generated code. - """ + def transitive_fingerprint(self, filename, module, compilation_options): + r""" + Return a fingerprint of a cython file that is about to be cythonized. + + Fingerprints are looked up in future compilations. If the fingerprint + is found, the cythonization can be skipped. The fingerprint must + incorporate everything that has an influence on the generated code. + """ try: m = hashlib.md5(__version__.encode('UTF-8')) m.update(file_hash(filename).encode('UTF-8')) for x in sorted(self.all_dependencies(filename)): if os.path.splitext(x)[1] not in ('.c', '.cpp', '.h'): m.update(file_hash(x).encode('UTF-8')) - # Include the module attributes that change the compilation result - # in the fingerprint. 
We do not iterate over module.__dict__ and - # include almost everything here as users might extend Extension - # with arbitrary (random) attributes that would lead to cache - # misses. - m.update(str(( - module.language, - getattr(module, 'py_limited_api', False), - getattr(module, 'np_pythran', False) - )).encode('UTF-8')) - - m.update(compilation_options.get_fingerprint().encode('UTF-8')) + # Include the module attributes that change the compilation result + # in the fingerprint. We do not iterate over module.__dict__ and + # include almost everything here as users might extend Extension + # with arbitrary (random) attributes that would lead to cache + # misses. + m.update(str(( + module.language, + getattr(module, 'py_limited_api', False), + getattr(module, 'np_pythran', False) + )).encode('UTF-8')) + + m.update(compilation_options.get_fingerprint().encode('UTF-8')) return m.hexdigest() except IOError: return None @@ -719,9 +719,9 @@ class DependencyTree(object): finally: del stack[node] - + _dep_tree = None - + def create_dependency_tree(ctx=None, quiet=False): global _dep_tree if _dep_tree is None: @@ -746,15 +746,15 @@ def default_create_extension(template, kwds): # This may be useful for advanced users? -def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=False, language=None, - exclude_failures=False): - if language is not None: - print('Warning: passing language={0!r} to cythonize() is deprecated. ' - 'Instead, put "# distutils: language={0}" in your .pyx or .pxd file(s)'.format(language)) - if exclude is None: - exclude = [] - if patterns is None: - return [], {} +def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=False, language=None, + exclude_failures=False): + if language is not None: + print('Warning: passing language={0!r} to cythonize() is deprecated. ' + 'Instead, put "# distutils: language={0}" in your .pyx or .pxd file(s)'.format(language)) + if exclude is None: + exclude = [] + if patterns is None: + return [], {} elif isinstance(patterns, basestring) or not isinstance(patterns, Iterable): patterns = [patterns] explicit_modules = set([m.name for m in patterns if isinstance(m, Extension)]) @@ -767,17 +767,17 @@ def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet= to_exclude.update(map(os.path.abspath, extended_iglob(pattern))) module_list = [] - module_metadata = {} - - # workaround for setuptools - if 'setuptools' in sys.modules: - Extension_distutils = sys.modules['setuptools.extension']._Extension - Extension_setuptools = sys.modules['setuptools'].Extension - else: - # dummy class, in case we do not have setuptools - Extension_distutils = Extension - class Extension_setuptools(Extension): pass - + module_metadata = {} + + # workaround for setuptools + if 'setuptools' in sys.modules: + Extension_distutils = sys.modules['setuptools.extension']._Extension + Extension_setuptools = sys.modules['setuptools'].Extension + else: + # dummy class, in case we do not have setuptools + Extension_distutils = Extension + class Extension_setuptools(Extension): pass + # if no create_extension() function is defined, use a simple # default function. 
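The `transitive_fingerprint` method restored in the hunk above decides when a cached cythonization result can be reused. A minimal sketch of the idea, with illustrative names rather than the actual API:

```python
# Hash everything that can influence the generated C code: the Cython
# version, the source file, every transitive dependency, the few Extension
# attributes that affect codegen, and the compilation options.
import hashlib

def sketch_fingerprint(cython_version, source_hash, dep_hashes,
                       module_attrs, options_fingerprint):
    m = hashlib.md5(cython_version.encode('UTF-8'))
    m.update(source_hash.encode('UTF-8'))
    for h in sorted(dep_hashes):            # sorted so order cannot matter
        m.update(h.encode('UTF-8'))
    # deliberately not module.__dict__: arbitrary user-added attributes
    # would cause spurious cache misses
    m.update(str(module_attrs).encode('UTF-8'))
    m.update(options_fingerprint.encode('UTF-8'))
    return m.hexdigest()
```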
create_extension = ctx.options.create_extension or default_create_extension @@ -788,11 +788,11 @@ def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet= template = Extension(pattern, []) # Fake Extension without sources name = '*' base = None - ext_language = language - elif isinstance(pattern, (Extension_distutils, Extension_setuptools)): - cython_sources = [s for s in pattern.sources - if os.path.splitext(s)[1] in ('.py', '.pyx')] - if cython_sources: + ext_language = language + elif isinstance(pattern, (Extension_distutils, Extension_setuptools)): + cython_sources = [s for s in pattern.sources + if os.path.splitext(s)[1] in ('.py', '.pyx')] + if cython_sources: filepattern = cython_sources[0] if len(cython_sources) > 1: print("Warning: Multiple cython sources found for extension '%s': %s\n" @@ -805,25 +805,25 @@ def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet= template = pattern name = template.name base = DistutilsInfo(exn=template) - ext_language = None # do not override whatever the Extension says + ext_language = None # do not override whatever the Extension says else: - msg = str("pattern is not of type str nor subclass of Extension (%s)" - " but of type %s and class %s" % (repr(Extension), - type(pattern), - pattern.__class__)) - raise TypeError(msg) + msg = str("pattern is not of type str nor subclass of Extension (%s)" + " but of type %s and class %s" % (repr(Extension), + type(pattern), + pattern.__class__)) + raise TypeError(msg) - for file in nonempty(sorted(extended_iglob(filepattern)), "'%s' doesn't match any files" % filepattern): + for file in nonempty(sorted(extended_iglob(filepattern)), "'%s' doesn't match any files" % filepattern): if os.path.abspath(file) in to_exclude: continue - module_name = deps.fully_qualified_name(file) + module_name = deps.fully_qualified_name(file) if '*' in name: if module_name in explicit_modules: continue elif name: module_name = name - Utils.raise_error_if_module_name_forbidden(module_name) + Utils.raise_error_if_module_name_forbidden(module_name) if module_name not in seen: try: @@ -848,9 +848,9 @@ def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet= sources.append(source) kwds['sources'] = sources - if ext_language and 'language' not in kwds: - kwds['language'] = ext_language - + if ext_language and 'language' not in kwds: + kwds['language'] = ext_language + np_pythran = kwds.pop('np_pythran', False) # Create the new extension @@ -864,7 +864,7 @@ def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet= # generated C file but otherwise has no purpose) module_metadata[module_name] = metadata - if file not in m.sources: + if file not in m.sources: # Old setuptools unconditionally replaces .pyx with .c/.cpp target_file = os.path.splitext(file)[0] + ('.cpp' if m.language == 'c++' else '.c') try: @@ -872,93 +872,93 @@ def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet= except ValueError: # never seen this in the wild, but probably better to warn about this unexpected case print("Warning: Cython source file not found in sources list, adding %s" % file) - m.sources.insert(0, file) + m.sources.insert(0, file) seen.add(name) - return module_list, module_metadata + return module_list, module_metadata # This is the user-exposed entry point. 
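Since the next hunk rebuilds the user-exposed `cythonize()` entry point, a typical `setup.py` call is worth showing; this is a hedged sketch and the paths are hypothetical:

```python
# Illustrative setup.py using cythonize() as documented in the hunk below.
from distutils.core import setup   # setuptools.setup works the same way
from Cython.Build import cythonize

setup(
    ext_modules=cythonize(
        "src/**/*.pyx",                       # glob, expanded recursively
        exclude=["src/experimental/*.pyx"],   # per the 'exclude' parameter
        nthreads=4,                           # parallel cythonization
        compiler_directives={"language_level": 3},
    ),
)
```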
-def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None, +def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None, exclude_failures=False, **options): """ Compile a set of source modules into C/C++ files and return a list of distutils Extension objects for them. - :param module_list: As module list, pass either a glob pattern, a list of glob - patterns or a list of Extension objects. The latter - allows you to configure the extensions separately - through the normal distutils options. - You can also pass Extension objects that have - glob patterns as their sources. Then, cythonize - will resolve the pattern and create a - copy of the Extension for every matching file. - - :param exclude: When passing glob patterns as ``module_list``, you can exclude certain - module names explicitly by passing them into the ``exclude`` option. - - :param nthreads: The number of concurrent builds for parallel compilation - (requires the ``multiprocessing`` module). - - :param aliases: If you want to use compiler directives like ``# distutils: ...`` but - can only know at compile time (when running the ``setup.py``) which values - to use, you can use aliases and pass a dictionary mapping those aliases - to Python strings when calling :func:`cythonize`. As an example, say you - want to use the compiler - directive ``# distutils: include_dirs = ../static_libs/include/`` - but this path isn't always fixed and you want to find it when running - the ``setup.py``. You can then do ``# distutils: include_dirs = MY_HEADERS``, - find the value of ``MY_HEADERS`` in the ``setup.py``, put it in a python - variable called ``foo`` as a string, and then call - ``cythonize(..., aliases={'MY_HEADERS': foo})``. + :param module_list: As module list, pass either a glob pattern, a list of glob + patterns or a list of Extension objects. The latter + allows you to configure the extensions separately + through the normal distutils options. + You can also pass Extension objects that have + glob patterns as their sources. Then, cythonize + will resolve the pattern and create a + copy of the Extension for every matching file. + + :param exclude: When passing glob patterns as ``module_list``, you can exclude certain + module names explicitly by passing them into the ``exclude`` option. + + :param nthreads: The number of concurrent builds for parallel compilation + (requires the ``multiprocessing`` module). + + :param aliases: If you want to use compiler directives like ``# distutils: ...`` but + can only know at compile time (when running the ``setup.py``) which values + to use, you can use aliases and pass a dictionary mapping those aliases + to Python strings when calling :func:`cythonize`. As an example, say you + want to use the compiler + directive ``# distutils: include_dirs = ../static_libs/include/`` + but this path isn't always fixed and you want to find it when running + the ``setup.py``. You can then do ``# distutils: include_dirs = MY_HEADERS``, + find the value of ``MY_HEADERS`` in the ``setup.py``, put it in a python + variable called ``foo`` as a string, and then call + ``cythonize(..., aliases={'MY_HEADERS': foo})``. :param quiet: If True, Cython won't print error, warning, or status messages during the compilation. - :param force: Forces the recompilation of the Cython modules, even if the timestamps - don't indicate that a recompilation is necessary. 
- - :param language: To globally enable C++ mode, you can pass ``language='c++'``. Otherwise, this - will be determined at a per-file level based on compiler directives. This - affects only modules found based on file names. Extension instances passed - into :func:`cythonize` will not be changed. It is recommended to rather - use the compiler directive ``# distutils: language = c++`` than this option. - - :param exclude_failures: For a broad 'try to compile' mode that ignores compilation - failures and simply excludes the failed extensions, - pass ``exclude_failures=True``. Note that this only - really makes sense for compiling ``.py`` files which can also - be used without compilation. - - :param annotate: If ``True``, will produce a HTML file for each of the ``.pyx`` or ``.py`` - files compiled. The HTML file gives an indication - of how much Python interaction there is in - each of the source code lines, compared to plain C code. - It also allows you to see the C/C++ code - generated for each line of Cython code. This report is invaluable when - optimizing a function for speed, - and for determining when to :ref:`release the GIL <nogil>`: - in general, a ``nogil`` block may contain only "white" code. - See examples in :ref:`determining_where_to_add_types` or - :ref:`primes`. - - :param compiler_directives: Allow to set compiler directives in the ``setup.py`` like this: - ``compiler_directives={'embedsignature': True}``. - See :ref:`compiler-directives`. + :param force: Forces the recompilation of the Cython modules, even if the timestamps + don't indicate that a recompilation is necessary. + + :param language: To globally enable C++ mode, you can pass ``language='c++'``. Otherwise, this + will be determined at a per-file level based on compiler directives. This + affects only modules found based on file names. Extension instances passed + into :func:`cythonize` will not be changed. It is recommended to rather + use the compiler directive ``# distutils: language = c++`` than this option. + + :param exclude_failures: For a broad 'try to compile' mode that ignores compilation + failures and simply excludes the failed extensions, + pass ``exclude_failures=True``. Note that this only + really makes sense for compiling ``.py`` files which can also + be used without compilation. + + :param annotate: If ``True``, will produce a HTML file for each of the ``.pyx`` or ``.py`` + files compiled. The HTML file gives an indication + of how much Python interaction there is in + each of the source code lines, compared to plain C code. + It also allows you to see the C/C++ code + generated for each line of Cython code. This report is invaluable when + optimizing a function for speed, + and for determining when to :ref:`release the GIL <nogil>`: + in general, a ``nogil`` block may contain only "white" code. + See examples in :ref:`determining_where_to_add_types` or + :ref:`primes`. + + :param compiler_directives: Allow to set compiler directives in the ``setup.py`` like this: + ``compiler_directives={'embedsignature': True}``. + See :ref:`compiler-directives`. :param depfile: produce depfiles for the sources if True. 
""" - if exclude is None: - exclude = [] + if exclude is None: + exclude = [] if 'include_path' not in options: options['include_path'] = ['.'] if 'common_utility_include_dir' in options: - safe_makedirs(options['common_utility_include_dir']) + safe_makedirs(options['common_utility_include_dir']) depfile = options.pop('depfile', None) - if pythran is None: - pythran_options = None - else: + if pythran is None: + pythran_options = None + else: pythran_options = CompilationOptions(**options) pythran_options.cplus = True pythran_options.np_pythran = True @@ -967,13 +967,13 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, cpp_options = CompilationOptions(**options); cpp_options.cplus = True ctx = c_options.create_context() options = c_options - module_list, module_metadata = create_extension_list( + module_list, module_metadata = create_extension_list( module_list, exclude=exclude, ctx=ctx, quiet=quiet, exclude_failures=exclude_failures, - language=language, + language=language, aliases=aliases) deps = create_dependency_tree(ctx, quiet=quiet) build_dir = getattr(options, 'build_dir', None) @@ -1021,11 +1021,11 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, # setup for out of place build directory if enabled if build_dir: - if os.path.isabs(c_file): - warnings.warn("build_dir has no effect for absolute source paths") + if os.path.isabs(c_file): + warnings.warn("build_dir has no effect for absolute source paths") c_file = os.path.join(build_dir, c_file) dir = os.path.dirname(c_file) - safe_makedirs_once(dir) + safe_makedirs_once(dir) # write out the depfile, if requested if depfile: @@ -1066,8 +1066,8 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, print("Compiling %s because it changed." % source) else: print("Compiling %s because it depends on %s." % (source, dep)) - if not force and options.cache: - fingerprint = deps.transitive_fingerprint(source, m, options) + if not force and options.cache: + fingerprint = deps.transitive_fingerprint(source, m, options) else: fingerprint = None to_compile.append(( @@ -1082,19 +1082,19 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, copy_to_build_dir(source) m.sources = new_sources - if options.cache: + if options.cache: if not os.path.exists(options.cache): os.makedirs(options.cache) to_compile.sort() - # Drop "priority" component of "to_compile" entries and add a - # simple progress indicator. - N = len(to_compile) - progress_fmt = "[{0:%d}/{1}] " % len(str(N)) - for i in range(N): - progress = progress_fmt.format(i+1, N) - to_compile[i] = to_compile[i][1:] + (progress,) - - if N <= 1: + # Drop "priority" component of "to_compile" entries and add a + # simple progress indicator. 
+ N = len(to_compile) + progress_fmt = "[{0:%d}/{1}] " % len(str(N)) + for i in range(N): + progress = progress_fmt.format(i+1, N) + to_compile[i] = to_compile[i][1:] + (progress,) + + if N <= 1: nthreads = 0 if nthreads: # Requires multiprocessing (or Python >= 2.6) @@ -1124,11 +1124,11 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, pool.join() if not nthreads: for args in to_compile: - cythonize_one(*args) + cythonize_one(*args) if exclude_failures: failed_modules = set() - for c_file, modules in modules_by_cfile.items(): + for c_file, modules in modules_by_cfile.items(): if not os.path.exists(c_file): failed_modules.update(modules) elif os.path.getsize(c_file) < 200: @@ -1145,7 +1145,7 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, print("Failed compilations: %s" % ', '.join(sorted([ module.name for module in failed_modules]))) - if options.cache: + if options.cache: cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100)) # cythonize() is often followed by the (non-Python-buffered) # compiler output, flush now to avoid interleaving output. @@ -1185,10 +1185,10 @@ if os.environ.get('XML_RESULTS'): output.close() return with_record else: - def record_results(func): - return func + def record_results(func): + return func + - # TODO: Share context? Issue: pyx processing leaks into pxd module @record_results def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None, @@ -1202,38 +1202,38 @@ def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None, safe_makedirs(options.cache) # Cython-generated c files are highly compressible. # (E.g. a compression ratio of about 10 for Sage). - fingerprint_file_base = join_path( - options.cache, "%s-%s" % (os.path.basename(c_file), fingerprint)) - gz_fingerprint_file = fingerprint_file_base + gzip_ext - zip_fingerprint_file = fingerprint_file_base + '.zip' - if os.path.exists(gz_fingerprint_file) or os.path.exists(zip_fingerprint_file): + fingerprint_file_base = join_path( + options.cache, "%s-%s" % (os.path.basename(c_file), fingerprint)) + gz_fingerprint_file = fingerprint_file_base + gzip_ext + zip_fingerprint_file = fingerprint_file_base + '.zip' + if os.path.exists(gz_fingerprint_file) or os.path.exists(zip_fingerprint_file): if not quiet: - print("%sFound compiled %s in cache" % (progress, pyx_file)) - if os.path.exists(gz_fingerprint_file): - os.utime(gz_fingerprint_file, None) - with contextlib.closing(gzip_open(gz_fingerprint_file, 'rb')) as g: - with contextlib.closing(open(c_file, 'wb')) as f: - shutil.copyfileobj(g, f) - else: - os.utime(zip_fingerprint_file, None) - dirname = os.path.dirname(c_file) - with contextlib.closing(zipfile.ZipFile(zip_fingerprint_file)) as z: - for artifact in z.namelist(): - z.extract(artifact, os.path.join(dirname, artifact)) + print("%sFound compiled %s in cache" % (progress, pyx_file)) + if os.path.exists(gz_fingerprint_file): + os.utime(gz_fingerprint_file, None) + with contextlib.closing(gzip_open(gz_fingerprint_file, 'rb')) as g: + with contextlib.closing(open(c_file, 'wb')) as f: + shutil.copyfileobj(g, f) + else: + os.utime(zip_fingerprint_file, None) + dirname = os.path.dirname(c_file) + with contextlib.closing(zipfile.ZipFile(zip_fingerprint_file)) as z: + for artifact in z.namelist(): + z.extract(artifact, os.path.join(dirname, artifact)) return if not quiet: - print("%sCythonizing %s" % (progress, pyx_file)) + print("%sCythonizing %s" % (progress, pyx_file)) if options is None: options = 
CompilationOptions(default_options) options.output_file = c_file - options.embedded_metadata = embedded_metadata + options.embedded_metadata = embedded_metadata any_failures = 0 try: result = compile_single(pyx_file, options, full_module_name=full_module_name) if result.num_errors > 0: any_failures = 1 - except (EnvironmentError, PyrexError) as e: + except (EnvironmentError, PyrexError) as e: sys.stderr.write('%s\n' % e) any_failures = 1 # XXX @@ -1251,27 +1251,27 @@ def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None, elif os.path.exists(c_file): os.remove(c_file) elif fingerprint: - artifacts = list(filter(None, [ - getattr(result, attr, None) - for attr in ('c_file', 'h_file', 'api_file', 'i_file')])) - if len(artifacts) == 1: - fingerprint_file = gz_fingerprint_file - with contextlib.closing(open(c_file, 'rb')) as f: - with contextlib.closing(gzip_open(fingerprint_file + '.tmp', 'wb')) as g: - shutil.copyfileobj(f, g) - else: - fingerprint_file = zip_fingerprint_file - with contextlib.closing(zipfile.ZipFile( - fingerprint_file + '.tmp', 'w', zipfile_compression_mode)) as zip: - for artifact in artifacts: - zip.write(artifact, os.path.basename(artifact)) - os.rename(fingerprint_file + '.tmp', fingerprint_file) + artifacts = list(filter(None, [ + getattr(result, attr, None) + for attr in ('c_file', 'h_file', 'api_file', 'i_file')])) + if len(artifacts) == 1: + fingerprint_file = gz_fingerprint_file + with contextlib.closing(open(c_file, 'rb')) as f: + with contextlib.closing(gzip_open(fingerprint_file + '.tmp', 'wb')) as g: + shutil.copyfileobj(f, g) + else: + fingerprint_file = zip_fingerprint_file + with contextlib.closing(zipfile.ZipFile( + fingerprint_file + '.tmp', 'w', zipfile_compression_mode)) as zip: + for artifact in artifacts: + zip.write(artifact, os.path.basename(artifact)) + os.rename(fingerprint_file + '.tmp', fingerprint_file) def cythonize_one_helper(m): import traceback try: - return cythonize_one(*m) + return cythonize_one(*m) except Exception: traceback.print_exc() raise diff --git a/contrib/tools/cython/Cython/Build/Distutils.py b/contrib/tools/cython/Cython/Build/Distutils.py index 4008d21f52..3efcc0d7b5 100644 --- a/contrib/tools/cython/Cython/Build/Distutils.py +++ b/contrib/tools/cython/Cython/Build/Distutils.py @@ -1 +1 @@ -from Cython.Distutils.build_ext import build_ext +from Cython.Distutils.build_ext import build_ext diff --git a/contrib/tools/cython/Cython/Build/Inline.py b/contrib/tools/cython/Cython/Build/Inline.py index cd48e49a70..db6d2640a5 100644 --- a/contrib/tools/cython/Cython/Build/Inline.py +++ b/contrib/tools/cython/Cython/Build/Inline.py @@ -15,19 +15,19 @@ from ..Compiler.Main import Context, default_options from ..Compiler.Visitor import CythonTransform, EnvTransform from ..Compiler.ParseTreeTransforms import SkipDeclarations from ..Compiler.TreeFragment import parse_from_strings -from ..Compiler.StringEncoding import _unicode +from ..Compiler.StringEncoding import _unicode from .Dependencies import strip_string_literals, cythonize, cached_function from ..Compiler import Pipeline from ..Utils import get_cython_cache_dir import cython as cython_module - + IS_PY3 = sys.version_info >= (3,) # A utility function to convert user-supplied ASCII strings to unicode. 
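The Inline.py hunks below restore `cython_inline()`, which compiles a code snippet on the fly and resolves unbound names from the caller's frame. Its canonical use, as a small sketch:

```python
# cython_inline builds and caches an extension module for the snippet,
# pulling 'a' and 'b' from the calling frame's locals.
from Cython.Build.Inline import cython_inline

a, b = 10, 20
print(cython_inline("return a + b"))   # -> 30
```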
if not IS_PY3: def to_unicode(s): - if isinstance(s, bytes): + if isinstance(s, bytes): return s.decode('ascii') else: return s @@ -59,7 +59,7 @@ class UnboundSymbols(EnvTransform, SkipDeclarations): super(UnboundSymbols, self).__call__(node) return self.unbound - + @cached_function def unbound_symbols(code, context=None): code = to_unicode(code) @@ -77,9 +77,9 @@ def unbound_symbols(code, context=None): import builtins except ImportError: import __builtin__ as builtins - return tuple(UnboundSymbols()(tree) - set(dir(builtins))) + return tuple(UnboundSymbols()(tree) - set(dir(builtins))) + - def unsafe_type(arg, context=None): py_type = type(arg) if py_type is int: @@ -87,10 +87,10 @@ def unsafe_type(arg, context=None): else: return safe_type(arg, context) - + def safe_type(arg, context=None): py_type = type(arg) - if py_type in (list, tuple, dict, str): + if py_type in (list, tuple, dict, str): return py_type.__name__ elif py_type is complex: return 'double complex' @@ -101,7 +101,7 @@ def safe_type(arg, context=None): elif 'numpy' in sys.modules and isinstance(arg, sys.modules['numpy'].ndarray): return 'numpy.ndarray[numpy.%s_t, ndim=%s]' % (arg.dtype.name, arg.ndim) else: - for base_type in py_type.__mro__: + for base_type in py_type.__mro__: if base_type.__module__ in ('__builtin__', 'builtins'): return 'object' module = context.find_module(base_type.__module__, need_pxd=False) @@ -111,7 +111,7 @@ def safe_type(arg, context=None): return '%s.%s' % (base_type.__module__, base_type.__name__) return 'object' - + def _get_build_extension(): dist = Distribution() # Ensure the build respects distutils configuration by parsing @@ -122,66 +122,66 @@ def _get_build_extension(): build_extension.finalize_options() return build_extension - + @cached_function def _create_context(cython_include_dirs): return Context(list(cython_include_dirs), default_options) - -_cython_inline_cache = {} -_cython_inline_default_context = _create_context(('.',)) - - -def _populate_unbound(kwds, unbound_symbols, locals=None, globals=None): - for symbol in unbound_symbols: - if symbol not in kwds: - if locals is None or globals is None: - calling_frame = inspect.currentframe().f_back.f_back.f_back - if locals is None: - locals = calling_frame.f_locals - if globals is None: - globals = calling_frame.f_globals - if symbol in locals: - kwds[symbol] = locals[symbol] - elif symbol in globals: - kwds[symbol] = globals[symbol] - else: - print("Couldn't find %r" % symbol) - + +_cython_inline_cache = {} +_cython_inline_default_context = _create_context(('.',)) + + +def _populate_unbound(kwds, unbound_symbols, locals=None, globals=None): + for symbol in unbound_symbols: + if symbol not in kwds: + if locals is None or globals is None: + calling_frame = inspect.currentframe().f_back.f_back.f_back + if locals is None: + locals = calling_frame.f_locals + if globals is None: + globals = calling_frame.f_globals + if symbol in locals: + kwds[symbol] = locals[symbol] + elif symbol in globals: + kwds[symbol] = globals[symbol] + else: + print("Couldn't find %r" % symbol) + def _inline_key(orig_code, arg_sigs, language_level): key = orig_code, arg_sigs, sys.version_info, sys.executable, language_level, Cython.__version__ return hashlib.sha1(_unicode(key).encode('utf-8')).hexdigest() -def cython_inline(code, get_type=unsafe_type, - lib_dir=os.path.join(get_cython_cache_dir(), 'inline'), - cython_include_dirs=None, cython_compiler_directives=None, - force=False, quiet=False, locals=None, globals=None, language_level=None, **kwds): - +def 
cython_inline(code, get_type=unsafe_type, + lib_dir=os.path.join(get_cython_cache_dir(), 'inline'), + cython_include_dirs=None, cython_compiler_directives=None, + force=False, quiet=False, locals=None, globals=None, language_level=None, **kwds): + if get_type is None: get_type = lambda x: 'object' - ctx = _create_context(tuple(cython_include_dirs)) if cython_include_dirs else _cython_inline_default_context - + ctx = _create_context(tuple(cython_include_dirs)) if cython_include_dirs else _cython_inline_default_context + cython_compiler_directives = dict(cython_compiler_directives) if cython_compiler_directives else {} if language_level is None and 'language_level' not in cython_compiler_directives: language_level = '3str' if language_level is not None: cython_compiler_directives['language_level'] = language_level - # Fast path if this has been called in this session. - _unbound_symbols = _cython_inline_cache.get(code) - if _unbound_symbols is not None: - _populate_unbound(kwds, _unbound_symbols, locals, globals) - args = sorted(kwds.items()) - arg_sigs = tuple([(get_type(value, ctx), arg) for arg, value in args]) + # Fast path if this has been called in this session. + _unbound_symbols = _cython_inline_cache.get(code) + if _unbound_symbols is not None: + _populate_unbound(kwds, _unbound_symbols, locals, globals) + args = sorted(kwds.items()) + arg_sigs = tuple([(get_type(value, ctx), arg) for arg, value in args]) key_hash = _inline_key(code, arg_sigs, language_level) invoke = _cython_inline_cache.get((code, arg_sigs, key_hash)) - if invoke is not None: - arg_list = [arg[1] for arg in args] - return invoke(*arg_list) - - orig_code = code + if invoke is not None: + arg_list = [arg[1] for arg in args] + return invoke(*arg_list) + + orig_code = code code = to_unicode(code) code, literals = strip_string_literals(code) code = strip_common_indent(code) @@ -190,19 +190,19 @@ def cython_inline(code, get_type=unsafe_type, if globals is None: globals = inspect.currentframe().f_back.f_back.f_globals try: - _cython_inline_cache[orig_code] = _unbound_symbols = unbound_symbols(code) - _populate_unbound(kwds, _unbound_symbols, locals, globals) + _cython_inline_cache[orig_code] = _unbound_symbols = unbound_symbols(code) + _populate_unbound(kwds, _unbound_symbols, locals, globals) except AssertionError: if not quiet: # Parsing from strings not fully supported (e.g. cimports). 
print("Could not parse code as a string (to extract unbound symbols).") - + cimports = [] - for name, arg in list(kwds.items()): + for name, arg in list(kwds.items()): if arg is cython_module: cimports.append('\ncimport cython as %s' % name) del kwds[name] - arg_names = sorted(kwds) + arg_names = sorted(kwds) arg_sigs = tuple([(get_type(kwds[arg], ctx), arg) for arg in arg_names]) key_hash = _inline_key(orig_code, arg_sigs, language_level) module_name = "_cython_inline_" + key_hash @@ -261,11 +261,11 @@ def __invoke(%(params)s): extra_compile_args = cflags) if build_extension is None: build_extension = _get_build_extension() - build_extension.extensions = cythonize( - [extension], - include_path=cython_include_dirs or ['.'], - compiler_directives=cython_compiler_directives, - quiet=quiet) + build_extension.extensions = cythonize( + [extension], + include_path=cython_include_dirs or ['.'], + compiler_directives=cython_compiler_directives, + quiet=quiet) build_extension.build_temp = os.path.dirname(pyx_file) build_extension.build_lib = lib_dir build_extension.run() @@ -281,29 +281,29 @@ def __invoke(%(params)s): # overridden with actual value upon the first cython_inline invocation cython_inline.so_ext = None -_find_non_space = re.compile('[^ ]').search - - +_find_non_space = re.compile('[^ ]').search + + def strip_common_indent(code): min_indent = None - lines = code.splitlines() + lines = code.splitlines() for line in lines: - match = _find_non_space(line) + match = _find_non_space(line) if not match: - continue # blank + continue # blank indent = match.start() if line[indent] == '#': - continue # comment - if min_indent is None or min_indent > indent: + continue # comment + if min_indent is None or min_indent > indent: min_indent = indent for ix, line in enumerate(lines): - match = _find_non_space(line) - if not match or not line or line[indent:indent+1] == '#': + match = _find_non_space(line) + if not match or not line or line[indent:indent+1] == '#': continue - lines[ix] = line[min_indent:] + lines[ix] = line[min_indent:] return '\n'.join(lines) - + module_statement = re.compile(r'^((cdef +(extern|class))|cimport|(from .+ cimport)|(from .+ import +[*]))') def extract_func_code(code): module = [] @@ -331,7 +331,7 @@ except ImportError: all[varargs] = arg_values[len(args):] for name, value in zip(args, arg_values): all[name] = value - for name, value in list(kwd_values.items()): + for name, value in list(kwd_values.items()): if name in args: if name in all: raise TypeError("Duplicate argument %s" % name) @@ -339,7 +339,7 @@ except ImportError: if kwds is not None: all[kwds] = kwd_values elif kwd_values: - raise TypeError("Unexpected keyword arguments: %s" % list(kwd_values)) + raise TypeError("Unexpected keyword arguments: %s" % list(kwd_values)) if defaults is None: defaults = () first_default = len(args) - len(defaults) @@ -351,7 +351,7 @@ except ImportError: raise TypeError("Missing argument: %s" % name) return all - + def get_body(source): ix = source.index(':') if source[:5] == 'lambda': @@ -359,7 +359,7 @@ def get_body(source): else: return source[ix+1:] - + # Lots to be done here... It would be especially cool if compiled functions # could invoke each other quickly. 
class RuntimeCompiledFunction(object): @@ -370,7 +370,7 @@ class RuntimeCompiledFunction(object): def __call__(self, *args, **kwds): all = getcallargs(self._f, *args, **kwds) - if IS_PY3: - return cython_inline(self._body, locals=self._f.__globals__, globals=self._f.__globals__, **all) - else: - return cython_inline(self._body, locals=self._f.func_globals, globals=self._f.func_globals, **all) + if IS_PY3: + return cython_inline(self._body, locals=self._f.__globals__, globals=self._f.__globals__, **all) + else: + return cython_inline(self._body, locals=self._f.func_globals, globals=self._f.func_globals, **all) diff --git a/contrib/tools/cython/Cython/Build/IpythonMagic.py b/contrib/tools/cython/Cython/Build/IpythonMagic.py index 80bba08bfc..7abb97ec70 100644 --- a/contrib/tools/cython/Cython/Build/IpythonMagic.py +++ b/contrib/tools/cython/Cython/Build/IpythonMagic.py @@ -14,7 +14,7 @@ Magic command interface for interactive work with Cython Usage ===== -To enable the magics below, execute ``%load_ext cython``. +To enable the magics below, execute ``%load_ext cython``. ``%%cython`` @@ -41,7 +41,7 @@ Parts of this code were taken from Cython.inline. # # Distributed under the terms of the Modified BSD License. # -# The full license is in the file ipython-COPYING.rst, distributed with this software. +# The full license is in the file ipython-COPYING.rst, distributed with this software. #----------------------------------------------------------------------------- from __future__ import absolute_import, print_function @@ -75,11 +75,11 @@ from distutils.command.build_ext import build_ext from IPython.core import display from IPython.core import magic_arguments from IPython.core.magic import Magics, magics_class, cell_magic -try: - from IPython.paths import get_ipython_cache_dir -except ImportError: - # older IPython version - from IPython.utils.path import get_ipython_cache_dir +try: + from IPython.paths import get_ipython_cache_dir +except ImportError: + # older IPython version + from IPython.utils.path import get_ipython_cache_dir from IPython.utils.text import dedent from ..Shadow import __version__ as cython_version @@ -175,15 +175,15 @@ class CythonMagics(Magics): f.write(cell) if 'pyximport' not in sys.modules or not self._pyximport_installed: import pyximport - pyximport.install() + pyximport.install() self._pyximport_installed = True if module_name in self._reloads: module = self._reloads[module_name] - # Note: reloading extension modules is not actually supported - # (requires PEP-489 reinitialisation support). - # Don't know why this should ever have worked as it reads here. - # All we really need to do is to update the globals below. - #reload(module) + # Note: reloading extension modules is not actually supported + # (requires PEP-489 reinitialisation support). + # Don't know why this should ever have worked as it reads here. + # All we really need to do is to update the globals below. + #reload(module) else: __import__(module_name) module = sys.modules[module_name] @@ -200,14 +200,14 @@ class CythonMagics(Magics): help="Output a C++ rather than C file." ) @magic_arguments.argument( - '-3', dest='language_level', action='store_const', const=3, default=None, - help="Select Python 3 syntax." - ) - @magic_arguments.argument( - '-2', dest='language_level', action='store_const', const=2, default=None, - help="Select Python 2 syntax." - ) - @magic_arguments.argument( + '-3', dest='language_level', action='store_const', const=3, default=None, + help="Select Python 3 syntax." 
+ ) + @magic_arguments.argument( + '-2', dest='language_level', action='store_const', const=2, default=None, + help="Select Python 2 syntax." + ) + @magic_arguments.argument( '-f', '--force', action='store_true', default=False, help="Force the compilation of a new module, even if the source has been " "previously compiled." @@ -233,7 +233,7 @@ class CythonMagics(Magics): ) @magic_arguments.argument( '-L', dest='library_dirs', metavar='dir', action='append', default=[], - help="Add a path to the list of library directories (can be specified " + help="Add a path to the list of library directories (can be specified " "multiple times)." ) @magic_arguments.argument( diff --git a/contrib/tools/cython/Cython/Build/Tests/TestCyCache.py b/contrib/tools/cython/Cython/Build/Tests/TestCyCache.py index a0ed3f389a..a3224b4175 100644 --- a/contrib/tools/cython/Cython/Build/Tests/TestCyCache.py +++ b/contrib/tools/cython/Cython/Build/Tests/TestCyCache.py @@ -1,106 +1,106 @@ -import difflib -import glob -import gzip -import os -import tempfile - -import Cython.Build.Dependencies -import Cython.Utils -from Cython.TestUtils import CythonTest - - -class TestCyCache(CythonTest): - - def setUp(self): - CythonTest.setUp(self) - self.temp_dir = tempfile.mkdtemp( - prefix='cycache-test', - dir='TEST_TMP' if os.path.isdir('TEST_TMP') else None) - self.src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir) - self.cache_dir = tempfile.mkdtemp(prefix='cache', dir=self.temp_dir) - - def cache_files(self, file_glob): - return glob.glob(os.path.join(self.cache_dir, file_glob)) - - def fresh_cythonize(self, *args, **kwargs): - Cython.Utils.clear_function_caches() - Cython.Build.Dependencies._dep_tree = None # discard method caches - Cython.Build.Dependencies.cythonize(*args, **kwargs) - - def test_cycache_switch(self): - content1 = 'value = 1\n' - content2 = 'value = 2\n' - a_pyx = os.path.join(self.src_dir, 'a.pyx') - a_c = a_pyx[:-4] + '.c' - - open(a_pyx, 'w').write(content1) - self.fresh_cythonize(a_pyx, cache=self.cache_dir) - self.fresh_cythonize(a_pyx, cache=self.cache_dir) - self.assertEqual(1, len(self.cache_files('a.c*'))) - a_contents1 = open(a_c).read() - os.unlink(a_c) - - open(a_pyx, 'w').write(content2) - self.fresh_cythonize(a_pyx, cache=self.cache_dir) - a_contents2 = open(a_c).read() - os.unlink(a_c) - - self.assertNotEqual(a_contents1, a_contents2, 'C file not changed!') - self.assertEqual(2, len(self.cache_files('a.c*'))) - - open(a_pyx, 'w').write(content1) - self.fresh_cythonize(a_pyx, cache=self.cache_dir) - self.assertEqual(2, len(self.cache_files('a.c*'))) - a_contents = open(a_c).read() - self.assertEqual( - a_contents, a_contents1, - msg='\n'.join(list(difflib.unified_diff( - a_contents.split('\n'), a_contents1.split('\n')))[:10])) - - def test_cycache_uses_cache(self): - a_pyx = os.path.join(self.src_dir, 'a.pyx') - a_c = a_pyx[:-4] + '.c' - open(a_pyx, 'w').write('pass') - self.fresh_cythonize(a_pyx, cache=self.cache_dir) - a_cache = os.path.join(self.cache_dir, os.listdir(self.cache_dir)[0]) - gzip.GzipFile(a_cache, 'wb').write('fake stuff'.encode('ascii')) - os.unlink(a_c) - self.fresh_cythonize(a_pyx, cache=self.cache_dir) - a_contents = open(a_c).read() - self.assertEqual(a_contents, 'fake stuff', - 'Unexpected contents: %s...' 
% a_contents[:100]) - - def test_multi_file_output(self): - a_pyx = os.path.join(self.src_dir, 'a.pyx') - a_c = a_pyx[:-4] + '.c' - a_h = a_pyx[:-4] + '.h' - a_api_h = a_pyx[:-4] + '_api.h' - open(a_pyx, 'w').write('cdef public api int foo(int x): return x\n') - self.fresh_cythonize(a_pyx, cache=self.cache_dir) - expected = [a_c, a_h, a_api_h] - for output in expected: - self.assertTrue(os.path.exists(output), output) - os.unlink(output) - self.fresh_cythonize(a_pyx, cache=self.cache_dir) - for output in expected: - self.assertTrue(os.path.exists(output), output) - - def test_options_invalidation(self): - hash_pyx = os.path.join(self.src_dir, 'options.pyx') - hash_c = hash_pyx[:-len('.pyx')] + '.c' - - open(hash_pyx, 'w').write('pass') - self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False) - self.assertEqual(1, len(self.cache_files('options.c*'))) - - os.unlink(hash_c) - self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=True) - self.assertEqual(2, len(self.cache_files('options.c*'))) - - os.unlink(hash_c) - self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False, show_version=False) - self.assertEqual(2, len(self.cache_files('options.c*'))) - - os.unlink(hash_c) - self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False, show_version=True) - self.assertEqual(2, len(self.cache_files('options.c*'))) +import difflib +import glob +import gzip +import os +import tempfile + +import Cython.Build.Dependencies +import Cython.Utils +from Cython.TestUtils import CythonTest + + +class TestCyCache(CythonTest): + + def setUp(self): + CythonTest.setUp(self) + self.temp_dir = tempfile.mkdtemp( + prefix='cycache-test', + dir='TEST_TMP' if os.path.isdir('TEST_TMP') else None) + self.src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir) + self.cache_dir = tempfile.mkdtemp(prefix='cache', dir=self.temp_dir) + + def cache_files(self, file_glob): + return glob.glob(os.path.join(self.cache_dir, file_glob)) + + def fresh_cythonize(self, *args, **kwargs): + Cython.Utils.clear_function_caches() + Cython.Build.Dependencies._dep_tree = None # discard method caches + Cython.Build.Dependencies.cythonize(*args, **kwargs) + + def test_cycache_switch(self): + content1 = 'value = 1\n' + content2 = 'value = 2\n' + a_pyx = os.path.join(self.src_dir, 'a.pyx') + a_c = a_pyx[:-4] + '.c' + + open(a_pyx, 'w').write(content1) + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + self.assertEqual(1, len(self.cache_files('a.c*'))) + a_contents1 = open(a_c).read() + os.unlink(a_c) + + open(a_pyx, 'w').write(content2) + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + a_contents2 = open(a_c).read() + os.unlink(a_c) + + self.assertNotEqual(a_contents1, a_contents2, 'C file not changed!') + self.assertEqual(2, len(self.cache_files('a.c*'))) + + open(a_pyx, 'w').write(content1) + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + self.assertEqual(2, len(self.cache_files('a.c*'))) + a_contents = open(a_c).read() + self.assertEqual( + a_contents, a_contents1, + msg='\n'.join(list(difflib.unified_diff( + a_contents.split('\n'), a_contents1.split('\n')))[:10])) + + def test_cycache_uses_cache(self): + a_pyx = os.path.join(self.src_dir, 'a.pyx') + a_c = a_pyx[:-4] + '.c' + open(a_pyx, 'w').write('pass') + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + a_cache = os.path.join(self.cache_dir, os.listdir(self.cache_dir)[0]) + gzip.GzipFile(a_cache, 'wb').write('fake stuff'.encode('ascii')) + os.unlink(a_c) + self.fresh_cythonize(a_pyx, 
cache=self.cache_dir) + a_contents = open(a_c).read() + self.assertEqual(a_contents, 'fake stuff', + 'Unexpected contents: %s...' % a_contents[:100]) + + def test_multi_file_output(self): + a_pyx = os.path.join(self.src_dir, 'a.pyx') + a_c = a_pyx[:-4] + '.c' + a_h = a_pyx[:-4] + '.h' + a_api_h = a_pyx[:-4] + '_api.h' + open(a_pyx, 'w').write('cdef public api int foo(int x): return x\n') + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + expected = [a_c, a_h, a_api_h] + for output in expected: + self.assertTrue(os.path.exists(output), output) + os.unlink(output) + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + for output in expected: + self.assertTrue(os.path.exists(output), output) + + def test_options_invalidation(self): + hash_pyx = os.path.join(self.src_dir, 'options.pyx') + hash_c = hash_pyx[:-len('.pyx')] + '.c' + + open(hash_pyx, 'w').write('pass') + self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False) + self.assertEqual(1, len(self.cache_files('options.c*'))) + + os.unlink(hash_c) + self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=True) + self.assertEqual(2, len(self.cache_files('options.c*'))) + + os.unlink(hash_c) + self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False, show_version=False) + self.assertEqual(2, len(self.cache_files('options.c*'))) + + os.unlink(hash_c) + self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False, show_version=True) + self.assertEqual(2, len(self.cache_files('options.c*'))) diff --git a/contrib/tools/cython/Cython/Build/Tests/TestInline.py b/contrib/tools/cython/Cython/Build/Tests/TestInline.py index 30fab094b5..d209488083 100644 --- a/contrib/tools/cython/Cython/Build/Tests/TestInline.py +++ b/contrib/tools/cython/Cython/Build/Tests/TestInline.py @@ -17,8 +17,8 @@ class TestInline(CythonTest): def setUp(self): CythonTest.setUp(self) self.test_kwds = dict(test_kwds) - if os.path.isdir('TEST_TMP'): - lib_dir = os.path.join('TEST_TMP','inline') + if os.path.isdir('TEST_TMP'): + lib_dir = os.path.join('TEST_TMP','inline') else: lib_dir = tempfile.mkdtemp(prefix='cython_inline_') self.test_kwds['lib_dir'] = lib_dir @@ -45,35 +45,35 @@ class TestInline(CythonTest): a = 1 cdef double b = 2 cdef c = [] - """, **self.test_kwds), dict(a=1, b=2.0, c=[])) + """, **self.test_kwds), dict(a=1, b=2.0, c=[])) def test_def_node(self): - foo = inline("def foo(x): return x * x", **self.test_kwds)['foo'] + foo = inline("def foo(x): return x * x", **self.test_kwds)['foo'] self.assertEqual(foo(7), 49) - def test_class_ref(self): - class Type(object): - pass - tp = inline("Type")['Type'] - self.assertEqual(tp, Type) - + def test_class_ref(self): + class Type(object): + pass + tp = inline("Type")['Type'] + self.assertEqual(tp, Type) + def test_pure(self): import cython as cy b = inline(""" b = cy.declare(float, a) c = cy.declare(cy.pointer(cy.float), &b) return b - """, a=3, **self.test_kwds) + """, a=3, **self.test_kwds) self.assertEqual(type(b), float) - def test_compiler_directives(self): - self.assertEqual( - inline('return sum(x)', - x=[1, 2, 3], - cython_compiler_directives={'boundscheck': False}), - 6 - ) - + def test_compiler_directives(self): + self.assertEqual( + inline('return sum(x)', + x=[1, 2, 3], + cython_compiler_directives={'boundscheck': False}), + 6 + ) + def test_lang_version(self): # GH-3419. Caching for inline code didn't always respect compiler directives. 
inline_divcode = "def f(int a, int b): return a/b" diff --git a/contrib/tools/cython/Cython/Build/Tests/TestIpythonMagic.py b/contrib/tools/cython/Cython/Build/Tests/TestIpythonMagic.py index 9d2e1531a5..24213091b2 100644 --- a/contrib/tools/cython/Cython/Build/Tests/TestIpythonMagic.py +++ b/contrib/tools/cython/Cython/Build/Tests/TestIpythonMagic.py @@ -29,24 +29,24 @@ except ImportError: pass code = u"""\ -def f(x): +def f(x): return 2*x """ cython3_code = u"""\ -def f(int x): - return 2 / x +def f(int x): + return 2 / x -def call(x): - return f(*(x,)) +def call(x): + return f(*(x,)) """ - + pgo_cython3_code = cython3_code + u"""\ def main(): for _ in range(100): call(5) main() """ - + if sys.platform == 'win32': # not using IPython's decorators here because they depend on "nose" @@ -114,34 +114,34 @@ class TestIPythonMagic(CythonTest): ip.ex('import mymodule; g = mymodule.f(10)') self.assertEqual(ip.user_ns['g'], 20.0) - def test_cython_language_level(self): - # The Cython cell defines the functions f() and call(). + def test_cython_language_level(self): + # The Cython cell defines the functions f() and call(). ip = self._ip - ip.run_cell_magic('cython', '', cython3_code) - ip.ex('g = f(10); h = call(10)') - if sys.version_info[0] < 3: - self.assertEqual(ip.user_ns['g'], 2 // 10) - self.assertEqual(ip.user_ns['h'], 2 // 10) - else: - self.assertEqual(ip.user_ns['g'], 2.0 / 10.0) - self.assertEqual(ip.user_ns['h'], 2.0 / 10.0) - - def test_cython3(self): - # The Cython cell defines the functions f() and call(). + ip.run_cell_magic('cython', '', cython3_code) + ip.ex('g = f(10); h = call(10)') + if sys.version_info[0] < 3: + self.assertEqual(ip.user_ns['g'], 2 // 10) + self.assertEqual(ip.user_ns['h'], 2 // 10) + else: + self.assertEqual(ip.user_ns['g'], 2.0 / 10.0) + self.assertEqual(ip.user_ns['h'], 2.0 / 10.0) + + def test_cython3(self): + # The Cython cell defines the functions f() and call(). ip = self._ip - ip.run_cell_magic('cython', '-3', cython3_code) - ip.ex('g = f(10); h = call(10)') - self.assertEqual(ip.user_ns['g'], 2.0 / 10.0) - self.assertEqual(ip.user_ns['h'], 2.0 / 10.0) - - def test_cython2(self): - # The Cython cell defines the functions f() and call(). + ip.run_cell_magic('cython', '-3', cython3_code) + ip.ex('g = f(10); h = call(10)') + self.assertEqual(ip.user_ns['g'], 2.0 / 10.0) + self.assertEqual(ip.user_ns['h'], 2.0 / 10.0) + + def test_cython2(self): + # The Cython cell defines the functions f() and call(). ip = self._ip - ip.run_cell_magic('cython', '-2', cython3_code) - ip.ex('g = f(10); h = call(10)') - self.assertEqual(ip.user_ns['g'], 2 // 10) - self.assertEqual(ip.user_ns['h'], 2 // 10) - + ip.run_cell_magic('cython', '-2', cython3_code) + ip.ex('g = f(10); h = call(10)') + self.assertEqual(ip.user_ns['g'], 2 // 10) + self.assertEqual(ip.user_ns['h'], 2 // 10) + @skip_win32('Skip on Windows') def test_cython3_pgo(self): # The Cython cell defines the functions f() and call(). 
diff --git a/contrib/tools/cython/Cython/Build/Tests/TestStripLiterals.py b/contrib/tools/cython/Cython/Build/Tests/TestStripLiterals.py index 494a4e03b1..a7572a5083 100644 --- a/contrib/tools/cython/Cython/Build/Tests/TestStripLiterals.py +++ b/contrib/tools/cython/Cython/Build/Tests/TestStripLiterals.py @@ -6,10 +6,10 @@ class TestStripLiterals(CythonTest): def t(self, before, expected): actual, literals = strip_string_literals(before, prefix="_L") - self.assertEqual(expected, actual) + self.assertEqual(expected, actual) for key, value in literals.items(): actual = actual.replace(key, value) - self.assertEqual(before, actual) + self.assertEqual(before, actual) def test_empty(self): self.t("", "") diff --git a/contrib/tools/cython/Cython/Build/__init__.py b/contrib/tools/cython/Cython/Build/__init__.py index 38bc609706..d6f3986597 100644 --- a/contrib/tools/cython/Cython/Build/__init__.py +++ b/contrib/tools/cython/Cython/Build/__init__.py @@ -1,2 +1,2 @@ from .Dependencies import cythonize -from .Distutils import build_ext +from .Distutils import build_ext diff --git a/contrib/tools/cython/Cython/CodeWriter.py b/contrib/tools/cython/Cython/CodeWriter.py index 38caa36449..2e4646a654 100644 --- a/contrib/tools/cython/Cython/CodeWriter.py +++ b/contrib/tools/cython/Cython/CodeWriter.py @@ -6,12 +6,12 @@ The output is in a strict format, no whitespace or comments from the input is preserved (and it could not be as it is not present in the code tree). """ -from __future__ import absolute_import, print_function +from __future__ import absolute_import, print_function + +from .Compiler.Visitor import TreeVisitor +from .Compiler.ExprNodes import * + -from .Compiler.Visitor import TreeVisitor -from .Compiler.ExprNodes import * - - class LinesResult(object): def __init__(self): self.lines = [] @@ -32,7 +32,7 @@ class DeclarationWriter(TreeVisitor): indent_string = u" " - def __init__(self, result=None): + def __init__(self, result=None): super(DeclarationWriter, self).__init__() if result is None: result = LinesResult() @@ -51,7 +51,7 @@ class DeclarationWriter(TreeVisitor): def dedent(self): self.numindents -= 1 - def startline(self, s=u""): + def startline(self, s=u""): self.result.put(self.indent_string * self.numindents + s) def put(self, s): @@ -60,7 +60,7 @@ class DeclarationWriter(TreeVisitor): def putline(self, s): self.result.putline(self.indent_string * self.numindents + s) - def endline(self, s=u""): + def endline(self, s=u""): self.result.putline(s) def line(self, s): @@ -500,7 +500,7 @@ class CodeWriter(DeclarationWriter): class PxdWriter(DeclarationWriter): def __call__(self, node): - print(u'\n'.join(self.write(node).lines)) + print(u'\n'.join(self.write(node).lines)) return node def visit_CFuncDefNode(self, node): diff --git a/contrib/tools/cython/Cython/Compiler/Annotate.py b/contrib/tools/cython/Cython/Compiler/Annotate.py index c8a1d9be77..2ea38c00c7 100644 --- a/contrib/tools/cython/Cython/Compiler/Annotate.py +++ b/contrib/tools/cython/Cython/Compiler/Annotate.py @@ -3,21 +3,21 @@ from __future__ import absolute_import import os -import os.path +import os.path import re import codecs import textwrap -from datetime import datetime -from functools import partial -from collections import defaultdict +from datetime import datetime +from functools import partial +from collections import defaultdict try: from xml.sax.saxutils import escape as html_escape except ImportError: pass -try: - from StringIO import StringIO -except ImportError: - from io import StringIO # does not support 
writing 'str' in Py2 +try: + from StringIO import StringIO +except ImportError: + from io import StringIO # does not support writing 'str' in Py2 from . import Version from .Code import CCodeWriter @@ -27,23 +27,23 @@ from .. import Utils class AnnotationCCodeWriter(CCodeWriter): def __init__(self, create_from=None, buffer=None, copy_formatting=True): - CCodeWriter.__init__(self, create_from, buffer, copy_formatting=copy_formatting) + CCodeWriter.__init__(self, create_from, buffer, copy_formatting=copy_formatting) if create_from is None: self.annotation_buffer = StringIO() - self.last_annotated_pos = None - # annotations[filename][line] -> [(column, AnnotationItem)*] - self.annotations = defaultdict(partial(defaultdict, list)) - # code[filename][line] -> str - self.code = defaultdict(partial(defaultdict, str)) - # scopes[filename][line] -> set(scopes) - self.scopes = defaultdict(partial(defaultdict, set)) + self.last_annotated_pos = None + # annotations[filename][line] -> [(column, AnnotationItem)*] + self.annotations = defaultdict(partial(defaultdict, list)) + # code[filename][line] -> str + self.code = defaultdict(partial(defaultdict, str)) + # scopes[filename][line] -> set(scopes) + self.scopes = defaultdict(partial(defaultdict, set)) else: # When creating an insertion point, keep references to the same database self.annotation_buffer = create_from.annotation_buffer self.annotations = create_from.annotations self.code = create_from.code - self.scopes = create_from.scopes - self.last_annotated_pos = create_from.last_annotated_pos + self.scopes = create_from.scopes + self.last_annotated_pos = create_from.last_annotated_pos def create_new(self, create_from, buffer, copy_formatting): return AnnotationCCodeWriter(create_from, buffer, copy_formatting) @@ -52,54 +52,54 @@ class AnnotationCCodeWriter(CCodeWriter): CCodeWriter.write(self, s) self.annotation_buffer.write(s) - def mark_pos(self, pos, trace=True): + def mark_pos(self, pos, trace=True): if pos is not None: - CCodeWriter.mark_pos(self, pos, trace) - if self.funcstate and self.funcstate.scope: - # lambdas and genexprs can result in multiple scopes per line => keep them in a set - self.scopes[pos[0].filename][pos[1]].add(self.funcstate.scope) - if self.last_annotated_pos: - source_desc, line, _ = self.last_annotated_pos - pos_code = self.code[source_desc.filename] - pos_code[line] += self.annotation_buffer.getvalue() + CCodeWriter.mark_pos(self, pos, trace) + if self.funcstate and self.funcstate.scope: + # lambdas and genexprs can result in multiple scopes per line => keep them in a set + self.scopes[pos[0].filename][pos[1]].add(self.funcstate.scope) + if self.last_annotated_pos: + source_desc, line, _ = self.last_annotated_pos + pos_code = self.code[source_desc.filename] + pos_code[line] += self.annotation_buffer.getvalue() self.annotation_buffer = StringIO() - self.last_annotated_pos = pos + self.last_annotated_pos = pos def annotate(self, pos, item): - self.annotations[pos[0].filename][pos[1]].append((pos[2], item)) + self.annotations[pos[0].filename][pos[1]].append((pos[2], item)) def _css(self): """css template will later allow to choose a colormap""" css = [self._css_template] for i in range(255): color = u"FFFF%02x" % int(255/(1+i/10.0)) - css.append('.cython.score-%d {background-color: #%s;}' % (i, color)) + css.append('.cython.score-%d {background-color: #%s;}' % (i, color)) try: from pygments.formatters import HtmlFormatter except ImportError: pass - else: - css.append(HtmlFormatter().get_style_defs('.cython')) - return 
'\n'.join(css) + else: + css.append(HtmlFormatter().get_style_defs('.cython')) + return '\n'.join(css) _css_template = textwrap.dedent(""" body.cython { font-family: courier; font-size: 12; } .cython.tag { } .cython.line { margin: 0em } - .cython.code { font-size: 9; color: #444444; display: none; margin: 0px 0px 0px 8px; border-left: 8px none; } + .cython.code { font-size: 9; color: #444444; display: none; margin: 0px 0px 0px 8px; border-left: 8px none; } + + .cython.line .run { background-color: #B0FFB0; } + .cython.line .mis { background-color: #FFB0B0; } + .cython.code.run { border-left: 8px solid #B0FFB0; } + .cython.code.mis { border-left: 8px solid #FFB0B0; } - .cython.line .run { background-color: #B0FFB0; } - .cython.line .mis { background-color: #FFB0B0; } - .cython.code.run { border-left: 8px solid #B0FFB0; } - .cython.code.mis { border-left: 8px solid #FFB0B0; } - .cython.code .py_c_api { color: red; } .cython.code .py_macro_api { color: #FF7000; } .cython.code .pyx_c_api { color: #FF3000; } .cython.code .pyx_macro_api { color: #FF7000; } .cython.code .refnanny { color: #FFA000; } - .cython.code .trace { color: #FFA000; } + .cython.code .trace { color: #FFA000; } .cython.code .error_goto { color: #FFA000; } .cython.code .coerce { color: #008000; border: 1px dotted #008000 } @@ -117,22 +117,22 @@ class AnnotationCCodeWriter(CCodeWriter): ).replace(' ', '') # poor dev's JS minification ) - def save_annotation(self, source_filename, target_filename, coverage_xml=None): + def save_annotation(self, source_filename, target_filename, coverage_xml=None): with Utils.open_source_file(source_filename) as f: code = f.read() generated_code = self.code.get(source_filename, {}) c_file = Utils.decode_filename(os.path.basename(target_filename)) html_filename = os.path.splitext(target_filename)[0] + ".html" - + with codecs.open(html_filename, "w", encoding="UTF-8") as out_buffer: - out_buffer.write(self._save_annotation(code, generated_code, c_file, source_filename, coverage_xml)) - - def _save_annotation_header(self, c_file, source_filename, coverage_timestamp=None): - coverage_info = '' - if coverage_timestamp: - coverage_info = u' with coverage data from {timestamp}'.format( - timestamp=datetime.fromtimestamp(int(coverage_timestamp) // 1000)) - + out_buffer.write(self._save_annotation(code, generated_code, c_file, source_filename, coverage_xml)) + + def _save_annotation_header(self, c_file, source_filename, coverage_timestamp=None): + coverage_info = '' + if coverage_timestamp: + coverage_info = u' with coverage data from {timestamp}'.format( + timestamp=datetime.fromtimestamp(int(coverage_timestamp) // 1000)) + outlist = [ textwrap.dedent(u'''\ <!DOCTYPE html> @@ -140,20 +140,20 @@ class AnnotationCCodeWriter(CCodeWriter): <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> - <title>Cython: {filename}</title> + <title>Cython: {filename}</title> <style type="text/css"> {css} </style> </head> <body class="cython"> - <p><span style="border-bottom: solid 1px grey;">Generated by Cython {watermark}</span>{more_info}</p> - <p> - <span style="background-color: #FFFF00">Yellow lines</span> hint at Python interaction.<br /> - Click on a line that starts with a "<code>+</code>" to see the C code that Cython generated for it. 
- </p> + <p><span style="border-bottom: solid 1px grey;">Generated by Cython {watermark}</span>{more_info}</p> + <p> + <span style="background-color: #FFFF00">Yellow lines</span> hint at Python interaction.<br /> + Click on a line that starts with a "<code>+</code>" to see the C code that Cython generated for it. + </p> ''').format(css=self._css(), watermark=Version.watermark, - filename=os.path.basename(source_filename) if source_filename else '', - more_info=coverage_info) + filename=os.path.basename(source_filename) if source_filename else '', + more_info=coverage_info) ] if c_file: outlist.append(u'<p>Raw output: <a href="%s">%s</a></p>\n' % (c_file, c_file)) @@ -162,45 +162,45 @@ class AnnotationCCodeWriter(CCodeWriter): def _save_annotation_footer(self): return (u'</body></html>\n',) - def _save_annotation(self, code, generated_code, c_file=None, source_filename=None, coverage_xml=None): + def _save_annotation(self, code, generated_code, c_file=None, source_filename=None, coverage_xml=None): """ lines : original cython source code split by lines generated_code : generated c code keyed by line number in original file target filename : name of the file in which to store the generated html c_file : filename in which the c_code has been written """ - if coverage_xml is not None and source_filename: - coverage_timestamp = coverage_xml.get('timestamp', '').strip() - covered_lines = self._get_line_coverage(coverage_xml, source_filename) - else: - coverage_timestamp = covered_lines = None - annotation_items = dict(self.annotations[source_filename]) - scopes = dict(self.scopes[source_filename]) - + if coverage_xml is not None and source_filename: + coverage_timestamp = coverage_xml.get('timestamp', '').strip() + covered_lines = self._get_line_coverage(coverage_xml, source_filename) + else: + coverage_timestamp = covered_lines = None + annotation_items = dict(self.annotations[source_filename]) + scopes = dict(self.scopes[source_filename]) + outlist = [] - outlist.extend(self._save_annotation_header(c_file, source_filename, coverage_timestamp)) - outlist.extend(self._save_annotation_body(code, generated_code, annotation_items, scopes, covered_lines)) + outlist.extend(self._save_annotation_header(c_file, source_filename, coverage_timestamp)) + outlist.extend(self._save_annotation_body(code, generated_code, annotation_items, scopes, covered_lines)) outlist.extend(self._save_annotation_footer()) return ''.join(outlist) - def _get_line_coverage(self, coverage_xml, source_filename): - coverage_data = None - for entry in coverage_xml.iterfind('.//class'): - if not entry.get('filename'): - continue - if (entry.get('filename') == source_filename or - os.path.abspath(entry.get('filename')) == source_filename): - coverage_data = entry - break - elif source_filename.endswith(entry.get('filename')): - coverage_data = entry # but we might still find a better match... - if coverage_data is None: - return None - return dict( - (int(line.get('number')), int(line.get('hits'))) - for line in coverage_data.iterfind('lines/line') - ) - + def _get_line_coverage(self, coverage_xml, source_filename): + coverage_data = None + for entry in coverage_xml.iterfind('.//class'): + if not entry.get('filename'): + continue + if (entry.get('filename') == source_filename or + os.path.abspath(entry.get('filename')) == source_filename): + coverage_data = entry + break + elif source_filename.endswith(entry.get('filename')): + coverage_data = entry # but we might still find a better match... 
+ if coverage_data is None: + return None + return dict( + (int(line.get('number')), int(line.get('hits'))) + for line in coverage_data.iterfind('lines/line') + ) + def _htmlify_code(self, code): try: from pygments import highlight @@ -215,12 +215,12 @@ class AnnotationCCodeWriter(CCodeWriter): HtmlFormatter(nowrap=True)) return html_code - def _save_annotation_body(self, cython_code, generated_code, annotation_items, scopes, covered_lines=None): + def _save_annotation_body(self, cython_code, generated_code, annotation_items, scopes, covered_lines=None): outlist = [u'<div class="cython">'] pos_comment_marker = u'/* \N{HORIZONTAL ELLIPSIS} */\n' new_calls_map = dict( (name, 0) for name in - 'refnanny trace py_macro_api py_c_api pyx_macro_api pyx_c_api error_goto'.split() + 'refnanny trace py_macro_api py_c_api pyx_macro_api pyx_c_api error_goto'.split() ).copy self.mark_pos(None) @@ -228,13 +228,13 @@ class AnnotationCCodeWriter(CCodeWriter): def annotate(match): group_name = match.lastgroup calls[group_name] += 1 - return u"<span class='%s'>%s</span>" % ( + return u"<span class='%s'>%s</span>" % ( group_name, match.group(group_name)) lines = self._htmlify_code(cython_code).splitlines() lineno_width = len(str(len(lines))) - if not covered_lines: - covered_lines = None + if not covered_lines: + covered_lines = None for k, line in enumerate(lines, 1): try: @@ -259,48 +259,48 @@ class AnnotationCCodeWriter(CCodeWriter): onclick = '' expandsymbol = ' ' - covered = '' - if covered_lines is not None and k in covered_lines: - hits = covered_lines[k] - if hits is not None: - covered = 'run' if hits else 'mis' - + covered = '' + if covered_lines is not None and k in covered_lines: + hits = covered_lines[k] + if hits is not None: + covered = 'run' if hits else 'mis' + outlist.append( - u'<pre class="cython line score-{score}"{onclick}>' + u'<pre class="cython line score-{score}"{onclick}>' # generate line number with expand symbol in front, # and the right number of digit - u'{expandsymbol}<span class="{covered}">{line:0{lineno_width}d}</span>: {code}</pre>\n'.format( + u'{expandsymbol}<span class="{covered}">{line:0{lineno_width}d}</span>: {code}</pre>\n'.format( score=score, expandsymbol=expandsymbol, - covered=covered, + covered=covered, lineno_width=lineno_width, line=k, code=line.rstrip(), onclick=onclick, )) if c_code: - outlist.append(u"<pre class='cython code score-{score} {covered}'>{code}</pre>".format( - score=score, covered=covered, code=c_code)) + outlist.append(u"<pre class='cython code score-{score} {covered}'>{code}</pre>".format( + score=score, covered=covered, code=c_code)) outlist.append(u"</div>") return outlist -_parse_code = re.compile(( - br'(?P<refnanny>__Pyx_X?(?:GOT|GIVE)REF|__Pyx_RefNanny[A-Za-z]+)|' - br'(?P<trace>__Pyx_Trace[A-Za-z]+)|' - br'(?:' - br'(?P<pyx_macro_api>__Pyx_[A-Z][A-Z_]+)|' - br'(?P<pyx_c_api>(?:__Pyx_[A-Z][a-z_][A-Za-z_]*)|__pyx_convert_[A-Za-z_]*)|' - br'(?P<py_macro_api>Py[A-Z][a-z]+_[A-Z][A-Z_]+)|' - br'(?P<py_c_api>Py[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]*)' - br')(?=\()|' # look-ahead to exclude subsequent '(' from replacement +_parse_code = re.compile(( + br'(?P<refnanny>__Pyx_X?(?:GOT|GIVE)REF|__Pyx_RefNanny[A-Za-z]+)|' + br'(?P<trace>__Pyx_Trace[A-Za-z]+)|' + br'(?:' + br'(?P<pyx_macro_api>__Pyx_[A-Z][A-Z_]+)|' + br'(?P<pyx_c_api>(?:__Pyx_[A-Z][a-z_][A-Za-z_]*)|__pyx_convert_[A-Za-z_]*)|' + br'(?P<py_macro_api>Py[A-Z][a-z]+_[A-Z][A-Z_]+)|' + br'(?P<py_c_api>Py[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]*)' + br')(?=\()|' # look-ahead to exclude subsequent '(' from 
replacement br'(?P<error_goto>(?:(?<=;) *if [^;]* +)?__PYX_ERR\([^)]+\))' -).decode('ascii')).sub +).decode('ascii')).sub _replace_pos_comment = re.compile( # this matches what Cython generates as code line marker comment - br'^\s*/\*(?:(?:[^*]|\*[^/])*\n)+\s*\*/\s*\n'.decode('ascii'), + br'^\s*/\*(?:(?:[^*]|\*[^/])*\n)+\s*\*/\s*\n'.decode('ascii'), re.M ).sub diff --git a/contrib/tools/cython/Cython/Compiler/AutoDocTransforms.py b/contrib/tools/cython/Cython/Compiler/AutoDocTransforms.py index 321bac6fba..d3c0a1d0da 100644 --- a/contrib/tools/cython/Cython/Compiler/AutoDocTransforms.py +++ b/contrib/tools/cython/Cython/Compiler/AutoDocTransforms.py @@ -167,7 +167,7 @@ class EmbedSignature(CythonTransform): old_doc = node.py_func.entry.doc else: old_doc = None - new_doc = self._embed_signature(signature, old_doc) + new_doc = self._embed_signature(signature, old_doc) doc_holder.doc = EncodedString(new_doc) if not is_constructor and getattr(node, 'py_func', None) is not None: node.py_func.entry.doc = EncodedString(new_doc) diff --git a/contrib/tools/cython/Cython/Compiler/Buffer.py b/contrib/tools/cython/Cython/Compiler/Buffer.py index 7c9a13fcea..c62a24f568 100644 --- a/contrib/tools/cython/Cython/Compiler/Buffer.py +++ b/contrib/tools/cython/Cython/Compiler/Buffer.py @@ -47,22 +47,22 @@ class IntroduceBufferAuxiliaryVars(CythonTransform): # For all buffers, insert extra variables in the scope. # The variables are also accessible from the buffer_info # on the buffer entry - scope_items = scope.entries.items() - bufvars = [entry for name, entry in scope_items if entry.type.is_buffer] + scope_items = scope.entries.items() + bufvars = [entry for name, entry in scope_items if entry.type.is_buffer] if len(bufvars) > 0: bufvars.sort(key=lambda entry: entry.name) self.buffers_exists = True - memviewslicevars = [entry for name, entry in scope_items if entry.type.is_memoryviewslice] + memviewslicevars = [entry for name, entry in scope_items if entry.type.is_memoryviewslice] if len(memviewslicevars) > 0: self.buffers_exists = True - for (name, entry) in scope_items: + for (name, entry) in scope_items: if name == 'memoryview' and isinstance(entry.utility_code_definition, CythonUtilityCode): self.using_memoryview = True break - del scope_items + del scope_items if isinstance(node, ModuleNode) and len(bufvars) > 0: # for now...note that pos is wrong @@ -138,14 +138,14 @@ def analyse_buffer_options(globalpos, env, posargs, dictargs, defaults=None, nee if defaults is None: defaults = buffer_defaults - posargs, dictargs = Interpreter.interpret_compiletime_options( - posargs, dictargs, type_env=env, type_args=(0, 'dtype')) + posargs, dictargs = Interpreter.interpret_compiletime_options( + posargs, dictargs, type_env=env, type_args=(0, 'dtype')) if len(posargs) > buffer_positional_options_count: raise CompileError(posargs[-1][1], ERR_BUF_TOO_MANY) options = {} - for name, (value, pos) in dictargs.items(): + for name, (value, pos) in dictargs.items(): if not name in buffer_options: raise CompileError(pos, ERR_BUF_OPTION_UNKNOWN % name) options[name] = value @@ -199,14 +199,14 @@ class BufferEntry(object): self.type = entry.type self.cname = entry.buffer_aux.buflocal_nd_var.cname self.buf_ptr = "%s.rcbuffer->pybuffer.buf" % self.cname - self.buf_ptr_type = entry.type.buffer_ptr_type - self.init_attributes() - - def init_attributes(self): - self.shape = self.get_buf_shapevars() - self.strides = self.get_buf_stridevars() - self.suboffsets = self.get_buf_suboffsetvars() - + self.buf_ptr_type = 
entry.type.buffer_ptr_type + self.init_attributes() + + def init_attributes(self): + self.shape = self.get_buf_shapevars() + self.strides = self.get_buf_stridevars() + self.suboffsets = self.get_buf_suboffsetvars() + def get_buf_suboffsetvars(self): return self._for_all_ndim("%s.diminfo[%d].suboffsets") @@ -258,7 +258,7 @@ class BufferEntry(object): defcode = code.globalstate['utility_code_def'] funcgen(protocode, defcode, name=funcname, nd=nd) - buf_ptr_type_code = self.buf_ptr_type.empty_declaration_code() + buf_ptr_type_code = self.buf_ptr_type.empty_declaration_code() ptrcode = "%s(%s, %s, %s)" % (funcname, buf_ptr_type_code, self.buf_ptr, ", ".join(params)) return ptrcode @@ -421,7 +421,7 @@ def put_assign_to_buffer(lhs_cname, rhs_cname, buf_entry, code.putln("}") # Release stack - + def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos, code, negative_indices, in_nogil_context): """ @@ -442,21 +442,21 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, if directives['boundscheck']: # Check bounds and fix negative indices. # We allocate a temporary which is initialized to -1, meaning OK (!). - # If an error occurs, the temp is set to the index dimension the - # error is occurring at. - failed_dim_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False) - code.putln("%s = -1;" % failed_dim_temp) - for dim, (signed, cname, shape) in enumerate(zip(index_signeds, index_cnames, entry.get_buf_shapevars())): + # If an error occurs, the temp is set to the index dimension the + # error is occurring at. + failed_dim_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False) + code.putln("%s = -1;" % failed_dim_temp) + for dim, (signed, cname, shape) in enumerate(zip(index_signeds, index_cnames, entry.get_buf_shapevars())): if signed != 0: # not unsigned, deal with negative index code.putln("if (%s < 0) {" % cname) if negative_indices: code.putln("%s += %s;" % (cname, shape)) code.putln("if (%s) %s = %d;" % ( - code.unlikely("%s < 0" % cname), - failed_dim_temp, dim)) + code.unlikely("%s < 0" % cname), + failed_dim_temp, dim)) else: - code.putln("%s = %d;" % (failed_dim_temp, dim)) + code.putln("%s = %d;" % (failed_dim_temp, dim)) code.put("} else ") # check bounds in positive direction if signed != 0: @@ -465,7 +465,7 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, cast = "(size_t)" code.putln("if (%s) %s = %d;" % ( code.unlikely("%s >= %s%s" % (cname, cast, shape)), - failed_dim_temp, dim)) + failed_dim_temp, dim)) if in_nogil_context: code.globalstate.use_utility_code(raise_indexerror_nogil) @@ -474,14 +474,14 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, code.globalstate.use_utility_code(raise_indexerror_code) func = '__Pyx_RaiseBufferIndexError' - code.putln("if (%s) {" % code.unlikely("%s != -1" % failed_dim_temp)) - code.putln('%s(%s);' % (func, failed_dim_temp)) + code.putln("if (%s) {" % code.unlikely("%s != -1" % failed_dim_temp)) + code.putln('%s(%s);' % (func, failed_dim_temp)) code.putln(code.error_goto(pos)) code.putln('}') - code.funcstate.release_temp(failed_dim_temp) + code.funcstate.release_temp(failed_dim_temp) elif negative_indices: # Only fix negative indices. 
- for signed, cname, shape in zip(index_signeds, index_cnames, entry.get_buf_shapevars()): + for signed, cname, shape in zip(index_signeds, index_cnames, entry.get_buf_shapevars()): if signed != 0: code.putln("if (%s < 0) %s += %s;" % (cname, cname, shape)) @@ -571,14 +571,14 @@ class GetAndReleaseBufferUtilityCode(object): def __hash__(self): return 24342342 - def get_tree(self, **kwargs): pass + def get_tree(self, **kwargs): pass def put_code(self, output): code = output['utility_code_def'] proto_code = output['utility_code_proto'] env = output.module_node.scope cython_scope = env.context.cython_scope - + # Search all types for __getbuffer__ overloads types = [] visited_scopes = set() @@ -628,7 +628,7 @@ def mangle_dtype_name(dtype): prefix = "nn_" else: prefix = "" - return prefix + dtype.specialization_name() + return prefix + dtype.specialization_name() def get_type_information_cname(code, dtype, maxdepth=None): """ @@ -664,7 +664,7 @@ def get_type_information_cname(code, dtype, maxdepth=None): complex_possible = dtype.is_struct_or_union and dtype.can_be_complex() - declcode = dtype.empty_declaration_code() + declcode = dtype.empty_declaration_code() if dtype.is_simple_buffer_dtype(): structinfo_name = "NULL" elif dtype.is_struct: @@ -679,7 +679,7 @@ def get_type_information_cname(code, dtype, maxdepth=None): typecode.putln("static __Pyx_StructField %s[] = {" % structinfo_name, safe=True) for f, typeinfo in zip(fields, types): typecode.putln(' {&%s, "%s", offsetof(%s, %s)},' % - (typeinfo, f.name, dtype.empty_declaration_code(), f.cname), safe=True) + (typeinfo, f.name, dtype.empty_declaration_code(), f.cname), safe=True) typecode.putln(' {NULL, NULL, 0}', safe=True) typecode.putln("};", safe=True) else: diff --git a/contrib/tools/cython/Cython/Compiler/Builtin.py b/contrib/tools/cython/Cython/Compiler/Builtin.py index 32cb3e0e91..5fa717507d 100644 --- a/contrib/tools/cython/Cython/Compiler/Builtin.py +++ b/contrib/tools/cython/Cython/Compiler/Builtin.py @@ -124,12 +124,12 @@ builtin_function_table = [ PyrexTypes.c_double_complex_type, PyrexTypes.c_longdouble_complex_type) ) + [ - BuiltinFunction('abs', "O", "O", "__Pyx_PyNumber_Absolute", - utility_code=UtilityCode.load("py_abs", "Builtins.c")), - #('all', "", "", ""), - #('any', "", "", ""), - #('ascii', "", "", ""), - #('bin', "", "", ""), + BuiltinFunction('abs', "O", "O", "__Pyx_PyNumber_Absolute", + utility_code=UtilityCode.load("py_abs", "Builtins.c")), + #('all', "", "", ""), + #('any', "", "", ""), + #('ascii', "", "", ""), + #('bin', "", "", ""), BuiltinFunction('callable', "O", "b", "__Pyx_PyCallable_Check", utility_code = UtilityCode.load("CallableCheck", "ObjectHandling.c")), #('chr', "", "", ""), @@ -176,26 +176,26 @@ builtin_function_table = [ utility_code = iter_next_utility_code), # not available in Py2 => implemented here #('oct', "", "", ""), #('open', "ss", "O", "PyFile_FromString"), # not in Py3 -] + [ - BuiltinFunction('ord', None, None, "__Pyx_long_cast", - func_type=PyrexTypes.CFuncType( - PyrexTypes.c_long_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)], - is_strict_signature=True)) - for c_type in [PyrexTypes.c_py_ucs4_type, PyrexTypes.c_py_unicode_type] -] + [ - BuiltinFunction('ord', None, None, "__Pyx_uchar_cast", - func_type=PyrexTypes.CFuncType( - PyrexTypes.c_uchar_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)], - is_strict_signature=True)) - for c_type in [PyrexTypes.c_char_type, PyrexTypes.c_schar_type, PyrexTypes.c_uchar_type] -] + [ - BuiltinFunction('ord', None, None, "__Pyx_PyObject_Ord", - 
utility_code=UtilityCode.load_cached("object_ord", "Builtins.c"), - func_type=PyrexTypes.CFuncType( - PyrexTypes.c_long_type, [ - PyrexTypes.CFuncTypeArg("c", PyrexTypes.py_object_type, None) - ], - exception_value="(long)(Py_UCS4)-1")), +] + [ + BuiltinFunction('ord', None, None, "__Pyx_long_cast", + func_type=PyrexTypes.CFuncType( + PyrexTypes.c_long_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)], + is_strict_signature=True)) + for c_type in [PyrexTypes.c_py_ucs4_type, PyrexTypes.c_py_unicode_type] +] + [ + BuiltinFunction('ord', None, None, "__Pyx_uchar_cast", + func_type=PyrexTypes.CFuncType( + PyrexTypes.c_uchar_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)], + is_strict_signature=True)) + for c_type in [PyrexTypes.c_char_type, PyrexTypes.c_schar_type, PyrexTypes.c_uchar_type] +] + [ + BuiltinFunction('ord', None, None, "__Pyx_PyObject_Ord", + utility_code=UtilityCode.load_cached("object_ord", "Builtins.c"), + func_type=PyrexTypes.CFuncType( + PyrexTypes.c_long_type, [ + PyrexTypes.CFuncTypeArg("c", PyrexTypes.py_object_type, None) + ], + exception_value="(long)(Py_UCS4)-1")), BuiltinFunction('pow', "OOO", "O", "PyNumber_Power"), BuiltinFunction('pow', "OO", "O", "__Pyx_PyNumber_Power2", utility_code = UtilityCode.load("pow2", "Builtins.c")), @@ -207,7 +207,7 @@ builtin_function_table = [ #('round', "", "", ""), BuiltinFunction('setattr', "OOO", "r", "PyObject_SetAttr"), #('sum', "", "", ""), - #('sorted', "", "", ""), + #('sorted', "", "", ""), #('type', "O", "O", "PyObject_Type"), #('unichr', "", "", ""), #('unicode', "", "", ""), @@ -219,10 +219,10 @@ builtin_function_table = [ # Put in namespace append optimization. BuiltinFunction('__Pyx_PyObject_Append', "OO", "O", "__Pyx_PyObject_Append"), - - # This is conditionally looked up based on a compiler directive. - BuiltinFunction('__Pyx_Globals', "", "O", "__Pyx_Globals", - utility_code=globals_utility_code), + + # This is conditionally looked up based on a compiler directive. 
+ BuiltinFunction('__Pyx_Globals', "", "O", "__Pyx_Globals", + utility_code=globals_utility_code), ] @@ -339,7 +339,7 @@ builtin_types_table = [ BuiltinMethod("add", "TO", "r", "PySet_Add"), BuiltinMethod("pop", "T", "O", "PySet_Pop")]), ("frozenset", "PyFrozenSet_Type", []), - ("Exception", "((PyTypeObject*)PyExc_Exception)[0]", []), + ("Exception", "((PyTypeObject*)PyExc_Exception)[0]", []), ("StopAsyncIteration", "((PyTypeObject*)__Pyx_PyExc_StopAsyncIteration)[0]", []), ] @@ -396,8 +396,8 @@ def init_builtin_types(): objstruct_cname = 'PyByteArrayObject' elif name == 'bool': objstruct_cname = None - elif name == 'Exception': - objstruct_cname = "PyBaseExceptionObject" + elif name == 'Exception': + objstruct_cname = "PyBaseExceptionObject" elif name == 'StopAsyncIteration': objstruct_cname = "PyBaseExceptionObject" else: @@ -421,11 +421,11 @@ def init_builtins(): init_builtin_structs() init_builtin_types() init_builtin_funcs() - + builtin_scope.declare_var( '__debug__', PyrexTypes.c_const_type(PyrexTypes.c_bint_type), pos=None, cname='(!Py_OptimizeFlag)', is_cdef=True) - + global list_type, tuple_type, dict_type, set_type, frozenset_type global bytes_type, str_type, unicode_type, basestring_type, slice_type global float_type, bool_type, type_type, complex_type, bytearray_type diff --git a/contrib/tools/cython/Cython/Compiler/CmdLine.py b/contrib/tools/cython/Cython/Compiler/CmdLine.py index 6ddc14d76f..a20ab38dc2 100644 --- a/contrib/tools/cython/Cython/Compiler/CmdLine.py +++ b/contrib/tools/cython/Cython/Compiler/CmdLine.py @@ -34,14 +34,14 @@ Options: -D, --no-docstrings Strip docstrings from the compiled module. -a, --annotate Produce a colorized HTML version of the source. - --annotate-coverage <cov.xml> Annotate and include coverage information from cov.xml. + --annotate-coverage <cov.xml> Annotate and include coverage information from cov.xml. --line-directives Produce #line directives pointing to the .pyx source --cplus Output a C++ rather than C file. --embed[=<method_name>] Generate a main() function that embeds the Python interpreter. -2 Compile based on Python-2 syntax and code semantics. -3 Compile based on Python-3 syntax and code semantics. - --3str Compile based on Python-3 syntax and code semantics without - assuming unicode by default for string literals under Python 2. + --3str Compile based on Python-3 syntax and code semantics without + assuming unicode by default for string literals under Python 2. --lenient Change some compile time errors to runtime errors to improve Python compatibility --capi-reexport-cincludes Add cincluded headers to any auto-generated header files. @@ -49,11 +49,11 @@ Options: --warning-errors, -Werror Make all warnings into errors --warning-extra, -Wextra Enable extra warnings -X, --directive <name>=<value>[,<name=value,...] Overrides a compiler directive - -E, --compile-time-env name=value[,<name=value,...] Provides compile time env like DEF would do. + -E, --compile-time-env name=value[,<name=value,...] Provides compile time env like DEF would do. 
""" -# The following experimental options are supported only on MacOSX: +# The following experimental options are supported only on MacOSX: # -C, --compile Compile generated .c file to .o file # --link Link .o file to produce extension module (implies -C) # -+, --cplus Use C++ compiler for compiling and linking @@ -67,26 +67,26 @@ def bad_usage(): def parse_command_line(args): from .Main import CompilationOptions, default_options - pending_arg = [] - + pending_arg = [] + def pop_arg(): - if not args or pending_arg: + if not args or pending_arg: + bad_usage() + if '=' in args[0] and args[0].startswith('--'): # allow "--long-option=xyz" + name, value = args.pop(0).split('=', 1) + pending_arg.append(value) + return name + return args.pop(0) + + def pop_value(default=None): + if pending_arg: + return pending_arg.pop() + elif default is not None: + return default + elif not args: bad_usage() - if '=' in args[0] and args[0].startswith('--'): # allow "--long-option=xyz" - name, value = args.pop(0).split('=', 1) - pending_arg.append(value) - return name - return args.pop(0) - - def pop_value(default=None): - if pending_arg: - return pending_arg.pop() - elif default is not None: - return default - elif not args: - bad_usage() - return args.pop(0) - + return args.pop(0) + def get_param(option): tail = option[2:] if tail: @@ -106,15 +106,15 @@ def parse_command_line(args): elif option in ("-+", "--cplus"): options.cplus = 1 elif option == "--embed": - Options.embed = pop_value("main") + Options.embed = pop_value("main") elif option.startswith("-I"): options.include_path.append(get_param(option)) elif option == "--include-dir": - options.include_path.append(pop_value()) + options.include_path.append(pop_value()) elif option in ("-w", "--working"): - options.working_path = pop_value() + options.working_path = pop_value() elif option in ("-o", "--output-file"): - options.output_file = pop_value() + options.output_file = pop_value() elif option in ("-t", "--timestamps"): options.timestamps = 1 elif option in ("-f", "--force"): @@ -124,16 +124,16 @@ def parse_command_line(args): elif option in ("-p", "--embed-positions"): Options.embed_pos_in_docstring = 1 elif option in ("-z", "--pre-import"): - Options.pre_import = pop_value() + Options.pre_import = pop_value() elif option == "--cleanup": - Options.generate_cleanup_code = int(pop_value()) + Options.generate_cleanup_code = int(pop_value()) elif option in ("-D", "--no-docstrings"): Options.docstrings = False elif option in ("-a", "--annotate"): Options.annotate = True - elif option == "--annotate-coverage": - Options.annotate = True - Options.annotate_coverage_xml = pop_value() + elif option == "--annotate-coverage": + Options.annotate = True + Options.annotate_coverage_xml = pop_value() elif option == "--convert-range": Options.convert_range = True elif option == "--line-directives": @@ -145,22 +145,22 @@ def parse_command_line(args): options.output_dir = os.curdir elif option == "--gdb-outdir": options.gdb_debug = True - options.output_dir = pop_value() + options.output_dir = pop_value() elif option == "--lenient": Options.error_on_unknown_names = False Options.error_on_uninitialized = False - elif option == '--module-name': - options.module_name = pop_arg() - elif option == '--init-suffix': - options.init_suffix = pop_arg() + elif option == '--module-name': + options.module_name = pop_arg() + elif option == '--init-suffix': + options.init_suffix = pop_arg() elif option == '--source-root': Options.source_root = pop_arg() elif option == '-2': 
options.language_level = 2 elif option == '-3': options.language_level = 3 - elif option == '--3str': - options.language_level = '3str' + elif option == '--3str': + options.language_level = '3str' elif option == "--capi-reexport-cincludes": options.capi_reexport_cincludes = True elif option == "--fast-fail": @@ -177,25 +177,25 @@ def parse_command_line(args): if option.startswith('-X') and option[2:].strip(): x_args = option[2:] else: - x_args = pop_value() + x_args = pop_value() try: options.compiler_directives = Options.parse_directive_list( x_args, relaxed_bool=True, current_settings=options.compiler_directives) - except ValueError as e: + except ValueError as e: sys.stderr.write("Error in compiler directive: %s\n" % e.args[0]) sys.exit(1) - elif option == "--compile-time-env" or option.startswith('-E'): - if option.startswith('-E') and option[2:].strip(): - x_args = option[2:] - else: - x_args = pop_value() - try: - options.compile_time_env = Options.parse_compile_time_env( - x_args, current_settings=options.compile_time_env) - except ValueError as e: - sys.stderr.write("Error in compile-time-env: %s\n" % e.args[0]) - sys.exit(1) + elif option == "--compile-time-env" or option.startswith('-E'): + if option.startswith('-E') and option[2:].strip(): + x_args = option[2:] + else: + x_args = pop_value() + try: + options.compile_time_env = Options.parse_compile_time_env( + x_args, current_settings=options.compile_time_env) + except ValueError as e: + sys.stderr.write("Error in compile-time-env: %s\n" % e.args[0]) + sys.exit(1) elif option.startswith('--debug'): option = option[2:].replace('-', '_') from . import DebugFlags @@ -212,10 +212,10 @@ def parse_command_line(args): sys.exit(1) else: sources.append(pop_arg()) - - if pending_arg: - bad_usage() - + + if pending_arg: + bad_usage() + if options.use_listing_file and len(sources) > 1: sys.stderr.write( "cython: Only one source file allowed when using -o\n") diff --git a/contrib/tools/cython/Cython/Compiler/Code.pxd b/contrib/tools/cython/Cython/Compiler/Code.pxd index 4c0e27f15f..acad0c1cf4 100644 --- a/contrib/tools/cython/Cython/Compiler/Code.pxd +++ b/contrib/tools/cython/Cython/Compiler/Code.pxd @@ -2,7 +2,7 @@ from __future__ import absolute_import cimport cython -from ..StringIOTree cimport StringIOTree +from ..StringIOTree cimport StringIOTree cdef class UtilityCodeBase(object): @@ -27,7 +27,7 @@ cdef class UtilityCode(UtilityCodeBase): cdef class FunctionState: cdef public set names_taken cdef public object owner - cdef public object scope + cdef public object scope cdef public object error_label cdef public size_t label_counter @@ -39,11 +39,11 @@ cdef class FunctionState: cdef public object return_from_error_cleanup_label # not used in __init__ ? 
- cdef public object exc_vars + cdef public object exc_vars cdef public object current_except cdef public bint in_try_finally cdef public bint can_trace - cdef public bint gil_owned + cdef public bint gil_owned cdef public list temps_allocated cdef public dict temps_free @@ -97,28 +97,28 @@ cdef class StringConst: #def funccontext_property(name): -cdef class CCodeWriter(object): - cdef readonly StringIOTree buffer - cdef readonly list pyclass_stack - cdef readonly object globalstate - cdef readonly object funcstate - cdef object code_config - cdef object last_pos - cdef object last_marked_pos - cdef Py_ssize_t level - cdef public Py_ssize_t call_level # debug-only, see Nodes.py - cdef bint bol - - cpdef write(self, s) - cpdef put(self, code) - cpdef put_safe(self, code) - cpdef putln(self, code=*, bint safe=*) - @cython.final - cdef increase_indent(self) - @cython.final - cdef decrease_indent(self) - - +cdef class CCodeWriter(object): + cdef readonly StringIOTree buffer + cdef readonly list pyclass_stack + cdef readonly object globalstate + cdef readonly object funcstate + cdef object code_config + cdef object last_pos + cdef object last_marked_pos + cdef Py_ssize_t level + cdef public Py_ssize_t call_level # debug-only, see Nodes.py + cdef bint bol + + cpdef write(self, s) + cpdef put(self, code) + cpdef put_safe(self, code) + cpdef putln(self, code=*, bint safe=*) + @cython.final + cdef increase_indent(self) + @cython.final + cdef decrease_indent(self) + + cdef class PyrexCodeWriter: cdef public object f cdef public Py_ssize_t level diff --git a/contrib/tools/cython/Cython/Compiler/Code.py b/contrib/tools/cython/Cython/Compiler/Code.py index 97e97a12d3..f43c4b2b8e 100644 --- a/contrib/tools/cython/Cython/Compiler/Code.py +++ b/contrib/tools/cython/Cython/Compiler/Code.py @@ -1,5 +1,5 @@ # cython: language_level = 2 -# cython: auto_pickle=False +# cython: auto_pickle=False # # Code output module # @@ -19,10 +19,10 @@ import shutil import sys import operator import textwrap -from string import Template -from functools import partial -from contextlib import closing -from collections import defaultdict +from string import Template +from functools import partial +from contextlib import closing +from collections import defaultdict try: import hashlib @@ -33,7 +33,7 @@ from . import Naming from . import Options from . import DebugFlags from . import StringEncoding -from . import Version +from . import Version from .. import Utils from .Scanning import SourceDescriptor from ..StringIOTree import StringIOTree @@ -43,7 +43,7 @@ try: except ImportError: from builtins import str as basestring -KEYWORDS_MUST_BE_BYTES = sys.version_info < (2, 7) +KEYWORDS_MUST_BE_BYTES = sys.version_info < (2, 7) non_portable_builtins_map = { @@ -53,22 +53,22 @@ non_portable_builtins_map = { 'basestring' : ('PY_MAJOR_VERSION >= 3', 'str'), 'xrange' : ('PY_MAJOR_VERSION >= 3', 'range'), 'raw_input' : ('PY_MAJOR_VERSION >= 3', 'input'), -} - -ctypedef_builtins_map = { - # types of builtins in "ctypedef class" statements which we don't - # import either because the names conflict with C types or because - # the type simply is not exposed. - 'py_int' : '&PyInt_Type', - 'py_long' : '&PyLong_Type', - 'py_float' : '&PyFloat_Type', - 'wrapper_descriptor' : '&PyWrapperDescr_Type', -} - +} + +ctypedef_builtins_map = { + # types of builtins in "ctypedef class" statements which we don't + # import either because the names conflict with C types or because + # the type simply is not exposed. 
+ 'py_int' : '&PyInt_Type', + 'py_long' : '&PyLong_Type', + 'py_float' : '&PyFloat_Type', + 'wrapper_descriptor' : '&PyWrapperDescr_Type', +} + basicsize_builtins_map = { # builtins whose type has a different tp_basicsize than sizeof(...) - 'PyTypeObject': 'PyHeapTypeObject', -} + 'PyTypeObject': 'PyHeapTypeObject', +} uncachable_builtins = [ # Global/builtin names that cannot be cached because they may or may not @@ -107,15 +107,15 @@ uncachable_builtins = [ 'WindowsError', ## - others '_', # e.g. used by gettext -] - -special_py_methods = set([ - '__cinit__', '__dealloc__', '__richcmp__', '__next__', - '__await__', '__aiter__', '__anext__', - '__getreadbuffer__', '__getwritebuffer__', '__getsegcount__', - '__getcharbuffer__', '__getbuffer__', '__releasebuffer__' -]) - +] + +special_py_methods = set([ + '__cinit__', '__dealloc__', '__richcmp__', '__next__', + '__await__', '__aiter__', '__anext__', + '__getreadbuffer__', '__getwritebuffer__', '__getsegcount__', + '__getcharbuffer__', '__getbuffer__', '__releasebuffer__' +]) + modifier_output_mapper = { 'inline': 'CYTHON_INLINE' }.get @@ -246,7 +246,7 @@ class UtilityCodeBase(object): del tags['substitute'] try: code = Template(code).substitute(vars(Naming)) - except (KeyError, ValueError) as e: + except (KeyError, ValueError) as e: raise RuntimeError("Error parsing templated utility code of type '%s' at line %d: %s" % ( type, begin_lineno, e)) @@ -281,23 +281,23 @@ class UtilityCodeBase(object): if ext in ('.pyx', '.py', '.pxd', '.pxi'): comment = '#' strip_comments = partial(re.compile(r'^\s*#(?!\s*cython\s*:).*').sub, '') - rstrip = StringEncoding._unicode.rstrip + rstrip = StringEncoding._unicode.rstrip else: comment = '/' - strip_comments = partial(re.compile(r'^\s*//.*|/\*[^*]*\*/').sub, '') - rstrip = partial(re.compile(r'\s+(\\?)$').sub, r'\1') + strip_comments = partial(re.compile(r'^\s*//.*|/\*[^*]*\*/').sub, '') + rstrip = partial(re.compile(r'\s+(\\?)$').sub, r'\1') match_special = re.compile( (r'^%(C)s{5,30}\s*(?P<name>(?:\w|\.)+)\s*%(C)s{5,30}|' - r'^%(C)s+@(?P<tag>\w+)\s*:\s*(?P<value>(?:\w|[.:])+)') % - {'C': comment}).match - match_type = re.compile(r'(.+)[.](proto(?:[.]\S+)?|impl|init|cleanup)$').match + r'^%(C)s+@(?P<tag>\w+)\s*:\s*(?P<value>(?:\w|[.:])+)') % + {'C': comment}).match + match_type = re.compile(r'(.+)[.](proto(?:[.]\S+)?|impl|init|cleanup)$').match - with closing(Utils.open_source_file(filename, encoding='UTF-8')) as f: + with closing(Utils.open_source_file(filename, encoding='UTF-8')) as f: all_lines = f.readlines() utilities = defaultdict(lambda: [None, None, {}]) lines = [] - tags = defaultdict(set) + tags = defaultdict(set) utility = type = None begin_lineno = 0 @@ -317,12 +317,12 @@ class UtilityCodeBase(object): name, type = mtype.groups() else: type = 'impl' - utility = utilities[name] + utility = utilities[name] else: - tags[m.group('tag')].add(m.group('value')) - lines.append('') # keep line number correct + tags[m.group('tag')].add(m.group('value')) + lines.append('') # keep line number correct else: - lines.append(rstrip(strip_comments(line))) + lines.append(rstrip(strip_comments(line))) if utility is None: raise ValueError("Empty utility code file") @@ -330,7 +330,7 @@ class UtilityCodeBase(object): # Don't forget to add the last utility code cls._add_utility(utility, type, lines, begin_lineno, tags) - utilities = dict(utilities) # un-defaultdict-ify + utilities = dict(utilities) # un-defaultdict-ify cls._utility_cache[path] = utilities return utilities @@ -356,12 +356,12 @@ class 
UtilityCodeBase(object): global __loader__ loader = __loader__ archive = loader.archive - with closing(zipfile.ZipFile(archive)) as fileobj: - listing = [os.path.basename(name) - for name in fileobj.namelist() - if os.path.join(archive, name).startswith(utility_dir)] - files = [filename for filename in listing - if filename.startswith(prefix)] + with closing(zipfile.ZipFile(archive)) as fileobj: + listing = [os.path.basename(name) + for name in fileobj.namelist() + if os.path.join(archive, name).startswith(utility_dir)] + files = [filename for filename in listing + if filename.startswith(prefix)] if not files: raise ValueError("No match found for utility code " + util_code_name) if len(files) > 1: @@ -434,16 +434,16 @@ class UtilityCodeBase(object): return code_string def __str__(self): - return "<%s(%s)>" % (type(self).__name__, self.name) + return "<%s(%s)>" % (type(self).__name__, self.name) - def get_tree(self, **kwargs): + def get_tree(self, **kwargs): pass - def __deepcopy__(self, memodict=None): - # No need to deep-copy utility code since it's essentially immutable. - return self + def __deepcopy__(self, memodict=None): + # No need to deep-copy utility code since it's essentially immutable. + return self + - class UtilityCode(UtilityCodeBase): """ Stores utility code to add during code generation. @@ -483,8 +483,8 @@ class UtilityCode(UtilityCodeBase): def __eq__(self, other): if self is other: return True - self_type, other_type = type(self), type(other) - if self_type is not other_type and not (isinstance(other, self_type) or isinstance(self, other_type)): + self_type, other_type = type(self), type(other) + if self_type is not other_type and not (isinstance(other, self_type) or isinstance(self, other_type)): return False self_proto = getattr(self, 'proto', None) @@ -503,7 +503,7 @@ class UtilityCode(UtilityCodeBase): # Dicts aren't hashable... name = self.name if pyrex_type is not None: - data['type'] = pyrex_type.empty_declaration_code() + data['type'] = pyrex_type.empty_declaration_code() data['type_name'] = pyrex_type.specialization_name() name = "%s[%s]" % (name, data['type_name']) key = tuple(sorted(data.items())) @@ -516,11 +516,11 @@ class UtilityCode(UtilityCodeBase): requires = [r.specialize(data) for r in self.requires] s = self._cache[key] = UtilityCode( - self.none_or_sub(self.proto, data), - self.none_or_sub(self.impl, data), - self.none_or_sub(self.init, data), - self.none_or_sub(self.cleanup, data), - requires, + self.none_or_sub(self.proto, data), + self.none_or_sub(self.impl, data), + self.none_or_sub(self.init, data), + self.none_or_sub(self.cleanup, data), + requires, self.proto_block, name, ) @@ -532,8 +532,8 @@ class UtilityCode(UtilityCodeBase): """Replace 'PYIDENT("xyz")' by a constant Python identifier cname. """ if 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl: - return False, impl - + return False, impl + replacements = {} def externalise(matchobj): key = matchobj.groups() @@ -549,18 +549,18 @@ class UtilityCode(UtilityCodeBase): assert 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl return True, impl - def inject_unbound_methods(self, impl, output): - """Replace 'UNBOUND_METHOD(type, "name")' by a constant Python identifier cname. - """ - if 'CALL_UNBOUND_METHOD(' not in impl: - return False, impl - - def externalise(matchobj): + def inject_unbound_methods(self, impl, output): + """Replace 'UNBOUND_METHOD(type, "name")' by a constant Python identifier cname. 
+ """ + if 'CALL_UNBOUND_METHOD(' not in impl: + return False, impl + + def externalise(matchobj): type_cname, method_name, obj_cname, args = matchobj.groups() args = [arg.strip() for arg in args[1:].split(',')] if args else [] assert len(args) < 3, "CALL_UNBOUND_METHOD() does not support %d call arguments" % len(args) return output.cached_unbound_method_call_code(obj_cname, type_cname, method_name, args) - + impl = re.sub( r'CALL_UNBOUND_METHOD\(' r'([a-zA-Z_]+),' # type cname @@ -568,46 +568,46 @@ class UtilityCode(UtilityCodeBase): r'\s*([^),]+)' # object cname r'((?:,\s*[^),]+)*)' # args* r'\)', externalise, impl) - assert 'CALL_UNBOUND_METHOD(' not in impl - + assert 'CALL_UNBOUND_METHOD(' not in impl + return True, impl - - def wrap_c_strings(self, impl): - """Replace CSTRING('''xyz''') by a C compatible string - """ - if 'CSTRING(' not in impl: - return impl - - def split_string(matchobj): - content = matchobj.group(1).replace('"', '\042') - return ''.join( - '"%s\\n"\n' % line if not line.endswith('\\') or line.endswith('\\\\') else '"%s"\n' % line[:-1] - for line in content.splitlines()) - - impl = re.sub(r'CSTRING\(\s*"""([^"]*(?:"[^"]+)*)"""\s*\)', split_string, impl) - assert 'CSTRING(' not in impl - return impl - + + def wrap_c_strings(self, impl): + """Replace CSTRING('''xyz''') by a C compatible string + """ + if 'CSTRING(' not in impl: + return impl + + def split_string(matchobj): + content = matchobj.group(1).replace('"', '\042') + return ''.join( + '"%s\\n"\n' % line if not line.endswith('\\') or line.endswith('\\\\') else '"%s"\n' % line[:-1] + for line in content.splitlines()) + + impl = re.sub(r'CSTRING\(\s*"""([^"]*(?:"[^"]+)*)"""\s*\)', split_string, impl) + assert 'CSTRING(' not in impl + return impl + def put_code(self, output): if self.requires: for dependency in self.requires: output.use_utility_code(dependency) if self.proto: - writer = output[self.proto_block] - writer.putln("/* %s.proto */" % self.name) - writer.put_or_include( - self.format_code(self.proto), '%s_proto' % self.name) + writer = output[self.proto_block] + writer.putln("/* %s.proto */" % self.name) + writer.put_or_include( + self.format_code(self.proto), '%s_proto' % self.name) if self.impl: - impl = self.format_code(self.wrap_c_strings(self.impl)) - is_specialised1, impl = self.inject_string_constants(impl, output) - is_specialised2, impl = self.inject_unbound_methods(impl, output) - writer = output['utility_code_def'] - writer.putln("/* %s */" % self.name) - if not (is_specialised1 or is_specialised2): + impl = self.format_code(self.wrap_c_strings(self.impl)) + is_specialised1, impl = self.inject_string_constants(impl, output) + is_specialised2, impl = self.inject_unbound_methods(impl, output) + writer = output['utility_code_def'] + writer.putln("/* %s */" % self.name) + if not (is_specialised1 or is_specialised2): # no module specific adaptations => can be reused - writer.put_or_include(impl, '%s_impl' % self.name) + writer.put_or_include(impl, '%s_impl' % self.name) else: - writer.put(impl) + writer.put(impl) if self.init: writer = output['init_globals'] writer.putln("/* %s.init */" % self.name) @@ -619,7 +619,7 @@ class UtilityCode(UtilityCodeBase): writer.putln() if self.cleanup and Options.generate_cleanup_code: writer = output['cleanup_globals'] - writer.putln("/* %s.cleanup */" % self.name) + writer.putln("/* %s.cleanup */" % self.name) if isinstance(self.cleanup, basestring): writer.put_or_include( self.format_code(self.cleanup), @@ -641,7 +641,7 @@ def sub_tempita(s, context, 
file=None, name=None): from ..Tempita import sub return sub(s, **context) - + class TempitaUtilityCode(UtilityCode): def __init__(self, name=None, proto=None, impl=None, init=None, file=None, context=None, **kwargs): if context is None: @@ -652,18 +652,18 @@ class TempitaUtilityCode(UtilityCode): super(TempitaUtilityCode, self).__init__( proto, impl, init=init, name=name, file=file, **kwargs) - @classmethod - def load_cached(cls, utility_code_name, from_file=None, context=None, __cache={}): - context_key = tuple(sorted(context.items())) if context else None - assert hash(context_key) is not None # raise TypeError if not hashable - key = (cls, from_file, utility_code_name, context_key) - try: - return __cache[key] - except KeyError: - pass - code = __cache[key] = cls.load(utility_code_name, from_file, context=context) - return code - + @classmethod + def load_cached(cls, utility_code_name, from_file=None, context=None, __cache={}): + context_key = tuple(sorted(context.items())) if context else None + assert hash(context_key) is not None # raise TypeError if not hashable + key = (cls, from_file, utility_code_name, context_key) + try: + return __cache[key] + except KeyError: + pass + code = __cache[key] = cls.load(utility_code_name, from_file, context=context) + return code + def none_or_sub(self, s, context): """ Format a string in this utility code with context. If None, do nothing. @@ -678,7 +678,7 @@ class LazyUtilityCode(UtilityCodeBase): Utility code that calls a callback with the root code writer when available. Useful when you only have 'env' but not 'code'. """ - __name__ = '<lazy>' + __name__ = '<lazy>' requires = None def __init__(self, callback): @@ -699,13 +699,13 @@ class FunctionState(object): # in_try_finally boolean inside try of try...finally # exc_vars (string * 3) exception variables for reraise, or None # can_trace boolean line tracing is supported in the current context - # scope Scope the scope object of the current function + # scope Scope the scope object of the current function # Not used for now, perhaps later - def __init__(self, owner, names_taken=set(), scope=None): + def __init__(self, owner, names_taken=set(), scope=None): self.names_taken = names_taken self.owner = owner - self.scope = scope + self.scope = scope self.error_label = None self.label_counter = 0 @@ -720,7 +720,7 @@ class FunctionState(object): self.exc_vars = None self.current_except = None self.can_trace = False - self.gil_owned = True + self.gil_owned = True self.temps_allocated = [] # of (name, type, manage_ref, static) self.temps_free = {} # (type, manage_ref) -> list of free vars with same type/managed status @@ -846,10 +846,10 @@ class FunctionState(object): A C string referring to the variable is returned. """ - if type.is_const and not type.is_reference: + if type.is_const and not type.is_reference: type = type.const_base_type - elif type.is_reference and not type.is_fake_reference: - type = type.ref_base_type + elif type.is_reference and not type.is_fake_reference: + type = type.ref_base_type elif type.is_cfunction: from . 
import PyrexTypes type = PyrexTypes.c_ptr_type(type) # A function itself isn't an l-value @@ -860,13 +860,13 @@ class FunctionState(object): freelist = self.temps_free.get((type, manage_ref)) if reusable and freelist is not None and freelist[0]: - result = freelist[0].pop() - freelist[1].remove(result) + result = freelist[0].pop() + freelist[1].remove(result) else: while True: self.temp_counter += 1 result = "%s%d" % (Naming.codewriter_temp_prefix, self.temp_counter) - if result not in self.names_taken: break + if result not in self.names_taken: break self.temps_allocated.append((result, type, manage_ref, static)) if not reusable: self.zombie_temps.add(result) @@ -887,13 +887,13 @@ class FunctionState(object): type, manage_ref = self.temps_used_type[name] freelist = self.temps_free.get((type, manage_ref)) if freelist is None: - freelist = ([], set()) # keep order in list and make lookups in set fast + freelist = ([], set()) # keep order in list and make lookups in set fast self.temps_free[(type, manage_ref)] = freelist - if name in freelist[1]: + if name in freelist[1]: raise RuntimeError("Temp %s freed twice!" % name) if name not in self.zombie_temps: freelist[0].append(name) - freelist[1].add(name) + freelist[1].add(name) if DebugFlags.debug_temp_code_comments: self.owner.putln("/* %s released %s*/" % ( name, " - zombie" if name in self.zombie_temps else "")) @@ -905,7 +905,7 @@ class FunctionState(object): used = [] for name, type, manage_ref, static in self.temps_allocated: freelist = self.temps_free.get((type, manage_ref)) - if freelist is None or name not in freelist[1]: + if freelist is None or name not in freelist[1]: used.append((name, type, manage_ref and type.is_pyobject)) return used @@ -922,8 +922,8 @@ class FunctionState(object): """Return a list of (cname, type) tuples of refcount-managed Python objects. """ return [(cname, type) - for cname, type, manage_ref, static in self.temps_allocated - if manage_ref] + for cname, type, manage_ref, static in self.temps_allocated + if manage_ref] def all_free_managed_temps(self): """Return a list of (cname, type) tuples of refcount-managed Python @@ -931,11 +931,11 @@ class FunctionState(object): try-except and try-finally blocks to clean up temps in the error case. """ - return sorted([ # Enforce deterministic order. - (cname, type) - for (type, manage_ref), freelist in self.temps_free.items() if manage_ref - for cname in freelist[0] - ]) + return sorted([ # Enforce deterministic order. 
+ (cname, type) + for (type, manage_ref), freelist in self.temps_free.items() if manage_ref + for cname in freelist[0] + ]) def start_collecting_temps(self): """ @@ -979,7 +979,7 @@ class PyObjectConst(object): cython.declare(possible_unicode_identifier=object, possible_bytes_identifier=object, replace_identifier=object, find_alphanums=object) -possible_unicode_identifier = re.compile(br"(?![0-9])\w+$".decode('ascii'), re.U).match +possible_unicode_identifier = re.compile(br"(?![0-9])\w+$".decode('ascii'), re.U).match possible_bytes_identifier = re.compile(r"(?![0-9])\w+$".encode('ASCII')).match replace_identifier = re.compile(r'[^a-zA-Z0-9_]+').sub find_alphanums = re.compile('([a-zA-Z0-9]+)').findall @@ -1000,7 +1000,7 @@ class StringConst(object): def add_py_version(self, version): if not version: - self.py_versions = [2, 3] + self.py_versions = [2, 3] elif version not in self.py_versions: self.py_versions.append(version) @@ -1036,9 +1036,9 @@ class StringConst(object): if identifier: intern = True elif identifier is None: - if isinstance(text, bytes): - intern = bool(possible_bytes_identifier(text)) - else: + if isinstance(text, bytes): + intern = bool(possible_bytes_identifier(text)) + else: intern = bool(possible_unicode_identifier(text)) else: intern = False @@ -1129,7 +1129,7 @@ class GlobalState(object): 'typeinfo', 'before_global_var', 'global_var', - 'string_decls', + 'string_decls', 'decls', 'late_includes', 'all_the_rest', @@ -1146,14 +1146,14 @@ class GlobalState(object): ] - def __init__(self, writer, module_node, code_config, common_utility_include_dir=None): + def __init__(self, writer, module_node, code_config, common_utility_include_dir=None): self.filename_table = {} self.filename_list = [] self.input_file_contents = {} self.utility_codes = set() self.declared_cnames = {} self.in_utility_code_generation = False - self.code_config = code_config + self.code_config = code_config self.common_utility_include_dir = common_utility_include_dir self.parts = {} self.module_node = module_node # because some utility code generation needs it @@ -1161,14 +1161,14 @@ class GlobalState(object): self.const_cnames_used = {} self.string_const_index = {} - self.dedup_const_index = {} + self.dedup_const_index = {} self.pyunicode_ptr_const_index = {} self.num_const_index = {} self.py_constants = [] - self.cached_cmethods = {} - self.initialised_constants = set() + self.cached_cmethods = {} + self.initialised_constants = set() - writer.set_global_state(self) + writer.set_global_state(self) self.rootwriter = writer def initialize_main_c_code(self): @@ -1181,19 +1181,19 @@ class GlobalState(object): else: w = self.parts['cached_builtins'] w.enter_cfunc_scope() - w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {") + w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {") w = self.parts['cached_constants'] w.enter_cfunc_scope() w.putln("") - w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {") + w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {") w.put_declare_refcount_context() w.put_setup_refcount_context("__Pyx_InitCachedConstants") w = self.parts['init_globals'] w.enter_cfunc_scope() w.putln("") - w.putln("static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {") + w.putln("static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {") if not Options.generate_cleanup_code: del self.parts['cleanup_globals'] @@ -1201,17 +1201,17 @@ class GlobalState(object): w = self.parts['cleanup_globals'] w.enter_cfunc_scope() 
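The enter_cfunc_scope() calls here write into named sections of GlobalState.parts. Each part is an independent tree buffer that can be filled in any order during code generation and is concatenated in the fixed order of the part-name list shown earlier in this hunk. A rough standalone sketch of that mechanism, with StringIO standing in for StringIOTree and a deliberately abridged part list:

from io import StringIO

PART_ORDER = ['utility_code_proto', 'decls', 'utility_code_def']
parts = {name: StringIO() for name in PART_ORDER}

parts['utility_code_def'].write('/* impl */\n')     # written first...
parts['utility_code_proto'].write('/* proto */\n')  # ...but emitted first

output = ''.join(parts[name].getvalue() for name in PART_ORDER)
assert output.startswith('/* proto */')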
w.putln("") - w.putln("static CYTHON_SMALL_CODE void __Pyx_CleanupGlobals(void) {") + w.putln("static CYTHON_SMALL_CODE void __Pyx_CleanupGlobals(void) {") + + code = self.parts['utility_code_proto'] + code.putln("") + code.putln("/* --- Runtime support code (head) --- */") - code = self.parts['utility_code_proto'] - code.putln("") - code.putln("/* --- Runtime support code (head) --- */") - code = self.parts['utility_code_def'] - if self.code_config.emit_linenums: + if self.code_config.emit_linenums: code.write('\n#line 1 "cython_utility"\n') code.putln("") - code.putln("/* --- Runtime support code --- */") + code.putln("/* --- Runtime support code --- */") def finalize_main_c_code(self): self.close_global_decls() @@ -1220,8 +1220,8 @@ class GlobalState(object): # utility_code_def # code = self.parts['utility_code_def'] - util = TempitaUtilityCode.load_cached("TypeConversions", "TypeConversion.c") - code.put(util.format_code(util.impl)) + util = TempitaUtilityCode.load_cached("TypeConversions", "TypeConversion.c") + code.put(util.format_code(util.impl)) code.putln("") def __getitem__(self, key): @@ -1276,12 +1276,12 @@ class GlobalState(object): # constant handling at code generation time - def get_cached_constants_writer(self, target=None): - if target is not None: - if target in self.initialised_constants: - # Return None on second/later calls to prevent duplicate creation code. - return None - self.initialised_constants.add(target) + def get_cached_constants_writer(self, target=None): + if target is not None: + if target in self.initialised_constants: + # Return None on second/later calls to prevent duplicate creation code. + return None + self.initialised_constants.add(target) return self.parts['cached_constants'] def get_int_const(self, str_value, longness=False): @@ -1299,19 +1299,19 @@ class GlobalState(object): c = self.new_num_const(str_value, 'float', value_code) return c - def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None): - if dedup_key is not None: - const = self.dedup_const_index.get(dedup_key) - if const is not None: - return const + def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None): + if dedup_key is not None: + const = self.dedup_const_index.get(dedup_key) + if const is not None: + return const # create a new Python object constant const = self.new_py_const(type, prefix) if cleanup_level is not None \ and cleanup_level <= Options.generate_cleanup_code: cleanup_writer = self.parts['cleanup_globals'] cleanup_writer.putln('Py_CLEAR(%s);' % const.cname) - if dedup_key is not None: - self.dedup_const_index[dedup_key] = const + if dedup_key is not None: + self.dedup_const_index[dedup_key] = const return const def get_string_const(self, text, py_version=None): @@ -1401,13 +1401,13 @@ class GlobalState(object): def get_cached_unbound_method(self, type_cname, method_name): key = (type_cname, method_name) - try: - cname = self.cached_cmethods[key] - except KeyError: - cname = self.cached_cmethods[key] = self.new_const_cname( - 'umethod', '%s_%s' % (type_cname, method_name)) - return cname - + try: + cname = self.cached_cmethods[key] + except KeyError: + cname = self.cached_cmethods[key] = self.new_const_cname( + 'umethod', '%s_%s' % (type_cname, method_name)) + return cname + def cached_unbound_method_call_code(self, obj_cname, type_cname, method_name, arg_cnames): # admittedly, not the best place to put this method, but it is reused by UtilityCode and ExprNodes ... 
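get_cached_unbound_method and get_py_const(dedup_key=...) above share one pattern: look the key up in a dict and mint a fresh C-level name only on a miss, so repeated requests for the same constant collapse onto a single global. A self-contained sketch of that memoisation (the cname format here is made up):

_cache = {}
_counter = 0

def const_cname(key, prefix='umethod'):
    global _counter
    try:
        return _cache[key]
    except KeyError:
        _counter += 1
        cname = _cache[key] = '__pyx_%s_%d' % (prefix, _counter)
        return cname

assert const_cname(('PyDict_Type', 'get')) == const_cname(('PyDict_Type', 'get'))
assert const_cname(('PyDict_Type', 'pop')) != const_cname(('PyDict_Type', 'get'))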
utility_code_name = "CallUnboundCMethod%d" % len(arg_cnames) @@ -1451,54 +1451,54 @@ class GlobalState(object): w.error_goto(pos))) def generate_const_declarations(self): - self.generate_cached_methods_decls() + self.generate_cached_methods_decls() self.generate_string_constants() self.generate_num_constants() self.generate_object_constant_decls() def generate_object_constant_decls(self): - consts = [(len(c.cname), c.cname, c) - for c in self.py_constants] + consts = [(len(c.cname), c.cname, c) + for c in self.py_constants] consts.sort() decls_writer = self.parts['decls'] for _, cname, c in consts: decls_writer.putln( "static %s;" % c.type.declaration_code(cname)) - def generate_cached_methods_decls(self): - if not self.cached_cmethods: - return - - decl = self.parts['decls'] - init = self.parts['init_globals'] - cnames = [] + def generate_cached_methods_decls(self): + if not self.cached_cmethods: + return + + decl = self.parts['decls'] + init = self.parts['init_globals'] + cnames = [] for (type_cname, method_name), cname in sorted(self.cached_cmethods.items()): - cnames.append(cname) - method_name_cname = self.get_interned_identifier(StringEncoding.EncodedString(method_name)).cname - decl.putln('static __Pyx_CachedCFunction %s = {0, &%s, 0, 0, 0};' % ( - cname, method_name_cname)) - # split type reference storage as it might not be static - init.putln('%s.type = (PyObject*)&%s;' % ( - cname, type_cname)) - - if Options.generate_cleanup_code: - cleanup = self.parts['cleanup_globals'] - for cname in cnames: - cleanup.putln("Py_CLEAR(%s.method);" % cname) - + cnames.append(cname) + method_name_cname = self.get_interned_identifier(StringEncoding.EncodedString(method_name)).cname + decl.putln('static __Pyx_CachedCFunction %s = {0, &%s, 0, 0, 0};' % ( + cname, method_name_cname)) + # split type reference storage as it might not be static + init.putln('%s.type = (PyObject*)&%s;' % ( + cname, type_cname)) + + if Options.generate_cleanup_code: + cleanup = self.parts['cleanup_globals'] + for cname in cnames: + cleanup.putln("Py_CLEAR(%s.method);" % cname) + def generate_string_constants(self): - c_consts = [(len(c.cname), c.cname, c) for c in self.string_const_index.values()] + c_consts = [(len(c.cname), c.cname, c) for c in self.string_const_index.values()] c_consts.sort() py_strings = [] - decls_writer = self.parts['string_decls'] + decls_writer = self.parts['string_decls'] for _, cname, c in c_consts: conditional = False if c.py_versions and (2 not in c.py_versions or 3 not in c.py_versions): conditional = True decls_writer.putln("#if PY_MAJOR_VERSION %s 3" % ( (2 in c.py_versions) and '<' or '>=')) - decls_writer.putln('static const char %s[] = "%s";' % ( + decls_writer.putln('static const char %s[] = "%s";' % ( cname, StringEncoding.split_string_literal(c.escaped_value))) if conditional: decls_writer.putln("#endif") @@ -1506,7 +1506,7 @@ class GlobalState(object): for py_string in c.py_strings.values(): py_strings.append((c.cname, len(py_string.cname), py_string)) - for c, cname in sorted(self.pyunicode_ptr_const_index.items()): + for c, cname in sorted(self.pyunicode_ptr_const_index.items()): utf16_array, utf32_array = StringEncoding.encode_pyunicode_string(c) if utf16_array: # Narrow and wide representations differ @@ -1522,11 +1522,11 @@ class GlobalState(object): py_strings.sort() w = self.parts['pystring_table'] w.putln("") - w.putln("static __Pyx_StringTabEntry %s[] = {" % Naming.stringtab_cname) + w.putln("static __Pyx_StringTabEntry %s[] = {" % Naming.stringtab_cname) for c_cname, _, 
py_string in py_strings: if not py_string.is_str or not py_string.encoding or \ - py_string.encoding in ('ASCII', 'USASCII', 'US-ASCII', - 'UTF8', 'UTF-8'): + py_string.encoding in ('ASCII', 'USASCII', 'US-ASCII', + 'UTF8', 'UTF-8'): encoding = '0' else: encoding = '"%s"' % py_string.encoding.lower() @@ -1535,7 +1535,7 @@ class GlobalState(object): "static PyObject *%s;" % py_string.cname) if py_string.py3str_cstring: w.putln("#if PY_MAJOR_VERSION >= 3") - w.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % ( + w.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % ( py_string.cname, py_string.py3str_cstring.cname, py_string.py3str_cstring.cname, @@ -1543,7 +1543,7 @@ class GlobalState(object): py_string.intern )) w.putln("#else") - w.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % ( + w.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % ( py_string.cname, c_cname, c_cname, @@ -1605,13 +1605,13 @@ class GlobalState(object): # File name state # - def lookup_filename(self, source_desc): + def lookup_filename(self, source_desc): entry = source_desc.get_filenametable_entry() try: index = self.filename_table[entry] except KeyError: index = len(self.filename_list) - self.filename_list.append(source_desc) + self.filename_list.append(source_desc) self.filename_table[entry] = index return index @@ -1649,21 +1649,21 @@ class GlobalState(object): See UtilityCode. """ - if utility_code and utility_code not in self.utility_codes: + if utility_code and utility_code not in self.utility_codes: self.utility_codes.add(utility_code) utility_code.put_code(self) - def use_entry_utility_code(self, entry): - if entry is None: - return - if entry.utility_code: - self.use_utility_code(entry.utility_code) - if entry.utility_code_definition: - self.use_utility_code(entry.utility_code_definition) - - -def funccontext_property(func): - name = func.__name__ + def use_entry_utility_code(self, entry): + if entry is None: + return + if entry.utility_code: + self.use_utility_code(entry.utility_code) + if entry.utility_code_definition: + self.use_utility_code(entry.utility_code_definition) + + +def funccontext_property(func): + name = func.__name__ attribute_of = operator.attrgetter(name) def get(self): return attribute_of(self.funcstate) @@ -1672,17 +1672,17 @@ def funccontext_property(func): return property(get, set) -class CCodeConfig(object): - # emit_linenums boolean write #line pragmas? - # emit_code_comments boolean copy the original code into C comments? - # c_line_in_traceback boolean append the c file and line number to the traceback for exceptions? - - def __init__(self, emit_linenums=True, emit_code_comments=True, c_line_in_traceback=True): - self.emit_code_comments = emit_code_comments - self.emit_linenums = emit_linenums - self.c_line_in_traceback = c_line_in_traceback - - +class CCodeConfig(object): + # emit_linenums boolean write #line pragmas? + # emit_code_comments boolean copy the original code into C comments? + # c_line_in_traceback boolean append the c file and line number to the traceback for exceptions? + + def __init__(self, emit_linenums=True, emit_code_comments=True, c_line_in_traceback=True): + self.emit_code_comments = emit_code_comments + self.emit_linenums = emit_linenums + self.c_line_in_traceback = c_line_in_traceback + + class CCodeWriter(object): """ Utility class to output C code. @@ -1712,46 +1712,46 @@ class CCodeWriter(object): # utility code, declared constants etc.) 
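funccontext_property, restored just above, builds a property whose reads and writes are forwarded to the writer's current FunctionState through operator.attrgetter; CCodeWriter uses it below to expose label state that really lives on the function being generated. A simplified standalone version (it takes the attribute name directly instead of reading func.__name__):

import operator

def delegated(name):
    getter = operator.attrgetter(name)
    def get(self):
        return getter(self.funcstate)
    def set(self, value):
        setattr(self.funcstate, name, value)
    return property(get, set)

class FuncState(object):
    error_label = None

class Writer(object):
    def __init__(self):
        self.funcstate = FuncState()
    error_label = delegated('error_label')

w = Writer()
w.error_label = 'L1'
assert w.funcstate.error_label == 'L1'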
# pyclass_stack list used during recursive code generation to pass information # about the current class one is in - # code_config CCodeConfig configuration options for the C code writer + # code_config CCodeConfig configuration options for the C code writer - @cython.locals(create_from='CCodeWriter') - def __init__(self, create_from=None, buffer=None, copy_formatting=False): + @cython.locals(create_from='CCodeWriter') + def __init__(self, create_from=None, buffer=None, copy_formatting=False): if buffer is None: buffer = StringIOTree() self.buffer = buffer - self.last_pos = None - self.last_marked_pos = None + self.last_pos = None + self.last_marked_pos = None self.pyclass_stack = [] self.funcstate = None - self.globalstate = None - self.code_config = None + self.globalstate = None + self.code_config = None self.level = 0 self.call_level = 0 self.bol = 1 if create_from is not None: # Use same global state - self.set_global_state(create_from.globalstate) + self.set_global_state(create_from.globalstate) self.funcstate = create_from.funcstate # Clone formatting state if copy_formatting: self.level = create_from.level self.bol = create_from.bol self.call_level = create_from.call_level - self.last_pos = create_from.last_pos - self.last_marked_pos = create_from.last_marked_pos + self.last_pos = create_from.last_pos + self.last_marked_pos = create_from.last_marked_pos def create_new(self, create_from, buffer, copy_formatting): # polymorphic constructor -- very slightly more versatile # than using __class__ - result = CCodeWriter(create_from, buffer, copy_formatting) + result = CCodeWriter(create_from, buffer, copy_formatting) return result - def set_global_state(self, global_state): - assert self.globalstate is None # prevent overwriting once it's set - self.globalstate = global_state - self.code_config = global_state.code_config - + def set_global_state(self, global_state): + assert self.globalstate is None # prevent overwriting once it's set + self.globalstate = global_state + self.code_config = global_state.code_config + def copyto(self, f): self.buffer.copyto(f) @@ -1761,7 +1761,7 @@ class CCodeWriter(object): def write(self, s): # also put invalid markers (lineno 0), to indicate that those lines # have no Cython source code correspondence - cython_lineno = self.last_marked_pos[1] if self.last_marked_pos else 0 + cython_lineno = self.last_marked_pos[1] if self.last_marked_pos else 0 self.buffer.markers.extend([cython_lineno] * s.count('\n')) self.buffer.write(s) @@ -1774,7 +1774,7 @@ class CCodeWriter(object): Creates a new CCodeWriter connected to the same global state, which can later be inserted using insert. 
""" - return CCodeWriter(create_from=self) + return CCodeWriter(create_from=self) def insert(self, writer): """ @@ -1787,22 +1787,22 @@ class CCodeWriter(object): self.buffer.insert(writer.buffer) # Properties delegated to function scope - @funccontext_property - def label_counter(self): pass - @funccontext_property - def return_label(self): pass - @funccontext_property - def error_label(self): pass - @funccontext_property - def labels_used(self): pass - @funccontext_property - def continue_label(self): pass - @funccontext_property - def break_label(self): pass - @funccontext_property - def return_from_error_cleanup_label(self): pass - @funccontext_property - def yield_labels(self): pass + @funccontext_property + def label_counter(self): pass + @funccontext_property + def return_label(self): pass + @funccontext_property + def error_label(self): pass + @funccontext_property + def labels_used(self): pass + @funccontext_property + def continue_label(self): pass + @funccontext_property + def break_label(self): pass + @funccontext_property + def return_from_error_cleanup_label(self): pass + @funccontext_property + def yield_labels(self): pass # Functions delegated to function scope def new_label(self, name=None): return self.funcstate.new_label(name) @@ -1818,8 +1818,8 @@ class CCodeWriter(object): def label_used(self, lbl): return self.funcstate.label_used(lbl) - def enter_cfunc_scope(self, scope=None): - self.funcstate = FunctionState(self, scope=scope) + def enter_cfunc_scope(self, scope=None): + self.funcstate = FunctionState(self, scope=scope) def exit_cfunc_scope(self): self.funcstate = None @@ -1832,8 +1832,8 @@ class CCodeWriter(object): def get_py_float(self, str_value, value_code): return self.globalstate.get_float_const(str_value, value_code).cname - def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None): - return self.globalstate.get_py_const(type, prefix, cleanup_level, dedup_key).cname + def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None): + return self.globalstate.get_py_const(type, prefix, cleanup_level, dedup_key).cname def get_string_const(self, text): return self.globalstate.get_string_const(text).cname @@ -1855,17 +1855,17 @@ class CCodeWriter(object): def intern_identifier(self, text): return self.get_py_string_const(text, identifier=True) - def get_cached_constants_writer(self, target=None): - return self.globalstate.get_cached_constants_writer(target) + def get_cached_constants_writer(self, target=None): + return self.globalstate.get_cached_constants_writer(target) # code generation def putln(self, code="", safe=False): - if self.last_pos and self.bol: + if self.last_pos and self.bol: self.emit_marker() - if self.code_config.emit_linenums and self.last_marked_pos: - source_desc, line, _ = self.last_marked_pos - self.write('\n#line %s "%s"\n' % (line, source_desc.get_escaped_description())) + if self.code_config.emit_linenums and self.last_marked_pos: + source_desc, line, _ = self.last_marked_pos + self.write('\n#line %s "%s"\n' % (line, source_desc.get_escaped_description())) if code: if safe: self.put_safe(code) @@ -1874,35 +1874,35 @@ class CCodeWriter(object): self.write("\n") self.bol = 1 - def mark_pos(self, pos, trace=True): - if pos is None: - return - if self.last_marked_pos and self.last_marked_pos[:2] == pos[:2]: - return - self.last_pos = (pos, trace) - + def mark_pos(self, pos, trace=True): + if pos is None: + return + if self.last_marked_pos and self.last_marked_pos[:2] == pos[:2]: + return + self.last_pos = (pos, 
trace) + def emit_marker(self): - pos, trace = self.last_pos - self.last_marked_pos = pos - self.last_pos = None + pos, trace = self.last_pos + self.last_marked_pos = pos + self.last_pos = None self.write("\n") - if self.code_config.emit_code_comments: - self.indent() - self.write("/* %s */\n" % self._build_marker(pos)) - if trace and self.funcstate and self.funcstate.can_trace and self.globalstate.directives['linetrace']: + if self.code_config.emit_code_comments: + self.indent() + self.write("/* %s */\n" % self._build_marker(pos)) + if trace and self.funcstate and self.funcstate.can_trace and self.globalstate.directives['linetrace']: self.indent() - self.write('__Pyx_TraceLine(%d,%d,%s)\n' % ( - pos[1], not self.funcstate.gil_owned, self.error_goto(pos))) - - def _build_marker(self, pos): - source_desc, line, col = pos - assert isinstance(source_desc, SourceDescriptor) - contents = self.globalstate.commented_file_contents(source_desc) - lines = contents[max(0, line-3):line] # line numbers start at 1 - lines[-1] += u' # <<<<<<<<<<<<<<' - lines += contents[line:line+2] - return u'"%s":%d\n%s\n' % (source_desc.get_escaped_description(), line, u'\n'.join(lines)) - + self.write('__Pyx_TraceLine(%d,%d,%s)\n' % ( + pos[1], not self.funcstate.gil_owned, self.error_goto(pos))) + + def _build_marker(self, pos): + source_desc, line, col = pos + assert isinstance(source_desc, SourceDescriptor) + contents = self.globalstate.commented_file_contents(source_desc) + lines = contents[max(0, line-3):line] # line numbers start at 1 + lines[-1] += u' # <<<<<<<<<<<<<<' + lines += contents[line:line+2] + return u'"%s":%d\n%s\n' % (source_desc.get_escaped_description(), line, u'\n'.join(lines)) + def put_safe(self, code): # put code, but ignore {} self.write(code) @@ -1916,7 +1916,7 @@ class CCodeWriter(object): path = os.path.join(include_dir, include_file) if not os.path.exists(path): tmp_path = '%s.tmp%s' % (path, os.getpid()) - with closing(Utils.open_new_file(tmp_path)) as f: + with closing(Utils.open_new_file(tmp_path)) as f: f.write(code) shutil.move(tmp_path, path) code = '#include "%s"\n' % path @@ -2023,10 +2023,10 @@ class CCodeWriter(object): self.putln("%sconst char *%s = NULL;" % (unused, Naming.filename_cname)) self.putln("%sint %s = 0;" % (unused, Naming.clineno_cname)) - def put_generated_by(self): - self.putln("/* Generated by Cython %s */" % Version.watermark) - self.putln("") - + def put_generated_by(self): + self.putln("/* Generated by Cython %s */" % Version.watermark) + self.putln("") + def put_h_guard(self, guard): self.putln("#ifndef %s" % guard) self.putln("#define %s" % guard) @@ -2100,10 +2100,10 @@ class CCodeWriter(object): else: self.putln("Py_INCREF(%s);" % self.entry_as_pyobject(entry)) - def put_var_xincref(self, entry): - if entry.type.is_pyobject: - self.putln("__Pyx_XINCREF(%s);" % self.entry_as_pyobject(entry)) - + def put_var_xincref(self, entry): + if entry.type.is_pyobject: + self.putln("__Pyx_XINCREF(%s);" % self.entry_as_pyobject(entry)) + def put_decref_clear(self, cname, type, nanny=True, clear_before_decref=False): self._put_decref(cname, type, nanny, null_check=False, clear=True, clear_before_decref=clear_before_decref) @@ -2219,9 +2219,9 @@ class CCodeWriter(object): if entry.in_closure: self.put_giveref('Py_None') - def put_pymethoddef(self, entry, term, allow_skip=True, wrapper_code_writer=None): + def put_pymethoddef(self, entry, term, allow_skip=True, wrapper_code_writer=None): if entry.is_special or entry.name == '__getattribute__': - if entry.name not in 
special_py_methods: + if entry.name not in special_py_methods: if entry.name == '__getattr__' and not self.globalstate.directives['fast_getattr']: pass # Python's typeobject.c will automatically fill in our slot @@ -2229,39 +2229,39 @@ class CCodeWriter(object): # that's better than ours. elif allow_skip: return - + method_flags = entry.signature.method_flags() - if not method_flags: - return - if entry.is_special: - from . import TypeSlots - method_flags += [TypeSlots.method_coexist] - func_ptr = wrapper_code_writer.put_pymethoddef_wrapper(entry) if wrapper_code_writer else entry.func_cname - # Add required casts, but try not to shadow real warnings. - cast = '__Pyx_PyCFunctionFast' if 'METH_FASTCALL' in method_flags else 'PyCFunction' - if 'METH_KEYWORDS' in method_flags: - cast += 'WithKeywords' - if cast != 'PyCFunction': - func_ptr = '(void*)(%s)%s' % (cast, func_ptr) - self.putln( - '{"%s", (PyCFunction)%s, %s, %s}%s' % ( - entry.name, - func_ptr, - "|".join(method_flags), - entry.doc_cname if entry.doc else '0', - term)) - - def put_pymethoddef_wrapper(self, entry): - func_cname = entry.func_cname - if entry.is_special: - method_flags = entry.signature.method_flags() - if method_flags and 'METH_NOARGS' in method_flags: - # Special NOARGS methods really take no arguments besides 'self', but PyCFunction expects one. - func_cname = Naming.method_wrapper_prefix + func_cname - self.putln("static PyObject *%s(PyObject *self, CYTHON_UNUSED PyObject *arg) {return %s(self);}" % ( - func_cname, entry.func_cname)) - return func_cname - + if not method_flags: + return + if entry.is_special: + from . import TypeSlots + method_flags += [TypeSlots.method_coexist] + func_ptr = wrapper_code_writer.put_pymethoddef_wrapper(entry) if wrapper_code_writer else entry.func_cname + # Add required casts, but try not to shadow real warnings. + cast = '__Pyx_PyCFunctionFast' if 'METH_FASTCALL' in method_flags else 'PyCFunction' + if 'METH_KEYWORDS' in method_flags: + cast += 'WithKeywords' + if cast != 'PyCFunction': + func_ptr = '(void*)(%s)%s' % (cast, func_ptr) + self.putln( + '{"%s", (PyCFunction)%s, %s, %s}%s' % ( + entry.name, + func_ptr, + "|".join(method_flags), + entry.doc_cname if entry.doc else '0', + term)) + + def put_pymethoddef_wrapper(self, entry): + func_cname = entry.func_cname + if entry.is_special: + method_flags = entry.signature.method_flags() + if method_flags and 'METH_NOARGS' in method_flags: + # Special NOARGS methods really take no arguments besides 'self', but PyCFunction expects one. + func_cname = Naming.method_wrapper_prefix + func_cname + self.putln("static PyObject *%s(PyObject *self, CYTHON_UNUSED PyObject *arg) {return %s(self);}" % ( + func_cname, entry.func_cname)) + return func_cname + # GIL methods def put_ensure_gil(self, declare_gilstate=True, variable=None): @@ -2337,8 +2337,8 @@ class CCodeWriter(object): # error handling def put_error_if_neg(self, pos, value): - # TODO this path is almost _never_ taken, yet this macro makes is slower! - # return self.putln("if (unlikely(%s < 0)) %s" % (value, self.error_goto(pos))) + # TODO this path is almost _never_ taken, yet this macro makes is slower! 
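put_pymethoddef_wrapper, re-added above, exists because CPython invokes a METH_NOARGS function with two C arguments (self plus an ignored second one), while Cython's special no-argument methods take only self, so a one-line C shim is emitted under a prefixed cname. Reconstructing the emitted text with plain string formatting (both cnames are made up; the real prefix comes from Naming.method_wrapper_prefix):

func_cname = '__pyx_f_mod_meth'
wrapper_cname = '__pyx_mw_' + func_cname
print('static PyObject *%s(PyObject *self, CYTHON_UNUSED PyObject *arg) '
      '{return %s(self);}' % (wrapper_cname, func_cname))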
+ # return self.putln("if (unlikely(%s < 0)) %s" % (value, self.error_goto(pos))) return self.putln("if (%s < 0) %s" % (value, self.error_goto(pos))) def put_error_if_unbound(self, pos, entry, in_nogil_context=False): @@ -2373,14 +2373,14 @@ class CCodeWriter(object): def error_goto(self, pos, used=True): lbl = self.funcstate.error_label self.funcstate.use_label(lbl) - if pos is None: - return 'goto %s;' % lbl + if pos is None: + return 'goto %s;' % lbl self.funcstate.should_declare_error_indicator = True if used: self.funcstate.uses_error_indicator = True - return "__PYX_ERR(%s, %s, %s)" % ( - self.lookup_filename(pos[0]), - pos[1], + return "__PYX_ERR(%s, %s, %s)" % ( + self.lookup_filename(pos[0]), + pos[1], lbl) def error_goto_if(self, cond, pos): @@ -2425,7 +2425,7 @@ class CCodeWriter(object): self.funcstate.uses_error_indicator = True self.putln('__Pyx_AddTraceback("%s", %s, %s, %s);' % format_tuple) - def put_unraisable(self, qualified_name, nogil=False): + def put_unraisable(self, qualified_name, nogil=False): """ Generate code to print a Python warning for an unraisable exception. @@ -2436,30 +2436,30 @@ class CCodeWriter(object): Naming.clineno_cname, Naming.lineno_cname, Naming.filename_cname, - self.globalstate.directives['unraisable_tracebacks'], - nogil, + self.globalstate.directives['unraisable_tracebacks'], + nogil, ) self.funcstate.uses_error_indicator = True - self.putln('__Pyx_WriteUnraisable("%s", %s, %s, %s, %d, %d);' % format_tuple) + self.putln('__Pyx_WriteUnraisable("%s", %s, %s, %s, %d, %d);' % format_tuple) self.globalstate.use_utility_code( UtilityCode.load_cached("WriteUnraisableException", "Exceptions.c")) def put_trace_declarations(self): self.putln('__Pyx_TraceDeclarations') - def put_trace_frame_init(self, codeobj=None): - if codeobj: - self.putln('__Pyx_TraceFrameInit(%s)' % codeobj) + def put_trace_frame_init(self, codeobj=None): + if codeobj: + self.putln('__Pyx_TraceFrameInit(%s)' % codeobj) + + def put_trace_call(self, name, pos, nogil=False): + self.putln('__Pyx_TraceCall("%s", %s[%s], %s, %d, %s);' % ( + name, Naming.filetable_cname, self.lookup_filename(pos[0]), pos[1], nogil, self.error_goto(pos))) - def put_trace_call(self, name, pos, nogil=False): - self.putln('__Pyx_TraceCall("%s", %s[%s], %s, %d, %s);' % ( - name, Naming.filetable_cname, self.lookup_filename(pos[0]), pos[1], nogil, self.error_goto(pos))) - def put_trace_exception(self): self.putln("__Pyx_TraceException();") - def put_trace_return(self, retvalue_cname, nogil=False): - self.putln("__Pyx_TraceReturn(%s, %d);" % (retvalue_cname, nogil)) + def put_trace_return(self, retvalue_cname, nogil=False): + self.putln("__Pyx_TraceReturn(%s, %d);" % (retvalue_cname, nogil)) def putln_openmp(self, string): self.putln("#ifdef _OPENMP") @@ -2543,7 +2543,7 @@ class PyxCodeWriter(object): def getvalue(self): result = self.buffer.getvalue() - if isinstance(result, bytes): + if isinstance(result, bytes): result = result.decode(self.encoding) return result @@ -2585,7 +2585,7 @@ class ClosureTempAllocator(object): self.temps_free[type] = list(cnames) def allocate_temp(self, type): - if type not in self.temps_allocated: + if type not in self.temps_allocated: self.temps_allocated[type] = [] self.temps_free[type] = [] elif self.temps_free[type]: diff --git a/contrib/tools/cython/Cython/Compiler/CythonScope.py b/contrib/tools/cython/Cython/Compiler/CythonScope.py index 70e83916b5..1c25d1a6b4 100644 --- a/contrib/tools/cython/Cython/Compiler/CythonScope.py +++ 
b/contrib/tools/cython/Cython/Compiler/CythonScope.py @@ -71,9 +71,9 @@ class CythonScope(ModuleScope): name_path = qname.split(u'.') scope = self while len(name_path) > 1: - scope = scope.lookup_here(name_path[0]) - if scope: - scope = scope.as_module + scope = scope.lookup_here(name_path[0]) + if scope: + scope = scope.as_module del name_path[0] if scope is None: return None diff --git a/contrib/tools/cython/Cython/Compiler/Errors.py b/contrib/tools/cython/Cython/Compiler/Errors.py index aa7a40437c..9761b52c32 100644 --- a/contrib/tools/cython/Cython/Compiler/Errors.py +++ b/contrib/tools/cython/Cython/Compiler/Errors.py @@ -4,11 +4,11 @@ from __future__ import absolute_import -try: - from __builtin__ import basestring as any_string_type -except ImportError: - any_string_type = (bytes, str) - +try: + from __builtin__ import basestring as any_string_type +except ImportError: + any_string_type = (bytes, str) + import sys from contextlib import contextmanager @@ -27,7 +27,7 @@ class PyrexWarning(Exception): def context(position): source = position[0] - assert not (isinstance(source, any_string_type)), ( + assert not (isinstance(source, any_string_type)), ( "Please replace filename strings with Scanning.FileSourceDescriptor instances %r" % source) try: F = source.get_lines() @@ -173,7 +173,7 @@ def report_error(err, use_stack=True): def error(position, message): - #print("Errors.error:", repr(position), repr(message)) ### + #print("Errors.error:", repr(position), repr(message)) ### if position is None: raise InternalError(message) err = CompileError(position, message) diff --git a/contrib/tools/cython/Cython/Compiler/ExprNodes.py b/contrib/tools/cython/Cython/Compiler/ExprNodes.py index 45938521c9..4a402f8126 100644 --- a/contrib/tools/cython/Cython/Compiler/ExprNodes.py +++ b/contrib/tools/cython/Cython/Compiler/ExprNodes.py @@ -13,12 +13,12 @@ cython.declare(error=object, warning=object, warn_once=object, InternalError=obj unicode_type=object, str_type=object, bytes_type=object, type_type=object, Builtin=object, Symtab=object, Utils=object, find_coercion_error=object, debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object, - bytearray_type=object, slice_type=object, _py_int_types=object, - IS_PYTHON3=cython.bint) + bytearray_type=object, slice_type=object, _py_int_types=object, + IS_PYTHON3=cython.bint) import re -import sys -import copy +import sys +import copy import os.path import operator @@ -43,31 +43,31 @@ from . 
import Future from ..Debugging import print_call_chain from .DebugFlags import debug_disposal_code, debug_temp_alloc, \ debug_coercion -from .Pythran import (to_pythran, is_pythran_supported_type, is_pythran_supported_operation_type, - is_pythran_expr, pythran_func_type, pythran_binop_type, pythran_unaryop_type, has_np_pythran, - pythran_indexing_code, pythran_indexing_type, is_pythran_supported_node_or_none, pythran_type, - pythran_is_numpy_func_supported, pythran_get_func_include_file, pythran_functor) +from .Pythran import (to_pythran, is_pythran_supported_type, is_pythran_supported_operation_type, + is_pythran_expr, pythran_func_type, pythran_binop_type, pythran_unaryop_type, has_np_pythran, + pythran_indexing_code, pythran_indexing_type, is_pythran_supported_node_or_none, pythran_type, + pythran_is_numpy_func_supported, pythran_get_func_include_file, pythran_functor) from .PyrexTypes import PythranExpr try: from __builtin__ import basestring except ImportError: - # Python 3 - basestring = str - any_string_type = (bytes, str) -else: - # Python 2 - any_string_type = (bytes, unicode) - - -if sys.version_info[0] >= 3: - IS_PYTHON3 = True - _py_int_types = int -else: - IS_PYTHON3 = False - _py_int_types = (int, long) - - + # Python 3 + basestring = str + any_string_type = (bytes, str) +else: + # Python 2 + any_string_type = (bytes, unicode) + + +if sys.version_info[0] >= 3: + IS_PYTHON3 = True + _py_int_types = int +else: + IS_PYTHON3 = False + _py_int_types = (int, long) + + class NotConstant(object): _obj = None @@ -86,46 +86,46 @@ constant_value_not_set = object() # error messages when coercing from key[0] to key[1] coercion_error_dict = { # string related errors - (unicode_type, str_type): ("Cannot convert Unicode string to 'str' implicitly." - " This is not portable and requires explicit encoding."), - (unicode_type, bytes_type): "Cannot convert Unicode string to 'bytes' implicitly, encoding required.", - (unicode_type, PyrexTypes.c_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.", - (unicode_type, PyrexTypes.c_const_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.", - (unicode_type, PyrexTypes.c_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.", - (unicode_type, PyrexTypes.c_const_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.", - (bytes_type, unicode_type): "Cannot convert 'bytes' object to unicode implicitly, decoding required", - (bytes_type, str_type): "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.", - (bytes_type, basestring_type): ("Cannot convert 'bytes' object to basestring implicitly." - " This is not portable to Py3."), - (bytes_type, PyrexTypes.c_py_unicode_ptr_type): "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.", - (bytes_type, PyrexTypes.c_const_py_unicode_ptr_type): ( - "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'."), - (basestring_type, bytes_type): "Cannot convert 'basestring' object to bytes implicitly. This is not portable.", - (str_type, unicode_type): ("str objects do not support coercion to unicode," - " use a unicode string literal instead (u'')"), - (str_type, bytes_type): "Cannot convert 'str' to 'bytes' implicitly. 
This is not portable.", - (str_type, PyrexTypes.c_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).", - (str_type, PyrexTypes.c_const_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).", - (str_type, PyrexTypes.c_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).", - (str_type, PyrexTypes.c_const_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).", - (str_type, PyrexTypes.c_py_unicode_ptr_type): "'str' objects do not support coercion to C types (use 'unicode'?).", - (str_type, PyrexTypes.c_const_py_unicode_ptr_type): ( - "'str' objects do not support coercion to C types (use 'unicode'?)."), - (PyrexTypes.c_char_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required", - (PyrexTypes.c_const_char_ptr_type, unicode_type): ( - "Cannot convert 'char*' to unicode implicitly, decoding required"), - (PyrexTypes.c_uchar_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required", - (PyrexTypes.c_const_uchar_ptr_type, unicode_type): ( - "Cannot convert 'char*' to unicode implicitly, decoding required"), + (unicode_type, str_type): ("Cannot convert Unicode string to 'str' implicitly." + " This is not portable and requires explicit encoding."), + (unicode_type, bytes_type): "Cannot convert Unicode string to 'bytes' implicitly, encoding required.", + (unicode_type, PyrexTypes.c_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.", + (unicode_type, PyrexTypes.c_const_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.", + (unicode_type, PyrexTypes.c_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.", + (unicode_type, PyrexTypes.c_const_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.", + (bytes_type, unicode_type): "Cannot convert 'bytes' object to unicode implicitly, decoding required", + (bytes_type, str_type): "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.", + (bytes_type, basestring_type): ("Cannot convert 'bytes' object to basestring implicitly." + " This is not portable to Py3."), + (bytes_type, PyrexTypes.c_py_unicode_ptr_type): "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.", + (bytes_type, PyrexTypes.c_const_py_unicode_ptr_type): ( + "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'."), + (basestring_type, bytes_type): "Cannot convert 'basestring' object to bytes implicitly. This is not portable.", + (str_type, unicode_type): ("str objects do not support coercion to unicode," + " use a unicode string literal instead (u'')"), + (str_type, bytes_type): "Cannot convert 'str' to 'bytes' implicitly. 
This is not portable.", + (str_type, PyrexTypes.c_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).", + (str_type, PyrexTypes.c_const_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).", + (str_type, PyrexTypes.c_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).", + (str_type, PyrexTypes.c_const_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).", + (str_type, PyrexTypes.c_py_unicode_ptr_type): "'str' objects do not support coercion to C types (use 'unicode'?).", + (str_type, PyrexTypes.c_const_py_unicode_ptr_type): ( + "'str' objects do not support coercion to C types (use 'unicode'?)."), + (PyrexTypes.c_char_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required", + (PyrexTypes.c_const_char_ptr_type, unicode_type): ( + "Cannot convert 'char*' to unicode implicitly, decoding required"), + (PyrexTypes.c_uchar_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required", + (PyrexTypes.c_const_uchar_ptr_type, unicode_type): ( + "Cannot convert 'char*' to unicode implicitly, decoding required"), } def find_coercion_error(type_tuple, default, env): err = coercion_error_dict.get(type_tuple) if err is None: return default - elif (env.directives['c_string_encoding'] and - any(t in type_tuple for t in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_uchar_ptr_type, - PyrexTypes.c_const_char_ptr_type, PyrexTypes.c_const_uchar_ptr_type))): + elif (env.directives['c_string_encoding'] and + any(t in type_tuple for t in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_uchar_ptr_type, + PyrexTypes.c_const_char_ptr_type, PyrexTypes.c_const_uchar_ptr_type))): if type_tuple[1].is_pyobject: return default elif env.directives['c_string_encoding'] in ('ascii', 'default'): @@ -151,9 +151,9 @@ def check_negative_indices(*nodes): Used to find (potential) bugs inside of "wraparound=False" sections. """ for node in nodes: - if node is None or ( - not isinstance(node.constant_result, _py_int_types) and - not isinstance(node.constant_result, float)): + if node is None or ( + not isinstance(node.constant_result, _py_int_types) and + not isinstance(node.constant_result, float)): continue if node.constant_result < 0: warning(node.pos, @@ -187,111 +187,111 @@ def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None): return item_types.pop() return None - -def make_dedup_key(outer_type, item_nodes): - """ - Recursively generate a deduplication key from a sequence of values. - Includes Cython node types to work around the fact that (1, 2.0) == (1.0, 2), for example. - - @param outer_type: The type of the outer container. - @param item_nodes: A sequence of constant nodes that will be traversed recursively. - @return: A tuple that can be used as a dict key for deduplication. - """ - item_keys = [ - (py_object_type, None, type(None)) if node is None - # For sequences and their "mult_factor", see TupleNode. - else make_dedup_key(node.type, [node.mult_factor if node.is_literal else None] + node.args) if node.is_sequence_constructor - else make_dedup_key(node.type, (node.start, node.stop, node.step)) if node.is_slice - # For constants, look at the Python value type if we don't know the concrete Cython type. 
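The key built in this comprehension pairs each constant with its type precisely because Python's cross-type numeric equality would otherwise merge constants that Cython must keep apart, the case the docstring names. The effect in four lines:

assert (1, 2.0) == (1.0, 2)        # equal as plain tuples...
a = tuple((type(v), v) for v in (1, 2.0))
b = tuple((type(v), v) for v in (1.0, 2))
assert a != b                      # ...distinct once types join the key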
- else (node.type, node.constant_result, - type(node.constant_result) if node.type is py_object_type else None) if node.has_constant_result() - else None # something we cannot handle => short-circuit below - for node in item_nodes - ] - if None in item_keys: - return None - return outer_type, tuple(item_keys) - - -# Returns a block of code to translate the exception, -# plus a boolean indicating whether to check for Python exceptions. -def get_exception_handler(exception_value): - if exception_value is None: - return "__Pyx_CppExn2PyErr();", False - elif (exception_value.type == PyrexTypes.c_char_type - and exception_value.value == '*'): - return "__Pyx_CppExn2PyErr();", True - elif exception_value.type.is_pyobject: - return ( - 'try { throw; } catch(const std::exception& exn) {' - 'PyErr_SetString(%s, exn.what());' - '} catch(...) { PyErr_SetNone(%s); }' % ( - exception_value.entry.cname, - exception_value.entry.cname), - False) - else: - return ( - '%s(); if (!PyErr_Occurred())' - 'PyErr_SetString(PyExc_RuntimeError, ' - '"Error converting c++ exception.");' % ( - exception_value.entry.cname), - False) - -def maybe_check_py_error(code, check_py_exception, pos, nogil): - if check_py_exception: - if nogil: - code.putln(code.error_goto_if("__Pyx_ErrOccurredWithGIL()", pos)) - else: - code.putln(code.error_goto_if("PyErr_Occurred()", pos)) - -def translate_cpp_exception(code, pos, inside, py_result, exception_value, nogil): - raise_py_exception, check_py_exception = get_exception_handler(exception_value) - code.putln("try {") - code.putln("%s" % inside) - if py_result: - code.putln(code.error_goto_if_null(py_result, pos)) - maybe_check_py_error(code, check_py_exception, pos, nogil) - code.putln("} catch(...) {") - if nogil: - code.put_ensure_gil(declare_gilstate=True) - code.putln(raise_py_exception) - if nogil: - code.put_release_ensured_gil() - code.putln(code.error_goto(pos)) - code.putln("}") - -# Used to handle the case where an lvalue expression and an overloaded assignment -# both have an exception declaration. -def translate_double_cpp_exception(code, pos, lhs_type, lhs_code, rhs_code, - lhs_exc_val, assign_exc_val, nogil): - handle_lhs_exc, lhc_check_py_exc = get_exception_handler(lhs_exc_val) - handle_assignment_exc, assignment_check_py_exc = get_exception_handler(assign_exc_val) - code.putln("try {") - code.putln(lhs_type.declaration_code("__pyx_local_lvalue = %s;" % lhs_code)) - maybe_check_py_error(code, lhc_check_py_exc, pos, nogil) - code.putln("try {") - code.putln("__pyx_local_lvalue = %s;" % rhs_code) - maybe_check_py_error(code, assignment_check_py_exc, pos, nogil) - # Catch any exception from the overloaded assignment. - code.putln("} catch(...) {") - if nogil: - code.put_ensure_gil(declare_gilstate=True) - code.putln(handle_assignment_exc) - if nogil: - code.put_release_ensured_gil() - code.putln(code.error_goto(pos)) - code.putln("}") - # Catch any exception from evaluating lhs. - code.putln("} catch(...) {") - if nogil: - code.put_ensure_gil(declare_gilstate=True) - code.putln(handle_lhs_exc) - if nogil: - code.put_release_ensured_gil() - code.putln(code.error_goto(pos)) - code.putln('}') - - + +def make_dedup_key(outer_type, item_nodes): + """ + Recursively generate a deduplication key from a sequence of values. + Includes Cython node types to work around the fact that (1, 2.0) == (1.0, 2), for example. + + @param outer_type: The type of the outer container. + @param item_nodes: A sequence of constant nodes that will be traversed recursively. 
+ @return: A tuple that can be used as a dict key for deduplication. + """ + item_keys = [ + (py_object_type, None, type(None)) if node is None + # For sequences and their "mult_factor", see TupleNode. + else make_dedup_key(node.type, [node.mult_factor if node.is_literal else None] + node.args) if node.is_sequence_constructor + else make_dedup_key(node.type, (node.start, node.stop, node.step)) if node.is_slice + # For constants, look at the Python value type if we don't know the concrete Cython type. + else (node.type, node.constant_result, + type(node.constant_result) if node.type is py_object_type else None) if node.has_constant_result() + else None # something we cannot handle => short-circuit below + for node in item_nodes + ] + if None in item_keys: + return None + return outer_type, tuple(item_keys) + + +# Returns a block of code to translate the exception, +# plus a boolean indicating whether to check for Python exceptions. +def get_exception_handler(exception_value): + if exception_value is None: + return "__Pyx_CppExn2PyErr();", False + elif (exception_value.type == PyrexTypes.c_char_type + and exception_value.value == '*'): + return "__Pyx_CppExn2PyErr();", True + elif exception_value.type.is_pyobject: + return ( + 'try { throw; } catch(const std::exception& exn) {' + 'PyErr_SetString(%s, exn.what());' + '} catch(...) { PyErr_SetNone(%s); }' % ( + exception_value.entry.cname, + exception_value.entry.cname), + False) + else: + return ( + '%s(); if (!PyErr_Occurred())' + 'PyErr_SetString(PyExc_RuntimeError, ' + '"Error converting c++ exception.");' % ( + exception_value.entry.cname), + False) + +def maybe_check_py_error(code, check_py_exception, pos, nogil): + if check_py_exception: + if nogil: + code.putln(code.error_goto_if("__Pyx_ErrOccurredWithGIL()", pos)) + else: + code.putln(code.error_goto_if("PyErr_Occurred()", pos)) + +def translate_cpp_exception(code, pos, inside, py_result, exception_value, nogil): + raise_py_exception, check_py_exception = get_exception_handler(exception_value) + code.putln("try {") + code.putln("%s" % inside) + if py_result: + code.putln(code.error_goto_if_null(py_result, pos)) + maybe_check_py_error(code, check_py_exception, pos, nogil) + code.putln("} catch(...) {") + if nogil: + code.put_ensure_gil(declare_gilstate=True) + code.putln(raise_py_exception) + if nogil: + code.put_release_ensured_gil() + code.putln(code.error_goto(pos)) + code.putln("}") + +# Used to handle the case where an lvalue expression and an overloaded assignment +# both have an exception declaration. +def translate_double_cpp_exception(code, pos, lhs_type, lhs_code, rhs_code, + lhs_exc_val, assign_exc_val, nogil): + handle_lhs_exc, lhc_check_py_exc = get_exception_handler(lhs_exc_val) + handle_assignment_exc, assignment_check_py_exc = get_exception_handler(assign_exc_val) + code.putln("try {") + code.putln(lhs_type.declaration_code("__pyx_local_lvalue = %s;" % lhs_code)) + maybe_check_py_error(code, lhc_check_py_exc, pos, nogil) + code.putln("try {") + code.putln("__pyx_local_lvalue = %s;" % rhs_code) + maybe_check_py_error(code, assignment_check_py_exc, pos, nogil) + # Catch any exception from the overloaded assignment. + code.putln("} catch(...) {") + if nogil: + code.put_ensure_gil(declare_gilstate=True) + code.putln(handle_assignment_exc) + if nogil: + code.put_release_ensured_gil() + code.putln(code.error_goto(pos)) + code.putln("}") + # Catch any exception from evaluating lhs. + code.putln("} catch(...) 
{") + if nogil: + code.put_ensure_gil(declare_gilstate=True) + code.putln(handle_lhs_exc) + if nogil: + code.put_release_ensured_gil() + code.putln(code.error_goto(pos)) + code.putln('}') + + class ExprNode(Node): # subexprs [string] Class var holding names of subexpr node attrs # type PyrexType Type of the result @@ -434,18 +434,18 @@ class ExprNode(Node): is_sequence_constructor = False is_dict_literal = False - is_set_literal = False + is_set_literal = False is_string_literal = False is_attribute = False is_subscript = False - is_slice = False - - is_buffer_access = False - is_memview_index = False - is_memview_slice = False - is_memview_broadcast = False - is_memview_copy_assignment = False - + is_slice = False + + is_buffer_access = False + is_memview_index = False + is_memview_slice = False + is_memview_broadcast = False + is_memview_copy_assignment = False + saved_subexpr_nodes = None is_temp = False is_target = False @@ -505,12 +505,12 @@ class ExprNode(Node): assert(type_ is not None) return to_pythran(self, type_) - def is_c_result_required(self): - """ - Subtypes may return False here if result temp allocation can be skipped. - """ - return True - + def is_c_result_required(self): + """ + Subtypes may return False here if result temp allocation can be skipped. + """ + return True + def result_as(self, type = None): # Return the result code cast to the specified C type. if (self.is_temp and self.type.is_pyobject and @@ -635,14 +635,14 @@ class ExprNode(Node): # can't be modified as part of globals or closures. return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction - def inferable_item_node(self, index=0): - """ - Return a node that represents the (type) result of an indexing operation, - e.g. for tuple unpacking or iteration. - """ - return IndexNode(self.pos, base=self, index=IntNode( - self.pos, value=str(index), constant_result=index, type=PyrexTypes.c_py_ssize_t_type)) - + def inferable_item_node(self, index=0): + """ + Return a node that represents the (type) result of an indexing operation, + e.g. for tuple unpacking or iteration. 
+ """ + return IndexNode(self.pos, base=self, index=IntNode( + self.pos, value=str(index), constant_result=index, type=PyrexTypes.c_py_ssize_t_type)) + # --------------- Type Analysis ------------------ def analyse_as_module(self, env): @@ -718,9 +718,9 @@ class ExprNode(Node): if not type.is_void: if type.is_pyobject: type = PyrexTypes.py_object_type - elif not (self.result_is_used or type.is_memoryviewslice or self.is_c_result_required()): - self.temp_code = None - return + elif not (self.result_is_used or type.is_memoryviewslice or self.is_c_result_required()): + self.temp_code = None + return self.temp_code = code.funcstate.allocate_temp( type, manage_ref=self.use_managed_ref) else: @@ -796,8 +796,8 @@ class ExprNode(Node): elif self.type.is_memoryviewslice: code.put_xdecref_memoryviewslice( self.result(), have_gil=not self.in_nogil_context) - code.putln("%s.memview = NULL;" % self.result()) - code.putln("%s.data = NULL;" % self.result()) + code.putln("%s.memview = NULL;" % self.result()) + code.putln("%s.data = NULL;" % self.result()) else: # Already done if self.is_temp self.generate_subexpr_disposal_code(code) @@ -822,8 +822,8 @@ class ExprNode(Node): else: self.generate_subexpr_disposal_code(code) - def generate_assignment_code(self, rhs, code, overloaded_assignment=False, - exception_check=None, exception_value=None): + def generate_assignment_code(self, rhs, code, overloaded_assignment=False, + exception_check=None, exception_value=None): # Stub method for nodes which are not legal as # the LHS of an assignment. An error will have # been reported earlier. @@ -879,8 +879,8 @@ class ExprNode(Node): if self.check_for_coercion_error(dst_type, env): return self - used_as_reference = dst_type.is_reference - if used_as_reference and not src_type.is_reference: + used_as_reference = dst_type.is_reference + if used_as_reference and not src_type.is_reference: dst_type = dst_type.ref_base_type if src_type.is_const: @@ -903,9 +903,9 @@ class ExprNode(Node): if src_type.is_fused: error(self.pos, "Type is not specialized") - elif src_type.is_null_ptr and dst_type.is_ptr: - # NULL can be implicitly cast to any pointer type - return self + elif src_type.is_null_ptr and dst_type.is_ptr: + # NULL can be implicitly cast to any pointer type + return self else: error(self.pos, "Cannot coerce to a type that is not specialized") @@ -923,10 +923,10 @@ class ExprNode(Node): if src.type.is_pyobject: src = CoerceToMemViewSliceNode(src, dst_type, env) elif src.type.is_array: - src = CythonArrayNode.from_carray(src, env).coerce_to(dst_type, env) + src = CythonArrayNode.from_carray(src, env).coerce_to(dst_type, env) elif not src_type.is_error: error(self.pos, - "Cannot convert '%s' to memoryviewslice" % (src_type,)) + "Cannot convert '%s' to memoryviewslice" % (src_type,)) else: if src.type.writable_needed: dst_type.writable_needed = True @@ -961,10 +961,10 @@ class ExprNode(Node): # Else, we need to convert the Pythran expression to a Python object src = CoerceToPyTypeNode(src, env, type=dst_type) elif src.type.is_pyobject: - if used_as_reference and dst_type.is_cpp_class: - warning( - self.pos, - "Cannot pass Python object as C++ data structure reference (%s &), will pass by copy." % dst_type) + if used_as_reference and dst_type.is_cpp_class: + warning( + self.pos, + "Cannot pass Python object as C++ data structure reference (%s &), will pass by copy." 
% dst_type) src = CoerceFromPyTypeNode(dst_type, src, env) elif (dst_type.is_complex and src_type != dst_type @@ -974,8 +974,8 @@ class ExprNode(Node): # Added the string comparison, since for c types that # is enough, but Cython gets confused when the types are # in different pxi files. - # TODO: Remove this hack and require shared declarations. - if not (src.type == dst_type or str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)): + # TODO: Remove this hack and require shared declarations. + if not (src.type == dst_type or str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)): self.fail_assignment(dst_type) return src @@ -1013,15 +1013,15 @@ class ExprNode(Node): elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float: return CoerceToBooleanNode(self, env) elif type.is_cpp_class and type.scope and type.scope.lookup("operator bool"): - return SimpleCallNode( - self.pos, - function=AttributeNode( + return SimpleCallNode( + self.pos, + function=AttributeNode( self.pos, obj=self, attribute=StringEncoding.EncodedString('operator bool')), - args=[]).analyse_types(env) - elif type.is_ctuple: - bool_value = len(type.components) == 0 - return BoolNode(self.pos, value=bool_value, - constant_result=bool_value) + args=[]).analyse_types(env) + elif type.is_ctuple: + bool_value = len(type.components) == 0 + return BoolNode(self.pos, value=bool_value, + constant_result=bool_value) else: error(self.pos, "Type '%s' not acceptable as a boolean" % type) return self @@ -1209,10 +1209,10 @@ class BoolNode(ConstNode): return str(int(self.value)) def coerce_to(self, dst_type, env): - if dst_type == self.type: - return self - if dst_type is py_object_type and self.type is Builtin.bool_type: - return self + if dst_type == self.type: + return self + if dst_type is py_object_type and self.type is Builtin.bool_type: + return self if dst_type.is_pyobject and self.type.is_int: return BoolNode( self.pos, value=self.value, @@ -1272,7 +1272,7 @@ class IntNode(ConstNode): # we ignore 'is_c_literal = True' and instead map signed 32bit # integers as C long values if self.is_c_literal or \ - not self.has_constant_result() or \ + not self.has_constant_result() or \ self.unsigned or self.longness == 'LL': # clearly a C literal rank = (self.longness == 'LL') and 2 or 1 @@ -1302,12 +1302,12 @@ class IntNode(ConstNode): constant_result=not_a_constant) if dst_type.is_numeric and not dst_type.is_complex: node = IntNode(self.pos, value=self.value, constant_result=self.constant_result, - type=dst_type, is_c_literal=True, + type=dst_type, is_c_literal=True, unsigned=self.unsigned, longness=self.longness) return node elif dst_type.is_pyobject: node = IntNode(self.pos, value=self.value, constant_result=self.constant_result, - type=PyrexTypes.py_object_type, is_c_literal=False, + type=PyrexTypes.py_object_type, is_c_literal=False, unsigned=self.unsigned, longness=self.longness) else: # FIXME: not setting the type here to keep it working with @@ -1335,43 +1335,43 @@ class IntNode(ConstNode): self.result_code = self.get_constant_c_result_code() def get_constant_c_result_code(self): - unsigned, longness = self.unsigned, self.longness - literal = self.value_as_c_integer_string() - if not (unsigned or longness) and self.type.is_int and literal[0] == '-' and literal[1] != '0': - # negative decimal literal => guess longness from type to prevent wrap-around - if self.type.rank >= PyrexTypes.c_longlong_type.rank: - longness = 'LL' - elif self.type.rank >= PyrexTypes.c_long_type.rank: - longness = 'L' 
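
get_constant_c_result_code() here guesses a longness suffix for negative decimal literals, and value_as_c_integer_string() just below rewrites Python-only literal spellings into forms every C compiler accepts. A condensed sketch of those rewrites (helper name illustrative):

    def c_integer_literal(value):
        # C understands hex and leading-zero octal literals, but not
        # Python 3's 0o/0b notations, so those are rewritten.
        neg = value.startswith('-')
        if neg:
            value = value[1:]
        if value[:2] in ('0o', '0O'):
            value = '0' + value[2:]         # '0o123' -> '0123'
        elif value[:2] in ('0b', '0B'):
            value = str(int(value[2:], 2))  # '0b101' -> '5'
        return ('-' if neg else '') + value

    assert c_integer_literal('0o123') == '0123'
    assert c_integer_literal('-0b101') == '-5'
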
- return literal + unsigned + longness + unsigned, longness = self.unsigned, self.longness + literal = self.value_as_c_integer_string() + if not (unsigned or longness) and self.type.is_int and literal[0] == '-' and literal[1] != '0': + # negative decimal literal => guess longness from type to prevent wrap-around + if self.type.rank >= PyrexTypes.c_longlong_type.rank: + longness = 'LL' + elif self.type.rank >= PyrexTypes.c_long_type.rank: + longness = 'L' + return literal + unsigned + longness def value_as_c_integer_string(self): value = self.value - if len(value) <= 2: - # too short to go wrong (and simplifies code below) - return value - neg_sign = '' - if value[0] == '-': - neg_sign = '-' - value = value[1:] - if value[0] == '0': - literal_type = value[1] # 0'o' - 0'b' - 0'x' - # 0x123 hex literals and 0123 octal literals work nicely in C - # but C-incompatible Py3 oct/bin notations need conversion - if neg_sign and literal_type in 'oOxX0123456789' and value[2:].isdigit(): - # negative hex/octal literal => prevent C compiler from using - # unsigned integer types by converting to decimal (see C standard 6.4.4.1) - value = str(Utils.str_to_number(value)) - elif literal_type in 'oO': - value = '0' + value[2:] # '0o123' => '0123' - elif literal_type in 'bB': - value = str(int(value[2:], 2)) - elif value.isdigit() and not self.unsigned and not self.longness: - if not neg_sign: - # C compilers do not consider unsigned types for decimal literals, - # but they do for hex (see C standard 6.4.4.1) - value = '0x%X' % int(value) - return neg_sign + value + if len(value) <= 2: + # too short to go wrong (and simplifies code below) + return value + neg_sign = '' + if value[0] == '-': + neg_sign = '-' + value = value[1:] + if value[0] == '0': + literal_type = value[1] # 0'o' - 0'b' - 0'x' + # 0x123 hex literals and 0123 octal literals work nicely in C + # but C-incompatible Py3 oct/bin notations need conversion + if neg_sign and literal_type in 'oOxX0123456789' and value[2:].isdigit(): + # negative hex/octal literal => prevent C compiler from using + # unsigned integer types by converting to decimal (see C standard 6.4.4.1) + value = str(Utils.str_to_number(value)) + elif literal_type in 'oO': + value = '0' + value[2:] # '0o123' => '0123' + elif literal_type in 'bB': + value = str(int(value[2:], 2)) + elif value.isdigit() and not self.unsigned and not self.longness: + if not neg_sign: + # C compilers do not consider unsigned types for decimal literals, + # but they do for hex (see C standard 6.4.4.1) + value = '0x%X' % int(value) + return neg_sign + value def calculate_result_code(self): return self.result_code @@ -1409,7 +1409,7 @@ class FloatNode(ConstNode): def get_constant_c_result_code(self): strval = self.value - assert isinstance(strval, basestring) + assert isinstance(strval, basestring) cmpval = repr(float(strval)) if cmpval == 'nan': return "(Py_HUGE_VAL * 0)" @@ -1433,12 +1433,12 @@ def _analyse_name_as_type(name, pos, env): if type is not None: return type - global_entry = env.global_scope().lookup(name) - if global_entry and global_entry.type and ( - global_entry.type.is_extension_type - or global_entry.type.is_struct_or_union - or global_entry.type.is_builtin_type - or global_entry.type.is_cpp_class): + global_entry = env.global_scope().lookup(name) + if global_entry and global_entry.type and ( + global_entry.type.is_extension_type + or global_entry.type.is_struct_or_union + or global_entry.type.is_builtin_type + or global_entry.type.is_cpp_class): return global_entry.type from 
.TreeFragment import TreeFragment @@ -1470,11 +1470,11 @@ class BytesNode(ConstNode): self.constant_result = self.value def as_sliced_node(self, start, stop, step=None): - value = StringEncoding.bytes_literal(self.value[start:stop:step], self.value.encoding) - return BytesNode(self.pos, value=value, constant_result=value) + value = StringEncoding.bytes_literal(self.value[start:stop:step], self.value.encoding) + return BytesNode(self.pos, value=value, constant_result=value) def compile_time_value(self, denv): - return self.value.byteencode() + return self.value.byteencode() def analyse_as_type(self, env): return _analyse_name_as_type(self.value.decode('ISO8859-1'), self.pos, env) @@ -1501,20 +1501,20 @@ class BytesNode(ConstNode): return CharNode(self.pos, value=self.value, constant_result=ord(self.value)) - node = BytesNode(self.pos, value=self.value, constant_result=self.constant_result) + node = BytesNode(self.pos, value=self.value, constant_result=self.constant_result) if dst_type.is_pyobject: if dst_type in (py_object_type, Builtin.bytes_type): node.type = Builtin.bytes_type else: self.check_for_coercion_error(dst_type, env, fail=True) return node - elif dst_type in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type): + elif dst_type in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type): node.type = dst_type return node - elif dst_type in (PyrexTypes.c_uchar_ptr_type, PyrexTypes.c_const_uchar_ptr_type, PyrexTypes.c_void_ptr_type): - node.type = (PyrexTypes.c_const_char_ptr_type if dst_type == PyrexTypes.c_const_uchar_ptr_type - else PyrexTypes.c_char_ptr_type) - return CastNode(node, dst_type) + elif dst_type in (PyrexTypes.c_uchar_ptr_type, PyrexTypes.c_const_uchar_ptr_type, PyrexTypes.c_void_ptr_type): + node.type = (PyrexTypes.c_const_char_ptr_type if dst_type == PyrexTypes.c_const_uchar_ptr_type + else PyrexTypes.c_char_ptr_type) + return CastNode(node, dst_type) elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type): # Exclude the case of passing a C string literal into a non-const C++ string. 
if not dst_type.is_cpp_class or dst_type.is_const: @@ -1528,15 +1528,15 @@ class BytesNode(ConstNode): def generate_evaluation_code(self, code): if self.type.is_pyobject: - result = code.get_py_string_const(self.value) - elif self.type.is_const: - result = code.get_string_const(self.value) + result = code.get_py_string_const(self.value) + elif self.type.is_const: + result = code.get_string_const(self.value) else: - # not const => use plain C string literal and cast to mutable type - literal = self.value.as_c_string_literal() - # C++ may require a cast - result = typecast(self.type, PyrexTypes.c_void_ptr_type, literal) - self.result_code = result + # not const => use plain C string literal and cast to mutable type + literal = self.value.as_c_string_literal() + # C++ may require a cast + result = typecast(self.type, PyrexTypes.c_void_ptr_type, literal) + self.result_code = result def get_constant_c_result_code(self): return None # FIXME @@ -1570,8 +1570,8 @@ class UnicodeNode(ConstNode): value = StringEncoding.EncodedString(self.value[start:stop:step]) value.encoding = self.value.encoding if self.bytes_value is not None: - bytes_value = StringEncoding.bytes_literal( - self.bytes_value[start:stop:step], self.bytes_value.encoding) + bytes_value = StringEncoding.bytes_literal( + self.bytes_value[start:stop:step], self.bytes_value.encoding) else: bytes_value = None return UnicodeNode( @@ -1634,17 +1634,17 @@ class UnicodeNode(ConstNode): self.result_code = code.get_py_const(py_object_type, 'ustring') data_cname = code.get_string_const( StringEncoding.BytesLiteral(self.value.encode('unicode_escape'))) - const_code = code.get_cached_constants_writer(self.result_code) - if const_code is None: - return # already initialised - const_code.mark_pos(self.pos) - const_code.putln( + const_code = code.get_cached_constants_writer(self.result_code) + if const_code is None: + return # already initialised + const_code.mark_pos(self.pos) + const_code.putln( "%s = PyUnicode_DecodeUnicodeEscape(%s, sizeof(%s) - 1, NULL); %s" % ( self.result_code, data_cname, data_cname, - const_code.error_goto_if_null(self.result_code, self.pos))) - const_code.put_error_if_neg( + const_code.error_goto_if_null(self.result_code, self.pos))) + const_code.put_error_if_neg( self.pos, "__Pyx_PyUnicode_READY(%s)" % self.result_code) else: self.result_code = code.get_py_string_const(self.value) @@ -1720,15 +1720,15 @@ class StringNode(PyConstNode): return self.result_code def compile_time_value(self, env): - if self.value.is_unicode: - return self.value - if not IS_PYTHON3: - # use plain str/bytes object in Py2 - return self.value.byteencode() - # in Py3, always return a Unicode string - if self.unicode_value is not None: - return self.unicode_value - return self.value.decode('iso8859-1') + if self.value.is_unicode: + return self.value + if not IS_PYTHON3: + # use plain str/bytes object in Py2 + return self.value.byteencode() + # in Py3, always return a Unicode string + if self.unicode_value is not None: + return self.unicode_value + return self.value.decode('iso8859-1') class IdentifierStringNode(StringNode): @@ -1820,7 +1820,7 @@ class NewExprNode(AtomicExprNode): pass def calculate_result_code(self): - return "new " + self.class_type.empty_declaration_code() + return "new " + self.class_type.empty_declaration_code() class NameNode(AtomicExprNode): @@ -2023,25 +2023,25 @@ class NameNode(AtomicExprNode): def analyse_target_types(self, env): self.analyse_entry(env, is_target=True) - entry = self.entry - if entry.is_cfunction and 
entry.as_variable: - # FIXME: unify "is_overridable" flags below - if (entry.is_overridable or entry.type.is_overridable) or not self.is_lvalue() and entry.fused_cfunction: - # We need this for assigning to cpdef names and for the fused 'def' TreeFragment - entry = self.entry = entry.as_variable - self.type = entry.type + entry = self.entry + if entry.is_cfunction and entry.as_variable: + # FIXME: unify "is_overridable" flags below + if (entry.is_overridable or entry.type.is_overridable) or not self.is_lvalue() and entry.fused_cfunction: + # We need this for assigning to cpdef names and for the fused 'def' TreeFragment + entry = self.entry = entry.as_variable + self.type = entry.type if self.type.is_const: error(self.pos, "Assignment to const '%s'" % self.name) if self.type.is_reference: error(self.pos, "Assignment to reference '%s'" % self.name) if not self.is_lvalue(): - error(self.pos, "Assignment to non-lvalue '%s'" % self.name) + error(self.pos, "Assignment to non-lvalue '%s'" % self.name) self.type = PyrexTypes.error_type - entry.used = 1 - if entry.type.is_buffer: + entry.used = 1 + if entry.type.is_buffer: from . import Buffer - Buffer.used_buffer_aux_vars(entry) + Buffer.used_buffer_aux_vars(entry) return self def analyse_rvalue_entry(self, env): @@ -2096,11 +2096,11 @@ class NameNode(AtomicExprNode): entry = self.entry if entry.is_type and entry.type.is_extension_type: self.type_entry = entry - if entry.is_type and entry.type.is_enum: - py_entry = Symtab.Entry(self.name, None, py_object_type) - py_entry.is_pyglobal = True - py_entry.scope = self.entry.scope - self.entry = py_entry + if entry.is_type and entry.type.is_enum: + py_entry = Symtab.Entry(self.name, None, py_object_type) + py_entry.is_pyglobal = True + py_entry.scope = self.entry.scope + self.entry = py_entry elif not (entry.is_const or entry.is_variable or entry.is_builtin or entry.is_cfunction or entry.is_cpp_class): @@ -2170,13 +2170,13 @@ class NameNode(AtomicExprNode): return True def is_lvalue(self): - return ( - self.entry.is_variable and + return ( + self.entry.is_variable and not self.entry.is_readonly - ) or ( - self.entry.is_cfunction and - self.entry.is_overridable - ) + ) or ( + self.entry.is_cfunction and + self.entry.is_overridable + ) def is_addressable(self): return self.entry.is_variable and not self.type.is_memoryviewslice @@ -2197,8 +2197,8 @@ class NameNode(AtomicExprNode): entry = self.entry if entry is None: return # There was an error earlier - if entry.utility_code: - code.globalstate.use_utility_code(entry.utility_code) + if entry.utility_code: + code.globalstate.use_utility_code(entry.utility_code) if entry.is_builtin and entry.is_const: return # Lookup already cached elif entry.is_pyclass_attr: @@ -2219,7 +2219,7 @@ class NameNode(AtomicExprNode): code.globalstate.use_utility_code( UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c")) code.putln( - '__Pyx_GetModuleGlobalName(%s, %s);' % ( + '__Pyx_GetModuleGlobalName(%s, %s);' % ( self.result(), interned_cname)) if not self.cf_is_null: @@ -2248,7 +2248,7 @@ class NameNode(AtomicExprNode): code.globalstate.use_utility_code( UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c")) code.putln( - '__Pyx_GetModuleGlobalName(%s, %s); %s' % ( + '__Pyx_GetModuleGlobalName(%s, %s); %s' % ( self.result(), interned_cname, code.error_goto_if_null(self.result(), self.pos))) @@ -2257,7 +2257,7 @@ class NameNode(AtomicExprNode): code.globalstate.use_utility_code( UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c")) 
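
At the Python level, the __Pyx_GetModuleGlobalName() lookup used above amounts to a module-namespace lookup with a builtins fallback; roughly (a sketch of the semantics, not the C implementation):

    import builtins

    def get_module_global(name, module_globals):
        # Module dict first, then builtins, else NameError; this mirrors
        # what the generated C helper does for module-level names.
        try:
            return module_globals[name]
        except KeyError:
            try:
                return getattr(builtins, name)
            except AttributeError:
                raise NameError("name %r is not defined" % name) from None

    assert get_module_global("len", {}) is len
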
code.putln( - '__Pyx_GetNameInClass(%s, %s, %s); %s' % ( + '__Pyx_GetNameInClass(%s, %s, %s); %s' % ( self.result(), entry.scope.namespace_cname, interned_cname, @@ -2275,15 +2275,15 @@ class NameNode(AtomicExprNode): if null_code and raise_unbound and (entry.type.is_pyobject or memslice_check): code.put_error_if_unbound(self.pos, entry, self.in_nogil_context) - def generate_assignment_code(self, rhs, code, overloaded_assignment=False, - exception_check=None, exception_value=None): + def generate_assignment_code(self, rhs, code, overloaded_assignment=False, + exception_check=None, exception_value=None): #print "NameNode.generate_assignment_code:", self.name ### entry = self.entry if entry is None: return # There was an error earlier if (self.entry.type.is_ptr and isinstance(rhs, ListNode) - and not self.lhs_of_first_assignment and not rhs.in_module_scope): + and not self.lhs_of_first_assignment and not rhs.in_module_scope): error(self.pos, "Literal list must be assigned to pointer at time of declaration") # is_pyglobal seems to be True for module level-globals only. @@ -2367,22 +2367,22 @@ class NameNode(AtomicExprNode): code.put_giveref(rhs.py_result()) if not self.type.is_memoryviewslice: if not assigned: - if overloaded_assignment: - result = rhs.result() - if exception_check == '+': - translate_cpp_exception( - code, self.pos, - '%s = %s;' % (self.result(), result), - self.result() if self.type.is_pyobject else None, - exception_value, self.in_nogil_context) - else: - code.putln('%s = %s;' % (self.result(), result)) - else: - result = rhs.result_as(self.ctype()) + if overloaded_assignment: + result = rhs.result() + if exception_check == '+': + translate_cpp_exception( + code, self.pos, + '%s = %s;' % (self.result(), result), + self.result() if self.type.is_pyobject else None, + exception_value, self.in_nogil_context) + else: + code.putln('%s = %s;' % (self.result(), result)) + else: + result = rhs.result_as(self.ctype()) if is_pythran_expr(self.type): code.putln('new (&%s) decltype(%s){%s};' % (self.result(), self.result(), result)) - elif result != self.result(): + elif result != self.result(): code.putln('%s = %s;' % (self.result(), result)) if debug_disposal_code: print("NameNode.generate_assignment_code:") @@ -2456,10 +2456,10 @@ class NameNode(AtomicExprNode): del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % ( Naming.module_cname, interned_cname) if ignore_nonexisting: - code.putln( - 'if (unlikely(%s < 0)) {' - ' if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s ' - '}' % (del_code, code.error_goto(self.pos))) + code.putln( + 'if (unlikely(%s < 0)) {' + ' if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s ' + '}' % (del_code, code.error_goto(self.pos))) else: code.put_error_if_neg(self.pos, del_code) elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice: @@ -2566,24 +2566,24 @@ class ImportNode(ExprNode): name_list_code = self.name_list.py_result() else: name_list_code = "0" - - code.globalstate.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c")) - import_code = "__Pyx_Import(%s, %s, %d)" % ( - self.module_name.py_result(), - name_list_code, - self.level) - - if (self.level <= 0 and - self.module_name.is_string_literal and - self.module_name.value in utility_code_for_imports): - helper_func, code_name, code_file = utility_code_for_imports[self.module_name.value] - code.globalstate.use_utility_code(UtilityCode.load_cached(code_name, code_file)) - import_code = '%s(%s)' % (helper_func, 
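
__Pyx_Import() here receives the module name, the from-list and the relative-import level, mirroring the builtin __import__(); its Python-visible behaviour is roughly:

    import os.path

    # level=0 is an absolute import; a positive level would resolve the
    # name relative to the importing package ('from .. import x' style).
    mod = __import__("os.path", fromlist=["join"], level=0)
    assert mod is os.path
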
import_code) - - code.putln("%s = %s; %s" % ( - self.result(), - import_code, - code.error_goto_if_null(self.result(), self.pos))) + + code.globalstate.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c")) + import_code = "__Pyx_Import(%s, %s, %d)" % ( + self.module_name.py_result(), + name_list_code, + self.level) + + if (self.level <= 0 and + self.module_name.is_string_literal and + self.module_name.value in utility_code_for_imports): + helper_func, code_name, code_file = utility_code_for_imports[self.module_name.value] + code.globalstate.use_utility_code(UtilityCode.load_cached(code_name, code_file)) + import_code = '%s(%s)' % (helper_func, import_code) + + code.putln("%s = %s; %s" % ( + self.result(), + import_code, + code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) @@ -2599,7 +2599,7 @@ class IteratorNode(ExprNode): counter_cname = None cpp_iterator_cname = None reversed = False # currently only used for list/tuple types (see Optimize.py) - is_async = False + is_async = False subexprs = ['sequence'] @@ -2613,7 +2613,7 @@ class IteratorNode(ExprNode): self.analyse_cpp_types(env) else: self.sequence = self.sequence.coerce_to_pyobject(env) - if self.sequence.type in (list_type, tuple_type): + if self.sequence.type in (list_type, tuple_type): self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable") self.is_temp = 1 return self @@ -2701,8 +2701,8 @@ class IteratorNode(ExprNode): return if sequence_type.is_array or sequence_type.is_ptr: raise InternalError("for in carray slice not transformed") - - is_builtin_sequence = sequence_type in (list_type, tuple_type) + + is_builtin_sequence = sequence_type in (list_type, tuple_type) if not is_builtin_sequence: # reversed() not currently optimised (see Optimize.py) assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects" @@ -2712,7 +2712,7 @@ class IteratorNode(ExprNode): "if (likely(PyList_CheckExact(%s)) || PyTuple_CheckExact(%s)) {" % ( self.sequence.py_result(), self.sequence.py_result())) - + if is_builtin_sequence or self.may_be_a_sequence: self.counter_cname = code.funcstate.allocate_temp( PyrexTypes.c_py_ssize_t_type, manage_ref=False) @@ -2723,25 +2723,25 @@ class IteratorNode(ExprNode): init_value = 'PyTuple_GET_SIZE(%s) - 1' % self.result() else: init_value = '0' - code.putln("%s = %s; __Pyx_INCREF(%s); %s = %s;" % ( - self.result(), - self.sequence.py_result(), - self.result(), - self.counter_cname, - init_value)) + code.putln("%s = %s; __Pyx_INCREF(%s); %s = %s;" % ( + self.result(), + self.sequence.py_result(), + self.result(), + self.counter_cname, + init_value)) if not is_builtin_sequence: self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False) if self.may_be_a_sequence: code.putln("%s = NULL;" % self.iter_func_ptr) code.putln("} else {") code.put("%s = -1; " % self.counter_cname) - + code.putln("%s = PyObject_GetIter(%s); %s" % ( - self.result(), - self.sequence.py_result(), - code.error_goto_if_null(self.result(), self.pos))) + self.result(), + self.sequence.py_result(), + code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) - + # PyObject_GetIter() fails if "tp_iternext" is not set, but the check below # makes it visible to the C compiler that the pointer really isn't NULL, so that # it can distinguish between the special cases and the generic case @@ -2758,14 +2758,14 @@ class IteratorNode(ExprNode): item_count = len(self.sequence.args) if 
self.sequence.mult_factor is None: final_size = item_count - elif isinstance(self.sequence.mult_factor.constant_result, _py_int_types): + elif isinstance(self.sequence.mult_factor.constant_result, _py_int_types): final_size = item_count * self.sequence.mult_factor.constant_result code.putln("if (%s >= %s) break;" % (self.counter_cname, final_size)) if self.reversed: inc_dec = '--' else: inc_dec = '++' - code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS") + code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS") code.putln( "%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % ( result_name, @@ -2787,7 +2787,7 @@ class IteratorNode(ExprNode): self.counter_cname, inc_dec, code.error_goto_if_null(result_name, self.pos))) - code.put_gotref(result_name) + code.put_gotref(result_name) code.putln("#endif") def generate_iter_next_result_code(self, result_name, code): @@ -2854,7 +2854,7 @@ class IteratorNode(ExprNode): class NextNode(AtomicExprNode): # Used as part of for statement implementation. - # Implements result = next(iterator) + # Implements result = next(iterator) # Created during analyse_types phase. # The iterator is not owned by this node. # @@ -2864,14 +2864,14 @@ class NextNode(AtomicExprNode): AtomicExprNode.__init__(self, iterator.pos) self.iterator = iterator - def nogil_check(self, env): - # ignore - errors (if any) are already handled by IteratorNode - pass - + def nogil_check(self, env): + # ignore - errors (if any) are already handled by IteratorNode + pass + def type_dependencies(self, env): return self.iterator.type_dependencies(env) - def infer_type(self, env, iterator_type=None): + def infer_type(self, env, iterator_type=None): if iterator_type is None: iterator_type = self.iterator.infer_type(env) if iterator_type.is_ptr or iterator_type.is_array: @@ -2901,68 +2901,68 @@ class NextNode(AtomicExprNode): self.iterator.generate_iter_next_result_code(self.result(), code) -class AsyncIteratorNode(ExprNode): - # Used as part of 'async for' statement implementation. - # - # Implements result = sequence.__aiter__() - # - # sequence ExprNode - - subexprs = ['sequence'] - - is_async = True - type = py_object_type - is_temp = 1 - - def infer_type(self, env): - return py_object_type - - def analyse_types(self, env): - self.sequence = self.sequence.analyse_types(env) - if not self.sequence.type.is_pyobject: - error(self.pos, "async for loops not allowed on C/C++ types") - self.sequence = self.sequence.coerce_to_pyobject(env) - return self - - def generate_result_code(self, code): - code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c")) - code.putln("%s = __Pyx_Coroutine_GetAsyncIter(%s); %s" % ( - self.result(), - self.sequence.py_result(), - code.error_goto_if_null(self.result(), self.pos))) - code.put_gotref(self.result()) - - -class AsyncNextNode(AtomicExprNode): - # Used as part of 'async for' statement implementation. - # Implements result = iterator.__anext__() - # Created during analyse_types phase. - # The iterator is not owned by this node. 
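
The generate_result_code()/generate_next_sequence_item() pair above specialises iteration over exact lists and tuples into a counted loop and falls back to the generic iterator protocol for everything else; in Python terms (an illustrative sketch):

    def iterate(seq):
        # Exact list/tuple objects are walked by index, matching the
        # Py{List,Tuple}_GET_ITEM fast path; anything else goes through
        # PyObject_GetIter(), i.e. iter()/next().
        if type(seq) in (list, tuple):        # cf. Py*_CheckExact()
            for i in range(len(seq)):
                yield seq[i]
        else:
            it = iter(seq)
            while True:
                try:
                    yield next(it)
                except StopIteration:
                    return

    assert list(iterate((1, 2, 3))) == [1, 2, 3]
    assert list(iterate(x * 2 for x in (1, 2))) == [2, 4]
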
- # - # iterator IteratorNode - - type = py_object_type - is_temp = 1 - - def __init__(self, iterator): - AtomicExprNode.__init__(self, iterator.pos) - self.iterator = iterator - - def infer_type(self, env): - return py_object_type - - def analyse_types(self, env): - return self - - def generate_result_code(self, code): - code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c")) - code.putln("%s = __Pyx_Coroutine_AsyncIterNext(%s); %s" % ( - self.result(), - self.iterator.py_result(), - code.error_goto_if_null(self.result(), self.pos))) - code.put_gotref(self.result()) - - +class AsyncIteratorNode(ExprNode): + # Used as part of 'async for' statement implementation. + # + # Implements result = sequence.__aiter__() + # + # sequence ExprNode + + subexprs = ['sequence'] + + is_async = True + type = py_object_type + is_temp = 1 + + def infer_type(self, env): + return py_object_type + + def analyse_types(self, env): + self.sequence = self.sequence.analyse_types(env) + if not self.sequence.type.is_pyobject: + error(self.pos, "async for loops not allowed on C/C++ types") + self.sequence = self.sequence.coerce_to_pyobject(env) + return self + + def generate_result_code(self, code): + code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c")) + code.putln("%s = __Pyx_Coroutine_GetAsyncIter(%s); %s" % ( + self.result(), + self.sequence.py_result(), + code.error_goto_if_null(self.result(), self.pos))) + code.put_gotref(self.result()) + + +class AsyncNextNode(AtomicExprNode): + # Used as part of 'async for' statement implementation. + # Implements result = iterator.__anext__() + # Created during analyse_types phase. + # The iterator is not owned by this node. + # + # iterator IteratorNode + + type = py_object_type + is_temp = 1 + + def __init__(self, iterator): + AtomicExprNode.__init__(self, iterator.pos) + self.iterator = iterator + + def infer_type(self, env): + return py_object_type + + def analyse_types(self, env): + return self + + def generate_result_code(self, code): + code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c")) + code.putln("%s = __Pyx_Coroutine_AsyncIterNext(%s); %s" % ( + self.result(), + self.iterator.py_result(), + code.error_goto_if_null(self.result(), self.pos))) + code.put_gotref(self.result()) + + class WithExitCallNode(ExprNode): # The __exit__() call of a 'with' statement. Used in both the # except and finally clauses. 
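
AsyncIteratorNode and AsyncNextNode above implement the two halves of 'async for'; the desugaring they encode looks like this in plain Python (asyncio is only used to drive the example):

    import asyncio

    async def agen():
        yield 1
        yield 2

    async def consume(seq):
        # 'async for': one __aiter__() call, then awaited __anext__()
        # calls until StopAsyncIteration, as in the two nodes above.
        out = []
        it = seq.__aiter__()
        while True:
            try:
                out.append(await it.__anext__())
            except StopAsyncIteration:
                return out

    assert asyncio.run(consume(agen())) == [1, 2]
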
@@ -3004,14 +3004,14 @@ class WithExitCallNode(ExprNode): code.putln(code.error_goto_if_null(result_var, self.pos)) code.put_gotref(result_var) - + if self.await_expr: - # FIXME: result_var temp currently leaks into the closure + # FIXME: result_var temp currently leaks into the closure self.await_expr.generate_evaluation_code(code, source_cname=result_var, decref_source=True) code.putln("%s = %s;" % (result_var, self.await_expr.py_result())) self.await_expr.generate_post_assignment_code(code) self.await_expr.free_temps(code) - + if self.result_is_used: self.allocate_temp_result(code) code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var)) @@ -3123,59 +3123,59 @@ class RawCNameExprNode(ExprNode): #------------------------------------------------------------------- # -# F-strings -# -#------------------------------------------------------------------- - - -class JoinedStrNode(ExprNode): - # F-strings - # - # values [UnicodeNode|FormattedValueNode] Substrings of the f-string - # - type = unicode_type - is_temp = True - - subexprs = ['values'] - - def analyse_types(self, env): - self.values = [v.analyse_types(env).coerce_to_pyobject(env) for v in self.values] - return self - - def may_be_none(self): - # PyUnicode_Join() always returns a Unicode string or raises an exception - return False - - def generate_evaluation_code(self, code): - code.mark_pos(self.pos) - num_items = len(self.values) - list_var = code.funcstate.allocate_temp(py_object_type, manage_ref=True) - ulength_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False) - max_char_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ucs4_type, manage_ref=False) - - code.putln('%s = PyTuple_New(%s); %s' % ( - list_var, - num_items, - code.error_goto_if_null(list_var, self.pos))) - code.put_gotref(list_var) - code.putln("%s = 0;" % ulength_var) - code.putln("%s = 127;" % max_char_var) # at least ASCII character range - - for i, node in enumerate(self.values): - node.generate_evaluation_code(code) - node.make_owned_reference(code) - - ulength = "__Pyx_PyUnicode_GET_LENGTH(%s)" % node.py_result() - max_char_value = "__Pyx_PyUnicode_MAX_CHAR_VALUE(%s)" % node.py_result() - is_ascii = False - if isinstance(node, UnicodeNode): - try: +# F-strings +# +#------------------------------------------------------------------- + + +class JoinedStrNode(ExprNode): + # F-strings + # + # values [UnicodeNode|FormattedValueNode] Substrings of the f-string + # + type = unicode_type + is_temp = True + + subexprs = ['values'] + + def analyse_types(self, env): + self.values = [v.analyse_types(env).coerce_to_pyobject(env) for v in self.values] + return self + + def may_be_none(self): + # PyUnicode_Join() always returns a Unicode string or raises an exception + return False + + def generate_evaluation_code(self, code): + code.mark_pos(self.pos) + num_items = len(self.values) + list_var = code.funcstate.allocate_temp(py_object_type, manage_ref=True) + ulength_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False) + max_char_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ucs4_type, manage_ref=False) + + code.putln('%s = PyTuple_New(%s); %s' % ( + list_var, + num_items, + code.error_goto_if_null(list_var, self.pos))) + code.put_gotref(list_var) + code.putln("%s = 0;" % ulength_var) + code.putln("%s = 127;" % max_char_var) # at least ASCII character range + + for i, node in enumerate(self.values): + node.generate_evaluation_code(code) + node.make_owned_reference(code) + + ulength = 
"__Pyx_PyUnicode_GET_LENGTH(%s)" % node.py_result() + max_char_value = "__Pyx_PyUnicode_MAX_CHAR_VALUE(%s)" % node.py_result() + is_ascii = False + if isinstance(node, UnicodeNode): + try: # most strings will be ASCII or at least Latin-1 - node.value.encode('iso8859-1') - max_char_value = '255' - node.value.encode('us-ascii') - is_ascii = True - except UnicodeEncodeError: + node.value.encode('iso8859-1') + max_char_value = '255' + node.value.encode('us-ascii') + is_ascii = True + except UnicodeEncodeError: if max_char_value != '255': # not ISO8859-1 => check BMP limit max_char = max(map(ord, node.value)) @@ -3191,133 +3191,133 @@ class JoinedStrNode(ExprNode): # not really worth implementing a check for surrogate pairs here # drawback: C code can differ when generating on Py2 with 2-byte Unicode pass - else: - ulength = str(len(node.value)) - elif isinstance(node, FormattedValueNode) and node.value.type.is_numeric: - is_ascii = True # formatted C numbers are always ASCII - - if not is_ascii: - code.putln("%s = (%s > %s) ? %s : %s;" % ( - max_char_var, max_char_value, max_char_var, max_char_value, max_char_var)) - code.putln("%s += %s;" % (ulength_var, ulength)) - - code.put_giveref(node.py_result()) - code.putln('PyTuple_SET_ITEM(%s, %s, %s);' % (list_var, i, node.py_result())) - node.generate_post_assignment_code(code) - node.free_temps(code) - - code.mark_pos(self.pos) - self.allocate_temp_result(code) - code.globalstate.use_utility_code(UtilityCode.load_cached("JoinPyUnicode", "StringTools.c")) - code.putln('%s = __Pyx_PyUnicode_Join(%s, %d, %s, %s); %s' % ( - self.result(), - list_var, - num_items, - ulength_var, - max_char_var, - code.error_goto_if_null(self.py_result(), self.pos))) - code.put_gotref(self.py_result()) - - code.put_decref_clear(list_var, py_object_type) - code.funcstate.release_temp(list_var) - code.funcstate.release_temp(ulength_var) - code.funcstate.release_temp(max_char_var) - - -class FormattedValueNode(ExprNode): - # {}-delimited portions of an f-string - # - # value ExprNode The expression itself + else: + ulength = str(len(node.value)) + elif isinstance(node, FormattedValueNode) and node.value.type.is_numeric: + is_ascii = True # formatted C numbers are always ASCII + + if not is_ascii: + code.putln("%s = (%s > %s) ? 
%s : %s;" % ( + max_char_var, max_char_value, max_char_var, max_char_value, max_char_var)) + code.putln("%s += %s;" % (ulength_var, ulength)) + + code.put_giveref(node.py_result()) + code.putln('PyTuple_SET_ITEM(%s, %s, %s);' % (list_var, i, node.py_result())) + node.generate_post_assignment_code(code) + node.free_temps(code) + + code.mark_pos(self.pos) + self.allocate_temp_result(code) + code.globalstate.use_utility_code(UtilityCode.load_cached("JoinPyUnicode", "StringTools.c")) + code.putln('%s = __Pyx_PyUnicode_Join(%s, %d, %s, %s); %s' % ( + self.result(), + list_var, + num_items, + ulength_var, + max_char_var, + code.error_goto_if_null(self.py_result(), self.pos))) + code.put_gotref(self.py_result()) + + code.put_decref_clear(list_var, py_object_type) + code.funcstate.release_temp(list_var) + code.funcstate.release_temp(ulength_var) + code.funcstate.release_temp(max_char_var) + + +class FormattedValueNode(ExprNode): + # {}-delimited portions of an f-string + # + # value ExprNode The expression itself # conversion_char str or None Type conversion (!s, !r, !a, or none, or 'd' for integer conversion) - # format_spec JoinedStrNode or None Format string passed to __format__ - # c_format_spec str or None If not None, formatting can be done at the C level - - subexprs = ['value', 'format_spec'] - - type = unicode_type - is_temp = True - c_format_spec = None - - find_conversion_func = { - 's': 'PyObject_Unicode', - 'r': 'PyObject_Repr', - 'a': 'PyObject_ASCII', # NOTE: mapped to PyObject_Repr() in Py2 + # format_spec JoinedStrNode or None Format string passed to __format__ + # c_format_spec str or None If not None, formatting can be done at the C level + + subexprs = ['value', 'format_spec'] + + type = unicode_type + is_temp = True + c_format_spec = None + + find_conversion_func = { + 's': 'PyObject_Unicode', + 'r': 'PyObject_Repr', + 'a': 'PyObject_ASCII', # NOTE: mapped to PyObject_Repr() in Py2 'd': '__Pyx_PyNumber_IntOrLong', # NOTE: internal mapping for '%d' formatting - }.get - - def may_be_none(self): - # PyObject_Format() always returns a Unicode string or raises an exception - return False - - def analyse_types(self, env): - self.value = self.value.analyse_types(env) - if not self.format_spec or self.format_spec.is_string_literal: - c_format_spec = self.format_spec.value if self.format_spec else self.value.type.default_format_spec - if self.value.type.can_coerce_to_pystring(env, format_spec=c_format_spec): - self.c_format_spec = c_format_spec - - if self.format_spec: - self.format_spec = self.format_spec.analyse_types(env).coerce_to_pyobject(env) - if self.c_format_spec is None: - self.value = self.value.coerce_to_pyobject(env) + }.get + + def may_be_none(self): + # PyObject_Format() always returns a Unicode string or raises an exception + return False + + def analyse_types(self, env): + self.value = self.value.analyse_types(env) + if not self.format_spec or self.format_spec.is_string_literal: + c_format_spec = self.format_spec.value if self.format_spec else self.value.type.default_format_spec + if self.value.type.can_coerce_to_pystring(env, format_spec=c_format_spec): + self.c_format_spec = c_format_spec + + if self.format_spec: + self.format_spec = self.format_spec.analyse_types(env).coerce_to_pyobject(env) + if self.c_format_spec is None: + self.value = self.value.coerce_to_pyobject(env) if not self.format_spec and (not self.conversion_char or self.conversion_char == 's'): - if self.value.type is unicode_type and not self.value.may_be_none(): - # value is definitely a unicode 
string and we don't format it any special - return self.value - return self - - def generate_result_code(self, code): - if self.c_format_spec is not None and not self.value.type.is_pyobject: - convert_func_call = self.value.type.convert_to_pystring( - self.value.result(), code, self.c_format_spec) - code.putln("%s = %s; %s" % ( - self.result(), - convert_func_call, - code.error_goto_if_null(self.result(), self.pos))) - code.put_gotref(self.py_result()) - return - - value_result = self.value.py_result() - value_is_unicode = self.value.type is unicode_type and not self.value.may_be_none() - if self.format_spec: - format_func = '__Pyx_PyObject_Format' - format_spec = self.format_spec.py_result() - else: - # common case: expect simple Unicode pass-through if no format spec - format_func = '__Pyx_PyObject_FormatSimple' - # passing a Unicode format string in Py2 forces PyObject_Format() to also return a Unicode string - format_spec = Naming.empty_unicode - - conversion_char = self.conversion_char - if conversion_char == 's' and value_is_unicode: - # no need to pipe unicode strings through str() - conversion_char = None - - if conversion_char: - fn = self.find_conversion_func(conversion_char) - assert fn is not None, "invalid conversion character found: '%s'" % conversion_char - value_result = '%s(%s)' % (fn, value_result) - code.globalstate.use_utility_code( - UtilityCode.load_cached("PyObjectFormatAndDecref", "StringTools.c")) - format_func += 'AndDecref' - elif self.format_spec: - code.globalstate.use_utility_code( - UtilityCode.load_cached("PyObjectFormat", "StringTools.c")) - else: - code.globalstate.use_utility_code( - UtilityCode.load_cached("PyObjectFormatSimple", "StringTools.c")) - - code.putln("%s = %s(%s, %s); %s" % ( - self.result(), - format_func, - value_result, - format_spec, - code.error_goto_if_null(self.result(), self.pos))) - code.put_gotref(self.py_result()) - - -#------------------------------------------------------------------- -# + if self.value.type is unicode_type and not self.value.may_be_none(): + # value is definitely a unicode string and we don't format it any special + return self.value + return self + + def generate_result_code(self, code): + if self.c_format_spec is not None and not self.value.type.is_pyobject: + convert_func_call = self.value.type.convert_to_pystring( + self.value.result(), code, self.c_format_spec) + code.putln("%s = %s; %s" % ( + self.result(), + convert_func_call, + code.error_goto_if_null(self.result(), self.pos))) + code.put_gotref(self.py_result()) + return + + value_result = self.value.py_result() + value_is_unicode = self.value.type is unicode_type and not self.value.may_be_none() + if self.format_spec: + format_func = '__Pyx_PyObject_Format' + format_spec = self.format_spec.py_result() + else: + # common case: expect simple Unicode pass-through if no format spec + format_func = '__Pyx_PyObject_FormatSimple' + # passing a Unicode format string in Py2 forces PyObject_Format() to also return a Unicode string + format_spec = Naming.empty_unicode + + conversion_char = self.conversion_char + if conversion_char == 's' and value_is_unicode: + # no need to pipe unicode strings through str() + conversion_char = None + + if conversion_char: + fn = self.find_conversion_func(conversion_char) + assert fn is not None, "invalid conversion character found: '%s'" % conversion_char + value_result = '%s(%s)' % (fn, value_result) + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyObjectFormatAndDecref", "StringTools.c")) + format_func += 
'AndDecref' + elif self.format_spec: + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyObjectFormat", "StringTools.c")) + else: + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyObjectFormatSimple", "StringTools.c")) + + code.putln("%s = %s(%s, %s); %s" % ( + self.result(), + format_func, + value_result, + format_spec, + code.error_goto_if_null(self.result(), self.pos))) + code.put_gotref(self.py_result()) + + +#------------------------------------------------------------------- +# # Parallel nodes (cython.parallel.thread(savailable|id)) # #------------------------------------------------------------------- @@ -3380,38 +3380,38 @@ class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode): # #------------------------------------------------------------------- - -class _IndexingBaseNode(ExprNode): - # Base class for indexing nodes. - # - # base ExprNode the value being indexed - - def is_ephemeral(self): - # in most cases, indexing will return a safe reference to an object in a container, - # so we consider the result safe if the base object is - return self.base.is_ephemeral() or self.base.type in ( + +class _IndexingBaseNode(ExprNode): + # Base class for indexing nodes. + # + # base ExprNode the value being indexed + + def is_ephemeral(self): + # in most cases, indexing will return a safe reference to an object in a container, + # so we consider the result safe if the base object is + return self.base.is_ephemeral() or self.base.type in ( basestring_type, str_type, bytes_type, bytearray_type, unicode_type) - - def check_const_addr(self): - return self.base.check_const_addr() and self.index.check_const() - - def is_lvalue(self): - # NOTE: references currently have both is_reference and is_ptr - # set. Since pointers and references have different lvalue - # rules, we must be careful to separate the two. - if self.type.is_reference: - if self.type.ref_base_type.is_array: - # fixed-sized arrays aren't l-values - return False - elif self.type.is_ptr: - # non-const pointers can always be reassigned - return True - # Just about everything else returned by the index operator - # can be an lvalue. - return True - - -class IndexNode(_IndexingBaseNode): + + def check_const_addr(self): + return self.base.check_const_addr() and self.index.check_const() + + def is_lvalue(self): + # NOTE: references currently have both is_reference and is_ptr + # set. Since pointers and references have different lvalue + # rules, we must be careful to separate the two. + if self.type.is_reference: + if self.type.ref_base_type.is_array: + # fixed-sized arrays aren't l-values + return False + elif self.type.is_ptr: + # non-const pointers can always be reassigned + return True + # Just about everything else returned by the index operator + # can be an lvalue. + return True + + +class IndexNode(_IndexingBaseNode): # Sequence indexing. 
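
FormattedValueNode above maps the !s/!r/!a conversions to helper functions and then formats the value with the format spec; the pure-Python semantics it compiles are roughly (a sketch, not the generated code):

    def format_value(value, conversion=None, spec=""):
        # Apply the conversion first (cf. find_conversion_func above),
        # then pass the result through format() with the format spec,
        # matching __Pyx_PyObject_Format(Simple).
        conv = {'s': str, 'r': repr, 'a': ascii}.get(conversion)
        if conv is not None:
            value = conv(value)
        return format(value, spec)

    assert format_value(255, spec="#06x") == "0x00ff"
    assert format_value("hi", conversion="r") == "'hi'"
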
# # base ExprNode @@ -3421,21 +3421,21 @@ class IndexNode(_IndexingBaseNode): # is_fused_index boolean Whether the index is used to specialize a # c(p)def function - subexprs = ['base', 'index'] + subexprs = ['base', 'index'] type_indices = None is_subscript = True is_fused_index = False def calculate_constant_result(self): - self.constant_result = self.base.constant_result[self.index.constant_result] + self.constant_result = self.base.constant_result[self.index.constant_result] def compile_time_value(self, denv): base = self.base.compile_time_value(denv) index = self.index.compile_time_value(denv) try: return base[index] - except Exception as e: + except Exception as e: self.compile_time_value_error(e) def is_simple(self): @@ -3467,26 +3467,26 @@ class IndexNode(_IndexingBaseNode): else: template_values = [self.index] type_node = Nodes.TemplatedTypeNode( - pos=self.pos, - positional_args=template_values, - keyword_args=None) - return type_node.analyse(env, base_type=base_type) - elif self.index.is_slice or self.index.is_sequence_constructor: - # memory view - from . import MemoryView - env.use_utility_code(MemoryView.view_utility_code) - axes = [self.index] if self.index.is_slice else list(self.index.args) - return PyrexTypes.MemoryViewSliceType(base_type, MemoryView.get_axes_specs(env, axes)) + pos=self.pos, + positional_args=template_values, + keyword_args=None) + return type_node.analyse(env, base_type=base_type) + elif self.index.is_slice or self.index.is_sequence_constructor: + # memory view + from . import MemoryView + env.use_utility_code(MemoryView.view_utility_code) + axes = [self.index] if self.index.is_slice else list(self.index.args) + return PyrexTypes.MemoryViewSliceType(base_type, MemoryView.get_axes_specs(env, axes)) else: - # C array + # C array index = self.index.compile_time_value(env) if index is not None: - try: - index = int(index) - except (ValueError, TypeError): - pass - else: - return PyrexTypes.CArrayType(base_type, index) + try: + index = int(index) + except (ValueError, TypeError): + pass + else: + return PyrexTypes.CArrayType(base_type, index) error(self.pos, "Array size must be a compile time constant") return None @@ -3495,7 +3495,7 @@ class IndexNode(_IndexingBaseNode): def infer_type(self, env): base_type = self.base.infer_type(env) - if self.index.is_slice: + if self.index.is_slice: # slicing! 
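
In the ctuple branch of infer_type() just below, a constant negative index is wrapped Python-style before the component type is looked up; the wrap rule in isolation (illustrative helper):

    def ctuple_component_index(index, size):
        # Negative constants count from the end; only an in-range result
        # yields a known component type.
        if index < 0:
            index += size
        return index if 0 <= index < size else None

    assert ctuple_component_index(-1, 3) == 2
    assert ctuple_component_index(3, 3) is None
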
if base_type.is_string: # sliced C strings must coerce to Python @@ -3542,13 +3542,13 @@ class IndexNode(_IndexingBaseNode): return item_type elif base_type.is_ptr or base_type.is_array: return base_type.base_type - elif base_type.is_ctuple and isinstance(self.index, IntNode): - if self.index.has_constant_result(): - index = self.index.constant_result - if index < 0: - index += base_type.size - if 0 <= index < base_type.size: - return base_type.components[index] + elif base_type.is_ctuple and isinstance(self.index, IntNode): + if self.index.has_constant_result(): + index = self.index.constant_result + if index < 0: + index += base_type.size + if 0 <= index < base_type.size: + return base_type.components[index] if base_type.is_cpp_class: class FakeOperand: @@ -3581,7 +3581,7 @@ class IndexNode(_IndexingBaseNode): node = self.analyse_base_and_index_types(env, setting=True) if node.type.is_const: error(self.pos, "Assignment to const dereference") - if node is self and not node.is_lvalue(): + if node is self and not node.is_lvalue(): error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type) return node @@ -3599,7 +3599,7 @@ class IndexNode(_IndexingBaseNode): self.type = PyrexTypes.error_type return self - is_slice = self.index.is_slice + is_slice = self.index.is_slice if not env.directives['wraparound']: if is_slice: check_negative_indices(self.index.start, self.index.stop) @@ -3616,16 +3616,16 @@ class IndexNode(_IndexingBaseNode): if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array): self.base = self.base.coerce_to_pyobject(env) - replacement_node = self.analyse_as_buffer_operation(env, getting) - if replacement_node is not None: - return replacement_node + replacement_node = self.analyse_as_buffer_operation(env, getting) + if replacement_node is not None: + return replacement_node self.nogil = env.nogil - base_type = self.base.type + base_type = self.base.type - if not base_type.is_cfunction: - self.index = self.index.analyse_types(env) - self.original_index_type = self.index.type + if not base_type.is_cfunction: + self.index = self.index.analyse_types(env) + self.original_index_type = self.index.type if base_type.is_unicode_char: # we infer Py_UNICODE/Py_UCS4 for unicode strings in some @@ -3637,26 +3637,26 @@ class IndexNode(_IndexingBaseNode): return self.base self.base = self.base.coerce_to_pyobject(env) base_type = self.base.type - - if base_type.is_pyobject: - return self.analyse_as_pyobject(env, is_slice, getting, setting) - elif base_type.is_ptr or base_type.is_array: - return self.analyse_as_c_array(env, is_slice) - elif base_type.is_cpp_class: - return self.analyse_as_cpp(env, setting) - elif base_type.is_cfunction: - return self.analyse_as_c_function(env) - elif base_type.is_ctuple: - return self.analyse_as_c_tuple(env, getting, setting) - else: - error(self.pos, - "Attempting to index non-array type '%s'" % - base_type) - self.type = PyrexTypes.error_type - return self - - def analyse_as_pyobject(self, env, is_slice, getting, setting): - base_type = self.base.type + + if base_type.is_pyobject: + return self.analyse_as_pyobject(env, is_slice, getting, setting) + elif base_type.is_ptr or base_type.is_array: + return self.analyse_as_c_array(env, is_slice) + elif base_type.is_cpp_class: + return self.analyse_as_cpp(env, setting) + elif base_type.is_cfunction: + return self.analyse_as_c_function(env) + elif base_type.is_ctuple: + return self.analyse_as_c_tuple(env, getting, setting) + else: + error(self.pos, + "Attempting to index 
non-array type '%s'" % + base_type) + self.type = PyrexTypes.error_type + return self + + def analyse_as_pyobject(self, env, is_slice, getting, setting): + base_type = self.base.type if self.index.type.is_unicode_char and base_type is not dict_type: # TODO: eventually fold into case below and remove warning, once people have adapted their code warning(self.pos, @@ -3665,139 +3665,139 @@ class IndexNode(_IndexingBaseNode): self.index = self.index.coerce_to_pyobject(env) self.is_temp = 1 elif self.index.type.is_int and base_type is not dict_type: - if (getting - and (base_type in (list_type, tuple_type, bytearray_type)) - and (not self.index.type.signed - or not env.directives['wraparound'] - or (isinstance(self.index, IntNode) and - self.index.has_constant_result() and self.index.constant_result >= 0)) - and not env.directives['boundscheck']): - self.is_temp = 0 + if (getting + and (base_type in (list_type, tuple_type, bytearray_type)) + and (not self.index.type.signed + or not env.directives['wraparound'] + or (isinstance(self.index, IntNode) and + self.index.has_constant_result() and self.index.constant_result >= 0)) + and not env.directives['boundscheck']): + self.is_temp = 0 + else: + self.is_temp = 1 + self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env) + self.original_index_type.create_to_py_utility_code(env) + else: + self.index = self.index.coerce_to_pyobject(env) + self.is_temp = 1 + + if self.index.type.is_int and base_type is unicode_type: + # Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string + # if required, so this is fast and safe + self.type = PyrexTypes.c_py_ucs4_type + elif self.index.type.is_int and base_type is bytearray_type: + if setting: + self.type = PyrexTypes.c_uchar_type else: - self.is_temp = 1 - self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env) - self.original_index_type.create_to_py_utility_code(env) - else: - self.index = self.index.coerce_to_pyobject(env) - self.is_temp = 1 - - if self.index.type.is_int and base_type is unicode_type: - # Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string - # if required, so this is fast and safe - self.type = PyrexTypes.c_py_ucs4_type - elif self.index.type.is_int and base_type is bytearray_type: - if setting: - self.type = PyrexTypes.c_uchar_type - else: - # not using 'uchar' to enable fast and safe error reporting as '-1' - self.type = PyrexTypes.c_int_type + # not using 'uchar' to enable fast and safe error reporting as '-1' + self.type = PyrexTypes.c_int_type elif is_slice and base_type in (bytes_type, bytearray_type, str_type, unicode_type, list_type, tuple_type): - self.type = base_type - else: - item_type = None - if base_type in (list_type, tuple_type) and self.index.type.is_int: - item_type = infer_sequence_item_type( - env, self.base, self.index, seq_type=base_type) - if item_type is None: - item_type = py_object_type - self.type = item_type - if base_type in (list_type, tuple_type, dict_type): - # do the None check explicitly (not in a helper) to allow optimising it away - self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable") - + self.type = base_type + else: + item_type = None + if base_type in (list_type, tuple_type) and self.index.type.is_int: + item_type = infer_sequence_item_type( + env, self.base, self.index, seq_type=base_type) + if item_type is None: + item_type = py_object_type + self.type = item_type + if base_type in (list_type, tuple_type, dict_type): + # do the None 
check explicitly (not in a helper) to allow optimising it away + self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable") + self.wrap_in_nonecheck_node(env, getting) return self - def analyse_as_c_array(self, env, is_slice): - base_type = self.base.type - self.type = base_type.base_type - if is_slice: - self.type = base_type - elif self.index.type.is_pyobject: - self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env) - elif not self.index.type.is_int: - error(self.pos, "Invalid index type '%s'" % self.index.type) - return self - - def analyse_as_cpp(self, env, setting): - base_type = self.base.type - function = env.lookup_operator("[]", [self.base, self.index]) - if function is None: - error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type)) - self.type = PyrexTypes.error_type - self.result_code = "<error>" - return self - func_type = function.type - if func_type.is_ptr: - func_type = func_type.base_type - self.exception_check = func_type.exception_check - self.exception_value = func_type.exception_value - if self.exception_check: - if not setting: - self.is_temp = True - if self.exception_value is None: - env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) - self.index = self.index.coerce_to(func_type.args[0].type, env) - self.type = func_type.return_type - if setting and not func_type.return_type.is_reference: - error(self.pos, "Can't set non-reference result '%s'" % self.type) - return self - - def analyse_as_c_function(self, env): - base_type = self.base.type - if base_type.is_fused: - self.parse_indexed_fused_cdef(env) - else: - self.type_indices = self.parse_index_as_types(env) - self.index = None # FIXME: use a dedicated Node class instead of generic IndexNode - if base_type.templates is None: - error(self.pos, "Can only parameterize template functions.") - self.type = error_type + def analyse_as_c_array(self, env, is_slice): + base_type = self.base.type + self.type = base_type.base_type + if is_slice: + self.type = base_type + elif self.index.type.is_pyobject: + self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env) + elif not self.index.type.is_int: + error(self.pos, "Invalid index type '%s'" % self.index.type) + return self + + def analyse_as_cpp(self, env, setting): + base_type = self.base.type + function = env.lookup_operator("[]", [self.base, self.index]) + if function is None: + error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type)) + self.type = PyrexTypes.error_type + self.result_code = "<error>" + return self + func_type = function.type + if func_type.is_ptr: + func_type = func_type.base_type + self.exception_check = func_type.exception_check + self.exception_value = func_type.exception_value + if self.exception_check: + if not setting: + self.is_temp = True + if self.exception_value is None: + env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) + self.index = self.index.coerce_to(func_type.args[0].type, env) + self.type = func_type.return_type + if setting and not func_type.return_type.is_reference: + error(self.pos, "Can't set non-reference result '%s'" % self.type) + return self + + def analyse_as_c_function(self, env): + base_type = self.base.type + if base_type.is_fused: + self.parse_indexed_fused_cdef(env) + else: + self.type_indices = self.parse_index_as_types(env) + self.index = None # FIXME: use a dedicated Node class instead of generic IndexNode + if 
base_type.templates is None: + error(self.pos, "Can only parameterize template functions.") + self.type = error_type elif self.type_indices is None: # Error recorded earlier. self.type = error_type - elif len(base_type.templates) != len(self.type_indices): - error(self.pos, "Wrong number of template arguments: expected %s, got %s" % ( - (len(base_type.templates), len(self.type_indices)))) - self.type = error_type + elif len(base_type.templates) != len(self.type_indices): + error(self.pos, "Wrong number of template arguments: expected %s, got %s" % ( + (len(base_type.templates), len(self.type_indices)))) + self.type = error_type + else: + self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices))) + # FIXME: use a dedicated Node class instead of generic IndexNode + return self + + def analyse_as_c_tuple(self, env, getting, setting): + base_type = self.base.type + if isinstance(self.index, IntNode) and self.index.has_constant_result(): + index = self.index.constant_result + if -base_type.size <= index < base_type.size: + if index < 0: + index += base_type.size + self.type = base_type.components[index] else: - self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices))) - # FIXME: use a dedicated Node class instead of generic IndexNode - return self - - def analyse_as_c_tuple(self, env, getting, setting): - base_type = self.base.type - if isinstance(self.index, IntNode) and self.index.has_constant_result(): - index = self.index.constant_result - if -base_type.size <= index < base_type.size: - if index < 0: - index += base_type.size - self.type = base_type.components[index] - else: - error(self.pos, - "Index %s out of bounds for '%s'" % - (index, base_type)) - self.type = PyrexTypes.error_type - return self - else: - self.base = self.base.coerce_to_pyobject(env) - return self.analyse_base_and_index_types(env, getting=getting, setting=setting, analyse_base=False) - - def analyse_as_buffer_operation(self, env, getting): - """ - Analyse buffer indexing and memoryview indexing/slicing - """ - if isinstance(self.index, TupleNode): - indices = self.index.args - else: - indices = [self.index] + error(self.pos, + "Index %s out of bounds for '%s'" % + (index, base_type)) + self.type = PyrexTypes.error_type + return self + else: + self.base = self.base.coerce_to_pyobject(env) + return self.analyse_base_and_index_types(env, getting=getting, setting=setting, analyse_base=False) + + def analyse_as_buffer_operation(self, env, getting): + """ + Analyse buffer indexing and memoryview indexing/slicing + """ + if isinstance(self.index, TupleNode): + indices = self.index.args + else: + indices = [self.index] base = self.base base_type = base.type - replacement_node = None - if base_type.is_memoryviewslice: - # memoryviewslice indexing or slicing - from . import MemoryView + replacement_node = None + if base_type.is_memoryviewslice: + # memoryviewslice indexing or slicing + from . import MemoryView if base.is_memview_slice: # For memory views, "view[i][j]" is the same as "view[i, j]" => use the latter for speed. 
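
# As the comment above says, chained memoryview indexing "view[i][j]" is
# folded into the multi-dimensional form "view[i, j]" so that only one
# lookup is generated. A rough model of that folding, assuming plain
# lists of index objects (the real merged_indices(), shown further
# below, also composes partial slices):

def fold_chained_indices(inner_indices, outer_indices, ndim):
    """Merge view[inner...][outer...] into one index list if it fits."""
    merged = list(inner_indices) + list(outer_indices)
    return merged if len(merged) <= ndim else None

assert fold_chained_indices(['i'], ['j'], 2) == ['i', 'j']   # view[i][j] -> view[i, j]
assert fold_chained_indices(['i', 'j'], ['k'], 2) is None    # too many indices
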
merged_indices = base.merged_indices(indices) @@ -3805,10 +3805,10 @@ class IndexNode(_IndexingBaseNode): base = base.base base_type = base.type indices = merged_indices - have_slices, indices, newaxes = MemoryView.unellipsify(indices, base_type.ndim) - if have_slices: + have_slices, indices, newaxes = MemoryView.unellipsify(indices, base_type.ndim) + if have_slices: replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=base) - else: + else: replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=base) elif base_type.is_buffer or base_type.is_pythran_expr: if base_type.is_pythran_expr or len(indices) == base_type.ndim: @@ -3831,16 +3831,16 @@ class IndexNode(_IndexingBaseNode): replacement_node = BufferIndexNode(self.pos, indices=indices, base=base) # On cloning, indices is cloned. Otherwise, unpack index into indices. assert not isinstance(self.index, CloneNode) - - if replacement_node is not None: - replacement_node = replacement_node.analyse_types(env, getting) - return replacement_node - - def wrap_in_nonecheck_node(self, env, getting): - if not env.directives['nonecheck'] or not self.base.may_be_none(): - return - self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable") - + + if replacement_node is not None: + replacement_node = replacement_node.analyse_types(env, getting) + return replacement_node + + def wrap_in_nonecheck_node(self, env, getting): + if not env.directives['nonecheck'] or not self.base.may_be_none(): + return + self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable") + def parse_index_as_types(self, env, required=True): if isinstance(self.index, TupleNode): indices = self.index.args @@ -3949,7 +3949,7 @@ class IndexNode(_IndexingBaseNode): gil_message = "Indexing Python object" def calculate_result_code(self): - if self.base.type in (list_type, tuple_type, bytearray_type): + if self.base.type in (list_type, tuple_type, bytearray_type): if self.base.type is list_type: index_code = "PyList_GET_ITEM(%s, %s)" elif self.base.type is tuple_type: @@ -3961,12 +3961,12 @@ class IndexNode(_IndexingBaseNode): elif self.base.type.is_cfunction: return "%s<%s>" % ( self.base.result(), - ",".join([param.empty_declaration_code() for param in self.type_indices])) - elif self.base.type.is_ctuple: - index = self.index.constant_result - if index < 0: - index += self.base.type.size - return "%s.f%s" % (self.base.result(), index) + ",".join([param.empty_declaration_code() for param in self.type_indices])) + elif self.base.type.is_ctuple: + index = self.index.constant_result + if index < 0: + index += self.base.type.size + return "%s.f%s" % (self.base.result(), index) else: if (self.type.is_ptr or self.type.is_array) and self.type == self.base.type: error(self.pos, "Invalid use of pointer slice") @@ -3980,11 +3980,11 @@ class IndexNode(_IndexingBaseNode): wraparound = ( bool(code.globalstate.directives['wraparound']) and self.original_index_type.signed and - not (isinstance(self.index.constant_result, _py_int_types) + not (isinstance(self.index.constant_result, _py_int_types) and self.index.constant_result >= 0)) boundscheck = bool(code.globalstate.directives['boundscheck']) return ", %s, %d, %s, %d, %d, %d" % ( - self.original_index_type.empty_declaration_code(), + self.original_index_type.empty_declaration_code(), self.original_index_type.signed and 1 or 0, self.original_index_type.to_py_function, is_list, wraparound, boundscheck) @@ -3992,24 +3992,24 @@ class IndexNode(_IndexingBaseNode): return "" def 
generate_result_code(self, code): - if not self.is_temp: - # all handled in self.calculate_result_code() - return + if not self.is_temp: + # all handled in self.calculate_result_code() + return utility_code = None - if self.type.is_pyobject: - error_value = 'NULL' - if self.index.type.is_int: - if self.base.type is list_type: - function = "__Pyx_GetItemInt_List" - elif self.base.type is tuple_type: - function = "__Pyx_GetItemInt_Tuple" + if self.type.is_pyobject: + error_value = 'NULL' + if self.index.type.is_int: + if self.base.type is list_type: + function = "__Pyx_GetItemInt_List" + elif self.base.type is tuple_type: + function = "__Pyx_GetItemInt_Tuple" else: - function = "__Pyx_GetItemInt" + function = "__Pyx_GetItemInt" utility_code = TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c") else: - if self.base.type is dict_type: - function = "__Pyx_PyDict_GetItem" + if self.base.type is dict_type: + function = "__Pyx_PyDict_GetItem" utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c") elif self.base.type is py_object_type and self.index.type in (str_type, unicode_type): # obj[str] is probably doing a dict lookup @@ -4017,50 +4017,50 @@ class IndexNode(_IndexingBaseNode): utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c") else: function = "__Pyx_PyObject_GetItem" - code.globalstate.use_utility_code( + code.globalstate.use_utility_code( TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c")) utility_code = UtilityCode.load_cached("ObjectGetItem", "ObjectHandling.c") - elif self.type.is_unicode_char and self.base.type is unicode_type: - assert self.index.type.is_int - function = "__Pyx_GetItemInt_Unicode" - error_value = '(Py_UCS4)-1' + elif self.type.is_unicode_char and self.base.type is unicode_type: + assert self.index.type.is_int + function = "__Pyx_GetItemInt_Unicode" + error_value = '(Py_UCS4)-1' utility_code = UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c") - elif self.base.type is bytearray_type: - assert self.index.type.is_int - assert self.type.is_int - function = "__Pyx_GetItemInt_ByteArray" - error_value = '-1' + elif self.base.type is bytearray_type: + assert self.index.type.is_int + assert self.type.is_int + function = "__Pyx_GetItemInt_ByteArray" + error_value = '-1' utility_code = UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c") - elif not (self.base.type.is_cpp_class and self.exception_check): - assert False, "unexpected type %s and base type %s for indexing" % ( - self.type, self.base.type) + elif not (self.base.type.is_cpp_class and self.exception_check): + assert False, "unexpected type %s and base type %s for indexing" % ( + self.type, self.base.type) if utility_code is not None: code.globalstate.use_utility_code(utility_code) - if self.index.type.is_int: - index_code = self.index.result() - else: - index_code = self.index.py_result() - - if self.base.type.is_cpp_class and self.exception_check: - translate_cpp_exception(code, self.pos, - "%s = %s[%s];" % (self.result(), self.base.result(), - self.index.result()), - self.result() if self.type.is_pyobject else None, - self.exception_value, self.in_nogil_context) - else: - error_check = '!%s' if error_value == 'NULL' else '%%s == %s' % error_value + if self.index.type.is_int: + index_code = self.index.result() + else: + index_code = self.index.py_result() + + if self.base.type.is_cpp_class and self.exception_check: + translate_cpp_exception(code, self.pos, + "%s = %s[%s];" % (self.result(), self.base.result(), + 
self.index.result()), + self.result() if self.type.is_pyobject else None, + self.exception_value, self.in_nogil_context) + else: + error_check = '!%s' if error_value == 'NULL' else '%%s == %s' % error_value code.putln( - "%s = %s(%s, %s%s); %s" % ( + "%s = %s(%s, %s%s); %s" % ( self.result(), function, self.base.py_result(), index_code, self.extra_index_params(code), - code.error_goto_if(error_check % self.result(), self.pos))) - if self.type.is_pyobject: - code.put_gotref(self.py_result()) + code.error_goto_if(error_check % self.result(), self.pos))) + if self.type.is_pyobject: + code.put_gotref(self.py_result()) def generate_setitem_code(self, value_code, code): if self.index.type.is_int: @@ -4086,46 +4086,46 @@ class IndexNode(_IndexingBaseNode): # (PyTuple_SetItem() is for creating new tuples from scratch). else: function = "PyObject_SetItem" - code.putln(code.error_goto_if_neg( - "%s(%s, %s, %s%s)" % ( + code.putln(code.error_goto_if_neg( + "%s(%s, %s, %s%s)" % ( function, self.base.py_result(), index_code, value_code, - self.extra_index_params(code)), - self.pos)) + self.extra_index_params(code)), + self.pos)) - def generate_assignment_code(self, rhs, code, overloaded_assignment=False, - exception_check=None, exception_value=None): - self.generate_subexpr_evaluation_code(code) + def generate_assignment_code(self, rhs, code, overloaded_assignment=False, + exception_check=None, exception_value=None): + self.generate_subexpr_evaluation_code(code) - if self.type.is_pyobject: + if self.type.is_pyobject: self.generate_setitem_code(rhs.py_result(), code) elif self.base.type is bytearray_type: value_code = self._check_byte_value(code, rhs) self.generate_setitem_code(value_code, code) - elif self.base.type.is_cpp_class and self.exception_check and self.exception_check == '+': - if overloaded_assignment and exception_check and \ - self.exception_value != exception_value: - # Handle the case that both the index operator and the assignment - # operator have a c++ exception handler and they are not the same. - translate_double_cpp_exception(code, self.pos, self.type, - self.result(), rhs.result(), self.exception_value, - exception_value, self.in_nogil_context) - else: - # Handle the case that only the index operator has a - # c++ exception handler, or that - # both exception handlers are the same. - translate_cpp_exception(code, self.pos, - "%s = %s;" % (self.result(), rhs.result()), - self.result() if self.type.is_pyobject else None, - self.exception_value, self.in_nogil_context) + elif self.base.type.is_cpp_class and self.exception_check and self.exception_check == '+': + if overloaded_assignment and exception_check and \ + self.exception_value != exception_value: + # Handle the case that both the index operator and the assignment + # operator have a c++ exception handler and they are not the same. + translate_double_cpp_exception(code, self.pos, self.type, + self.result(), rhs.result(), self.exception_value, + exception_value, self.in_nogil_context) + else: + # Handle the case that only the index operator has a + # c++ exception handler, or that + # both exception handlers are the same. 
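
# Context for the two branches above: when both operator[] and the
# assignment operator are C++ functions declared "except +", each may
# need its own try/catch in the generated C++. Only when the handlers
# agree (or only one operator can throw) does a single try/catch
# suffice. A schematic of that decision, with handler tokens standing
# in for Cython's exception_value annotations:

def pick_exception_strategy(index_handler, assignment_handler):
    if assignment_handler is None:
        return 'single'   # only operator[] can throw
    if index_handler == assignment_handler:
        return 'single'   # one try/catch covers both operators
    return 'double'       # nested try/catch, one per operator

assert pick_exception_strategy('+', None) == 'single'
assert pick_exception_strategy('raise_py_a', 'raise_py_b') == 'double'
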
+ translate_cpp_exception(code, self.pos, + "%s = %s;" % (self.result(), rhs.result()), + self.result() if self.type.is_pyobject else None, + self.exception_value, self.in_nogil_context) else: code.putln( - "%s = %s;" % (self.result(), rhs.result())) + "%s = %s;" % (self.result(), rhs.result())) - self.generate_subexpr_disposal_code(code) - self.free_subexpr_temps(code) + self.generate_subexpr_disposal_code(code) + self.free_subexpr_temps(code) rhs.generate_disposal_code(code) rhs.free_temps(code) @@ -4179,101 +4179,101 @@ class IndexNode(_IndexingBaseNode): function = "PyDict_DelItem" else: function = "PyObject_DelItem" - code.putln(code.error_goto_if_neg( - "%s(%s, %s%s)" % ( + code.putln(code.error_goto_if_neg( + "%s(%s, %s%s)" % ( function, self.base.py_result(), index_code, - self.extra_index_params(code)), - self.pos)) + self.extra_index_params(code)), + self.pos)) self.generate_subexpr_disposal_code(code) self.free_subexpr_temps(code) - -class BufferIndexNode(_IndexingBaseNode): - """ - Indexing of buffers and memoryviews. This node is created during type - analysis from IndexNode and replaces it. - - Attributes: - base - base node being indexed - indices - list of indexing expressions - """ - - subexprs = ['base', 'indices'] - - is_buffer_access = True - - # Whether we're assigning to a buffer (in that case it needs to be writable) - writable_needed = False - + +class BufferIndexNode(_IndexingBaseNode): + """ + Indexing of buffers and memoryviews. This node is created during type + analysis from IndexNode and replaces it. + + Attributes: + base - base node being indexed + indices - list of indexing expressions + """ + + subexprs = ['base', 'indices'] + + is_buffer_access = True + + # Whether we're assigning to a buffer (in that case it needs to be writable) + writable_needed = False + # Any indexing temp variables that we need to clean up. index_temps = () - def analyse_target_types(self, env): - self.analyse_types(env, getting=False) - - def analyse_types(self, env, getting=True): - """ - Analyse types for buffer indexing only. Overridden by memoryview - indexing and slicing subclasses - """ - # self.indices are already analyzed + def analyse_target_types(self, env): + self.analyse_types(env, getting=False) + + def analyse_types(self, env, getting=True): + """ + Analyse types for buffer indexing only. 
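
# The writability check that follows: reading through a buffer index is
# always allowed, but using it as an assignment target requires the
# buffer to have been acquired writable. A condensed analogue of that
# rule (Buffer is an illustrative stand-in, and the compile error is
# modelled as a TypeError):

from dataclasses import dataclass

@dataclass
class Buffer:
    writable: bool
    writable_needed: bool = False

def check_buffer_access(buf, getting):
    if not getting:                 # node is an assignment target
        if not buf.writable:
            raise TypeError("Writing to readonly buffer")
        buf.writable_needed = True  # request a writable view at acquisition
    return buf

assert check_buffer_access(Buffer(writable=True), getting=False).writable_needed
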
Overridden by memoryview + indexing and slicing subclasses + """ + # self.indices are already analyzed if not self.base.is_name and not is_pythran_expr(self.base.type): - error(self.pos, "Can only index buffer variables") - self.type = error_type - return self - - if not getting: - if not self.base.entry.type.writable: - error(self.pos, "Writing to readonly buffer") - else: - self.writable_needed = True - if self.base.type.is_buffer: - self.base.entry.buffer_aux.writable_needed = True - - self.none_error_message = "'NoneType' object is not subscriptable" - self.analyse_buffer_index(env, getting) - self.wrap_in_nonecheck_node(env) - return self - - def analyse_buffer_index(self, env, getting): + error(self.pos, "Can only index buffer variables") + self.type = error_type + return self + + if not getting: + if not self.base.entry.type.writable: + error(self.pos, "Writing to readonly buffer") + else: + self.writable_needed = True + if self.base.type.is_buffer: + self.base.entry.buffer_aux.writable_needed = True + + self.none_error_message = "'NoneType' object is not subscriptable" + self.analyse_buffer_index(env, getting) + self.wrap_in_nonecheck_node(env) + return self + + def analyse_buffer_index(self, env, getting): if is_pythran_expr(self.base.type): index_with_type_list = [(idx, idx.type) for idx in self.indices] self.type = PythranExpr(pythran_indexing_type(self.base.type, index_with_type_list)) else: self.base = self.base.coerce_to_simple(env) self.type = self.base.type.dtype - self.buffer_type = self.base.type - + self.buffer_type = self.base.type + if getting and (self.type.is_pyobject or self.type.is_pythran_expr): - self.is_temp = True - - def analyse_assignment(self, rhs): - """ - Called by IndexNode when this node is assigned to, - with the rhs of the assignment - """ - - def wrap_in_nonecheck_node(self, env): - if not env.directives['nonecheck'] or not self.base.may_be_none(): - return - self.base = self.base.as_none_safe_node(self.none_error_message) - - def nogil_check(self, env): - if self.is_buffer_access or self.is_memview_index: - if self.type.is_pyobject: - error(self.pos, "Cannot access buffer with object dtype without gil") - self.type = error_type - - def calculate_result_code(self): - return "(*%s)" % self.buffer_ptr_code - + self.is_temp = True + + def analyse_assignment(self, rhs): + """ + Called by IndexNode when this node is assigned to, + with the rhs of the assignment + """ + + def wrap_in_nonecheck_node(self, env): + if not env.directives['nonecheck'] or not self.base.may_be_none(): + return + self.base = self.base.as_none_safe_node(self.none_error_message) + + def nogil_check(self, env): + if self.is_buffer_access or self.is_memview_index: + if self.type.is_pyobject: + error(self.pos, "Cannot access buffer with object dtype without gil") + self.type = error_type + + def calculate_result_code(self): + return "(*%s)" % self.buffer_ptr_code + def buffer_entry(self): base = self.base if self.base.is_nonecheck: base = base.arg - return base.type.get_entry(base) + return base.type.get_entry(base) def get_index_in_temp(self, code, ivar): ret = code.funcstate.allocate_temp( @@ -4285,15 +4285,15 @@ class BufferIndexNode(_IndexingBaseNode): return ret def buffer_lookup_code(self, code): - """ - ndarray[1, 2, 3] and memslice[1, 2, 3] - """ + """ + ndarray[1, 2, 3] and memslice[1, 2, 3] + """ if self.in_nogil_context: if self.is_buffer_access or self.is_memview_index: if code.globalstate.directives['boundscheck']: warning(self.pos, "Use boundscheck(False) for faster 
access", level=1) - # Assign indices to temps of at least (s)size_t to allow further index calculations. + # Assign indices to temps of at least (s)size_t to allow further index calculations. self.index_temps = index_temps = [self.get_index_in_temp(code,ivar) for ivar in self.indices] # Generate buffer access code using these temps @@ -4305,23 +4305,23 @@ class BufferIndexNode(_IndexingBaseNode): negative_indices = Buffer.buffer_defaults['negative_indices'] return buffer_entry, Buffer.put_buffer_lookup_code( - entry=buffer_entry, - index_signeds=[ivar.type.signed for ivar in self.indices], - index_cnames=index_temps, - directives=code.globalstate.directives, - pos=self.pos, code=code, - negative_indices=negative_indices, - in_nogil_context=self.in_nogil_context) - - def generate_assignment_code(self, rhs, code, overloaded_assignment=False): - self.generate_subexpr_evaluation_code(code) - self.generate_buffer_setitem_code(rhs, code) - self.generate_subexpr_disposal_code(code) - self.free_subexpr_temps(code) - rhs.generate_disposal_code(code) - rhs.free_temps(code) - - def generate_buffer_setitem_code(self, rhs, code, op=""): + entry=buffer_entry, + index_signeds=[ivar.type.signed for ivar in self.indices], + index_cnames=index_temps, + directives=code.globalstate.directives, + pos=self.pos, code=code, + negative_indices=negative_indices, + in_nogil_context=self.in_nogil_context) + + def generate_assignment_code(self, rhs, code, overloaded_assignment=False): + self.generate_subexpr_evaluation_code(code) + self.generate_buffer_setitem_code(rhs, code) + self.generate_subexpr_disposal_code(code) + self.free_subexpr_temps(code) + rhs.generate_disposal_code(code) + rhs.free_temps(code) + + def generate_buffer_setitem_code(self, rhs, code, op=""): base_type = self.base.type if is_pythran_expr(base_type) and is_pythran_supported_type(rhs.type): obj = code.funcstate.allocate_temp(PythranExpr(pythran_type(self.base.type)), manage_ref=False) @@ -4343,27 +4343,27 @@ class BufferIndexNode(_IndexingBaseNode): code.funcstate.release_temp(obj) return - # Used from generate_assignment_code and InPlaceAssignmentNode - buffer_entry, ptrexpr = self.buffer_lookup_code(code) - - if self.buffer_type.dtype.is_pyobject: - # Must manage refcounts. Decref what is already there - # and incref what we put in. - ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type, - manage_ref=False) - rhs_code = rhs.result() - code.putln("%s = %s;" % (ptr, ptrexpr)) - code.put_gotref("*%s" % ptr) - code.putln("__Pyx_INCREF(%s); __Pyx_DECREF(*%s);" % ( - rhs_code, ptr)) - code.putln("*%s %s= %s;" % (ptr, op, rhs_code)) - code.put_giveref("*%s" % ptr) - code.funcstate.release_temp(ptr) - else: - # Simple case - code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result())) - - def generate_result_code(self, code): + # Used from generate_assignment_code and InPlaceAssignmentNode + buffer_entry, ptrexpr = self.buffer_lookup_code(code) + + if self.buffer_type.dtype.is_pyobject: + # Must manage refcounts. Decref what is already there + # and incref what we put in. 
+ ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type, + manage_ref=False) + rhs_code = rhs.result() + code.putln("%s = %s;" % (ptr, ptrexpr)) + code.put_gotref("*%s" % ptr) + code.putln("__Pyx_INCREF(%s); __Pyx_DECREF(*%s);" % ( + rhs_code, ptr)) + code.putln("*%s %s= %s;" % (ptr, op, rhs_code)) + code.put_giveref("*%s" % ptr) + code.funcstate.release_temp(ptr) + else: + # Simple case + code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result())) + + def generate_result_code(self, code): if is_pythran_expr(self.base.type): res = self.result() code.putln("__Pyx_call_destructor(%s);" % res) @@ -4373,187 +4373,187 @@ class BufferIndexNode(_IndexingBaseNode): self.base.pythran_result(), pythran_indexing_code(self.indices))) return - buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code) - if self.type.is_pyobject: - # is_temp is True, so must pull out value and incref it. - # NOTE: object temporary results for nodes are declared - # as PyObject *, so we need a cast - code.putln("%s = (PyObject *) *%s;" % (self.result(), self.buffer_ptr_code)) - code.putln("__Pyx_INCREF((PyObject*)%s);" % self.result()) - + buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code) + if self.type.is_pyobject: + # is_temp is True, so must pull out value and incref it. + # NOTE: object temporary results for nodes are declared + # as PyObject *, so we need a cast + code.putln("%s = (PyObject *) *%s;" % (self.result(), self.buffer_ptr_code)) + code.putln("__Pyx_INCREF((PyObject*)%s);" % self.result()) + def free_subexpr_temps(self, code): for temp in self.index_temps: code.funcstate.release_temp(temp) self.index_temps = () super(BufferIndexNode, self).free_subexpr_temps(code) - - -class MemoryViewIndexNode(BufferIndexNode): - - is_memview_index = True - is_buffer_access = False - warned_untyped_idx = False - - def analyse_types(self, env, getting=True): - # memoryviewslice indexing or slicing - from . import MemoryView - + + +class MemoryViewIndexNode(BufferIndexNode): + + is_memview_index = True + is_buffer_access = False + warned_untyped_idx = False + + def analyse_types(self, env, getting=True): + # memoryviewslice indexing or slicing + from . 
import MemoryView + self.is_pythran_mode = has_np_pythran(env) - indices = self.indices - have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim) - + indices = self.indices + have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim) + if not getting: self.writable_needed = True if self.base.is_name or self.base.is_attribute: self.base.entry.type.writable_needed = True - self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim) - axes = [] - - index_type = PyrexTypes.c_py_ssize_t_type - new_indices = [] - - if len(indices) - len(newaxes) > self.base.type.ndim: - self.type = error_type - error(indices[self.base.type.ndim].pos, - "Too many indices specified for type %s" % self.base.type) - return self - - axis_idx = 0 - for i, index in enumerate(indices[:]): - index = index.analyse_types(env) - if index.is_none: - self.is_memview_slice = True - new_indices.append(index) - axes.append(('direct', 'strided')) - continue - - access, packing = self.base.type.axes[axis_idx] - axis_idx += 1 - - if index.is_slice: - self.is_memview_slice = True - if index.step.is_none: - axes.append((access, packing)) - else: - axes.append((access, 'strided')) - - # Coerce start, stop and step to temps of the right type - for attr in ('start', 'stop', 'step'): - value = getattr(index, attr) - if not value.is_none: - value = value.coerce_to(index_type, env) - #value = value.coerce_to_temp(env) - setattr(index, attr, value) - new_indices.append(value) - - elif index.type.is_int or index.type.is_pyobject: - if index.type.is_pyobject and not self.warned_untyped_idx: - warning(index.pos, "Index should be typed for more efficient access", level=2) - MemoryViewIndexNode.warned_untyped_idx = True - - self.is_memview_index = True - index = index.coerce_to(index_type, env) - indices[i] = index - new_indices.append(index) - - else: - self.type = error_type - error(index.pos, "Invalid index for memoryview specified, type %s" % index.type) - return self - - ### FIXME: replace by MemoryViewSliceNode if is_memview_slice ? - self.is_memview_index = self.is_memview_index and not self.is_memview_slice - self.indices = new_indices - # All indices with all start/stop/step for slices. - # We need to keep this around. - self.original_indices = indices - self.nogil = env.nogil - - self.analyse_operation(env, getting, axes) - self.wrap_in_nonecheck_node(env) - return self - - def analyse_operation(self, env, getting, axes): - self.none_error_message = "Cannot index None memoryview slice" - self.analyse_buffer_index(env, getting) - - def analyse_broadcast_operation(self, rhs): - """ - Support broadcasting for slice assignment. - E.g. - m_2d[...] = m_1d # or, - m_1d[...] = m_2d # if the leading dimension has extent 1 - """ - if self.type.is_memoryviewslice: - lhs = self - if lhs.is_memview_broadcast or rhs.is_memview_broadcast: - lhs.is_memview_broadcast = True - rhs.is_memview_broadcast = True - - def analyse_as_memview_scalar_assignment(self, rhs): - lhs = self.analyse_assignment(rhs) - if lhs: - rhs.is_memview_copy_assignment = lhs.is_memview_copy_assignment - return lhs - return self - - -class MemoryViewSliceNode(MemoryViewIndexNode): - - is_memview_slice = True - - # No-op slicing operation, this node will be replaced - is_ellipsis_noop = False - is_memview_scalar_assignment = False - is_memview_index = False - is_memview_broadcast = False - - def analyse_ellipsis_noop(self, env, getting): - """Slicing operations needing no evaluation, i.e. m[...] 
or m[:, :]""" - ### FIXME: replace directly - self.is_ellipsis_noop = all( - index.is_slice and index.start.is_none and index.stop.is_none and index.step.is_none - for index in self.indices) - - if self.is_ellipsis_noop: - self.type = self.base.type - - def analyse_operation(self, env, getting, axes): - from . import MemoryView - - if not getting: - self.is_memview_broadcast = True - self.none_error_message = "Cannot assign to None memoryview slice" - else: - self.none_error_message = "Cannot slice None memoryview slice" - - self.analyse_ellipsis_noop(env, getting) - if self.is_ellipsis_noop: - return - - self.index = None - self.is_temp = True - self.use_managed_ref = True - - if not MemoryView.validate_axes(self.pos, axes): - self.type = error_type - return - - self.type = PyrexTypes.MemoryViewSliceType(self.base.type.dtype, axes) - - if not (self.base.is_simple() or self.base.result_in_temp()): - self.base = self.base.coerce_to_temp(env) - - def analyse_assignment(self, rhs): - if not rhs.type.is_memoryviewslice and ( - self.type.dtype.assignable_from(rhs.type) or - rhs.type.is_pyobject): - # scalar assignment - return MemoryCopyScalar(self.pos, self) - else: - return MemoryCopySlice(self.pos, self) - + self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim) + axes = [] + + index_type = PyrexTypes.c_py_ssize_t_type + new_indices = [] + + if len(indices) - len(newaxes) > self.base.type.ndim: + self.type = error_type + error(indices[self.base.type.ndim].pos, + "Too many indices specified for type %s" % self.base.type) + return self + + axis_idx = 0 + for i, index in enumerate(indices[:]): + index = index.analyse_types(env) + if index.is_none: + self.is_memview_slice = True + new_indices.append(index) + axes.append(('direct', 'strided')) + continue + + access, packing = self.base.type.axes[axis_idx] + axis_idx += 1 + + if index.is_slice: + self.is_memview_slice = True + if index.step.is_none: + axes.append((access, packing)) + else: + axes.append((access, 'strided')) + + # Coerce start, stop and step to temps of the right type + for attr in ('start', 'stop', 'step'): + value = getattr(index, attr) + if not value.is_none: + value = value.coerce_to(index_type, env) + #value = value.coerce_to_temp(env) + setattr(index, attr, value) + new_indices.append(value) + + elif index.type.is_int or index.type.is_pyobject: + if index.type.is_pyobject and not self.warned_untyped_idx: + warning(index.pos, "Index should be typed for more efficient access", level=2) + MemoryViewIndexNode.warned_untyped_idx = True + + self.is_memview_index = True + index = index.coerce_to(index_type, env) + indices[i] = index + new_indices.append(index) + + else: + self.type = error_type + error(index.pos, "Invalid index for memoryview specified, type %s" % index.type) + return self + + ### FIXME: replace by MemoryViewSliceNode if is_memview_slice ? + self.is_memview_index = self.is_memview_index and not self.is_memview_slice + self.indices = new_indices + # All indices with all start/stop/step for slices. + # We need to keep this around. + self.original_indices = indices + self.nogil = env.nogil + + self.analyse_operation(env, getting, axes) + self.wrap_in_nonecheck_node(env) + return self + + def analyse_operation(self, env, getting, axes): + self.none_error_message = "Cannot index None memoryview slice" + self.analyse_buffer_index(env, getting) + + def analyse_broadcast_operation(self, rhs): + """ + Support broadcasting for slice assignment. + E.g. + m_2d[...] = m_1d # or, + m_1d[...] 
= m_2d # if the leading dimension has extent 1 + """ + if self.type.is_memoryviewslice: + lhs = self + if lhs.is_memview_broadcast or rhs.is_memview_broadcast: + lhs.is_memview_broadcast = True + rhs.is_memview_broadcast = True + + def analyse_as_memview_scalar_assignment(self, rhs): + lhs = self.analyse_assignment(rhs) + if lhs: + rhs.is_memview_copy_assignment = lhs.is_memview_copy_assignment + return lhs + return self + + +class MemoryViewSliceNode(MemoryViewIndexNode): + + is_memview_slice = True + + # No-op slicing operation, this node will be replaced + is_ellipsis_noop = False + is_memview_scalar_assignment = False + is_memview_index = False + is_memview_broadcast = False + + def analyse_ellipsis_noop(self, env, getting): + """Slicing operations needing no evaluation, i.e. m[...] or m[:, :]""" + ### FIXME: replace directly + self.is_ellipsis_noop = all( + index.is_slice and index.start.is_none and index.stop.is_none and index.step.is_none + for index in self.indices) + + if self.is_ellipsis_noop: + self.type = self.base.type + + def analyse_operation(self, env, getting, axes): + from . import MemoryView + + if not getting: + self.is_memview_broadcast = True + self.none_error_message = "Cannot assign to None memoryview slice" + else: + self.none_error_message = "Cannot slice None memoryview slice" + + self.analyse_ellipsis_noop(env, getting) + if self.is_ellipsis_noop: + return + + self.index = None + self.is_temp = True + self.use_managed_ref = True + + if not MemoryView.validate_axes(self.pos, axes): + self.type = error_type + return + + self.type = PyrexTypes.MemoryViewSliceType(self.base.type.dtype, axes) + + if not (self.base.is_simple() or self.base.result_in_temp()): + self.base = self.base.coerce_to_temp(env) + + def analyse_assignment(self, rhs): + if not rhs.type.is_memoryviewslice and ( + self.type.dtype.assignable_from(rhs.type) or + rhs.type.is_pyobject): + # scalar assignment + return MemoryCopyScalar(self.pos, self) + else: + return MemoryCopySlice(self.pos, self) + def merged_indices(self, indices): """Return a new list of indices/slices with 'indices' merged into the current ones according to slicing rules. 
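
# analyse_assignment() above picks one of two assignment strategies for
# a memoryview slice target: a scalar broadcast fill ("m[:] = 0.0") or
# an element-wise slice copy ("m[:] = other_view"). A compressed model
# of that dispatch, with booleans standing in for the type tests:

def classify_slice_assignment(rhs_is_memoryview, dtype_accepts_rhs):
    if not rhs_is_memoryview and dtype_accepts_rhs:
        return 'scalar-fill'   # MemoryCopyScalar: broadcast one value
    return 'slice-copy'        # MemoryCopySlice: copy contents across

assert classify_slice_assignment(False, True) == 'scalar-fill'
assert classify_slice_assignment(True, False) == 'slice-copy'
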
@@ -4585,29 +4585,29 @@ class MemoryViewSliceNode(MemoryViewIndexNode): new_indices += indices return new_indices - def is_simple(self): - if self.is_ellipsis_noop: - # TODO: fix SimpleCallNode.is_simple() - return self.base.is_simple() or self.base.result_in_temp() - - return self.result_in_temp() - - def calculate_result_code(self): - """This is called in case this is a no-op slicing node""" - return self.base.result() - - def generate_result_code(self, code): - if self.is_ellipsis_noop: - return ### FIXME: remove + def is_simple(self): + if self.is_ellipsis_noop: + # TODO: fix SimpleCallNode.is_simple() + return self.base.is_simple() or self.base.result_in_temp() + + return self.result_in_temp() + + def calculate_result_code(self): + """This is called in case this is a no-op slicing node""" + return self.base.result() + + def generate_result_code(self, code): + if self.is_ellipsis_noop: + return ### FIXME: remove buffer_entry = self.buffer_entry() have_gil = not self.in_nogil_context - # TODO Mark: this is insane, do it better + # TODO Mark: this is insane, do it better have_slices = False it = iter(self.indices) for index in self.original_indices: - if index.is_slice: - have_slices = True + if index.is_slice: + have_slices = True if not index.start.is_none: index.start = next(it) if not index.stop.is_none: @@ -4619,126 +4619,126 @@ class MemoryViewSliceNode(MemoryViewIndexNode): assert not list(it) - buffer_entry.generate_buffer_slice_code( - code, self.original_indices, self.result(), - have_gil=have_gil, have_slices=have_slices, - directives=code.globalstate.directives) - - def generate_assignment_code(self, rhs, code, overloaded_assignment=False): - if self.is_ellipsis_noop: - self.generate_subexpr_evaluation_code(code) - else: - self.generate_evaluation_code(code) - - if self.is_memview_scalar_assignment: - self.generate_memoryviewslice_assign_scalar_code(rhs, code) - else: - self.generate_memoryviewslice_setslice_code(rhs, code) - - if self.is_ellipsis_noop: - self.generate_subexpr_disposal_code(code) - else: - self.generate_disposal_code(code) - - rhs.generate_disposal_code(code) - rhs.free_temps(code) - - -class MemoryCopyNode(ExprNode): - """ - Wraps a memoryview slice for slice assignment. - - dst: destination mememoryview slice - """ - - subexprs = ['dst'] - - def __init__(self, pos, dst): - super(MemoryCopyNode, self).__init__(pos) - self.dst = dst - self.type = dst.type - - def generate_assignment_code(self, rhs, code, overloaded_assignment=False): - self.dst.generate_evaluation_code(code) - self._generate_assignment_code(rhs, code) - self.dst.generate_disposal_code(code) + buffer_entry.generate_buffer_slice_code( + code, self.original_indices, self.result(), + have_gil=have_gil, have_slices=have_slices, + directives=code.globalstate.directives) + + def generate_assignment_code(self, rhs, code, overloaded_assignment=False): + if self.is_ellipsis_noop: + self.generate_subexpr_evaluation_code(code) + else: + self.generate_evaluation_code(code) + + if self.is_memview_scalar_assignment: + self.generate_memoryviewslice_assign_scalar_code(rhs, code) + else: + self.generate_memoryviewslice_setslice_code(rhs, code) + + if self.is_ellipsis_noop: + self.generate_subexpr_disposal_code(code) + else: + self.generate_disposal_code(code) + + rhs.generate_disposal_code(code) + rhs.free_temps(code) + + +class MemoryCopyNode(ExprNode): + """ + Wraps a memoryview slice for slice assignment. 
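
# MemoryCopyNode (begun above) wraps the *destination* of a slice
# assignment; the no-op path handled earlier skips evaluation entirely
# when every index is a bare ":" or "...". A small predicate mirroring
# analyse_ellipsis_noop(), with slices modelled as (start, stop, step)
# triples and None meaning "use the default bound":

def is_ellipsis_noop(indices):
    return all(
        isinstance(idx, tuple) and idx == (None, None, None)
        for idx in indices
    )

assert is_ellipsis_noop([(None, None, None), (None, None, None)])   # m[:, :]
assert not is_ellipsis_noop([(None, None, None), 0])                # m[:, 0]
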
+ + dst: destination mememoryview slice + """ + + subexprs = ['dst'] + + def __init__(self, pos, dst): + super(MemoryCopyNode, self).__init__(pos) + self.dst = dst + self.type = dst.type + + def generate_assignment_code(self, rhs, code, overloaded_assignment=False): + self.dst.generate_evaluation_code(code) + self._generate_assignment_code(rhs, code) + self.dst.generate_disposal_code(code) self.dst.free_temps(code) - rhs.generate_disposal_code(code) - rhs.free_temps(code) - - -class MemoryCopySlice(MemoryCopyNode): - """ - Copy the contents of slice src to slice dst. Does not support indirect - slices. - - memslice1[...] = memslice2 - memslice1[:] = memslice2 - """ - - is_memview_copy_assignment = True - copy_slice_cname = "__pyx_memoryview_copy_contents" - - def _generate_assignment_code(self, src, code): - dst = self.dst - - src.type.assert_direct_dims(src.pos) - dst.type.assert_direct_dims(dst.pos) - - code.putln(code.error_goto_if_neg( - "%s(%s, %s, %d, %d, %d)" % (self.copy_slice_cname, - src.result(), dst.result(), - src.type.ndim, dst.type.ndim, - dst.type.dtype.is_pyobject), - dst.pos)) - - -class MemoryCopyScalar(MemoryCopyNode): - """ - Assign a scalar to a slice. dst must be simple, scalar will be assigned - to a correct type and not just something assignable. - - memslice1[...] = 0.0 - memslice1[:] = 0.0 - """ - - def __init__(self, pos, dst): - super(MemoryCopyScalar, self).__init__(pos, dst) - self.type = dst.type.dtype - - def _generate_assignment_code(self, scalar, code): + rhs.generate_disposal_code(code) + rhs.free_temps(code) + + +class MemoryCopySlice(MemoryCopyNode): + """ + Copy the contents of slice src to slice dst. Does not support indirect + slices. + + memslice1[...] = memslice2 + memslice1[:] = memslice2 + """ + + is_memview_copy_assignment = True + copy_slice_cname = "__pyx_memoryview_copy_contents" + + def _generate_assignment_code(self, src, code): + dst = self.dst + + src.type.assert_direct_dims(src.pos) + dst.type.assert_direct_dims(dst.pos) + + code.putln(code.error_goto_if_neg( + "%s(%s, %s, %d, %d, %d)" % (self.copy_slice_cname, + src.result(), dst.result(), + src.type.ndim, dst.type.ndim, + dst.type.dtype.is_pyobject), + dst.pos)) + + +class MemoryCopyScalar(MemoryCopyNode): + """ + Assign a scalar to a slice. dst must be simple, scalar will be assigned + to a correct type and not just something assignable. + + memslice1[...] = 0.0 + memslice1[:] = 0.0 + """ + + def __init__(self, pos, dst): + super(MemoryCopyScalar, self).__init__(pos, dst) + self.type = dst.type.dtype + + def _generate_assignment_code(self, scalar, code): from . 
import MemoryView - self.dst.type.assert_direct_dims(self.dst.pos) - - dtype = self.dst.type.dtype - type_decl = dtype.declaration_code("") - slice_decl = self.dst.type.declaration_code("") - - code.begin_block() - code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result())) - if self.dst.result_in_temp() or self.dst.is_simple(): - dst_temp = self.dst.result() - else: - code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, self.dst.result())) - dst_temp = "__pyx_temp_slice" - - slice_iter_obj = MemoryView.slice_iter(self.dst.type, dst_temp, - self.dst.type.ndim, code) - p = slice_iter_obj.start_loops() - - if dtype.is_pyobject: - code.putln("Py_DECREF(*(PyObject **) %s);" % p) - - code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p)) - - if dtype.is_pyobject: - code.putln("Py_INCREF(__pyx_temp_scalar);") - - slice_iter_obj.end_loops() - code.end_block() - - + self.dst.type.assert_direct_dims(self.dst.pos) + + dtype = self.dst.type.dtype + type_decl = dtype.declaration_code("") + slice_decl = self.dst.type.declaration_code("") + + code.begin_block() + code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result())) + if self.dst.result_in_temp() or self.dst.is_simple(): + dst_temp = self.dst.result() + else: + code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, self.dst.result())) + dst_temp = "__pyx_temp_slice" + + slice_iter_obj = MemoryView.slice_iter(self.dst.type, dst_temp, + self.dst.type.ndim, code) + p = slice_iter_obj.start_loops() + + if dtype.is_pyobject: + code.putln("Py_DECREF(*(PyObject **) %s);" % p) + + code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p)) + + if dtype.is_pyobject: + code.putln("Py_INCREF(__pyx_temp_scalar);") + + slice_iter_obj.end_loops() + code.end_block() + + class SliceIndexNode(ExprNode): # 2-element slice indexing # @@ -4764,15 +4764,15 @@ class SliceIndexNode(ExprNode): return PyrexTypes.c_array_type(base_type.base_type, None) return py_object_type - def inferable_item_node(self, index=0): - # slicing shouldn't change the result type of the base, but the index might - if index is not not_a_constant and self.start: - if self.start.has_constant_result(): - index += self.start.constant_result - else: - index = not_a_constant - return self.base.inferable_item_node(index) - + def inferable_item_node(self, index=0): + # slicing shouldn't change the result type of the base, but the index might + if index is not not_a_constant and self.start: + if self.start.has_constant_result(): + index += self.start.constant_result + else: + index = not_a_constant + return self.base.inferable_item_node(index) + def may_be_none(self): base_type = self.base.type if base_type: @@ -4806,7 +4806,7 @@ class SliceIndexNode(ExprNode): stop = self.stop.compile_time_value(denv) try: return base[start:stop] - except Exception as e: + except Exception as e: self.compile_time_value_error(e) def analyse_target_declaration(self, env): @@ -4842,13 +4842,13 @@ class SliceIndexNode(ExprNode): check_negative_indices(self.start, self.stop) base_type = self.base.type - if base_type.is_array and not getting: - # cannot assign directly to C array => try to assign by making a copy - if not self.start and not self.stop: - self.type = base_type - else: - self.type = PyrexTypes.CPtrType(base_type.base_type) - elif base_type.is_string or base_type.is_cpp_string: + if base_type.is_array and not getting: + # cannot assign directly to C array => try to assign by making a copy + if not self.start and not self.stop: + self.type = base_type + else: + self.type = 
PyrexTypes.CPtrType(base_type.base_type) + elif base_type.is_string or base_type.is_cpp_string: self.type = default_str_type(env) elif base_type.is_pyunicode_ptr: self.type = unicode_type @@ -4880,59 +4880,59 @@ class SliceIndexNode(ExprNode): ).analyse_types(env) else: c_int = PyrexTypes.c_py_ssize_t_type - - def allow_none(node, default_value, env): - # Coerce to Py_ssize_t, but allow None as meaning the default slice bound. - from .UtilNodes import EvalWithTempExprNode, ResultRefNode - - node_ref = ResultRefNode(node) - new_expr = CondExprNode( - node.pos, - true_val=IntNode( - node.pos, - type=c_int, - value=default_value, - constant_result=int(default_value) if default_value.isdigit() else not_a_constant, - ), - false_val=node_ref.coerce_to(c_int, env), - test=PrimaryCmpNode( - node.pos, - operand1=node_ref, - operator='is', - operand2=NoneNode(node.pos), - ).analyse_types(env) - ).analyse_result_type(env) - return EvalWithTempExprNode(node_ref, new_expr) - + + def allow_none(node, default_value, env): + # Coerce to Py_ssize_t, but allow None as meaning the default slice bound. + from .UtilNodes import EvalWithTempExprNode, ResultRefNode + + node_ref = ResultRefNode(node) + new_expr = CondExprNode( + node.pos, + true_val=IntNode( + node.pos, + type=c_int, + value=default_value, + constant_result=int(default_value) if default_value.isdigit() else not_a_constant, + ), + false_val=node_ref.coerce_to(c_int, env), + test=PrimaryCmpNode( + node.pos, + operand1=node_ref, + operator='is', + operand2=NoneNode(node.pos), + ).analyse_types(env) + ).analyse_result_type(env) + return EvalWithTempExprNode(node_ref, new_expr) + if self.start: - if self.start.type.is_pyobject: - self.start = allow_none(self.start, '0', env) + if self.start.type.is_pyobject: + self.start = allow_none(self.start, '0', env) self.start = self.start.coerce_to(c_int, env) if self.stop: - if self.stop.type.is_pyobject: - self.stop = allow_none(self.stop, 'PY_SSIZE_T_MAX', env) + if self.stop.type.is_pyobject: + self.stop = allow_none(self.stop, 'PY_SSIZE_T_MAX', env) self.stop = self.stop.coerce_to(c_int, env) self.is_temp = 1 return self - def analyse_as_type(self, env): - base_type = self.base.analyse_as_type(env) - if base_type and not base_type.is_pyobject: - if not self.start and not self.stop: - # memory view - from . import MemoryView - env.use_utility_code(MemoryView.view_utility_code) - none_node = NoneNode(self.pos) - slice_node = SliceNode( - self.pos, - start=none_node, - stop=none_node, - step=none_node, - ) - return PyrexTypes.MemoryViewSliceType( - base_type, MemoryView.get_axes_specs(env, [slice_node])) - return None - + def analyse_as_type(self, env): + base_type = self.base.analyse_as_type(env) + if base_type and not base_type.is_pyobject: + if not self.start and not self.stop: + # memory view + from . 
import MemoryView + env.use_utility_code(MemoryView.view_utility_code) + none_node = NoneNode(self.pos) + slice_node = SliceNode( + self.pos, + start=none_node, + stop=none_node, + step=none_node, + ) + return PyrexTypes.MemoryViewSliceType( + base_type, MemoryView.get_axes_specs(env, [slice_node])) + return None + nogil_check = Node.gil_error gil_message = "Slicing Python object" @@ -4951,11 +4951,11 @@ class SliceIndexNode(ExprNode): "default encoding required for conversion from '%s' to '%s'" % (self.base.type, dst_type)) self.type = dst_type - if dst_type.is_array and self.base.type.is_array: - if not self.start and not self.stop: - # redundant slice building, copy C arrays directly - return self.base.coerce_to(dst_type, env) - # else: check array size if possible + if dst_type.is_array and self.base.type.is_array: + if not self.start and not self.stop: + # redundant slice building, copy C arrays directly + return self.base.coerce_to(dst_type, env) + # else: check array size if possible return super(SliceIndexNode, self).coerce_to(dst_type, env) def generate_result_code(self, code): @@ -4970,7 +4970,7 @@ class SliceIndexNode(ExprNode): stop_code = self.stop_code() if self.base.type.is_string: base_result = self.base.result() - if self.base.type not in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type): + if self.base.type not in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type): base_result = '((const char*)%s)' % base_result if self.type is bytearray_type: type_name = 'ByteArray' @@ -5059,8 +5059,8 @@ class SliceIndexNode(ExprNode): code.error_goto_if_null(result, self.pos))) code.put_gotref(self.py_result()) - def generate_assignment_code(self, rhs, code, overloaded_assignment=False, - exception_check=None, exception_value=None): + def generate_assignment_code(self, rhs, code, overloaded_assignment=False, + exception_check=None, exception_value=None): self.generate_subexpr_evaluation_code(code) if self.type.is_pyobject: code.globalstate.use_utility_code(self.set_slice_utility_code) @@ -5075,20 +5075,20 @@ class SliceIndexNode(ExprNode): has_c_start, has_c_stop, bool(code.globalstate.directives['wraparound']))) else: - start_offset = self.start_code() if self.start else '0' + start_offset = self.start_code() if self.start else '0' if rhs.type.is_array: array_length = rhs.type.size self.generate_slice_guard_code(code, array_length) else: - array_length = '%s - %s' % (self.stop_code(), start_offset) - - code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c")) - code.putln("memcpy(&(%s[%s]), %s, sizeof(%s[0]) * (%s));" % ( - self.base.result(), start_offset, - rhs.result(), - self.base.result(), array_length - )) - + array_length = '%s - %s' % (self.stop_code(), start_offset) + + code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c")) + code.putln("memcpy(&(%s[%s]), %s, sizeof(%s[0]) * (%s));" % ( + self.base.result(), start_offset, + rhs.result(), + self.base.result(), array_length + )) + self.generate_subexpr_disposal_code(code) self.free_subexpr_temps(code) rhs.generate_disposal_code(code) @@ -5136,77 +5136,77 @@ class SliceIndexNode(ExprNode): if not self.base.type.is_array: return slice_size = self.base.type.size - try: - total_length = slice_size = int(slice_size) - except ValueError: - total_length = None - + try: + total_length = slice_size = int(slice_size) + except ValueError: + total_length = None + start = stop = None if self.stop: stop = self.stop.result() try: stop = 
int(stop) if stop < 0: - if total_length is None: - slice_size = '%s + %d' % (slice_size, stop) - else: - slice_size += stop + if total_length is None: + slice_size = '%s + %d' % (slice_size, stop) + else: + slice_size += stop else: slice_size = stop stop = None except ValueError: pass - + if self.start: start = self.start.result() try: start = int(start) if start < 0: - if total_length is None: - start = '%s + %d' % (self.base.type.size, start) - else: - start += total_length - if isinstance(slice_size, _py_int_types): - slice_size -= start - else: - slice_size = '%s - (%s)' % (slice_size, start) + if total_length is None: + start = '%s + %d' % (self.base.type.size, start) + else: + start += total_length + if isinstance(slice_size, _py_int_types): + slice_size -= start + else: + slice_size = '%s - (%s)' % (slice_size, start) start = None except ValueError: pass - - runtime_check = None - compile_time_check = False - try: - int_target_size = int(target_size) - except ValueError: - int_target_size = None - else: - compile_time_check = isinstance(slice_size, _py_int_types) - - if compile_time_check and slice_size < 0: - if int_target_size > 0: + + runtime_check = None + compile_time_check = False + try: + int_target_size = int(target_size) + except ValueError: + int_target_size = None + else: + compile_time_check = isinstance(slice_size, _py_int_types) + + if compile_time_check and slice_size < 0: + if int_target_size > 0: error(self.pos, "Assignment to empty slice.") - elif compile_time_check and start is None and stop is None: + elif compile_time_check and start is None and stop is None: # we know the exact slice length - if int_target_size != slice_size: - error(self.pos, "Assignment to slice of wrong length, expected %s, got %s" % ( - slice_size, target_size)) + if int_target_size != slice_size: + error(self.pos, "Assignment to slice of wrong length, expected %s, got %s" % ( + slice_size, target_size)) elif start is not None: if stop is None: stop = slice_size - runtime_check = "(%s)-(%s)" % (stop, start) - elif stop is not None: - runtime_check = stop - else: - runtime_check = slice_size - - if runtime_check: - code.putln("if (unlikely((%s) != (%s))) {" % (runtime_check, target_size)) - code.putln( - 'PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length,' - ' expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d",' - ' (Py_ssize_t)(%s), (Py_ssize_t)(%s));' % ( - target_size, runtime_check)) + runtime_check = "(%s)-(%s)" % (stop, start) + elif stop is not None: + runtime_check = stop + else: + runtime_check = slice_size + + if runtime_check: + code.putln("if (unlikely((%s) != (%s))) {" % (runtime_check, target_size)) + code.putln( + 'PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length,' + ' expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d",' + ' (Py_ssize_t)(%s), (Py_ssize_t)(%s));' % ( + target_size, runtime_check)) code.putln(code.error_goto(self.pos)) code.putln("}") @@ -5237,7 +5237,7 @@ class SliceNode(ExprNode): # step ExprNode subexprs = ['start', 'stop', 'step'] - is_slice = True + is_slice = True type = slice_type is_temp = 1 @@ -5253,7 +5253,7 @@ class SliceNode(ExprNode): step = self.step.compile_time_value(denv) try: return slice(start, stop, step) - except Exception as e: + except Exception as e: self.compile_time_value_error(e) def may_be_none(self): @@ -5278,11 +5278,11 @@ class SliceNode(ExprNode): def generate_result_code(self, code): if self.is_literal: - dedup_key = make_dedup_key(self.type, 
(self,)) - self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2, dedup_key=dedup_key) - code = code.get_cached_constants_writer(self.result_code) - if code is None: - return # already initialised + dedup_key = make_dedup_key(self.type, (self,)) + self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2, dedup_key=dedup_key) + code = code.get_cached_constants_writer(self.result_code) + if code is None: + return # already initialised code.mark_pos(self.pos) code.putln( @@ -5357,7 +5357,7 @@ class CallNode(ExprNode): may_return_none = None def infer_type(self, env): - # TODO(robertwb): Reduce redundancy with analyse_types. + # TODO(robertwb): Reduce redundancy with analyse_types. function = self.function func_type = function.infer_type(env) if isinstance(function, NewExprNode): @@ -5371,15 +5371,15 @@ class CallNode(ExprNode): if func_type.is_ptr: func_type = func_type.base_type if func_type.is_cfunction: - if getattr(self.function, 'entry', None) and hasattr(self, 'args'): - alternatives = self.function.entry.all_alternatives() - arg_types = [arg.infer_type(env) for arg in self.args] - func_entry = PyrexTypes.best_match(arg_types, alternatives) - if func_entry: - func_type = func_entry.type - if func_type.is_ptr: - func_type = func_type.base_type - return func_type.return_type + if getattr(self.function, 'entry', None) and hasattr(self, 'args'): + alternatives = self.function.entry.all_alternatives() + arg_types = [arg.infer_type(env) for arg in self.args] + func_entry = PyrexTypes.best_match(arg_types, alternatives) + if func_entry: + func_type = func_entry.type + if func_type.is_ptr: + func_type = func_type.base_type + return func_type.return_type return func_type.return_type elif func_type is type_type: if function.is_name and function.entry and function.entry.type: @@ -5467,7 +5467,7 @@ class CallNode(ExprNode): return self self.function = RawCNameExprNode(self.function.pos, constructor.type) self.function.entry = constructor - self.function.set_cname(type.empty_declaration_code()) + self.function.set_cname(type.empty_declaration_code()) self.analyse_c_function_call(env) self.type = type return True @@ -5513,7 +5513,7 @@ class SimpleCallNode(CallNode): args = [arg.compile_time_value(denv) for arg in self.args] try: return function(*args) - except Exception as e: + except Exception as e: self.compile_time_value_error(e) def analyse_as_type(self, env): @@ -5554,8 +5554,8 @@ class SimpleCallNode(CallNode): func_type = self.function_type() self.is_numpy_call_with_exprs = False - if (has_np_pythran(env) and function.is_numpy_attribute and - pythran_is_numpy_func_supported(function)): + if (has_np_pythran(env) and function.is_numpy_attribute and + pythran_is_numpy_func_supported(function)): has_pythran_args = True self.arg_tuple = TupleNode(self.pos, args = self.args) self.arg_tuple = self.arg_tuple.analyse_types(env) @@ -5563,24 +5563,24 @@ class SimpleCallNode(CallNode): has_pythran_args &= is_pythran_supported_node_or_none(arg) self.is_numpy_call_with_exprs = bool(has_pythran_args) if self.is_numpy_call_with_exprs: - env.add_include_file(pythran_get_func_include_file(function)) + env.add_include_file(pythran_get_func_include_file(function)) return NumPyMethodCallNode.from_node( self, function_cname=pythran_functor(function), arg_tuple=self.arg_tuple, - type=PythranExpr(pythran_func_type(function, self.arg_tuple.args)), + type=PythranExpr(pythran_func_type(function, self.arg_tuple.args)), ) elif func_type.is_pyobject: self.arg_tuple = 
TupleNode(self.pos, args = self.args) - self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env) + self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env) self.args = None self.set_py_result_type(function, func_type) self.is_temp = 1 else: self.args = [ arg.analyse_types(env) for arg in self.args ] self.analyse_c_function_call(env) - if func_type.exception_check == '+': - self.is_temp = True + if func_type.exception_check == '+': + self.is_temp = True return self def function_type(self): @@ -5620,7 +5620,7 @@ class SimpleCallNode(CallNode): return elif hasattr(self.function, 'entry'): overloaded_entry = self.function.entry - elif self.function.is_subscript and self.function.is_fused_index: + elif self.function.is_subscript and self.function.is_fused_index: overloaded_entry = self.function.type.entry else: overloaded_entry = None @@ -5632,8 +5632,8 @@ class SimpleCallNode(CallNode): else: alternatives = overloaded_entry.all_alternatives() - entry = PyrexTypes.best_match( - [arg.type for arg in args], alternatives, self.pos, env, args) + entry = PyrexTypes.best_match( + [arg.type for arg in args], alternatives, self.pos, env, args) if not entry: self.type = PyrexTypes.error_type @@ -5641,8 +5641,8 @@ class SimpleCallNode(CallNode): return entry.used = True - if not func_type.is_cpp_class: - self.function.entry = entry + if not func_type.is_cpp_class: + self.function.entry = entry self.function.type = entry.type func_type = self.function_type() else: @@ -5692,7 +5692,7 @@ class SimpleCallNode(CallNode): # Coerce arguments some_args_in_temps = False - for i in range(min(max_nargs, actual_nargs)): + for i in range(min(max_nargs, actual_nargs)): formal_arg = func_type.args[i] formal_type = formal_arg.type arg = args[i].coerce_to(formal_type, env) @@ -5722,13 +5722,13 @@ class SimpleCallNode(CallNode): args[i] = arg # handle additional varargs parameters - for i in range(max_nargs, actual_nargs): + for i in range(max_nargs, actual_nargs): arg = args[i] if arg.type.is_pyobject: - if arg.type is str_type: - arg_ctype = PyrexTypes.c_char_ptr_type - else: - arg_ctype = arg.type.default_coerced_ctype() + if arg.type is str_type: + arg_ctype = PyrexTypes.c_char_ptr_type + else: + arg_ctype = arg.type.default_coerced_ctype() if arg_ctype is None: error(self.args[i].pos, "Python object cannot be passed as a varargs parameter") @@ -5743,7 +5743,7 @@ class SimpleCallNode(CallNode): # sure they are either all temps or all not temps (except # for the last argument, which is evaluated last in any # case) - for i in range(actual_nargs-1): + for i in range(actual_nargs-1): if i == 0 and self.self is not None: continue # self is ok arg = args[i] @@ -5774,22 +5774,22 @@ class SimpleCallNode(CallNode): self.type = func_type.return_type if self.function.is_name or self.function.is_attribute: - func_entry = self.function.entry - if func_entry and (func_entry.utility_code or func_entry.utility_code_definition): - self.is_temp = 1 # currently doesn't work for self.calculate_result_code() + func_entry = self.function.entry + if func_entry and (func_entry.utility_code or func_entry.utility_code_definition): + self.is_temp = 1 # currently doesn't work for self.calculate_result_code() if self.type.is_pyobject: self.result_ctype = py_object_type self.is_temp = 1 - elif func_type.exception_value is not None or func_type.exception_check: + elif func_type.exception_value is not None or func_type.exception_check: self.is_temp = 1 elif self.type.is_memoryviewslice: self.is_temp = 1 # 
func_type.exception_check = True - if self.is_temp and self.type.is_reference: - self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type) - + if self.is_temp and self.type.is_reference: + self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type) + # Called in 'nogil' context? self.nogil = env.nogil if (self.nogil and @@ -5836,12 +5836,12 @@ class SimpleCallNode(CallNode): result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code)) return result - def is_c_result_required(self): - func_type = self.function_type() - if not func_type.exception_value or func_type.exception_check == '+': - return False # skip allocation of unused result temp - return True - + def is_c_result_required(self): + func_type = self.function_type() + if not func_type.exception_value or func_type.exception_check == '+': + return False # skip allocation of unused result temp + return True + def generate_evaluation_code(self, code): function = self.function if function.is_name or function.is_attribute: @@ -5934,7 +5934,7 @@ class SimpleCallNode(CallNode): elif self.type.is_memoryviewslice: assert self.is_temp exc_checks.append(self.type.error_condition(self.result())) - elif func_type.exception_check != '+': + elif func_type.exception_check != '+': exc_val = func_type.exception_value exc_check = func_type.exception_check if exc_val is not None: @@ -5956,9 +5956,9 @@ class SimpleCallNode(CallNode): else: lhs = "" if func_type.exception_check == '+': - translate_cpp_exception(code, self.pos, '%s%s;' % (lhs, rhs), - self.result() if self.type.is_pyobject else None, - func_type.exception_value, self.nogil) + translate_cpp_exception(code, self.pos, '%s%s;' % (lhs, rhs), + self.result() if self.type.is_pyobject else None, + func_type.exception_value, self.nogil) else: if exc_checks: goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos) @@ -5992,7 +5992,7 @@ class NumPyMethodCallNode(ExprNode): code.putln("// function evaluation code for numpy function") code.putln("__Pyx_call_destructor(%s);" % self.result()) - code.putln("new (&%s) decltype(%s){%s{}(%s)};" % ( + code.putln("new (&%s) decltype(%s){%s{}(%s)};" % ( self.result(), self.result(), self.function_cname, @@ -6034,7 +6034,7 @@ class PyMethodCallNode(SimpleCallNode): code.putln("%s = NULL;" % self_arg) arg_offset_cname = None if len(args) > 1: - arg_offset_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False) + arg_offset_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False) code.putln("%s = 0;" % arg_offset_cname) def attribute_is_likely_method(attr): @@ -6058,7 +6058,7 @@ class PyMethodCallNode(SimpleCallNode): else: likely_method = 'unlikely' - code.putln("if (CYTHON_UNPACK_METHODS && %s(PyMethod_Check(%s))) {" % (likely_method, function)) + code.putln("if (CYTHON_UNPACK_METHODS && %s(PyMethod_Check(%s))) {" % (likely_method, function)) code.putln("%s = PyMethod_GET_SELF(%s);" % (self_arg, function)) # the following is always true in Py3 (kept only for safety), # but is false for unbound methods in Py2 @@ -6076,85 +6076,85 @@ class PyMethodCallNode(SimpleCallNode): if not args: # fastest special case: try to avoid tuple creation code.globalstate.use_utility_code( - UtilityCode.load_cached("PyObjectCallNoArg", "ObjectHandling.c")) - code.globalstate.use_utility_code( + UtilityCode.load_cached("PyObjectCallNoArg", "ObjectHandling.c")) + code.globalstate.use_utility_code( UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c")) code.putln( - "%s = (%s) ? 
__Pyx_PyObject_CallOneArg(%s, %s) : __Pyx_PyObject_CallNoArg(%s);" % ( - self.result(), self_arg, + "%s = (%s) ? __Pyx_PyObject_CallOneArg(%s, %s) : __Pyx_PyObject_CallNoArg(%s);" % ( + self.result(), self_arg, function, self_arg, - function)) - code.put_xdecref_clear(self_arg, py_object_type) + function)) + code.put_xdecref_clear(self_arg, py_object_type) code.funcstate.release_temp(self_arg) - code.putln(code.error_goto_if_null(self.result(), self.pos)) - code.put_gotref(self.py_result()) - elif len(args) == 1: - # fastest special case: try to avoid tuple creation + code.putln(code.error_goto_if_null(self.result(), self.pos)) + code.put_gotref(self.py_result()) + elif len(args) == 1: + # fastest special case: try to avoid tuple creation + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyObjectCall2Args", "ObjectHandling.c")) code.globalstate.use_utility_code( - UtilityCode.load_cached("PyObjectCall2Args", "ObjectHandling.c")) - code.globalstate.use_utility_code( - UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c")) - arg = args[0] + UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c")) + arg = args[0] code.putln( - "%s = (%s) ? __Pyx_PyObject_Call2Args(%s, %s, %s) : __Pyx_PyObject_CallOneArg(%s, %s);" % ( - self.result(), self_arg, - function, self_arg, arg.py_result(), - function, arg.py_result())) - code.put_xdecref_clear(self_arg, py_object_type) - code.funcstate.release_temp(self_arg) - arg.generate_disposal_code(code) - arg.free_temps(code) - code.putln(code.error_goto_if_null(self.result(), self.pos)) + "%s = (%s) ? __Pyx_PyObject_Call2Args(%s, %s, %s) : __Pyx_PyObject_CallOneArg(%s, %s);" % ( + self.result(), self_arg, + function, self_arg, arg.py_result(), + function, arg.py_result())) + code.put_xdecref_clear(self_arg, py_object_type) + code.funcstate.release_temp(self_arg) + arg.generate_disposal_code(code) + arg.free_temps(code) + code.putln(code.error_goto_if_null(self.result(), self.pos)) code.put_gotref(self.py_result()) else: - code.globalstate.use_utility_code( - UtilityCode.load_cached("PyFunctionFastCall", "ObjectHandling.c")) - code.globalstate.use_utility_code( - UtilityCode.load_cached("PyCFunctionFastCall", "ObjectHandling.c")) - for test_func, call_prefix in [('PyFunction_Check', 'Py'), ('__Pyx_PyFastCFunction_Check', 'PyC')]: - code.putln("#if CYTHON_FAST_%sCALL" % call_prefix.upper()) - code.putln("if (%s(%s)) {" % (test_func, function)) - code.putln("PyObject *%s[%d] = {%s, %s};" % ( - Naming.quick_temp_cname, - len(args)+1, - self_arg, - ', '.join(arg.py_result() for arg in args))) - code.putln("%s = __Pyx_%sFunction_FastCall(%s, %s+1-%s, %d+%s); %s" % ( - self.result(), - call_prefix, - function, - Naming.quick_temp_cname, - arg_offset_cname, - len(args), - arg_offset_cname, - code.error_goto_if_null(self.result(), self.pos))) - code.put_xdecref_clear(self_arg, py_object_type) - code.put_gotref(self.py_result()) - for arg in args: - arg.generate_disposal_code(code) - code.putln("} else") - code.putln("#endif") - - code.putln("{") + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyFunctionFastCall", "ObjectHandling.c")) + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyCFunctionFastCall", "ObjectHandling.c")) + for test_func, call_prefix in [('PyFunction_Check', 'Py'), ('__Pyx_PyFastCFunction_Check', 'PyC')]: + code.putln("#if CYTHON_FAST_%sCALL" % call_prefix.upper()) + code.putln("if (%s(%s)) {" % (test_func, function)) + code.putln("PyObject *%s[%d] = {%s, %s};" % ( + 
Naming.quick_temp_cname, + len(args)+1, + self_arg, + ', '.join(arg.py_result() for arg in args))) + code.putln("%s = __Pyx_%sFunction_FastCall(%s, %s+1-%s, %d+%s); %s" % ( + self.result(), + call_prefix, + function, + Naming.quick_temp_cname, + arg_offset_cname, + len(args), + arg_offset_cname, + code.error_goto_if_null(self.result(), self.pos))) + code.put_xdecref_clear(self_arg, py_object_type) + code.put_gotref(self.py_result()) + for arg in args: + arg.generate_disposal_code(code) + code.putln("} else") + code.putln("#endif") + + code.putln("{") args_tuple = code.funcstate.allocate_temp(py_object_type, manage_ref=True) code.putln("%s = PyTuple_New(%d+%s); %s" % ( - args_tuple, len(args), arg_offset_cname, + args_tuple, len(args), arg_offset_cname, code.error_goto_if_null(args_tuple, self.pos))) code.put_gotref(args_tuple) if len(args) > 1: code.putln("if (%s) {" % self_arg) - code.putln("__Pyx_GIVEREF(%s); PyTuple_SET_ITEM(%s, 0, %s); %s = NULL;" % ( - self_arg, args_tuple, self_arg, self_arg)) # stealing owned ref in this case + code.putln("__Pyx_GIVEREF(%s); PyTuple_SET_ITEM(%s, 0, %s); %s = NULL;" % ( + self_arg, args_tuple, self_arg, self_arg)) # stealing owned ref in this case code.funcstate.release_temp(self_arg) if len(args) > 1: code.putln("}") for i, arg in enumerate(args): arg.make_owned_reference(code) - code.put_giveref(arg.py_result()) + code.put_giveref(arg.py_result()) code.putln("PyTuple_SET_ITEM(%s, %d+%s, %s);" % ( - args_tuple, i, arg_offset_cname, arg.py_result())) + args_tuple, i, arg_offset_cname, arg.py_result())) if len(args) > 1: code.funcstate.release_temp(arg_offset_cname) @@ -6176,7 +6176,7 @@ class PyMethodCallNode(SimpleCallNode): if len(args) == 1: code.putln("}") - code.putln("}") # !CYTHON_FAST_PYCALL + code.putln("}") # !CYTHON_FAST_PYCALL if reuse_function_temp: self.function.generate_disposal_code(code) @@ -6205,8 +6205,8 @@ class InlinedDefNodeCallNode(CallNode): return False if len(func_type.args) != len(self.args): return False - if func_type.num_kwonly_args: - return False # actually wrong number of arguments + if func_type.num_kwonly_args: + return False # actually wrong number of arguments return True def analyse_types(self, env): @@ -6218,7 +6218,7 @@ class InlinedDefNodeCallNode(CallNode): # Coerce arguments some_args_in_temps = False - for i in range(actual_nargs): + for i in range(actual_nargs): formal_type = func_type.args[i].type arg = self.args[i].coerce_to(formal_type, env) if arg.is_temp: @@ -6245,7 +6245,7 @@ class InlinedDefNodeCallNode(CallNode): # sure they are either all temps or all not temps (except # for the last argument, which is evaluated last in any # case) - for i in range(actual_nargs-1): + for i in range(actual_nargs-1): arg = self.args[i] if arg.nonlocally_immutable(): # locals, C functions, unassignable types are safe. 
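(Aside, not part of the diff: the PyMethodCallNode hunks above emit C that, under CYTHON_UNPACK_METHODS, splits a bound method into its underlying function plus 'self' and then dispatches on the remaining argument count. A minimal Python-level sketch of that behaviour; the helper name is hypothetical:)

    import types

    def call_unpacking_method(function, *args):
        # CYTHON_UNPACK_METHODS in Python terms: a bound method is split
        # into its plain function plus 'self', and 'self' is prepended to
        # the argument list (PyMethod_GET_SELF in the generated C).
        if isinstance(function, types.MethodType):
            args = (function.__self__,) + args
            function = function.__func__
        # The emitted C then selects __Pyx_PyObject_CallNoArg, CallOneArg,
        # Call2Args or a *_FastCall helper from len(args); plain Python has
        # only one calling convention.
        return function(*args)

    class Greeter:
        def greet(self, name):
            return "hi " + name

    assert call_unpacking_method(Greeter().greet, "there") == "hi there"
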
@@ -6374,12 +6374,12 @@ class GeneralCallNode(CallNode): keyword_args = self.keyword_args.compile_time_value(denv) try: return function(*positional_args, **keyword_args) - except Exception as e: + except Exception as e: self.compile_time_value_error(e) def explicit_args_kwds(self): - if (self.keyword_args and not self.keyword_args.is_dict_literal or - not self.positional_args.is_sequence_constructor): + if (self.keyword_args and not self.keyword_args.is_dict_literal or + not self.positional_args.is_sequence_constructor): raise CompileError(self.pos, 'Compile-time keyword arguments must be explicit.') return self.positional_args.args, self.keyword_args @@ -6425,7 +6425,7 @@ class GeneralCallNode(CallNode): if not isinstance(self.positional_args, TupleNode): # has starred argument return self - if not self.keyword_args.is_dict_literal: + if not self.keyword_args.is_dict_literal: # keywords come from arbitrary expression => nothing to do here return self function = self.function @@ -6588,13 +6588,13 @@ class AsTupleNode(ExprNode): arg = self.arg.compile_time_value(denv) try: return tuple(arg) - except Exception as e: + except Exception as e: self.compile_time_value_error(e) def analyse_types(self, env): - self.arg = self.arg.analyse_types(env).coerce_to_pyobject(env) - if self.arg.type is tuple_type: - return self.arg.as_none_safe_node("'NoneType' object is not iterable") + self.arg = self.arg.analyse_types(env).coerce_to_pyobject(env) + if self.arg.type is tuple_type: + return self.arg.as_none_safe_node("'NoneType' object is not iterable") self.type = tuple_type return self @@ -6614,159 +6614,159 @@ class AsTupleNode(ExprNode): code.put_gotref(self.py_result()) -class MergedDictNode(ExprNode): - # Helper class for keyword arguments and other merged dicts. - # - # keyword_args [DictNode or other ExprNode] - - subexprs = ['keyword_args'] - is_temp = 1 - type = dict_type - reject_duplicates = True - - def calculate_constant_result(self): - result = {} - reject_duplicates = self.reject_duplicates - for item in self.keyword_args: - if item.is_dict_literal: - # process items in order - items = ((key.constant_result, value.constant_result) - for key, value in item.key_value_pairs) - else: - items = item.constant_result.iteritems() - - for key, value in items: - if reject_duplicates and key in result: - raise ValueError("duplicate keyword argument found: %s" % key) - result[key] = value - - self.constant_result = result - - def compile_time_value(self, denv): - result = {} - reject_duplicates = self.reject_duplicates - for item in self.keyword_args: - if item.is_dict_literal: - # process items in order - items = [(key.compile_time_value(denv), value.compile_time_value(denv)) - for key, value in item.key_value_pairs] - else: - items = item.compile_time_value(denv).iteritems() - - try: - for key, value in items: - if reject_duplicates and key in result: - raise ValueError("duplicate keyword argument found: %s" % key) - result[key] = value - except Exception as e: - self.compile_time_value_error(e) - return result - - def type_dependencies(self, env): - return () - - def infer_type(self, env): - return dict_type - - def analyse_types(self, env): +class MergedDictNode(ExprNode): + # Helper class for keyword arguments and other merged dicts. 
+ # + # keyword_args [DictNode or other ExprNode] + + subexprs = ['keyword_args'] + is_temp = 1 + type = dict_type + reject_duplicates = True + + def calculate_constant_result(self): + result = {} + reject_duplicates = self.reject_duplicates + for item in self.keyword_args: + if item.is_dict_literal: + # process items in order + items = ((key.constant_result, value.constant_result) + for key, value in item.key_value_pairs) + else: + items = item.constant_result.iteritems() + + for key, value in items: + if reject_duplicates and key in result: + raise ValueError("duplicate keyword argument found: %s" % key) + result[key] = value + + self.constant_result = result + + def compile_time_value(self, denv): + result = {} + reject_duplicates = self.reject_duplicates + for item in self.keyword_args: + if item.is_dict_literal: + # process items in order + items = [(key.compile_time_value(denv), value.compile_time_value(denv)) + for key, value in item.key_value_pairs] + else: + items = item.compile_time_value(denv).iteritems() + + try: + for key, value in items: + if reject_duplicates and key in result: + raise ValueError("duplicate keyword argument found: %s" % key) + result[key] = value + except Exception as e: + self.compile_time_value_error(e) + return result + + def type_dependencies(self, env): + return () + + def infer_type(self, env): + return dict_type + + def analyse_types(self, env): self.keyword_args = [ - arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node( - # FIXME: CPython's error message starts with the runtime function name - 'argument after ** must be a mapping, not NoneType') - for arg in self.keyword_args - ] - - return self - - def may_be_none(self): - return False - - gil_message = "Constructing Python dict" - - def generate_evaluation_code(self, code): - code.mark_pos(self.pos) - self.allocate_temp_result(code) - - args = iter(self.keyword_args) - item = next(args) - item.generate_evaluation_code(code) - if item.type is not dict_type: - # CPython supports calling functions with non-dicts, so do we - code.putln('if (likely(PyDict_CheckExact(%s))) {' % - item.py_result()) - - if item.is_dict_literal: - item.make_owned_reference(code) - code.putln("%s = %s;" % (self.result(), item.py_result())) - item.generate_post_assignment_code(code) - else: - code.putln("%s = PyDict_Copy(%s); %s" % ( - self.result(), - item.py_result(), - code.error_goto_if_null(self.result(), item.pos))) - code.put_gotref(self.result()) - item.generate_disposal_code(code) - - if item.type is not dict_type: - code.putln('} else {') - code.putln("%s = PyObject_CallFunctionObjArgs((PyObject*)&PyDict_Type, %s, NULL); %s" % ( - self.result(), - item.py_result(), - code.error_goto_if_null(self.result(), self.pos))) - code.put_gotref(self.py_result()) - item.generate_disposal_code(code) - code.putln('}') - item.free_temps(code) - - helpers = set() - for item in args: - if item.is_dict_literal: - # inline update instead of creating an intermediate dict - for arg in item.key_value_pairs: - arg.generate_evaluation_code(code) - if self.reject_duplicates: - code.putln("if (unlikely(PyDict_Contains(%s, %s))) {" % ( - self.result(), - arg.key.py_result())) - helpers.add("RaiseDoubleKeywords") - # FIXME: find out function name at runtime! 
- code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % ( - arg.key.py_result(), - code.error_goto(self.pos))) - code.putln("}") - code.put_error_if_neg(arg.key.pos, "PyDict_SetItem(%s, %s, %s)" % ( - self.result(), - arg.key.py_result(), - arg.value.py_result())) - arg.generate_disposal_code(code) - arg.free_temps(code) - else: - item.generate_evaluation_code(code) - if self.reject_duplicates: - # merge mapping into kwdict one by one as we need to check for duplicates - helpers.add("MergeKeywords") - code.put_error_if_neg(item.pos, "__Pyx_MergeKeywords(%s, %s)" % ( - self.result(), item.py_result())) - else: - # simple case, just add all entries - helpers.add("RaiseMappingExpected") - code.putln("if (unlikely(PyDict_Update(%s, %s) < 0)) {" % ( - self.result(), item.py_result())) - code.putln("if (PyErr_ExceptionMatches(PyExc_AttributeError)) " - "__Pyx_RaiseMappingExpectedError(%s);" % item.py_result()) - code.putln(code.error_goto(item.pos)) - code.putln("}") - item.generate_disposal_code(code) - item.free_temps(code) - - for helper in sorted(helpers): - code.globalstate.use_utility_code(UtilityCode.load_cached(helper, "FunctionArguments.c")) - - def annotate(self, code): - for item in self.keyword_args: - item.annotate(code) - - + arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node( + # FIXME: CPython's error message starts with the runtime function name + 'argument after ** must be a mapping, not NoneType') + for arg in self.keyword_args + ] + + return self + + def may_be_none(self): + return False + + gil_message = "Constructing Python dict" + + def generate_evaluation_code(self, code): + code.mark_pos(self.pos) + self.allocate_temp_result(code) + + args = iter(self.keyword_args) + item = next(args) + item.generate_evaluation_code(code) + if item.type is not dict_type: + # CPython supports calling functions with non-dicts, so do we + code.putln('if (likely(PyDict_CheckExact(%s))) {' % + item.py_result()) + + if item.is_dict_literal: + item.make_owned_reference(code) + code.putln("%s = %s;" % (self.result(), item.py_result())) + item.generate_post_assignment_code(code) + else: + code.putln("%s = PyDict_Copy(%s); %s" % ( + self.result(), + item.py_result(), + code.error_goto_if_null(self.result(), item.pos))) + code.put_gotref(self.result()) + item.generate_disposal_code(code) + + if item.type is not dict_type: + code.putln('} else {') + code.putln("%s = PyObject_CallFunctionObjArgs((PyObject*)&PyDict_Type, %s, NULL); %s" % ( + self.result(), + item.py_result(), + code.error_goto_if_null(self.result(), self.pos))) + code.put_gotref(self.py_result()) + item.generate_disposal_code(code) + code.putln('}') + item.free_temps(code) + + helpers = set() + for item in args: + if item.is_dict_literal: + # inline update instead of creating an intermediate dict + for arg in item.key_value_pairs: + arg.generate_evaluation_code(code) + if self.reject_duplicates: + code.putln("if (unlikely(PyDict_Contains(%s, %s))) {" % ( + self.result(), + arg.key.py_result())) + helpers.add("RaiseDoubleKeywords") + # FIXME: find out function name at runtime! 
+ code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % ( + arg.key.py_result(), + code.error_goto(self.pos))) + code.putln("}") + code.put_error_if_neg(arg.key.pos, "PyDict_SetItem(%s, %s, %s)" % ( + self.result(), + arg.key.py_result(), + arg.value.py_result())) + arg.generate_disposal_code(code) + arg.free_temps(code) + else: + item.generate_evaluation_code(code) + if self.reject_duplicates: + # merge mapping into kwdict one by one as we need to check for duplicates + helpers.add("MergeKeywords") + code.put_error_if_neg(item.pos, "__Pyx_MergeKeywords(%s, %s)" % ( + self.result(), item.py_result())) + else: + # simple case, just add all entries + helpers.add("RaiseMappingExpected") + code.putln("if (unlikely(PyDict_Update(%s, %s) < 0)) {" % ( + self.result(), item.py_result())) + code.putln("if (PyErr_ExceptionMatches(PyExc_AttributeError)) " + "__Pyx_RaiseMappingExpectedError(%s);" % item.py_result()) + code.putln(code.error_goto(item.pos)) + code.putln("}") + item.generate_disposal_code(code) + item.free_temps(code) + + for helper in sorted(helpers): + code.globalstate.use_utility_code(UtilityCode.load_cached(helper, "FunctionArguments.c")) + + def annotate(self, code): + for item in self.keyword_args: + item.annotate(code) + + class AttributeNode(ExprNode): # obj.attribute # @@ -6791,7 +6791,7 @@ class AttributeNode(ExprNode): needs_none_check = True is_memslice_transpose = False is_special_lookup = False - is_py_attr = 0 + is_py_attr = 0 def as_cython_attribute(self): if (isinstance(self.obj, NameNode) and @@ -6832,7 +6832,7 @@ class AttributeNode(ExprNode): obj = self.obj.compile_time_value(denv) try: return getattr(obj, attr) - except Exception as e: + except Exception as e: self.compile_time_value_error(e) def type_dependencies(self, env): @@ -6847,7 +6847,7 @@ class AttributeNode(ExprNode): return PyrexTypes.CPtrType(node.entry.type) else: return node.entry.type - node = self.analyse_as_type_attribute(env) + node = self.analyse_as_type_attribute(env) if node is not None: return node.entry.type obj_type = self.obj.infer_type(env) @@ -6857,10 +6857,10 @@ class AttributeNode(ExprNode): # builtin types cannot be inferred as C functions as # that would prevent their use as bound methods return py_object_type - elif self.entry and self.entry.is_cmethod: - # special case: bound methods should not be inferred - # as their unbound method types - return py_object_type + elif self.entry and self.entry.is_cmethod: + # special case: bound methods should not be inferred + # as their unbound method types + return py_object_type return self.type def analyse_target_declaration(self, env): @@ -6878,7 +6878,7 @@ class AttributeNode(ExprNode): self.initialized_check = env.directives['initializedcheck'] node = self.analyse_as_cimported_attribute_node(env, target) if node is None and not target: - node = self.analyse_as_type_attribute(env) + node = self.analyse_as_type_attribute(env) if node is None: node = self.analyse_as_ordinary_attribute_node(env, target) assert node is not None @@ -6905,7 +6905,7 @@ class AttributeNode(ExprNode): return self return None - def analyse_as_type_attribute(self, env): + def analyse_as_type_attribute(self, env): # Try to interpret this as a reference to an unbound # C method of an extension type or builtin type. 
If successful, # creates a corresponding NameNode and returns it, otherwise @@ -6913,49 +6913,49 @@ class AttributeNode(ExprNode): if self.obj.is_string_literal: return type = self.obj.analyse_as_type(env) - if type: - if type.is_extension_type or type.is_builtin_type or type.is_cpp_class: - entry = type.scope.lookup_here(self.attribute) - if entry and (entry.is_cmethod or type.is_cpp_class and entry.type.is_cfunction): - if type.is_builtin_type: - if not self.is_called: - # must handle this as Python object - return None - ubcm_entry = entry - else: - # Create a temporary entry describing the C method - # as an ordinary function. - if entry.func_cname and not hasattr(entry.type, 'op_arg_struct'): - cname = entry.func_cname - if entry.type.is_static_method or ( - env.parent_scope and env.parent_scope.is_cpp_class_scope): - ctype = entry.type - elif type.is_cpp_class: - error(self.pos, "%s not a static member of %s" % (entry.name, type)) - ctype = PyrexTypes.error_type - else: - # Fix self type. - ctype = copy.copy(entry.type) - ctype.args = ctype.args[:] - ctype.args[0] = PyrexTypes.CFuncTypeArg('self', type, 'self', None) - else: - cname = "%s->%s" % (type.vtabptr_cname, entry.cname) + if type: + if type.is_extension_type or type.is_builtin_type or type.is_cpp_class: + entry = type.scope.lookup_here(self.attribute) + if entry and (entry.is_cmethod or type.is_cpp_class and entry.type.is_cfunction): + if type.is_builtin_type: + if not self.is_called: + # must handle this as Python object + return None + ubcm_entry = entry + else: + # Create a temporary entry describing the C method + # as an ordinary function. + if entry.func_cname and not hasattr(entry.type, 'op_arg_struct'): + cname = entry.func_cname + if entry.type.is_static_method or ( + env.parent_scope and env.parent_scope.is_cpp_class_scope): + ctype = entry.type + elif type.is_cpp_class: + error(self.pos, "%s not a static member of %s" % (entry.name, type)) + ctype = PyrexTypes.error_type + else: + # Fix self type. 
+ ctype = copy.copy(entry.type) + ctype.args = ctype.args[:] + ctype.args[0] = PyrexTypes.CFuncTypeArg('self', type, 'self', None) + else: + cname = "%s->%s" % (type.vtabptr_cname, entry.cname) ctype = entry.type - ubcm_entry = Symtab.Entry(entry.name, cname, ctype) - ubcm_entry.is_cfunction = 1 - ubcm_entry.func_cname = entry.func_cname - ubcm_entry.is_unbound_cmethod = 1 - ubcm_entry.scope = entry.scope - return self.as_name_node(env, ubcm_entry, target=False) - elif type.is_enum: - if self.attribute in type.values: - for entry in type.entry.enum_values: - if entry.name == self.attribute: - return self.as_name_node(env, entry, target=False) + ubcm_entry = Symtab.Entry(entry.name, cname, ctype) + ubcm_entry.is_cfunction = 1 + ubcm_entry.func_cname = entry.func_cname + ubcm_entry.is_unbound_cmethod = 1 + ubcm_entry.scope = entry.scope + return self.as_name_node(env, ubcm_entry, target=False) + elif type.is_enum: + if self.attribute in type.values: + for entry in type.entry.enum_values: + if entry.name == self.attribute: + return self.as_name_node(env, entry, target=False) else: - error(self.pos, "%s not a known value of %s" % (self.attribute, type)) - else: - error(self.pos, "%s not a known value of %s" % (self.attribute, type)) + error(self.pos, "%s not a known value of %s" % (self.attribute, type)) + else: + error(self.pos, "%s not a known value of %s" % (self.attribute, type)) return None def analyse_as_type(self, env): @@ -7036,23 +7036,23 @@ class AttributeNode(ExprNode): self.op = "->" elif obj_type.is_extension_type or obj_type.is_builtin_type: self.op = "->" - elif obj_type.is_reference and obj_type.is_fake_reference: - self.op = "->" + elif obj_type.is_reference and obj_type.is_fake_reference: + self.op = "->" else: self.op = "." if obj_type.has_attributes: if obj_type.attributes_known(): - entry = obj_type.scope.lookup_here(self.attribute) - if obj_type.is_memoryviewslice and not entry: + entry = obj_type.scope.lookup_here(self.attribute) + if obj_type.is_memoryviewslice and not entry: if self.attribute == 'T': self.is_memslice_transpose = True self.is_temp = True self.use_managed_ref = True - self.type = self.obj.type.transpose(self.pos) + self.type = self.obj.type.transpose(self.pos) return else: obj_type.declare_attribute(self.attribute, env, self.pos) - entry = obj_type.scope.lookup_here(self.attribute) + entry = obj_type.scope.lookup_here(self.attribute) if entry and entry.is_member: entry = None else: @@ -7156,7 +7156,7 @@ class AttributeNode(ExprNode): def is_lvalue(self): if self.obj: - return True + return True else: return NameNode.is_lvalue(self) @@ -7231,7 +7231,7 @@ class AttributeNode(ExprNode): return code.putln("%s = %s;" % (self.result(), self.obj.result())) - code.put_incref_memoryviewslice(self.result(), have_gil=True) + code.put_incref_memoryviewslice(self.result(), have_gil=True) T = "__pyx_memslice_transpose(&%s) == 0" code.putln(code.error_goto_if(T % self.result(), self.pos)) @@ -7245,24 +7245,24 @@ class AttributeNode(ExprNode): else: # result_code contains what is needed, but we may need to insert # a check and raise an exception - if self.obj.type and self.obj.type.is_extension_type: + if self.obj.type and self.obj.type.is_extension_type: pass - elif self.entry and self.entry.is_cmethod: + elif self.entry and self.entry.is_cmethod: # C method implemented as function call with utility code - code.globalstate.use_entry_utility_code(self.entry) + code.globalstate.use_entry_utility_code(self.entry) def generate_disposal_code(self, code): if self.is_temp and 
self.type.is_memoryviewslice and self.is_memslice_transpose: # mirror condition for putting the memview incref here: - code.put_xdecref_memoryviewslice( - self.result(), have_gil=True) - code.putln("%s.memview = NULL;" % self.result()) - code.putln("%s.data = NULL;" % self.result()) + code.put_xdecref_memoryviewslice( + self.result(), have_gil=True) + code.putln("%s.memview = NULL;" % self.result()) + code.putln("%s.data = NULL;" % self.result()) else: ExprNode.generate_disposal_code(self, code) - def generate_assignment_code(self, rhs, code, overloaded_assignment=False, - exception_check=None, exception_value=None): + def generate_assignment_code(self, rhs, code, overloaded_assignment=False, + exception_check=None, exception_value=None): self.obj.generate_evaluation_code(code) if self.is_py_attr: code.globalstate.use_utility_code( @@ -7333,14 +7333,14 @@ class AttributeNode(ExprNode): # #------------------------------------------------------------------- -class StarredUnpackingNode(ExprNode): +class StarredUnpackingNode(ExprNode): # A starred expression like "*a" # - # This is only allowed in sequence assignment or construction such as + # This is only allowed in sequence assignment or construction such as # # a, *b = (1,2,3,4) => a = 1 ; b = [2,3,4] # - # and will be special cased during type analysis (or generate an error + # and will be special cased during type analysis (or generate an error # if it's found at unexpected places). # # target ExprNode @@ -7349,22 +7349,22 @@ class StarredUnpackingNode(ExprNode): is_starred = 1 type = py_object_type is_temp = 1 - starred_expr_allowed_here = False + starred_expr_allowed_here = False def __init__(self, pos, target): - ExprNode.__init__(self, pos, target=target) + ExprNode.__init__(self, pos, target=target) def analyse_declarations(self, env): - if not self.starred_expr_allowed_here: - error(self.pos, "starred expression is not allowed here") + if not self.starred_expr_allowed_here: + error(self.pos, "starred expression is not allowed here") self.target.analyse_declarations(env) - def infer_type(self, env): - return self.target.infer_type(env) - + def infer_type(self, env): + return self.target.infer_type(env) + def analyse_types(self, env): - if not self.starred_expr_allowed_here: - error(self.pos, "starred expression is not allowed here") + if not self.starred_expr_allowed_here: + error(self.pos, "starred expression is not allowed here") self.target = self.target.analyse_types(env) self.type = self.target.type return self @@ -7423,9 +7423,9 @@ class SequenceNode(ExprNode): arg.analyse_target_declaration(env) def analyse_types(self, env, skip_children=False): - for i, arg in enumerate(self.args): - if not skip_children: - arg = arg.analyse_types(env) + for i, arg in enumerate(self.args): + if not skip_children: + arg = arg.analyse_types(env) self.args[i] = arg.coerce_to_pyobject(env) if self.mult_factor: self.mult_factor = self.mult_factor.analyse_types(env) @@ -7435,49 +7435,49 @@ class SequenceNode(ExprNode): # not setting self.type here, subtypes do this return self - def coerce_to_ctuple(self, dst_type, env): - if self.type == dst_type: - return self - assert not self.mult_factor - if len(self.args) != dst_type.size: - error(self.pos, "trying to coerce sequence to ctuple of wrong length, expected %d, got %d" % ( - dst_type.size, len(self.args))) - coerced_args = [arg.coerce_to(type, env) for arg, type in zip(self.args, dst_type.components)] - return TupleNode(self.pos, args=coerced_args, type=dst_type, is_temp=True) - - def 
_create_merge_node_if_necessary(self, env): - self._flatten_starred_args() - if not any(arg.is_starred for arg in self.args): - return self - # convert into MergedSequenceNode by building partial sequences - args = [] - values = [] - for arg in self.args: - if arg.is_starred: - if values: - args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True)) - values = [] - args.append(arg.target) - else: - values.append(arg) - if values: - args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True)) - node = MergedSequenceNode(self.pos, args, self.type) - if self.mult_factor: - node = binop_node( - self.pos, '*', node, self.mult_factor.coerce_to_pyobject(env), - inplace=True, type=self.type, is_temp=True) - return node - - def _flatten_starred_args(self): - args = [] - for arg in self.args: - if arg.is_starred and arg.target.is_sequence_constructor and not arg.target.mult_factor: - args.extend(arg.target.args) - else: - args.append(arg) - self.args[:] = args - + def coerce_to_ctuple(self, dst_type, env): + if self.type == dst_type: + return self + assert not self.mult_factor + if len(self.args) != dst_type.size: + error(self.pos, "trying to coerce sequence to ctuple of wrong length, expected %d, got %d" % ( + dst_type.size, len(self.args))) + coerced_args = [arg.coerce_to(type, env) for arg, type in zip(self.args, dst_type.components)] + return TupleNode(self.pos, args=coerced_args, type=dst_type, is_temp=True) + + def _create_merge_node_if_necessary(self, env): + self._flatten_starred_args() + if not any(arg.is_starred for arg in self.args): + return self + # convert into MergedSequenceNode by building partial sequences + args = [] + values = [] + for arg in self.args: + if arg.is_starred: + if values: + args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True)) + values = [] + args.append(arg.target) + else: + values.append(arg) + if values: + args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True)) + node = MergedSequenceNode(self.pos, args, self.type) + if self.mult_factor: + node = binop_node( + self.pos, '*', node, self.mult_factor.coerce_to_pyobject(env), + inplace=True, type=self.type, is_temp=True) + return node + + def _flatten_starred_args(self): + args = [] + for arg in self.args: + if arg.is_starred and arg.target.is_sequence_constructor and not arg.target.mult_factor: + args.extend(arg.target.args) + else: + args.append(arg) + self.args[:] = args + def may_be_none(self): return False @@ -7490,11 +7490,11 @@ class SequenceNode(ExprNode): for i, arg in enumerate(self.args): arg = self.args[i] = arg.analyse_target_types(env) if arg.is_starred: - if not arg.type.assignable_from(list_type): + if not arg.type.assignable_from(list_type): error(arg.pos, "starred target must have Python object (list) type") if arg.type is py_object_type: - arg.type = list_type + arg.type = list_type unpacked_item = PyTempNode(self.pos, env) coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env) if unpacked_item is not coerced_unpacked_item: @@ -7517,31 +7517,31 @@ class SequenceNode(ExprNode): mult_factor = self.mult_factor if mult_factor.type.is_int: c_mult = mult_factor.result() - if (isinstance(mult_factor.constant_result, _py_int_types) and - mult_factor.constant_result > 0): + if (isinstance(mult_factor.constant_result, _py_int_types) and + mult_factor.constant_result > 0): size_factor = ' * %s' % mult_factor.constant_result - elif mult_factor.type.signed: - size_factor = ' * 
((%s<0) ? 0:%s)' % (c_mult, c_mult) + elif mult_factor.type.signed: + size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult) else: - size_factor = ' * (%s)' % (c_mult,) + size_factor = ' * (%s)' % (c_mult,) - if self.type is tuple_type and (self.is_literal or self.slow) and not c_mult: + if self.type is tuple_type and (self.is_literal or self.slow) and not c_mult: # use PyTuple_Pack() to avoid generating huge amounts of one-time code code.putln('%s = PyTuple_Pack(%d, %s); %s' % ( target, len(self.args), - ', '.join(arg.py_result() for arg in self.args), + ', '.join(arg.py_result() for arg in self.args), code.error_goto_if_null(target, self.pos))) code.put_gotref(target) - elif self.type.is_ctuple: - for i, arg in enumerate(self.args): - code.putln("%s.f%s = %s;" % ( - target, i, arg.result())) + elif self.type.is_ctuple: + for i, arg in enumerate(self.args): + code.putln("%s.f%s = %s;" % ( + target, i, arg.result())) else: # build the tuple/list step by step, potentially multiplying it as we go - if self.type is list_type: + if self.type is list_type: create_func, set_item_func = 'PyList_New', 'PyList_SET_ITEM' - elif self.type is tuple_type: + elif self.type is tuple_type: create_func, set_item_func = 'PyTuple_New', 'PyTuple_SET_ITEM' else: raise InternalError("sequence packing for unexpected type %s" % self.type) @@ -7569,11 +7569,11 @@ class SequenceNode(ExprNode): else: offset = '' - for i in range(arg_count): + for i in range(arg_count): arg = self.args[i] if c_mult or not arg.result_in_temp(): code.put_incref(arg.result(), arg.ctype()) - code.put_giveref(arg.py_result()) + code.put_giveref(arg.py_result()) code.putln("%s(%s, %s, %s);" % ( set_item_func, target, @@ -7598,7 +7598,7 @@ class SequenceNode(ExprNode): def generate_subexpr_disposal_code(self, code): if self.mult_factor and self.mult_factor.type.is_int: super(SequenceNode, self).generate_subexpr_disposal_code(code) - elif self.type is tuple_type and (self.is_literal or self.slow): + elif self.type is tuple_type and (self.is_literal or self.slow): super(SequenceNode, self).generate_subexpr_disposal_code(code) else: # We call generate_post_assignment_code here instead @@ -7611,8 +7611,8 @@ class SequenceNode(ExprNode): if self.mult_factor: self.mult_factor.generate_disposal_code(code) - def generate_assignment_code(self, rhs, code, overloaded_assignment=False, - exception_check=None, exception_value=None): + def generate_assignment_code(self, rhs, code, overloaded_assignment=False, + exception_check=None, exception_value=None): if self.starred_assignment: self.generate_starred_assignment_code(rhs, code) else: @@ -7685,7 +7685,7 @@ class SequenceNode(ExprNode): code.putln(code.error_goto(self.pos)) code.putln("}") - code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS") + code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS") # unpack items from list/tuple in unrolled loop (can't fail) if len(sequence_types) == 2: code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0]) @@ -7883,7 +7883,7 @@ class SequenceNode(ExprNode): code.put_decref(target_list, py_object_type) code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp)) code.putln('#else') - code.putln('(void)%s;' % sublist_temp) # avoid warning about unused variable + code.putln('(void)%s;' % sublist_temp) # avoid warning about unused variable code.funcstate.release_temp(sublist_temp) code.putln('#endif') @@ -7908,82 +7908,82 @@ class TupleNode(SequenceNode): gil_message = "Constructing Python 
tuple" - def infer_type(self, env): - if self.mult_factor or not self.args: - return tuple_type - arg_types = [arg.infer_type(env) for arg in self.args] + def infer_type(self, env): + if self.mult_factor or not self.args: + return tuple_type + arg_types = [arg.infer_type(env) for arg in self.args] if any(type.is_pyobject or type.is_memoryviewslice or type.is_unspecified or type.is_fused for type in arg_types): - return tuple_type + return tuple_type return env.declare_tuple_type(self.pos, arg_types).type - + def analyse_types(self, env, skip_children=False): if len(self.args) == 0: - self.is_temp = False - self.is_literal = True - return self - - if not skip_children: - for i, arg in enumerate(self.args): - if arg.is_starred: - arg.starred_expr_allowed_here = True - self.args[i] = arg.analyse_types(env) - if (not self.mult_factor and + self.is_temp = False + self.is_literal = True + return self + + if not skip_children: + for i, arg in enumerate(self.args): + if arg.is_starred: + arg.starred_expr_allowed_here = True + self.args[i] = arg.analyse_types(env) + if (not self.mult_factor and not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_memoryviewslice or arg.type.is_fused) for arg in self.args)): - self.type = env.declare_tuple_type(self.pos, (arg.type for arg in self.args)).type - self.is_temp = 1 - return self - - node = SequenceNode.analyse_types(self, env, skip_children=True) - node = node._create_merge_node_if_necessary(env) - if not node.is_sequence_constructor: - return node - - if not all(child.is_literal for child in node.args): - return node - if not node.mult_factor or ( - node.mult_factor.is_literal and - isinstance(node.mult_factor.constant_result, _py_int_types)): + self.type = env.declare_tuple_type(self.pos, (arg.type for arg in self.args)).type + self.is_temp = 1 + return self + + node = SequenceNode.analyse_types(self, env, skip_children=True) + node = node._create_merge_node_if_necessary(env) + if not node.is_sequence_constructor: + return node + + if not all(child.is_literal for child in node.args): + return node + if not node.mult_factor or ( + node.mult_factor.is_literal and + isinstance(node.mult_factor.constant_result, _py_int_types)): node.is_temp = False node.is_literal = True else: - if not node.mult_factor.type.is_pyobject: - node.mult_factor = node.mult_factor.coerce_to_pyobject(env) - node.is_temp = True - node.is_partly_literal = True + if not node.mult_factor.type.is_pyobject: + node.mult_factor = node.mult_factor.coerce_to_pyobject(env) + node.is_temp = True + node.is_partly_literal = True return node - def analyse_as_type(self, env): - # ctuple type - if not self.args: - return None - item_types = [arg.analyse_as_type(env) for arg in self.args] - if any(t is None for t in item_types): - return None - entry = env.declare_tuple_type(self.pos, item_types) - return entry.type - - def coerce_to(self, dst_type, env): - if self.type.is_ctuple: - if dst_type.is_ctuple and self.type.size == dst_type.size: - return self.coerce_to_ctuple(dst_type, env) - elif dst_type is tuple_type or dst_type is py_object_type: - coerced_args = [arg.coerce_to_pyobject(env) for arg in self.args] - return TupleNode(self.pos, args=coerced_args, type=tuple_type, is_temp=1).analyse_types(env, skip_children=True) - else: - return self.coerce_to_pyobject(env).coerce_to(dst_type, env) - elif dst_type.is_ctuple and not self.mult_factor: - return self.coerce_to_ctuple(dst_type, env) - else: - return SequenceNode.coerce_to(self, dst_type, env) - - def as_list(self): - t = 
ListNode(self.pos, args=self.args, mult_factor=self.mult_factor) - if isinstance(self.constant_result, tuple): - t.constant_result = list(self.constant_result) - return t - + def analyse_as_type(self, env): + # ctuple type + if not self.args: + return None + item_types = [arg.analyse_as_type(env) for arg in self.args] + if any(t is None for t in item_types): + return None + entry = env.declare_tuple_type(self.pos, item_types) + return entry.type + + def coerce_to(self, dst_type, env): + if self.type.is_ctuple: + if dst_type.is_ctuple and self.type.size == dst_type.size: + return self.coerce_to_ctuple(dst_type, env) + elif dst_type is tuple_type or dst_type is py_object_type: + coerced_args = [arg.coerce_to_pyobject(env) for arg in self.args] + return TupleNode(self.pos, args=coerced_args, type=tuple_type, is_temp=1).analyse_types(env, skip_children=True) + else: + return self.coerce_to_pyobject(env).coerce_to(dst_type, env) + elif dst_type.is_ctuple and not self.mult_factor: + return self.coerce_to_ctuple(dst_type, env) + else: + return SequenceNode.coerce_to(self, dst_type, env) + + def as_list(self): + t = ListNode(self.pos, args=self.args, mult_factor=self.mult_factor) + if isinstance(self.constant_result, tuple): + t.constant_result = list(self.constant_result) + return t + def is_simple(self): # either temp or constant => always simple return True @@ -8006,7 +8006,7 @@ class TupleNode(SequenceNode): values = self.compile_time_value_list(denv) try: return tuple(values) - except Exception as e: + except Exception as e: self.compile_time_value_error(e) def generate_operation_code(self, code): @@ -8015,16 +8015,16 @@ class TupleNode(SequenceNode): return if self.is_literal or self.is_partly_literal: - # The "mult_factor" is part of the deduplication if it is also constant, i.e. when - # we deduplicate the multiplied result. Otherwise, only deduplicate the constant part. - dedup_key = make_dedup_key(self.type, [self.mult_factor if self.is_literal else None] + self.args) - tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2, dedup_key=dedup_key) - const_code = code.get_cached_constants_writer(tuple_target) - if const_code is not None: - # constant is not yet initialised - const_code.mark_pos(self.pos) - self.generate_sequence_packing_code(const_code, tuple_target, plain=not self.is_literal) - const_code.put_giveref(tuple_target) + # The "mult_factor" is part of the deduplication if it is also constant, i.e. when + # we deduplicate the multiplied result. Otherwise, only deduplicate the constant part. + dedup_key = make_dedup_key(self.type, [self.mult_factor if self.is_literal else None] + self.args) + tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2, dedup_key=dedup_key) + const_code = code.get_cached_constants_writer(tuple_target) + if const_code is not None: + # constant is not yet initialised + const_code.mark_pos(self.pos) + self.generate_sequence_packing_code(const_code, tuple_target, plain=not self.is_literal) + const_code.put_giveref(tuple_target) if self.is_literal: self.result_code = tuple_target else: @@ -8034,7 +8034,7 @@ class TupleNode(SequenceNode): )) code.put_gotref(self.py_result()) else: - self.type.entry.used = True + self.type.entry.used = True self.generate_sequence_packing_code(code) @@ -8054,13 +8054,13 @@ class ListNode(SequenceNode): return () def infer_type(self, env): - # TODO: Infer non-object list arrays. + # TODO: Infer non-object list arrays. 
return list_type def analyse_expressions(self, env): - for arg in self.args: - if arg.is_starred: - arg.starred_expr_allowed_here = True + for arg in self.args: + if arg.is_starred: + arg.starred_expr_allowed_here = True node = SequenceNode.analyse_expressions(self, env) return node.coerce_to_pyobject(env) @@ -8071,7 +8071,7 @@ class ListNode(SequenceNode): node.obj_conversion_errors = errors if env.is_module_scope: self.in_module_scope = True - node = node._create_merge_node_if_necessary(env) + node = node._create_merge_node_if_necessary(env) return node def coerce_to(self, dst_type, env): @@ -8081,31 +8081,31 @@ class ListNode(SequenceNode): self.obj_conversion_errors = [] if not self.type.subtype_of(dst_type): error(self.pos, "Cannot coerce list to type '%s'" % dst_type) - elif (dst_type.is_array or dst_type.is_ptr) and dst_type.base_type is not PyrexTypes.c_void_type: - array_length = len(self.args) - if self.mult_factor: - if isinstance(self.mult_factor.constant_result, _py_int_types): - if self.mult_factor.constant_result <= 0: - error(self.pos, "Cannot coerce non-positively multiplied list to '%s'" % dst_type) - else: - array_length *= self.mult_factor.constant_result - else: - error(self.pos, "Cannot coerce dynamically multiplied list to '%s'" % dst_type) + elif (dst_type.is_array or dst_type.is_ptr) and dst_type.base_type is not PyrexTypes.c_void_type: + array_length = len(self.args) + if self.mult_factor: + if isinstance(self.mult_factor.constant_result, _py_int_types): + if self.mult_factor.constant_result <= 0: + error(self.pos, "Cannot coerce non-positively multiplied list to '%s'" % dst_type) + else: + array_length *= self.mult_factor.constant_result + else: + error(self.pos, "Cannot coerce dynamically multiplied list to '%s'" % dst_type) base_type = dst_type.base_type - self.type = PyrexTypes.CArrayType(base_type, array_length) + self.type = PyrexTypes.CArrayType(base_type, array_length) for i in range(len(self.original_args)): arg = self.args[i] if isinstance(arg, CoerceToPyTypeNode): arg = arg.arg self.args[i] = arg.coerce_to(base_type, env) - elif dst_type.is_cpp_class: - # TODO(robertwb): Avoid object conversion for vector/list/set. - return TypecastNode(self.pos, operand=self, type=PyrexTypes.py_object_type).coerce_to(dst_type, env) - elif self.mult_factor: - error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type) + elif dst_type.is_cpp_class: + # TODO(robertwb): Avoid object conversion for vector/list/set. 
+ return TypecastNode(self.pos, operand=self, type=PyrexTypes.py_object_type).coerce_to(dst_type, env) + elif self.mult_factor: + error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type) elif dst_type.is_struct: if len(self.args) > len(dst_type.scope.var_entries): - error(self.pos, "Too many members for '%s'" % dst_type) + error(self.pos, "Too many members for '%s'" % dst_type) else: if len(self.args) < len(dst_type.scope.var_entries): warning(self.pos, "Too few members for '%s'" % dst_type, 1) @@ -8114,16 +8114,16 @@ class ListNode(SequenceNode): arg = arg.arg self.args[i] = arg.coerce_to(member.type, env) self.type = dst_type - elif dst_type.is_ctuple: - return self.coerce_to_ctuple(dst_type, env) + elif dst_type.is_ctuple: + return self.coerce_to_ctuple(dst_type, env) else: self.type = error_type error(self.pos, "Cannot coerce list to type '%s'" % dst_type) return self - def as_list(self): # dummy for compatibility with TupleNode - return self - + def as_list(self): # dummy for compatibility with TupleNode + return self + def as_tuple(self): t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor) if isinstance(self.constant_result, list): @@ -8146,7 +8146,7 @@ class ListNode(SequenceNode): def calculate_constant_result(self): if self.mult_factor: - raise ValueError() # may exceed the compile time memory + raise ValueError() # may exceed the compile time memory self.constant_result = [ arg.constant_result for arg in self.args] @@ -8162,36 +8162,36 @@ class ListNode(SequenceNode): report_error(err) self.generate_sequence_packing_code(code) elif self.type.is_array: - if self.mult_factor: - code.putln("{") - code.putln("Py_ssize_t %s;" % Naming.quick_temp_cname) - code.putln("for ({i} = 0; {i} < {count}; {i}++) {{".format( - i=Naming.quick_temp_cname, count=self.mult_factor.result())) - offset = '+ (%d * %s)' % (len(self.args), Naming.quick_temp_cname) - else: - offset = '' + if self.mult_factor: + code.putln("{") + code.putln("Py_ssize_t %s;" % Naming.quick_temp_cname) + code.putln("for ({i} = 0; {i} < {count}; {i}++) {{".format( + i=Naming.quick_temp_cname, count=self.mult_factor.result())) + offset = '+ (%d * %s)' % (len(self.args), Naming.quick_temp_cname) + else: + offset = '' for i, arg in enumerate(self.args): - if arg.type.is_array: - code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c")) - code.putln("memcpy(&(%s[%s%s]), %s, sizeof(%s[0]));" % ( - self.result(), i, offset, - arg.result(), self.result() - )) - else: - code.putln("%s[%s%s] = %s;" % ( - self.result(), - i, - offset, - arg.result())) - if self.mult_factor: - code.putln("}") - code.putln("}") + if arg.type.is_array: + code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c")) + code.putln("memcpy(&(%s[%s%s]), %s, sizeof(%s[0]));" % ( + self.result(), i, offset, + arg.result(), self.result() + )) + else: + code.putln("%s[%s%s] = %s;" % ( + self.result(), + i, + offset, + arg.result())) + if self.mult_factor: + code.putln("}") + code.putln("}") elif self.type.is_struct: for arg, member in zip(self.args, self.type.scope.var_entries): code.putln("%s.%s = %s;" % ( - self.result(), - member.cname, - arg.result())) + self.result(), + member.cname, + arg.result())) else: raise InternalError("List type never specified") @@ -8416,228 +8416,228 @@ class DictComprehensionAppendNode(ComprehensionAppendNode): self.value_expr.annotate(code) -class InlinedGeneratorExpressionNode(ExprNode): - # An inlined generator expression for which the 
result is calculated - # inside of the loop and returned as a single, first and only Generator - # return value. - # This will only be created by transforms when replacing safe builtin - # calls on generator expressions. +class InlinedGeneratorExpressionNode(ExprNode): + # An inlined generator expression for which the result is calculated + # inside of the loop and returned as a single, first and only Generator + # return value. + # This will only be created by transforms when replacing safe builtin + # calls on generator expressions. # - # gen GeneratorExpressionNode the generator, not containing any YieldExprNodes - # orig_func String the name of the builtin function this node replaces - # target ExprNode or None a 'target' for a ComprehensionAppend node - - subexprs = ["gen"] - orig_func = None - target = None - is_temp = True + # gen GeneratorExpressionNode the generator, not containing any YieldExprNodes + # orig_func String the name of the builtin function this node replaces + # target ExprNode or None a 'target' for a ComprehensionAppend node + + subexprs = ["gen"] + orig_func = None + target = None + is_temp = True type = py_object_type - def __init__(self, pos, gen, comprehension_type=None, **kwargs): - gbody = gen.def_node.gbody - gbody.is_inlined = True - if comprehension_type is not None: - assert comprehension_type in (list_type, set_type, dict_type), comprehension_type - gbody.inlined_comprehension_type = comprehension_type - kwargs.update( - target=RawCNameExprNode(pos, comprehension_type, Naming.retval_cname), - type=comprehension_type, - ) - super(InlinedGeneratorExpressionNode, self).__init__(pos, gen=gen, **kwargs) + def __init__(self, pos, gen, comprehension_type=None, **kwargs): + gbody = gen.def_node.gbody + gbody.is_inlined = True + if comprehension_type is not None: + assert comprehension_type in (list_type, set_type, dict_type), comprehension_type + gbody.inlined_comprehension_type = comprehension_type + kwargs.update( + target=RawCNameExprNode(pos, comprehension_type, Naming.retval_cname), + type=comprehension_type, + ) + super(InlinedGeneratorExpressionNode, self).__init__(pos, gen=gen, **kwargs) def may_be_none(self): - return self.orig_func not in ('any', 'all', 'sorted') + return self.orig_func not in ('any', 'all', 'sorted') def infer_type(self, env): - return self.type + return self.type def analyse_types(self, env): - self.gen = self.gen.analyse_expressions(env) + self.gen = self.gen.analyse_expressions(env) return self - def generate_result_code(self, code): - code.putln("%s = __Pyx_Generator_Next(%s); %s" % ( - self.result(), self.gen.result(), - code.error_goto_if_null(self.result(), self.pos))) - code.put_gotref(self.result()) - - -class MergedSequenceNode(ExprNode): - """ - Merge a sequence of iterables into a set/list/tuple. - - The target collection is determined by self.type, which must be set externally. 
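Editorial note: for orientation, a plain-Python sketch (illustrative only, not compiler API) of the rewrite that InlinedGeneratorExpressionNode models — a safe builtin such as any() over a generator expression is replaced by a loop that computes the result inline, so no generator object is created at runtime:

    # Hypothetical illustration of the semantics; the real transform
    # rewrites Cython AST nodes, not Python source.
    def any_gen(seq):
        # original form: a builtin applied to a generator expression
        return any(x > 0 for x in seq)

    def any_inlined(seq):
        # inlined form: the result is computed inside the loop and
        # returned as the single, first and only "generator" value
        for x in seq:
            if x > 0:
                return True
        return False

    assert any_gen([-1, 2]) == any_inlined([-1, 2]) == True

Note that may_be_none() above is consistent with this: any(), all() and sorted() can never produce None.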
- - args [ExprNode] - """ - subexprs = ['args'] - is_temp = True - gil_message = "Constructing Python collection" - - def __init__(self, pos, args, type): - if type in (list_type, tuple_type) and args and args[0].is_sequence_constructor: - # construct a list directly from the first argument that we can then extend - if args[0].type is not list_type: - args[0] = ListNode(args[0].pos, args=args[0].args, is_temp=True) - ExprNode.__init__(self, pos, args=args, type=type) - - def calculate_constant_result(self): - result = [] - for item in self.args: - if item.is_sequence_constructor and item.mult_factor: - if item.mult_factor.constant_result <= 0: - continue - # otherwise, adding each item once should be enough - if item.is_set_literal or item.is_sequence_constructor: - # process items in order - items = (arg.constant_result for arg in item.args) - else: - items = item.constant_result - result.extend(items) - if self.type is set_type: - result = set(result) - elif self.type is tuple_type: - result = tuple(result) - else: - assert self.type is list_type - self.constant_result = result - - def compile_time_value(self, denv): - result = [] - for item in self.args: - if item.is_sequence_constructor and item.mult_factor: - if item.mult_factor.compile_time_value(denv) <= 0: - continue - if item.is_set_literal or item.is_sequence_constructor: - # process items in order - items = (arg.compile_time_value(denv) for arg in item.args) - else: - items = item.compile_time_value(denv) - result.extend(items) - if self.type is set_type: - try: - result = set(result) - except Exception as e: - self.compile_time_value_error(e) - elif self.type is tuple_type: - result = tuple(result) - else: - assert self.type is list_type - return result - - def type_dependencies(self, env): - return () - - def infer_type(self, env): - return self.type - - def analyse_types(self, env): - args = [ - arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node( - # FIXME: CPython's error message starts with the runtime function name - 'argument after * must be an iterable, not NoneType') - for arg in self.args - ] - - if len(args) == 1 and args[0].type is self.type: - # strip this intermediate node and use the bare collection - return args[0] - - assert self.type in (set_type, list_type, tuple_type) - - self.args = args + def generate_result_code(self, code): + code.putln("%s = __Pyx_Generator_Next(%s); %s" % ( + self.result(), self.gen.result(), + code.error_goto_if_null(self.result(), self.pos))) + code.put_gotref(self.result()) + + +class MergedSequenceNode(ExprNode): + """ + Merge a sequence of iterables into a set/list/tuple. + + The target collection is determined by self.type, which must be set externally. 
+ + args [ExprNode] + """ + subexprs = ['args'] + is_temp = True + gil_message = "Constructing Python collection" + + def __init__(self, pos, args, type): + if type in (list_type, tuple_type) and args and args[0].is_sequence_constructor: + # construct a list directly from the first argument that we can then extend + if args[0].type is not list_type: + args[0] = ListNode(args[0].pos, args=args[0].args, is_temp=True) + ExprNode.__init__(self, pos, args=args, type=type) + + def calculate_constant_result(self): + result = [] + for item in self.args: + if item.is_sequence_constructor and item.mult_factor: + if item.mult_factor.constant_result <= 0: + continue + # otherwise, adding each item once should be enough + if item.is_set_literal or item.is_sequence_constructor: + # process items in order + items = (arg.constant_result for arg in item.args) + else: + items = item.constant_result + result.extend(items) + if self.type is set_type: + result = set(result) + elif self.type is tuple_type: + result = tuple(result) + else: + assert self.type is list_type + self.constant_result = result + + def compile_time_value(self, denv): + result = [] + for item in self.args: + if item.is_sequence_constructor and item.mult_factor: + if item.mult_factor.compile_time_value(denv) <= 0: + continue + if item.is_set_literal or item.is_sequence_constructor: + # process items in order + items = (arg.compile_time_value(denv) for arg in item.args) + else: + items = item.compile_time_value(denv) + result.extend(items) + if self.type is set_type: + try: + result = set(result) + except Exception as e: + self.compile_time_value_error(e) + elif self.type is tuple_type: + result = tuple(result) + else: + assert self.type is list_type + return result + + def type_dependencies(self, env): + return () + + def infer_type(self, env): + return self.type + + def analyse_types(self, env): + args = [ + arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node( + # FIXME: CPython's error message starts with the runtime function name + 'argument after * must be an iterable, not NoneType') + for arg in self.args + ] + + if len(args) == 1 and args[0].type is self.type: + # strip this intermediate node and use the bare collection + return args[0] + + assert self.type in (set_type, list_type, tuple_type) + + self.args = args return self - def may_be_none(self): - return False - - def generate_evaluation_code(self, code): - code.mark_pos(self.pos) - self.allocate_temp_result(code) - - is_set = self.type is set_type - - args = iter(self.args) - item = next(args) - item.generate_evaluation_code(code) - if (is_set and item.is_set_literal or - not is_set and item.is_sequence_constructor and item.type is list_type): - code.putln("%s = %s;" % (self.result(), item.py_result())) - item.generate_post_assignment_code(code) - else: - code.putln("%s = %s(%s); %s" % ( - self.result(), - 'PySet_New' if is_set else 'PySequence_List', - item.py_result(), - code.error_goto_if_null(self.result(), self.pos))) - code.put_gotref(self.py_result()) - item.generate_disposal_code(code) - item.free_temps(code) - - helpers = set() - if is_set: - add_func = "PySet_Add" - extend_func = "__Pyx_PySet_Update" - else: - add_func = "__Pyx_ListComp_Append" - extend_func = "__Pyx_PyList_Extend" - - for item in args: - if (is_set and (item.is_set_literal or item.is_sequence_constructor) or - (item.is_sequence_constructor and not item.mult_factor)): - if not is_set and item.args: - helpers.add(("ListCompAppend", "Optimize.c")) - for arg in item.args: - 
arg.generate_evaluation_code(code) - code.put_error_if_neg(arg.pos, "%s(%s, %s)" % ( - add_func, - self.result(), - arg.py_result())) - arg.generate_disposal_code(code) - arg.free_temps(code) - continue - - if is_set: - helpers.add(("PySet_Update", "Builtins.c")) - else: - helpers.add(("ListExtend", "Optimize.c")) - - item.generate_evaluation_code(code) - code.put_error_if_neg(item.pos, "%s(%s, %s)" % ( - extend_func, - self.result(), - item.py_result())) - item.generate_disposal_code(code) - item.free_temps(code) - - if self.type is tuple_type: - code.putln("{") - code.putln("PyObject *%s = PyList_AsTuple(%s);" % ( - Naming.quick_temp_cname, - self.result())) - code.put_decref(self.result(), py_object_type) - code.putln("%s = %s; %s" % ( - self.result(), - Naming.quick_temp_cname, - code.error_goto_if_null(self.result(), self.pos))) - code.put_gotref(self.result()) - code.putln("}") - - for helper in sorted(helpers): - code.globalstate.use_utility_code(UtilityCode.load_cached(*helper)) - - def annotate(self, code): - for item in self.args: - item.annotate(code) - - -class SetNode(ExprNode): - """ - Set constructor. - """ + def may_be_none(self): + return False + + def generate_evaluation_code(self, code): + code.mark_pos(self.pos) + self.allocate_temp_result(code) + + is_set = self.type is set_type + + args = iter(self.args) + item = next(args) + item.generate_evaluation_code(code) + if (is_set and item.is_set_literal or + not is_set and item.is_sequence_constructor and item.type is list_type): + code.putln("%s = %s;" % (self.result(), item.py_result())) + item.generate_post_assignment_code(code) + else: + code.putln("%s = %s(%s); %s" % ( + self.result(), + 'PySet_New' if is_set else 'PySequence_List', + item.py_result(), + code.error_goto_if_null(self.result(), self.pos))) + code.put_gotref(self.py_result()) + item.generate_disposal_code(code) + item.free_temps(code) + + helpers = set() + if is_set: + add_func = "PySet_Add" + extend_func = "__Pyx_PySet_Update" + else: + add_func = "__Pyx_ListComp_Append" + extend_func = "__Pyx_PyList_Extend" + + for item in args: + if (is_set and (item.is_set_literal or item.is_sequence_constructor) or + (item.is_sequence_constructor and not item.mult_factor)): + if not is_set and item.args: + helpers.add(("ListCompAppend", "Optimize.c")) + for arg in item.args: + arg.generate_evaluation_code(code) + code.put_error_if_neg(arg.pos, "%s(%s, %s)" % ( + add_func, + self.result(), + arg.py_result())) + arg.generate_disposal_code(code) + arg.free_temps(code) + continue + + if is_set: + helpers.add(("PySet_Update", "Builtins.c")) + else: + helpers.add(("ListExtend", "Optimize.c")) + + item.generate_evaluation_code(code) + code.put_error_if_neg(item.pos, "%s(%s, %s)" % ( + extend_func, + self.result(), + item.py_result())) + item.generate_disposal_code(code) + item.free_temps(code) + + if self.type is tuple_type: + code.putln("{") + code.putln("PyObject *%s = PyList_AsTuple(%s);" % ( + Naming.quick_temp_cname, + self.result())) + code.put_decref(self.result(), py_object_type) + code.putln("%s = %s; %s" % ( + self.result(), + Naming.quick_temp_cname, + code.error_goto_if_null(self.result(), self.pos))) + code.put_gotref(self.result()) + code.putln("}") + + for helper in sorted(helpers): + code.globalstate.use_utility_code(UtilityCode.load_cached(*helper)) + + def annotate(self, code): + for item in self.args: + item.annotate(code) + + +class SetNode(ExprNode): + """ + Set constructor. 
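Editorial note: the generated C above follows a simple strategy; here is a minimal pure-Python sketch of it (function name and structure are illustrative, not the emitted C). The first iterable seeds a working list or set, each further iterable is extended into it, and a tuple target is converted only once at the end, mirroring the PyList_AsTuple step:

    def merge_sequences(iterables, target=tuple):
        it = iter(iterables)
        if target is set:
            result = set(next(it))      # PySet_New on the first argument
            for item in it:
                result.update(item)     # __Pyx_PySet_Update analogue
            return result
        result = list(next(it))         # PySequence_List on the first argument
        for item in it:
            result.extend(item)         # __Pyx_PyList_Extend analogue
        # a tuple target is built as a list and converted once at the end
        return tuple(result) if target is tuple else result

    assert merge_sequences([[1, 2], (3,), [4]]) == (1, 2, 3, 4)
    assert merge_sequences([[1], [1, 2]], target=set) == {1, 2}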
+ """ subexprs = ['args'] - type = set_type - is_set_literal = True + type = set_type + is_set_literal = True gil_message = "Constructing Python set" def analyse_types(self, env): @@ -8659,7 +8659,7 @@ class SetNode(ExprNode): values = [arg.compile_time_value(denv) for arg in self.args] try: return set(values) - except Exception as e: + except Exception as e: self.compile_time_value_error(e) def generate_evaluation_code(self, code): @@ -8692,7 +8692,7 @@ class DictNode(ExprNode): exclude_null_values = False type = dict_type is_dict_literal = True - reject_duplicates = False + reject_duplicates = False obj_conversion_errors = [] @@ -8710,14 +8710,14 @@ class DictNode(ExprNode): for item in self.key_value_pairs] try: return dict(pairs) - except Exception as e: + except Exception as e: self.compile_time_value_error(e) def type_dependencies(self, env): return () def infer_type(self, env): - # TODO: Infer struct constructors. + # TODO: Infer struct constructors. return dict_type def analyse_types(self, env): @@ -8735,13 +8735,13 @@ class DictNode(ExprNode): def coerce_to(self, dst_type, env): if dst_type.is_pyobject: self.release_errors() - if self.type.is_struct_or_union: - if not dict_type.subtype_of(dst_type): - error(self.pos, "Cannot interpret struct as non-dict type '%s'" % dst_type) - return DictNode(self.pos, key_value_pairs=[ - DictItemNode(item.pos, key=item.key.coerce_to_pyobject(env), - value=item.value.coerce_to_pyobject(env)) - for item in self.key_value_pairs]) + if self.type.is_struct_or_union: + if not dict_type.subtype_of(dst_type): + error(self.pos, "Cannot interpret struct as non-dict type '%s'" % dst_type) + return DictNode(self.pos, key_value_pairs=[ + DictItemNode(item.pos, key=item.key.coerce_to_pyobject(env), + value=item.value.coerce_to_pyobject(env)) + for item in self.key_value_pairs]) if not self.type.subtype_of(dst_type): error(self.pos, "Cannot interpret dict as type '%s'" % dst_type) elif dst_type.is_struct_or_union: @@ -8783,9 +8783,9 @@ class DictNode(ExprNode): # pairs are evaluated and used one at a time. 
code.mark_pos(self.pos) self.allocate_temp_result(code) - - is_dict = self.type.is_pyobject - if is_dict: + + is_dict = self.type.is_pyobject + if is_dict: self.release_errors() code.putln( "%s = __Pyx_PyDict_NewPresized(%d); %s" % ( @@ -8793,51 +8793,51 @@ class DictNode(ExprNode): len(self.key_value_pairs), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) - - keys_seen = set() - key_type = None - needs_error_helper = False - + + keys_seen = set() + key_type = None + needs_error_helper = False + for item in self.key_value_pairs: item.generate_evaluation_code(code) - if is_dict: + if is_dict: if self.exclude_null_values: code.putln('if (%s) {' % item.value.py_result()) - key = item.key - if self.reject_duplicates: - if keys_seen is not None: - # avoid runtime 'in' checks for literals that we can do at compile time - if not key.is_string_literal: - keys_seen = None - elif key.value in keys_seen: - # FIXME: this could be a compile time error, at least in Cython code - keys_seen = None - elif key_type is not type(key.value): - if key_type is None: - key_type = type(key.value) - keys_seen.add(key.value) - else: - # different types => may not be able to compare at compile time - keys_seen = None - else: - keys_seen.add(key.value) - - if keys_seen is None: - code.putln('if (unlikely(PyDict_Contains(%s, %s))) {' % ( - self.result(), key.py_result())) - # currently only used in function calls - needs_error_helper = True - code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % ( - key.py_result(), - code.error_goto(item.pos))) - code.putln("} else {") - - code.put_error_if_neg(self.pos, "PyDict_SetItem(%s, %s, %s)" % ( - self.result(), - item.key.py_result(), - item.value.py_result())) - if self.reject_duplicates and keys_seen is None: - code.putln('}') + key = item.key + if self.reject_duplicates: + if keys_seen is not None: + # avoid runtime 'in' checks for literals that we can do at compile time + if not key.is_string_literal: + keys_seen = None + elif key.value in keys_seen: + # FIXME: this could be a compile time error, at least in Cython code + keys_seen = None + elif key_type is not type(key.value): + if key_type is None: + key_type = type(key.value) + keys_seen.add(key.value) + else: + # different types => may not be able to compare at compile time + keys_seen = None + else: + keys_seen.add(key.value) + + if keys_seen is None: + code.putln('if (unlikely(PyDict_Contains(%s, %s))) {' % ( + self.result(), key.py_result())) + # currently only used in function calls + needs_error_helper = True + code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % ( + key.py_result(), + code.error_goto(item.pos))) + code.putln("} else {") + + code.put_error_if_neg(self.pos, "PyDict_SetItem(%s, %s, %s)" % ( + self.result(), + item.key.py_result(), + item.value.py_result())) + if self.reject_duplicates and keys_seen is None: + code.putln('}') if self.exclude_null_values: code.putln('}') else: @@ -8848,15 +8848,15 @@ class DictNode(ExprNode): item.generate_disposal_code(code) item.free_temps(code) - if needs_error_helper: - code.globalstate.use_utility_code( - UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c")) - + if needs_error_helper: + code.globalstate.use_utility_code( + UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c")) + def annotate(self, code): for item in self.key_value_pairs: item.annotate(code) - + class DictItemNode(ExprNode): # Represents a single item in a DictNode # @@ -9239,16 +9239,16 @@ class 
PyCFunctionNode(ExprNode, ModuleNameMixin): default_args = [] default_kwargs = [] annotations = [] - - # For global cpdef functions and def/cpdef methods in cdef classes, we must use global constants - # for default arguments to avoid the dependency on the CyFunction object as 'self' argument - # in the underlying C function. Basically, cpdef functions/methods are static C functions, - # so their optional arguments must be static, too. - # TODO: change CyFunction implementation to pass both function object and owning object for method calls - must_use_constants = env.is_c_class_scope or (self.def_node.is_wrapper and env.is_module_scope) - + + # For global cpdef functions and def/cpdef methods in cdef classes, we must use global constants + # for default arguments to avoid the dependency on the CyFunction object as 'self' argument + # in the underlying C function. Basically, cpdef functions/methods are static C functions, + # so their optional arguments must be static, too. + # TODO: change CyFunction implementation to pass both function object and owning object for method calls + must_use_constants = env.is_c_class_scope or (self.def_node.is_wrapper and env.is_module_scope) + for arg in self.def_node.args: - if arg.default and not must_use_constants: + if arg.default and not must_use_constants: if not arg.default.is_literal: arg.is_dynamic = True if arg.type.is_pyobject: @@ -9264,12 +9264,12 @@ class PyCFunctionNode(ExprNode, ModuleNameMixin): if arg.annotation: arg.annotation = self.analyse_annotation(env, arg.annotation) annotations.append((arg.pos, arg.name, arg.annotation)) - - for arg in (self.def_node.star_arg, self.def_node.starstar_arg): - if arg and arg.annotation: + + for arg in (self.def_node.star_arg, self.def_node.starstar_arg): + if arg and arg.annotation: arg.annotation = self.analyse_annotation(env, arg.annotation) - annotations.append((arg.pos, arg.name, arg.annotation)) - + annotations.append((arg.pos, arg.name, arg.annotation)) + annotation = self.def_node.return_type_annotation if annotation: annotation = self.analyse_annotation(env, annotation) @@ -9305,7 +9305,7 @@ class PyCFunctionNode(ExprNode, ModuleNameMixin): if default_args: defaults_tuple = TupleNode(self.pos, args=[ arg.default for arg in default_args]) - self.defaults_tuple = defaults_tuple.analyse_types(env).coerce_to_pyobject(env) + self.defaults_tuple = defaults_tuple.analyse_types(env).coerce_to_pyobject(env) if default_kwargs: defaults_kwdict = DictNode(self.pos, key_value_pairs=[ DictItemNode( @@ -9334,10 +9334,10 @@ class PyCFunctionNode(ExprNode, ModuleNameMixin): self.pos, args=[defaults_tuple, defaults_kwdict])), decorators=None, name=StringEncoding.EncodedString("__defaults__")) - # defaults getter must never live in class scopes, it's always a module function - module_scope = env.global_scope() - defaults_getter.analyse_declarations(module_scope) - defaults_getter = defaults_getter.analyse_expressions(module_scope) + # defaults getter must never live in class scopes, it's always a module function + module_scope = env.global_scope() + defaults_getter.analyse_declarations(module_scope) + defaults_getter = defaults_getter.analyse_expressions(module_scope) defaults_getter.body = defaults_getter.body.analyse_expressions( defaults_getter.local_scope) defaults_getter.py_wrapper_required = False @@ -9421,7 +9421,7 @@ class PyCFunctionNode(ExprNode, ModuleNameMixin): elif def_node.is_classmethod: flags.append('__Pyx_CYFUNCTION_CLASSMETHOD') - if def_node.local_scope.parent_scope.is_c_class_scope and not 
def_node.entry.is_anonymous: + if def_node.local_scope.parent_scope.is_c_class_scope and not def_node.entry.is_anonymous: flags.append('__Pyx_CYFUNCTION_CCLASS') if flags: @@ -9471,8 +9471,8 @@ class PyCFunctionNode(ExprNode, ModuleNameMixin): if self.defaults_kwdict: code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % ( self.result(), self.defaults_kwdict.py_result())) - if def_node.defaults_getter and not self.specialized_cpdefs: - # Fused functions do not support dynamic defaults, only their specialisations can have them for now. + if def_node.defaults_getter and not self.specialized_cpdefs: + # Fused functions do not support dynamic defaults, only their specialisations can have them for now. code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % ( self.result(), def_node.defaults_getter.entry.pyfunc_cname)) if self.annotations_dict: @@ -9501,7 +9501,7 @@ class CodeObjectNode(ExprNode): subexprs = ['varnames'] is_temp = False - result_code = None + result_code = None def __init__(self, def_node): ExprNode.__init__(self, def_node.pos, def_node=def_node) @@ -9518,24 +9518,24 @@ class CodeObjectNode(ExprNode): def may_be_none(self): return False - def calculate_result_code(self, code=None): - if self.result_code is None: - self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2) + def calculate_result_code(self, code=None): + if self.result_code is None: + self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2) return self.result_code def generate_result_code(self, code): - if self.result_code is None: - self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2) + if self.result_code is None: + self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2) - code = code.get_cached_constants_writer(self.result_code) - if code is None: - return # already initialised + code = code.get_cached_constants_writer(self.result_code) + if code is None: + return # already initialised code.mark_pos(self.pos) func = self.def_node func_name = code.get_py_string_const( func.name, identifier=True, is_str=False, unicode_value=func.name) # FIXME: better way to get the module file path at module init time? Encoding to use? 
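Editorial note: the defaults machinery above mirrors what CPython itself exposes on function objects; a quick pure-Python reference point for the defaults_tuple / defaults_kwdict split:

    def f(a, b=1, *, c=2):
        return a + b + c

    # positional defaults land in __defaults__ (the defaults_tuple above),
    # keyword-only defaults in __kwdefaults__ (the defaults_kwdict above)
    assert f.__defaults__ == (1,)
    assert f.__kwdefaults__ == {'c': 2}

This is why, per the comment above, cpdef functions and methods of cdef classes must fall back to constants: they compile to static C functions with no per-object function record to carry dynamic defaults.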
- file_path = StringEncoding.bytes_literal(func.pos[0].get_filenametable_entry().encode('utf8'), 'utf8') + file_path = StringEncoding.bytes_literal(func.pos[0].get_filenametable_entry().encode('utf8'), 'utf8') # XXX Use get_description() to set arcadia root relative filename file_path = StringEncoding.bytes_literal(func.pos[0].get_description().encode('utf8'), 'utf8') file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True) @@ -9635,10 +9635,10 @@ class DefaultsTupleNode(TupleNode): args.append(arg) super(DefaultsTupleNode, self).__init__(pos, args=args) - def analyse_types(self, env, skip_children=False): - return super(DefaultsTupleNode, self).analyse_types(env, skip_children).coerce_to_pyobject(env) + def analyse_types(self, env, skip_children=False): + return super(DefaultsTupleNode, self).analyse_types(env, skip_children).coerce_to_pyobject(env) + - class DefaultsKwDictNode(DictNode): # CyFunction's __kwdefaults__ dict @@ -9669,7 +9669,7 @@ class LambdaNode(InnerFunctionNode): name = StringEncoding.EncodedString('<lambda>') def analyse_declarations(self, env): - self.lambda_name = self.def_node.lambda_name = env.next_id('lambda') + self.lambda_name = self.def_node.lambda_name = env.next_id('lambda') self.def_node.no_assignment_synthesis = True self.def_node.pymethdef_required = True self.def_node.analyse_declarations(env) @@ -9698,7 +9698,7 @@ class GeneratorExpressionNode(LambdaNode): binding = False def analyse_declarations(self, env): - self.genexpr_name = env.next_id('genexpr') + self.genexpr_name = env.next_id('genexpr') super(GeneratorExpressionNode, self).analyse_declarations(env) # No pymethdef required self.def_node.pymethdef_required = False @@ -9728,13 +9728,13 @@ class YieldExprNode(ExprNode): type = py_object_type label_num = 0 is_yield_from = False - is_await = False + is_await = False in_async_gen = False - expr_keyword = 'yield' + expr_keyword = 'yield' def analyse_types(self, env): if not self.label_num or (self.is_yield_from and self.in_async_gen): - error(self.pos, "'%s' not supported here" % self.expr_keyword) + error(self.pos, "'%s' not supported here" % self.expr_keyword) self.is_temp = 1 if self.arg is not None: self.arg = self.arg.analyse_types(env) @@ -9820,22 +9820,22 @@ class YieldExprNode(ExprNode): class _YieldDelegationExprNode(YieldExprNode): - def yield_from_func(self, code): + def yield_from_func(self, code): raise NotImplementedError() - def generate_evaluation_code(self, code, source_cname=None, decref_source=False): - if source_cname is None: - self.arg.generate_evaluation_code(code) - code.putln("%s = %s(%s, %s);" % ( + def generate_evaluation_code(self, code, source_cname=None, decref_source=False): + if source_cname is None: + self.arg.generate_evaluation_code(code) + code.putln("%s = %s(%s, %s);" % ( Naming.retval_cname, - self.yield_from_func(code), + self.yield_from_func(code), Naming.generator_cname, - self.arg.py_result() if source_cname is None else source_cname)) - if source_cname is None: - self.arg.generate_disposal_code(code) - self.arg.free_temps(code) - elif decref_source: - code.put_decref_clear(source_cname, py_object_type) + self.arg.py_result() if source_cname is None else source_cname)) + if source_cname is None: + self.arg.generate_disposal_code(code) + self.arg.free_temps(code) + elif decref_source: + code.put_decref_clear(source_cname, py_object_type) code.put_xgotref(Naming.retval_cname) code.putln("if (likely(%s)) {" % Naming.retval_cname) @@ -9843,26 +9843,26 @@ class 
_YieldDelegationExprNode(YieldExprNode): code.putln("} else {") # either error or sub-generator has normally terminated: return value => node result if self.result_is_used: - self.fetch_iteration_result(code) + self.fetch_iteration_result(code) else: - self.handle_iteration_exception(code) + self.handle_iteration_exception(code) code.putln("}") - def fetch_iteration_result(self, code): - # YieldExprNode has allocated the result temp for us - code.putln("%s = NULL;" % self.result()) - code.put_error_if_neg(self.pos, "__Pyx_PyGen_FetchStopIterationValue(&%s)" % self.result()) - code.put_gotref(self.result()) - - def handle_iteration_exception(self, code): + def fetch_iteration_result(self, code): + # YieldExprNode has allocated the result temp for us + code.putln("%s = NULL;" % self.result()) + code.put_error_if_neg(self.pos, "__Pyx_PyGen_FetchStopIterationValue(&%s)" % self.result()) + code.put_gotref(self.result()) + + def handle_iteration_exception(self, code): code.putln("PyObject* exc_type = __Pyx_PyErr_Occurred();") - code.putln("if (exc_type) {") + code.putln("if (exc_type) {") code.putln("if (likely(exc_type == PyExc_StopIteration || (exc_type != PyExc_GeneratorExit &&" " __Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))) PyErr_Clear();") - code.putln("else %s" % code.error_goto(self.pos)) - code.putln("}") - - + code.putln("else %s" % code.error_goto(self.pos)) + code.putln("}") + + class YieldFromExprNode(_YieldDelegationExprNode): # "yield from GEN" expression is_yield_from = True @@ -9880,44 +9880,44 @@ class YieldFromExprNode(_YieldDelegationExprNode): class AwaitExprNode(_YieldDelegationExprNode): - # 'await' expression node - # - # arg ExprNode the Awaitable value to await - # label_num integer yield label number - - is_await = True - expr_keyword = 'await' - - def coerce_yield_argument(self, env): - if self.arg is not None: - # FIXME: use same check as in YieldFromExprNode.coerce_yield_argument() ? - self.arg = self.arg.coerce_to_pyobject(env) - - def yield_from_func(self, code): - code.globalstate.use_utility_code(UtilityCode.load_cached("CoroutineYieldFrom", "Coroutine.c")) - return "__Pyx_Coroutine_Yield_From" - - -class AwaitIterNextExprNode(AwaitExprNode): - # 'await' expression node as part of 'async for' iteration - # - # Breaks out of loop on StopAsyncIteration exception. - + # 'await' expression node + # + # arg ExprNode the Awaitable value to await + # label_num integer yield label number + + is_await = True + expr_keyword = 'await' + + def coerce_yield_argument(self, env): + if self.arg is not None: + # FIXME: use same check as in YieldFromExprNode.coerce_yield_argument() ? + self.arg = self.arg.coerce_to_pyobject(env) + + def yield_from_func(self, code): + code.globalstate.use_utility_code(UtilityCode.load_cached("CoroutineYieldFrom", "Coroutine.c")) + return "__Pyx_Coroutine_Yield_From" + + +class AwaitIterNextExprNode(AwaitExprNode): + # 'await' expression node as part of 'async for' iteration + # + # Breaks out of loop on StopAsyncIteration exception. 
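Editorial note: a pure-Python model of the protocol AwaitIterNextExprNode implements for 'async for' (illustrative only; the generated C performs the same break-on-StopAsyncIteration):

    import asyncio

    async def agen():
        yield 1
        yield 2

    async def consume(aiterable):
        it = aiterable.__aiter__()
        results = []
        while True:
            try:
                item = await it.__anext__()   # the awaited iteration step
            except StopAsyncIteration:
                break                          # normal loop termination
            results.append(item)
        return results

    assert asyncio.run(consume(agen())) == [1, 2]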
+ def _generate_break(self, code): - code.globalstate.use_utility_code(UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c")) + code.globalstate.use_utility_code(UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c")) code.putln("PyObject* exc_type = __Pyx_PyErr_Occurred();") code.putln("if (unlikely(exc_type && (exc_type == __Pyx_PyExc_StopAsyncIteration || (" " exc_type != PyExc_StopIteration && exc_type != PyExc_GeneratorExit &&" " __Pyx_PyErr_GivenExceptionMatches(exc_type, __Pyx_PyExc_StopAsyncIteration))))) {") - code.putln("PyErr_Clear();") - code.putln("break;") - code.putln("}") + code.putln("PyErr_Clear();") + code.putln("break;") + code.putln("}") def fetch_iteration_result(self, code): assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop" self._generate_break(code) - super(AwaitIterNextExprNode, self).fetch_iteration_result(code) - + super(AwaitIterNextExprNode, self).fetch_iteration_result(code) + def generate_sent_value_handling_code(self, code, value_cname): assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop" code.putln("if (unlikely(!%s)) {" % value_cname) @@ -9925,7 +9925,7 @@ class AwaitIterNextExprNode(AwaitExprNode): # all non-break exceptions are errors, as in parent class code.putln(code.error_goto(self.pos)) code.putln("}") - + class GlobalsExprNode(AtomicExprNode): type = dict_type @@ -10046,7 +10046,7 @@ class UnopNode(ExprNode): operand = self.operand.compile_time_value(denv) try: return func(operand) - except Exception as e: + except Exception as e: self.compile_time_value_error(e) def infer_type(self, env): @@ -10088,7 +10088,7 @@ class UnopNode(ExprNode): return self.operand.check_const() def is_py_operation(self): - return self.operand.type.is_pyobject or self.operand.type.is_ctuple + return self.operand.type.is_pyobject or self.operand.type.is_ctuple def is_pythran_operation(self, env): np_pythran = has_np_pythran(env) @@ -10117,14 +10117,14 @@ class UnopNode(ExprNode): self.operand.pythran_result())) elif self.operand.type.is_pyobject: self.generate_py_operation_code(code) - elif self.is_temp: - if self.is_cpp_operation() and self.exception_check == '+': - translate_cpp_exception(code, self.pos, - "%s = %s %s;" % (self.result(), self.operator, self.operand.result()), - self.result() if self.type.is_pyobject else None, - self.exception_value, self.in_nogil_context) - else: - code.putln("%s = %s %s;" % (self.result(), self.operator, self.operand.result())) + elif self.is_temp: + if self.is_cpp_operation() and self.exception_check == '+': + translate_cpp_exception(code, self.pos, + "%s = %s %s;" % (self.result(), self.operator, self.operand.result()), + self.result() if self.type.is_pyobject else None, + self.exception_value, self.in_nogil_context) + else: + code.putln("%s = %s %s;" % (self.result(), self.operator, self.operand.result())) def generate_py_operation_code(self, code): function = self.py_operation_function(code) @@ -10142,23 +10142,23 @@ class UnopNode(ExprNode): (self.operator, self.operand.type)) self.type = PyrexTypes.error_type - def analyse_cpp_operation(self, env, overload_check=True): - entry = env.lookup_operator(self.operator, [self.operand]) - if overload_check and not entry: - self.type_error() - return - if entry: - self.exception_check = entry.type.exception_check - self.exception_value = entry.type.exception_value - if self.exception_check == '+': - self.is_temp = True - if self.exception_value is None: - env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", 
"CppSupport.cpp")) - else: - self.exception_check = '' - self.exception_value = '' + def analyse_cpp_operation(self, env, overload_check=True): + entry = env.lookup_operator(self.operator, [self.operand]) + if overload_check and not entry: + self.type_error() + return + if entry: + self.exception_check = entry.type.exception_check + self.exception_value = entry.type.exception_value + if self.exception_check == '+': + self.is_temp = True + if self.exception_value is None: + env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) + else: + self.exception_check = '' + self.exception_value = '' cpp_type = self.operand.type.find_cpp_operation_type(self.operator) - if overload_check and cpp_type is None: + if overload_check and cpp_type is None: error(self.pos, "'%s' operator not defined for %s" % ( self.operator, type)) self.type_error() @@ -10181,7 +10181,7 @@ class NotNode(UnopNode): operand = self.operand.compile_time_value(denv) try: return not operand - except Exception as e: + except Exception as e: self.compile_time_value_error(e) def infer_unop_type(self, env, operand_type): @@ -10191,7 +10191,7 @@ class NotNode(UnopNode): self.operand = self.operand.analyse_types(env) operand_type = self.operand.type if operand_type.is_cpp_class: - self.analyse_cpp_operation(env) + self.analyse_cpp_operation(env) else: self.operand = self.operand.coerce_to_boolean(env) return self @@ -10329,12 +10329,12 @@ class AmpersandNode(CUnopNode): self.operand = self.operand.analyse_types(env) argtype = self.operand.type if argtype.is_cpp_class: - self.analyse_cpp_operation(env, overload_check=False) + self.analyse_cpp_operation(env, overload_check=False) if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()): if argtype.is_memoryviewslice: self.error("Cannot take address of memoryview slice") else: - self.error("Taking address of non-lvalue (type %s)" % argtype) + self.error("Taking address of non-lvalue (type %s)" % argtype) return self if argtype.is_pyobject: self.error("Cannot take address of Python %s" % ( @@ -10342,8 +10342,8 @@ class AmpersandNode(CUnopNode): "object attribute '%s'" % self.operand.attribute if self.operand.is_attribute else "object")) return self - if not argtype.is_cpp_class or not self.type: - self.type = PyrexTypes.c_ptr_type(argtype) + if not argtype.is_cpp_class or not self.type: + self.type = PyrexTypes.c_ptr_type(argtype) return self def check_const(self): @@ -10358,11 +10358,11 @@ class AmpersandNode(CUnopNode): return "(&%s)" % self.operand.result() def generate_result_code(self, code): - if (self.operand.type.is_cpp_class and self.exception_check == '+'): - translate_cpp_exception(code, self.pos, - "%s = %s %s;" % (self.result(), self.operator, self.operand.result()), - self.result() if self.type.is_pyobject else None, - self.exception_value, self.in_nogil_context) + if (self.operand.type.is_cpp_class and self.exception_check == '+'): + translate_cpp_exception(code, self.pos, + "%s = %s %s;" % (self.result(), self.operator, self.operand.result()), + self.result() if self.type.is_pyobject else None, + self.exception_value, self.in_nogil_context) unop_node_classes = { @@ -10432,15 +10432,15 @@ class TypecastNode(ExprNode): return CoerceIntToBytesNode(self.operand, env) elif self.operand.type.can_coerce_to_pyobject(env): self.result_ctype = py_object_type - self.operand = self.operand.coerce_to(self.type, env) + self.operand = self.operand.coerce_to(self.type, env) else: if self.operand.type.is_ptr: if not 
(self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct): error(self.pos, "Python objects cannot be cast from pointers of primitive types") else: # Should this be an error? - warning(self.pos, "No conversion from %s to %s, python object pointer used." % ( - self.operand.type, self.type)) + warning(self.pos, "No conversion from %s to %s, python object pointer used." % ( + self.operand.type, self.type)) self.operand = self.operand.coerce_to_simple(env) elif from_py and not to_py: if self.type.create_from_py_utility_code(env): @@ -10449,8 +10449,8 @@ class TypecastNode(ExprNode): if not (self.type.base_type.is_void or self.type.base_type.is_struct): error(self.pos, "Python objects cannot be cast to pointers of primitive types") else: - warning(self.pos, "No conversion from %s to %s, python object pointer used." % ( - self.type, self.operand.type)) + warning(self.pos, "No conversion from %s to %s, python object pointer used." % ( + self.type, self.operand.type)) elif from_py and to_py: if self.typecheck: self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True) @@ -10462,13 +10462,13 @@ class TypecastNode(ExprNode): elif self.operand.type.is_fused: self.operand = self.operand.coerce_to(self.type, env) #self.type = self.operand.type - if self.type.is_ptr and self.type.base_type.is_cfunction and self.type.base_type.nogil: - op_type = self.operand.type - if op_type.is_ptr: - op_type = op_type.base_type - if op_type.is_cfunction and not op_type.nogil: - warning(self.pos, - "Casting a GIL-requiring function into a nogil function circumvents GIL validation", 1) + if self.type.is_ptr and self.type.base_type.is_cfunction and self.type.base_type.nogil: + op_type = self.operand.type + if op_type.is_ptr: + op_type = op_type.base_type + if op_type.is_cfunction and not op_type.nogil: + warning(self.pos, + "Casting a GIL-requiring function into a nogil function circumvents GIL validation", 1) return self def is_simple(self): @@ -10538,7 +10538,7 @@ ERR_STEPS = ("Strides may only be given to indicate contiguity. 
" ERR_NOT_POINTER = "Can only create cython.array from pointer or array" ERR_BASE_TYPE = "Pointer base type does not match cython.array base type" - + class CythonArrayNode(ExprNode): """ Used when a pointer of base_type is cast to a memoryviewslice with that @@ -10667,7 +10667,7 @@ class CythonArrayNode(ExprNode): axes[-1] = ('direct', 'contig') self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes) - self.coercion_type.validate_memslice_dtype(self.pos) + self.coercion_type.validate_memslice_dtype(self.pos) self.type = self.get_cython_array_type(env) MemoryView.use_cython_array_utility_code(env) env.use_utility_code(MemoryView.typeinfo_to_format_code) @@ -10697,7 +10697,7 @@ class CythonArrayNode(ExprNode): shapes_temp = code.funcstate.allocate_temp(py_object_type, True) format_temp = code.funcstate.allocate_temp(py_object_type, True) - itemsize = "sizeof(%s)" % dtype.empty_declaration_code() + itemsize = "sizeof(%s)" % dtype.empty_declaration_code() type_info = Buffer.get_type_information_cname(code, dtype) if self.operand.type.is_ptr: @@ -10805,8 +10805,8 @@ class SizeofTypeNode(SizeofNode): def check_type(self): arg_type = self.arg_type - if not arg_type: - return + if not arg_type: + return if arg_type.is_pyobject and not arg_type.is_extension_type: error(self.pos, "Cannot take sizeof Python object") elif arg_type.is_void: @@ -10820,7 +10820,7 @@ class SizeofTypeNode(SizeofNode): # we want the size of the actual struct arg_code = self.arg_type.declaration_code("", deref=1) else: - arg_code = self.arg_type.empty_declaration_code() + arg_code = self.arg_type.empty_declaration_code() return "(sizeof(%s))" % arg_code @@ -10851,78 +10851,78 @@ class SizeofVarNode(SizeofNode): def generate_result_code(self, code): pass - -class TypeidNode(ExprNode): - # C++ typeid operator applied to a type or variable - # - # operand ExprNode - # arg_type ExprNode - # is_variable boolean - - type = PyrexTypes.error_type - - subexprs = ['operand'] - - arg_type = None - is_variable = None - is_temp = 1 - - def get_type_info_type(self, env): - env_module = env - while not env_module.is_module_scope: - env_module = env_module.outer_scope - typeinfo_module = env_module.find_module('libcpp.typeinfo', self.pos) - typeinfo_entry = typeinfo_module.lookup('type_info') - return PyrexTypes.CFakeReferenceType(PyrexTypes.c_const_type(typeinfo_entry.type)) - + +class TypeidNode(ExprNode): + # C++ typeid operator applied to a type or variable + # + # operand ExprNode + # arg_type ExprNode + # is_variable boolean + + type = PyrexTypes.error_type + + subexprs = ['operand'] + + arg_type = None + is_variable = None + is_temp = 1 + + def get_type_info_type(self, env): + env_module = env + while not env_module.is_module_scope: + env_module = env_module.outer_scope + typeinfo_module = env_module.find_module('libcpp.typeinfo', self.pos) + typeinfo_entry = typeinfo_module.lookup('type_info') + return PyrexTypes.CFakeReferenceType(PyrexTypes.c_const_type(typeinfo_entry.type)) + cpp_message = 'typeid operator' - def analyse_types(self, env): + def analyse_types(self, env): self.cpp_check(env) - type_info = self.get_type_info_type(env) - if not type_info: - self.error("The 'libcpp.typeinfo' module must be cimported to use the typeid() operator") - return self - self.type = type_info - as_type = self.operand.analyse_as_type(env) - if as_type: - self.arg_type = as_type - self.is_type = True - else: - self.arg_type = self.operand.analyse_types(env) - self.is_type = False - if self.arg_type.type.is_pyobject: - 
self.error("Cannot use typeid on a Python object") - return self - elif self.arg_type.type.is_void: - self.error("Cannot use typeid on void") - return self - elif not self.arg_type.type.is_complete(): - self.error("Cannot use typeid on incomplete type '%s'" % self.arg_type.type) - return self - env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) - return self - - def error(self, mess): - error(self.pos, mess) - self.type = PyrexTypes.error_type - self.result_code = "<error>" - - def check_const(self): - return True - - def calculate_result_code(self): - return self.temp_code - - def generate_result_code(self, code): - if self.is_type: - arg_code = self.arg_type.empty_declaration_code() - else: - arg_code = self.arg_type.result() - translate_cpp_exception(code, self.pos, - "%s = typeid(%s);" % (self.temp_code, arg_code), - None, None, self.in_nogil_context) - + type_info = self.get_type_info_type(env) + if not type_info: + self.error("The 'libcpp.typeinfo' module must be cimported to use the typeid() operator") + return self + self.type = type_info + as_type = self.operand.analyse_as_type(env) + if as_type: + self.arg_type = as_type + self.is_type = True + else: + self.arg_type = self.operand.analyse_types(env) + self.is_type = False + if self.arg_type.type.is_pyobject: + self.error("Cannot use typeid on a Python object") + return self + elif self.arg_type.type.is_void: + self.error("Cannot use typeid on void") + return self + elif not self.arg_type.type.is_complete(): + self.error("Cannot use typeid on incomplete type '%s'" % self.arg_type.type) + return self + env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) + return self + + def error(self, mess): + error(self.pos, mess) + self.type = PyrexTypes.error_type + self.result_code = "<error>" + + def check_const(self): + return True + + def calculate_result_code(self): + return self.temp_code + + def generate_result_code(self, code): + if self.is_type: + arg_code = self.arg_type.empty_declaration_code() + else: + arg_code = self.arg_type.result() + translate_cpp_exception(code, self.pos, + "%s = typeid(%s);" % (self.temp_code, arg_code), + None, None, self.in_nogil_context) + class TypeofNode(ExprNode): # Compile-time type of an expression, as a string. # @@ -11034,7 +11034,7 @@ class BinopNode(ExprNode): operand2 = self.operand2.compile_time_value(denv) try: return func(operand1, operand2) - except Exception as e: + except Exception as e: self.compile_time_value_error(e) def infer_type(self, env): @@ -11068,7 +11068,7 @@ class BinopNode(ExprNode): return self.is_py_operation_types(self.operand1.type, self.operand2.type) def is_py_operation_types(self, type1, type2): - return type1.is_pyobject or type2.is_pyobject or type1.is_ctuple or type2.is_ctuple + return type1.is_pyobject or type2.is_pyobject or type1.is_ctuple or type2.is_ctuple def is_pythran_operation(self, env): return self.is_pythran_operation_types(self.operand1.type, self.operand2.type, env) @@ -11089,14 +11089,14 @@ class BinopNode(ExprNode): self.type_error() return func_type = entry.type - self.exception_check = func_type.exception_check - self.exception_value = func_type.exception_value - if self.exception_check == '+': - # Used by NumBinopNodes to break up expressions involving multiple - # operators so that exceptions can be handled properly. 
- self.is_temp = 1 - if self.exception_value is None: - env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) + self.exception_check = func_type.exception_check + self.exception_value = func_type.exception_value + if self.exception_check == '+': + # Used by NumBinopNodes to break up expressions involving multiple + # operators so that exceptions can be handled properly. + self.is_temp = 1 + if self.exception_value is None: + env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) if func_type.is_ptr: func_type = func_type.base_type if len(func_type.args) == 1: @@ -11153,19 +11153,19 @@ class BinopNode(ExprNode): if self.type.is_pythran_expr: code.putln("// Pythran binop") code.putln("__Pyx_call_destructor(%s);" % self.result()) - if self.operator == '**': - code.putln("new (&%s) decltype(%s){pythonic::numpy::functor::power{}(%s, %s)};" % ( - self.result(), - self.result(), - self.operand1.pythran_result(), - self.operand2.pythran_result())) - else: - code.putln("new (&%s) decltype(%s){%s %s %s};" % ( - self.result(), - self.result(), - self.operand1.pythran_result(), - self.operator, - self.operand2.pythran_result())) + if self.operator == '**': + code.putln("new (&%s) decltype(%s){pythonic::numpy::functor::power{}(%s, %s)};" % ( + self.result(), + self.result(), + self.operand1.pythran_result(), + self.operand2.pythran_result())) + else: + code.putln("new (&%s) decltype(%s){%s %s %s};" % ( + self.result(), + self.result(), + self.operand1.pythran_result(), + self.operator, + self.operand2.pythran_result())) elif self.operand1.type.is_pyobject: function = self.py_operation_function(code) if self.operator == '**': @@ -11182,15 +11182,15 @@ class BinopNode(ExprNode): code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) elif self.is_temp: - # C++ overloaded operators with exception values are currently all - # handled through temporaries. - if self.is_cpp_operation() and self.exception_check == '+': - translate_cpp_exception(code, self.pos, - "%s = %s;" % (self.result(), self.calculate_result_code()), - self.result() if self.type.is_pyobject else None, - self.exception_value, self.in_nogil_context) - else: - code.putln("%s = %s;" % (self.result(), self.calculate_result_code())) + # C++ overloaded operators with exception values are currently all + # handled through temporaries. + if self.is_cpp_operation() and self.exception_check == '+': + translate_cpp_exception(code, self.pos, + "%s = %s;" % (self.result(), self.calculate_result_code()), + self.result() if self.type.is_pyobject else None, + self.exception_value, self.in_nogil_context) + else: + code.putln("%s = %s;" % (self.result(), self.calculate_result_code())) def type_error(self): if not (self.operand1.type.is_error @@ -11222,8 +11222,8 @@ class CBinopNode(BinopNode): cpp_type = None if type1.is_cpp_class or type1.is_ptr: cpp_type = type1.find_cpp_operation_type(self.operator, type2) - if cpp_type is None and (type2.is_cpp_class or type2.is_ptr): - cpp_type = type2.find_cpp_operation_type(self.operator, type1) + if cpp_type is None and (type2.is_cpp_class or type2.is_ptr): + cpp_type = type2.find_cpp_operation_type(self.operator, type1) # FIXME: do we need to handle other cases here? 
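Editorial note: the two-sided lookup in compute_c_result_type above (try the left operand's operator type, then the right's) parallels Python's own binary dispatch; a pure-Python analogue:

    class Meters:
        def __add__(self, other):
            return NotImplemented      # left-hand lookup fails...

    class Feet:
        def __radd__(self, other):
            return 'resolved via Feet.__radd__'

    # ...so Python falls back to the right operand, much as CBinopNode
    # falls back to type2.find_cpp_operation_type when type1 has no match
    assert Meters() + Feet() == 'resolved via Feet.__radd__'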
return cpp_type @@ -11327,7 +11327,7 @@ class NumBinopNode(BinopNode): self.operand1.result(), self.operand2.result(), self.overflow_bit_node.overflow_bit) - elif self.type.is_cpp_class or self.infix: + elif self.type.is_cpp_class or self.infix: if is_pythran_expr(self.type): result1, result2 = self.operand1.pythran_result(), self.operand2.pythran_result() else: @@ -11416,14 +11416,14 @@ class AddNode(NumBinopNode): def py_operation_function(self, code): type1, type2 = self.operand1.type, self.operand2.type - + if type1 is unicode_type or type2 is unicode_type: if type1 in (unicode_type, str_type) and type2 in (unicode_type, str_type): is_unicode_concat = True elif isinstance(self.operand1, FormattedValueNode) or isinstance(self.operand2, FormattedValueNode): # Assume that even if we don't know the second type, it's going to be a string. is_unicode_concat = True - else: + else: # Operation depends on the second type. is_unicode_concat = False @@ -11500,7 +11500,7 @@ class DivNode(NumBinopNode): func = compile_time_binary_operators[self.operator] if self.operator == '/' and self.truedivision is None: # => true div for floats, floor div for integers - if isinstance(op1, _py_int_types) and isinstance(op2, _py_int_types): + if isinstance(op1, _py_int_types) and isinstance(op2, _py_int_types): func = compile_time_binary_operators['//'] return func @@ -11519,23 +11519,23 @@ class DivNode(NumBinopNode): func = self.find_compile_time_binary_operator( operand1, operand2) return func(operand1, operand2) - except Exception as e: + except Exception as e: self.compile_time_value_error(e) - def _check_truedivision(self, env): + def _check_truedivision(self, env): if self.cdivision or env.directives['cdivision']: self.ctruedivision = False else: self.ctruedivision = self.truedivision - - def infer_type(self, env): - self._check_truedivision(env) - return self.result_type( - self.operand1.infer_type(env), + + def infer_type(self, env): + self._check_truedivision(env) + return self.result_type( + self.operand1.infer_type(env), self.operand2.infer_type(env), env) - - def analyse_operation(self, env): - self._check_truedivision(env) + + def analyse_operation(self, env): + self._check_truedivision(env) NumBinopNode.analyse_operation(self, env) if self.is_cpp_operation(): self.cdivision = True @@ -11550,7 +11550,7 @@ class DivNode(NumBinopNode): self.operand2 = self.operand2.coerce_to_simple(env) def compute_c_result_type(self, type1, type2): - if self.operator == '/' and self.ctruedivision and not type1.is_cpp_class and not type2.is_cpp_class: + if self.operator == '/' and self.ctruedivision and not type1.is_cpp_class and not type2.is_cpp_class: if not type1.is_float and not type2.is_float: widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type) widest_type = PyrexTypes.widest_numeric_type(type2, widest_type) @@ -11566,19 +11566,19 @@ class DivNode(NumBinopNode): def generate_evaluation_code(self, code): if not self.type.is_pyobject and not self.type.is_complex: if self.cdivision is None: - self.cdivision = ( - code.globalstate.directives['cdivision'] - or self.type.is_float - or ((self.type.is_numeric or self.type.is_enum) and not self.type.signed) - ) + self.cdivision = ( + code.globalstate.directives['cdivision'] + or self.type.is_float + or ((self.type.is_numeric or self.type.is_enum) and not self.type.signed) + ) if not self.cdivision: - code.globalstate.use_utility_code( - UtilityCode.load_cached("DivInt", "CMath.c").specialize(self.type)) + code.globalstate.use_utility_code( + 
UtilityCode.load_cached("DivInt", "CMath.c").specialize(self.type)) NumBinopNode.generate_evaluation_code(self, code) self.generate_div_warning_code(code) def generate_div_warning_code(self, code): - in_nogil = self.in_nogil_context + in_nogil = self.in_nogil_context if not self.type.is_pyobject: if self.zerodivision_check: if not self.infix: @@ -11586,62 +11586,62 @@ class DivNode(NumBinopNode): else: zero_test = "%s == 0" % self.operand2.result() code.putln("if (unlikely(%s)) {" % zero_test) - if in_nogil: - code.put_ensure_gil() + if in_nogil: + code.put_ensure_gil() code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message()) - if in_nogil: - code.put_release_ensured_gil() + if in_nogil: + code.put_release_ensured_gil() code.putln(code.error_goto(self.pos)) code.putln("}") if self.type.is_int and self.type.signed and self.operator != '%': - code.globalstate.use_utility_code(UtilityCode.load_cached("UnaryNegOverflows", "Overflow.c")) + code.globalstate.use_utility_code(UtilityCode.load_cached("UnaryNegOverflows", "Overflow.c")) if self.operand2.type.signed == 2: # explicitly signed, no runtime check needed minus1_check = 'unlikely(%s == -1)' % self.operand2.result() else: - type_of_op2 = self.operand2.type.empty_declaration_code() + type_of_op2 = self.operand2.type.empty_declaration_code() minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % ( type_of_op2, self.operand2.result(), type_of_op2) code.putln("else if (sizeof(%s) == sizeof(long) && %s " " && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % ( - self.type.empty_declaration_code(), + self.type.empty_declaration_code(), minus1_check, self.operand1.result())) - if in_nogil: - code.put_ensure_gil() + if in_nogil: + code.put_ensure_gil() code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");') - if in_nogil: - code.put_release_ensured_gil() + if in_nogil: + code.put_release_ensured_gil() code.putln(code.error_goto(self.pos)) code.putln("}") if code.globalstate.directives['cdivision_warnings'] and self.operator != '/': - code.globalstate.use_utility_code( - UtilityCode.load_cached("CDivisionWarning", "CMath.c")) + code.globalstate.use_utility_code( + UtilityCode.load_cached("CDivisionWarning", "CMath.c")) code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % ( self.operand1.result(), self.operand2.result())) - warning_code = "__Pyx_cdivision_warning(%(FILENAME)s, %(LINENO)s)" % { + warning_code = "__Pyx_cdivision_warning(%(FILENAME)s, %(LINENO)s)" % { 'FILENAME': Naming.filename_cname, 'LINENO': Naming.lineno_cname, - } - - if in_nogil: - result_code = 'result' - code.putln("int %s;" % result_code) - code.put_ensure_gil() - code.putln(code.set_error_info(self.pos, used=True)) - code.putln("%s = %s;" % (result_code, warning_code)) - code.put_release_ensured_gil() - else: - result_code = warning_code - code.putln(code.set_error_info(self.pos, used=True)) - - code.put("if (unlikely(%s)) " % result_code) + } + + if in_nogil: + result_code = 'result' + code.putln("int %s;" % result_code) + code.put_ensure_gil() + code.putln(code.set_error_info(self.pos, used=True)) + code.putln("%s = %s;" % (result_code, warning_code)) + code.put_release_ensured_gil() + else: + result_code = warning_code + code.putln(code.set_error_info(self.pos, used=True)) + + code.put("if (unlikely(%s)) " % result_code) code.put_goto(code.error_label) code.putln("}") def calculate_result_code(self): - if self.type.is_complex or self.is_cpp_operation(): + if self.type.is_complex or 
self.is_cpp_operation(): return NumBinopNode.calculate_result_code(self) elif self.type.is_float and self.operator == '//': return "floor(%s / %s)" % ( @@ -11725,12 +11725,12 @@ class ModNode(DivNode): if not self.type.is_pyobject and not self.cdivision: if self.type.is_int: code.globalstate.use_utility_code( - UtilityCode.load_cached("ModInt", "CMath.c").specialize(self.type)) + UtilityCode.load_cached("ModInt", "CMath.c").specialize(self.type)) else: # float code.globalstate.use_utility_code( - UtilityCode.load_cached("ModFloat", "CMath.c").specialize( + UtilityCode.load_cached("ModFloat", "CMath.c").specialize( self.type, math_h_modifier=self.type.math_h_modifier)) - # NOTE: skipping over DivNode here + # NOTE: skipping over DivNode here NumBinopNode.generate_evaluation_code(self, code) self.generate_div_warning_code(code) @@ -11752,19 +11752,19 @@ class ModNode(DivNode): self.operand2.result()) def py_operation_function(self, code): - type1, type2 = self.operand1.type, self.operand2.type - # ("..." % x) must call "x.__rmod__()" for string subtypes. - if type1 is unicode_type: - if self.operand1.may_be_none() or ( - type2.is_extension_type and type2.subtype_of(type1) or - type2 is py_object_type and not isinstance(self.operand2, CoerceToPyTypeNode)): + type1, type2 = self.operand1.type, self.operand2.type + # ("..." % x) must call "x.__rmod__()" for string subtypes. + if type1 is unicode_type: + if self.operand1.may_be_none() or ( + type2.is_extension_type and type2.subtype_of(type1) or + type2 is py_object_type and not isinstance(self.operand2, CoerceToPyTypeNode)): return '__Pyx_PyUnicode_FormatSafe' else: return 'PyUnicode_Format' - elif type1 is str_type: - if self.operand1.may_be_none() or ( - type2.is_extension_type and type2.subtype_of(type1) or - type2 is py_object_type and not isinstance(self.operand2, CoerceToPyTypeNode)): + elif type1 is str_type: + if self.operand1.may_be_none() or ( + type2.is_extension_type and type2.subtype_of(type1) or + type2 is py_object_type and not isinstance(self.operand2, CoerceToPyTypeNode)): return '__Pyx_PyString_FormatSafe' else: return '__Pyx_PyString_Format' @@ -11780,18 +11780,18 @@ class PowNode(NumBinopNode): if self.type.real_type.is_float: self.operand1 = self.operand1.coerce_to(self.type, env) self.operand2 = self.operand2.coerce_to(self.type, env) - self.pow_func = self.type.binary_op('**') + self.pow_func = self.type.binary_op('**') else: error(self.pos, "complex int powers not supported") self.pow_func = "<error>" elif self.type.is_float: self.pow_func = "pow" + self.type.math_h_modifier elif self.type.is_int: - self.pow_func = "__Pyx_pow_%s" % self.type.empty_declaration_code().replace(' ', '_') + self.pow_func = "__Pyx_pow_%s" % self.type.empty_declaration_code().replace(' ', '_') env.use_utility_code( - UtilityCode.load_cached("IntPow", "CMath.c").specialize( + UtilityCode.load_cached("IntPow", "CMath.c").specialize( func_name=self.pow_func, - type=self.type.empty_declaration_code(), + type=self.type.empty_declaration_code(), signed=self.type.signed and 1 or 0)) elif not self.type.is_error: error(self.pos, "got unexpected types for C power operator: %s, %s" % @@ -11812,7 +11812,7 @@ class PowNode(NumBinopNode): def py_operation_function(self, code): if (self.type.is_pyobject and self.operand1.constant_result == 2 and - isinstance(self.operand1.constant_result, _py_int_types) and + isinstance(self.operand1.constant_result, _py_int_types) and self.operand2.type is py_object_type): 
code.globalstate.use_utility_code(UtilityCode.load_cached('PyNumberPow2', 'Optimize.c')) if self.inplace: @@ -11913,13 +11913,13 @@ class BoolBinopNode(ExprNode): my_label = and_label = code.new_label('next_and') else: my_label = or_label = code.new_label('next_or') - self.operand1.generate_bool_evaluation_code( + self.operand1.generate_bool_evaluation_code( code, final_result_temp, final_result_type, and_label, or_label, end_label, my_label) and_label, or_label = outer_labels code.put_label(my_label) - self.operand2.generate_bool_evaluation_code( + self.operand2.generate_bool_evaluation_code( code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through) def generate_evaluation_code(self, code): @@ -11928,7 +11928,7 @@ class BoolBinopNode(ExprNode): or_label = and_label = None end_label = code.new_label('bool_binop_done') self.generate_bool_evaluation_code(code, self.result(), result_type, and_label, or_label, end_label, end_label) - code.put_label(end_label) + code.put_label(end_label) gil_message = "Truth-testing Python object" @@ -12024,43 +12024,43 @@ class BoolBinopResultNode(ExprNode): self.arg.generate_evaluation_code(code) if and_label or or_label: test_result, uses_temp = self.generate_operand_test(code) - if uses_temp and (and_label and or_label): - # cannot become final result => free early - # disposal: uses_temp and (and_label and or_label) - self.arg.generate_disposal_code(code) + if uses_temp and (and_label and or_label): + # cannot become final result => free early + # disposal: uses_temp and (and_label and or_label) + self.arg.generate_disposal_code(code) sense = '!' if or_label else '' code.putln("if (%s%s) {" % (sense, test_result)) if uses_temp: code.funcstate.release_temp(test_result) - if not uses_temp or not (and_label and or_label): - # disposal: (not uses_temp) or {not (and_label and or_label) [if]} - self.arg.generate_disposal_code(code) + if not uses_temp or not (and_label and or_label): + # disposal: (not uses_temp) or {not (and_label and or_label) [if]} + self.arg.generate_disposal_code(code) - if or_label and or_label != fall_through: + if or_label and or_label != fall_through: # value is false => short-circuit to next 'or' code.put_goto(or_label) if and_label: # value is true => go to next 'and' - if or_label: + if or_label: code.putln("} else {") - if not uses_temp: - # disposal: (not uses_temp) and {(and_label and or_label) [else]} - self.arg.generate_disposal_code(code) - if and_label != fall_through: - code.put_goto(and_label) + if not uses_temp: + # disposal: (not uses_temp) and {(and_label and or_label) [else]} + self.arg.generate_disposal_code(code) + if and_label != fall_through: + code.put_goto(and_label) if not and_label or not or_label: # if no next 'and' or 'or', we provide the result - if and_label or or_label: - code.putln("} else {") + if and_label or or_label: + code.putln("} else {") self.value.generate_evaluation_code(code) self.value.make_owned_reference(code) code.putln("%s = %s;" % (final_result_temp, self.value.result_as(final_result_type))) self.value.generate_post_assignment_code(code) - # disposal: {not (and_label and or_label) [else]} + # disposal: {not (and_label and or_label) [else]} self.arg.generate_disposal_code(code) self.value.free_temps(code) - if end_label != fall_through: + if end_label != fall_through: code.put_goto(end_label) if and_label or or_label: @@ -12077,7 +12077,7 @@ class CondExprNode(ExprNode): true_val = None false_val = None - is_temp = True + is_temp = True subexprs = ['test', 
'true_val', 'false_val'] @@ -12107,8 +12107,8 @@ class CondExprNode(ExprNode): def analyse_result_type(self, env): self.type = PyrexTypes.independent_spanning_type( self.true_val.type, self.false_val.type) - if self.type.is_reference: - self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type) + if self.type.is_reference: + self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type) if self.type.is_pyobject: self.result_ctype = py_object_type elif self.true_val.is_ephemeral() or self.false_val.is_ephemeral(): @@ -12116,16 +12116,16 @@ class CondExprNode(ExprNode): if self.true_val.type.is_pyobject or self.false_val.type.is_pyobject: self.true_val = self.true_val.coerce_to(self.type, env) self.false_val = self.false_val.coerce_to(self.type, env) - if self.type.is_error: + if self.type.is_error: self.type_error() return self - def coerce_to_integer(self, env): - self.true_val = self.true_val.coerce_to_integer(env) - self.false_val = self.false_val.coerce_to_integer(env) - self.result_ctype = None - return self.analyse_result_type(env) - + def coerce_to_integer(self, env): + self.true_val = self.true_val.coerce_to_integer(env) + self.false_val = self.false_val.coerce_to_integer(env) + self.result_ctype = None + return self.analyse_result_type(env) + def coerce_to(self, dst_type, env): self.true_val = self.true_val.coerce_to(dst_type, env) self.false_val = self.false_val.coerce_to(dst_type, env) @@ -12202,8 +12202,8 @@ class CmpNode(object): def calculate_cascaded_constant_result(self, operand1_result): func = compile_time_binary_operators[self.operator] operand2_result = self.operand2.constant_result - if (isinstance(operand1_result, any_string_type) and - isinstance(operand2_result, any_string_type) and + if (isinstance(operand1_result, any_string_type) and + isinstance(operand2_result, any_string_type) and type(operand1_result) != type(operand2_result)): # string comparison of different types isn't portable return @@ -12228,7 +12228,7 @@ class CmpNode(object): operand2 = self.operand2.compile_time_value(denv) try: result = func(operand1, operand2) - except Exception as e: + except Exception as e: self.compile_time_value_error(e) result = None if result: @@ -12300,9 +12300,9 @@ class CmpNode(object): if new_common_type is None: # fall back to generic type compatibility tests - if type1.is_ctuple or type2.is_ctuple: - new_common_type = py_object_type - elif type1 == type2: + if type1.is_ctuple or type2.is_ctuple: + new_common_type = py_object_type + elif type1 == type2: new_common_type = type1 elif type1.is_pyobject or type2.is_pyobject: if type2.is_numeric or type2.is_string: @@ -12414,7 +12414,7 @@ class CmpNode(object): if self.operand2.type is Builtin.dict_type: self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable") self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c") - self.special_bool_cmp_function = "__Pyx_PyDict_ContainsTF" + self.special_bool_cmp_function = "__Pyx_PyDict_ContainsTF" return True elif self.operand2.type is Builtin.set_type: self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable") @@ -12424,13 +12424,13 @@ class CmpNode(object): elif self.operand2.type is Builtin.unicode_type: self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable") self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c") - self.special_bool_cmp_function = "__Pyx_PyUnicode_ContainsTF" + self.special_bool_cmp_function 
= "__Pyx_PyUnicode_ContainsTF" return True else: if not self.operand2.type.is_pyobject: self.operand2 = self.operand2.coerce_to_pyobject(env) self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c") - self.special_bool_cmp_function = "__Pyx_PySequence_ContainsTF" + self.special_bool_cmp_function = "__Pyx_PySequence_ContainsTF" return True return False @@ -12501,20 +12501,20 @@ class CmpNode(object): common_type = type1 code1 = operand1.result_as(common_type) code2 = operand2.result_as(common_type) - statement = "%s = %s(%s %s %s);" % ( + statement = "%s = %s(%s %s %s);" % ( result_code, coerce_result, code1, self.c_operator(op), - code2) - if self.is_cpp_comparison() and self.exception_check == '+': - translate_cpp_exception( - code, - self.pos, - statement, - result_code if self.type.is_pyobject else None, - self.exception_value, - self.in_nogil_context) + code2) + if self.is_cpp_comparison() and self.exception_check == '+': + translate_cpp_exception( + code, + self.pos, + statement, + result_code if self.type.is_pyobject else None, + self.exception_value, + self.in_nogil_context) else: code.putln(statement) @@ -12658,7 +12658,7 @@ class PrimaryCmpNode(ExprNode, CmpNode): def analyse_cpp_comparison(self, env): type1 = self.operand1.type type2 = self.operand2.type - self.is_pycmp = False + self.is_pycmp = False entry = env.lookup_operator(self.operator, [self.operand1, self.operand2]) if entry is None: error(self.pos, "Invalid types for '%s' (%s, %s)" % @@ -12669,12 +12669,12 @@ class PrimaryCmpNode(ExprNode, CmpNode): func_type = entry.type if func_type.is_ptr: func_type = func_type.base_type - self.exception_check = func_type.exception_check - self.exception_value = func_type.exception_value - if self.exception_check == '+': - self.is_temp = True - if self.exception_value is None: - env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) + self.exception_check = func_type.exception_check + self.exception_value = func_type.exception_value + if self.exception_check == '+': + self.is_temp = True + if self.exception_value is None: + env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) if len(func_type.args) == 1: self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env) else: @@ -12836,10 +12836,10 @@ class CascadedCmpNode(Node, CmpNode): def has_python_operands(self): return self.operand2.type.is_pyobject - def is_cpp_comparison(self): - # cascaded comparisons aren't currently implemented for c++ classes. - return False - + def is_cpp_comparison(self): + # cascaded comparisons aren't currently implemented for c++ classes. + return False + def optimise_comparison(self, operand1, env, result_is_bool=False): if self.find_special_bool_compare_function(env, operand1, result_is_bool): self.is_pycmp = False @@ -12912,19 +12912,19 @@ binop_node_classes = { "**": PowNode, } - -def binop_node(pos, operator, operand1, operand2, inplace=False, **kwargs): + +def binop_node(pos, operator, operand1, operand2, inplace=False, **kwargs): # Construct binop node of appropriate class for # given operator. 
- return binop_node_classes[operator]( - pos, - operator=operator, - operand1=operand1, - operand2=operand2, - inplace=inplace, - **kwargs) - - + return binop_node_classes[operator]( + pos, + operator=operator, + operand1=operand1, + operand2=operand2, + inplace=inplace, + **kwargs) + + #------------------------------------------------------------------- # # Coercion nodes @@ -12961,7 +12961,7 @@ class CoercionNode(ExprNode): code.annotate((file, line, col-1), AnnotationItem( style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type))) - + class CoerceToMemViewSliceNode(CoercionNode): """ Coerce an object to a memoryview slice. This holds a new reference in @@ -13190,7 +13190,7 @@ class CoerceToPyTypeNode(CoercionNode): # to a Python object. type = py_object_type - target_type = py_object_type + target_type = py_object_type is_temp = 1 def __init__(self, arg, env, type=py_object_type): @@ -13210,17 +13210,17 @@ class CoerceToPyTypeNode(CoercionNode): self.type = unicode_type elif arg.type.is_complex: self.type = Builtin.complex_type - self.target_type = self.type + self.target_type = self.type elif arg.type.is_string or arg.type.is_cpp_string: if (type not in (bytes_type, bytearray_type) and not env.directives['c_string_encoding']): error(arg.pos, "default encoding required for conversion from '%s' to '%s'" % (arg.type, type)) - self.type = self.target_type = type + self.type = self.target_type = type else: # FIXME: check that the target type and the resulting type are compatible - self.target_type = type + self.target_type = type gil_message = "Converting to Python object" @@ -13248,11 +13248,11 @@ class CoerceToPyTypeNode(CoercionNode): return self def generate_result_code(self, code): - code.putln('%s; %s' % ( - self.arg.type.to_py_call_code( - self.arg.result(), - self.result(), - self.target_type), + code.putln('%s; %s' % ( + self.arg.type.to_py_call_code( + self.arg.result(), + self.result(), + self.target_type), code.error_goto_if_null(self.result(), self.pos))) code.put_gotref(self.py_result()) @@ -13320,7 +13320,7 @@ class CoerceFromPyTypeNode(CoercionNode): return self def is_ephemeral(self): - return (self.type.is_ptr and not self.type.is_array) and self.arg.is_ephemeral() + return (self.type.is_ptr and not self.type.is_array) and self.arg.is_ephemeral() def generate_result_code(self, code): from_py_function = None @@ -13330,7 +13330,7 @@ class CoerceFromPyTypeNode(CoercionNode): from_py_function = '__Pyx_PyBytes' + self.type.from_py_function[len('__Pyx_PyObject'):] NoneCheckNode.generate_if_needed(self.arg, code, "expected bytes, NoneType found") - code.putln(self.type.from_py_call_code( + code.putln(self.type.from_py_call_code( self.arg.py_result(), self.result(), self.pos, code, from_py_function=from_py_function)) if self.type.is_pyobject: code.put_gotref(self.py_result()) @@ -13346,14 +13346,14 @@ class CoerceToBooleanNode(CoercionNode): type = PyrexTypes.c_bint_type _special_builtins = { - Builtin.list_type: 'PyList_GET_SIZE', - Builtin.tuple_type: 'PyTuple_GET_SIZE', - Builtin.set_type: 'PySet_GET_SIZE', - Builtin.frozenset_type: 'PySet_GET_SIZE', - Builtin.bytes_type: 'PyBytes_GET_SIZE', + Builtin.list_type: 'PyList_GET_SIZE', + Builtin.tuple_type: 'PyTuple_GET_SIZE', + Builtin.set_type: 'PySet_GET_SIZE', + Builtin.frozenset_type: 'PySet_GET_SIZE', + Builtin.bytes_type: 'PyBytes_GET_SIZE', Builtin.bytearray_type: 'PyByteArray_GET_SIZE', - Builtin.unicode_type: '__Pyx_PyUnicode_IS_TRUE', - } + Builtin.unicode_type: '__Pyx_PyUnicode_IS_TRUE', + } def 
__init__(self, arg, env): CoercionNode.__init__(self, arg) @@ -13390,7 +13390,7 @@ class CoerceToBooleanNode(CoercionNode): self.arg.py_result(), code.error_goto_if_neg(self.result(), self.pos))) - + class CoerceToComplexNode(CoercionNode): def __init__(self, arg, dst_type, env): diff --git a/contrib/tools/cython/Cython/Compiler/FlowControl.pxd b/contrib/tools/cython/Cython/Compiler/FlowControl.pxd index 1b855455f7..c87370b819 100644 --- a/contrib/tools/cython/Cython/Compiler/FlowControl.pxd +++ b/contrib/tools/cython/Cython/Compiler/FlowControl.pxd @@ -12,7 +12,7 @@ cdef class ControlBlock: cdef public dict gen cdef public set bounded - # Big integer bitsets + # Big integer bitsets cdef public object i_input cdef public object i_output cdef public object i_gen @@ -87,11 +87,11 @@ cdef class Uninitialized: cdef class Unknown: pass - -cdef class MessageCollection: - cdef set messages - - + +cdef class MessageCollection: + cdef set messages + + @cython.locals(dirty=bint, block=ControlBlock, parent=ControlBlock, assmt=NameAssignment) cdef check_definitions(ControlFlow flow, dict compiler_directives) @@ -99,7 +99,7 @@ cdef check_definitions(ControlFlow flow, dict compiler_directives) @cython.final cdef class ControlFlowAnalysis(CythonTransform): cdef object gv_ctx - cdef object constant_folder + cdef object constant_folder cdef set reductions cdef list env_stack cdef list stack diff --git a/contrib/tools/cython/Cython/Compiler/FlowControl.py b/contrib/tools/cython/Cython/Compiler/FlowControl.py index 7e8270b198..df04471f90 100644 --- a/contrib/tools/cython/Cython/Compiler/FlowControl.py +++ b/contrib/tools/cython/Cython/Compiler/FlowControl.py @@ -2,7 +2,7 @@ from __future__ import absolute_import import cython cython.declare(PyrexTypes=object, ExprNodes=object, Nodes=object, - Builtin=object, InternalError=object, error=object, warning=object, + Builtin=object, InternalError=object, error=object, warning=object, py_object_type=object, unspecified_type=object, object_expr=object, fake_rhs_expr=object, TypedExprNode=object) @@ -15,9 +15,9 @@ from . import PyrexTypes from .Visitor import TreeVisitor, CythonTransform from .Errors import error, warning, InternalError -from .Optimize import ConstantFolding +from .Optimize import ConstantFolding + - class TypedExprNode(ExprNodes.ExprNode): # Used for declaring assignments of a specified type without a known entry. 
def __init__(self, type, may_be_none=None, pos=None): @@ -256,7 +256,7 @@ class ControlFlow(object): for entry in block.bounded: block.i_kill |= self.assmts[entry].bit - for assmts in self.assmts.values(): + for assmts in self.assmts.values(): self.entry_point.i_gen |= assmts.bit self.entry_point.i_output = self.entry_point.i_gen @@ -486,11 +486,11 @@ class GV(object): if annotate_defs: for stat in block.stats: if isinstance(stat, NameAssignment): - label += '\n %s [%s %s]' % ( - stat.entry.name, 'deletion' if stat.is_deletion else 'definition', stat.pos[1]) + label += '\n %s [%s %s]' % ( + stat.entry.name, 'deletion' if stat.is_deletion else 'definition', stat.pos[1]) elif isinstance(stat, NameReference): if stat.entry: - label += '\n %s [reference %s]' % (stat.entry.name, stat.pos[1]) + label += '\n %s [reference %s]' % (stat.entry.name, stat.pos[1]) if not label: label = 'empty' pid = ctx.nodeid(block) @@ -505,16 +505,16 @@ class GV(object): class MessageCollection(object): """Collect error/warnings messages first then sort""" def __init__(self): - self.messages = set() + self.messages = set() def error(self, pos, message): - self.messages.add((pos, True, message)) + self.messages.add((pos, True, message)) def warning(self, pos, message): - self.messages.add((pos, False, message)) + self.messages.add((pos, False, message)) def report(self): - for pos, is_error, message in sorted(self.messages): + for pos, is_error, message in sorted(self.messages): if is_error: error(pos, message) else: @@ -582,14 +582,14 @@ def check_definitions(flow, compiler_directives): node.cf_maybe_null = False # Find uninitialized references and cf-hints - for node, entry in references.items(): + for node, entry in references.items(): if Uninitialized in node.cf_state: node.cf_maybe_null = True if not entry.from_closure and len(node.cf_state) == 1: node.cf_is_null = True if (node.allow_null or entry.from_closure - or entry.is_pyclass_attr or entry.type.is_error): - pass # Can be uninitialized here + or entry.is_pyclass_attr or entry.type.is_error): + pass # Can be uninitialized here elif node.cf_is_null: if entry.error_on_uninitialized or ( Options.error_on_uninitialized and ( @@ -635,7 +635,7 @@ def check_definitions(flow, compiler_directives): for entry in flow.entries: if (not entry.cf_references and not entry.is_pyclass_attr): - if entry.name != '_' and not entry.name.startswith('unused'): + if entry.name != '_' and not entry.name.startswith('unused'): # '_' is often used for unused variables, e.g. in loops if entry.is_arg: if warn_unused_arg: @@ -675,7 +675,7 @@ class ControlFlowAnalysis(CythonTransform): def visit_ModuleNode(self, node): self.gv_ctx = GVContext() - self.constant_folder = ConstantFolding() + self.constant_folder = ConstantFolding() # Set of NameNode reductions self.reductions = set() @@ -778,13 +778,13 @@ class ControlFlowAnalysis(CythonTransform): if entry is None: # TODO: This shouldn't happen... 
return self.flow.mark_assignment(lhs, rhs, entry) - elif lhs.is_sequence_constructor: - for i, arg in enumerate(lhs.args): - if not rhs or arg.is_starred: - item_node = None - else: - item_node = rhs.inferable_item_node(i) - self.mark_assignment(arg, item_node) + elif lhs.is_sequence_constructor: + for i, arg in enumerate(lhs.args): + if not rhs or arg.is_starred: + item_node = None + else: + item_node = rhs.inferable_item_node(i) + self.mark_assignment(arg, item_node) else: self._visit(lhs) @@ -832,7 +832,7 @@ class ControlFlowAnalysis(CythonTransform): self.in_inplace_assignment = True self.visitchildren(node) self.in_inplace_assignment = False - self.mark_assignment(node.lhs, self.constant_folder(node.create_binop_node())) + self.mark_assignment(node.lhs, self.constant_folder(node.create_binop_node())) return node def visit_DelStatNode(self, node): @@ -843,8 +843,8 @@ class ControlFlowAnalysis(CythonTransform): error(arg.pos, "can not delete variable '%s' " "referenced in nested scope" % entry.name) - if not node.ignore_nonexisting: - self._visit(arg) # mark reference + if not node.ignore_nonexisting: + self._visit(arg) # mark reference self.flow.mark_deletion(arg, entry) else: self._visit(arg) @@ -981,11 +981,11 @@ class ControlFlowAnalysis(CythonTransform): for arg in sequence.args[:2]: self.mark_assignment(target, arg) if len(sequence.args) > 2: - self.mark_assignment(target, self.constant_folder( + self.mark_assignment(target, self.constant_folder( ExprNodes.binop_node(node.pos, '+', sequence.args[0], - sequence.args[2]))) + sequence.args[2]))) if not is_special: # A for-loop basically translates to subsequent calls to @@ -996,9 +996,9 @@ class ControlFlowAnalysis(CythonTransform): self.mark_assignment(target, node.item) - def visit_AsyncForStatNode(self, node): - return self.visit_ForInStatNode(node) - + def visit_AsyncForStatNode(self, node): + return self.visit_ForInStatNode(node) + def visit_ForInStatNode(self, node): condition_block = self.flow.nextblock() next_block = self.flow.newblock() @@ -1010,9 +1010,9 @@ class ControlFlowAnalysis(CythonTransform): if isinstance(node, Nodes.ForInStatNode): self.mark_forloop_target(node) - elif isinstance(node, Nodes.AsyncForStatNode): - # not entirely correct, but good enough for now - self.mark_assignment(node.target, node.item) + elif isinstance(node, Nodes.AsyncForStatNode): + # not entirely correct, but good enough for now + self.mark_assignment(node.target, node.item) else: # Parallel self.mark_assignment(node.target) @@ -1090,8 +1090,8 @@ class ControlFlowAnalysis(CythonTransform): self.flow.nextblock() self.mark_assignment(node.target, node.bound1) if node.step is not None: - self.mark_assignment(node.target, self.constant_folder( - ExprNodes.binop_node(node.pos, '+', node.bound1, node.step))) + self.mark_assignment(node.target, self.constant_folder( + ExprNodes.binop_node(node.pos, '+', node.bound1, node.step))) # Body block self.flow.nextblock() self._visit(node.body) @@ -1182,7 +1182,7 @@ class ControlFlowAnalysis(CythonTransform): # Exception entry point entry_point = self.flow.newblock() self.flow.block = entry_point - self._visit(node.finally_except_clause) + self._visit(node.finally_except_clause) if self.flow.block and self.flow.exceptions: self.flow.block.add_child(self.flow.exceptions[-1].entry_point) diff --git a/contrib/tools/cython/Cython/Compiler/FusedNode.py b/contrib/tools/cython/Cython/Compiler/FusedNode.py index 5b743b51f9..26d6ffd3d6 100644 --- a/contrib/tools/cython/Cython/Compiler/FusedNode.py +++ 
b/contrib/tools/cython/Cython/Compiler/FusedNode.py @@ -93,8 +93,8 @@ class FusedCFuncDefNode(StatListNode): for cname, fused_to_specific in permutations: copied_node = copy.deepcopy(self.node) - # keep signature object identity for special casing in DefNode.analyse_declarations() - copied_node.entry.signature = self.node.entry.signature + # keep signature object identity for special casing in DefNode.analyse_declarations() + copied_node.entry.signature = self.node.entry.signature self._specialize_function_args(copied_node.args, fused_to_specific) copied_node.return_type = self.node.return_type.specialize( @@ -219,7 +219,7 @@ class FusedCFuncDefNode(StatListNode): if arg.type.is_fused: arg.type = arg.type.specialize(fused_to_specific) if arg.type.is_memoryviewslice: - arg.type.validate_memslice_dtype(arg.pos) + arg.type.validate_memslice_dtype(arg.pos) def create_new_local_scope(self, node, env, f2s): """ @@ -281,14 +281,14 @@ class FusedCFuncDefNode(StatListNode): """ for specialized_type in normal_types: # all_numeric = all_numeric and specialized_type.is_numeric - pyx_code.context.update( - py_type_name=specialized_type.py_type_name(), - specialized_type_name=specialized_type.specialization_string, - ) + pyx_code.context.update( + py_type_name=specialized_type.py_type_name(), + specialized_type_name=specialized_type.specialization_string, + ) pyx_code.put_chunk( u""" - if isinstance(arg, {{py_type_name}}): - dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'; break + if isinstance(arg, {{py_type_name}}): + dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'; break """) def _dtype_name(self, dtype): @@ -418,27 +418,27 @@ class FusedCFuncDefNode(StatListNode): to each specialization, which obtains the buffer each time and tries to match the format string. 
""" - # The first thing to find a match in this loop breaks out of the loop - pyx_code.put_chunk( - u""" + # The first thing to find a match in this loop breaks out of the loop + pyx_code.put_chunk( + u""" """ + (u"arg_is_pythran_compatible = False" if pythran_types else u"") + u""" - if ndarray is not None: - if isinstance(arg, ndarray): - dtype = arg.dtype + if ndarray is not None: + if isinstance(arg, ndarray): + dtype = arg.dtype """ + (u"arg_is_pythran_compatible = True" if pythran_types else u"") + u""" - elif __pyx_memoryview_check(arg): - arg_base = arg.base - if isinstance(arg_base, ndarray): - dtype = arg_base.dtype - else: - dtype = None - else: - dtype = None - - itemsize = -1 - if dtype is not None: - itemsize = dtype.itemsize - kind = ord(dtype.kind) + elif __pyx_memoryview_check(arg): + arg_base = arg.base + if isinstance(arg_base, ndarray): + dtype = arg_base.dtype + else: + dtype = None + else: + dtype = None + + itemsize = -1 + if dtype is not None: + itemsize = dtype.itemsize + kind = ord(dtype.kind) dtype_signed = kind == 'i' """) pyx_code.indent(2) @@ -463,13 +463,13 @@ class FusedCFuncDefNode(StatListNode): else: arg_is_pythran_compatible = not (arg.flags.f_contiguous and (<Py_ssize_t>arg.ndim) > 1) """) - pyx_code.named_insertion_point("numpy_dtype_checks") + pyx_code.named_insertion_point("numpy_dtype_checks") self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types) - pyx_code.dedent(2) + pyx_code.dedent(2) - for specialized_type in buffer_types: - self._buffer_parse_format_string_check( - pyx_code, decl_code, specialized_type, env) + for specialized_type in buffer_types: + self._buffer_parse_format_string_check( + pyx_code, decl_code, specialized_type, env) def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types, pythran_types): """ @@ -535,23 +535,23 @@ class FusedCFuncDefNode(StatListNode): Specialize fused types and split into normal types and buffer types. """ specialized_types = PyrexTypes.get_specialized_types(arg.type) - - # Prefer long over int, etc by sorting (see type classes in PyrexTypes.py) - specialized_types.sort() - + + # Prefer long over int, etc by sorting (see type classes in PyrexTypes.py) + specialized_types.sort() + seen_py_type_names = set() normal_types, buffer_types, pythran_types = [], [], [] - has_object_fallback = False + has_object_fallback = False for specialized_type in specialized_types: py_type_name = specialized_type.py_type_name() if py_type_name: if py_type_name in seen_py_type_names: continue seen_py_type_names.add(py_type_name) - if py_type_name == 'object': - has_object_fallback = True - else: - normal_types.append(specialized_type) + if py_type_name == 'object': + has_object_fallback = True + else: + normal_types.append(specialized_type) elif specialized_type.is_pythran_expr: pythran_types.append(specialized_type) elif specialized_type.is_buffer or specialized_type.is_memoryviewslice: @@ -620,7 +620,7 @@ class FusedCFuncDefNode(StatListNode): # FIXME: use a typed signature - currently fails badly because # default arguments inherit the types we specify here! 
- dest_sig = [None] * {{n_fused}} + dest_sig = [None] * {{n_fused}} if kwargs is not None and not kwargs: kwargs = None @@ -659,22 +659,22 @@ class FusedCFuncDefNode(StatListNode): normal_types, buffer_types, pythran_types, has_object_fallback = self._split_fused_types(arg) self._unpack_argument(pyx_code) - - # 'unrolled' loop, first match breaks out of it - if pyx_code.indenter("while 1:"): - if normal_types: - self._fused_instance_checks(normal_types, pyx_code, env) + + # 'unrolled' loop, first match breaks out of it + if pyx_code.indenter("while 1:"): + if normal_types: + self._fused_instance_checks(normal_types, pyx_code, env) if buffer_types or pythran_types: env.use_utility_code(Code.UtilityCode.load_cached("IsLittleEndian", "ModuleSetupCode.c")) self._buffer_checks(buffer_types, pythran_types, pyx_code, decl_code, env) - if has_object_fallback: - pyx_code.context.update(specialized_type_name='object') - pyx_code.putln(self.match) - else: - pyx_code.putln(self.no_match) - pyx_code.putln("break") - pyx_code.dedent() - + if has_object_fallback: + pyx_code.context.update(specialized_type_name='object') + pyx_code.putln(self.match) + else: + pyx_code.putln(self.no_match) + pyx_code.putln("break") + pyx_code.dedent() + fused_index += 1 all_buffer_types.update(buffer_types) all_buffer_types.update(ty.org_buffer for ty in pythran_types) @@ -716,14 +716,14 @@ class FusedCFuncDefNode(StatListNode): fragment_code = pyx_code.getvalue() # print decl_code.getvalue() # print fragment_code - from .Optimize import ConstantFolding - fragment = TreeFragment.TreeFragment( - fragment_code, level='module', pipeline=[ConstantFolding()]) + from .Optimize import ConstantFolding + fragment = TreeFragment.TreeFragment( + fragment_code, level='module', pipeline=[ConstantFolding()]) ast = TreeFragment.SetPosTransform(self.node.pos)(fragment.root) UtilityCode.declare_declarations_in_scope( decl_code.getvalue(), env.global_scope()) ast.scope = env - # FIXME: for static methods of cdef classes, we build the wrong signature here: first arg becomes 'self' + # FIXME: for static methods of cdef classes, we build the wrong signature here: first arg becomes 'self' ast.analyse_declarations(env) py_func = ast.stats[-1] # the DefNode self.fragment_scope = ast.scope @@ -806,7 +806,7 @@ class FusedCFuncDefNode(StatListNode): if self.py_func: args = [CloneNode(default) for default in defaults if default] self.defaults_tuple = TupleNode(self.pos, args=args) - self.defaults_tuple = self.defaults_tuple.analyse_types(env, skip_children=True).coerce_to_pyobject(env) + self.defaults_tuple = self.defaults_tuple.analyse_types(env, skip_children=True).coerce_to_pyobject(env) self.defaults_tuple = ProxyNode(self.defaults_tuple) self.code_object = ProxyNode(self.specialized_pycfuncs[0].code_object) @@ -829,15 +829,15 @@ class FusedCFuncDefNode(StatListNode): else: nodes = self.nodes - signatures = [StringEncoding.EncodedString(node.specialized_signature_string) - for node in nodes] + signatures = [StringEncoding.EncodedString(node.specialized_signature_string) + for node in nodes] keys = [ExprNodes.StringNode(node.pos, value=sig) - for node, sig in zip(nodes, signatures)] - values = [ExprNodes.PyCFunctionNode.from_defnode(node, binding=True) - for node in nodes] + for node, sig in zip(nodes, signatures)] + values = [ExprNodes.PyCFunctionNode.from_defnode(node, binding=True) + for node in nodes] + + self.__signatures__ = ExprNodes.DictNode.from_pairs(self.pos, zip(keys, values)) - self.__signatures__ = 
ExprNodes.DictNode.from_pairs(self.pos, zip(keys, values)) - self.specialized_pycfuncs = values for pycfuncnode in values: pycfuncnode.is_specialization = True diff --git a/contrib/tools/cython/Cython/Compiler/Future.py b/contrib/tools/cython/Cython/Compiler/Future.py index af9f55260e..848792e00b 100644 --- a/contrib/tools/cython/Cython/Compiler/Future.py +++ b/contrib/tools/cython/Cython/Compiler/Future.py @@ -4,12 +4,12 @@ def _get_feature(name): return getattr(__future__, name, object()) unicode_literals = _get_feature("unicode_literals") -with_statement = _get_feature("with_statement") # dummy +with_statement = _get_feature("with_statement") # dummy division = _get_feature("division") print_function = _get_feature("print_function") absolute_import = _get_feature("absolute_import") nested_scopes = _get_feature("nested_scopes") # dummy generators = _get_feature("generators") # dummy -generator_stop = _get_feature("generator_stop") +generator_stop = _get_feature("generator_stop") del _get_feature diff --git a/contrib/tools/cython/Cython/Compiler/Lexicon.py b/contrib/tools/cython/Cython/Compiler/Lexicon.py index f88cfa8d5a..72c9ceaefd 100644 --- a/contrib/tools/cython/Cython/Compiler/Lexicon.py +++ b/contrib/tools/cython/Cython/Compiler/Lexicon.py @@ -3,11 +3,11 @@ # Cython Scanner - Lexical Definitions # -from __future__ import absolute_import, unicode_literals +from __future__ import absolute_import, unicode_literals raw_prefixes = "rR" bytes_prefixes = "bB" -string_prefixes = "fFuU" + bytes_prefixes +string_prefixes = "fFuU" + bytes_prefixes char_prefixes = "cC" any_string_prefix = raw_prefixes + string_prefixes + char_prefixes IDENT = 'IDENT' @@ -26,25 +26,25 @@ def make_lexicon(): hexdigit = Any("0123456789ABCDEFabcdef") indentation = Bol + Rep(Any(" \t")) - def underscore_digits(d): - return Rep1(d) + Rep(Str("_") + Rep1(d)) - - decimal = underscore_digits(digit) + def underscore_digits(d): + return Rep1(d) + Rep(Str("_") + Rep1(d)) + + decimal = underscore_digits(digit) dot = Str(".") exponent = Any("Ee") + Opt(Any("+-")) + decimal decimal_fract = (decimal + dot + Opt(decimal)) | (dot + decimal) name = letter + Rep(letter | digit) - intconst = decimal | (Str("0") + ((Any("Xx") + underscore_digits(hexdigit)) | - (Any("Oo") + underscore_digits(octdigit)) | - (Any("Bb") + underscore_digits(bindigit)) )) + intconst = decimal | (Str("0") + ((Any("Xx") + underscore_digits(hexdigit)) | + (Any("Oo") + underscore_digits(octdigit)) | + (Any("Bb") + underscore_digits(bindigit)) )) intsuffix = (Opt(Any("Uu")) + Opt(Any("Ll")) + Opt(Any("Ll"))) | (Opt(Any("Ll")) + Opt(Any("Ll")) + Opt(Any("Uu"))) intliteral = intconst + intsuffix fltconst = (decimal_fract + Opt(exponent)) | (decimal + exponent) imagconst = (intconst | fltconst) + Any("jJ") - # invalid combinations of prefixes are caught in p_string_literal - beginstring = Opt(Rep(Any(string_prefixes + raw_prefixes)) | + # invalid combinations of prefixes are caught in p_string_literal + beginstring = Opt(Rep(Any(string_prefixes + raw_prefixes)) | Any(char_prefixes) ) + (Str("'") | Str('"') | Str("'''") | Str('"""')) two_oct = octdigit + octdigit @@ -70,9 +70,9 @@ def make_lexicon(): return Lexicon([ (name, IDENT), - (intliteral, Method('strip_underscores', symbol='INT')), - (fltconst, Method('strip_underscores', symbol='FLOAT')), - (imagconst, Method('strip_underscores', symbol='IMAG')), + (intliteral, Method('strip_underscores', symbol='INT')), + (fltconst, Method('strip_underscores', symbol='FLOAT')), + (imagconst, Method('strip_underscores', 
symbol='IMAG')), (punct | diphthong, TEXT), (bra, Method('open_bracket_action')), diff --git a/contrib/tools/cython/Cython/Compiler/Main.py b/contrib/tools/cython/Cython/Compiler/Main.py index 78b5131e6e..af873843b5 100644 --- a/contrib/tools/cython/Cython/Compiler/Main.py +++ b/contrib/tools/cython/Cython/Compiler/Main.py @@ -7,39 +7,39 @@ from __future__ import absolute_import import os import re import sys -import io +import io if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[:2] < (3, 3): sys.stderr.write("Sorry, Cython requires Python 2.6+ or 3.3+, found %d.%d\n" % tuple(sys.version_info[:2])) sys.exit(1) -try: - from __builtin__ import basestring -except ImportError: - basestring = str - +try: + from __builtin__ import basestring +except ImportError: + basestring = str + # Do not import Parsing here, import it when needed, because Parsing imports # Nodes, which globally needs debug command line options initialized to set a # conditional metaclass. These options are processed by CmdLine called from # main() in this file. # import Parsing from . import Errors -from .StringEncoding import EncodedString +from .StringEncoding import EncodedString from .Scanning import PyrexScanner, FileSourceDescriptor from .Errors import PyrexError, CompileError, error, warning from .Symtab import ModuleScope from .. import Utils from . import Options -from . import Version # legacy import needed by old PyTables versions -version = Version.version # legacy attribute - use "Cython.__version__" instead - +from . import Version # legacy import needed by old PyTables versions +version = Version.version # legacy attribute - use "Cython.__version__" instead + module_name_pattern = re.compile(r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)*$") verbose = 0 -standard_include_path = os.path.abspath(os.path.join(os.path.dirname(__file__), - os.path.pardir, 'Includes')) +standard_include_path = os.path.abspath(os.path.join(os.path.dirname(__file__), + os.path.pardir, 'Includes')) class CompilationData(object): # Bundles the information that is passed from transform to transform. @@ -69,10 +69,10 @@ class Context(object): # language_level int currently 2 or 3 for Python 2/3 cython_scope = None - language_level = None # warn when not set but default to Py2 + language_level = None # warn when not set but default to Py2 def __init__(self, include_directories, compiler_directives, cpp=False, - language_level=None, options=None): + language_level=None, options=None): # cython_scope is a hack, set to False by subclasses, in order to break # an infinite loop. # Better code organization would fix it. 
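For context: `module_name_pattern` in the Main.py hunk above is what `find_module()` later uses to reject invalid fully-qualified module names (raising a CompileError for names the regex does not match). A tiny runnable check of what that pattern accepts:

# What module_name_pattern accepts: dot-separated identifier runs.
import re

module_name_pattern = re.compile(r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)*$")

def is_valid_module_name(qualified_name):
    return module_name_pattern.match(qualified_name) is not None

assert is_valid_module_name("pkg.sub.mod")
assert not is_valid_module_name("pkg..mod")  # empty component
assert not is_valid_module_name("1pkg")      # identifiers cannot start with a digit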
@@ -87,51 +87,51 @@ class Context(object): self.cpp = cpp self.options = options - self.pxds = {} # full name -> node tree - self._interned = {} # (type(value), value, *key_args) -> interned_value + self.pxds = {} # full name -> node tree + self._interned = {} # (type(value), value, *key_args) -> interned_value - if language_level is not None: - self.set_language_level(language_level) + if language_level is not None: + self.set_language_level(language_level) self.gdb_debug_outputwriter = None def set_language_level(self, level): - from .Future import print_function, unicode_literals, absolute_import, division - future_directives = set() - if level == '3str': - level = 3 - else: - level = int(level) - if level >= 3: - future_directives.add(unicode_literals) - if level >= 3: - future_directives.update([print_function, absolute_import, division]) + from .Future import print_function, unicode_literals, absolute_import, division + future_directives = set() + if level == '3str': + level = 3 + else: + level = int(level) + if level >= 3: + future_directives.add(unicode_literals) + if level >= 3: + future_directives.update([print_function, absolute_import, division]) self.language_level = level - self.future_directives = future_directives + self.future_directives = future_directives if level >= 3: self.modules['builtins'] = self.modules['__builtin__'] - def intern_ustring(self, value, encoding=None): - key = (EncodedString, value, encoding) - try: - return self._interned[key] - except KeyError: - pass - value = EncodedString(value) - if encoding: - value.encoding = encoding - self._interned[key] = value - return value - - def intern_value(self, value, *key): - key = (type(value), value) + key - try: - return self._interned[key] - except KeyError: - pass - self._interned[key] = value - return value - + def intern_ustring(self, value, encoding=None): + key = (EncodedString, value, encoding) + try: + return self._interned[key] + except KeyError: + pass + value = EncodedString(value) + if encoding: + value.encoding = encoding + self._interned[key] = value + return value + + def intern_value(self, value, *key): + key = (type(value), value) + key + try: + return self._interned[key] + except KeyError: + pass + self._interned[key] = value + return value + # pipeline creation functions can now be found in Pipeline.py def process_pxd(self, source_desc, scope, module_name): @@ -149,8 +149,8 @@ class Context(object): def nonfatal_error(self, exc): return Errors.report_error(exc) - def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1, - absolute_fallback=True): + def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1, + absolute_fallback=True): # Finds and returns the module scope corresponding to # the given relative or absolute module name. If this # is the first time the module has been requested, finds @@ -161,27 +161,27 @@ class Context(object): debug_find_module = 0 if debug_find_module: print("Context.find_module: module_name = %s, relative_to = %s, pos = %s, need_pxd = %s" % ( - module_name, relative_to, pos, need_pxd)) + module_name, relative_to, pos, need_pxd)) scope = None pxd_pathname = None if relative_to: - if module_name: - # from .module import ... - qualified_name = relative_to.qualify_name(module_name) - else: - # from . import ... 
- qualified_name = relative_to.qualified_name - scope = relative_to - relative_to = None - else: - qualified_name = module_name - - if not module_name_pattern.match(qualified_name): - raise CompileError(pos or (module_name, 0, 0), - "'%s' is not a valid module name" % module_name) - - if relative_to: + if module_name: + # from .module import ... + qualified_name = relative_to.qualify_name(module_name) + else: + # from . import ... + qualified_name = relative_to.qualified_name + scope = relative_to + relative_to = None + else: + qualified_name = module_name + + if not module_name_pattern.match(qualified_name): + raise CompileError(pos or (module_name, 0, 0), + "'%s' is not a valid module name" % module_name) + + if relative_to: if debug_find_module: print("...trying relative import") scope = relative_to.lookup_submodule(module_name) @@ -192,12 +192,12 @@ class Context(object): if not scope: if debug_find_module: print("...trying absolute import") - if absolute_fallback: - qualified_name = module_name + if absolute_fallback: + qualified_name = module_name scope = self - for name in qualified_name.split("."): + for name in qualified_name.split("."): scope = scope.find_submodule(name) - + if debug_find_module: print("...scope = %s" % scope) if not scope.pxd_file_loaded: @@ -210,16 +210,16 @@ class Context(object): if debug_find_module: print("......found %s" % pxd_pathname) if not pxd_pathname and need_pxd: - # Set pxd_file_loaded such that we don't need to - # look for the non-existing pxd file next time. - scope.pxd_file_loaded = True - package_pathname = self.search_include_directories(qualified_name, ".py", pos) + # Set pxd_file_loaded such that we don't need to + # look for the non-existing pxd file next time. + scope.pxd_file_loaded = True + package_pathname = self.search_include_directories(qualified_name, ".py", pos) if package_pathname and package_pathname.endswith('__init__.py'): pass else: - error(pos, "'%s.pxd' not found" % qualified_name.replace('.', os.sep)) + error(pos, "'%s.pxd' not found" % qualified_name.replace('.', os.sep)) if pxd_pathname: - scope.pxd_file_loaded = True + scope.pxd_file_loaded = True try: if debug_find_module: print("Context.find_module: Parsing %s" % pxd_pathname) @@ -229,7 +229,7 @@ class Context(object): if Options.source_root: rel_path = os.path.relpath(pxd_pathname, Options.source_root) source_desc = FileSourceDescriptor(pxd_pathname, rel_path) - err, result = self.process_pxd(source_desc, scope, qualified_name) + err, result = self.process_pxd(source_desc, scope, qualified_name) if err: raise err (pxd_codenodes, pxd_scope) = result @@ -239,15 +239,15 @@ class Context(object): return scope def find_pxd_file(self, qualified_name, pos, sys_path=False): - # Search include path (and sys.path if sys_path is True) for - # the .pxd file corresponding to the given fully-qualified - # module name. + # Search include path (and sys.path if sys_path is True) for + # the .pxd file corresponding to the given fully-qualified + # module name. # Will find either a dotted filename or a file in a # package directory. If a source file position is given, # the directory containing the source file is searched first # for a dotted filename, and its containing package root # directory is searched first for a non-dotted filename. 
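For context: the comment block above spells out the `.pxd` lookup order: scan the include path (plus `sys.path` when `sys_path=True`) for either a dotted filename or a package-relative path. A simplified sketch of that search, assuming plain directory scanning in place of Cython's cached `search_include_directories()`:

# Simplified .pxd lookup: per include directory, try the dotted
# filename first, then the package-style nested path.
import os

def find_pxd(qualified_name, include_dirs):
    dotted = qualified_name + ".pxd"                            # pkg.mod.pxd
    nested = os.path.join(*qualified_name.split(".")) + ".pxd"  # pkg/mod.pxd
    for dirname in include_dirs:
        for candidate in (dotted, nested):
            path = os.path.join(dirname, candidate)
            if os.path.isfile(path):
                return path
    return None  # the caller then falls back to the bundled Includes/ tree or errors out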
- pxd = self.search_include_directories(qualified_name, ".pxd", pos, sys_path=sys_path) + pxd = self.search_include_directories(qualified_name, ".pxd", pos, sys_path=sys_path) if pxd is None: # XXX Keep this until Includes/Deprecated is removed if (qualified_name.startswith('python') or qualified_name in ('stdlib', 'stdio', 'stl')): @@ -287,13 +287,13 @@ class Context(object): def search_include_directories(self, qualified_name, suffix, pos, include=False, sys_path=False): - include_dirs = self.include_directories - if sys_path: - include_dirs = include_dirs + sys.path - # include_dirs must be hashable for caching in @cached_function - include_dirs = tuple(include_dirs + [standard_include_path]) - return search_include_directories(include_dirs, qualified_name, - suffix, pos, include) + include_dirs = self.include_directories + if sys_path: + include_dirs = include_dirs + sys.path + # include_dirs must be hashable for caching in @cached_function + include_dirs = tuple(include_dirs + [standard_include_path]) + return search_include_directories(include_dirs, qualified_name, + suffix, pos, include) def find_root_package_dir(self, file_path): return Utils.find_root_package_dir(file_path) @@ -301,10 +301,10 @@ class Context(object): def check_package_dir(self, dir, package_names): return Utils.check_package_dir(dir, tuple(package_names)) - def c_file_out_of_date(self, source_path, output_path): - if not os.path.exists(output_path): + def c_file_out_of_date(self, source_path, output_path): + if not os.path.exists(output_path): return 1 - c_time = Utils.modification_time(output_path) + c_time = Utils.modification_time(output_path) if Utils.file_newer_than(source_path, c_time): return 1 pos = [source_path] @@ -362,44 +362,44 @@ class Context(object): # Parse the given source file and return a parse tree. num_errors = Errors.num_errors try: - with Utils.open_source_file(source_filename) as f: + with Utils.open_source_file(source_filename) as f: from . 
import Parsing s = PyrexScanner(f, source_desc, source_encoding = f.encoding, scope = scope, context = self) tree = Parsing.p_module(s, pxd, full_module_name) - if self.options.formal_grammar: - try: - from ..Parser import ConcreteSyntaxTree - except ImportError: - raise RuntimeError( + if self.options.formal_grammar: + try: + from ..Parser import ConcreteSyntaxTree + except ImportError: + raise RuntimeError( "Formal grammar can only be used with compiled Cython with an available pgen.") - ConcreteSyntaxTree.p_module(source_filename) - except UnicodeDecodeError as e: + ConcreteSyntaxTree.p_module(source_filename) + except UnicodeDecodeError as e: #import traceback #traceback.print_exc() - raise self._report_decode_error(source_desc, e) - - if Errors.num_errors > num_errors: - raise CompileError() - return tree - - def _report_decode_error(self, source_desc, exc): - msg = exc.args[-1] - position = exc.args[2] - encoding = exc.args[0] - - line = 1 - column = idx = 0 - with io.open(source_desc.filename, "r", encoding='iso8859-1', newline='') as f: - for line, data in enumerate(f, 1): - idx += len(data) - if idx >= position: - column = position - (idx - len(data)) + 1 + raise self._report_decode_error(source_desc, e) + + if Errors.num_errors > num_errors: + raise CompileError() + return tree + + def _report_decode_error(self, source_desc, exc): + msg = exc.args[-1] + position = exc.args[2] + encoding = exc.args[0] + + line = 1 + column = idx = 0 + with io.open(source_desc.filename, "r", encoding='iso8859-1', newline='') as f: + for line, data in enumerate(f, 1): + idx += len(data) + if idx >= position: + column = position - (idx - len(data)) + 1 break - return error((source_desc, line, column), - "Decoding error, missing or incorrect coding=<encoding-name> " - "at top of source (cannot decode with encoding %r: %s)" % (encoding, msg)) + return error((source_desc, line, column), + "Decoding error, missing or incorrect coding=<encoding-name> " + "at top of source (cannot decode with encoding %r: %s)" % (encoding, msg)) def extract_module_name(self, path, options): # Find fully_qualified module name from the full pathname @@ -419,9 +419,9 @@ class Context(object): return ".".join(names) def setup_errors(self, options, result): - Errors.reset() # clear any remaining error state + Errors.reset() # clear any remaining error state if options.use_listing_file: - path = result.listing_file = Utils.replace_suffix(result.main_source_file, ".lis") + path = result.listing_file = Utils.replace_suffix(result.main_source_file, ".lis") else: path = None Errors.open_listing_file(path=path, @@ -443,30 +443,30 @@ class Context(object): result.c_file = None -def get_output_filename(source_filename, cwd, options): - if options.cplus: - c_suffix = ".cpp" - else: - c_suffix = ".c" - suggested_file_name = Utils.replace_suffix(source_filename, c_suffix) - if options.output_file: - out_path = os.path.join(cwd, options.output_file) - if os.path.isdir(out_path): - return os.path.join(out_path, os.path.basename(suggested_file_name)) - else: - return out_path - else: - return suggested_file_name - +def get_output_filename(source_filename, cwd, options): + if options.cplus: + c_suffix = ".cpp" + else: + c_suffix = ".c" + suggested_file_name = Utils.replace_suffix(source_filename, c_suffix) + if options.output_file: + out_path = os.path.join(cwd, options.output_file) + if os.path.isdir(out_path): + return os.path.join(out_path, os.path.basename(suggested_file_name)) + else: + return out_path + else: + return 
suggested_file_name + def create_default_resultobj(compilation_source, options): result = CompilationResult() result.main_source_file = compilation_source.source_desc.filename result.compilation_source = compilation_source source_desc = compilation_source.source_desc - result.c_file = get_output_filename(source_desc.filename, - compilation_source.cwd, options) - result.embedded_metadata = options.embedded_metadata + result.c_file = get_output_filename(source_desc.filename, + compilation_source.cwd, options) + result.embedded_metadata = options.embedded_metadata return result @@ -481,10 +481,10 @@ def run_pipeline(source, options, full_module_name=None, context=None): # Set up source object cwd = os.getcwd() abs_path = os.path.abspath(source) - full_module_name = full_module_name or options.module_name or context.extract_module_name(source, options) + full_module_name = full_module_name or options.module_name or context.extract_module_name(source, options) + + Utils.raise_error_if_module_name_forbidden(full_module_name) - Utils.raise_error_if_module_name_forbidden(full_module_name) - if options.relative_path_in_code_position_comments: rel_path = full_module_name.replace('.', os.sep) + source_ext if not abs_path.endswith(rel_path): @@ -503,9 +503,9 @@ def run_pipeline(source, options, full_module_name=None, context=None): # By default, decide based on whether an html file already exists. html_filename = os.path.splitext(result.c_file)[0] + ".html" if os.path.exists(html_filename): - with io.open(html_filename, "r", encoding="UTF-8") as html_file: - if u'<!-- Generated by Cython' in html_file.read(100): - options.annotate = True + with io.open(html_filename, "r", encoding="UTF-8") as html_file: + if u'<!-- Generated by Cython' in html_file.read(100): + options.annotate = True # Get pipeline if source_ext.lower() == '.py' or not source_ext: @@ -537,10 +537,10 @@ class CompilationSource(object): class CompilationOptions(object): - r""" - See default_options at the end of this module for a list of all possible - options and CmdLine.usage and CmdLine.parse_command_line() for their - meaning. + r""" + See default_options at the end of this module for a list of all possible + options and CmdLine.usage and CmdLine.parse_command_line() for their + meaning. 
""" def __init__(self, defaults=None, **kw): self.include_path = [] @@ -559,15 +559,15 @@ class CompilationOptions(object): # ignore valid options that are not in the defaults unknown_options.difference_update(['include_path']) if unknown_options: - message = "got unknown compilation option%s, please remove: %s" % ( + message = "got unknown compilation option%s, please remove: %s" % ( 's' if len(unknown_options) > 1 else '', - ', '.join(unknown_options)) + ', '.join(unknown_options)) raise ValueError(message) - directive_defaults = Options.get_directive_defaults() - directives = dict(options['compiler_directives']) # copy mutable field + directive_defaults = Options.get_directive_defaults() + directives = dict(options['compiler_directives']) # copy mutable field # check for invalid directives - unknown_directives = set(directives) - set(directive_defaults) + unknown_directives = set(directives) - set(directive_defaults) if unknown_directives: message = "got unknown compiler directive%s: %s" % ( 's' if len(unknown_directives) > 1 else '', @@ -579,13 +579,13 @@ class CompilationOptions(object): warnings.warn("C++ mode forced when in Pythran mode!") options['cplus'] = True if 'language_level' in directives and 'language_level' not in kw: - options['language_level'] = directives['language_level'] - elif not options.get('language_level'): - options['language_level'] = directive_defaults.get('language_level') - if 'formal_grammar' in directives and 'formal_grammar' not in kw: - options['formal_grammar'] = directives['formal_grammar'] - if options['cache'] is True: - options['cache'] = os.path.join(Utils.get_cython_cache_dir(), 'compiler') + options['language_level'] = directives['language_level'] + elif not options.get('language_level'): + options['language_level'] = directive_defaults.get('language_level') + if 'formal_grammar' in directives and 'formal_grammar' not in kw: + options['formal_grammar'] = directives['formal_grammar'] + if options['cache'] is True: + options['cache'] = os.path.join(Utils.get_cython_cache_dir(), 'compiler') self.__dict__.update(options) @@ -598,84 +598,84 @@ class CompilationOptions(object): return Context(self.include_path, self.compiler_directives, self.cplus, self.language_level, options=self) - def get_fingerprint(self): - r""" - Return a string that contains all the options that are relevant for cache invalidation. - """ - # Collect only the data that can affect the generated file(s). 
- data = {} - - for key, value in self.__dict__.items(): - if key in ['show_version', 'errors_to_stderr', 'verbose', 'quiet']: - # verbosity flags have no influence on the compilation result - continue - elif key in ['output_file', 'output_dir']: - # ignore the exact name of the output file - continue - elif key in ['timestamps']: - # the cache cares about the content of files, not about the timestamps of sources - continue - elif key in ['cache']: - # hopefully caching has no influence on the compilation result - continue - elif key in ['compiler_directives']: - # directives passed on to the C compiler do not influence the generated C code - continue - elif key in ['include_path']: - # this path changes which headers are tracked as dependencies, - # it has no influence on the generated C code - continue - elif key in ['working_path']: - # this path changes where modules and pxd files are found; - # their content is part of the fingerprint anyway, their - # absolute path does not matter - continue - elif key in ['create_extension']: - # create_extension() has already mangled the options, e.g., - # embedded_metadata, when the fingerprint is computed so we - # ignore it here. - continue - elif key in ['build_dir']: - # the (temporary) directory where we collect dependencies - # has no influence on the C output - continue - elif key in ['use_listing_file', 'generate_pxi', 'annotate', 'annotate_coverage_xml']: - # all output files are contained in the cache so the types of - # files generated must be part of the fingerprint - data[key] = value - elif key in ['formal_grammar', 'evaluate_tree_assertions']: - # these bits can change whether compilation to C passes/fails - data[key] = value - elif key in ['embedded_metadata', 'emit_linenums', 'c_line_in_traceback', 'gdb_debug', 'relative_path_in_code_position_comments']: - # the generated code contains additional bits when these are set - data[key] = value - elif key in ['cplus', 'language_level', 'compile_time_env', 'np_pythran']: - # assorted bits that, e.g., influence the parser - data[key] = value - elif key == ['capi_reexport_cincludes']: - if self.capi_reexport_cincludes: - # our caching implementation does not yet include fingerprints of all the header files - raise NotImplementedError('capi_reexport_cincludes is not compatible with Cython caching') - elif key == ['common_utility_include_dir']: - if self.common_utility_include_dir: - raise NotImplementedError('common_utility_include_dir is not compatible with Cython caching yet') - else: - # any unexpected option should go into the fingerprint; it's better - # to recompile than to return incorrect results from the cache. - data[key] = value - - def to_fingerprint(item): - r""" - Recursively turn item into a string, turning dicts into lists with - deterministic ordering. - """ - if isinstance(item, dict): - item = sorted([(repr(key), to_fingerprint(value)) for key, value in item.items()]) - return repr(item) - - return to_fingerprint(data) - - + def get_fingerprint(self): + r""" + Return a string that contains all the options that are relevant for cache invalidation. + """ + # Collect only the data that can affect the generated file(s). 
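# Aside: a minimal, self-contained sketch of the fingerprinting idea in
# get_fingerprint() above (an illustration, not Cython's actual code): drop
# options that cannot change the generated C file, then serialize what is
# left with deterministic dict ordering so equal option sets compare equal.
def _to_fingerprint(item):
    # dicts have no stable ordering across runs; sort the (key, value) pairs
    if isinstance(item, dict):
        item = sorted((repr(k), _to_fingerprint(v)) for k, v in item.items())
    return repr(item)

def _options_fingerprint(options, ignored=('quiet', 'verbose', 'output_file', 'cache')):
    # 'ignored' stands in for the verbosity/output keys skipped above
    return _to_fingerprint({k: v for k, v in options.items() if k not in ignored})

# _options_fingerprint({'cplus': 0, 'quiet': 1}) ==
# _options_fingerprint({'cplus': 0, 'quiet': 0})  -> True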
+ data = {} + + for key, value in self.__dict__.items(): + if key in ['show_version', 'errors_to_stderr', 'verbose', 'quiet']: + # verbosity flags have no influence on the compilation result + continue + elif key in ['output_file', 'output_dir']: + # ignore the exact name of the output file + continue + elif key in ['timestamps']: + # the cache cares about the content of files, not about the timestamps of sources + continue + elif key in ['cache']: + # hopefully caching has no influence on the compilation result + continue + elif key in ['compiler_directives']: + # directives passed on to the C compiler do not influence the generated C code + continue + elif key in ['include_path']: + # this path changes which headers are tracked as dependencies, + # it has no influence on the generated C code + continue + elif key in ['working_path']: + # this path changes where modules and pxd files are found; + # their content is part of the fingerprint anyway, their + # absolute path does not matter + continue + elif key in ['create_extension']: + # create_extension() has already mangled the options, e.g., + # embedded_metadata, when the fingerprint is computed so we + # ignore it here. + continue + elif key in ['build_dir']: + # the (temporary) directory where we collect dependencies + # has no influence on the C output + continue + elif key in ['use_listing_file', 'generate_pxi', 'annotate', 'annotate_coverage_xml']: + # all output files are contained in the cache so the types of + # files generated must be part of the fingerprint + data[key] = value + elif key in ['formal_grammar', 'evaluate_tree_assertions']: + # these bits can change whether compilation to C passes/fails + data[key] = value + elif key in ['embedded_metadata', 'emit_linenums', 'c_line_in_traceback', 'gdb_debug', 'relative_path_in_code_position_comments']: + # the generated code contains additional bits when these are set + data[key] = value + elif key in ['cplus', 'language_level', 'compile_time_env', 'np_pythran']: + # assorted bits that, e.g., influence the parser + data[key] = value + elif key == ['capi_reexport_cincludes']: + if self.capi_reexport_cincludes: + # our caching implementation does not yet include fingerprints of all the header files + raise NotImplementedError('capi_reexport_cincludes is not compatible with Cython caching') + elif key == ['common_utility_include_dir']: + if self.common_utility_include_dir: + raise NotImplementedError('common_utility_include_dir is not compatible with Cython caching yet') + else: + # any unexpected option should go into the fingerprint; it's better + # to recompile than to return incorrect results from the cache. + data[key] = value + + def to_fingerprint(item): + r""" + Recursively turn item into a string, turning dicts into lists with + deterministic ordering. 
+ """ + if isinstance(item, dict): + item = sorted([(repr(key), to_fingerprint(value)) for key, value in item.items()]) + return repr(item) + + return to_fingerprint(data) + + class CompilationResult(object): """ Results from the Cython compiler: @@ -745,14 +745,14 @@ def compile_multiple(sources, options): timestamps = options.timestamps verbose = options.verbose context = None - cwd = os.getcwd() + cwd = os.getcwd() for source in sources: if source not in processed: if context is None: context = options.create_context() - output_filename = get_output_filename(source, cwd, options) - out_of_date = context.c_file_out_of_date(source, output_filename) - if (not timestamps) or out_of_date: + output_filename = get_output_filename(source, cwd, options) + out_of_date = context.c_file_out_of_date(source, output_filename) + if (not timestamps) or out_of_date: if verbose: sys.stderr.write("Compiling %s\n" % source) @@ -782,71 +782,71 @@ def compile(source, options = None, full_module_name = None, **kwds): return compile_multiple(source, options) -@Utils.cached_function -def search_include_directories(dirs, qualified_name, suffix, pos, include=False): - """ - Search the list of include directories for the given file name. - - If a source file position is given, first searches the directory - containing that file. Returns None if not found, but does not - report an error. - - The 'include' option will disable package dereferencing. - """ - - if pos: - file_desc = pos[0] - if not isinstance(file_desc, FileSourceDescriptor): - raise RuntimeError("Only file sources for code supported") - if include: - dirs = (os.path.dirname(file_desc.filename),) + dirs - else: - dirs = (Utils.find_root_package_dir(file_desc.filename),) + dirs - - dotted_filename = qualified_name - if suffix: - dotted_filename += suffix - - if not include: - names = qualified_name.split('.') - package_names = tuple(names[:-1]) - module_name = names[-1] - module_filename = module_name + suffix - package_filename = "__init__" + suffix - - for dirname in dirs: - path = os.path.join(dirname, dotted_filename) - if os.path.exists(path): - return path - - if not include: - package_dir = Utils.check_package_dir(dirname, package_names) - if package_dir is not None: - path = os.path.join(package_dir, module_filename) - if os.path.exists(path): - return path - path = os.path.join(package_dir, module_name, - package_filename) - if os.path.exists(path): - return path - - # Arcadia-specific lookup: search for packages in include paths, - # ignoring existence of __init__.py files as packages markers - # (they are not required by Arcadia build system) - if not include: - for dir in dirs: - package_dir = os.path.join(dir, *package_names) - path = os.path.join(package_dir, module_filename) - if os.path.exists(path): - return path - path = os.path.join(dir, package_dir, module_name, - package_filename) - if os.path.exists(path): - return path - - return None - - +@Utils.cached_function +def search_include_directories(dirs, qualified_name, suffix, pos, include=False): + """ + Search the list of include directories for the given file name. + + If a source file position is given, first searches the directory + containing that file. Returns None if not found, but does not + report an error. + + The 'include' option will disable package dereferencing. 
+ """ + + if pos: + file_desc = pos[0] + if not isinstance(file_desc, FileSourceDescriptor): + raise RuntimeError("Only file sources for code supported") + if include: + dirs = (os.path.dirname(file_desc.filename),) + dirs + else: + dirs = (Utils.find_root_package_dir(file_desc.filename),) + dirs + + dotted_filename = qualified_name + if suffix: + dotted_filename += suffix + + if not include: + names = qualified_name.split('.') + package_names = tuple(names[:-1]) + module_name = names[-1] + module_filename = module_name + suffix + package_filename = "__init__" + suffix + + for dirname in dirs: + path = os.path.join(dirname, dotted_filename) + if os.path.exists(path): + return path + + if not include: + package_dir = Utils.check_package_dir(dirname, package_names) + if package_dir is not None: + path = os.path.join(package_dir, module_filename) + if os.path.exists(path): + return path + path = os.path.join(package_dir, module_name, + package_filename) + if os.path.exists(path): + return path + + # Arcadia-specific lookup: search for packages in include paths, + # ignoring existence of __init__.py files as packages markers + # (they are not required by Arcadia build system) + if not include: + for dir in dirs: + package_dir = os.path.join(dir, *package_names) + path = os.path.join(package_dir, module_filename) + if os.path.exists(path): + return path + path = os.path.join(dir, package_dir, module_name, + package_filename) + if os.path.exists(path): + return path + + return None + + # ------------------------------------------------------------------------ # # Main command-line entry point @@ -875,7 +875,7 @@ def main(command_line = 0): result = compile(sources, options) if result.num_errors > 0: any_failures = 1 - except (EnvironmentError, PyrexError) as e: + except (EnvironmentError, PyrexError) as e: sys.stderr.write(str(e) + '\n') any_failures = 1 if any_failures: @@ -895,7 +895,7 @@ default_options = dict( cplus = 0, output_file = None, annotate = None, - annotate_coverage_xml = None, + annotate_coverage_xml = None, generate_pxi = 0, capi_reexport_cincludes = 0, working_path = "", @@ -903,21 +903,21 @@ default_options = dict( verbose = 0, quiet = 0, compiler_directives = {}, - embedded_metadata = {}, + embedded_metadata = {}, evaluate_tree_assertions = False, emit_linenums = False, relative_path_in_code_position_comments = True, c_line_in_traceback = True, - language_level = None, # warn but default to 2 - formal_grammar = False, + language_level = None, # warn but default to 2 + formal_grammar = False, gdb_debug = False, - module_name = None, - init_suffix = None, + module_name = None, + init_suffix = None, compile_time_env = None, common_utility_include_dir = None, output_dir=None, build_dir=None, - cache=None, + cache=None, create_extension=None, np_pythran=False ) diff --git a/contrib/tools/cython/Cython/Compiler/MemoryView.py b/contrib/tools/cython/Cython/Compiler/MemoryView.py index 9974bb4db3..0406d6c716 100644 --- a/contrib/tools/cython/Cython/Compiler/MemoryView.py +++ b/contrib/tools/cython/Cython/Compiler/MemoryView.py @@ -25,7 +25,7 @@ ERR_UNINITIALIZED = ("Cannot check if memoryview %s is initialized without the " def concat_flags(*flags): return "(%s)" % "|".join(flags) - + format_flag = "PyBUF_FORMAT" memview_c_contiguous = "(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT)" @@ -67,7 +67,7 @@ memview_typeptr_cname = '__pyx_memoryview_type' memview_objstruct_cname = '__pyx_memoryview_obj' memviewslice_cname = u'__Pyx_memviewslice' - + def put_init_entry(mv_cname, code): 
code.putln("%s.data = NULL;" % mv_cname) code.putln("%s.memview = NULL;" % mv_cname) @@ -76,7 +76,7 @@ def put_init_entry(mv_cname, code): #def axes_to_str(axes): # return "".join([access[0].upper()+packing[0] for (access, packing) in axes]) - + def put_acquire_memoryviewslice(lhs_cname, lhs_type, lhs_pos, rhs, code, have_gil=False, first_assignment=True): "We can avoid decreffing the lhs if we know it is the first assignment" @@ -97,7 +97,7 @@ def put_acquire_memoryviewslice(lhs_cname, lhs_type, lhs_pos, rhs, code, if not pretty_rhs: code.funcstate.release_temp(rhstmp) - + def put_assign_to_memviewslice(lhs_cname, rhs, rhs_cname, memviewslicetype, code, have_gil=False, first_assignment=False): if not first_assignment: @@ -108,7 +108,7 @@ def put_assign_to_memviewslice(lhs_cname, rhs, rhs_cname, memviewslicetype, code code.putln("%s = %s;" % (lhs_cname, rhs_cname)) - + def get_buf_flags(specs): is_c_contig, is_f_contig = is_cf_contig(specs) @@ -124,13 +124,13 @@ def get_buf_flags(specs): else: return memview_strided_access - + def insert_newaxes(memoryviewtype, n): axes = [('direct', 'strided')] * n axes.extend(memoryviewtype.axes) return PyrexTypes.MemoryViewSliceType(memoryviewtype.dtype, axes) - + def broadcast_types(src, dst): n = abs(src.ndim - dst.ndim) if src.ndim < dst.ndim: @@ -173,20 +173,20 @@ def valid_memslice_dtype(dtype, i=0): class MemoryViewSliceBufferEntry(Buffer.BufferEntry): - """ - May be used during code generation time to be queried for - shape/strides/suboffsets attributes, or to perform indexing or slicing. - """ + """ + May be used during code generation time to be queried for + shape/strides/suboffsets attributes, or to perform indexing or slicing. + """ def __init__(self, entry): self.entry = entry self.type = entry.type self.cname = entry.cname - + self.buf_ptr = "%s.data" % self.cname dtype = self.entry.type.dtype - self.buf_ptr_type = PyrexTypes.CPtrType(dtype) - self.init_attributes() + self.buf_ptr_type = PyrexTypes.CPtrType(dtype) + self.init_attributes() def get_buf_suboffsetvars(self): return self._for_all_ndim("%s.suboffsets[%d]") @@ -203,12 +203,12 @@ class MemoryViewSliceBufferEntry(Buffer.BufferEntry): return self._generate_buffer_lookup_code(code, axes) def _generate_buffer_lookup_code(self, code, axes, cast_result=True): - """ - Generate a single expression that indexes the memory view slice - in each dimension. - """ + """ + Generate a single expression that indexes the memory view slice + in each dimension. + """ bufp = self.buf_ptr - type_decl = self.type.dtype.empty_declaration_code() + type_decl = self.type.dtype.empty_declaration_code() for dim, index, access, packing in axes: shape = "%s.shape[%d]" % (self.cname, dim) @@ -257,9 +257,9 @@ class MemoryViewSliceBufferEntry(Buffer.BufferEntry): then it must be coercible to Py_ssize_t Simply call __pyx_memoryview_slice_memviewslice with the right - arguments, unless the dimension is omitted or a bare ':', in which - case we copy over the shape/strides/suboffsets attributes directly - for that dimension. + arguments, unless the dimension is omitted or a bare ':', in which + case we copy over the shape/strides/suboffsets attributes directly + for that dimension. 
""" src = self.cname @@ -267,46 +267,46 @@ class MemoryViewSliceBufferEntry(Buffer.BufferEntry): code.putln("%(dst)s.memview = %(src)s.memview;" % locals()) code.put_incref_memoryviewslice(dst) - all_dimensions_direct = all(access == 'direct' for access, packing in self.type.axes) - suboffset_dim_temp = [] - - def get_suboffset_dim(): - # create global temp variable at request - if not suboffset_dim_temp: - suboffset_dim = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False) - code.putln("%s = -1;" % suboffset_dim) - suboffset_dim_temp.append(suboffset_dim) - return suboffset_dim_temp[0] - + all_dimensions_direct = all(access == 'direct' for access, packing in self.type.axes) + suboffset_dim_temp = [] + + def get_suboffset_dim(): + # create global temp variable at request + if not suboffset_dim_temp: + suboffset_dim = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False) + code.putln("%s = -1;" % suboffset_dim) + suboffset_dim_temp.append(suboffset_dim) + return suboffset_dim_temp[0] + dim = -1 - new_ndim = 0 + new_ndim = 0 for index in indices: - if index.is_none: - # newaxis - for attrib, value in [('shape', 1), ('strides', 0), ('suboffsets', -1)]: - code.putln("%s.%s[%d] = %d;" % (dst, attrib, new_ndim, value)) - - new_ndim += 1 - continue - - dim += 1 - access, packing = self.type.axes[dim] + if index.is_none: + # newaxis + for attrib, value in [('shape', 1), ('strides', 0), ('suboffsets', -1)]: + code.putln("%s.%s[%d] = %d;" % (dst, attrib, new_ndim, value)) + + new_ndim += 1 + continue + + dim += 1 + access, packing = self.type.axes[dim] if isinstance(index, ExprNodes.SliceNode): # slice, unspecified dimension, or part of ellipsis - d = dict(locals()) + d = dict(locals()) for s in "start stop step".split(): idx = getattr(index, s) have_idx = d['have_' + s] = not idx.is_none - d[s] = idx.result() if have_idx else "0" + d[s] = idx.result() if have_idx else "0" - if not (d['have_start'] or d['have_stop'] or d['have_step']): + if not (d['have_start'] or d['have_stop'] or d['have_step']): # full slice (:), simply copy over the extent, stride # and suboffset. 
Also update suboffset_dim if needed d['access'] = access - util_name = "SimpleSlice" + util_name = "SimpleSlice" else: - util_name = "ToughSlice" + util_name = "ToughSlice" d['error_goto'] = code.error_goto(index.pos) new_ndim += 1 @@ -314,42 +314,42 @@ class MemoryViewSliceBufferEntry(Buffer.BufferEntry): # normal index idx = index.result() - indirect = access != 'direct' - if indirect: - generic = access == 'full' + indirect = access != 'direct' + if indirect: + generic = access == 'full' if new_ndim != 0: return error(index.pos, "All preceding dimensions must be " "indexed and not sliced") - d = dict( - locals(), - wraparound=int(directives['wraparound']), + d = dict( + locals(), + wraparound=int(directives['wraparound']), boundscheck=int(directives['boundscheck']), - ) + ) if d['boundscheck']: d['error_goto'] = code.error_goto(index.pos) - util_name = "SliceIndex" + util_name = "SliceIndex" + + _, impl = TempitaUtilityCode.load_as_string(util_name, "MemoryView_C.c", context=d) + code.put(impl) - _, impl = TempitaUtilityCode.load_as_string(util_name, "MemoryView_C.c", context=d) - code.put(impl) + if suboffset_dim_temp: + code.funcstate.release_temp(suboffset_dim_temp[0]) - if suboffset_dim_temp: - code.funcstate.release_temp(suboffset_dim_temp[0]) - def empty_slice(pos): none = ExprNodes.NoneNode(pos) return ExprNodes.SliceNode(pos, start=none, stop=none, step=none) - -def unellipsify(indices, ndim): + +def unellipsify(indices, ndim): result = [] seen_ellipsis = False have_slices = False - newaxes = [newaxis for newaxis in indices if newaxis.is_none] + newaxes = [newaxis for newaxis in indices if newaxis.is_none] n_indices = len(indices) - len(newaxes) for index in indices: @@ -364,7 +364,7 @@ def unellipsify(indices, ndim): result.extend([full_slice] * nslices) seen_ellipsis = True else: - have_slices = have_slices or index.is_slice or index.is_none + have_slices = have_slices or index.is_slice or index.is_none result.append(index) result_length = len(result) - len(newaxes) @@ -373,9 +373,9 @@ def unellipsify(indices, ndim): nslices = ndim - result_length result.extend([empty_slice(indices[-1].pos)] * nslices) - return have_slices, result, newaxes + return have_slices, result, newaxes + - def get_memoryview_flag(access, packing): if access == 'full' and packing in ('strided', 'follow'): return 'generic' @@ -391,47 +391,47 @@ def get_memoryview_flag(access, packing): assert (access, packing) == ('direct', 'contig'), (access, packing) return 'contiguous' - + def get_is_contig_func_name(contig_type, ndim): assert contig_type in ('C', 'F') return "__pyx_memviewslice_is_contig_%s%d" % (contig_type, ndim) - + def get_is_contig_utility(contig_type, ndim): assert contig_type in ('C', 'F') C = dict(context, ndim=ndim, contig_type=contig_type) utility = load_memview_c_utility("MemviewSliceCheckContig", C, requires=[is_contig_utility]) return utility - -def slice_iter(slice_type, slice_result, ndim, code): - if slice_type.is_c_contig or slice_type.is_f_contig: - return ContigSliceIter(slice_type, slice_result, ndim, code) + +def slice_iter(slice_type, slice_result, ndim, code): + if slice_type.is_c_contig or slice_type.is_f_contig: + return ContigSliceIter(slice_type, slice_result, ndim, code) else: - return StridedSliceIter(slice_type, slice_result, ndim, code) + return StridedSliceIter(slice_type, slice_result, ndim, code) class SliceIter(object): - def __init__(self, slice_type, slice_result, ndim, code): + def __init__(self, slice_type, slice_result, ndim, code): self.slice_type = slice_type 
- self.slice_result = slice_result + self.slice_result = slice_result self.code = code self.ndim = ndim - + class ContigSliceIter(SliceIter): def start_loops(self): code = self.code code.begin_block() - type_decl = self.slice_type.dtype.empty_declaration_code() + type_decl = self.slice_type.dtype.empty_declaration_code() - total_size = ' * '.join("%s.shape[%d]" % (self.slice_result, i) - for i in range(self.ndim)) + total_size = ' * '.join("%s.shape[%d]" % (self.slice_result, i) + for i in range(self.ndim)) code.putln("Py_ssize_t __pyx_temp_extent = %s;" % total_size) code.putln("Py_ssize_t __pyx_temp_idx;") code.putln("%s *__pyx_temp_pointer = (%s *) %s.data;" % ( - type_decl, type_decl, self.slice_result)) + type_decl, type_decl, self.slice_result)) code.putln("for (__pyx_temp_idx = 0; " "__pyx_temp_idx < __pyx_temp_extent; " "__pyx_temp_idx++) {") @@ -443,20 +443,20 @@ class ContigSliceIter(SliceIter): self.code.putln("}") self.code.end_block() - + class StridedSliceIter(SliceIter): def start_loops(self): code = self.code code.begin_block() for i in range(self.ndim): - t = i, self.slice_result, i + t = i, self.slice_result, i code.putln("Py_ssize_t __pyx_temp_extent_%d = %s.shape[%d];" % t) code.putln("Py_ssize_t __pyx_temp_stride_%d = %s.strides[%d];" % t) code.putln("char *__pyx_temp_pointer_%d;" % i) code.putln("Py_ssize_t __pyx_temp_idx_%d;" % i) - code.putln("__pyx_temp_pointer_0 = %s.data;" % self.slice_result) + code.putln("__pyx_temp_pointer_0 = %s.data;" % self.slice_result) for i in range(self.ndim): if i > 0: @@ -486,23 +486,23 @@ def copy_c_or_fortran_cname(memview): return "__pyx_memoryview_copy_slice_%s_%s" % ( memview.specialization_suffix(), c_or_f) - + def get_copy_new_utility(pos, from_memview, to_memview): - if (from_memview.dtype != to_memview.dtype and - not (from_memview.dtype.is_const and from_memview.dtype.const_base_type == to_memview.dtype)): - error(pos, "dtypes must be the same!") - return + if (from_memview.dtype != to_memview.dtype and + not (from_memview.dtype.is_const and from_memview.dtype.const_base_type == to_memview.dtype)): + error(pos, "dtypes must be the same!") + return if len(from_memview.axes) != len(to_memview.axes): - error(pos, "number of dimensions must be same") - return + error(pos, "number of dimensions must be same") + return if not (to_memview.is_c_contig or to_memview.is_f_contig): - error(pos, "to_memview must be c or f contiguous.") - return + error(pos, "to_memview must be c or f contiguous.") + return for (access, packing) in from_memview.axes: if access != 'direct': - error(pos, "cannot handle 'full' or 'ptr' access at this time.") - return + error(pos, "cannot handle 'full' or 'ptr' access at this time.") + return if to_memview.is_c_contig: mode = 'c' @@ -516,14 +516,14 @@ def get_copy_new_utility(pos, from_memview, to_memview): context=dict( context, mode=mode, - dtype_decl=to_memview.dtype.empty_declaration_code(), + dtype_decl=to_memview.dtype.empty_declaration_code(), contig_flag=contig_flag, ndim=to_memview.ndim, func_cname=copy_c_or_fortran_cname(to_memview), dtype_is_object=int(to_memview.dtype.is_pyobject)), requires=[copy_contents_new_utility]) - + def get_axes_specs(env, axes): ''' get_axes_specs(env, axes) -> list of (access, packing) specs for each axis. 
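get_axes_specs() reduces each memoryview axis to an (access, packing) pair, and those pairs decide both the PyBUF_* flags requested from the buffer exporter and the indexing code generated later. A rough sketch of that mapping, simplified from the is_cf_contig()/get_buf_flags() logic above and not the exact implementation ('contig' on the last axis with 'follow' elsewhere means C-contiguous; the mirror image means Fortran-contiguous):

def classify_axes(specs):
    # specs: one (access, packing) pair per axis, e.g. double[:, ::1] gives
    # [('direct', 'follow'), ('direct', 'contig')]
    if specs and specs[-1] == ('direct', 'contig') and \
            all(s == ('direct', 'follow') for s in specs[:-1]):
        return "(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT)"  # memview_c_contiguous
    if specs and specs[0] == ('direct', 'contig') and \
            all(s == ('direct', 'follow') for s in specs[1:]):
        return "(PyBUF_F_CONTIGUOUS | PyBUF_FORMAT)"  # memview_f_contiguous
    return "(PyBUF_STRIDES | PyBUF_FORMAT)"           # generic strided access

print(classify_axes([('direct', 'follow'), ('direct', 'contig')]))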
@@ -569,7 +569,7 @@ def get_axes_specs(env, axes): if entry.name in view_constant_to_access_packing: axes_specs.append(view_constant_to_access_packing[entry.name]) else: - raise CompileError(axis.step.pos, INVALID_ERR) + raise CompileError(axis.step.pos, INVALID_ERR) else: raise CompileError(axis.step.pos, INVALID_ERR) @@ -704,7 +704,7 @@ def validate_axes_specs(positions, specs, is_c_contig, is_f_contig): if access == 'ptr': last_indirect_dimension = idx - for idx, (pos, (access, packing)) in enumerate(zip(positions, specs)): + for idx, (pos, (access, packing)) in enumerate(zip(positions, specs)): if not (access in access_specs and packing in packing_specs): diff --git a/contrib/tools/cython/Cython/Compiler/ModuleNode.py b/contrib/tools/cython/Cython/Compiler/ModuleNode.py index 0eb4a02ab3..cd7166408e 100644 --- a/contrib/tools/cython/Cython/Compiler/ModuleNode.py +++ b/contrib/tools/cython/Cython/Compiler/ModuleNode.py @@ -9,12 +9,12 @@ cython.declare(Naming=object, Options=object, PyrexTypes=object, TypeSlots=objec error=object, warning=object, py_object_type=object, UtilityCode=object, EncodedString=object, re=object) -from collections import defaultdict -import json -import operator +from collections import defaultdict +import json +import operator import os import re - + from .PyrexTypes import CPtrType from . import Future from . import Annotate @@ -28,7 +28,7 @@ from . import Pythran from .Errors import error, warning from .PyrexTypes import py_object_type -from ..Utils import open_new_file, replace_suffix, decode_filename, build_hex_version +from ..Utils import open_new_file, replace_suffix, decode_filename, build_hex_version from .Code import UtilityCode, IncludeCode from .StringEncoding import EncodedString from .Pythran import has_np_pythran @@ -37,25 +37,25 @@ def check_c_declarations_pxd(module_node): module_node.scope.check_c_classes_pxd() return module_node - + def check_c_declarations(module_node): module_node.scope.check_c_classes() module_node.scope.check_c_functions() return module_node - -def generate_c_code_config(env, options): - if Options.annotate or options.annotate: - emit_linenums = False - else: - emit_linenums = options.emit_linenums - - return Code.CCodeConfig( - emit_linenums=emit_linenums, - emit_code_comments=env.directives['emit_code_comments'], - c_line_in_traceback=options.c_line_in_traceback) - - + +def generate_c_code_config(env, options): + if Options.annotate or options.annotate: + emit_linenums = False + else: + emit_linenums = options.emit_linenums + + return Code.CCodeConfig( + emit_linenums=emit_linenums, + emit_code_comments=env.directives['emit_code_comments'], + c_line_in_traceback=options.c_line_in_traceback) + + class ModuleNode(Nodes.Node, Nodes.BlockNode): # doc string or None # body StatListNode @@ -108,13 +108,13 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): def analyse_declarations(self, env): if has_np_pythran(env): Pythran.include_pythran_generic(env) - if self.directives: - env.old_style_globals = self.directives['old_style_globals'] + if self.directives: + env.old_style_globals = self.directives['old_style_globals'] if not Options.docstrings: env.doc = self.doc = None elif Options.embed_pos_in_docstring: env.doc = EncodedString(u'File: %s (starting at line %s)' % Nodes.relative_position(self.pos)) - if self.doc is not None: + if self.doc is not None: env.doc = EncodedString(env.doc + u'\n' + self.doc) env.doc.encoding = self.doc.encoding else: @@ -123,17 +123,17 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): 
self.body.analyse_declarations(env) - def prepare_utility_code(self): - # prepare any utility code that must be created before code generation - # specifically: CythonUtilityCode - env = self.scope - if env.has_import_star: - self.create_import_star_conversion_utility_code(env) - for name, entry in sorted(env.entries.items()): - if (entry.create_wrapper and entry.scope is env - and entry.is_type and entry.type.is_enum): - entry.type.create_type_wrapper(env) - + def prepare_utility_code(self): + # prepare any utility code that must be created before code generation + # specifically: CythonUtilityCode + env = self.scope + if env.has_import_star: + self.create_import_star_conversion_utility_code(env) + for name, entry in sorted(env.entries.items()): + if (entry.create_wrapper and entry.scope is env + and entry.is_type and entry.type.is_enum): + entry.type.create_type_wrapper(env) + def process_implementation(self, options, result): env = self.scope env.return_type = PyrexTypes.c_void_type @@ -142,7 +142,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): self.sort_cdef_classes(env) self.generate_c_code(env, options, result) self.generate_h_code(env, options, result) - self.generate_api_code(env, options, result) + self.generate_api_code(env, options, result) def has_imported_c_functions(self): for module in self.referenced_modules: @@ -161,18 +161,18 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): h_vars = h_entries(env.var_entries) h_funcs = h_entries(env.cfunc_entries) h_extension_types = h_entries(env.c_class_entries) - if h_types or h_vars or h_funcs or h_extension_types: + if h_types or h_vars or h_funcs or h_extension_types: result.h_file = replace_suffix(result.c_file, ".h") h_code = Code.CCodeWriter() - c_code_config = generate_c_code_config(env, options) - Code.GlobalState(h_code, self, c_code_config) + c_code_config = generate_c_code_config(env, options) + Code.GlobalState(h_code, self, c_code_config) if options.generate_pxi: result.i_file = replace_suffix(result.c_file, ".pxi") i_code = Code.PyrexCodeWriter(result.i_file) else: i_code = None - h_code.put_generated_by() + h_code.put_generated_by() h_guard = Naming.h_guard_prefix + self.api_name(env) h_code.put_h_guard(h_guard) h_code.putln("") @@ -185,8 +185,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): h_code.putln("#ifndef %s" % api_guard) h_code.putln("") self.generate_extern_c_macro_definition(h_code) - h_code.putln("") - self.generate_dl_import_macro(h_code) + h_code.putln("") + self.generate_dl_import_macro(h_code) if h_extension_types: h_code.putln("") for entry in h_extension_types: @@ -208,8 +208,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): h_code.putln("/* It now returns a PyModuleDef instance instead of a PyModule instance. 
*/") h_code.putln("") h_code.putln("#if PY_MAJOR_VERSION < 3") - init_name = 'init' + (options.init_suffix or env.module_name) - h_code.putln("PyMODINIT_FUNC %s(void);" % init_name) + init_name = 'init' + (options.init_suffix or env.module_name) + h_code.putln("PyMODINIT_FUNC %s(void);" % init_name) h_code.putln("#else") h_code.putln("PyMODINIT_FUNC %s(void);" % self.mod_init_func_cname('PyInit', env, options)) h_code.putln("#endif") @@ -227,13 +227,13 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): Naming.extern_c_macro, entry.type.declaration_code(entry.cname))) if i_code: - i_code.putln("cdef extern %s" % ( - entry.type.declaration_code(entry.cname, pyrex=1))) + i_code.putln("cdef extern %s" % ( + entry.type.declaration_code(entry.cname, pyrex=1))) def api_name(self, env): return env.qualified_name.replace(".", "__") - def generate_api_code(self, env, options, result): + def generate_api_code(self, env, options, result): def api_entries(entries, pxd=0): return [entry for entry in entries if entry.api or (pxd and entry.defined_in_pxd)] @@ -243,16 +243,16 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if api_vars or api_funcs or api_extension_types: result.api_file = replace_suffix(result.c_file, "_api.h") h_code = Code.CCodeWriter() - c_code_config = generate_c_code_config(env, options) - Code.GlobalState(h_code, self, c_code_config) - h_code.put_generated_by() + c_code_config = generate_c_code_config(env, options) + Code.GlobalState(h_code, self, c_code_config) + h_code.put_generated_by() api_guard = Naming.api_guard_prefix + self.api_name(env) h_code.put_h_guard(api_guard) - # Work around https://bugs.python.org/issue4709 - h_code.putln('#ifdef __MINGW64__') - h_code.putln('#define MS_WIN64') - h_code.putln('#endif') - + # Work around https://bugs.python.org/issue4709 + h_code.putln('#ifdef __MINGW64__') + h_code.putln('#define MS_WIN64') + h_code.putln('#endif') + h_code.putln('#include "Python.h"') if result.h_file: h_code.putln('#include "%s"' % os.path.basename(result.h_file)) @@ -267,14 +267,14 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): h_code.putln("") for entry in api_funcs: type = CPtrType(entry.type) - cname = env.mangle(Naming.func_prefix_api, entry.name) + cname = env.mangle(Naming.func_prefix_api, entry.name) h_code.putln("static %s = 0;" % type.declaration_code(cname)) h_code.putln("#define %s %s" % (entry.name, cname)) if api_vars: h_code.putln("") for entry in api_vars: type = CPtrType(entry.type) - cname = env.mangle(Naming.varptr_prefix_api, entry.name) + cname = env.mangle(Naming.varptr_prefix_api, entry.name) h_code.putln("static %s = 0;" % type.declaration_code(cname)) h_code.putln("#define %s (*%s)" % (entry.name, cname)) h_code.put(UtilityCode.load_as_string("PyIdentifierFromString", "ImportExport.c")[0]) @@ -283,28 +283,28 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if api_funcs: h_code.put(UtilityCode.load_as_string("FunctionImport", "ImportExport.c")[1]) if api_extension_types: - h_code.put(UtilityCode.load_as_string("TypeImport", "ImportExport.c")[0]) + h_code.put(UtilityCode.load_as_string("TypeImport", "ImportExport.c")[0]) h_code.put(UtilityCode.load_as_string("TypeImport", "ImportExport.c")[1]) h_code.putln("") h_code.putln("static int import_%s(void) {" % self.api_name(env)) h_code.putln("PyObject *module = 0;") - h_code.putln('module = PyImport_ImportModule("%s");' % env.qualified_name) + h_code.putln('module = PyImport_ImportModule("%s");' % env.qualified_name) h_code.putln("if (!module) goto bad;") for entry in api_funcs: - cname = 
env.mangle(Naming.func_prefix_api, entry.name) + cname = env.mangle(Naming.func_prefix_api, entry.name) sig = entry.type.signature_string() h_code.putln( 'if (__Pyx_ImportFunction(module, "%s", (void (**)(void))&%s, "%s") < 0) goto bad;' % (entry.name, cname, sig)) for entry in api_vars: - cname = env.mangle(Naming.varptr_prefix_api, entry.name) - sig = entry.type.empty_declaration_code() + cname = env.mangle(Naming.varptr_prefix_api, entry.name) + sig = entry.type.empty_declaration_code() h_code.putln( 'if (__Pyx_ImportVoidPtr(module, "%s", (void **)&%s, "%s") < 0) goto bad;' % (entry.name, cname, sig)) - with ModuleImportGenerator(h_code, imported_modules={env.qualified_name: 'module'}) as import_generator: - for entry in api_extension_types: - self.generate_type_import_call(entry.type, h_code, import_generator, error_code="goto bad;") + with ModuleImportGenerator(h_code, imported_modules={env.qualified_name: 'module'}) as import_generator: + for entry in api_extension_types: + self.generate_type_import_call(entry.type, h_code, import_generator, error_code="goto bad;") h_code.putln("Py_DECREF(module); module = 0;") h_code.putln("return 0;") h_code.putln("bad:") @@ -333,8 +333,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): var_entries = type.scope.var_entries if var_entries: for entry in var_entries: - i_code.putln("cdef %s" % ( - entry.type.declaration_code(entry.cname, pyrex=1))) + i_code.putln("cdef %s" % ( + entry.type.declaration_code(entry.cname, pyrex=1))) else: i_code.putln("pass") i_code.dedent() @@ -345,19 +345,19 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if Options.annotate or options.annotate: rootwriter = Annotate.AnnotationCCodeWriter() else: - rootwriter = Code.CCodeWriter() - - c_code_config = generate_c_code_config(env, options) - - globalstate = Code.GlobalState( - rootwriter, self, - code_config=c_code_config, - common_utility_include_dir=options.common_utility_include_dir, - ) + rootwriter = Code.CCodeWriter() + + c_code_config = generate_c_code_config(env, options) + + globalstate = Code.GlobalState( + rootwriter, self, + code_config=c_code_config, + common_utility_include_dir=options.common_utility_include_dir, + ) globalstate.initialize_main_c_code() h_code = globalstate['h_code'] - self.generate_module_preamble(env, options, modules, result.embedded_metadata, h_code) + self.generate_module_preamble(env, options, modules, result.embedded_metadata, h_code) globalstate.module_pos = self.pos globalstate.directives = self.directives @@ -382,21 +382,21 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): self.generate_lambda_definitions(env, code) # generate normal variable and function definitions self.generate_variable_definitions(env, code) - + self.body.generate_function_definitions(env, code) - + code.mark_pos(None) self.generate_typeobj_definitions(env, code) self.generate_method_table(env, code) if env.has_import_star: self.generate_import_star(env, code) - self.generate_pymoduledef_struct(env, options, code) + self.generate_pymoduledef_struct(env, options, code) + + # initialise the macro to reduce the code size of one-time functionality + code.putln(UtilityCode.load_as_string("SmallCodeConfig", "ModuleSetupCode.c")[0].strip()) - # initialise the macro to reduce the code size of one-time functionality - code.putln(UtilityCode.load_as_string("SmallCodeConfig", "ModuleSetupCode.c")[0].strip()) - # init_globals is inserted before this - self.generate_module_init_func(modules[:-1], env, options, globalstate['init_module']) + 
self.generate_module_init_func(modules[:-1], env, options, globalstate['init_module']) self.generate_module_cleanup_func(env, globalstate['cleanup_module']) if Options.embed: self.generate_main_method(env, globalstate['main_method']) @@ -418,29 +418,29 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if options.gdb_debug: self._serialize_lineno_map(env, rootwriter) if Options.annotate or options.annotate: - self._generate_annotations(rootwriter, result, options) + self._generate_annotations(rootwriter, result, options) - def _generate_annotations(self, rootwriter, result, options): + def _generate_annotations(self, rootwriter, result, options): self.annotate(rootwriter) - coverage_xml_filename = Options.annotate_coverage_xml or options.annotate_coverage_xml - if coverage_xml_filename and os.path.exists(coverage_xml_filename): - try: - import xml.etree.cElementTree as ET - except ImportError: - import xml.etree.ElementTree as ET - coverage_xml = ET.parse(coverage_xml_filename).getroot() + coverage_xml_filename = Options.annotate_coverage_xml or options.annotate_coverage_xml + if coverage_xml_filename and os.path.exists(coverage_xml_filename): + try: + import xml.etree.cElementTree as ET + except ImportError: + import xml.etree.ElementTree as ET + coverage_xml = ET.parse(coverage_xml_filename).getroot() if hasattr(coverage_xml, 'iter'): iterator = coverage_xml.iter() # Python 2.7 & 3.2+ else: iterator = coverage_xml.getiterator() for el in iterator: - el.tail = None # save some memory - else: - coverage_xml = None - - rootwriter.save_annotation(result.main_source_file, result.c_file, coverage_xml=coverage_xml) - + el.tail = None # save some memory + else: + coverage_xml = None + + rootwriter.save_annotation(result.main_source_file, result.c_file, coverage_xml=coverage_xml) + # if we included files, additionally generate one annotation file for each if not self.scope.included_files: return @@ -459,28 +459,28 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if target_file_dir != target_dir and not os.path.exists(target_file_dir): try: os.makedirs(target_file_dir) - except OSError as e: + except OSError as e: import errno if e.errno != errno.EEXIST: raise - rootwriter.save_annotation(source_file, target_file, coverage_xml=coverage_xml) + rootwriter.save_annotation(source_file, target_file, coverage_xml=coverage_xml) def _serialize_lineno_map(self, env, ccodewriter): tb = env.context.gdb_debug_outputwriter markers = ccodewriter.buffer.allmarkers() - d = defaultdict(list) + d = defaultdict(list) for c_lineno, cython_lineno in enumerate(markers): if cython_lineno > 0: - d[cython_lineno].append(c_lineno + 1) + d[cython_lineno].append(c_lineno + 1) tb.start('LineNumberMapping') - for cython_lineno, c_linenos in sorted(d.items()): - tb.add_entry( - 'LineNumber', - c_linenos=' '.join(map(str, c_linenos)), - cython_lineno=str(cython_lineno), - ) + for cython_lineno, c_linenos in sorted(d.items()): + tb.add_entry( + 'LineNumber', + c_linenos=' '.join(map(str, c_linenos)), + cython_lineno=str(cython_lineno), + ) tb.end('LineNumberMapping') tb.serialize() @@ -625,16 +625,16 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): self.generate_cvariable_declarations(module, modulecode, defined_here) self.generate_cfunction_declarations(module, modulecode, defined_here) - def _put_setup_code(self, code, name): - code.put(UtilityCode.load_as_string(name, "ModuleSetupCode.c")[1]) - - def generate_module_preamble(self, env, options, cimported_modules, metadata, code): - code.put_generated_by() - if metadata: - 
code.putln("/* BEGIN: Cython Metadata") - code.putln(json.dumps(metadata, indent=4, sort_keys=True)) - code.putln("END: Cython Metadata */") - code.putln("") + def _put_setup_code(self, code, name): + code.put(UtilityCode.load_as_string(name, "ModuleSetupCode.c")[1]) + + def generate_module_preamble(self, env, options, cimported_modules, metadata, code): + code.put_generated_by() + if metadata: + code.putln("/* BEGIN: Cython Metadata") + code.putln(json.dumps(metadata, indent=4, sort_keys=True)) + code.putln("END: Cython Metadata */") + code.putln("") code.putln("#ifndef PY_SSIZE_T_CLEAN") code.putln("#define PY_SSIZE_T_CLEAN") @@ -644,9 +644,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if inc.location == inc.INITIAL: inc.write(code) code.putln("#ifndef Py_PYTHON_H") - code.putln(" #error Python headers needed to compile C extensions, " - "please install development version of Python.") - code.putln("#elif PY_VERSION_HEX < 0x02060000 || " + code.putln(" #error Python headers needed to compile C extensions, " + "please install development version of Python.") + code.putln("#elif PY_VERSION_HEX < 0x02060000 || " "(0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)") code.putln(" #error Cython requires Python 2.6+ or Python 3.3+.") code.putln("#else") @@ -654,23 +654,23 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): from .. import __version__ code.putln('#define CYTHON_ABI "%s"' % __version__.replace('.', '_')) - code.putln('#define CYTHON_HEX_VERSION %s' % build_hex_version(__version__)) + code.putln('#define CYTHON_HEX_VERSION %s' % build_hex_version(__version__)) code.putln("#define CYTHON_FUTURE_DIVISION %d" % ( Future.division in env.context.future_directives)) - self._put_setup_code(code, "CModulePreamble") - if env.context.options.cplus: - self._put_setup_code(code, "CppInitCode") - else: - self._put_setup_code(code, "CInitCode") + self._put_setup_code(code, "CModulePreamble") + if env.context.options.cplus: + self._put_setup_code(code, "CppInitCode") + else: + self._put_setup_code(code, "CInitCode") self._put_setup_code(code, "PythonCompatibility") - self._put_setup_code(code, "MathInitCode") + self._put_setup_code(code, "MathInitCode") # Using "(void)cname" to prevent "unused" warnings. 
- if options.c_line_in_traceback: + if options.c_line_in_traceback: cinfo = "%s = %s; (void)%s; " % (Naming.clineno_cname, Naming.line_c_macro, Naming.clineno_cname) - else: - cinfo = "" + else: + cinfo = "" code.putln("#define __PYX_MARK_ERR_POS(f_index, lineno) \\") code.putln(" { %s = %s[f_index]; (void)%s; %s = lineno; (void)%s; %s}" % ( Naming.filename_cname, Naming.filetable_cname, Naming.filename_cname, @@ -679,7 +679,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): )) code.putln("#define __PYX_ERR(f_index, lineno, Ln_error) \\") code.putln(" { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }") - + code.putln("") self.generate_extern_c_macro_definition(code) code.putln("") @@ -707,13 +707,13 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if c_string_type not in ('bytes', 'bytearray') and not c_string_encoding: error(self.pos, "a default encoding must be provided if c_string_type is not a byte type") code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII %s' % int(c_string_encoding == 'ascii')) - code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 %s' % - int(c_string_encoding.replace('-', '').lower() == 'utf8')) + code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 %s' % + int(c_string_encoding.replace('-', '').lower() == 'utf8')) if c_string_encoding == 'default': code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 1') else: - code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT ' - '(PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)') + code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT ' + '(PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)') code.putln('#define __PYX_DEFAULT_STRING_ENCODING "%s"' % c_string_encoding) if c_string_type == 'bytearray': c_string_func_name = 'ByteArray' @@ -734,10 +734,10 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln('static PyObject *%s = NULL;' % env.module_cname) code.putln('static PyObject *%s;' % env.module_dict_cname) code.putln('static PyObject *%s;' % Naming.builtins_cname) - code.putln('static PyObject *%s = NULL;' % Naming.cython_runtime_cname) + code.putln('static PyObject *%s = NULL;' % Naming.cython_runtime_cname) code.putln('static PyObject *%s;' % Naming.empty_tuple) code.putln('static PyObject *%s;' % Naming.empty_bytes) - code.putln('static PyObject *%s;' % Naming.empty_unicode) + code.putln('static PyObject *%s;' % Naming.empty_unicode) if Options.pre_import is not None: code.putln('static PyObject *%s;' % Naming.preimport_cname) code.putln('static int %s;' % Naming.lineno_cname) @@ -759,11 +759,11 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln(" #endif") code.putln("#endif") - def generate_dl_import_macro(self, code): - code.putln("#ifndef DL_IMPORT") - code.putln(" #define DL_IMPORT(_T) _T") - code.putln("#endif") - + def generate_dl_import_macro(self, code): + code.putln("#ifndef DL_IMPORT") + code.putln(" #define DL_IMPORT(_T) _T") + code.putln("#endif") + def generate_includes(self, env, cimported_modules, code, early=True, late=True): includes = [] for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey): @@ -810,8 +810,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): pass elif type.is_struct_or_union or type.is_cpp_class: self.generate_struct_union_predeclaration(entry, code) - elif type.is_ctuple and entry.used: - self.generate_struct_union_predeclaration(entry.type.struct_entry, code) + elif type.is_ctuple and entry.used: + self.generate_struct_union_predeclaration(entry.type.struct_entry, code) elif 
type.is_extension_type: self.generate_objstruct_predeclaration(type, code) # Actual declarations @@ -825,8 +825,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): self.generate_enum_definition(entry, code) elif type.is_struct_or_union: self.generate_struct_union_definition(entry, code) - elif type.is_ctuple and entry.used: - self.generate_struct_union_definition(entry.type.struct_entry, code) + elif type.is_ctuple and entry.used: + self.generate_struct_union_definition(entry.type.struct_entry, code) elif type.is_cpp_class: self.generate_cpp_class_definition(entry, code) elif type.is_extension_type: @@ -869,8 +869,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): def generate_struct_union_predeclaration(self, entry, code): type = entry.type if type.is_cpp_class and type.templates: - code.putln("template <typename %s>" % ", typename ".join( - [T.empty_declaration_code() for T in type.templates])) + code.putln("template <typename %s>" % ", typename ".join( + [T.empty_declaration_code() for T in type.templates])) code.putln(self.sue_predeclaration(type, type.kind, type.cname)) def sue_header_footer(self, type, kind, name): @@ -899,10 +899,10 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln(header) var_entries = scope.var_entries if not var_entries: - error(entry.pos, "Empty struct or union definition not allowed outside a 'cdef extern from' block") + error(entry.pos, "Empty struct or union definition not allowed outside a 'cdef extern from' block") for attr in var_entries: code.putln( - "%s;" % attr.type.declaration_code(attr.cname)) + "%s;" % attr.type.declaration_code(attr.cname)) code.putln(footer) if packed: code.putln("#if defined(__SUNPRO_C)") @@ -917,13 +917,13 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): scope = type.scope if scope: if type.templates: - code.putln("template <class %s>" % ", class ".join( - [T.empty_declaration_code() for T in type.templates])) + code.putln("template <class %s>" % ", class ".join( + [T.empty_declaration_code() for T in type.templates])) # Just let everything be public. 
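# A tiny runnable model of the template/base-class emission just below
# (illustrative only; empty_declaration_code() results are stubbed as strings):
templates = ['T', 'U']
base_classes = ['BaseA', 'BaseB']
print("template <class %s>" % ", class ".join(templates))
decl = "struct Wrapper"
if base_classes:
    decl += " : public %s" % ", public ".join(base_classes)
print(decl + " {")
print("};")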
code.put("struct %s" % type.cname) if type.base_classes: base_class_decl = ", public ".join( - [base_class.empty_declaration_code() for base_class in type.base_classes]) + [base_class.empty_declaration_code() for base_class in type.base_classes]) code.put(" : public %s" % base_class_decl) code.putln(" {") py_attrs = [e for e in scope.entries.values() @@ -932,8 +932,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): constructor = None destructor = None for attr in scope.var_entries: - if attr.type.is_cfunction: - code.put("inline ") + if attr.type.is_cfunction: + code.put("inline ") if attr.type.is_cfunction and attr.type.is_static_method: code.put("static ") elif attr.name == "<init>": @@ -943,7 +943,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): elif attr.type.is_cfunction: code.put("virtual ") has_virtual_methods = True - code.putln("%s;" % attr.type.declaration_code(attr.cname)) + code.putln("%s;" % attr.type.declaration_code(attr.cname)) is_implementing = 'init_module' in code.globalstate.parts if constructor or py_attrs: if constructor: @@ -1022,11 +1022,11 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.mark_pos(entry.pos) type = entry.type name = entry.cname or entry.name or "" - header, footer = self.sue_header_footer(type, "enum", name) + header, footer = self.sue_header_footer(type, "enum", name) code.putln(header) enum_values = entry.enum_values if not enum_values: - error(entry.pos, "Empty enum definition not allowed outside a 'cdef extern from' block") + error(entry.pos, "Empty enum definition not allowed outside a 'cdef extern from' block") else: last_entry = enum_values[-1] # this does not really generate code, just builds the result value @@ -1080,15 +1080,15 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if type.vtabstruct_cname: code.putln("") - code.putln("struct %s {" % type.vtabstruct_cname) + code.putln("struct %s {" % type.vtabstruct_cname) if type.base_type and type.base_type.vtabstruct_cname: code.putln("struct %s %s;" % ( type.base_type.vtabstruct_cname, Naming.obj_base_cname)) for method_entry in scope.cfunc_entries: if not method_entry.is_inherited: - code.putln("%s;" % method_entry.type.declaration_code("(*%s)" % method_entry.cname)) - code.putln("};") + code.putln("%s;" % method_entry.type.declaration_code("(*%s)" % method_entry.cname)) + code.putln("};") def generate_exttype_vtabptr_declaration(self, entry, code): if not entry.used: @@ -1155,7 +1155,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): else: attr_type = attr.type code.putln( - "%s;" % attr_type.declaration_code(attr.cname)) + "%s;" % attr_type.declaration_code(attr.cname)) code.putln(footer) if type.objtypedef_cname is not None: # Only for exposing public typedef name. 
@@ -1164,15 +1164,15 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): def generate_c_class_declarations(self, env, code, definition): for entry in env.c_class_entries: if definition or entry.defined_in_pxd: - code.putln("static PyTypeObject *%s = 0;" % ( - entry.type.typeptr_cname)) + code.putln("static PyTypeObject *%s = 0;" % ( + entry.type.typeptr_cname)) def generate_cvariable_declarations(self, env, code, definition): if env.is_cython_builtin: return for entry in env.var_entries: if (entry.in_cinclude or entry.in_closure or - (entry.visibility == 'private' and not (entry.defined_in_pxd or entry.used))): + (entry.visibility == 'private' and not (entry.defined_in_pxd or entry.used))): continue storage_class = None @@ -1192,7 +1192,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): storage_class = "static" dll_linkage = None if entry.init is not None: - init = entry.type.literal_code(entry.init) + init = entry.type.literal_code(entry.init) type = entry.type cname = entry.cname @@ -1206,7 +1206,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if storage_class: code.put("%s " % storage_class) code.put(type.declaration_code( - cname, dll_linkage=dll_linkage)) + cname, dll_linkage=dll_linkage)) if init is not None: code.put_safe(" = %s" % init) code.putln(";") @@ -1220,10 +1220,10 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): def generate_variable_definitions(self, env, code): for entry in env.var_entries: - if not entry.in_cinclude and entry.visibility == "public": + if not entry.in_cinclude and entry.visibility == "public": code.put(entry.type.declaration_code(entry.cname)) if entry.init is not None: - init = entry.type.literal_code(entry.init) + init = entry.type.literal_code(entry.init) code.put_safe(" = %s" % init) code.putln(";") @@ -1252,9 +1252,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if scope.defines_any_special(["__setitem__", "__delitem__"]): self.generate_ass_subscript_function(scope, code) if scope.defines_any_special(["__getslice__", "__setslice__", "__delslice__"]): - warning(self.pos, - "__getslice__, __setslice__, and __delslice__ are not supported by Python 3, " - "use __getitem__, __setitem__, and __delitem__ instead", 1) + warning(self.pos, + "__getslice__, __setslice__, and __delslice__ are not supported by Python 3, " + "use __getitem__, __setitem__, and __delitem__ instead", 1) code.putln("#if PY_MAJOR_VERSION >= 3") code.putln("#error __getslice__, __setslice__, and __delslice__ not supported in Python 3.") code.putln("#endif") @@ -1269,7 +1269,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if scope.defines_any_special(["__set__", "__delete__"]): self.generate_descr_set_function(scope, code) if not scope.is_closure_class_scope and scope.defines_any(["__dict__"]): - self.generate_dict_getter_function(scope, code) + self.generate_dict_getter_function(scope, code) if scope.defines_any_special(TypeSlots.richcmp_special_methods): self.generate_richcmp_function(scope, code) self.generate_property_accessors(scope, code) @@ -1290,7 +1290,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln( "%s = (%s)o;" % ( type.declaration_code("p"), - type.empty_declaration_code())) + type.empty_declaration_code())) def generate_new_function(self, scope, code, cclass_entry): tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__') @@ -1332,8 +1332,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln("static int %s = 0;" % freecount_name) code.putln("") code.putln( - "static PyObject *%s(PyTypeObject *t, %sPyObject *a, %sPyObject *k) {" % ( - 
slot_func, unused_marker, unused_marker)) + "static PyObject *%s(PyTypeObject *t, %sPyObject *a, %sPyObject *k) {" % ( + slot_func, unused_marker, unused_marker)) need_self_cast = (type.vtabslot_cname or (py_buffers or memoryview_slices or py_attrs) or @@ -1355,9 +1355,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): else: type_safety_check = ' & ((t->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0)' obj_struct = type.declaration_code("", deref=True) - code.putln( - "if (CYTHON_COMPILING_IN_CPYTHON && likely((%s > 0) & (t->tp_basicsize == sizeof(%s))%s)) {" % ( - freecount_name, obj_struct, type_safety_check)) + code.putln( + "if (CYTHON_COMPILING_IN_CPYTHON && likely((%s > 0) & (t->tp_basicsize == sizeof(%s))%s)) {" % ( + freecount_name, obj_struct, type_safety_check)) code.putln("o = (PyObject*)%s[--%s];" % ( freelist_name, freecount_name)) code.putln("memset(o, 0, sizeof(%s));" % obj_struct) @@ -1379,10 +1379,10 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln("p = %s;" % type.cast_code("o")) #if need_self_cast: # self.generate_self_cast(scope, code) - - # from this point on, ensure DECREF(o) on failure - needs_error_cleanup = False - + + # from this point on, ensure DECREF(o) on failure + needs_error_cleanup = False + if type.vtabslot_cname: vtab_base_type = type while vtab_base_type.base_type and vtab_base_type.base_type.vtabstruct_cname: @@ -1396,16 +1396,16 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): struct_type_cast, type.vtabptr_cname)) for entry in cpp_class_attrs: - code.putln("new((void*)&(p->%s)) %s();" % ( - entry.cname, entry.type.empty_declaration_code())) + code.putln("new((void*)&(p->%s)) %s();" % ( + entry.cname, entry.type.empty_declaration_code())) for entry in py_attrs: - if entry.name == "__dict__": - needs_error_cleanup = True - code.put("p->%s = PyDict_New(); if (unlikely(!p->%s)) goto bad;" % ( - entry.cname, entry.cname)) - else: - code.put_init_var_to_py_none(entry, "p->%s", nanny=False) + if entry.name == "__dict__": + needs_error_cleanup = True + code.put("p->%s = PyDict_New(); if (unlikely(!p->%s)) goto bad;" % ( + entry.cname, entry.cname)) + else: + code.put_init_var_to_py_none(entry, "p->%s", nanny=False) for entry in memoryview_slices: code.putln("p->%s.data = NULL;" % entry.cname) @@ -1422,16 +1422,16 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): cinit_args = "o, %s, NULL" % Naming.empty_tuple else: cinit_args = "o, a, k" - needs_error_cleanup = True - code.putln("if (unlikely(%s(%s) < 0)) goto bad;" % ( - new_func_entry.func_cname, cinit_args)) - + needs_error_cleanup = True + code.putln("if (unlikely(%s(%s) < 0)) goto bad;" % ( + new_func_entry.func_cname, cinit_args)) + code.putln( "return o;") - if needs_error_cleanup: - code.putln("bad:") - code.put_decref_clear("o", py_object_type, nanny=False) - code.putln("return NULL;") + if needs_error_cleanup: + code.putln("bad:") + code.put_decref_clear("o", py_object_type, nanny=False) + code.putln("return NULL;") code.putln( "}") @@ -1455,14 +1455,14 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): weakref_slot = None dict_slot = scope.lookup_here("__dict__") if not scope.is_closure_class_scope else None - if dict_slot not in scope.var_entries: - dict_slot = None - + if dict_slot not in scope.var_entries: + dict_slot = None + _, (py_attrs, _, memoryview_slices) = scope.get_refcounted_entries() cpp_class_attrs = [entry for entry in scope.var_entries if entry.type.is_cpp_class] - if py_attrs or cpp_class_attrs or memoryview_slices or weakref_slot or dict_slot: + if 
py_attrs or cpp_class_attrs or memoryview_slices or weakref_slot or dict_slot: self.generate_self_cast(scope, code) if not is_final_type: @@ -1493,11 +1493,11 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if weakref_slot: code.putln("if (p->__weakref__) PyObject_ClearWeakRefs(o);") - if dict_slot: - code.putln("if (p->__dict__) PyDict_Clear(p->__dict__);") - + if dict_slot: + code.putln("if (p->__dict__) PyDict_Clear(p->__dict__);") + for entry in cpp_class_attrs: - code.putln("__Pyx_call_destructor(p->%s);" % entry.cname) + code.putln("__Pyx_call_destructor(p->%s);" % entry.cname) for entry in py_attrs: code.put_xdecref_clear("p->%s" % entry.cname, entry.type, nanny=False, @@ -1514,7 +1514,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if base_type.scope and base_type.scope.needs_gc(): code.putln("PyObject_GC_Track(o);") else: - code.putln("#if CYTHON_USE_TYPE_SLOTS") + code.putln("#if CYTHON_USE_TYPE_SLOTS") code.putln("if (PyType_IS_GC(Py_TYPE(o)->tp_base))") code.putln("#endif") code.putln("PyObject_GC_Track(o);") @@ -1548,12 +1548,12 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): ' & ((Py_TYPE(o)->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0)') type = scope.parent_type - code.putln( - "if (CYTHON_COMPILING_IN_CPYTHON && ((%s < %d) & (Py_TYPE(o)->tp_basicsize == sizeof(%s))%s)) {" % ( - freecount_name, - freelist_size, - type.declaration_code("", deref=True), - type_safety_check)) + code.putln( + "if (CYTHON_COMPILING_IN_CPYTHON && ((%s < %d) & (Py_TYPE(o)->tp_basicsize == sizeof(%s))%s)) {" % ( + freecount_name, + freelist_size, + type.declaration_code("", deref=True), + type_safety_check)) code.putln("%s[%s++] = %s;" % ( freelist_name, freecount_name, type.cast_code("o"))) code.putln("} else {") @@ -1584,10 +1584,10 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): slot_func = scope.mangle_internal("tp_traverse") base_type = scope.parent_type.base_type if tp_slot.slot_code(scope) != slot_func: - return # never used + return # never used code.putln("") code.putln( - "static int %s(PyObject *o, visitproc v, void *a) {" % slot_func) + "static int %s(PyObject *o, visitproc v, void *a) {" % slot_func) have_entries, (py_attrs, py_buffers, memoryview_slices) = ( scope.get_refcounted_entries(include_gc_simple=False)) @@ -1613,19 +1613,19 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): # the module cleanup, which may already have cleared it. # In that case, fall back to traversing the type hierarchy. base_cname = base_type.typeptr_cname - code.putln( - "e = ((likely(%s)) ? ((%s->tp_traverse) ? %s->tp_traverse(o, v, a) : 0) : " - "__Pyx_call_next_tp_traverse(o, v, a, %s)); if (e) return e;" % ( - base_cname, base_cname, base_cname, slot_func)) + code.putln( + "e = ((likely(%s)) ? ((%s->tp_traverse) ? %s->tp_traverse(o, v, a) : 0) : " + "__Pyx_call_next_tp_traverse(o, v, a, %s)); if (e) return e;" % ( + base_cname, base_cname, base_cname, slot_func)) code.globalstate.use_utility_code( UtilityCode.load_cached("CallNextTpTraverse", "ExtensionTypes.c")) for entry in py_attrs: var_code = "p->%s" % entry.cname var_as_pyobject = PyrexTypes.typecast(py_object_type, entry.type, var_code) - code.putln("if (%s) {" % var_code) + code.putln("if (%s) {" % var_code) code.putln("e = (*v)(%s, a); if (e) return e;" % var_as_pyobject) - code.putln("}") + code.putln("}") # Traverse buffer exporting objects. # Note: not traversing memoryview attributes of memoryview slices! 
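# A rough Python rendering of the traversal logic emitted here (a sketch
# with illustrative attribute names, not the generated C): the visitproc
# is applied to every owned object reference, and the first non-zero
# result aborts the walk.
#
#     def tp_traverse(p, visit, arg):
#         for obj in (p.attr_a, p.attr_b):        # py_attrs
#             if obj is not None:
#                 e = visit(obj, arg)
#                 if e:
#                     return e
#         if p.buffer.obj is not None:            # buffer-exporting attribute
#             e = visit(p.buffer.obj, arg)
#             if e:
#                 return e
#         return 0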
@@ -1634,14 +1634,14 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): for entry in py_buffers: cname = entry.cname + ".obj" code.putln("if (p->%s) {" % cname) - code.putln("e = (*v)(p->%s, a); if (e) return e;" % cname) + code.putln("e = (*v)(p->%s, a); if (e) return e;" % cname) code.putln("}") - code.putln("return 0;") - code.putln("}") + code.putln("return 0;") + code.putln("}") def generate_clear_function(self, scope, code, cclass_entry): - tp_slot = TypeSlots.get_slot_by_name("tp_clear") + tp_slot = TypeSlots.get_slot_by_name("tp_clear") slot_func = scope.mangle_internal("tp_clear") base_type = scope.parent_type.base_type if tp_slot.slot_code(scope) != slot_func: @@ -1679,9 +1679,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): # the module cleanup, which may already have cleared it. # In that case, fall back to traversing the type hierarchy. base_cname = base_type.typeptr_cname - code.putln( - "if (likely(%s)) { if (%s->tp_clear) %s->tp_clear(o); } else __Pyx_call_next_tp_clear(o, %s);" % ( - base_cname, base_cname, base_cname, slot_func)) + code.putln( + "if (likely(%s)) { if (%s->tp_clear) %s->tp_clear(o); } else __Pyx_call_next_tp_clear(o, %s);" % ( + base_cname, base_cname, base_cname, slot_func)) code.globalstate.use_utility_code( UtilityCode.load_cached("CallNextTpClear", "ExtensionTypes.c")) @@ -1705,26 +1705,26 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if cclass_entry.cname == '__pyx_memoryviewslice': code.putln("__PYX_XDEC_MEMVIEW(&p->from_slice, 1);") - code.putln("return 0;") - code.putln("}") + code.putln("return 0;") + code.putln("}") def generate_getitem_int_function(self, scope, code): # This function is put into the sq_item slot when # a __getitem__ method is present. It converts its # argument to a Python integer and calls mp_subscript. 
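# In Python terms, the generated shim behaves roughly like the sketch
# below (illustrative only): the Py_ssize_t index is boxed into a Python
# integer, the lookup is delegated to the mapping slot, and the temporary
# index object is released afterwards.
#
#     def sq_item(o, i):
#         x = int(i)                        # PyInt_FromSsize_t(i)
#         r = type(o).__getitem__(o, x)     # mp_subscript(o, x)
#         del x                             # Py_DECREF(x)
#         return r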
code.putln( - "static PyObject *%s(PyObject *o, Py_ssize_t i) {" % ( - scope.mangle_internal("sq_item"))) + "static PyObject *%s(PyObject *o, Py_ssize_t i) {" % ( + scope.mangle_internal("sq_item"))) code.putln( - "PyObject *r;") + "PyObject *r;") code.putln( - "PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;") + "PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;") code.putln( - "r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);") + "r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);") code.putln( - "Py_DECREF(x);") + "Py_DECREF(x);") code.putln( - "return r;") + "return r;") code.putln( "}") @@ -1737,40 +1737,40 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): del_entry = scope.lookup_here("__delitem__") code.putln("") code.putln( - "static int %s(PyObject *o, PyObject *i, PyObject *v) {" % ( - scope.mangle_internal("mp_ass_subscript"))) + "static int %s(PyObject *o, PyObject *i, PyObject *v) {" % ( + scope.mangle_internal("mp_ass_subscript"))) code.putln( - "if (v) {") + "if (v) {") if set_entry: - code.putln("return %s(o, i, v);" % set_entry.func_cname) + code.putln("return %s(o, i, v);" % set_entry.func_cname) else: self.generate_guarded_basetype_call( base_type, "tp_as_mapping", "mp_ass_subscript", "o, i, v", code) code.putln( - "PyErr_Format(PyExc_NotImplementedError,") + "PyErr_Format(PyExc_NotImplementedError,") code.putln( - ' "Subscript assignment not supported by %.200s", Py_TYPE(o)->tp_name);') + ' "Subscript assignment not supported by %.200s", Py_TYPE(o)->tp_name);') code.putln( - "return -1;") + "return -1;") code.putln( - "}") + "}") code.putln( - "else {") + "else {") if del_entry: code.putln( - "return %s(o, i);" % ( - del_entry.func_cname)) + "return %s(o, i);" % ( + del_entry.func_cname)) else: self.generate_guarded_basetype_call( base_type, "tp_as_mapping", "mp_ass_subscript", "o, i, v", code) code.putln( - "PyErr_Format(PyExc_NotImplementedError,") + "PyErr_Format(PyExc_NotImplementedError,") code.putln( - ' "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);') + ' "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);') code.putln( - "return -1;") + "return -1;") code.putln( - "}") + "}") code.putln( "}") @@ -1802,42 +1802,42 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): del_entry = scope.lookup_here("__delslice__") code.putln("") code.putln( - "static int %s(PyObject *o, Py_ssize_t i, Py_ssize_t j, PyObject *v) {" % ( - scope.mangle_internal("sq_ass_slice"))) + "static int %s(PyObject *o, Py_ssize_t i, Py_ssize_t j, PyObject *v) {" % ( + scope.mangle_internal("sq_ass_slice"))) code.putln( - "if (v) {") + "if (v) {") if set_entry: code.putln( - "return %s(o, i, j, v);" % ( - set_entry.func_cname)) + "return %s(o, i, j, v);" % ( + set_entry.func_cname)) else: self.generate_guarded_basetype_call( base_type, "tp_as_sequence", "sq_ass_slice", "o, i, j, v", code) code.putln( - "PyErr_Format(PyExc_NotImplementedError,") + "PyErr_Format(PyExc_NotImplementedError,") code.putln( - ' "2-element slice assignment not supported by %.200s", Py_TYPE(o)->tp_name);') + ' "2-element slice assignment not supported by %.200s", Py_TYPE(o)->tp_name);') code.putln( - "return -1;") + "return -1;") code.putln( - "}") + "}") code.putln( - "else {") + "else {") if del_entry: code.putln( - "return %s(o, i, j);" % ( - del_entry.func_cname)) + "return %s(o, i, j);" % ( + del_entry.func_cname)) else: self.generate_guarded_basetype_call( base_type, "tp_as_sequence", "sq_ass_slice", "o, i, j, v", code) code.putln( - 
"PyErr_Format(PyExc_NotImplementedError,") + "PyErr_Format(PyExc_NotImplementedError,") code.putln( - ' "2-element slice deletion not supported by %.200s", Py_TYPE(o)->tp_name);') + ' "2-element slice deletion not supported by %.200s", Py_TYPE(o)->tp_name);') code.putln( - "return -1;") + "return -1;") code.putln( - "}") + "}") code.putln( "}") @@ -1935,12 +1935,12 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): getattribute_entry = lookup_here_or_base("__getattribute__") code.putln("") code.putln( - "static PyObject *%s(PyObject *o, PyObject *n) {" % ( - scope.mangle_internal("tp_getattro"))) + "static PyObject *%s(PyObject *o, PyObject *n) {" % ( + scope.mangle_internal("tp_getattro"))) if getattribute_entry is not None: code.putln( - "PyObject *v = %s(o, n);" % ( - getattribute_entry.func_cname)) + "PyObject *v = %s(o, n);" % ( + getattribute_entry.func_cname)) else: if not has_instance_dict and scope.parent_type.is_final_type: # Final with no dict => use faster type attribute lookup. @@ -1962,8 +1962,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln( "PyErr_Clear();") code.putln( - "v = %s(o, n);" % ( - getattr_entry.func_cname)) + "v = %s(o, n);" % ( + getattr_entry.func_cname)) code.putln( "}") code.putln( @@ -1980,34 +1980,34 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): del_entry = scope.lookup_here("__delattr__") code.putln("") code.putln( - "static int %s(PyObject *o, PyObject *n, PyObject *v) {" % ( - scope.mangle_internal("tp_setattro"))) + "static int %s(PyObject *o, PyObject *n, PyObject *v) {" % ( + scope.mangle_internal("tp_setattro"))) code.putln( - "if (v) {") + "if (v) {") if set_entry: code.putln( - "return %s(o, n, v);" % ( - set_entry.func_cname)) + "return %s(o, n, v);" % ( + set_entry.func_cname)) else: self.generate_guarded_basetype_call( base_type, None, "tp_setattro", "o, n, v", code) code.putln( - "return PyObject_GenericSetAttr(o, n, v);") + "return PyObject_GenericSetAttr(o, n, v);") code.putln( - "}") + "}") code.putln( - "else {") + "else {") if del_entry: code.putln( - "return %s(o, n);" % ( - del_entry.func_cname)) + "return %s(o, n);" % ( + del_entry.func_cname)) else: self.generate_guarded_basetype_call( base_type, None, "tp_setattro", "o, n, v", code) code.putln( - "return PyObject_GenericSetAttr(o, n, 0);") + "return PyObject_GenericSetAttr(o, n, 0);") code.putln( - "}") + "}") code.putln( "}") @@ -2019,8 +2019,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): user_get_entry = scope.lookup_here("__get__") code.putln("") code.putln( - "static PyObject *%s(PyObject *o, PyObject *i, PyObject *c) {" % ( - scope.mangle_internal("tp_descr_get"))) + "static PyObject *%s(PyObject *o, PyObject *i, PyObject *c) {" % ( + scope.mangle_internal("tp_descr_get"))) code.putln( "PyObject *r = 0;") code.putln( @@ -2030,8 +2030,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): #code.put_incref("i", py_object_type) #code.put_incref("c", py_object_type) code.putln( - "r = %s(o, i, c);" % ( - user_get_entry.func_cname)) + "r = %s(o, i, c);" % ( + user_get_entry.func_cname)) #code.put_decref("i", py_object_type) #code.put_decref("c", py_object_type) code.putln( @@ -2048,38 +2048,38 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): user_del_entry = scope.lookup_here("__delete__") code.putln("") code.putln( - "static int %s(PyObject *o, PyObject *i, PyObject *v) {" % ( - scope.mangle_internal("tp_descr_set"))) + "static int %s(PyObject *o, PyObject *i, PyObject *v) {" % ( + scope.mangle_internal("tp_descr_set"))) code.putln( - "if (v) {") + "if (v) {") 
if user_set_entry: code.putln( - "return %s(o, i, v);" % ( - user_set_entry.func_cname)) + "return %s(o, i, v);" % ( + user_set_entry.func_cname)) else: self.generate_guarded_basetype_call( base_type, None, "tp_descr_set", "o, i, v", code) code.putln( - 'PyErr_SetString(PyExc_NotImplementedError, "__set__");') + 'PyErr_SetString(PyExc_NotImplementedError, "__set__");') code.putln( - "return -1;") + "return -1;") code.putln( - "}") + "}") code.putln( - "else {") + "else {") if user_del_entry: code.putln( - "return %s(o, i);" % ( - user_del_entry.func_cname)) + "return %s(o, i);" % ( + user_del_entry.func_cname)) else: self.generate_guarded_basetype_call( base_type, None, "tp_descr_set", "o, i, v", code) code.putln( - 'PyErr_SetString(PyExc_NotImplementedError, "__delete__");') + 'PyErr_SetString(PyExc_NotImplementedError, "__delete__");') code.putln( - "return -1;") + "return -1;") code.putln( - "}") + "}") code.putln( "}") @@ -2098,11 +2098,11 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): get_entry = property_scope.lookup_here("__get__") code.putln("") code.putln( - "static PyObject *%s(PyObject *o, CYTHON_UNUSED void *x) {" % ( - property_entry.getter_cname)) + "static PyObject *%s(PyObject *o, CYTHON_UNUSED void *x) {" % ( + property_entry.getter_cname)) code.putln( - "return %s(o);" % ( - get_entry.func_cname)) + "return %s(o);" % ( + get_entry.func_cname)) code.putln( "}") @@ -2114,34 +2114,34 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): del_entry = property_scope.lookup_here("__del__") code.putln("") code.putln( - "static int %s(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {" % ( - property_entry.setter_cname)) + "static int %s(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {" % ( + property_entry.setter_cname)) code.putln( - "if (v) {") + "if (v) {") if set_entry: code.putln( - "return %s(o, v);" % ( - set_entry.func_cname)) + "return %s(o, v);" % ( + set_entry.func_cname)) else: code.putln( - 'PyErr_SetString(PyExc_NotImplementedError, "__set__");') + 'PyErr_SetString(PyExc_NotImplementedError, "__set__");') code.putln( - "return -1;") + "return -1;") code.putln( - "}") + "}") code.putln( - "else {") + "else {") if del_entry: code.putln( - "return %s(o);" % ( - del_entry.func_cname)) + "return %s(o);" % ( + del_entry.func_cname)) else: code.putln( - 'PyErr_SetString(PyExc_NotImplementedError, "__del__");') + 'PyErr_SetString(PyExc_NotImplementedError, "__del__");') code.putln( - "return -1;") + "return -1;") code.putln( - "}") + "}") code.putln( "}") @@ -2167,7 +2167,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): else: objstruct = "struct %s" % type.objstruct_cname code.putln( - "sizeof(%s), /*tp_basicsize*/" % objstruct) + "sizeof(%s), /*tp_basicsize*/" % objstruct) code.putln( "0, /*tp_itemsize*/") for slot in TypeSlots.slot_table: @@ -2178,88 +2178,88 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): def generate_method_table(self, env, code): if env.is_c_class_scope and not env.pyfunc_entries: return - binding = env.directives['binding'] - + binding = env.directives['binding'] + code.putln("") - wrapper_code_writer = code.insertion_point() - + wrapper_code_writer = code.insertion_point() + code.putln( - "static PyMethodDef %s[] = {" % ( - env.method_table_cname)) + "static PyMethodDef %s[] = {" % ( + env.method_table_cname)) for entry in env.pyfunc_entries: - if not entry.fused_cfunction and not (binding and entry.is_overridable): - code.put_pymethoddef(entry, ",", wrapper_code_writer=wrapper_code_writer) + if not entry.fused_cfunction and not (binding and 
entry.is_overridable): + code.put_pymethoddef(entry, ",", wrapper_code_writer=wrapper_code_writer) code.putln( - "{0, 0, 0, 0}") + "{0, 0, 0, 0}") code.putln( "};") - if wrapper_code_writer.getvalue(): - wrapper_code_writer.putln("") - - def generate_dict_getter_function(self, scope, code): - dict_attr = scope.lookup_here("__dict__") - if not dict_attr or not dict_attr.is_variable: - return - func_name = scope.mangle_internal("__dict__getter") - dict_name = dict_attr.cname - code.putln("") - code.putln("static PyObject *%s(PyObject *o, CYTHON_UNUSED void *x) {" % func_name) - self.generate_self_cast(scope, code) - code.putln("if (unlikely(!p->%s)){" % dict_name) - code.putln("p->%s = PyDict_New();" % dict_name) - code.putln("}") - code.putln("Py_XINCREF(p->%s);" % dict_name) - code.putln("return p->%s;" % dict_name) - code.putln("}") - + if wrapper_code_writer.getvalue(): + wrapper_code_writer.putln("") + + def generate_dict_getter_function(self, scope, code): + dict_attr = scope.lookup_here("__dict__") + if not dict_attr or not dict_attr.is_variable: + return + func_name = scope.mangle_internal("__dict__getter") + dict_name = dict_attr.cname + code.putln("") + code.putln("static PyObject *%s(PyObject *o, CYTHON_UNUSED void *x) {" % func_name) + self.generate_self_cast(scope, code) + code.putln("if (unlikely(!p->%s)){" % dict_name) + code.putln("p->%s = PyDict_New();" % dict_name) + code.putln("}") + code.putln("Py_XINCREF(p->%s);" % dict_name) + code.putln("return p->%s;" % dict_name) + code.putln("}") + def generate_getset_table(self, env, code): if env.property_entries: code.putln("") code.putln( "static struct PyGetSetDef %s[] = {" % - env.getset_table_cname) + env.getset_table_cname) for entry in env.property_entries: - doc = entry.doc - if doc: - if doc.is_unicode: - doc = doc.as_utf8_string() - doc_code = doc.as_c_string_literal() + doc = entry.doc + if doc: + if doc.is_unicode: + doc = doc.as_utf8_string() + doc_code = doc.as_c_string_literal() else: doc_code = "0" code.putln( - '{(char *)"%s", %s, %s, (char *)%s, 0},' % ( + '{(char *)"%s", %s, %s, (char *)%s, 0},' % ( entry.name, entry.getter_cname or "0", entry.setter_cname or "0", doc_code)) code.putln( - "{0, 0, 0, 0, 0}") + "{0, 0, 0, 0, 0}") code.putln( "};") - def create_import_star_conversion_utility_code(self, env): - # Create all conversion helpers that are needed for "import *" assignments. - # Must be done before code generation to support CythonUtilityCode. - for name, entry in sorted(env.entries.items()): - if entry.is_cglobal and entry.used: - if not entry.type.is_pyobject: - entry.type.create_from_py_utility_code(env) - + def create_import_star_conversion_utility_code(self, env): + # Create all conversion helpers that are needed for "import *" assignments. + # Must be done before code generation to support CythonUtilityCode. 
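# Runtime effect of the helper prepared here, as a sketch: when user code
# does `from some_mod import *` inside a Cython module, each incoming
# (name, object) pair is tested against the module's C globals and
# converted with the matching from_py helper before assignment.  All
# names below are illustrative pseudo-code, not the emitted C.
#
#     def import_star_set(o, name):
#         if name == "counter":             # e.g. a `cdef int counter`
#             counter_c_global = int(o)     # from_py conversion
#         elif name == "table":             # e.g. a `cdef dict table`
#             table_c_global = o            # plain object assignment
#         else:
#             module_dict[name] = o         # ordinary Python fallback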
+ for name, entry in sorted(env.entries.items()): + if entry.is_cglobal and entry.used: + if not entry.type.is_pyobject: + entry.type.create_from_py_utility_code(env) + def generate_import_star(self, env, code): - env.use_utility_code(UtilityCode.load_cached("CStringEquals", "StringTools.c")) + env.use_utility_code(UtilityCode.load_cached("CStringEquals", "StringTools.c")) code.putln() - code.enter_cfunc_scope() # as we need labels - code.putln("static int %s(PyObject *o, PyObject* py_name, char *name) {" % Naming.import_star_set) - - code.putln("static const char* internal_type_names[] = {") + code.enter_cfunc_scope() # as we need labels + code.putln("static int %s(PyObject *o, PyObject* py_name, char *name) {" % Naming.import_star_set) + + code.putln("static const char* internal_type_names[] = {") for name, entry in sorted(env.entries.items()): if entry.is_type: code.putln('"%s",' % name) code.putln("0") code.putln("};") - - code.putln("const char** type_name = internal_type_names;") + + code.putln("const char** type_name = internal_type_names;") code.putln("while (*type_name) {") code.putln("if (__Pyx_StrEq(name, *type_name)) {") code.putln('PyErr_Format(PyExc_TypeError, "Cannot overwrite C type %s", name);') @@ -2267,17 +2267,17 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln("}") code.putln("type_name++;") code.putln("}") - + old_error_label = code.new_error_label() - code.putln("if (0);") # so the first one can be "else if" - msvc_count = 0 - for name, entry in sorted(env.entries.items()): - if entry.is_cglobal and entry.used and not entry.type.is_const: - msvc_count += 1 - if msvc_count % 100 == 0: - code.putln("#ifdef _MSC_VER") - code.putln("if (0); /* Workaround for MSVC C1061. */") - code.putln("#endif") + code.putln("if (0);") # so the first one can be "else if" + msvc_count = 0 + for name, entry in sorted(env.entries.items()): + if entry.is_cglobal and entry.used and not entry.type.is_const: + msvc_count += 1 + if msvc_count % 100 == 0: + code.putln("#ifdef _MSC_VER") + code.putln("if (0); /* Workaround for MSVC C1061. 
*/") + code.putln("#endif") code.putln('else if (__Pyx_StrEq(name, "%s")) {' % name) if entry.type.is_pyobject: if entry.type.is_extension_type or entry.type.is_builtin_type: @@ -2289,13 +2289,13 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln("%s = %s;" % ( entry.cname, PyrexTypes.typecast(entry.type, py_object_type, "o"))) - elif entry.type.create_from_py_utility_code(env): - # if available, utility code was already created in self.prepare_utility_code() - code.putln(entry.type.from_py_call_code( - 'o', entry.cname, entry.pos, code)) + elif entry.type.create_from_py_utility_code(env): + # if available, utility code was already created in self.prepare_utility_code() + code.putln(entry.type.from_py_call_code( + 'o', entry.cname, entry.pos, code)) else: - code.putln('PyErr_Format(PyExc_TypeError, "Cannot convert Python object %s to %s");' % ( - name, entry.type)) + code.putln('PyErr_Format(PyExc_TypeError, "Cannot convert Python object %s to %s");' % ( + name, entry.type)) code.putln(code.error_goto(entry.pos)) code.putln("}") code.putln("else {") @@ -2310,17 +2310,17 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln("bad:") code.putln("return -1;") code.putln("}") - code.putln("") + code.putln("") code.putln(UtilityCode.load_as_string("ImportStar", "ImportExport.c")[1]) - code.exit_cfunc_scope() # done with labels + code.exit_cfunc_scope() # done with labels - def generate_module_init_func(self, imported_modules, env, options, code): + def generate_module_init_func(self, imported_modules, env, options, code): subfunction = self.mod_init_subfunction(self.pos, self.scope, code) - code.enter_cfunc_scope(self.scope) + code.enter_cfunc_scope(self.scope) code.putln("") code.putln(UtilityCode.load_as_string("PyModInitFuncType", "ModuleSetupCode.c")[0]) - init_name = 'init' + (options.init_suffix or env.module_name) + init_name = 'init' + (options.init_suffix or env.module_name) header2 = "__Pyx_PyMODINIT_FUNC %s(void)" % init_name header3 = "__Pyx_PyMODINIT_FUNC %s(void)" % self.mod_init_func_cname('PyInit', env, options) code.putln("#if PY_MAJOR_VERSION < 3") @@ -2342,7 +2342,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln("") # main module init code lives in Py_mod_exec function, not in PyInit function - code.putln("static CYTHON_SMALL_CODE int %s(PyObject *%s)" % ( + code.putln("static CYTHON_SMALL_CODE int %s(PyObject *%s)" % ( self.mod_init_func_cname(Naming.pymodule_exec_func_cname, env), Naming.pymodinit_module_arg)) code.putln("#endif") # PEP489 @@ -2354,26 +2354,26 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): tempdecl_code = code.insertion_point() - profile = code.globalstate.directives['profile'] - linetrace = code.globalstate.directives['linetrace'] - if profile or linetrace: - code.globalstate.use_utility_code(UtilityCode.load_cached("Profile", "Profile.c")) - + profile = code.globalstate.directives['profile'] + linetrace = code.globalstate.directives['linetrace'] + if profile or linetrace: + code.globalstate.use_utility_code(UtilityCode.load_cached("Profile", "Profile.c")) + code.put_declare_refcount_context() code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT") - # Most extension modules simply can't deal with it, and Cython isn't ready either. - # See issues listed here: https://docs.python.org/3/c-api/init.html#sub-interpreter-support - code.putln("if (%s) {" % Naming.module_cname) + # Most extension modules simply can't deal with it, and Cython isn't ready either. 
+ # See issues listed here: https://docs.python.org/3/c-api/init.html#sub-interpreter-support + code.putln("if (%s) {" % Naming.module_cname) # Hack: enforce single initialisation. - code.putln("if (%s == %s) return 0;" % ( + code.putln("if (%s == %s) return 0;" % ( Naming.module_cname, Naming.pymodinit_module_arg, )) - code.putln('PyErr_SetString(PyExc_RuntimeError,' - ' "Module \'%s\' has already been imported. Re-initialisation is not supported.");' % - env.module_name) - code.putln("return -1;") - code.putln("}") + code.putln('PyErr_SetString(PyExc_RuntimeError,' + ' "Module \'%s\' has already been imported. Re-initialisation is not supported.");' % + env.module_name) + code.putln("return -1;") + code.putln("}") code.putln("#elif PY_MAJOR_VERSION >= 3") # Hack: enforce single initialisation also on reimports under different names on Python 3 (with PEP 3121/489). code.putln("if (%s) return __Pyx_NewRef(%s);" % ( @@ -2382,31 +2382,31 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): )) code.putln("#endif") - if profile or linetrace: - tempdecl_code.put_trace_declarations() - code.put_trace_frame_init() - + if profile or linetrace: + tempdecl_code.put_trace_declarations() + code.put_trace_frame_init() + refnanny_import_code = UtilityCode.load_as_string("ImportRefnannyAPI", "ModuleSetupCode.c")[1] code.putln(refnanny_import_code.rstrip()) code.put_setup_refcount_context(header3) env.use_utility_code(UtilityCode.load("CheckBinaryVersion", "ModuleSetupCode.c")) - code.put_error_if_neg(self.pos, "__Pyx_check_binary_version()") - - code.putln("#ifdef __Pxy_PyFrame_Initialize_Offsets") - code.putln("__Pxy_PyFrame_Initialize_Offsets();") - code.putln("#endif") - code.putln("%s = PyTuple_New(0); %s" % ( - Naming.empty_tuple, code.error_goto_if_null(Naming.empty_tuple, self.pos))) - code.putln("%s = PyBytes_FromStringAndSize(\"\", 0); %s" % ( - Naming.empty_bytes, code.error_goto_if_null(Naming.empty_bytes, self.pos))) - code.putln("%s = PyUnicode_FromStringAndSize(\"\", 0); %s" % ( - Naming.empty_unicode, code.error_goto_if_null(Naming.empty_unicode, self.pos))) + code.put_error_if_neg(self.pos, "__Pyx_check_binary_version()") + + code.putln("#ifdef __Pxy_PyFrame_Initialize_Offsets") + code.putln("__Pxy_PyFrame_Initialize_Offsets();") + code.putln("#endif") + code.putln("%s = PyTuple_New(0); %s" % ( + Naming.empty_tuple, code.error_goto_if_null(Naming.empty_tuple, self.pos))) + code.putln("%s = PyBytes_FromStringAndSize(\"\", 0); %s" % ( + Naming.empty_bytes, code.error_goto_if_null(Naming.empty_bytes, self.pos))) + code.putln("%s = PyUnicode_FromStringAndSize(\"\", 0); %s" % ( + Naming.empty_unicode, code.error_goto_if_null(Naming.empty_unicode, self.pos))) for ext_type in ('CyFunction', 'FusedFunction', 'Coroutine', 'Generator', 'AsyncGen', 'StopAsyncIteration'): - code.putln("#ifdef __Pyx_%s_USED" % ext_type) - code.put_error_if_neg(self.pos, "__pyx_%s_init()" % ext_type) - code.putln("#endif") + code.putln("#ifdef __Pyx_%s_USED" % ext_type) + code.put_error_if_neg(self.pos, "__pyx_%s_init()" % ext_type) + code.putln("#endif") code.putln("/*--- Library function declarations ---*/") if env.directives['np_pythran']: @@ -2419,21 +2419,21 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln("#endif") code.putln("/*--- Module creation code ---*/") - self.generate_module_creation_code(env, options, code) + self.generate_module_creation_code(env, options, code) code.putln("/*--- Initialize various global constants etc. 
---*/") - code.put_error_if_neg(self.pos, "__Pyx_InitGlobals()") + code.put_error_if_neg(self.pos, "__Pyx_InitGlobals()") - code.putln("#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || " - "__PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)") - code.put_error_if_neg(self.pos, "__Pyx_init_sys_getdefaultencoding_params()") + code.putln("#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || " + "__PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)") + code.put_error_if_neg(self.pos, "__Pyx_init_sys_getdefaultencoding_params()") code.putln("#endif") code.putln("if (%s%s) {" % (Naming.module_is_main, self.full_module_name.replace('.', '__'))) - code.put_error_if_neg(self.pos, 'PyObject_SetAttr(%s, %s, %s)' % ( - env.module_cname, - code.intern_identifier(EncodedString("__name__")), - code.intern_identifier(EncodedString("__main__")))) + code.put_error_if_neg(self.pos, 'PyObject_SetAttr(%s, %s, %s)' % ( + env.module_cname, + code.intern_identifier(EncodedString("__name__")), + code.intern_identifier(EncodedString("__main__")))) code.putln("}") # set up __file__ and __path__, then add the module to sys.modules @@ -2476,20 +2476,20 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln("/*--- Execution code ---*/") code.mark_pos(None) - code.putln("#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)") - code.put_error_if_neg(self.pos, "__Pyx_patch_abc()") - code.putln("#endif") - - if profile or linetrace: - code.put_trace_call(header3, self.pos, nogil=not code.funcstate.gil_owned) - code.funcstate.can_trace = True - + code.putln("#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)") + code.put_error_if_neg(self.pos, "__Pyx_patch_abc()") + code.putln("#endif") + + if profile or linetrace: + code.put_trace_call(header3, self.pos, nogil=not code.funcstate.gil_owned) + code.funcstate.can_trace = True + self.body.generate_execution_code(code) - if profile or linetrace: - code.funcstate.can_trace = False - code.put_trace_return("Py_None", nogil=not code.funcstate.gil_owned) - + if profile or linetrace: + code.funcstate.can_trace = False + code.put_trace_return("Py_None", nogil=not code.funcstate.gil_owned) + code.putln() code.putln("/*--- Wrapped vars code ---*/") self.generate_wrapped_entries_code(env, code) @@ -2506,15 +2506,15 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.put_xdecref(cname, type) code.putln('if (%s) {' % env.module_cname) code.putln('if (%s) {' % env.module_dict_cname) - code.put_add_traceback("init %s" % env.qualified_name) + code.put_add_traceback("init %s" % env.qualified_name) code.globalstate.use_utility_code(Nodes.traceback_utility_code) - # Module reference and module dict are in global variables which might still be needed - # for cleanup, atexit code, etc., so leaking is better than crashing. - # At least clearing the module dict here might be a good idea, but could still break - # user code in atexit or other global registries. - ##code.put_decref_clear(env.module_dict_cname, py_object_type, nanny=False) + # Module reference and module dict are in global variables which might still be needed + # for cleanup, atexit code, etc., so leaking is better than crashing. + # At least clearing the module dict here might be a good idea, but could still break + # user code in atexit or other global registries. 
+ ##code.put_decref_clear(env.module_dict_cname, py_object_type, nanny=False) code.putln('}') - code.put_decref_clear(env.module_cname, py_object_type, nanny=False, clear_before_decref=True) + code.put_decref_clear(env.module_cname, py_object_type, nanny=False, clear_before_decref=True) code.putln('} else if (!PyErr_Occurred()) {') code.putln('PyErr_SetString(PyExc_ImportError, "init %s");' % env.qualified_name) code.putln('}') @@ -2562,7 +2562,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): self.call_code = orig_code.insertion_point() code = function_code code.enter_cfunc_scope(scope) - prototypes.putln("static CYTHON_SMALL_CODE int %s(void); /*proto*/" % self.cfunc_name) + prototypes.putln("static CYTHON_SMALL_CODE int %s(void); /*proto*/" % self.cfunc_name) code.putln("static int %s(void) {" % self.cfunc_name) code.put_declare_refcount_context() self.tempdecl_code = code.insertion_point() @@ -2730,11 +2730,11 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): # if entry.type.is_pyobject and entry.used: # code.putln("Py_DECREF(%s); %s = 0;" % ( # code.entry_as_pyobject(entry), entry.cname)) - if Options.pre_import is not None: - code.put_decref_clear(Naming.preimport_cname, py_object_type, - nanny=False, clear_before_decref=True) - for cname in [env.module_dict_cname, Naming.cython_runtime_cname, Naming.builtins_cname]: - code.put_decref_clear(cname, py_object_type, nanny=False, clear_before_decref=True) + if Options.pre_import is not None: + code.put_decref_clear(Naming.preimport_cname, py_object_type, + nanny=False, clear_before_decref=True) + for cname in [env.module_dict_cname, Naming.cython_runtime_cname, Naming.builtins_cname]: + code.put_decref_clear(cname, py_object_type, nanny=False, clear_before_decref=True) def generate_main_method(self, env, code): module_is_main = "%s%s" % (Naming.module_is_main, self.full_module_name.replace('.', '__')) @@ -2742,18 +2742,18 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): wmain = "wmain" else: wmain = Options.embed - main_method = UtilityCode.load_cached("MainFunction", "Embed.c") + main_method = UtilityCode.load_cached("MainFunction", "Embed.c") code.globalstate.use_utility_code( main_method.specialize( - module_name=env.module_name, - module_is_main=module_is_main, - main_method=Options.embed, - wmain_method=wmain)) + module_name=env.module_name, + module_is_main=module_is_main, + main_method=Options.embed, + wmain_method=wmain)) def mod_init_func_cname(self, prefix, env, options=None): - return '%s_%s' % (prefix, options and options.init_suffix or env.module_name) + return '%s_%s' % (prefix, options and options.init_suffix or env.module_name) - def generate_pymoduledef_struct(self, env, options, code): + def generate_pymoduledef_struct(self, env, options, code): if env.doc: doc = "%s" % code.get_string_const(env.doc) else: @@ -2781,7 +2781,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln("") code.putln("static struct PyModuleDef %s = {" % Naming.pymoduledef_cname) code.putln(" PyModuleDef_HEAD_INIT,") - code.putln(' "%s",' % (options.module_name or env.module_name)) + code.putln(' "%s",' % (options.module_name or env.module_name)) code.putln(" %s, /* m_doc */" % doc) code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT") code.putln(" 0, /* m_size */") @@ -2800,7 +2800,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln("};") code.putln("#endif") - def generate_module_creation_code(self, env, options, code): + def generate_module_creation_code(self, env, options, code): # Generate code to create the module object 
and # install the builtins. if env.doc: @@ -2818,7 +2818,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln( '%s = Py_InitModule4("%s", %s, %s, 0, PYTHON_API_VERSION); Py_XINCREF(%s);' % ( env.module_cname, - options.module_name or env.module_name, + options.module_name or env.module_name, env.method_table_cname, doc, env.module_cname)) @@ -2841,12 +2841,12 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): '%s = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); %s' % ( Naming.builtins_cname, code.error_goto_if_null(Naming.builtins_cname, self.pos))) - code.put_incref(Naming.builtins_cname, py_object_type, nanny=False) + code.put_incref(Naming.builtins_cname, py_object_type, nanny=False) code.putln( '%s = PyImport_AddModule((char *) "cython_runtime"); %s' % ( Naming.cython_runtime_cname, code.error_goto_if_null(Naming.cython_runtime_cname, self.pos))) - code.put_incref(Naming.cython_runtime_cname, py_object_type, nanny=False) + code.put_incref(Naming.cython_runtime_cname, py_object_type, nanny=False) code.putln( 'if (PyObject_SetAttrString(%s, "__builtins__", %s) < 0) %s;' % ( env.module_cname, @@ -2858,7 +2858,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): Naming.preimport_cname, Options.pre_import, code.error_goto_if_null(Naming.preimport_cname, self.pos))) - code.put_incref(Naming.preimport_cname, py_object_type, nanny=False) + code.put_incref(Naming.preimport_cname, py_object_type, nanny=False) def generate_global_init_code(self, env, code): # Generate code to initialise global PyObject * @@ -2869,7 +2869,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): entry.type.global_init_code(entry, code) def generate_wrapped_entries_code(self, env, code): - for name, entry in sorted(env.entries.items()): + for name, entry in sorted(env.entries.items()): if (entry.create_wrapper and not entry.is_type and entry.scope is env): @@ -2892,13 +2892,13 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): entries = [] for entry in env.var_entries: if (entry.api - or entry.defined_in_pxd - or (Options.cimport_from_pyx and not entry.visibility == 'extern')): + or entry.defined_in_pxd + or (Options.cimport_from_pyx and not entry.visibility == 'extern')): entries.append(entry) if entries: env.use_utility_code(UtilityCode.load_cached("VoidPtrExport", "ImportExport.c")) for entry in entries: - signature = entry.type.empty_declaration_code() + signature = entry.type.empty_declaration_code() name = code.intern_identifier(entry.name) code.putln('if (__Pyx_ExportVoidPtr(%s, (void *)&%s, "%s") < 0) %s' % ( name, entry.cname, signature, @@ -2909,14 +2909,14 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): entries = [] for entry in env.cfunc_entries: if (entry.api - or entry.defined_in_pxd - or (Options.cimport_from_pyx and not entry.visibility == 'extern')): + or entry.defined_in_pxd + or (Options.cimport_from_pyx and not entry.visibility == 'extern')): entries.append(entry) if entries: env.use_utility_code( UtilityCode.load_cached("FunctionExport", "ImportExport.c")) - # Note: while this looks like it could be more cheaply stored and read from a struct array, - # investigation shows that the resulting binary is smaller with repeated functions calls. + # Note: while this looks like it could be more cheaply stored and read from a struct array, + # investigation shows that the resulting binary is smaller with repeated functions calls. 
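# At runtime this export scheme is visible as the module's __pyx_capi__
# dict: each function pointer is wrapped in a PyCapsule whose capsule
# name is the C signature string, and the importing side re-checks that
# string before unpacking the pointer.  Sketch (module and function
# names are hypothetical):
#
#     import some_cython_mod
#     caps = some_cython_mod.__pyx_capi__["myfunc"]   # a PyCapsule
#     # consumer side: PyCapsule_GetPointer(caps, "int (int)") fails
#     # loudly if the recorded signature does not match.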
for entry in entries: signature = entry.type.signature_string() code.putln('if (__Pyx_ExportFunction("%s", (void (*)(void))%s, "%s") < 0) %s' % ( @@ -2929,10 +2929,10 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): # Generate type import code for all exported extension types in # an imported module. #if module.c_class_entries: - with ModuleImportGenerator(code) as import_generator: - for entry in module.c_class_entries: - if entry.defined_in_pxd: - self.generate_type_import_code(env, entry.type, entry.pos, code, import_generator) + with ModuleImportGenerator(code) as import_generator: + for entry in module.c_class_entries: + if entry.defined_in_pxd: + self.generate_type_import_code(env, entry.type, entry.pos, code, import_generator) def specialize_fused_types(self, pxd_env): """ @@ -2957,7 +2957,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): UtilityCode.load_cached("VoidPtrImport", "ImportExport.c")) temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) code.putln( - '%s = PyImport_ImportModule("%s"); if (!%s) %s' % ( + '%s = PyImport_ImportModule("%s"); if (!%s) %s' % ( temp, module.qualified_name, temp, @@ -2968,7 +2968,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): cname = entry.cname else: cname = module.mangle(Naming.varptr_prefix, entry.name) - signature = entry.type.empty_declaration_code() + signature = entry.type.empty_declaration_code() code.putln( 'if (__Pyx_ImportVoidPtr(%s, "%s", (void **)&%s, "%s") < 0) %s' % ( temp, entry.name, cname, signature, @@ -2987,7 +2987,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): UtilityCode.load_cached("FunctionImport", "ImportExport.c")) temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) code.putln( - '%s = PyImport_ImportModule("%s"); if (!%s) %s' % ( + '%s = PyImport_ImportModule("%s"); if (!%s) %s' % ( temp, module.qualified_name, temp, @@ -3007,33 +3007,33 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): def generate_type_init_code(self, env, code): # Generate type import code for extern extension types # and type ready code for non-extern ones. 
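# The split being generated next, as a sketch: `extern` classes fetch an
# existing type object from their defining module, while everything else
# builds its own type object and finalises it locally (illustrative
# pseudo-code, not the emitted C):
#
#     for cls in c_class_entries:
#         if cls.visibility == 'extern':
#             cls.typeptr = import_type(cls.module, cls.name)  # type import
#         else:
#             ready_type(cls.typeptr)                          # "type ready"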
- with ModuleImportGenerator(code) as import_generator: - for entry in env.c_class_entries: - if entry.visibility == 'extern' and not entry.utility_code_definition: - self.generate_type_import_code(env, entry.type, entry.pos, code, import_generator) - else: - self.generate_base_type_import_code(env, entry, code, import_generator) - self.generate_exttype_vtable_init_code(entry, code) - if entry.type.early_init: - self.generate_type_ready_code(entry, code) - - def generate_base_type_import_code(self, env, entry, code, import_generator): + with ModuleImportGenerator(code) as import_generator: + for entry in env.c_class_entries: + if entry.visibility == 'extern' and not entry.utility_code_definition: + self.generate_type_import_code(env, entry.type, entry.pos, code, import_generator) + else: + self.generate_base_type_import_code(env, entry, code, import_generator) + self.generate_exttype_vtable_init_code(entry, code) + if entry.type.early_init: + self.generate_type_ready_code(entry, code) + + def generate_base_type_import_code(self, env, entry, code, import_generator): base_type = entry.type.base_type if (base_type and base_type.module_name != env.qualified_name and not - base_type.is_builtin_type and not entry.utility_code_definition): - self.generate_type_import_code(env, base_type, self.pos, code, import_generator) + base_type.is_builtin_type and not entry.utility_code_definition): + self.generate_type_import_code(env, base_type, self.pos, code, import_generator) - def generate_type_import_code(self, env, type, pos, code, import_generator): + def generate_type_import_code(self, env, type, pos, code, import_generator): # If not already done, generate code to import the typeobject of an # extension type defined in another module, and extract its C method # table pointer if any. if type in env.types_imported: return - if type.name not in Code.ctypedef_builtins_map: - # see corresponding condition in generate_type_import_call() below! - code.globalstate.use_utility_code( - UtilityCode.load_cached("TypeImport", "ImportExport.c")) - self.generate_type_import_call(type, code, import_generator, error_pos=pos) + if type.name not in Code.ctypedef_builtins_map: + # see corresponding condition in generate_type_import_call() below! 
+ code.globalstate.use_utility_code( + UtilityCode.load_cached("TypeImport", "ImportExport.c")) + self.generate_type_import_call(type, code, import_generator, error_pos=pos) if type.vtabptr_cname: code.globalstate.use_utility_code( UtilityCode.load_cached('GetVTable', 'ImportExport.c')) @@ -3044,7 +3044,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.error_goto_if_null(type.vtabptr_cname, pos))) env.types_imported.add(type) - def generate_type_import_call(self, type, code, import_generator, error_code=None, error_pos=None): + def generate_type_import_call(self, type, code, import_generator, error_code=None, error_pos=None): if type.typedef_flag: objstruct = type.objstruct_cname else: @@ -3054,11 +3054,11 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): condition = replacement = None if module_name not in ('__builtin__', 'builtins'): module_name = '"%s"' % module_name - elif type.name in Code.ctypedef_builtins_map: - # Fast path for special builtins, don't actually import - ctypename = Code.ctypedef_builtins_map[type.name] - code.putln('%s = %s;' % (type.typeptr_cname, ctypename)) - return + elif type.name in Code.ctypedef_builtins_map: + # Fast path for special builtins, don't actually import + ctypename = Code.ctypedef_builtins_map[type.name] + code.putln('%s = %s;' % (type.typeptr_cname, ctypename)) + return else: module_name = '__Pyx_BUILTIN_MODULE_NAME' if type.name in Code.non_portable_builtins_map: @@ -3067,14 +3067,14 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): # Some builtin types have a tp_basicsize which differs from sizeof(...): sizeof_objstruct = Code.basicsize_builtins_map[objstruct] - if not error_code: - assert error_pos is not None - error_code = code.error_goto(error_pos) - - module = import_generator.imported_module(module_name, error_code) - code.put('%s = __Pyx_ImportType(%s, %s,' % ( + if not error_code: + assert error_pos is not None + error_code = code.error_goto(error_pos) + + module = import_generator.imported_module(module_name, error_code) + code.put('%s = __Pyx_ImportType(%s, %s,' % ( type.typeptr_cname, - module, + module, module_name)) if condition and replacement: @@ -3090,7 +3090,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): if sizeof_objstruct != objstruct: if not condition: code.putln("") # start in new line - code.putln("#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000") + code.putln("#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000") code.putln('sizeof(%s),' % objstruct) code.putln("#else") code.putln('sizeof(%s),' % sizeof_objstruct) @@ -3098,18 +3098,18 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): else: code.put('sizeof(%s), ' % objstruct) - # check_size - if type.check_size and type.check_size in ('error', 'warn', 'ignore'): - check_size = type.check_size - elif not type.is_external or type.is_subclassed: - check_size = 'error' - else: - raise RuntimeError("invalid value for check_size '%s' when compiling %s.%s" % ( - type.check_size, module_name, type.name)) - code.putln('__Pyx_ImportType_CheckSize_%s);' % check_size.title()) - - code.putln(' if (!%s) %s' % (type.typeptr_cname, error_code)) - + # check_size + if type.check_size and type.check_size in ('error', 'warn', 'ignore'): + check_size = type.check_size + elif not type.is_external or type.is_subclassed: + check_size = 'error' + else: + raise RuntimeError("invalid value for check_size '%s' when compiling %s.%s" % ( + type.check_size, module_name, type.name)) + code.putln('__Pyx_ImportType_CheckSize_%s);' % check_size.title()) + + 
code.putln(' if (!%s) %s' % (type.typeptr_cname, error_code)) + def generate_type_ready_code(self, entry, code): Nodes.CClassDefNode.generate_type_ready_code(entry, code) @@ -3131,7 +3131,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): c_method_entries = [ entry for entry in type.scope.cfunc_entries - if entry.func_cname] + if entry.func_cname] if c_method_entries: for meth_entry in c_method_entries: cast = meth_entry.type.signature_cast_string() @@ -3142,47 +3142,47 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): cast, meth_entry.func_cname)) - -class ModuleImportGenerator(object): - """ - Helper to generate module import while importing external types. - This is used to avoid excessive re-imports of external modules when multiple types are looked up. - """ - def __init__(self, code, imported_modules=None): - self.code = code - self.imported = {} - if imported_modules: - for name, cname in imported_modules.items(): - self.imported['"%s"' % name] = cname - self.temps = [] # remember original import order for freeing - - def imported_module(self, module_name_string, error_code): - if module_name_string in self.imported: - return self.imported[module_name_string] - - code = self.code - temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) - self.temps.append(temp) - code.putln('%s = PyImport_ImportModule(%s); if (unlikely(!%s)) %s' % ( - temp, module_name_string, temp, error_code)) - code.put_gotref(temp) - self.imported[module_name_string] = temp - return temp - - def __enter__(self): - return self - - def __exit__(self, *exc): - code = self.code - for temp in self.temps: - code.put_decref_clear(temp, py_object_type) - code.funcstate.release_temp(temp) - - + +class ModuleImportGenerator(object): + """ + Helper to generate module import while importing external types. + This is used to avoid excessive re-imports of external modules when multiple types are looked up. 
+ """ + def __init__(self, code, imported_modules=None): + self.code = code + self.imported = {} + if imported_modules: + for name, cname in imported_modules.items(): + self.imported['"%s"' % name] = cname + self.temps = [] # remember original import order for freeing + + def imported_module(self, module_name_string, error_code): + if module_name_string in self.imported: + return self.imported[module_name_string] + + code = self.code + temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) + self.temps.append(temp) + code.putln('%s = PyImport_ImportModule(%s); if (unlikely(!%s)) %s' % ( + temp, module_name_string, temp, error_code)) + code.put_gotref(temp) + self.imported[module_name_string] = temp + return temp + + def __enter__(self): + return self + + def __exit__(self, *exc): + code = self.code + for temp in self.temps: + code.put_decref_clear(temp, py_object_type) + code.funcstate.release_temp(temp) + + def generate_cfunction_declaration(entry, env, code, definition): from_cy_utility = entry.used and entry.utility_code_definition - if entry.used and entry.inline_func_in_pxd or (not entry.in_cinclude and ( - definition or entry.defined_in_pxd or entry.visibility == 'extern' or from_cy_utility)): + if entry.used and entry.inline_func_in_pxd or (not entry.in_cinclude and ( + definition or entry.defined_in_pxd or entry.visibility == 'extern' or from_cy_utility)): if entry.visibility == 'extern': storage_class = Naming.extern_c_macro dll_linkage = "DL_IMPORT" @@ -3203,7 +3203,7 @@ def generate_cfunction_declaration(entry, env, code, definition): type = CPtrType(type) header = type.declaration_code( - entry.cname, dll_linkage=dll_linkage) + entry.cname, dll_linkage=dll_linkage) modifiers = code.build_function_modifiers(entry.func_modifiers) code.putln("%s %s%s; /*proto*/" % ( storage_class, @@ -3216,7 +3216,7 @@ def generate_cfunction_declaration(entry, env, code, definition): # #------------------------------------------------------------------------------------ -refnanny_utility_code = UtilityCode.load("Refnanny", "ModuleSetupCode.c") +refnanny_utility_code = UtilityCode.load("Refnanny", "ModuleSetupCode.c") packed_struct_utility_code = UtilityCode(proto=""" #if defined(__GNUC__) diff --git a/contrib/tools/cython/Cython/Compiler/Naming.py b/contrib/tools/cython/Cython/Compiler/Naming.py index 68afd2b21c..2c9b620788 100644 --- a/contrib/tools/cython/Cython/Compiler/Naming.py +++ b/contrib/tools/cython/Cython/Compiler/Naming.py @@ -18,7 +18,7 @@ arg_prefix = pyrex_prefix + "arg_" funcdoc_prefix = pyrex_prefix + "doc_" enum_prefix = pyrex_prefix + "e_" func_prefix = pyrex_prefix + "f_" -func_prefix_api = pyrex_prefix + "api_f_" +func_prefix_api = pyrex_prefix + "api_f_" pyfunc_prefix = pyrex_prefix + "pf_" pywrap_prefix = pyrex_prefix + "pw_" genbody_prefix = pyrex_prefix + "gb_" @@ -28,7 +28,7 @@ const_prefix = pyrex_prefix + "k_" py_const_prefix = pyrex_prefix + "kp_" label_prefix = pyrex_prefix + "L" pymethdef_prefix = pyrex_prefix + "mdef_" -method_wrapper_prefix = pyrex_prefix + "specialmethod_" +method_wrapper_prefix = pyrex_prefix + "specialmethod_" methtab_prefix = pyrex_prefix + "methods_" memtab_prefix = pyrex_prefix + "members_" objstruct_prefix = pyrex_prefix + "obj_" @@ -38,7 +38,7 @@ type_prefix = pyrex_prefix + "t_" typeobj_prefix = pyrex_prefix + "type_" var_prefix = pyrex_prefix + "v_" varptr_prefix = pyrex_prefix + "vp_" -varptr_prefix_api = pyrex_prefix + "api_vp_" +varptr_prefix_api = pyrex_prefix + "api_vp_" wrapperbase_prefix= pyrex_prefix + "wrapperbase_" 
pybuffernd_prefix = pyrex_prefix + "pybuffernd_" pybufferstruct_prefix = pyrex_prefix + "pybuffer_" @@ -62,10 +62,10 @@ interned_prefixes = { 'codeobj': pyrex_prefix + "codeobj_", 'slice': pyrex_prefix + "slice_", 'ustring': pyrex_prefix + "ustring_", - 'umethod': pyrex_prefix + "umethod_", + 'umethod': pyrex_prefix + "umethod_", } -ctuple_type_prefix = pyrex_prefix + "ctuple_" +ctuple_type_prefix = pyrex_prefix + "ctuple_" args_cname = pyrex_prefix + "args" generator_cname = pyrex_prefix + "generator" sent_value_cname = pyrex_prefix + "sent_value" @@ -83,7 +83,7 @@ kwds_cname = pyrex_prefix + "kwds" lineno_cname = pyrex_prefix + "lineno" clineno_cname = pyrex_prefix + "clineno" cfilenm_cname = pyrex_prefix + "cfilenm" -local_tstate_cname = pyrex_prefix + "tstate" +local_tstate_cname = pyrex_prefix + "tstate" module_cname = pyrex_prefix + "m" moddoc_cname = pyrex_prefix + "mdoc" methtable_cname = pyrex_prefix + "methods" @@ -97,7 +97,7 @@ gilstate_cname = pyrex_prefix + "state" skip_dispatch_cname = pyrex_prefix + "skip_dispatch" empty_tuple = pyrex_prefix + "empty_tuple" empty_bytes = pyrex_prefix + "empty_bytes" -empty_unicode = pyrex_prefix + "empty_unicode" +empty_unicode = pyrex_prefix + "empty_unicode" print_function = pyrex_prefix + "print" print_function_kwargs = pyrex_prefix + "print_kwargs" cleanup_cname = pyrex_prefix + "module_cleanup" @@ -117,9 +117,9 @@ frame_code_cname = pyrex_prefix + "frame_code" binding_cfunc = pyrex_prefix + "binding_PyCFunctionType" fused_func_prefix = pyrex_prefix + 'fuse_' quick_temp_cname = pyrex_prefix + "temp" # temp variable for quick'n'dirty temping -tp_dict_version_temp = pyrex_prefix + "tp_dict_version" -obj_dict_version_temp = pyrex_prefix + "obj_dict_version" -type_dict_guard_temp = pyrex_prefix + "type_dict_guard" +tp_dict_version_temp = pyrex_prefix + "tp_dict_version" +obj_dict_version_temp = pyrex_prefix + "obj_dict_version" +type_dict_guard_temp = pyrex_prefix + "type_dict_guard" cython_runtime_cname = pyrex_prefix + "cython_runtime" global_code_object_cache_find = pyrex_prefix + 'find_code_object' diff --git a/contrib/tools/cython/Cython/Compiler/Nodes.py b/contrib/tools/cython/Cython/Compiler/Nodes.py index 6fbad234c7..6436c5002d 100644 --- a/contrib/tools/cython/Cython/Compiler/Nodes.py +++ b/contrib/tools/cython/Cython/Compiler/Nodes.py @@ -22,22 +22,22 @@ from . import PyrexTypes from . import TypeSlots from .PyrexTypes import py_object_type, error_type from .Symtab import (ModuleScope, LocalScope, ClosureScope, - StructOrUnionScope, PyClassScope, CppClassScope, TemplateScope) + StructOrUnionScope, PyClassScope, CppClassScope, TemplateScope) from .Code import UtilityCode -from .StringEncoding import EncodedString -from . import Future +from .StringEncoding import EncodedString +from . import Future from . import Options from . 
import DebugFlags from .Pythran import has_np_pythran, pythran_type, is_pythran_buffer -from ..Utils import add_metaclass +from ..Utils import add_metaclass -if sys.version_info[0] >= 3: - _py_int_types = int -else: - _py_int_types = (int, long) +if sys.version_info[0] >= 3: + _py_int_types = int +else: + _py_int_types = (int, long) + - def relative_position(pos): return (pos[0].get_filenametable_entry(), pos[1]) @@ -69,25 +69,25 @@ def embed_position(pos, docstring): def analyse_type_annotation(annotation, env, assigned_value=None): - base_type = None + base_type = None is_ambiguous = False - explicit_pytype = explicit_ctype = False - if annotation.is_dict_literal: + explicit_pytype = explicit_ctype = False + if annotation.is_dict_literal: warning(annotation.pos, "Dicts should no longer be used as type annotations. Use 'cython.int' etc. directly.") - for name, value in annotation.key_value_pairs: - if not name.is_string_literal: - continue - if name.value in ('type', b'type'): - explicit_pytype = True - if not explicit_ctype: - annotation = value - elif name.value in ('ctype', b'ctype'): - explicit_ctype = True - annotation = value - if explicit_pytype and explicit_ctype: - warning(annotation.pos, "Duplicate type declarations found in signature annotation") - arg_type = annotation.analyse_as_type(env) + for name, value in annotation.key_value_pairs: + if not name.is_string_literal: + continue + if name.value in ('type', b'type'): + explicit_pytype = True + if not explicit_ctype: + annotation = value + elif name.value in ('ctype', b'ctype'): + explicit_ctype = True + annotation = value + if explicit_pytype and explicit_ctype: + warning(annotation.pos, "Duplicate type declarations found in signature annotation") + arg_type = annotation.analyse_as_type(env) if annotation.is_name and not annotation.cython_attribute and annotation.name in ('int', 'long', 'float'): # Map builtin numeric Python types to C types in safe cases. if assigned_value is not None and arg_type is not None and not arg_type.is_pyobject: @@ -102,19 +102,19 @@ def analyse_type_annotation(annotation, env, assigned_value=None): elif arg_type is not None and annotation.is_string_literal: warning(annotation.pos, "Strings should no longer be used for type declarations. Use 'cython.int' etc. 
directly.") - if arg_type is not None: - if explicit_pytype and not explicit_ctype and not arg_type.is_pyobject: - warning(annotation.pos, - "Python type declaration in signature annotation does not refer to a Python type") - base_type = CAnalysedBaseTypeNode( - annotation.pos, type=arg_type, is_arg=True) + if arg_type is not None: + if explicit_pytype and not explicit_ctype and not arg_type.is_pyobject: + warning(annotation.pos, + "Python type declaration in signature annotation does not refer to a Python type") + base_type = CAnalysedBaseTypeNode( + annotation.pos, type=arg_type, is_arg=True) elif is_ambiguous: warning(annotation.pos, "Ambiguous types in annotation, ignoring") - else: + else: warning(annotation.pos, "Unknown type declaration in annotation, ignoring") - return base_type, arg_type - - + return base_type, arg_type + + def write_func_call(func, codewriter_class): def f(*args, **kwds): if len(args) > 1 and isinstance(args[1], codewriter_class): @@ -122,10 +122,10 @@ def write_func_call(func, codewriter_class): # but only if new code is generated node, code = args[:2] marker = ' /* %s -> %s.%s %s */' % ( - ' ' * code.call_level, - node.__class__.__name__, - func.__name__, - node.pos[1:]) + ' ' * code.call_level, + node.__class__.__name__, + func.__name__, + node.pos[1:]) pristine = code.buffer.stream.tell() code.putln(marker) start = code.buffer.stream.tell() @@ -133,10 +133,10 @@ def write_func_call(func, codewriter_class): res = func(*args, **kwds) code.call_level -= 4 if start == code.buffer.stream.tell(): - # no code written => undo writing marker - code.buffer.stream.truncate(pristine) + # no code written => undo writing marker + code.buffer.stream.truncate(pristine) else: - marker = marker.replace('->', '<-', 1) + marker = marker.replace('->', '<-', 1) code.putln(marker) return res else: @@ -170,7 +170,7 @@ class CheckAnalysers(type): def call(*args, **kwargs): retval = func(*args, **kwargs) if retval is None: - print('%s %s %s' % (name, args, kwargs)) + print('%s %s %s' % (name, args, kwargs)) return retval return call @@ -181,14 +181,14 @@ class CheckAnalysers(type): return super(CheckAnalysers, cls).__new__(cls, name, bases, attrs) -def _with_metaclass(cls): - if DebugFlags.debug_trace_code_generation: - return add_metaclass(VerboseCodeWriter)(cls) - #return add_metaclass(CheckAnalysers)(cls) - return cls - - -@_with_metaclass +def _with_metaclass(cls): + if DebugFlags.debug_trace_code_generation: + return add_metaclass(VerboseCodeWriter)(cls) + #return add_metaclass(CheckAnalysers)(cls) + return cls + + +@_with_metaclass class Node(object): # pos (string, int, int) Source file position # is_name boolean Is a NameNode @@ -199,7 +199,7 @@ class Node(object): is_nonecheck = 0 is_literal = 0 is_terminator = 0 - is_wrapper = False # is a DefNode wrapper for a C function + is_wrapper = False # is a DefNode wrapper for a C function temps = None # All descendants should set child_attrs to a list of the attributes @@ -207,9 +207,9 @@ class Node(object): # can either contain a single node or a list of nodes. See Visitor.py. child_attrs = None - # Subset of attributes that are evaluated in the outer scope (e.g. function default arguments). - outer_attrs = None - + # Subset of attributes that are evaluated in the outer scope (e.g. function default arguments). 
+ outer_attrs = None + cf_state = None # This may be an additional (or 'actual') type that will be checked when @@ -225,7 +225,7 @@ class Node(object): gil_message = "Operation" nogil_check = None - in_nogil_context = False # For use only during code generation. + in_nogil_context = False # For use only during code generation. def gil_error(self, env=None): error(self.pos, "%s not allowed without gil" % self.gil_message) @@ -349,9 +349,9 @@ class Node(object): if not self.pos: return u'' source_desc, line, col = self.pos - contents = source_desc.get_lines(encoding='ASCII', error_handling='ignore') + contents = source_desc.get_lines(encoding='ASCII', error_handling='ignore') # line numbers start at 1 - lines = contents[max(0, line-3):line] + lines = contents[max(0, line-3):line] current = lines[-1] if mark_column: current = current[:col] + marker + current[col:] @@ -420,10 +420,10 @@ class StatListNode(Node): child_attrs = ["stats"] - @staticmethod + @staticmethod def create_analysed(pos, env, *args, **kw): node = StatListNode(pos, *args, **kw) - return node # No node-specific analysis needed + return node # No node-specific analysis needed def analyse_declarations(self, env): #print "StatListNode.analyse_declarations" ### @@ -432,8 +432,8 @@ class StatListNode(Node): def analyse_expressions(self, env): #print "StatListNode.analyse_expressions" ### - self.stats = [stat.analyse_expressions(env) - for stat in self.stats] + self.stats = [stat.analyse_expressions(env) + for stat in self.stats] return self def generate_function_definitions(self, env, code): @@ -529,7 +529,7 @@ class CDeclaratorNode(Node): # Only C++ functions have templates. return None - + class CNameDeclaratorNode(CDeclaratorNode): # name string The Cython name being declared # cname string or None C name, if specified @@ -556,37 +556,37 @@ class CNameDeclaratorNode(CDeclaratorNode): self.type = base_type return self, base_type - + class CPtrDeclaratorNode(CDeclaratorNode): # base CDeclaratorNode child_attrs = ["base"] - def analyse_templates(self): - return self.base.analyse_templates() - + def analyse_templates(self): + return self.base.analyse_templates() + def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False): if base_type.is_pyobject: - error(self.pos, "Pointer base type cannot be a Python object") + error(self.pos, "Pointer base type cannot be a Python object") ptr_type = PyrexTypes.c_ptr_type(base_type) return self.base.analyse(ptr_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd) - + class CReferenceDeclaratorNode(CDeclaratorNode): # base CDeclaratorNode child_attrs = ["base"] - def analyse_templates(self): - return self.base.analyse_templates() - + def analyse_templates(self): + return self.base.analyse_templates() + def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False): if base_type.is_pyobject: - error(self.pos, "Reference base type cannot be a Python object") + error(self.pos, "Reference base type cannot be a Python object") ref_type = PyrexTypes.c_ref_type(base_type) return self.base.analyse(ref_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd) - + class CArrayDeclaratorNode(CDeclaratorNode): # base CDeclaratorNode # dimension ExprNode @@ -594,7 +594,7 @@ class CArrayDeclaratorNode(CDeclaratorNode): child_attrs = ["base", "dimension"] def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False): - if (base_type.is_cpp_class and base_type.is_template_type()) or base_type.is_cfunction: + if (base_type.is_cpp_class and 
base_type.is_template_type()) or base_type.is_cfunction: from .ExprNodes import TupleNode if isinstance(self.dimension, TupleNode): args = self.dimension.args @@ -622,11 +622,11 @@ class CArrayDeclaratorNode(CDeclaratorNode): else: size = None if not base_type.is_complete(): - error(self.pos, "Array element type '%s' is incomplete" % base_type) + error(self.pos, "Array element type '%s' is incomplete" % base_type) if base_type.is_pyobject: - error(self.pos, "Array element cannot be a Python object") + error(self.pos, "Array element cannot be a Python object") if base_type.is_cfunction: - error(self.pos, "Array element cannot be a function") + error(self.pos, "Array element cannot be a function") array_type = PyrexTypes.c_array_type(base_type, size) return self.base.analyse(array_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd) @@ -672,15 +672,15 @@ class CFuncDeclaratorNode(CDeclaratorNode): return None def analyse(self, return_type, env, nonempty=0, directive_locals=None, visibility=None, in_pxd=False): - if directive_locals is None: - directive_locals = {} + if directive_locals is None: + directive_locals = {} if nonempty: nonempty -= 1 func_type_args = [] for i, arg_node in enumerate(self.args): name_declarator, type = arg_node.analyse( - env, nonempty=nonempty, - is_self_arg=(i == 0 and env.is_c_class_scope and 'staticmethod' not in env.directives)) + env, nonempty=nonempty, + is_self_arg=(i == 0 and env.is_c_class_scope and 'staticmethod' not in env.directives)) name = name_declarator.name if name in directive_locals: type_node = directive_locals[name] @@ -694,8 +694,8 @@ class CFuncDeclaratorNode(CDeclaratorNode): else: type = other_type if name_declarator.cname: - error(self.pos, "Function argument cannot have C name specification") - if i == 0 and env.is_c_class_scope and type.is_unspecified: + error(self.pos, "Function argument cannot have C name specification") + if i == 0 and env.is_c_class_scope and type.is_unspecified: # fix the type of self type = env.parent_type # Turn *[] argument into ** @@ -721,7 +721,7 @@ class CFuncDeclaratorNode(CDeclaratorNode): if (return_type.is_pyobject and (self.exception_value or self.exception_check) and self.exception_check != '+'): - error(self.pos, "Exception clause not allowed for function returning Python object") + error(self.pos, "Exception clause not allowed for function returning Python object") else: if self.exception_value is None and self.exception_check and self.exception_check != '+': # Use an explicit exception return value to speed up exception checks. 
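The hunks on either side of this point restore Cython's exception-clause checks for cdef signatures: a function returning a Python object may not carry an except clause other than 'except +' (NULL already signals the error), and when checking is requested without an explicit value, an explicit error return value is chosen to speed up the check. A rough sketch of the first rule only, with plain booleans standing in for the real type objects (a simplification, not the compiler's actual API):

    def check_exception_clause(returns_pyobject, exception_check, has_exception_value):
        # Sketch of the guard in CFuncDeclaratorNode.analyse: only 'except +'
        # (C++ exception translation) is allowed on functions that return
        # Python objects, since those already signal errors via NULL.
        if returns_pyobject and (has_exception_value or exception_check):
            if exception_check != '+':
                return "Exception clause not allowed for function returning Python object"
        return None

    # e.g. 'cdef object f() except -1' would be rejected:
    print(check_exception_clause(True, exception_check=True, has_exception_value=True))
    # a non-object return with 'except +' passes the check:
    print(check_exception_clause(False, exception_check='+', has_exception_value=False))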
@@ -741,11 +741,11 @@ class CFuncDeclaratorNode(CDeclaratorNode): and not exc_val_type.is_pyobject and not (exc_val_type.is_cfunction and not exc_val_type.return_type.is_pyobject - and not exc_val_type.args) - and not (exc_val_type == PyrexTypes.c_char_type - and self.exception_value.value == '*')): + and not exc_val_type.args) + and not (exc_val_type == PyrexTypes.c_char_type + and self.exception_value.value == '*')): error(self.exception_value.pos, - "Exception value must be a Python exception or cdef function with no arguments or *.") + "Exception value must be a Python exception or cdef function with no arguments or *.") exc_val = self.exception_value else: self.exception_value = self.exception_value.coerce_to( @@ -760,15 +760,15 @@ class CFuncDeclaratorNode(CDeclaratorNode): "Exception value incompatible with function return type") exc_check = self.exception_check if return_type.is_cfunction: - error(self.pos, "Function cannot return a function") + error(self.pos, "Function cannot return a function") func_type = PyrexTypes.CFuncType( return_type, func_type_args, self.has_varargs, - optional_arg_count=self.optional_arg_count, - exception_value=exc_val, exception_check=exc_check, - calling_convention=self.base.calling_convention, - nogil=self.nogil, with_gil=self.with_gil, is_overridable=self.overridable, - is_const_method=self.is_const_method, - templates=self.templates) + optional_arg_count=self.optional_arg_count, + exception_value=exc_val, exception_check=exc_check, + calling_convention=self.base.calling_convention, + nogil=self.nogil, with_gil=self.with_gil, is_overridable=self.overridable, + is_const_method=self.is_const_method, + templates=self.templates) if self.optional_arg_count: if func_type.is_fused: @@ -802,7 +802,7 @@ class CFuncDeclaratorNode(CDeclaratorNode): arg_count_member = '%sn' % Naming.pyrex_prefix scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos) - for arg in func_type.args[len(func_type.args) - self.optional_arg_count:]: + for arg in func_type.args[len(func_type.args) - self.optional_arg_count:]: scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=True, allow_memoryview=True) struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name) @@ -811,12 +811,12 @@ class CFuncDeclaratorNode(CDeclaratorNode): struct_cname = PyrexTypes.get_fused_cname(fused_cname, struct_cname) op_args_struct = env.global_scope().declare_struct_or_union( - name=struct_cname, - kind='struct', - scope=scope, - typedef_flag=0, - pos=self.pos, - cname=struct_cname) + name=struct_cname, + kind='struct', + scope=scope, + typedef_flag=0, + pos=self.pos, + cname=struct_cname) op_args_struct.defined_in_pxd = 1 op_args_struct.used = 1 @@ -854,7 +854,7 @@ class CArgDeclNode(Node): # is_dynamic boolean Non-literal arg stored inside CyFunction child_attrs = ["base_type", "declarator", "default", "annotation"] - outer_attrs = ["default", "annotation"] + outer_attrs = ["default", "annotation"] is_self_arg = 0 is_type_arg = 0 @@ -868,7 +868,7 @@ class CArgDeclNode(Node): annotation = None is_dynamic = 0 - def analyse(self, env, nonempty=0, is_self_arg=False): + def analyse(self, env, nonempty=0, is_self_arg=False): if is_self_arg: self.base_type.is_self_arg = self.is_self_arg = True if self.type is None: @@ -878,7 +878,7 @@ class CArgDeclNode(Node): if self.base_type.is_basic_c_type: # char, short, long called "int" type = self.base_type.analyse(env, could_be_name=True) - arg_name = type.empty_declaration_code() + arg_name = type.empty_declaration_code() else: arg_name = 
self.base_type.name self.declarator.name = EncodedString(arg_name) @@ -919,8 +919,8 @@ class CArgDeclNode(Node): if not annotation: return None base_type, arg_type = analyse_type_annotation(annotation, env, assigned_value=self.default) - if base_type is not None: - self.base_type = base_type + if base_type is not None: + self.base_type = base_type return arg_type def calculate_default_value_code(self, code): @@ -937,7 +937,7 @@ class CArgDeclNode(Node): if self.default: self.default.annotate(code) - def generate_assignment_code(self, code, target=None, overloaded_assignment=False): + def generate_assignment_code(self, code, target=None, overloaded_assignment=False): default = self.default if default is None or default.is_literal: return @@ -945,7 +945,7 @@ class CArgDeclNode(Node): target = self.calculate_default_value_code(code) default.generate_evaluation_code(code) default.make_owned_reference(code) - result = default.result() if overloaded_assignment else default.result_as(self.type) + result = default.result() if overloaded_assignment else default.result_as(self.type) code.putln("%s = %s;" % (target, result)) if self.type.is_pyobject: code.put_giveref(default.result()) @@ -964,16 +964,16 @@ class CBaseTypeNode(Node): def analyse_as_type(self, env): return self.analyse(env) - + class CAnalysedBaseTypeNode(Node): # type type child_attrs = [] - def analyse(self, env, could_be_name=False): + def analyse(self, env, could_be_name=False): return self.type - + class CSimpleBaseTypeNode(CBaseTypeNode): # name string # module_path [string] Qualifying name components @@ -990,7 +990,7 @@ class CSimpleBaseTypeNode(CBaseTypeNode): is_basic_c_type = False complex = False - def analyse(self, env, could_be_name=False): + def analyse(self, env, could_be_name=False): # Return type descriptor. 
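Context for the inject_type_from_annotations lines above: the argument's default value is passed to analyse_type_annotation so that bare int/long/float annotations map to C types only in safe cases; a Python-object default such as None makes the mapping ambiguous, and the annotation is ignored with a warning. A hedged sketch of that decision; the 'float' -> double mapping follows Cython's usual convention, but the function and dict here are illustrative:

    def map_builtin_annotation(name, default_is_py_object):
        # 'Safe cases' rule: a bare builtin numeric annotation becomes a
        # C type unless the default value forces a Python object.
        c_types = {'int': 'int', 'long': 'long', 'float': 'double'}
        if name not in c_types:
            return 'object'
        if default_is_py_object:
            return None   # ambiguous -> annotation ignored with a warning
        return c_types[name]

    print(map_builtin_annotation('int', default_is_py_object=False))  # 'int' (C int)
    print(map_builtin_annotation('int', default_is_py_object=True))   # None (ignored)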
#print "CSimpleBaseTypeNode.analyse: is_self_arg =", self.is_self_arg ### type = None @@ -1073,7 +1073,7 @@ class MemoryViewSliceTypeNode(CBaseTypeNode): name = 'memoryview' child_attrs = ['base_type_node', 'axes'] - def analyse(self, env, could_be_name=False): + def analyse(self, env, could_be_name=False): base_type = self.base_type_node.analyse(env) if base_type.is_error: return base_type @@ -1082,7 +1082,7 @@ class MemoryViewSliceTypeNode(CBaseTypeNode): try: axes_specs = MemoryView.get_axes_specs(env, self.axes) - except CompileError as e: + except CompileError as e: error(e.position, e.message_only) self.type = PyrexTypes.ErrorType() return self.type @@ -1091,7 +1091,7 @@ class MemoryViewSliceTypeNode(CBaseTypeNode): self.type = error_type else: self.type = PyrexTypes.MemoryViewSliceType(base_type, axes_specs) - self.type.validate_memslice_dtype(self.pos) + self.type.validate_memslice_dtype(self.pos) self.use_memview_utilities(env) return self.type @@ -1109,7 +1109,7 @@ class CNestedBaseTypeNode(CBaseTypeNode): child_attrs = ['base_type'] - def analyse(self, env, could_be_name=None): + def analyse(self, env, could_be_name=None): base_type = self.base_type.analyse(env) if base_type is PyrexTypes.error_type: return PyrexTypes.error_type @@ -1139,12 +1139,12 @@ class TemplatedTypeNode(CBaseTypeNode): name = None - def analyse(self, env, could_be_name=False, base_type=None): + def analyse(self, env, could_be_name=False, base_type=None): if base_type is None: base_type = self.base_type_node.analyse(env) if base_type.is_error: return base_type - if base_type.is_cpp_class and base_type.is_template_type(): + if base_type.is_cpp_class and base_type.is_template_type(): # Templated class if self.keyword_args and self.keyword_args.key_value_pairs: error(self.pos, "c++ templates cannot take keyword arguments") @@ -1172,8 +1172,8 @@ class TemplatedTypeNode(CBaseTypeNode): if sys.version_info[0] < 3: # Py 2.x enforces byte strings as keyword arguments ... 
- options = dict([(name.encode('ASCII'), value) - for name, value in options.items()]) + options = dict([(name.encode('ASCII'), value) + for name, value in options.items()]) self.type = PyrexTypes.BufferType(base_type, **options) if has_np_pythran(env) and is_pythran_buffer(self.type): @@ -1192,10 +1192,10 @@ class TemplatedTypeNode(CBaseTypeNode): dimension = None else: dimension = self.positional_args[0] - self.array_declarator = CArrayDeclaratorNode( - self.pos, - base=empty_declarator, - dimension=dimension) + self.array_declarator = CArrayDeclaratorNode( + self.pos, + base=empty_declarator, + dimension=dimension) self.type = self.array_declarator.analyse(base_type, env)[1] if self.type.is_fused and env.fused_to_specific: @@ -1203,37 +1203,37 @@ class TemplatedTypeNode(CBaseTypeNode): return self.type - + class CComplexBaseTypeNode(CBaseTypeNode): # base_type CBaseTypeNode # declarator CDeclaratorNode child_attrs = ["base_type", "declarator"] - def analyse(self, env, could_be_name=False): + def analyse(self, env, could_be_name=False): base = self.base_type.analyse(env, could_be_name) _, type = self.declarator.analyse(base, env) return type -class CTupleBaseTypeNode(CBaseTypeNode): - # components [CBaseTypeNode] - - child_attrs = ["components"] - - def analyse(self, env, could_be_name=False): - component_types = [] - for c in self.components: - type = c.analyse(env) - if type.is_pyobject: - error(c.pos, "Tuple types can't (yet) contain Python objects.") - return error_type - component_types.append(type) - entry = env.declare_tuple_type(self.pos, component_types) - entry.used = True - return entry.type - - +class CTupleBaseTypeNode(CBaseTypeNode): + # components [CBaseTypeNode] + + child_attrs = ["components"] + + def analyse(self, env, could_be_name=False): + component_types = [] + for c in self.components: + type = c.analyse(env) + if type.is_pyobject: + error(c.pos, "Tuple types can't (yet) contain Python objects.") + return error_type + component_types.append(type) + entry = env.declare_tuple_type(self.pos, component_types) + entry.used = True + return entry.type + + class FusedTypeNode(CBaseTypeNode): """ Represents a fused type in a ctypedef statement: @@ -1253,7 +1253,7 @@ class FusedTypeNode(CBaseTypeNode): # Omit the typedef declaration that self.declarator would produce entry.in_cinclude = True - def analyse(self, env, could_be_name=False): + def analyse(self, env, could_be_name=False): types = [] for type_node in self.types: type = type_node.analyse_as_type(env) @@ -1278,7 +1278,7 @@ class CConstTypeNode(CBaseTypeNode): child_attrs = ["base_type"] - def analyse(self, env, could_be_name=False): + def analyse(self, env, could_be_name=False): base = self.base_type.analyse(env, could_be_name) if base.is_pyobject: error(self.pos, @@ -1305,7 +1305,7 @@ class CVarDefNode(StatNode): decorators = None directive_locals = None - def analyse_declarations(self, env, dest_scope=None): + def analyse_declarations(self, env, dest_scope=None): if self.directive_locals is None: self.directive_locals = {} if not dest_scope: @@ -1339,18 +1339,18 @@ class CVarDefNode(StatNode): for declarator in self.declarators: if (len(self.declarators) > 1 - and not isinstance(declarator, CNameDeclaratorNode) - and env.directives['warn.multiple_declarators']): - warning( - declarator.pos, - "Non-trivial type declarators in shared declaration (e.g. mix of pointers and values). 
" + and not isinstance(declarator, CNameDeclaratorNode) + and env.directives['warn.multiple_declarators']): + warning( + declarator.pos, + "Non-trivial type declarators in shared declaration (e.g. mix of pointers and values). " "Each pointer declaration should be on its own line.", 1) - create_extern_wrapper = (self.overridable - and self.visibility == 'extern' - and env.is_module_scope) - if create_extern_wrapper: - declarator.overridable = False + create_extern_wrapper = (self.overridable + and self.visibility == 'extern' + and env.is_module_scope) + if create_extern_wrapper: + declarator.overridable = False if isinstance(declarator, CFuncDeclaratorNode): name_declarator, type = declarator.analyse( base_type, env, directive_locals=self.directive_locals, visibility=visibility, in_pxd=self.in_pxd) @@ -1359,9 +1359,9 @@ class CVarDefNode(StatNode): base_type, env, visibility=visibility, in_pxd=self.in_pxd) if not type.is_complete(): if not (self.visibility == 'extern' and type.is_array or type.is_memoryviewslice): - error(declarator.pos, "Variable type '%s' is incomplete" % type) + error(declarator.pos, "Variable type '%s' is incomplete" % type) if self.visibility == 'extern' and type.is_pyobject: - error(declarator.pos, "Python object cannot be declared extern") + error(declarator.pos, "Python object cannot be declared extern") name = name_declarator.name cname = name_declarator.cname if name == '': @@ -1370,27 +1370,27 @@ class CVarDefNode(StatNode): if type.is_reference and self.visibility != 'extern': error(declarator.pos, "C++ references cannot be declared; use a pointer instead") if type.is_cfunction: - if 'staticmethod' in env.directives: - type.is_static_method = True - self.entry = dest_scope.declare_cfunction( - name, type, declarator.pos, - cname=cname, visibility=self.visibility, in_pxd=self.in_pxd, - api=self.api, modifiers=self.modifiers, overridable=self.overridable) + if 'staticmethod' in env.directives: + type.is_static_method = True + self.entry = dest_scope.declare_cfunction( + name, type, declarator.pos, + cname=cname, visibility=self.visibility, in_pxd=self.in_pxd, + api=self.api, modifiers=self.modifiers, overridable=self.overridable) if self.entry is not None: self.entry.directive_locals = copy.copy(self.directive_locals) - if create_extern_wrapper: - self.entry.type.create_to_py_utility_code(env) - self.entry.create_wrapper = True + if create_extern_wrapper: + self.entry.type.create_to_py_utility_code(env) + self.entry.create_wrapper = True else: if self.overridable: warning(self.pos, "cpdef variables will not be supported in Cython 3; " "currently they are no different from cdef variables", 2) if self.directive_locals: error(self.pos, "Decorators can only be followed by functions") - self.entry = dest_scope.declare_var( - name, type, declarator.pos, - cname=cname, visibility=visibility, in_pxd=self.in_pxd, - api=self.api, is_cdef=1) + self.entry = dest_scope.declare_var( + name, type, declarator.pos, + cname=cname, visibility=visibility, in_pxd=self.in_pxd, + api=self.api, is_cdef=1) if Options.docstrings: self.entry.doc = embed_position(self.pos, self.doc) @@ -1412,8 +1412,8 @@ class CStructOrUnionDefNode(StatNode): def declare(self, env, scope=None): self.entry = env.declare_struct_or_union( self.name, self.kind, scope, self.typedef_flag, self.pos, - self.cname, visibility=self.visibility, api=self.api, - packed=self.packed) + self.cname, visibility=self.visibility, api=self.api, + packed=self.packed) def analyse_declarations(self, env): scope = None @@ -1449,7 
+1449,7 @@ class CppClassNode(CStructOrUnionDefNode, BlockNode): # attributes [CVarDefNode] or None # entry Entry # base_classes [CBaseTypeNode] - # templates [(string, bool)] or None + # templates [(string, bool)] or None # decorators [DecoratorNode] or None decorators = None @@ -1458,25 +1458,25 @@ class CppClassNode(CStructOrUnionDefNode, BlockNode): if self.templates is None: template_types = None else: - template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required) - for template_name, required in self.templates] - num_optional_templates = sum(not required for _, required in self.templates) - if num_optional_templates and not all(required for _, required in self.templates[:-num_optional_templates]): - error(self.pos, "Required template parameters must precede optional template parameters.") + template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required) + for template_name, required in self.templates] + num_optional_templates = sum(not required for _, required in self.templates) + if num_optional_templates and not all(required for _, required in self.templates[:-num_optional_templates]): + error(self.pos, "Required template parameters must precede optional template parameters.") self.entry = env.declare_cpp_class( - self.name, None, self.pos, self.cname, - base_classes=[], visibility=self.visibility, templates=template_types) + self.name, None, self.pos, self.cname, + base_classes=[], visibility=self.visibility, templates=template_types) def analyse_declarations(self, env): - if self.templates is None: - template_types = template_names = None - else: - template_names = [template_name for template_name, _ in self.templates] - template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required) - for template_name, required in self.templates] + if self.templates is None: + template_types = template_names = None + else: + template_names = [template_name for template_name, _ in self.templates] + template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required) + for template_name, required in self.templates] scope = None if self.attributes is not None: - scope = CppClassScope(self.name, env, templates=template_names) + scope = CppClassScope(self.name, env, templates=template_names) def base_ok(base_class): if base_class.is_cpp_class or base_class.is_struct: return True @@ -1485,7 +1485,7 @@ class CppClassNode(CStructOrUnionDefNode, BlockNode): base_class_types = filter(base_ok, [b.analyse(scope or env) for b in self.base_classes]) self.entry = env.declare_cpp_class( self.name, scope, self.pos, - self.cname, base_class_types, visibility=self.visibility, templates=template_types) + self.cname, base_class_types, visibility=self.visibility, templates=template_types) if self.entry is None: return self.entry.is_cpp_class = 1 @@ -1503,14 +1503,14 @@ class CppClassNode(CStructOrUnionDefNode, BlockNode): if self.in_pxd and not env.in_cinclude: self.entry.defined_in_pxd = 1 for attr in self.attributes: - declare = getattr(attr, 'declare', None) - if declare: - attr.declare(scope) + declare = getattr(attr, 'declare', None) + if declare: + attr.declare(scope) attr.analyse_declarations(scope) for func in func_attributes(self.attributes): defined_funcs.append(func) if self.templates is not None: - func.template_declaration = "template <typename %s>" % ", typename ".join(template_names) + func.template_declaration = "template <typename %s>" % ", typename ".join(template_names) self.body = StatListNode(self.pos, stats=defined_funcs) 
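The CppClassNode.declare hunk above enforces that required template parameters precede optional (defaulted) ones, matching C++'s own rule for default template arguments. The same check in isolation, operating on the (name, required) pairs as stored on the node:

    def templates_well_ordered(templates):
        # Required template parameters must precede optional ones.
        num_optional = sum(not required for _, required in templates)
        if num_optional:
            return all(required for _, required in templates[:-num_optional])
        return True

    print(templates_well_ordered([('T', True), ('U', False)]))  # True
    print(templates_well_ordered([('U', False), ('T', True)]))  # False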
self.scope = scope @@ -1542,11 +1542,11 @@ class CEnumDefNode(StatNode): child_attrs = ["items"] def declare(self, env): - self.entry = env.declare_enum( - self.name, self.pos, - cname=self.cname, typedef_flag=self.typedef_flag, - visibility=self.visibility, api=self.api, - create_wrapper=self.create_wrapper) + self.entry = env.declare_enum( + self.name, self.pos, + cname=self.cname, typedef_flag=self.typedef_flag, + visibility=self.visibility, api=self.api, + create_wrapper=self.create_wrapper) def analyse_declarations(self, env): if self.items is not None: @@ -1560,19 +1560,19 @@ class CEnumDefNode(StatNode): def generate_execution_code(self, code): if self.visibility == 'public' or self.api: - code.mark_pos(self.pos) + code.mark_pos(self.pos) temp = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True) for item in self.entry.enum_values: code.putln("%s = PyInt_FromLong(%s); %s" % ( - temp, - item.cname, - code.error_goto_if_null(temp, item.pos))) + temp, + item.cname, + code.error_goto_if_null(temp, item.pos))) code.put_gotref(temp) code.putln('if (PyDict_SetItemString(%s, "%s", %s) < 0) %s' % ( - Naming.moddict_cname, - item.name, - temp, - code.error_goto(item.pos))) + Naming.moddict_cname, + item.name, + temp, + code.error_goto(item.pos))) code.put_decref_clear(temp, PyrexTypes.py_object_type) code.funcstate.release_temp(temp) @@ -1590,14 +1590,14 @@ class CEnumDefItemNode(StatNode): if not self.value.type.is_int: self.value = self.value.coerce_to(PyrexTypes.c_int_type, env) self.value = self.value.analyse_const_expression(env) - entry = env.declare_const( - self.name, enum_entry.type, - self.value, self.pos, cname=self.cname, - visibility=enum_entry.visibility, api=enum_entry.api, - create_wrapper=enum_entry.create_wrapper and enum_entry.name is None) + entry = env.declare_const( + self.name, enum_entry.type, + self.value, self.pos, cname=self.cname, + visibility=enum_entry.visibility, api=enum_entry.api, + create_wrapper=enum_entry.create_wrapper and enum_entry.name is None) enum_entry.enum_values.append(entry) - if enum_entry.name: - enum_entry.type.values.append(entry.name) + if enum_entry.name: + enum_entry.type.values.append(entry.name) class CTypeDefNode(StatNode): @@ -1616,9 +1616,9 @@ class CTypeDefNode(StatNode): name = name_declarator.name cname = name_declarator.cname - entry = env.declare_typedef( - name, type, self.pos, - cname=cname, visibility=self.visibility, api=self.api) + entry = env.declare_typedef( + name, type, self.pos, + cname=cname, visibility=self.visibility, api=self.api) if type.is_fused: entry.in_cinclude = True @@ -1644,11 +1644,11 @@ class FuncDefNode(StatNode, BlockNode): # pymethdef_required boolean Force Python method struct generation # directive_locals { string : ExprNode } locals defined by cython.locals(...) # directive_returns [ExprNode] type defined by cython.returns(...) - # star_arg PyArgDeclNode or None * argument - # starstar_arg PyArgDeclNode or None ** argument - # - # is_async_def boolean is a Coroutine function - # + # star_arg PyArgDeclNode or None * argument + # starstar_arg PyArgDeclNode or None ** argument + # + # is_async_def boolean is a Coroutine function + # # has_fused_arguments boolean # Whether this cdef function has fused parameters. 
This is needed # by AnalyseDeclarationsTransform, so it can replace CFuncDefNodes @@ -1660,13 +1660,13 @@ class FuncDefNode(StatNode, BlockNode): pymethdef_required = False is_generator = False is_generator_body = False - is_async_def = False + is_async_def = False modifiers = [] has_fused_arguments = False star_arg = None starstar_arg = None is_cyfunction = False - code_object = None + code_object = None def analyse_default_values(self, env): default_seen = 0 @@ -1677,7 +1677,7 @@ class FuncDefNode(StatNode, BlockNode): arg.default = arg.default.analyse_types(env) arg.default = arg.default.coerce_to(arg.type, env) else: - error(arg.pos, "This argument cannot have a default value") + error(arg.pos, "This argument cannot have a default value") arg.default = None elif arg.kw_only: default_seen = 1 @@ -1692,11 +1692,11 @@ class FuncDefNode(StatNode, BlockNode): annotation = annotation.analyse_types(env) return annotation - def analyse_annotations(self, env): - for arg in self.args: - if arg.annotation: + def analyse_annotations(self, env): + for arg in self.args: + if arg.annotation: arg.annotation = self.analyse_annotation(env, arg.annotation) - + def align_argument_type(self, env, arg): # @cython.locals() directive_locals = self.directive_locals @@ -1704,7 +1704,7 @@ class FuncDefNode(StatNode, BlockNode): if arg.name in directive_locals: type_node = directive_locals[arg.name] other_type = type_node.analyse_as_type(env) - elif isinstance(arg, CArgDeclNode) and arg.annotation and env.directives['annotation_typing']: + elif isinstance(arg, CArgDeclNode) and arg.annotation and env.directives['annotation_typing']: type_node = arg.annotation other_type = arg.inject_type_from_annotations(env) if other_type is None: @@ -1713,7 +1713,7 @@ class FuncDefNode(StatNode, BlockNode): return arg if other_type is None: error(type_node.pos, "Not a type") - elif orig_type is not py_object_type and not orig_type.same_as(other_type): + elif orig_type is not py_object_type and not orig_type.same_as(other_type): error(arg.base_type.pos, "Signature does not agree with previous declaration") error(type_node.pos, "Previous declaration here") else: @@ -1729,8 +1729,8 @@ class FuncDefNode(StatNode, BlockNode): genv = genv.outer_scope if self.needs_closure: lenv = ClosureScope(name=self.entry.name, - outer_scope=genv, - parent_scope=env, + outer_scope=genv, + parent_scope=env, scope_name=self.entry.cname) else: lenv = LocalScope(name=self.entry.name, @@ -1782,9 +1782,9 @@ class FuncDefNode(StatNode, BlockNode): UtilityCode.load_cached("Profile", "Profile.c")) # Generate C code for header and body of function - code.enter_cfunc_scope(lenv) + code.enter_cfunc_scope(lenv) code.return_from_error_cleanup_label = code.new_label() - code.funcstate.gil_owned = not lenv.nogil + code.funcstate.gil_owned = not lenv.nogil # ----- Top-level constants used by this function code.mark_pos(self.pos) @@ -1798,9 +1798,9 @@ class FuncDefNode(StatNode, BlockNode): with_pymethdef = (self.needs_assignment_synthesis(env, code) or self.pymethdef_required) if self.py_func: - self.py_func.generate_function_header( - code, with_pymethdef=with_pymethdef, proto_only=True) - self.generate_function_header(code, with_pymethdef=with_pymethdef) + self.py_func.generate_function_header( + code, with_pymethdef=with_pymethdef, proto_only=True) + self.generate_function_header(code, with_pymethdef=with_pymethdef) # ----- Local variable declarations # Find function scope cenv = env @@ -1829,9 +1829,9 @@ class FuncDefNode(StatNode, BlockNode): elif 
self.return_type.is_memoryviewslice: init = ' = ' + MemoryView.memslice_entry_init - code.putln("%s%s;" % ( - self.return_type.declaration_code(Naming.retval_cname), - init)) + code.putln("%s%s;" % ( + self.return_type.declaration_code(Naming.retval_cname), + init)) tempvardecl_code = code.insertion_point() self.generate_keyword_list(code) @@ -1842,39 +1842,39 @@ class FuncDefNode(StatNode, BlockNode): # See if we need to acquire the GIL for variable declarations, or for # refnanny only - # Closures are not currently possible for cdef nogil functions, - # but check them anyway - have_object_args = self.needs_closure or self.needs_outer_scope + # Closures are not currently possible for cdef nogil functions, + # but check them anyway + have_object_args = self.needs_closure or self.needs_outer_scope for arg in lenv.arg_entries: if arg.type.is_pyobject: have_object_args = True break - used_buffer_entries = [entry for entry in lenv.buffer_entries if entry.used] - + used_buffer_entries = [entry for entry in lenv.buffer_entries if entry.used] + acquire_gil_for_var_decls_only = ( - lenv.nogil and lenv.has_with_gil_block and - (have_object_args or used_buffer_entries)) + lenv.nogil and lenv.has_with_gil_block and + (have_object_args or used_buffer_entries)) acquire_gil_for_refnanny_only = ( - lenv.nogil and lenv.has_with_gil_block and not - acquire_gil_for_var_decls_only) + lenv.nogil and lenv.has_with_gil_block and not + acquire_gil_for_var_decls_only) use_refnanny = not lenv.nogil or lenv.has_with_gil_block if acquire_gil or acquire_gil_for_var_decls_only: code.put_ensure_gil() - code.funcstate.gil_owned = True + code.funcstate.gil_owned = True elif lenv.nogil and lenv.has_with_gil_block: code.declare_gilstate() - if profile or linetrace: + if profile or linetrace: if not self.is_generator: # generators are traced when iterated, not at creation tempvardecl_code.put_trace_declarations() code_object = self.code_object.calculate_result_code(code) if self.code_object else None code.put_trace_frame_init(code_object) - + # ----- Special check for getbuffer if is_getbuffer_slot: self.getbuffer_check(code) @@ -1896,31 +1896,31 @@ class FuncDefNode(StatNode, BlockNode): slot_func_cname = '%s->tp_new' % lenv.scope_class.type.typeptr_cname code.putln("%s = (%s)%s(%s, %s, NULL);" % ( Naming.cur_scope_cname, - lenv.scope_class.type.empty_declaration_code(), + lenv.scope_class.type.empty_declaration_code(), slot_func_cname, lenv.scope_class.type.typeptr_cname, Naming.empty_tuple)) code.putln("if (unlikely(!%s)) {" % Naming.cur_scope_cname) - # Scope unconditionally DECREFed on return. - code.putln("%s = %s;" % ( - Naming.cur_scope_cname, + # Scope unconditionally DECREFed on return. + code.putln("%s = %s;" % ( + Naming.cur_scope_cname, lenv.scope_class.type.cast_code("Py_None"))) code.put_incref("Py_None", py_object_type) - code.putln(code.error_goto(self.pos)) - code.putln("} else {") - code.put_gotref(Naming.cur_scope_cname) + code.putln(code.error_goto(self.pos)) + code.putln("} else {") + code.put_gotref(Naming.cur_scope_cname) code.putln("}") # Note that it is unsafe to decref the scope at this point. 
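The entry-code hunks above decide when a nogil function still needs the GIL briefly: if it contains a 'with gil' block, the GIL is acquired at entry either to initialise object arguments, closures, or used buffers, or only for refnanny bookkeeping. The same decision as a standalone sketch, with plain booleans replacing the scope objects (an illustrative simplification):

    def gil_entry_strategy(nogil, has_with_gil_block, have_object_args, used_buffers):
        # Mirrors acquire_gil_for_var_decls_only / _refnanny_only above.
        for_var_decls = nogil and has_with_gil_block and (have_object_args or used_buffers)
        for_refnanny = nogil and has_with_gil_block and not for_var_decls
        use_refnanny = (not nogil) or has_with_gil_block
        return for_var_decls, for_refnanny, use_refnanny

    print(gil_entry_strategy(True, True, True, False))   # (True, False, True)
    print(gil_entry_strategy(True, True, False, False))  # (False, True, True)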
if self.needs_outer_scope: if self.is_cyfunction: code.putln("%s = (%s) __Pyx_CyFunction_GetClosure(%s);" % ( outer_scope_cname, - cenv.scope_class.type.empty_declaration_code(), + cenv.scope_class.type.empty_declaration_code(), Naming.self_cname)) else: code.putln("%s = (%s) %s;" % ( outer_scope_cname, - cenv.scope_class.type.empty_declaration_code(), + cenv.scope_class.type.empty_declaration_code(), Naming.self_cname)) if lenv.is_passthrough: code.putln("%s = %s;" % (Naming.cur_scope_cname, outer_scope_cname)) @@ -1948,21 +1948,21 @@ class FuncDefNode(StatNode, BlockNode): is_cdef = isinstance(self, CFuncDefNode) for entry in lenv.arg_entries: if entry.type.is_pyobject: - if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure: + if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure: code.put_var_incref(entry) # Note: defaults are always incref-ed. For def functions, we # we acquire arguments from object conversion, so we have # new references. If we are a cdef function, we need to # incref our arguments - elif is_cdef and entry.type.is_memoryviewslice and len(entry.cf_assignments) > 1: - code.put_incref_memoryviewslice(entry.cname, have_gil=code.funcstate.gil_owned) + elif is_cdef and entry.type.is_memoryviewslice and len(entry.cf_assignments) > 1: + code.put_incref_memoryviewslice(entry.cname, have_gil=code.funcstate.gil_owned) for entry in lenv.var_entries: - if entry.is_arg and len(entry.cf_assignments) > 1 and not entry.in_closure: - if entry.xdecref_cleanup: - code.put_var_xincref(entry) - else: - code.put_var_incref(entry) + if entry.is_arg and len(entry.cf_assignments) > 1 and not entry.in_closure: + if entry.xdecref_cleanup: + code.put_var_xincref(entry) + else: + code.put_var_incref(entry) # ----- Initialise local buffer auxiliary variables for entry in lenv.var_entries + lenv.arg_entries: @@ -1978,14 +1978,14 @@ class FuncDefNode(StatNode, BlockNode): if acquire_gil_for_var_decls_only: code.put_release_ensured_gil() - code.funcstate.gil_owned = False + code.funcstate.gil_owned = False # ------------------------- # ----- Function body ----- # ------------------------- self.generate_function_body(env, code) - code.mark_pos(self.pos, trace=False) + code.mark_pos(self.pos, trace=False) code.putln("") code.putln("/* function exit code */") @@ -2013,16 +2013,16 @@ class FuncDefNode(StatNode, BlockNode): # Clean up buffers -- this calls a Python function # so need to save and restore error state - buffers_present = len(used_buffer_entries) > 0 - #memslice_entries = [e for e in lenv.entries.values() if e.type.is_memoryviewslice] + buffers_present = len(used_buffer_entries) > 0 + #memslice_entries = [e for e in lenv.entries.values() if e.type.is_memoryviewslice] if buffers_present: code.globalstate.use_utility_code(restore_exception_utility_code) code.putln("{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;") - code.putln("__Pyx_PyThreadState_declare") - code.putln("__Pyx_PyThreadState_assign") + code.putln("__Pyx_PyThreadState_declare") + code.putln("__Pyx_PyThreadState_assign") code.putln("__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);") - for entry in used_buffer_entries: - Buffer.put_release_buffer_code(code, entry) + for entry in used_buffer_entries: + Buffer.put_release_buffer_code(code, entry) #code.putln("%s = 0;" % entry.cname) code.putln("__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}") @@ -2051,13 +2051,13 @@ class FuncDefNode(StatNode, BlockNode): warning(self.entry.pos, "Unraisable exception in function '%s'." 
% self.entry.qualified_name, 0) - code.put_unraisable(self.entry.qualified_name, lenv.nogil) + code.put_unraisable(self.entry.qualified_name, lenv.nogil) default_retval = self.return_type.default_value if err_val is None and default_retval: err_val = default_retval if err_val is not None: - if err_val != Naming.retval_cname: - code.putln("%s = %s;" % (Naming.retval_cname, err_val)) + if err_val != Naming.retval_cname: + code.putln("%s = %s;" % (Naming.retval_cname, err_val)) elif not self.return_type.is_void: code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname) @@ -2072,8 +2072,8 @@ class FuncDefNode(StatNode, BlockNode): # ----- Non-error return cleanup code.put_label(code.return_label) - for entry in used_buffer_entries: - Buffer.put_release_buffer_code(code, entry) + for entry in used_buffer_entries: + Buffer.put_release_buffer_code(code, entry) if is_getbuffer_slot: self.getbuffer_normal_cleanup(code) @@ -2081,13 +2081,13 @@ class FuncDefNode(StatNode, BlockNode): # See if our return value is uninitialized on non-error return # from . import MemoryView # MemoryView.err_if_nogil_initialized_check(self.pos, env) - cond = code.unlikely(self.return_type.error_condition(Naming.retval_cname)) + cond = code.unlikely(self.return_type.error_condition(Naming.retval_cname)) code.putln( 'if (%s) {' % cond) if env.nogil: code.put_ensure_gil() code.putln( - 'PyErr_SetString(PyExc_TypeError, "Memoryview return value is not initialized");') + 'PyErr_SetString(PyExc_TypeError, "Memoryview return value is not initialized");') if env.nogil: code.put_release_ensured_gil() code.putln( @@ -2101,18 +2101,18 @@ class FuncDefNode(StatNode, BlockNode): continue if entry.type.is_memoryviewslice: - code.put_xdecref_memoryviewslice(entry.cname, have_gil=not lenv.nogil) + code.put_xdecref_memoryviewslice(entry.cname, have_gil=not lenv.nogil) elif entry.type.is_pyobject: if not entry.is_arg or len(entry.cf_assignments) > 1: - if entry.xdecref_cleanup: - code.put_var_xdecref(entry) - else: - code.put_var_decref(entry) + if entry.xdecref_cleanup: + code.put_var_xdecref(entry) + else: + code.put_var_decref(entry) # Decref any increfed args for entry in lenv.arg_entries: if entry.type.is_pyobject: - if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure: + if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure: code.put_var_decref(entry) elif (entry.type.is_memoryviewslice and (not is_cdef or len(entry.cf_assignments) > 1)): @@ -2137,7 +2137,7 @@ class FuncDefNode(StatNode, BlockNode): # Returning -1 for __hash__ is supposed to signal an error # We do as Python instances and coerce -1 into -2. 
code.putln("if (unlikely(%s == -1) && !PyErr_Occurred()) %s = -2;" % ( - Naming.retval_cname, Naming.retval_cname)) + Naming.retval_cname, Naming.retval_cname)) if profile or linetrace: code.funcstate.can_trace = False @@ -2157,7 +2157,7 @@ class FuncDefNode(StatNode, BlockNode): if acquire_gil or (lenv.nogil and lenv.has_with_gil_block): # release the GIL (note that with-gil blocks acquire it on exit in their EnsureGILNode) code.put_release_ensured_gil() - code.funcstate.gil_owned = False + code.funcstate.gil_owned = False if not self.return_type.is_void: code.putln("return %s;" % Naming.retval_cname) @@ -2180,7 +2180,7 @@ class FuncDefNode(StatNode, BlockNode): if arg.type.is_void: error(arg.pos, "Invalid use of 'void'") elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice): - error(arg.pos, "Argument type '%s' is incomplete" % arg.type) + error(arg.pos, "Argument type '%s' is incomplete" % arg.type) entry = env.declare_arg(arg.name, arg.type, arg.pos) if arg.annotation: entry.annotation = arg.annotation @@ -2199,10 +2199,10 @@ class FuncDefNode(StatNode, BlockNode): typeptr_cname, arg.accept_none, arg.name, - arg.type.is_builtin_type and arg.type.require_exact, + arg.type.is_builtin_type and arg.type.require_exact, code.error_goto(arg.pos))) else: - error(arg.pos, "Cannot test type of extern C class without type object name specification") + error(arg.pos, "Cannot test type of extern C class without type object name specification") def generate_arg_none_check(self, arg, code): # Generate None check for one argument. @@ -2221,7 +2221,7 @@ class FuncDefNode(StatNode, BlockNode): pass def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) # Evaluate and store argument default values for arg in self.args: if not arg.is_dynamic: @@ -2336,16 +2336,16 @@ class CFuncDefNode(FuncDefNode): def unqualified_name(self): return self.entry.name - @property - def code_object(self): - # share the CodeObject with the cpdef wrapper (if available) - return self.py_func.code_object if self.py_func else None - + @property + def code_object(self): + # share the CodeObject with the cpdef wrapper (if available) + return self.py_func.code_object if self.py_func else None + def analyse_declarations(self, env): self.is_c_class_method = env.is_c_class_scope if self.directive_locals is None: self.directive_locals = {} - self.directive_locals.update(env.directives.get('locals', {})) + self.directive_locals.update(env.directives.get('locals', {})) if self.directive_returns is not None: base_type = self.directive_returns.analyse_as_type(env) if base_type is None: @@ -2356,14 +2356,14 @@ class CFuncDefNode(FuncDefNode): self.is_static_method = 'staticmethod' in env.directives and not env.lookup_here('staticmethod') # The 2 here is because we need both function and argument names. 
if isinstance(self.declarator, CFuncDeclaratorNode): - name_declarator, type = self.declarator.analyse( - base_type, env, nonempty=2 * (self.body is not None), + name_declarator, type = self.declarator.analyse( + base_type, env, nonempty=2 * (self.body is not None), directive_locals=self.directive_locals, visibility=self.visibility) else: - name_declarator, type = self.declarator.analyse( + name_declarator, type = self.declarator.analyse( base_type, env, nonempty=2 * (self.body is not None), visibility=self.visibility) if not type.is_cfunction: - error(self.pos, "Suite attached to non-function declaration") + error(self.pos, "Suite attached to non-function declaration") # Remember the actual type according to the function header # written here, because the type in the symbol table entry # may be different if we're overriding a C method inherited @@ -2380,9 +2380,9 @@ class CFuncDefNode(FuncDefNode): opt_arg_count = self.cfunc_declarator.optional_arg_count if (self.visibility == 'public' or self.api) and opt_arg_count: error(self.cfunc_declarator.pos, - "Function with optional arguments may not be declared public or api") + "Function with optional arguments may not be declared public or api") - if type.exception_check == '+' and self.visibility != 'extern': + if type.exception_check == '+' and self.visibility != 'extern': warning(self.cfunc_declarator.pos, "Only extern functions can throw C++ exceptions.") @@ -2403,7 +2403,7 @@ class CFuncDefNode(FuncDefNode): if type_arg.type.is_buffer or type_arg.type.is_pythran_expr: if self.type.nogil: error(formal_arg.pos, - "Buffer may not be acquired without the GIL. Consider using memoryview slices instead.") + "Buffer may not be acquired without the GIL. Consider using memoryview slices instead.") elif 'inline' in self.modifiers: warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1) @@ -2416,13 +2416,13 @@ class CFuncDefNode(FuncDefNode): type.is_static_method = self.is_static_method self.entry = env.declare_cfunction( name, type, self.pos, - cname=cname, visibility=self.visibility, api=self.api, - defining=self.body is not None, modifiers=self.modifiers, - overridable=self.overridable) + cname=cname, visibility=self.visibility, api=self.api, + defining=self.body is not None, modifiers=self.modifiers, + overridable=self.overridable) self.entry.inline_func_in_pxd = self.inline_in_pxd self.return_type = type.return_type if self.return_type.is_array and self.visibility != 'extern': - error(self.pos, "Function cannot return an array") + error(self.pos, "Function cannot return an array") if self.return_type.is_cpp_class: self.return_type.check_nullary_constructor(self.pos, "used as a return value") @@ -2440,34 +2440,34 @@ class CFuncDefNode(FuncDefNode): # TODO(robertwb): Finish this up, perhaps via more function refactoring. 
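The hunk below synthesises the Python-level wrapper for a cpdef function: a DefNode whose body simply calls the C implementation, plus an OverrideCheckNode (unless the method is final) so C-level callers dispatch through a Python override when one exists. In plain-Python terms the dispatch behaves roughly like this sketch; every name here is illustrative, not Cython's generated code:

    class Base(object):
        def _c_impl(self, x):
            return x * 2              # the fast path (the cdef body)

        def method(self, x):          # the def wrapper, exposed to Python
            return self._c_impl(x)

        def call_method(self, x):     # what C-level callers effectively do
            override = type(self).method
            if override is not Base.method:
                return override(self, x)   # overridden from Python
            return self._c_impl(x)         # not overridden: call C directly

    class Sub(Base):
        def method(self, x):
            return x + 100

    print(Base().call_method(3))  # 6, via the C fast path
    print(Sub().call_method(3))   # 103, via the Python override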
error(self.pos, "static cpdef methods not yet supported") name = self.entry.name - py_func_body = self.call_self_node(is_module_scope=env.is_module_scope) + py_func_body = self.call_self_node(is_module_scope=env.is_module_scope) if self.is_static_method: from .ExprNodes import NameNode decorators = [DecoratorNode(self.pos, decorator=NameNode(self.pos, name='staticmethod'))] decorators[0].decorator.analyse_types(env) else: decorators = [] - self.py_func = DefNode(pos=self.pos, - name=self.entry.name, - args=self.args, - star_arg=None, - starstar_arg=None, - doc=self.doc, - body=py_func_body, - decorators=decorators, - is_wrapper=1) + self.py_func = DefNode(pos=self.pos, + name=self.entry.name, + args=self.args, + star_arg=None, + starstar_arg=None, + doc=self.doc, + body=py_func_body, + decorators=decorators, + is_wrapper=1) self.py_func.is_module_scope = env.is_module_scope self.py_func.analyse_declarations(env) - self.py_func.entry.is_overridable = True - self.py_func_stat = StatListNode(self.pos, stats=[self.py_func]) + self.py_func.entry.is_overridable = True + self.py_func_stat = StatListNode(self.pos, stats=[self.py_func]) self.py_func.type = PyrexTypes.py_object_type self.entry.as_variable = self.py_func.entry self.entry.used = self.entry.as_variable.used = True # Reset scope entry the above cfunction env.entries[name] = self.entry if (not self.entry.is_final_cmethod and - (not env.is_module_scope or Options.lookup_module_cpdef)): - self.override = OverrideCheckNode(self.pos, py_func=self.py_func) + (not env.is_module_scope or Options.lookup_module_cpdef)): + self.override = OverrideCheckNode(self.pos, py_func=self.py_func) self.body = StatListNode(self.pos, stats=[self.override, self.body]) def _validate_type_visibility(self, type, pos, env): @@ -2480,7 +2480,7 @@ class CFuncDefNode(FuncDefNode): if public_or_api and entry and env.is_module_scope: if not (entry.visibility in ('public', 'extern') or entry.api or entry.in_cinclude): - error(pos, "Function declared public or api may not have private types") + error(pos, "Function declared public or api may not have private types") def call_self_node(self, omit_optional_args=0, is_module_scope=0): from . 
import ExprNodes @@ -2535,22 +2535,22 @@ class CFuncDefNode(FuncDefNode): def analyse_expressions(self, env): self.local_scope.directives = env.directives - if self.py_func_stat is not None: - # this will also analyse the default values and the function name assignment - self.py_func_stat = self.py_func_stat.analyse_expressions(env) - elif self.py_func is not None: + if self.py_func_stat is not None: + # this will also analyse the default values and the function name assignment + self.py_func_stat = self.py_func_stat.analyse_expressions(env) + elif self.py_func is not None: # this will also analyse the default values self.py_func = self.py_func.analyse_expressions(env) else: self.analyse_default_values(env) - self.analyse_annotations(env) + self.analyse_annotations(env) self.acquire_gil = self.need_gil_acquisition(self.local_scope) return self def needs_assignment_synthesis(self, env, code=None): return False - def generate_function_header(self, code, with_pymethdef, with_opt_args=1, with_dispatch=1, cname=None): + def generate_function_header(self, code, with_pymethdef, with_opt_args=1, with_dispatch=1, cname=None): scope = self.local_scope arg_decls = [] type = self.type @@ -2591,8 +2591,8 @@ class CFuncDefNode(FuncDefNode): code.globalstate.parts['module_declarations'].putln(self.template_declaration) code.putln(self.template_declaration) if needs_proto: - code.globalstate.parts['module_declarations'].putln( - "%s%s%s; /* proto*/" % (storage_class, modifiers, header)) + code.globalstate.parts['module_declarations'].putln( + "%s%s%s; /* proto*/" % (storage_class, modifiers, header)) code.putln("%s%s%s {" % (storage_class, modifiers, header)) def generate_argument_declarations(self, env, code): @@ -2656,9 +2656,9 @@ class CFuncDefNode(FuncDefNode): self.generate_arg_none_check(arg, code) def generate_execution_code(self, code): - if code.globalstate.directives['linetrace']: - code.mark_pos(self.pos) - code.putln("") # generate line tracing code + if code.globalstate.directives['linetrace']: + code.mark_pos(self.pos) + code.putln("") # generate line tracing code super(CFuncDefNode, self).generate_execution_code(code) if self.py_func_stat: self.py_func_stat.generate_execution_code(code) @@ -2684,11 +2684,11 @@ class CFuncDefNode(FuncDefNode): entry = entry.prev_entry entry.func_cname = "%s%swrap_%s" % (self.entry.func_cname, Naming.pyrex_prefix, k) code.putln() - self.generate_function_header( - code, 0, - with_dispatch=entry.type.is_overridable, - with_opt_args=entry.type.optional_arg_count, - cname=entry.func_cname) + self.generate_function_header( + code, 0, + with_dispatch=entry.type.is_overridable, + with_opt_args=entry.type.optional_arg_count, + cname=entry.func_cname) if not self.return_type.is_void: code.put('return ') args = self.type.args @@ -2719,7 +2719,7 @@ class PyArgDeclNode(Node): def generate_function_definitions(self, env, code): self.entry.generate_function_definitions(env, code) - + class DecoratorNode(Node): # A decorator # @@ -2749,8 +2749,8 @@ class DefNode(FuncDefNode): # # decorator_indirection IndirectionNode Used to remove __Pyx_Method_ClassMethod for fused functions - child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators", "return_type_annotation"] - outer_attrs = ["decorators", "return_type_annotation"] + child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators", "return_type_annotation"] + outer_attrs = ["decorators", "return_type_annotation"] is_staticmethod = False is_classmethod = False @@ -2791,8 +2791,8 @@ class 
DefNode(FuncDefNode): self.num_required_kw_args = rk self.num_required_args = r - def as_cfunction(self, cfunc=None, scope=None, overridable=True, returns=None, except_val=None, modifiers=None, - nogil=False, with_gil=False): + def as_cfunction(self, cfunc=None, scope=None, overridable=True, returns=None, except_val=None, modifiers=None, + nogil=False, with_gil=False): if self.star_arg: error(self.star_arg.pos, "cdef function cannot have star argument") if self.starstar_arg: @@ -2803,19 +2803,19 @@ class DefNode(FuncDefNode): cfunc_args = [] for formal_arg in self.args: name_declarator, type = formal_arg.analyse(scope, nonempty=1) - cfunc_args.append(PyrexTypes.CFuncTypeArg(name=name_declarator.name, - cname=None, + cfunc_args.append(PyrexTypes.CFuncTypeArg(name=name_declarator.name, + cname=None, annotation=formal_arg.annotation, - type=py_object_type, - pos=formal_arg.pos)) - cfunc_type = PyrexTypes.CFuncType(return_type=py_object_type, - args=cfunc_args, - has_varargs=False, - exception_value=None, + type=py_object_type, + pos=formal_arg.pos)) + cfunc_type = PyrexTypes.CFuncType(return_type=py_object_type, + args=cfunc_args, + has_varargs=False, + exception_value=None, exception_check=exception_check, - nogil=nogil, - with_gil=with_gil, - is_overridable=overridable) + nogil=nogil, + with_gil=with_gil, + is_overridable=overridable) cfunc = CVarDefNode(self.pos, type=cfunc_type) else: if scope is None: @@ -2826,7 +2826,7 @@ class DefNode(FuncDefNode): error(cfunc.pos, "previous declaration here") for i, (formal_arg, type_arg) in enumerate(zip(self.args, cfunc_type.args)): name_declarator, type = formal_arg.analyse(scope, nonempty=1, - is_self_arg=(i == 0 and scope.is_c_class_scope)) + is_self_arg=(i == 0 and scope.is_c_class_scope)) if type is None or type is PyrexTypes.py_object_type: formal_arg.type = type_arg.type formal_arg.name_declarator = name_declarator @@ -2834,29 +2834,29 @@ class DefNode(FuncDefNode): if exception_value is None and cfunc_type.exception_value is not None: from .ExprNodes import ConstNode exception_value = ConstNode( - self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type) + self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type) declarator = CFuncDeclaratorNode(self.pos, - base=CNameDeclaratorNode(self.pos, name=self.name, cname=None), - args=self.args, - has_varargs=False, - exception_check=cfunc_type.exception_check, - exception_value=exception_value, - with_gil=cfunc_type.with_gil, - nogil=cfunc_type.nogil) + base=CNameDeclaratorNode(self.pos, name=self.name, cname=None), + args=self.args, + has_varargs=False, + exception_check=cfunc_type.exception_check, + exception_value=exception_value, + with_gil=cfunc_type.with_gil, + nogil=cfunc_type.nogil) return CFuncDefNode(self.pos, - modifiers=modifiers or [], - base_type=CAnalysedBaseTypeNode(self.pos, type=cfunc_type.return_type), - declarator=declarator, - body=self.body, - doc=self.doc, - overridable=cfunc_type.is_overridable, - type=cfunc_type, - with_gil=cfunc_type.with_gil, - nogil=cfunc_type.nogil, - visibility='private', - api=False, - directive_locals=getattr(cfunc, 'directive_locals', {}), - directive_returns=returns) + modifiers=modifiers or [], + base_type=CAnalysedBaseTypeNode(self.pos, type=cfunc_type.return_type), + declarator=declarator, + body=self.body, + doc=self.doc, + overridable=cfunc_type.is_overridable, + type=cfunc_type, + with_gil=cfunc_type.with_gil, + nogil=cfunc_type.nogil, + visibility='private', + api=False, + directive_locals=getattr(cfunc, 
'directive_locals', {}), + directive_returns=returns) def is_cdef_func_compatible(self): """Determines if the function's signature is compatible with a @@ -2895,13 +2895,13 @@ class DefNode(FuncDefNode): self.analyse_signature(env) self.return_type = self.entry.signature.return_type() - # if a signature annotation provides a more specific return object type, use it - if self.return_type is py_object_type and self.return_type_annotation: - if env.directives['annotation_typing'] and not self.entry.is_special: + # if a signature annotation provides a more specific return object type, use it + if self.return_type is py_object_type and self.return_type_annotation: + if env.directives['annotation_typing'] and not self.entry.is_special: _, return_type = analyse_type_annotation(self.return_type_annotation, env) - if return_type and return_type.is_pyobject: - self.return_type = return_type - + if return_type and return_type.is_pyobject: + self.return_type = return_type + self.create_local_scope(env) self.py_wrapper = DefNodeWrapper( @@ -2915,7 +2915,7 @@ class DefNode(FuncDefNode): self.py_wrapper.analyse_declarations(env) def analyse_argument_types(self, env): - self.directive_locals = env.directives.get('locals', {}) + self.directive_locals = env.directives.get('locals', {}) allow_none_for_extension_args = env.directives['allow_none_for_extension_args'] f2s = env.fused_to_specific @@ -2943,7 +2943,7 @@ class DefNode(FuncDefNode): self.align_argument_type(env, arg) if name_declarator and name_declarator.cname: - error(self.pos, "Python function argument cannot have C name specification") + error(self.pos, "Python function argument cannot have C name specification") arg.type = arg.type.as_argument_type() arg.hdr_type = None arg.needs_conversion = 0 @@ -2955,7 +2955,7 @@ class DefNode(FuncDefNode): elif arg.not_none: arg.accept_none = False elif (arg.type.is_extension_type or arg.type.is_builtin_type - or arg.type.is_buffer or arg.type.is_memoryviewslice): + or arg.type.is_buffer or arg.type.is_memoryviewslice): if arg.default and arg.default.constant_result is None: # special case: def func(MyType obj = None) arg.accept_none = True @@ -3000,8 +3000,8 @@ class DefNode(FuncDefNode): sig = self.entry.signature nfixed = sig.num_fixed_args() - if (sig is TypeSlots.pymethod_signature and nfixed == 1 - and len(self.args) == 0 and self.star_arg): + if (sig is TypeSlots.pymethod_signature and nfixed == 1 + and len(self.args) == 0 and self.star_arg): # this is the only case where a diverging number of # arguments is not an error - when we have no explicit # 'self' parameter as in method(*args) @@ -3019,7 +3019,7 @@ class DefNode(FuncDefNode): sig.has_generic_args = True if ((self.is_classmethod or self.is_staticmethod) and - self.has_fused_arguments and env.is_c_class_scope): + self.has_fused_arguments and env.is_c_class_scope): del self.decorator_indirection.stats[:] for i in range(min(nfixed, len(self.args))): @@ -3052,7 +3052,7 @@ class DefNode(FuncDefNode): if not sig.has_generic_args: self.bad_signature() for arg in self.args: - if arg.is_generic and (arg.type.is_extension_type or arg.type.is_builtin_type): + if arg.is_generic and (arg.type.is_extension_type or arg.type.is_builtin_type): arg.needs_type_test = 1 def bad_signature(self): @@ -3065,8 +3065,8 @@ class DefNode(FuncDefNode): desc = "Special method" else: desc = "Method" - error(self.pos, "%s %s has wrong number of arguments (%d declared, %s expected)" % ( - desc, self.name, len(self.args), expected_str)) + error(self.pos, "%s %s has wrong 
number of arguments (%d declared, %s expected)" % ( + desc, self.name, len(self.args), expected_str)) def declare_pyfunction(self, env): #print "DefNode.declare_pyfunction:", self.name, "in", env ### @@ -3075,7 +3075,7 @@ class DefNode(FuncDefNode): if entry: if entry.is_final_cmethod and not env.parent_type.is_final_type: error(self.pos, "Only final types can have final Python (def/cpdef) methods") - if entry.type.is_cfunction and not entry.is_builtin_cmethod and not self.is_wrapper: + if entry.type.is_cfunction and not entry.is_builtin_cmethod and not self.is_wrapper: warning(self.pos, "Overriding cdef method with def method.", 5) entry = env.declare_pyfunction(name, self.pos, allow_redefine=not self.is_wrapper) self.entry = entry @@ -3085,8 +3085,8 @@ class DefNode(FuncDefNode): entry.doc = embed_position(self.pos, self.doc) entry.doc_cname = Naming.funcdoc_prefix + prefix + name if entry.is_special: - if entry.name in TypeSlots.invisible or not entry.doc or ( - entry.name in '__getattr__' and env.directives['fast_getattr']): + if entry.name in TypeSlots.invisible or not entry.doc or ( + entry.name in '__getattr__' and env.directives['fast_getattr']): entry.wrapperbase_cname = None else: entry.wrapperbase_cname = Naming.wrapperbase_prefix + prefix + name @@ -3131,8 +3131,8 @@ class DefNode(FuncDefNode): def analyse_expressions(self, env): self.local_scope.directives = env.directives self.analyse_default_values(env) - self.analyse_annotations(env) - if self.return_type_annotation: + self.analyse_annotations(env) + if self.return_type_annotation: self.return_type_annotation = self.analyse_annotation(env, self.return_type_annotation) if not self.needs_assignment_synthesis(env) and self.decorators: @@ -3145,17 +3145,17 @@ class DefNode(FuncDefNode): def needs_assignment_synthesis(self, env, code=None): if self.is_staticmethod: return True - if self.specialized_cpdefs or self.entry.is_fused_specialized: + if self.specialized_cpdefs or self.entry.is_fused_specialized: return False if self.no_assignment_synthesis: return False - if self.entry.is_special: - return False + if self.entry.is_special: + return False if self.entry.is_anonymous: return True - if env.is_module_scope or env.is_c_class_scope: + if env.is_module_scope or env.is_c_class_scope: if code is None: - return self.local_scope.directives['binding'] + return self.local_scope.directives['binding'] else: return code.globalstate.directives['binding'] return env.is_py_class_scope or env.is_closure_scope @@ -3168,8 +3168,8 @@ class DefNode(FuncDefNode): def generate_function_definitions(self, env, code): if self.defaults_getter: - # defaults getter must never live in class scopes, it's always a module function - self.defaults_getter.generate_function_definitions(env.global_scope(), code) + # defaults getter must never live in class scopes, it's always a module function + self.defaults_getter.generate_function_definitions(env.global_scope(), code) # Before closure cnames are mangled if self.py_wrapper_required: @@ -3291,15 +3291,15 @@ class DefNodeWrapper(FuncDefNode): if not arg.hdr_type.create_to_py_utility_code(env): pass # will fail later - if self.starstar_arg and not self.starstar_arg.entry.cf_used: - # we will set the kwargs argument to NULL instead of a new dict - # and must therefore correct the control flow state - entry = self.starstar_arg.entry - entry.xdecref_cleanup = 1 - for ass in entry.cf_assignments: - if not ass.is_arg and ass.lhs.is_name: - ass.lhs.cf_maybe_null = True - + if self.starstar_arg and not 
self.starstar_arg.entry.cf_used: + # we will set the kwargs argument to NULL instead of a new dict + # and must therefore correct the control flow state + entry = self.starstar_arg.entry + entry.xdecref_cleanup = 1 + for ass in entry.cf_assignments: + if not ass.is_arg and ass.lhs.is_name: + ass.lhs.cf_maybe_null = True + def signature_has_nongeneric_args(self): argcount = len(self.args) if argcount == 0 or ( @@ -3342,7 +3342,7 @@ class DefNodeWrapper(FuncDefNode): if preprocessor_guard: code.putln(preprocessor_guard) - code.enter_cfunc_scope(lenv) + code.enter_cfunc_scope(lenv) code.return_from_error_cleanup_label = code.new_label() with_pymethdef = (self.target.needs_assignment_synthesis(env, code) or @@ -3421,14 +3421,14 @@ class DefNodeWrapper(FuncDefNode): arg_code_list.append("CYTHON_UNUSED PyObject *unused") if sig.has_generic_args: arg_code_list.append( - "PyObject *%s, PyObject *%s" % ( - Naming.args_cname, Naming.kwds_cname)) + "PyObject *%s, PyObject *%s" % ( + Naming.args_cname, Naming.kwds_cname)) arg_code = ", ".join(arg_code_list) # Prevent warning: unused function '__pyx_pw_5numpy_7ndarray_1__getbuffer__' mf = "" if (entry.name in ("__getbuffer__", "__releasebuffer__") - and entry.scope.is_c_class_scope): + and entry.scope.is_c_class_scope): mf = "CYTHON_UNUSED " with_pymethdef = False @@ -3442,7 +3442,7 @@ class DefNodeWrapper(FuncDefNode): # want the prototype for the "fused cpdef", in case we're # checking to see if our method was overridden in Python self.target.fused_py_func.generate_function_header( - code, with_pymethdef, proto_only=True) + code, with_pymethdef, proto_only=True) return if (Options.docstrings and entry.doc and @@ -3453,12 +3453,12 @@ class DefNodeWrapper(FuncDefNode): docstr = entry.doc if docstr.is_unicode: - docstr = docstr.as_utf8_string() + docstr = docstr.as_utf8_string() - if not (entry.is_special and entry.name in ('__getbuffer__', '__releasebuffer__')): - code.putln('static char %s[] = %s;' % ( + if not (entry.is_special and entry.name in ('__getbuffer__', '__releasebuffer__')): + code.putln('static char %s[] = %s;' % ( entry.doc_cname, - docstr.as_c_string_literal())) + docstr.as_c_string_literal())) if entry.is_special: code.putln('#if CYTHON_COMPILING_IN_CPYTHON') @@ -3468,7 +3468,7 @@ class DefNodeWrapper(FuncDefNode): if with_pymethdef or self.target.fused_py_func: code.put( - "static PyMethodDef %s = " % entry.pymethdef_cname) + "static PyMethodDef %s = " % entry.pymethdef_cname) code.put_pymethoddef(self.target.entry, ";", allow_skip=False) code.putln("%s {" % header) @@ -3497,7 +3497,7 @@ class DefNodeWrapper(FuncDefNode): for arg in self.args: if not arg.type.is_pyobject: if not arg.type.create_from_py_utility_code(env): - pass # will fail later + pass # will fail later if not self.signature_has_generic_args(): if has_star_or_kw_args: @@ -3544,11 +3544,11 @@ class DefNodeWrapper(FuncDefNode): code.putln("if (unlikely(PyTuple_GET_SIZE(%s) > 0)) {" % Naming.args_cname) code.put('__Pyx_RaiseArgtupleInvalid("%s", 1, 0, 0, PyTuple_GET_SIZE(%s)); return %s;' % ( - self.name, Naming.args_cname, self.error_value())) + self.name, Naming.args_cname, self.error_value())) code.putln("}") if self.starstar_arg: - if self.star_arg or not self.starstar_arg.entry.cf_used: + if self.star_arg or not self.starstar_arg.entry.cf_used: kwarg_check = "unlikely(%s)" % Naming.kwds_cname else: kwarg_check = "%s" % Naming.kwds_cname @@ -3562,38 +3562,38 @@ class DefNodeWrapper(FuncDefNode): kwarg_check, Naming.kwds_cname, self.name, bool(self.starstar_arg), 
self.error_value())) - if self.starstar_arg and self.starstar_arg.entry.cf_used: - if all(ref.node.allow_null for ref in self.starstar_arg.entry.cf_references): - code.putln("if (%s) {" % kwarg_check) - code.putln("%s = PyDict_Copy(%s); if (unlikely(!%s)) return %s;" % ( - self.starstar_arg.entry.cname, - Naming.kwds_cname, - self.starstar_arg.entry.cname, - self.error_value())) - code.put_gotref(self.starstar_arg.entry.cname) - code.putln("} else {") - code.putln("%s = NULL;" % (self.starstar_arg.entry.cname,)) - code.putln("}") - self.starstar_arg.entry.xdecref_cleanup = 1 - else: - code.put("%s = (%s) ? PyDict_Copy(%s) : PyDict_New(); " % ( - self.starstar_arg.entry.cname, - Naming.kwds_cname, - Naming.kwds_cname)) - code.putln("if (unlikely(!%s)) return %s;" % ( - self.starstar_arg.entry.cname, self.error_value())) - self.starstar_arg.entry.xdecref_cleanup = 0 - code.put_gotref(self.starstar_arg.entry.cname) + if self.starstar_arg and self.starstar_arg.entry.cf_used: + if all(ref.node.allow_null for ref in self.starstar_arg.entry.cf_references): + code.putln("if (%s) {" % kwarg_check) + code.putln("%s = PyDict_Copy(%s); if (unlikely(!%s)) return %s;" % ( + self.starstar_arg.entry.cname, + Naming.kwds_cname, + self.starstar_arg.entry.cname, + self.error_value())) + code.put_gotref(self.starstar_arg.entry.cname) + code.putln("} else {") + code.putln("%s = NULL;" % (self.starstar_arg.entry.cname,)) + code.putln("}") + self.starstar_arg.entry.xdecref_cleanup = 1 + else: + code.put("%s = (%s) ? PyDict_Copy(%s) : PyDict_New(); " % ( + self.starstar_arg.entry.cname, + Naming.kwds_cname, + Naming.kwds_cname)) + code.putln("if (unlikely(!%s)) return %s;" % ( + self.starstar_arg.entry.cname, self.error_value())) + self.starstar_arg.entry.xdecref_cleanup = 0 + code.put_gotref(self.starstar_arg.entry.cname) if self.self_in_stararg and not self.target.is_staticmethod: # need to create a new tuple with 'self' inserted as first item code.put("%s = PyTuple_New(PyTuple_GET_SIZE(%s)+1); if (unlikely(!%s)) " % ( - self.star_arg.entry.cname, - Naming.args_cname, - self.star_arg.entry.cname)) - if self.starstar_arg and self.starstar_arg.entry.cf_used: + self.star_arg.entry.cname, + Naming.args_cname, + self.star_arg.entry.cname)) + if self.starstar_arg and self.starstar_arg.entry.cf_used: code.putln("{") - code.put_xdecref_clear(self.starstar_arg.entry.cname, py_object_type) + code.put_xdecref_clear(self.starstar_arg.entry.cname, py_object_type) code.putln("return %s;" % self.error_value()) code.putln("}") else: @@ -3618,8 +3618,8 @@ class DefNodeWrapper(FuncDefNode): elif self.star_arg: code.put_incref(Naming.args_cname, py_object_type) code.putln("%s = %s;" % ( - self.star_arg.entry.cname, - Naming.args_cname)) + self.star_arg.entry.cname, + Naming.args_cname)) self.star_arg.entry.xdecref_cleanup = 0 def generate_tuple_and_keyword_parsing_code(self, args, success_label, code): @@ -3660,8 +3660,8 @@ class DefNodeWrapper(FuncDefNode): all_args = tuple(positional_args) + tuple(kw_only_args) code.putln("static PyObject **%s[] = {%s,0};" % ( Naming.pykwdlist_cname, - ','.join(['&%s' % code.intern_identifier(arg.name) - for arg in all_args]))) + ','.join(['&%s' % code.intern_identifier(arg.name) + for arg in all_args]))) # Before being converted and assigned to the target variables, # borrowed references to all unpacked argument values are @@ -3691,14 +3691,14 @@ class DefNodeWrapper(FuncDefNode): else: compare = '<' code.putln('} else if (PyTuple_GET_SIZE(%s) %s %d) {' % ( - Naming.args_cname, compare, 
min_positional_args)) + Naming.args_cname, compare, min_positional_args)) code.put_goto(argtuple_error_label) if self.num_required_kw_args: # pure error case: keywords required but not passed if max_positional_args > min_positional_args and not self.star_arg: code.putln('} else if (PyTuple_GET_SIZE(%s) > %d) {' % ( - Naming.args_cname, max_positional_args)) + Naming.args_cname, max_positional_args)) code.put_goto(argtuple_error_label) code.putln('} else {') for i, arg in enumerate(kw_only_args): @@ -3708,8 +3708,8 @@ class DefNodeWrapper(FuncDefNode): code.globalstate.use_utility_code( UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c")) code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % ( - self.name, - pystring_cname)) + self.name, + pystring_cname)) code.putln(code.error_goto(self.pos)) break @@ -3764,9 +3764,9 @@ class DefNodeWrapper(FuncDefNode): code.globalstate.use_utility_code( UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c")) code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, PyTuple_GET_SIZE(%s)); ' % ( - self.name, has_fixed_positional_count, - min_positional_args, max_positional_args, - Naming.args_cname)) + self.name, has_fixed_positional_count, + min_positional_args, max_positional_args, + Naming.args_cname)) code.putln(code.error_goto(self.pos)) def generate_arg_assignment(self, arg, item, code): @@ -3785,9 +3785,9 @@ class DefNodeWrapper(FuncDefNode): item, arg.entry.cname, arg.pos, code)) if arg.default: code.putln('} else {') - code.putln("%s = %s;" % ( - arg.entry.cname, - arg.calculate_default_value_code(code))) + code.putln("%s = %s;" % ( + arg.entry.cname, + arg.calculate_default_value_code(code))) if arg.type.is_memoryviewslice: code.put_incref_memoryviewslice(arg.entry.cname, have_gil=True) @@ -3799,18 +3799,18 @@ class DefNodeWrapper(FuncDefNode): if self.starstar_arg: self.starstar_arg.entry.xdecref_cleanup = 0 code.putln('%s = PyDict_New(); if (unlikely(!%s)) return %s;' % ( - self.starstar_arg.entry.cname, - self.starstar_arg.entry.cname, - self.error_value())) + self.starstar_arg.entry.cname, + self.starstar_arg.entry.cname, + self.error_value())) code.put_gotref(self.starstar_arg.entry.cname) if self.star_arg: self.star_arg.entry.xdecref_cleanup = 0 code.putln('if (PyTuple_GET_SIZE(%s) > %d) {' % ( - Naming.args_cname, - max_positional_args)) + Naming.args_cname, + max_positional_args)) code.putln('%s = PyTuple_GetSlice(%s, %d, PyTuple_GET_SIZE(%s));' % ( - self.star_arg.entry.cname, Naming.args_cname, - max_positional_args, Naming.args_cname)) + self.star_arg.entry.cname, Naming.args_cname, + max_positional_args, Naming.args_cname)) code.putln("if (unlikely(!%s)) {" % self.star_arg.entry.cname) if self.starstar_arg: code.put_decref_clear(self.starstar_arg.entry.cname, py_object_type) @@ -3854,7 +3854,7 @@ class DefNodeWrapper(FuncDefNode): for i in range(max_positional_args-1, -1, -1): code.put('case %2d: ' % (i+1)) code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % ( - i, Naming.args_cname, i)) + i, Naming.args_cname, i)) code.putln('CYTHON_FALLTHROUGH;') code.putln('case 0: break;') if not self.star_arg: @@ -3916,16 +3916,16 @@ class DefNodeWrapper(FuncDefNode): code.globalstate.use_utility_code( UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c")) code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, %d); ' % ( - self.name, has_fixed_positional_count, - min_positional_args, max_positional_args, i)) + self.name, has_fixed_positional_count, + min_positional_args, max_positional_args, i)) 
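The wrapper code re-indented above emits a guard that rejects calls with a wrong number of positional arguments via __Pyx_RaiseArgtupleInvalid. As a rough, purely illustrative Python sketch of what that guard checks (the helper name and message format here are assumptions, not the exact generated C):

    def _check_argcount(func_name, nargs, min_args, max_args, exact_count):
        # mirrors the condition guarded by __Pyx_RaiseArgtupleInvalid above
        if nargs < min_args or nargs > max_args or (exact_count and nargs != max_args):
            raise TypeError("%s() takes %s %d positional arguments (%d given)"
                            % (func_name, "exactly" if exact_count else "at most",
                               max_args, nargs))

    try:
        _check_argcount("f", nargs=3, min_args=1, max_args=2, exact_count=False)
    except TypeError as exc:
        print(exc)  # f() takes at most 2 positional arguments (3 given)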
code.putln(code.error_goto(self.pos)) code.putln('}') elif arg.kw_only: code.putln('else {') code.globalstate.use_utility_code( UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c")) - code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % ( - self.name, pystring_cname)) + code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % ( + self.name, pystring_cname)) code.putln(code.error_goto(self.pos)) code.putln('}') if max_positional_args > 0: @@ -3948,19 +3948,19 @@ class DefNodeWrapper(FuncDefNode): pos_arg_count = "0" elif self.star_arg: code.putln("const Py_ssize_t used_pos_args = (pos_args < %d) ? pos_args : %d;" % ( - max_positional_args, max_positional_args)) + max_positional_args, max_positional_args)) pos_arg_count = "used_pos_args" else: pos_arg_count = "pos_args" code.globalstate.use_utility_code( UtilityCode.load_cached("ParseKeywords", "FunctionArguments.c")) - code.putln('if (unlikely(__Pyx_ParseOptionalKeywords(%s, %s, %s, values, %s, "%s") < 0)) %s' % ( - Naming.kwds_cname, - Naming.pykwdlist_cname, - self.starstar_arg and self.starstar_arg.entry.cname or '0', - pos_arg_count, - self.name, - code.error_goto(self.pos))) + code.putln('if (unlikely(__Pyx_ParseOptionalKeywords(%s, %s, %s, values, %s, "%s") < 0)) %s' % ( + Naming.kwds_cname, + Naming.pykwdlist_cname, + self.starstar_arg and self.starstar_arg.entry.cname or '0', + pos_arg_count, + self.name, + code.error_goto(self.pos))) code.putln('}') def generate_optional_kwonly_args_unpacking_code(self, all_args, code): @@ -4017,9 +4017,9 @@ class DefNodeWrapper(FuncDefNode): self.generate_arg_conversion_to_pyobject(arg, code) else: if new_type.assignable_from(old_type): - code.putln("%s = %s;" % (arg.entry.cname, arg.hdr_cname)) + code.putln("%s = %s;" % (arg.entry.cname, arg.hdr_cname)) else: - error(arg.pos, "Cannot convert 1 argument from '%s' to '%s'" % (old_type, new_type)) + error(arg.pos, "Cannot convert 1 argument from '%s' to '%s'" % (old_type, new_type)) def generate_arg_conversion_from_pyobject(self, arg, code): new_type = arg.type @@ -4032,7 +4032,7 @@ class DefNodeWrapper(FuncDefNode): code, )) else: - error(arg.pos, "Cannot convert Python object argument to type '%s'" % new_type) + error(arg.pos, "Cannot convert Python object argument to type '%s'" % new_type) def generate_arg_conversion_to_pyobject(self, arg, code): old_type = arg.hdr_type @@ -4045,7 +4045,7 @@ class DefNodeWrapper(FuncDefNode): code.error_goto_if_null(arg.entry.cname, arg.pos))) code.put_var_gotref(arg.entry) else: - error(arg.pos, "Cannot convert argument of type '%s' to Python object" % old_type) + error(arg.pos, "Cannot convert argument of type '%s' to Python object" % old_type) def generate_argument_type_tests(self, code): # Generate type tests for args whose signature @@ -4070,7 +4070,7 @@ class GeneratorDefNode(DefNode): # is_generator = True - is_coroutine = False + is_coroutine = False is_iterable_coroutine = False is_asyncgen = False gen_type_name = 'Generator' @@ -4092,10 +4092,10 @@ class GeneratorDefNode(DefNode): body_cname = self.gbody.entry.func_cname name = code.intern_identifier(self.name) qualname = code.intern_identifier(self.qualname) - module_name = code.intern_identifier(self.module_name) + module_name = code.intern_identifier(self.module_name) code.putln('{') - code.putln('__pyx_CoroutineObject *gen = __Pyx_%s_New(' + code.putln('__pyx_CoroutineObject *gen = __Pyx_%s_New(' '(__pyx_coroutine_body_t) %s, %s, (PyObject *) %s, %s, %s, %s); %s' % ( self.gen_type_name, body_cname, 
self.code_object.calculate_result_code(code) if self.code_object else 'NULL', @@ -4119,11 +4119,11 @@ class GeneratorDefNode(DefNode): self.gbody.generate_function_definitions(env, code) -class AsyncDefNode(GeneratorDefNode): +class AsyncDefNode(GeneratorDefNode): gen_type_name = 'Coroutine' - is_coroutine = True - - + is_coroutine = True + + class IterableAsyncDefNode(AsyncDefNode): gen_type_name = 'IterableCoroutine' is_iterable_coroutine = True @@ -4139,9 +4139,9 @@ class GeneratorBodyDefNode(DefNode): # is_generator_body = True - is_inlined = False + is_inlined = False is_async_gen_body = False - inlined_comprehension_type = None # container type for inlined comprehensions + inlined_comprehension_type = None # container type for inlined comprehensions def __init__(self, pos=None, name=None, body=None, is_async_gen_body=False): super(GeneratorBodyDefNode, self).__init__( @@ -4184,7 +4184,7 @@ class GeneratorBodyDefNode(DefNode): self.body.generate_function_definitions(lenv, code) # Generate C code for header and body of function - code.enter_cfunc_scope(lenv) + code.enter_cfunc_scope(lenv) code.return_from_error_cleanup_label = code.new_label() # ----- Top-level constants used by this function @@ -4217,23 +4217,23 @@ class GeneratorBodyDefNode(DefNode): code.putln('%s' % (code.error_goto_if_null(Naming.sent_value_cname, self.pos))) - # ----- prepare target container for inlined comprehension - if self.is_inlined and self.inlined_comprehension_type is not None: - target_type = self.inlined_comprehension_type - if target_type is Builtin.list_type: - comp_init = 'PyList_New(0)' - elif target_type is Builtin.set_type: - comp_init = 'PySet_New(NULL)' - elif target_type is Builtin.dict_type: - comp_init = 'PyDict_New()' - else: - raise InternalError( - "invalid type of inlined comprehension: %s" % target_type) - code.putln("%s = %s; %s" % ( - Naming.retval_cname, comp_init, - code.error_goto_if_null(Naming.retval_cname, self.pos))) - code.put_gotref(Naming.retval_cname) - + # ----- prepare target container for inlined comprehension + if self.is_inlined and self.inlined_comprehension_type is not None: + target_type = self.inlined_comprehension_type + if target_type is Builtin.list_type: + comp_init = 'PyList_New(0)' + elif target_type is Builtin.set_type: + comp_init = 'PySet_New(NULL)' + elif target_type is Builtin.dict_type: + comp_init = 'PyDict_New()' + else: + raise InternalError( + "invalid type of inlined comprehension: %s" % target_type) + code.putln("%s = %s; %s" % ( + Naming.retval_cname, comp_init, + code.error_goto_if_null(Naming.retval_cname, self.pos))) + code.put_gotref(Naming.retval_cname) + # ----- Function body self.generate_function_body(env, code) # ----- Closure initialization @@ -4242,8 +4242,8 @@ class GeneratorBodyDefNode(DefNode): lenv.scope_class.type.declaration_code(Naming.cur_scope_cname), lenv.scope_class.type.cast_code('%s->closure' % Naming.generator_cname))) - # FIXME: this silences a potential "unused" warning => try to avoid unused closures in more cases - code.putln("CYTHON_MAYBE_UNUSED_VAR(%s);" % Naming.cur_scope_cname) + # FIXME: this silences a potential "unused" warning => try to avoid unused closures in more cases + code.putln("CYTHON_MAYBE_UNUSED_VAR(%s);" % Naming.cur_scope_cname) if profile or linetrace: code.funcstate.can_trace = False @@ -4254,7 +4254,7 @@ class GeneratorBodyDefNode(DefNode): # on normal generator termination, we do not take the exception propagation # path: no traceback info is required and not creating it is much faster - if not 
self.is_inlined and not self.body.is_terminator: + if not self.is_inlined and not self.body.is_terminator: if self.is_async_gen_body: code.globalstate.use_utility_code( UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c")) @@ -4265,11 +4265,11 @@ class GeneratorBodyDefNode(DefNode): if not self.body.is_terminator: code.put_goto(code.return_label) code.put_label(code.error_label) - if self.is_inlined and self.inlined_comprehension_type is not None: - code.put_xdecref_clear(Naming.retval_cname, py_object_type) - if Future.generator_stop in env.global_scope().context.future_directives: - # PEP 479: turn accidental StopIteration exceptions into a RuntimeError - code.globalstate.use_utility_code(UtilityCode.load_cached("pep479", "Coroutine.c")) + if self.is_inlined and self.inlined_comprehension_type is not None: + code.put_xdecref_clear(Naming.retval_cname, py_object_type) + if Future.generator_stop in env.global_scope().context.future_directives: + # PEP 479: turn accidental StopIteration exceptions into a RuntimeError + code.globalstate.use_utility_code(UtilityCode.load_cached("pep479", "Coroutine.c")) code.putln("__Pyx_Generator_Replace_StopIteration(%d);" % bool(self.is_async_gen_body)) for cname, type in code.funcstate.all_managed_temps(): code.put_xdecref(cname, type) @@ -4277,22 +4277,22 @@ class GeneratorBodyDefNode(DefNode): # ----- Non-error return cleanup code.put_label(code.return_label) - if self.is_inlined: - code.put_xgiveref(Naming.retval_cname) - else: - code.put_xdecref_clear(Naming.retval_cname, py_object_type) - # For Py3.7, clearing is already done below. - code.putln("#if !CYTHON_USE_EXC_INFO_STACK") + if self.is_inlined: + code.put_xgiveref(Naming.retval_cname) + else: + code.put_xdecref_clear(Naming.retval_cname, py_object_type) + # For Py3.7, clearing is already done below. 
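The PEP 479 branch restored above (guarded by Future.generator_stop) makes Cython match CPython in turning a StopIteration that escapes a generator body into a RuntimeError. A minimal pure-Python illustration of that behaviour, which is the default from Python 3.7 on:

    from __future__ import generator_stop  # implicit in Python 3.7+

    def gen():
        yield 1
        raise StopIteration  # before PEP 479 this silently ended iteration

    g = gen()
    print(next(g))  # 1
    try:
        next(g)
    except RuntimeError as exc:
        print(exc)  # generator raised StopIteration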
+ code.putln("#if !CYTHON_USE_EXC_INFO_STACK") code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname) - code.putln("#endif") + code.putln("#endif") code.putln('%s->resume_label = -1;' % Naming.generator_cname) # clean up as early as possible to help breaking any reference cycles - code.putln('__Pyx_Coroutine_clear((PyObject*)%s);' % Naming.generator_cname) + code.putln('__Pyx_Coroutine_clear((PyObject*)%s);' % Naming.generator_cname) if profile or linetrace: code.put_trace_return(Naming.retval_cname, nogil=not code.funcstate.gil_owned) code.put_finish_refcount_context() - code.putln("return %s;" % Naming.retval_cname) + code.putln("return %s;" % Naming.retval_cname) code.putln("}") # ----- Go back and insert temp variable declarations @@ -4343,14 +4343,14 @@ class OverrideCheckNode(StatNode): self.func_node = ExprNodes.RawCNameExprNode(self.pos, py_object_type) call_node = ExprNodes.SimpleCallNode( self.pos, function=self.func_node, - args=[ExprNodes.NameNode(self.pos, name=arg.name) - for arg in self.args[first_arg:]]) - if env.return_type.is_void or env.return_type.is_returncode: - self.body = StatListNode(self.pos, stats=[ - ExprStatNode(self.pos, expr=call_node), - ReturnStatNode(self.pos, value=None)]) - else: - self.body = ReturnStatNode(self.pos, value=call_node) + args=[ExprNodes.NameNode(self.pos, name=arg.name) + for arg in self.args[first_arg:]]) + if env.return_type.is_void or env.return_type.is_returncode: + self.body = StatListNode(self.pos, stats=[ + ExprStatNode(self.pos, expr=call_node), + ReturnStatNode(self.pos, value=None)]) + else: + self.body = ReturnStatNode(self.pos, value=call_node) self.body = self.body.analyse_expressions(env) return self @@ -4367,25 +4367,25 @@ class OverrideCheckNode(StatNode): if self.py_func.is_module_scope: code.putln("else {") else: - code.putln("else if (unlikely((Py_TYPE(%s)->tp_dictoffset != 0)" - " || (Py_TYPE(%s)->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))) {" % ( - self_arg, self_arg)) - - code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS") - code.globalstate.use_utility_code( - UtilityCode.load_cached("PyDictVersioning", "ObjectHandling.c")) - # TODO: remove the object dict version check by 'inlining' the getattr implementation for methods. - # This would allow checking the dict versions around _PyType_Lookup() if it returns a descriptor, - # and would (tada!) make this check a pure type based thing instead of supporting only a single - # instance at a time. - code.putln("static PY_UINT64_T %s = __PYX_DICT_VERSION_INIT, %s = __PYX_DICT_VERSION_INIT;" % ( - Naming.tp_dict_version_temp, Naming.obj_dict_version_temp)) - code.putln("if (unlikely(!__Pyx_object_dict_version_matches(%s, %s, %s))) {" % ( - self_arg, Naming.tp_dict_version_temp, Naming.obj_dict_version_temp)) - code.putln("PY_UINT64_T %s = __Pyx_get_tp_dict_version(%s);" % ( - Naming.type_dict_guard_temp, self_arg)) - code.putln("#endif") - + code.putln("else if (unlikely((Py_TYPE(%s)->tp_dictoffset != 0)" + " || (Py_TYPE(%s)->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))) {" % ( + self_arg, self_arg)) + + code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS") + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyDictVersioning", "ObjectHandling.c")) + # TODO: remove the object dict version check by 'inlining' the getattr implementation for methods. 
+ # This would allow checking the dict versions around _PyType_Lookup() if it returns a descriptor, + # and would (tada!) make this check a pure type based thing instead of supporting only a single + # instance at a time. + code.putln("static PY_UINT64_T %s = __PYX_DICT_VERSION_INIT, %s = __PYX_DICT_VERSION_INIT;" % ( + Naming.tp_dict_version_temp, Naming.obj_dict_version_temp)) + code.putln("if (unlikely(!__Pyx_object_dict_version_matches(%s, %s, %s))) {" % ( + self_arg, Naming.tp_dict_version_temp, Naming.obj_dict_version_temp)) + code.putln("PY_UINT64_T %s = __Pyx_get_tp_dict_version(%s);" % ( + Naming.type_dict_guard_temp, self_arg)) + code.putln("#endif") + func_node_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) self.func_node.set_cname(func_node_temp) # need to get attribute manually--scope would return cdef method @@ -4395,48 +4395,48 @@ class OverrideCheckNode(StatNode): code.putln("%s = __Pyx_PyObject_GetAttrStr(%s, %s); %s" % ( func_node_temp, self_arg, interned_attr_cname, err)) code.put_gotref(func_node_temp) - + is_builtin_function_or_method = "PyCFunction_Check(%s)" % func_node_temp - is_overridden = "(PyCFunction_GET_FUNCTION(%s) != (PyCFunction)(void*)%s)" % ( + is_overridden = "(PyCFunction_GET_FUNCTION(%s) != (PyCFunction)(void*)%s)" % ( func_node_temp, self.py_func.entry.func_cname) code.putln("if (!%s || %s) {" % (is_builtin_function_or_method, is_overridden)) self.body.generate_execution_code(code) code.putln("}") - - # NOTE: it's not 100% sure that we catch the exact versions here that were used for the lookup, - # but it is very unlikely that the versions change during lookup, and the type dict safe guard - # should increase the chance of detecting such a case. - code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS") - code.putln("%s = __Pyx_get_tp_dict_version(%s);" % ( - Naming.tp_dict_version_temp, self_arg)) - code.putln("%s = __Pyx_get_object_dict_version(%s);" % ( - Naming.obj_dict_version_temp, self_arg)) - # Safety check that the type dict didn't change during the lookup. Since CPython looks up the - # attribute (descriptor) first in the type dict and then in the instance dict or through the - # descriptor, the only really far-away lookup when we get here is one in the type dict. So we - # double check the type dict version before and afterwards to guard against later changes of - # the type dict during the lookup process. - code.putln("if (unlikely(%s != %s)) {" % ( - Naming.type_dict_guard_temp, Naming.tp_dict_version_temp)) - code.putln("%s = %s = __PYX_DICT_VERSION_INIT;" % ( - Naming.tp_dict_version_temp, Naming.obj_dict_version_temp)) - code.putln("}") - code.putln("#endif") - + + # NOTE: it's not 100% sure that we catch the exact versions here that were used for the lookup, + # but it is very unlikely that the versions change during lookup, and the type dict safe guard + # should increase the chance of detecting such a case. + code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS") + code.putln("%s = __Pyx_get_tp_dict_version(%s);" % ( + Naming.tp_dict_version_temp, self_arg)) + code.putln("%s = __Pyx_get_object_dict_version(%s);" % ( + Naming.obj_dict_version_temp, self_arg)) + # Safety check that the type dict didn't change during the lookup. 
Since CPython looks up the + # attribute (descriptor) first in the type dict and then in the instance dict or through the + # descriptor, the only really far-away lookup when we get here is one in the type dict. So we + # double check the type dict version before and afterwards to guard against later changes of + # the type dict during the lookup process. + code.putln("if (unlikely(%s != %s)) {" % ( + Naming.type_dict_guard_temp, Naming.tp_dict_version_temp)) + code.putln("%s = %s = __PYX_DICT_VERSION_INIT;" % ( + Naming.tp_dict_version_temp, Naming.obj_dict_version_temp)) + code.putln("}") + code.putln("#endif") + code.put_decref_clear(func_node_temp, PyrexTypes.py_object_type) code.funcstate.release_temp(func_node_temp) - - code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS") + + code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS") + code.putln("}") + code.putln("#endif") + code.putln("}") - code.putln("#endif") - code.putln("}") - - + class ClassDefNode(StatNode, BlockNode): pass - + class PyClassDefNode(ClassDefNode): # A Python class definition. # @@ -4462,7 +4462,7 @@ class PyClassDefNode(ClassDefNode): mkw = None def __init__(self, pos, name, bases, doc, body, decorators=None, - keyword_args=None, force_py3_semantics=False): + keyword_args=None, force_py3_semantics=False): StatNode.__init__(self, pos) self.name = name self.doc = doc @@ -4477,30 +4477,30 @@ class PyClassDefNode(ClassDefNode): doc_node = None allow_py2_metaclass = not force_py3_semantics - if keyword_args: + if keyword_args: allow_py2_metaclass = False self.is_py3_style_class = True - if keyword_args.is_dict_literal: - if keyword_args.key_value_pairs: - for i, item in list(enumerate(keyword_args.key_value_pairs))[::-1]: - if item.key.value == 'metaclass': - if self.metaclass is not None: - error(item.pos, "keyword argument 'metaclass' passed multiple times") - # special case: we already know the metaclass, - # so we don't need to do the "build kwargs, - # find metaclass" dance at runtime - self.metaclass = item.value - del keyword_args.key_value_pairs[i] - self.mkw = keyword_args - else: - assert self.metaclass is not None + if keyword_args.is_dict_literal: + if keyword_args.key_value_pairs: + for i, item in list(enumerate(keyword_args.key_value_pairs))[::-1]: + if item.key.value == 'metaclass': + if self.metaclass is not None: + error(item.pos, "keyword argument 'metaclass' passed multiple times") + # special case: we already know the metaclass, + # so we don't need to do the "build kwargs, + # find metaclass" dance at runtime + self.metaclass = item.value + del keyword_args.key_value_pairs[i] + self.mkw = keyword_args + else: + assert self.metaclass is not None else: - # MergedDictNode - self.mkw = ExprNodes.ProxyNode(keyword_args) + # MergedDictNode + self.mkw = ExprNodes.ProxyNode(keyword_args) if force_py3_semantics or self.bases or self.mkw or self.metaclass: if self.metaclass is None: - if keyword_args and not keyword_args.is_dict_literal: + if keyword_args and not keyword_args.is_dict_literal: # **kwargs may contain 'metaclass' arg mkdict = self.mkw else: @@ -4541,20 +4541,20 @@ class PyClassDefNode(ClassDefNode): from . 
import ExprNodes return CClassDefNode(self.pos, - visibility='private', - module_name=None, - class_name=self.name, + visibility='private', + module_name=None, + class_name=self.name, bases=self.bases or ExprNodes.TupleNode(self.pos, args=[]), - decorators=self.decorators, - body=self.body, - in_pxd=False, - doc=self.doc) + decorators=self.decorators, + body=self.body, + in_pxd=False, + doc=self.doc) def create_scope(self, env): genv = env while genv.is_py_class_scope or genv.is_c_class_scope: genv = genv.outer_scope - cenv = self.scope = PyClassScope(name=self.name, outer_scope=genv) + cenv = self.scope = PyClassScope(name=self.name, outer_scope=genv) return cenv def analyse_declarations(self, env): @@ -4564,8 +4564,8 @@ class PyClassDefNode(ClassDefNode): for decorator in self.decorators[::-1]: class_result = SimpleCallNode( decorator.pos, - function=decorator.decorator, - args=[class_result]) + function=decorator.decorator, + args=[class_result]) self.decorators = None self.class_result = class_result if self.bases: @@ -4599,7 +4599,7 @@ class PyClassDefNode(ClassDefNode): self.body.generate_function_definitions(self.scope, code) def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) code.pyclass_stack.append(self) cenv = self.scope if self.bases: @@ -4641,7 +4641,7 @@ class PyClassDefNode(ClassDefNode): self.bases.free_temps(code) code.pyclass_stack.pop() - + class CClassDefNode(ClassDefNode): # An extension type definition. # @@ -4654,7 +4654,7 @@ class CClassDefNode(ClassDefNode): # bases TupleNode Base class(es) # objstruct_name string or None Specified C name of object struct # typeobj_name string or None Specified C name of type object - # check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match + # check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match # in_pxd boolean Is in a .pxd file # decorators [DecoratorNode] list of decorators or None # doc string or None @@ -4671,7 +4671,7 @@ class CClassDefNode(ClassDefNode): api = False objstruct_name = None typeobj_name = None - check_size = None + check_size = None decorators = None shadow = False @@ -4697,20 +4697,20 @@ class CClassDefNode(ClassDefNode): home_scope = env self.entry = home_scope.declare_c_class( - name=self.class_name, - pos=self.pos, - defining=0, - implementing=0, - module_name=self.module_name, - base_type=None, - objstruct_cname=self.objstruct_name, - typeobj_cname=self.typeobj_name, - visibility=self.visibility, - typedef_flag=self.typedef_flag, - check_size = self.check_size, - api=self.api, - buffer_defaults=self.buffer_defaults(env), - shadow=self.shadow) + name=self.class_name, + pos=self.pos, + defining=0, + implementing=0, + module_name=self.module_name, + base_type=None, + objstruct_cname=self.objstruct_name, + typeobj_cname=self.typeobj_name, + visibility=self.visibility, + typedef_flag=self.typedef_flag, + check_size = self.check_size, + api=self.api, + buffer_defaults=self.buffer_defaults(env), + shadow=self.shadow) def analyse_declarations(self, env): #print "CClassDefNode.analyse_declarations:", self.class_name @@ -4718,9 +4718,9 @@ class CClassDefNode(ClassDefNode): #print "...module_name =", self.module_name if env.in_cinclude and not self.objstruct_name: - error(self.pos, "Object struct name specification required for C class defined in 'extern from' block") + error(self.pos, "Object struct name specification required for C class defined in 'extern from' block") if self.decorators: - error(self.pos, "Decorators not 
allowed on cdef classes (used on type '%s')" % self.class_name) + error(self.pos, "Decorators not allowed on cdef classes (used on type '%s')" % self.class_name) self.base_type = None # Now that module imports are cached, we need to # import the modules for extern classes. @@ -4780,25 +4780,25 @@ class CClassDefNode(ClassDefNode): if self.visibility == 'extern': if (self.module_name == '__builtin__' and - self.class_name in Builtin.builtin_types and - env.qualified_name[:8] != 'cpython.'): # allow overloaded names for cimporting from cpython + self.class_name in Builtin.builtin_types and + env.qualified_name[:8] != 'cpython.'): # allow overloaded names for cimporting from cpython warning(self.pos, "%s already a builtin Cython type" % self.class_name, 1) self.entry = home_scope.declare_c_class( - name=self.class_name, - pos=self.pos, - defining=has_body and self.in_pxd, - implementing=has_body and not self.in_pxd, - module_name=self.module_name, - base_type=self.base_type, - objstruct_cname=self.objstruct_name, - typeobj_cname=self.typeobj_name, - check_size=self.check_size, - visibility=self.visibility, - typedef_flag=self.typedef_flag, - api=self.api, - buffer_defaults=self.buffer_defaults(env), - shadow=self.shadow) + name=self.class_name, + pos=self.pos, + defining=has_body and self.in_pxd, + implementing=has_body and not self.in_pxd, + module_name=self.module_name, + base_type=self.base_type, + objstruct_cname=self.objstruct_name, + typeobj_cname=self.typeobj_name, + check_size=self.check_size, + visibility=self.visibility, + typedef_flag=self.typedef_flag, + api=self.api, + buffer_defaults=self.buffer_defaults(env), + shadow=self.shadow) if self.shadow: home_scope.lookup(self.class_name).as_variable = self.entry @@ -4813,15 +4813,15 @@ class CClassDefNode(ClassDefNode): if has_body: self.body.analyse_declarations(scope) - dict_entry = self.scope.lookup_here("__dict__") - if dict_entry and dict_entry.is_variable and (not scope.defined and not scope.implemented): - dict_entry.getter_cname = self.scope.mangle_internal("__dict__getter") - self.scope.declare_property("__dict__", dict_entry.doc, dict_entry.pos) + dict_entry = self.scope.lookup_here("__dict__") + if dict_entry and dict_entry.is_variable and (not scope.defined and not scope.implemented): + dict_entry.getter_cname = self.scope.mangle_internal("__dict__getter") + self.scope.declare_property("__dict__", dict_entry.doc, dict_entry.pos) if self.in_pxd: scope.defined = 1 else: scope.implemented = 1 - + if len(self.bases.args) > 1: if not has_body or self.in_pxd: error(self.bases.args[1].pos, "Only declare first base in declaration.") @@ -4865,7 +4865,7 @@ class CClassDefNode(ClassDefNode): def generate_execution_code(self, code): # This is needed to generate evaluation code for # default values of method arguments. - code.mark_pos(self.pos) + code.mark_pos(self.pos) if self.body: self.body.generate_execution_code(code) if not self.entry.type.early_init: @@ -4922,10 +4922,10 @@ class CClassDefNode(ClassDefNode): code.error_goto(entry.pos))) # Don't inherit tp_print from builtin types, restoring the # behavior of using tp_repr or tp_str instead. 
- # ("tp_print" was renamed to "tp_vectorcall_offset" in Py3.8b1) - code.putln("#if PY_VERSION_HEX < 0x030800B1") + # ("tp_print" was renamed to "tp_vectorcall_offset" in Py3.8b1) + code.putln("#if PY_VERSION_HEX < 0x030800B1") code.putln("%s.tp_print = 0;" % typeobj_cname) - code.putln("#endif") + code.putln("#endif") # Use specialised attribute lookup for types with generic lookup but no instance dict. getattr_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_getattro') @@ -4993,14 +4993,14 @@ class CClassDefNode(ClassDefNode): code.putln("if (__Pyx_MergeVtables(&%s) < 0) %s" % ( typeobj_cname, code.error_goto(entry.pos))) - if not type.scope.is_internal and not type.scope.directives.get('internal'): + if not type.scope.is_internal and not type.scope.directives.get('internal'): # scope.is_internal is set for types defined by # Cython (such as closures), the 'internal' # directive is set by users code.putln( - 'if (PyObject_SetAttr(%s, %s, (PyObject *)&%s) < 0) %s' % ( + 'if (PyObject_SetAttr(%s, %s, (PyObject *)&%s) < 0) %s' % ( Naming.module_cname, - code.intern_identifier(scope.class_name), + code.intern_identifier(scope.class_name), typeobj_cname, code.error_goto(entry.pos))) weakref_entry = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None @@ -5127,7 +5127,7 @@ class ExprStatNode(StatNode): if type is None: error(type_node.pos, "Unknown type") else: - env.declare_var(var.value, type, var.pos, is_cdef=True) + env.declare_var(var.value, type, var.pos, is_cdef=True) self.__class__ = PassStatNode elif getattr(expr, 'annotation', None) is not None: if expr.is_name: @@ -5139,7 +5139,7 @@ class ExprStatNode(StatNode): self.__class__ = PassStatNode def analyse_expressions(self, env): - self.expr.result_is_used = False # hint that .result() may safely be left empty + self.expr.result_is_used = False # hint that .result() may safely be left empty self.expr = self.expr.analyse_expressions(env) # Repeat in case of node replacement. self.expr.result_is_used = False # hint that .result() may safely be left empty @@ -5152,7 +5152,7 @@ class ExprStatNode(StatNode): gil_message = "Discarding owned Python object" def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) self.expr.result_is_used = False # hint that .result() may safely be left empty self.expr.generate_evaluation_code(code) if not self.expr.is_temp and self.expr.result(): @@ -5181,7 +5181,7 @@ class AssignmentNode(StatNode): def analyse_expressions(self, env): node = self.analyse_types(env) - if isinstance(node, AssignmentNode) and not isinstance(node, ParallelAssignmentNode): + if isinstance(node, AssignmentNode) and not isinstance(node, ParallelAssignmentNode): if node.rhs.type.is_ptr and node.rhs.is_ephemeral(): error(self.pos, "Storing unsafe C derivative of temporary Python reference") return node @@ -5191,7 +5191,7 @@ class AssignmentNode(StatNode): # self.analyse_expressions_2(env) def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) self.generate_rhs_evaluation_code(code) self.generate_assignment_code(code) @@ -5201,16 +5201,16 @@ class SingleAssignmentNode(AssignmentNode): # # a = b # - # lhs ExprNode Left hand side - # rhs ExprNode Right hand side - # first bool Is this guaranteed the first assignment to lhs? 
- # is_overloaded_assignment bool Is this assignment done via an overloaded operator= - # exception_check - # exception_value + # lhs ExprNode Left hand side + # rhs ExprNode Right hand side + # first bool Is this guaranteed the first assignment to lhs? + # is_overloaded_assignment bool Is this assignment done via an overloaded operator= + # exception_check + # exception_value child_attrs = ["lhs", "rhs"] first = False - is_overloaded_assignment = False + is_overloaded_assignment = False declaration_only = False def analyse_declarations(self, env): @@ -5222,17 +5222,17 @@ class SingleAssignmentNode(AssignmentNode): if func_name: args, kwds = self.rhs.explicit_args_kwds() if func_name in ['declare', 'typedef']: - if len(args) > 2: - error(args[2].pos, "Invalid positional argument.") + if len(args) > 2: + error(args[2].pos, "Invalid positional argument.") return - if kwds is not None: - kwdict = kwds.compile_time_value(None) - if func_name == 'typedef' or 'visibility' not in kwdict: - error(kwds.pos, "Invalid keyword argument.") - return - visibility = kwdict['visibility'] - else: - visibility = 'private' + if kwds is not None: + kwdict = kwds.compile_time_value(None) + if func_name == 'typedef' or 'visibility' not in kwdict: + error(kwds.pos, "Invalid keyword argument.") + return + visibility = kwdict['visibility'] + else: + visibility = 'private' type = args[0].analyse_as_type(env) if type is None: error(args[0].pos, "Unknown type") @@ -5247,7 +5247,7 @@ class SingleAssignmentNode(AssignmentNode): error(lhs.pos, "Invalid declaration") return for var, pos in vars: - env.declare_var(var, type, pos, is_cdef=True, visibility=visibility) + env.declare_var(var, type, pos, is_cdef=True, visibility=visibility) if len(args) == 2: # we have a value self.rhs = args[1] @@ -5289,7 +5289,7 @@ class SingleAssignmentNode(AssignmentNode): "fused_type does not take keyword arguments") fusednode = FusedTypeNode(self.rhs.pos, - name=self.lhs.name, types=args) + name=self.lhs.name, types=args) fusednode.analyse_declarations(env) if self.declaration_only: @@ -5297,44 +5297,44 @@ class SingleAssignmentNode(AssignmentNode): else: self.lhs.analyse_target_declaration(env) - def analyse_types(self, env, use_temp=0): + def analyse_types(self, env, use_temp=0): from . 
import ExprNodes self.rhs = self.rhs.analyse_types(env) - - unrolled_assignment = self.unroll_rhs(env) - if unrolled_assignment: - return unrolled_assignment - + + unrolled_assignment = self.unroll_rhs(env) + if unrolled_assignment: + return unrolled_assignment + self.lhs = self.lhs.analyse_target_types(env) self.lhs.gil_assignment_check(env) - unrolled_assignment = self.unroll_lhs(env) - if unrolled_assignment: - return unrolled_assignment - - if isinstance(self.lhs, ExprNodes.MemoryViewIndexNode): - self.lhs.analyse_broadcast_operation(self.rhs) - self.lhs = self.lhs.analyse_as_memview_scalar_assignment(self.rhs) - elif self.lhs.type.is_array: - if not isinstance(self.lhs, ExprNodes.SliceIndexNode): - # cannot assign to C array, only to its full slice - self.lhs = ExprNodes.SliceIndexNode(self.lhs.pos, base=self.lhs, start=None, stop=None) - self.lhs = self.lhs.analyse_target_types(env) - - if self.lhs.type.is_cpp_class: - op = env.lookup_operator_for_types(self.pos, '=', [self.lhs.type, self.rhs.type]) - if op: - rhs = self.rhs - self.is_overloaded_assignment = True - self.exception_check = op.type.exception_check - self.exception_value = op.type.exception_value - if self.exception_check == '+' and self.exception_value is None: - env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) - else: - rhs = self.rhs.coerce_to(self.lhs.type, env) - else: - rhs = self.rhs.coerce_to(self.lhs.type, env) - + unrolled_assignment = self.unroll_lhs(env) + if unrolled_assignment: + return unrolled_assignment + + if isinstance(self.lhs, ExprNodes.MemoryViewIndexNode): + self.lhs.analyse_broadcast_operation(self.rhs) + self.lhs = self.lhs.analyse_as_memview_scalar_assignment(self.rhs) + elif self.lhs.type.is_array: + if not isinstance(self.lhs, ExprNodes.SliceIndexNode): + # cannot assign to C array, only to its full slice + self.lhs = ExprNodes.SliceIndexNode(self.lhs.pos, base=self.lhs, start=None, stop=None) + self.lhs = self.lhs.analyse_target_types(env) + + if self.lhs.type.is_cpp_class: + op = env.lookup_operator_for_types(self.pos, '=', [self.lhs.type, self.rhs.type]) + if op: + rhs = self.rhs + self.is_overloaded_assignment = True + self.exception_check = op.type.exception_check + self.exception_value = op.type.exception_value + if self.exception_check == '+' and self.exception_value is None: + env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) + else: + rhs = self.rhs.coerce_to(self.lhs.type, env) + else: + rhs = self.rhs.coerce_to(self.lhs.type, env) + if use_temp or rhs.is_attribute or ( not rhs.is_name and not rhs.is_literal and rhs.type.is_pyobject): @@ -5345,152 +5345,152 @@ class SingleAssignmentNode(AssignmentNode): self.rhs = rhs return self - def unroll(self, node, target_size, env): - from . 
import ExprNodes, UtilNodes - - base = node - start_node = stop_node = step_node = check_node = None - - if node.type.is_ctuple: - slice_size = node.type.size - - elif node.type.is_ptr or node.type.is_array: - while isinstance(node, ExprNodes.SliceIndexNode) and not (node.start or node.stop): - base = node = node.base - if isinstance(node, ExprNodes.SliceIndexNode): - base = node.base - start_node = node.start - if start_node: - start_node = start_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env) - stop_node = node.stop - if stop_node: - stop_node = stop_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env) - else: - if node.type.is_array and node.type.size: - stop_node = ExprNodes.IntNode( - self.pos, value=str(node.type.size), - constant_result=(node.type.size if isinstance(node.type.size, _py_int_types) - else ExprNodes.constant_value_not_set)) - else: - error(self.pos, "C array iteration requires known end index") - return - step_node = None #node.step - if step_node: - step_node = step_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env) - - # TODO: Factor out SliceIndexNode.generate_slice_guard_code() for use here. - def get_const(node, none_value): - if node is None: - return none_value - elif node.has_constant_result(): - return node.constant_result - else: - raise ValueError("Not a constant.") - - try: - slice_size = (get_const(stop_node, None) - get_const(start_node, 0)) / get_const(step_node, 1) - except ValueError: - error(self.pos, "C array assignment currently requires known endpoints") - return - - elif node.type.is_array: - slice_size = node.type.size - if not isinstance(slice_size, _py_int_types): - return # might still work when coercing to Python - else: - return - - else: - return - - if slice_size != target_size: - error(self.pos, "Assignment to/from slice of wrong length, expected %s, got %s" % ( - slice_size, target_size)) - return - - items = [] - base = UtilNodes.LetRefNode(base) - refs = [base] - if start_node and not start_node.is_literal: - start_node = UtilNodes.LetRefNode(start_node) - refs.append(start_node) - if stop_node and not stop_node.is_literal: - stop_node = UtilNodes.LetRefNode(stop_node) - refs.append(stop_node) - if step_node and not step_node.is_literal: - step_node = UtilNodes.LetRefNode(step_node) - refs.append(step_node) - - for ix in range(target_size): - ix_node = ExprNodes.IntNode(self.pos, value=str(ix), constant_result=ix, type=PyrexTypes.c_py_ssize_t_type) - if step_node is not None: - if step_node.has_constant_result(): - step_value = ix_node.constant_result * step_node.constant_result - ix_node = ExprNodes.IntNode(self.pos, value=str(step_value), constant_result=step_value) - else: - ix_node = ExprNodes.MulNode(self.pos, operator='*', operand1=step_node, operand2=ix_node) - if start_node is not None: - if start_node.has_constant_result() and ix_node.has_constant_result(): - index_value = ix_node.constant_result + start_node.constant_result - ix_node = ExprNodes.IntNode(self.pos, value=str(index_value), constant_result=index_value) - else: - ix_node = ExprNodes.AddNode( - self.pos, operator='+', operand1=start_node, operand2=ix_node) - items.append(ExprNodes.IndexNode(self.pos, base=base, index=ix_node.analyse_types(env))) - return check_node, refs, items - - def unroll_assignments(self, refs, check_node, lhs_list, rhs_list, env): - from . 
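unroll() above rewrites an assignment between fixed-size C arrays (or constant-bounded slices of them) into one indexed assignment per element, after checking that the constant slice length matches the target size. A small Python sketch of that size check and of the index expressions it generates (illustrative only):

    def unroll_indices(start, stop, step, target_size):
        # mirrors the get_const()/slice_size computation in unroll()
        slice_size = (stop - start) // step
        if slice_size != target_size:
            raise ValueError("Assignment to/from slice of wrong length, "
                             "expected %s, got %s" % (slice_size, target_size))
        # one assignment per element, at index start + ix * step
        return [start + ix * step for ix in range(target_size)]

    # dst[0:6:2] = <3-element RHS> unrolls to assignments at indices [0, 2, 4]
    assert unroll_indices(0, 6, 2, 3) == [0, 2, 4]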
import UtilNodes - assignments = [] - for lhs, rhs in zip(lhs_list, rhs_list): - assignments.append(SingleAssignmentNode(self.pos, lhs=lhs, rhs=rhs, first=self.first)) - node = ParallelAssignmentNode(pos=self.pos, stats=assignments).analyse_expressions(env) - if check_node: - node = StatListNode(pos=self.pos, stats=[check_node, node]) - for ref in refs[::-1]: - node = UtilNodes.LetNode(ref, node) - return node - - def unroll_rhs(self, env): - from . import ExprNodes - if not isinstance(self.lhs, ExprNodes.TupleNode): - return - if any(arg.is_starred for arg in self.lhs.args): - return - - unrolled = self.unroll(self.rhs, len(self.lhs.args), env) - if not unrolled: - return - check_node, refs, rhs = unrolled - return self.unroll_assignments(refs, check_node, self.lhs.args, rhs, env) - - def unroll_lhs(self, env): - if self.lhs.type.is_ctuple: - # Handled directly. - return - from . import ExprNodes - if not isinstance(self.rhs, ExprNodes.TupleNode): - return - - unrolled = self.unroll(self.lhs, len(self.rhs.args), env) - if not unrolled: - return - check_node, refs, lhs = unrolled - return self.unroll_assignments(refs, check_node, lhs, self.rhs.args, env) - + def unroll(self, node, target_size, env): + from . import ExprNodes, UtilNodes + + base = node + start_node = stop_node = step_node = check_node = None + + if node.type.is_ctuple: + slice_size = node.type.size + + elif node.type.is_ptr or node.type.is_array: + while isinstance(node, ExprNodes.SliceIndexNode) and not (node.start or node.stop): + base = node = node.base + if isinstance(node, ExprNodes.SliceIndexNode): + base = node.base + start_node = node.start + if start_node: + start_node = start_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env) + stop_node = node.stop + if stop_node: + stop_node = stop_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env) + else: + if node.type.is_array and node.type.size: + stop_node = ExprNodes.IntNode( + self.pos, value=str(node.type.size), + constant_result=(node.type.size if isinstance(node.type.size, _py_int_types) + else ExprNodes.constant_value_not_set)) + else: + error(self.pos, "C array iteration requires known end index") + return + step_node = None #node.step + if step_node: + step_node = step_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env) + + # TODO: Factor out SliceIndexNode.generate_slice_guard_code() for use here. 
+ def get_const(node, none_value): + if node is None: + return none_value + elif node.has_constant_result(): + return node.constant_result + else: + raise ValueError("Not a constant.") + + try: + slice_size = (get_const(stop_node, None) - get_const(start_node, 0)) / get_const(step_node, 1) + except ValueError: + error(self.pos, "C array assignment currently requires known endpoints") + return + + elif node.type.is_array: + slice_size = node.type.size + if not isinstance(slice_size, _py_int_types): + return # might still work when coercing to Python + else: + return + + else: + return + + if slice_size != target_size: + error(self.pos, "Assignment to/from slice of wrong length, expected %s, got %s" % ( + slice_size, target_size)) + return + + items = [] + base = UtilNodes.LetRefNode(base) + refs = [base] + if start_node and not start_node.is_literal: + start_node = UtilNodes.LetRefNode(start_node) + refs.append(start_node) + if stop_node and not stop_node.is_literal: + stop_node = UtilNodes.LetRefNode(stop_node) + refs.append(stop_node) + if step_node and not step_node.is_literal: + step_node = UtilNodes.LetRefNode(step_node) + refs.append(step_node) + + for ix in range(target_size): + ix_node = ExprNodes.IntNode(self.pos, value=str(ix), constant_result=ix, type=PyrexTypes.c_py_ssize_t_type) + if step_node is not None: + if step_node.has_constant_result(): + step_value = ix_node.constant_result * step_node.constant_result + ix_node = ExprNodes.IntNode(self.pos, value=str(step_value), constant_result=step_value) + else: + ix_node = ExprNodes.MulNode(self.pos, operator='*', operand1=step_node, operand2=ix_node) + if start_node is not None: + if start_node.has_constant_result() and ix_node.has_constant_result(): + index_value = ix_node.constant_result + start_node.constant_result + ix_node = ExprNodes.IntNode(self.pos, value=str(index_value), constant_result=index_value) + else: + ix_node = ExprNodes.AddNode( + self.pos, operator='+', operand1=start_node, operand2=ix_node) + items.append(ExprNodes.IndexNode(self.pos, base=base, index=ix_node.analyse_types(env))) + return check_node, refs, items + + def unroll_assignments(self, refs, check_node, lhs_list, rhs_list, env): + from . import UtilNodes + assignments = [] + for lhs, rhs in zip(lhs_list, rhs_list): + assignments.append(SingleAssignmentNode(self.pos, lhs=lhs, rhs=rhs, first=self.first)) + node = ParallelAssignmentNode(pos=self.pos, stats=assignments).analyse_expressions(env) + if check_node: + node = StatListNode(pos=self.pos, stats=[check_node, node]) + for ref in refs[::-1]: + node = UtilNodes.LetNode(ref, node) + return node + + def unroll_rhs(self, env): + from . import ExprNodes + if not isinstance(self.lhs, ExprNodes.TupleNode): + return + if any(arg.is_starred for arg in self.lhs.args): + return + + unrolled = self.unroll(self.rhs, len(self.lhs.args), env) + if not unrolled: + return + check_node, refs, rhs = unrolled + return self.unroll_assignments(refs, check_node, self.lhs.args, rhs, env) + + def unroll_lhs(self, env): + if self.lhs.type.is_ctuple: + # Handled directly. + return + from . 
import ExprNodes + if not isinstance(self.rhs, ExprNodes.TupleNode): + return + + unrolled = self.unroll(self.lhs, len(self.rhs.args), env) + if not unrolled: + return + check_node, refs, lhs = unrolled + return self.unroll_assignments(refs, check_node, lhs, self.rhs.args, env) + def generate_rhs_evaluation_code(self, code): self.rhs.generate_evaluation_code(code) - def generate_assignment_code(self, code, overloaded_assignment=False): - if self.is_overloaded_assignment: - self.lhs.generate_assignment_code( - self.rhs, - code, - overloaded_assignment=self.is_overloaded_assignment, - exception_check=self.exception_check, - exception_value=self.exception_value) - else: - self.lhs.generate_assignment_code(self.rhs, code) + def generate_assignment_code(self, code, overloaded_assignment=False): + if self.is_overloaded_assignment: + self.lhs.generate_assignment_code( + self.rhs, + code, + overloaded_assignment=self.is_overloaded_assignment, + exception_check=self.exception_check, + exception_value=self.exception_value) + else: + self.lhs.generate_assignment_code(self.rhs, code) def generate_function_definitions(self, env, code): self.rhs.generate_function_definitions(env, code) @@ -5510,14 +5510,14 @@ class CascadedAssignmentNode(AssignmentNode): # # Used internally: # - # coerced_values [ExprNode] RHS coerced to all distinct LHS types - # cloned_values [ExprNode] cloned RHS value for each LHS - # assignment_overloads [Bool] If each assignment uses a C++ operator= + # coerced_values [ExprNode] RHS coerced to all distinct LHS types + # cloned_values [ExprNode] cloned RHS value for each LHS + # assignment_overloads [Bool] If each assignment uses a C++ operator= - child_attrs = ["lhs_list", "rhs", "coerced_values", "cloned_values"] - cloned_values = None + child_attrs = ["lhs_list", "rhs", "coerced_values", "cloned_values"] + cloned_values = None coerced_values = None - assignment_overloads = None + assignment_overloads = None def analyse_declarations(self, env): for lhs in self.lhs_list: @@ -5526,23 +5526,23 @@ class CascadedAssignmentNode(AssignmentNode): def analyse_types(self, env, use_temp=0): from .ExprNodes import CloneNode, ProxyNode - # collect distinct types used on the LHS + # collect distinct types used on the LHS lhs_types = set() - for i, lhs in enumerate(self.lhs_list): - lhs = self.lhs_list[i] = lhs.analyse_target_types(env) + for i, lhs in enumerate(self.lhs_list): + lhs = self.lhs_list[i] = lhs.analyse_target_types(env) lhs.gil_assignment_check(env) lhs_types.add(lhs.type) rhs = self.rhs.analyse_types(env) - # common special case: only one type needed on the LHS => coerce only once + # common special case: only one type needed on the LHS => coerce only once if len(lhs_types) == 1: - # Avoid coercion for overloaded assignment operators. - if next(iter(lhs_types)).is_cpp_class: - op = env.lookup_operator('=', [lhs, self.rhs]) - if not op: - rhs = rhs.coerce_to(lhs_types.pop(), env) - else: - rhs = rhs.coerce_to(lhs_types.pop(), env) + # Avoid coercion for overloaded assignment operators. 
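# A plain-Python sketch of the semantics CascadedAssignmentNode implements
# (illustrative names): the RHS is evaluated exactly once, then cloned into
# each target, with one coercion per distinct LHS type.

def source():
    print("evaluated once")
    return 5

a = b = c = source()   # prints "evaluated once" a single time

# In Cython, mixed C-typed targets trigger one CloneNode coercion per
# distinct type, as collected above:
#     cdef int i
#     cdef double d
#     i = d = 3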
+ if next(iter(lhs_types)).is_cpp_class: + op = env.lookup_operator('=', [lhs, self.rhs]) + if not op: + rhs = rhs.coerce_to(lhs_types.pop(), env) + else: + rhs = rhs.coerce_to(lhs_types.pop(), env) if not rhs.is_name and not rhs.is_literal and ( use_temp or rhs.is_attribute or rhs.type.is_pyobject): @@ -5551,42 +5551,42 @@ class CascadedAssignmentNode(AssignmentNode): rhs = rhs.coerce_to_simple(env) self.rhs = ProxyNode(rhs) if rhs.is_temp else rhs - # clone RHS and coerce it to all distinct LHS types + # clone RHS and coerce it to all distinct LHS types self.coerced_values = [] coerced_values = {} - self.assignment_overloads = [] + self.assignment_overloads = [] for lhs in self.lhs_list: - overloaded = lhs.type.is_cpp_class and env.lookup_operator('=', [lhs, self.rhs]) - self.assignment_overloads.append(overloaded) + overloaded = lhs.type.is_cpp_class and env.lookup_operator('=', [lhs, self.rhs]) + self.assignment_overloads.append(overloaded) if lhs.type not in coerced_values and lhs.type != rhs.type: - rhs = CloneNode(self.rhs) - if not overloaded: - rhs = rhs.coerce_to(lhs.type, env) + rhs = CloneNode(self.rhs) + if not overloaded: + rhs = rhs.coerce_to(lhs.type, env) self.coerced_values.append(rhs) coerced_values[lhs.type] = rhs - # clone coerced values for all LHS assignments - self.cloned_values = [] + # clone coerced values for all LHS assignments + self.cloned_values = [] for lhs in self.lhs_list: rhs = coerced_values.get(lhs.type, self.rhs) - self.cloned_values.append(CloneNode(rhs)) + self.cloned_values.append(CloneNode(rhs)) return self def generate_rhs_evaluation_code(self, code): self.rhs.generate_evaluation_code(code) - def generate_assignment_code(self, code, overloaded_assignment=False): - # prepare all coercions + def generate_assignment_code(self, code, overloaded_assignment=False): + # prepare all coercions for rhs in self.coerced_values: rhs.generate_evaluation_code(code) - # assign clones to LHS - for lhs, rhs, overload in zip(self.lhs_list, self.cloned_values, self.assignment_overloads): + # assign clones to LHS + for lhs, rhs, overload in zip(self.lhs_list, self.cloned_values, self.assignment_overloads): rhs.generate_evaluation_code(code) - lhs.generate_assignment_code(rhs, code, overloaded_assignment=overload) - # dispose of coerced values and original RHS - for rhs_value in self.coerced_values: - rhs_value.generate_disposal_code(code) - rhs_value.free_temps(code) + lhs.generate_assignment_code(rhs, code, overloaded_assignment=overload) + # dispose of coerced values and original RHS + for rhs_value in self.coerced_values: + rhs_value.generate_disposal_code(code) + rhs_value.free_temps(code) self.rhs.generate_disposal_code(code) self.rhs.free_temps(code) @@ -5596,7 +5596,7 @@ class CascadedAssignmentNode(AssignmentNode): def annotate(self, code): for rhs in self.coerced_values: rhs.annotate(code) - for lhs, rhs in zip(self.lhs_list, self.cloned_values): + for lhs, rhs in zip(self.lhs_list, self.cloned_values): lhs.annotate(code) rhs.annotate(code) self.rhs.annotate(code) @@ -5623,18 +5623,18 @@ class ParallelAssignmentNode(AssignmentNode): stat.analyse_declarations(env) def analyse_expressions(self, env): - self.stats = [stat.analyse_types(env, use_temp=1) - for stat in self.stats] + self.stats = [stat.analyse_types(env, use_temp=1) + for stat in self.stats] return self # def analyse_expressions(self, env): # for stat in self.stats: -# stat.analyse_expressions_1(env, use_temp=1) +# stat.analyse_expressions_1(env, use_temp=1) # for stat in self.stats: # 
stat.analyse_expressions_2(env) def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) for stat in self.stats: stat.generate_rhs_evaluation_code(code) for stat in self.stats: @@ -5677,7 +5677,7 @@ class InPlaceAssignmentNode(AssignmentNode): self.lhs = self.lhs.analyse_target_types(env) # When assigning to a fully indexed buffer or memoryview, coerce the rhs - if self.lhs.is_memview_index or self.lhs.is_buffer_access: + if self.lhs.is_memview_index or self.lhs.is_buffer_access: self.rhs = self.rhs.coerce_to(self.lhs.type, env) elif self.lhs.type.is_string and self.operator in '+-': # use pointer arithmetic for char* LHS instead of string concat @@ -5685,31 +5685,31 @@ class InPlaceAssignmentNode(AssignmentNode): return self def generate_execution_code(self, code): - code.mark_pos(self.pos) - lhs, rhs = self.lhs, self.rhs - rhs.generate_evaluation_code(code) - lhs.generate_subexpr_evaluation_code(code) + code.mark_pos(self.pos) + lhs, rhs = self.lhs, self.rhs + rhs.generate_evaluation_code(code) + lhs.generate_subexpr_evaluation_code(code) c_op = self.operator if c_op == "//": c_op = "/" elif c_op == "**": error(self.pos, "No C inplace power operator") - if lhs.is_buffer_access or lhs.is_memview_index: - if lhs.type.is_pyobject: + if lhs.is_buffer_access or lhs.is_memview_index: + if lhs.type.is_pyobject: error(self.pos, "In-place operators not allowed on object buffers in this release.") - if c_op in ('/', '%') and lhs.type.is_int and not code.globalstate.directives['cdivision']: + if c_op in ('/', '%') and lhs.type.is_int and not code.globalstate.directives['cdivision']: error(self.pos, "In-place non-c divide operators not allowed on int buffers.") - lhs.generate_buffer_setitem_code(rhs, code, c_op) - elif lhs.is_memview_slice: - error(self.pos, "Inplace operators not supported on memoryview slices") + lhs.generate_buffer_setitem_code(rhs, code, c_op) + elif lhs.is_memview_slice: + error(self.pos, "Inplace operators not supported on memoryview slices") else: # C++ # TODO: make sure overload is declared - code.putln("%s %s= %s;" % (lhs.result(), c_op, rhs.result())) - lhs.generate_subexpr_disposal_code(code) - lhs.free_subexpr_temps(code) - rhs.generate_disposal_code(code) - rhs.free_temps(code) + code.putln("%s %s= %s;" % (lhs.result(), c_op, rhs.result())) + lhs.generate_subexpr_disposal_code(code) + lhs.free_subexpr_temps(code) + rhs.generate_disposal_code(code) + rhs.free_temps(code) def annotate(self, code): self.lhs.annotate(code) @@ -5744,7 +5744,7 @@ class PrintStatNode(StatNode): gil_message = "Python print statement" def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) if self.stream: self.stream.generate_evaluation_code(code) stream_result = self.stream.py_result() @@ -5806,14 +5806,14 @@ class ExecStatNode(StatNode): gil_message = "Python exec statement" def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) args = [] for arg in self.args: arg.generate_evaluation_code(code) - args.append(arg.py_result()) + args.append(arg.py_result()) args = tuple(args + ['0', '0'][:3-len(args)]) temp_result = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True) - code.putln("%s = __Pyx_PyExec3(%s, %s, %s);" % ((temp_result,) + args)) + code.putln("%s = __Pyx_PyExec3(%s, %s, %s);" % ((temp_result,) + args)) for arg in self.args: arg.generate_disposal_code(code) arg.free_temps(code) @@ -5843,7 +5843,7 @@ class DelStatNode(StatNode): def analyse_expressions(self, 
env): for i, arg in enumerate(self.args): arg = self.args[i] = arg.analyse_target_expression(env, None) - if arg.type.is_pyobject or (arg.is_name and arg.type.is_memoryviewslice): + if arg.type.is_pyobject or (arg.is_name and arg.type.is_memoryviewslice): if arg.is_name and arg.entry.is_cglobal: error(arg.pos, "Deletion of global C variable") elif arg.type.is_ptr and arg.type.base_type.is_cpp_class: @@ -5865,7 +5865,7 @@ class DelStatNode(StatNode): gil_message = "Deleting Python object" def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) for arg in self.args: if (arg.type.is_pyobject or arg.type.is_memoryviewslice or @@ -5915,7 +5915,7 @@ class BreakStatNode(StatNode): return self def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) if not code.break_label: error(self.pos, "break statement not inside loop") else: @@ -5931,11 +5931,11 @@ class ContinueStatNode(StatNode): return self def generate_execution_code(self, code): - if not code.continue_label: - error(self.pos, "continue statement not inside loop") - return - code.mark_pos(self.pos) - code.put_goto(code.continue_label) + if not code.continue_label: + error(self.pos, "continue statement not inside loop") + return + code.mark_pos(self.pos) + code.put_goto(code.continue_label) class ReturnStatNode(StatNode): @@ -5965,14 +5965,14 @@ class ReturnStatNode(StatNode): error(self.pos, "Return with value in async generator") self.value = self.value.analyse_types(env) if return_type.is_void or return_type.is_returncode: - error(self.value.pos, "Return with value in void function") + error(self.value.pos, "Return with value in void function") else: self.value = self.value.coerce_to(env.return_type, env) else: if (not return_type.is_void - and not return_type.is_pyobject - and not return_type.is_returncode): - error(self.pos, "Return value required") + and not return_type.is_pyobject + and not return_type.is_returncode): + error(self.pos, "Return value required") return self def nogil_check(self, env): @@ -5999,38 +5999,38 @@ class ReturnStatNode(StatNode): if self.return_type.is_memoryviewslice: from . 
import MemoryView MemoryView.put_acquire_memoryviewslice( - lhs_cname=Naming.retval_cname, - lhs_type=self.return_type, + lhs_cname=Naming.retval_cname, + lhs_type=self.return_type, lhs_pos=value.pos, rhs=value, - code=code, - have_gil=self.in_nogil_context) + code=code, + have_gil=self.in_nogil_context) value.generate_post_assignment_code(code) elif self.in_generator: # return value == raise StopIteration(value), but uncatchable - code.globalstate.use_utility_code( - UtilityCode.load_cached("ReturnWithStopIteration", "Coroutine.c")) - code.putln("%s = NULL; __Pyx_ReturnWithStopIteration(%s);" % ( - Naming.retval_cname, + code.globalstate.use_utility_code( + UtilityCode.load_cached("ReturnWithStopIteration", "Coroutine.c")) + code.putln("%s = NULL; __Pyx_ReturnWithStopIteration(%s);" % ( + Naming.retval_cname, value.py_result())) value.generate_disposal_code(code) else: value.make_owned_reference(code) - code.putln("%s = %s;" % ( - Naming.retval_cname, + code.putln("%s = %s;" % ( + Naming.retval_cname, value.result_as(self.return_type))) value.generate_post_assignment_code(code) value.free_temps(code) else: if self.return_type.is_pyobject: - if self.in_generator: + if self.in_generator: if self.in_async_gen: code.globalstate.use_utility_code( UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c")) code.put("PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); ") - code.putln("%s = NULL;" % Naming.retval_cname) - else: - code.put_init_to_py_none(Naming.retval_cname, self.return_type) + code.putln("%s = NULL;" % Naming.retval_cname) + else: + code.put_init_to_py_none(Naming.retval_cname, self.return_type) elif self.return_type.is_returncode: self.put_return(code, self.return_type.default_value) @@ -6083,8 +6083,8 @@ class RaiseStatNode(StatNode): exc = self.exc_type from . import ExprNodes if (isinstance(exc, ExprNodes.SimpleCallNode) and - not (exc.args or (exc.arg_tuple is not None and exc.arg_tuple.args))): - exc = exc.function # extract the exception type + not (exc.args or (exc.arg_tuple is not None and exc.arg_tuple.args))): + exc = exc.function # extract the exception type if exc.is_name and exc.entry.is_builtin: self.builtin_exc_name = exc.name if self.builtin_exc_name == 'MemoryError': @@ -6095,7 +6095,7 @@ class RaiseStatNode(StatNode): gil_message = "Raising exception" def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) if self.builtin_exc_name == 'MemoryError': code.putln('PyErr_NoMemory(); %s' % code.error_goto(self.pos)) return @@ -6169,7 +6169,7 @@ class ReraiseStatNode(StatNode): gil_message = "Raising exception" def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) vars = code.funcstate.exc_vars if vars: code.globalstate.use_utility_code(restore_exception_utility_code) @@ -6177,7 +6177,7 @@ class ReraiseStatNode(StatNode): code.put_giveref(vars[1]) # fresh exceptions may not have a traceback yet (-> finally!) 
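# The generator branch above compiles 'return <value>' via
# __Pyx_ReturnWithStopIteration(value). A plain-Python sketch of the
# user-visible semantics (illustrative names): the value travels on a
# StopIteration that the generator frame itself cannot catch.

def gen():
    yield 1
    return 42

g = gen()
assert next(g) == 1
try:
    next(g)
except StopIteration as exc:
    assert exc.value == 42   # the returned value rides on StopIteration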
code.put_xgiveref(vars[2]) - code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % tuple(vars)) + code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % tuple(vars)) for varname in vars: code.put("%s = 0; " % varname) code.putln() @@ -6203,7 +6203,7 @@ class AssertStatNode(StatNode): # prevent tuple values from being interpreted as argument value tuples from .ExprNodes import TupleNode value = TupleNode(value.pos, args=[value], slow=True) - self.value = value.analyse_types(env, skip_children=True).coerce_to_pyobject(env) + self.value = value.analyse_types(env, skip_children=True).coerce_to_pyobject(env) else: self.value = value.coerce_to_pyobject(env) return self @@ -6214,21 +6214,21 @@ class AssertStatNode(StatNode): def generate_execution_code(self, code): code.putln("#ifndef CYTHON_WITHOUT_ASSERTIONS") code.putln("if (unlikely(!Py_OptimizeFlag)) {") - code.mark_pos(self.pos) + code.mark_pos(self.pos) self.cond.generate_evaluation_code(code) code.putln( - "if (unlikely(!%s)) {" % self.cond.result()) + "if (unlikely(!%s)) {" % self.cond.result()) if self.value: self.value.generate_evaluation_code(code) code.putln( - "PyErr_SetObject(PyExc_AssertionError, %s);" % self.value.py_result()) + "PyErr_SetObject(PyExc_AssertionError, %s);" % self.value.py_result()) self.value.generate_disposal_code(code) self.value.free_temps(code) else: code.putln( "PyErr_SetNone(PyExc_AssertionError);") code.putln( - code.error_goto(self.pos)) + code.error_goto(self.pos)) code.putln( "}") self.cond.generate_disposal_code(code) @@ -6263,7 +6263,7 @@ class IfStatNode(StatNode): self.else_clause.analyse_declarations(env) def analyse_expressions(self, env): - self.if_clauses = [if_clause.analyse_expressions(env) for if_clause in self.if_clauses] + self.if_clauses = [if_clause.analyse_expressions(env) for if_clause in self.if_clauses] if self.else_clause: self.else_clause = self.else_clause.analyse_expressions(env) return self @@ -6271,17 +6271,17 @@ class IfStatNode(StatNode): def generate_execution_code(self, code): code.mark_pos(self.pos) end_label = code.new_label() - last = len(self.if_clauses) + last = len(self.if_clauses) if self.else_clause: # If the 'else' clause is 'unlikely', then set the preceding 'if' clause to 'likely' to reflect that. 
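# The AssertStatNode logic above wraps tuple messages in a TupleNode so
# they are not mistaken for exception argument tuples, and guards the whole
# check with Py_OptimizeFlag / CYTHON_WITHOUT_ASSERTIONS. A plain-Python
# sketch of the user-visible behaviour (illustrative names):

def check(status, code, msg):
    # raises AssertionError((code, msg)); skipped entirely under python -O,
    # and compiled out when CYTHON_WITHOUT_ASSERTIONS is defined
    assert status == 0, (code, msg)
    return status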
self._set_branch_hint(self.if_clauses[-1], self.else_clause, inverse=True) else: - last -= 1 # avoid redundant goto at end of last if-clause - for i, if_clause in enumerate(self.if_clauses): + last -= 1 # avoid redundant goto at end of last if-clause + for i, if_clause in enumerate(self.if_clauses): self._set_branch_hint(if_clause, if_clause.body) - if_clause.generate_execution_code(code, end_label, is_last=i == last) + if_clause.generate_execution_code(code, end_label, is_last=i == last) if self.else_clause: - code.mark_pos(self.else_clause.pos) + code.mark_pos(self.else_clause.pos) code.putln("/*else*/ {") self.else_clause.generate_execution_code(code) code.putln("}") @@ -6328,13 +6328,13 @@ class IfClauseNode(Node): self.body.analyse_declarations(env) def analyse_expressions(self, env): - self.condition = self.condition.analyse_temp_boolean_expression(env) + self.condition = self.condition.analyse_temp_boolean_expression(env) self.body = self.body.analyse_expressions(env) return self - def generate_execution_code(self, code, end_label, is_last): + def generate_execution_code(self, code, end_label, is_last): self.condition.generate_evaluation_code(code) - code.mark_pos(self.pos) + code.mark_pos(self.pos) condition = self.condition.result() if self.branch_hint: condition = '%s(%s)' % (self.branch_hint, condition) @@ -6342,8 +6342,8 @@ class IfClauseNode(Node): self.condition.generate_disposal_code(code) self.condition.free_temps(code) self.body.generate_execution_code(code) - code.mark_pos(self.pos, trace=False) - if not (is_last or self.body.is_terminator): + code.mark_pos(self.pos, trace=False) + if not (is_last or self.body.is_terminator): code.put_goto(end_label) code.putln("}") @@ -6364,21 +6364,21 @@ class SwitchCaseNode(StatNode): child_attrs = ['conditions', 'body'] - def generate_condition_evaluation_code(self, code): + def generate_condition_evaluation_code(self, code): for cond in self.conditions: cond.generate_evaluation_code(code) - - def generate_execution_code(self, code): - num_conditions = len(self.conditions) - line_tracing_enabled = code.globalstate.directives['linetrace'] - for i, cond in enumerate(self.conditions, 1): + + def generate_execution_code(self, code): + num_conditions = len(self.conditions) + line_tracing_enabled = code.globalstate.directives['linetrace'] + for i, cond in enumerate(self.conditions, 1): code.putln("case %s:" % cond.result()) - code.mark_pos(cond.pos) # Tracing code must appear *after* the 'case' statement. - if line_tracing_enabled and i < num_conditions: - # Allow fall-through after the line tracing code. - code.putln('CYTHON_FALLTHROUGH;') + code.mark_pos(cond.pos) # Tracing code must appear *after* the 'case' statement. + if line_tracing_enabled and i < num_conditions: + # Allow fall-through after the line tracing code. + code.putln('CYTHON_FALLTHROUGH;') self.body.generate_execution_code(code) - code.mark_pos(self.pos, trace=False) + code.mark_pos(self.pos, trace=False) code.putln("break;") def generate_function_definitions(self, env, code): @@ -6391,7 +6391,7 @@ class SwitchCaseNode(StatNode): cond.annotate(code) self.body.annotate(code) - + class SwitchStatNode(StatNode): # Generated in the optimization of an if-elif-else node # @@ -6403,11 +6403,11 @@ class SwitchStatNode(StatNode): def generate_execution_code(self, code): self.test.generate_evaluation_code(code) - # Make sure all conditions are evaluated before going into the switch() statement. 
- # This is required in order to prevent any execution code from leaking into the space between the cases. - for case in self.cases: - case.generate_condition_evaluation_code(code) - code.mark_pos(self.pos) + # Make sure all conditions are evaluated before going into the switch() statement. + # This is required in order to prevent any execution code from leaking into the space between the cases. + for case in self.cases: + case.generate_condition_evaluation_code(code) + code.mark_pos(self.pos) code.putln("switch (%s) {" % self.test.result()) for case in self.cases: case.generate_execution_code(code) @@ -6438,7 +6438,7 @@ class SwitchStatNode(StatNode): if self.else_clause is not None: self.else_clause.annotate(code) - + class LoopNode(object): pass @@ -6466,7 +6466,7 @@ class WhileStatNode(LoopNode, StatNode): return self def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) old_loop_labels = code.new_loop_labels() code.putln( "while (1) {") @@ -6474,7 +6474,7 @@ class WhileStatNode(LoopNode, StatNode): self.condition.generate_evaluation_code(code) self.condition.generate_disposal_code(code) code.putln( - "if (!%s) break;" % self.condition.result()) + "if (!%s) break;" % self.condition.result()) self.condition.free_temps(code) self.body.generate_execution_code(code) code.put_label(code.continue_label) @@ -6519,15 +6519,15 @@ class DictIterationNextNode(Node): key_target, value_target, tuple_target, is_dict_flag): Node.__init__( self, dict_obj.pos, - dict_obj=dict_obj, - expected_size=expected_size, - pos_index_var=pos_index_var, - key_target=key_target, - value_target=value_target, - tuple_target=tuple_target, - is_dict_flag=is_dict_flag, - is_temp=True, - type=PyrexTypes.c_bint_type) + dict_obj=dict_obj, + expected_size=expected_size, + pos_index_var=pos_index_var, + key_target=key_target, + value_target=value_target, + tuple_target=tuple_target, + is_dict_flag=is_dict_flag, + is_temp=True, + type=PyrexTypes.c_bint_type) def analyse_expressions(self, env): from . import ExprNodes @@ -6594,7 +6594,7 @@ class DictIterationNextNode(Node): target.generate_assignment_code(result, code) var.release(code) - + class SetIterationNextNode(Node): # Helper node for calling _PySet_NextEntry() inside of a WhileStatNode # and checking the set size for changes. Created in Optimize.py. @@ -6657,46 +6657,46 @@ class SetIterationNextNode(Node): def ForStatNode(pos, **kw): if 'iterator' in kw: - if kw['iterator'].is_async: - return AsyncForStatNode(pos, **kw) - else: - return ForInStatNode(pos, **kw) + if kw['iterator'].is_async: + return AsyncForStatNode(pos, **kw) + else: + return ForInStatNode(pos, **kw) else: return ForFromStatNode(pos, **kw) - -class _ForInStatNode(LoopNode, StatNode): - # Base class of 'for-in' statements. + +class _ForInStatNode(LoopNode, StatNode): + # Base class of 'for-in' statements. 
# # target ExprNode - # iterator IteratorNode | AIterAwaitExprNode(AsyncIteratorNode) + # iterator IteratorNode | AIterAwaitExprNode(AsyncIteratorNode) # body StatNode # else_clause StatNode - # item NextNode | AwaitExprNode(AsyncNextNode) - # is_async boolean true for 'async for' statements + # item NextNode | AwaitExprNode(AsyncNextNode) + # is_async boolean true for 'async for' statements - child_attrs = ["target", "item", "iterator", "body", "else_clause"] + child_attrs = ["target", "item", "iterator", "body", "else_clause"] item = None - is_async = False + is_async = False + + def _create_item_node(self): + raise NotImplementedError("must be implemented by subclasses") - def _create_item_node(self): - raise NotImplementedError("must be implemented by subclasses") - def analyse_declarations(self, env): self.target.analyse_target_declaration(env) self.body.analyse_declarations(env) if self.else_clause: self.else_clause.analyse_declarations(env) - self._create_item_node() + self._create_item_node() def analyse_expressions(self, env): self.target = self.target.analyse_target_types(env) self.iterator = self.iterator.analyse_expressions(env) - self._create_item_node() # must rewrap self.item after analysis + self._create_item_node() # must rewrap self.item after analysis self.item = self.item.analyse_expressions(env) - if (not self.is_async and - (self.iterator.type.is_ptr or self.iterator.type.is_array) and - self.target.type.assignable_from(self.iterator.type)): + if (not self.is_async and + (self.iterator.type.is_ptr or self.iterator.type.is_array) and + self.target.type.assignable_from(self.iterator.type)): # C array slice optimization. pass else: @@ -6707,7 +6707,7 @@ class _ForInStatNode(LoopNode, StatNode): return self def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) old_loop_labels = code.new_loop_labels() self.iterator.generate_evaluation_code(code) code.putln("for (;;) {") @@ -6762,36 +6762,36 @@ class _ForInStatNode(LoopNode, StatNode): self.item.annotate(code) -class ForInStatNode(_ForInStatNode): - # 'for' statement - - is_async = False - - def _create_item_node(self): - from .ExprNodes import NextNode - self.item = NextNode(self.iterator) - - -class AsyncForStatNode(_ForInStatNode): - # 'async for' statement - # - # iterator AIterAwaitExprNode(AsyncIteratorNode) - # item AwaitIterNextExprNode(AsyncIteratorNode) - - is_async = True - +class ForInStatNode(_ForInStatNode): + # 'for' statement + + is_async = False + + def _create_item_node(self): + from .ExprNodes import NextNode + self.item = NextNode(self.iterator) + + +class AsyncForStatNode(_ForInStatNode): + # 'async for' statement + # + # iterator AIterAwaitExprNode(AsyncIteratorNode) + # item AwaitIterNextExprNode(AsyncIteratorNode) + + is_async = True + def __init__(self, pos, **kw): - assert 'item' not in kw - from . import ExprNodes - # AwaitExprNodes must appear before running MarkClosureVisitor + assert 'item' not in kw + from . import ExprNodes + # AwaitExprNodes must appear before running MarkClosureVisitor kw['item'] = ExprNodes.AwaitIterNextExprNode(kw['iterator'].pos, arg=None) - _ForInStatNode.__init__(self, pos, **kw) - - def _create_item_node(self): - from . import ExprNodes - self.item.arg = ExprNodes.AsyncNextNode(self.iterator) - - + _ForInStatNode.__init__(self, pos, **kw) + + def _create_item_node(self): + from . 
import ExprNodes + self.item.arg = ExprNodes.AsyncNextNode(self.iterator) + + class ForFromStatNode(LoopNode, StatNode): # for name from expr rel name rel expr # @@ -6837,8 +6837,8 @@ class ForFromStatNode(LoopNode, StatNode): self.bound2 = self.bound2.analyse_types(env) if self.step is not None: if isinstance(self.step, ExprNodes.UnaryMinusNode): - warning(self.step.pos, "Probable infinite loop in for-from-by statement. " - "Consider switching the directions of the relations.", 2) + warning(self.step.pos, "Probable infinite loop in for-from-by statement. " + "Consider switching the directions of the relations.", 2) self.step = self.step.analyse_types(env) self.set_up_loop(env) @@ -6879,8 +6879,8 @@ class ForFromStatNode(LoopNode, StatNode): if target_type.is_numeric or target_type.is_enum: self.is_py_target = False - if isinstance(self.target, ExprNodes.BufferIndexNode): - raise error(self.pos, "Buffer or memoryview slicing/indexing not allowed as for-loop target.") + if isinstance(self.target, ExprNodes.BufferIndexNode): + raise error(self.pos, "Buffer or memoryview slicing/indexing not allowed as for-loop target.") self.loopvar_node = self.target self.py_loopvar_node = None else: @@ -6890,7 +6890,7 @@ class ForFromStatNode(LoopNode, StatNode): self.py_loopvar_node = ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env) def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) old_loop_labels = code.new_loop_labels() from_range = self.from_range self.bound1.generate_evaluation_code(code) @@ -6916,19 +6916,19 @@ class ForFromStatNode(LoopNode, StatNode): else: loopvar_name = self.loopvar_node.result() if loopvar_type.is_int and not loopvar_type.signed and self.relation2[0] == '>': - # Handle the case where the endpoint of an unsigned int iteration - # is within step of 0. - code.putln("for (%s = %s%s + %s; %s %s %s + %s; ) { %s%s;" % ( - loopvar_name, - self.bound1.result(), offset, step, - loopvar_name, self.relation2, self.bound2.result(), step, - loopvar_name, incop)) - else: - code.putln("for (%s = %s%s; %s %s %s; %s%s) {" % ( - loopvar_name, - self.bound1.result(), offset, - loopvar_name, self.relation2, self.bound2.result(), - loopvar_name, incop)) + # Handle the case where the endpoint of an unsigned int iteration + # is within step of 0. 
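# The unsigned-loop branch above shifts both bounds by 'step' so the
# comparison happens before the decrement, avoiding wraparound when the
# endpoint is within one step of 0. A small Python model of the emitted
# C loop shape (assuming step > 0 and relation '>='):

def count_down_unsigned(bound1, bound2, step):
    # mirrors: for (i = b1 + s; i >= b2 + s; ) { i -= s; <body>; }
    i = bound1 + step
    values = []
    while i >= bound2 + step:
        i -= step
        values.append(i)   # body sees i = b1, b1-s, ..., down to b2;
    return values          # b2 - s is never computed, so no wraparound

assert count_down_unsigned(5, 0, 1) == [5, 4, 3, 2, 1, 0]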
+ code.putln("for (%s = %s%s + %s; %s %s %s + %s; ) { %s%s;" % ( + loopvar_name, + self.bound1.result(), offset, step, + loopvar_name, self.relation2, self.bound2.result(), step, + loopvar_name, incop)) + else: + code.putln("for (%s = %s%s; %s %s %s; %s%s) {" % ( + loopvar_name, + self.bound1.result(), offset, + loopvar_name, self.relation2, self.bound2.result(), + loopvar_name, incop)) coerced_loopvar_node = self.py_loopvar_node if coerced_loopvar_node is None and from_range: @@ -6952,15 +6952,15 @@ class ForFromStatNode(LoopNode, StatNode): if self.target.entry.scope.is_module_scope: code.globalstate.use_utility_code( UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c")) - lookup_func = '__Pyx_GetModuleGlobalName(%s, %s); %s' + lookup_func = '__Pyx_GetModuleGlobalName(%s, %s); %s' else: code.globalstate.use_utility_code( UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c")) - lookup_func = '__Pyx_GetNameInClass(%s, {}, %s); %s'.format( + lookup_func = '__Pyx_GetNameInClass(%s, {}, %s); %s'.format( self.target.entry.scope.namespace_cname) - code.putln(lookup_func % ( + code.putln(lookup_func % ( target_node.result(), - interned_cname, + interned_cname, code.error_goto_if_null(target_node.result(), self.target.pos))) code.put_gotref(target_node.result()) else: @@ -7007,7 +7007,7 @@ class ForFromStatNode(LoopNode, StatNode): '<=': ("", "++"), '<' : ("+1", "++"), '>=': ("", "--"), - '>' : ("-1", "--"), + '>' : ("-1", "--"), } def generate_function_definitions(self, env, code): @@ -7086,7 +7086,7 @@ class WithStatNode(StatNode): self.body.generate_function_definitions(env, code) def generate_execution_code(self, code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) code.putln("/*with:*/ {") self.manager.generate_evaluation_code(code) self.exit_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False) @@ -7095,7 +7095,7 @@ class WithStatNode(StatNode): code.putln("%s = __Pyx_PyObject_LookupSpecial(%s, %s); %s" % ( self.exit_var, self.manager.py_result(), - code.intern_identifier(EncodedString('__aexit__' if self.is_async else '__exit__')), + code.intern_identifier(EncodedString('__aexit__' if self.is_async else '__exit__')), code.error_goto_if_null(self.exit_var, self.pos), )) code.put_gotref(self.exit_var) @@ -7202,9 +7202,9 @@ class TryExceptStatNode(StatNode): gil_message = "Try-except statement" def generate_execution_code(self, code): - code.mark_pos(self.pos) # before changing the error label, in case of tracing errors - code.putln("{") - + code.mark_pos(self.pos) # before changing the error label, in case of tracing errors + code.putln("{") + old_return_label = code.return_label old_break_label = code.break_label old_continue_label = code.continue_label @@ -7219,7 +7219,7 @@ class TryExceptStatNode(StatNode): try_end_label = code.new_label('try_end') exc_save_vars = [code.funcstate.allocate_temp(py_object_type, False) - for _ in range(3)] + for _ in range(3)] save_exc = code.insertion_point() code.putln( "/*try:*/ {") @@ -7227,7 +7227,7 @@ class TryExceptStatNode(StatNode): code.break_label = try_break_label code.continue_label = try_continue_label self.body.generate_execution_code(code) - code.mark_pos(self.pos, trace=False) + code.mark_pos(self.pos, trace=False) code.putln( "}") temps_to_clean_up = code.funcstate.all_free_managed_temps() @@ -7239,8 +7239,8 @@ class TryExceptStatNode(StatNode): if not self.in_generator: save_exc.putln("__Pyx_PyThreadState_declare") save_exc.putln("__Pyx_PyThreadState_assign") - 
save_exc.putln("__Pyx_ExceptionSave(%s);" % ( - ', '.join(['&%s' % var for var in exc_save_vars]))) + save_exc.putln("__Pyx_ExceptionSave(%s);" % ( + ', '.join(['&%s' % var for var in exc_save_vars]))) for var in exc_save_vars: save_exc.put_xgotref(var) @@ -7262,7 +7262,7 @@ class TryExceptStatNode(StatNode): code.return_label = except_return_label normal_case_terminates = self.body.is_terminator if self.else_clause: - code.mark_pos(self.else_clause.pos) + code.mark_pos(self.else_clause.pos) code.putln( "/*else:*/ {") self.else_clause.generate_execution_code(code) @@ -7299,17 +7299,17 @@ class TryExceptStatNode(StatNode): if not normal_case_terminates and not code.label_used(try_end_label): code.put_goto(try_end_label) code.put_label(exit_label) - code.mark_pos(self.pos, trace=False) - if can_raise: - restore_saved_exception() + code.mark_pos(self.pos, trace=False) + if can_raise: + restore_saved_exception() code.put_goto(old_label) if code.label_used(except_end_label): if not normal_case_terminates and not code.label_used(try_end_label): code.put_goto(try_end_label) code.put_label(except_end_label) - if can_raise: - restore_saved_exception() + if can_raise: + restore_saved_exception() if code.label_used(try_end_label): code.put_label(try_end_label) code.putln("}") @@ -7431,22 +7431,22 @@ class ExceptClauseNode(Node): and self.target is None): # most simple case: no exception variable, empty body (pass) # => reset the exception state, done - code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c")) - code.putln("__Pyx_ErrRestore(0,0,0);") + code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c")) + code.putln("__Pyx_ErrRestore(0,0,0);") code.put_goto(end_label) code.putln("}") return exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True) - for _ in range(3)] + for _ in range(3)] code.put_add_traceback(self.function_name) # We always have to fetch the exception value even if # there is no target, because this also normalises the # exception and stores it in the thread state. code.globalstate.use_utility_code(get_exception_utility_code) exc_args = "&%s, &%s, &%s" % tuple(exc_vars) - code.putln("if (__Pyx_GetException(%s) < 0) %s" % ( - exc_args, code.error_goto(self.pos))) + code.putln("if (__Pyx_GetException(%s) < 0) %s" % ( + exc_args, code.error_goto(self.pos))) for var in exc_vars: code.put_gotref(var) if self.target: @@ -7468,9 +7468,9 @@ class ExceptClauseNode(Node): if not self.body.is_terminator: for var in exc_vars: - # FIXME: XDECREF() is needed to allow re-raising (which clears the exc_vars), - # but I don't think it's the right solution. - code.put_xdecref_clear(var, py_object_type) + # FIXME: XDECREF() is needed to allow re-raising (which clears the exc_vars), + # but I don't think it's the right solution. + code.put_xdecref_clear(var, py_object_type) code.put_goto(end_label) for new_label, old_label in [(code.break_label, old_break_label), @@ -7508,42 +7508,42 @@ class TryFinallyStatNode(StatNode): # # body StatNode # finally_clause StatNode - # finally_except_clause deep-copy of finally_clause for exception case + # finally_except_clause deep-copy of finally_clause for exception case # in_generator inside of generator => must store away current exception also in return case # - # Each of the continue, break, return and error gotos runs - # into its own deep-copy of the finally block code. 
+ # Each of the continue, break, return and error gotos runs + # into its own deep-copy of the finally block code. # In addition, if we're doing an error, we save the # exception on entry to the finally block and restore # it on exit. - child_attrs = ["body", "finally_clause", "finally_except_clause"] + child_attrs = ["body", "finally_clause", "finally_except_clause"] preserve_exception = 1 # handle exception case, in addition to return/break/continue handle_error_case = True func_return_type = None - finally_except_clause = None + finally_except_clause = None is_try_finally_in_nogil = False in_generator = False - @staticmethod + @staticmethod def create_analysed(pos, env, body, finally_clause): node = TryFinallyStatNode(pos, body=body, finally_clause=finally_clause) return node def analyse_declarations(self, env): self.body.analyse_declarations(env) - self.finally_except_clause = copy.deepcopy(self.finally_clause) - self.finally_except_clause.analyse_declarations(env) + self.finally_except_clause = copy.deepcopy(self.finally_clause) + self.finally_except_clause.analyse_declarations(env) self.finally_clause.analyse_declarations(env) def analyse_expressions(self, env): self.body = self.body.analyse_expressions(env) self.finally_clause = self.finally_clause.analyse_expressions(env) - self.finally_except_clause = self.finally_except_clause.analyse_expressions(env) + self.finally_except_clause = self.finally_except_clause.analyse_expressions(env) if env.return_type and not env.return_type.is_void: self.func_return_type = env.return_type return self @@ -7552,9 +7552,9 @@ class TryFinallyStatNode(StatNode): gil_message = "Try-finally statement" def generate_execution_code(self, code): - code.mark_pos(self.pos) # before changing the error label, in case of tracing errors - code.putln("/*try:*/ {") - + code.mark_pos(self.pos) # before changing the error label, in case of tracing errors + code.putln("/*try:*/ {") + old_error_label = code.error_label old_labels = code.all_new_labels() new_labels = code.get_all_labels() @@ -7563,21 +7563,21 @@ class TryFinallyStatNode(StatNode): code.error_label = old_error_label catch_label = code.new_label() - was_in_try_finally = code.funcstate.in_try_finally - code.funcstate.in_try_finally = 1 + was_in_try_finally = code.funcstate.in_try_finally + code.funcstate.in_try_finally = 1 self.body.generate_execution_code(code) - code.funcstate.in_try_finally = was_in_try_finally + code.funcstate.in_try_finally = was_in_try_finally code.putln("}") temps_to_clean_up = code.funcstate.all_free_managed_temps() code.mark_pos(self.finally_clause.pos) code.putln("/*finally:*/ {") - # Reset labels only after writing out a potential line trace call for correct nogil error handling. - code.set_all_labels(old_labels) - + # Reset labels only after writing out a potential line trace call for correct nogil error handling. 
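# The deep-copied finally clauses above mean every exit path out of the
# 'try' body gets its own generated copy of the finally code. A
# plain-Python sketch of the exit paths involved (illustrative names):

def drain(items):
    for x in items:
        try:
            if x < 0:
                break        # break goto -> its own finally copy
            if x == 0:
                continue     # continue goto -> its own finally copy
            return x         # return goto -> its own finally copy
        finally:
            print("cleanup") # also duplicated for the error path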
+ code.set_all_labels(old_labels) + def fresh_finally_clause(_next=[self.finally_clause]): # generate the original subtree once and always keep a fresh copy node = _next[0] @@ -7624,7 +7624,7 @@ class TryFinallyStatNode(StatNode): code.putln('{') old_exc_vars = code.funcstate.exc_vars code.funcstate.exc_vars = exc_vars[:3] - self.finally_except_clause.generate_execution_code(code) + self.finally_except_clause.generate_execution_code(code) code.funcstate.exc_vars = old_exc_vars code.putln('}') @@ -7712,7 +7712,7 @@ class TryFinallyStatNode(StatNode): if self.is_try_finally_in_nogil: code.put_ensure_gil(declare_gilstate=False) - code.putln("__Pyx_PyThreadState_assign") + code.putln("__Pyx_PyThreadState_assign") code.putln(' '.join(["%s = 0;" % var for var in exc_vars])) for temp_name, type in temps_to_clean_up: @@ -7770,7 +7770,7 @@ class TryFinallyStatNode(StatNode): code.globalstate.use_utility_code(reset_exception_utility_code) if self.is_try_finally_in_nogil: code.put_ensure_gil(declare_gilstate=False) - + # not using preprocessor here to avoid warnings about # unused utility functions and/or temps code.putln("if (PY_MAJOR_VERSION >= 3) {") @@ -7808,8 +7808,8 @@ class GILStatNode(NogilTryFinallyStatNode): def __init__(self, pos, state, body): self.state = state self.create_state_temp_if_needed(pos, state, body) - TryFinallyStatNode.__init__( - self, pos, + TryFinallyStatNode.__init__( + self, pos, body=body, finally_clause=GILExitNode( pos, state=state, state_temp=self.state_temp)) @@ -7853,20 +7853,20 @@ class GILStatNode(NogilTryFinallyStatNode): else: variable = None - old_gil_config = code.funcstate.gil_owned + old_gil_config = code.funcstate.gil_owned if self.state == 'gil': code.put_ensure_gil(variable=variable) - code.funcstate.gil_owned = True + code.funcstate.gil_owned = True else: code.put_release_gil(variable=variable) - code.funcstate.gil_owned = False + code.funcstate.gil_owned = False TryFinallyStatNode.generate_execution_code(self, code) if self.state_temp: self.state_temp.release(code) - code.funcstate.gil_owned = old_gil_config + code.funcstate.gil_owned = old_gil_config code.end_block() @@ -7903,44 +7903,44 @@ class EnsureGILNode(GILExitNode): def generate_execution_code(self, code): code.put_ensure_gil(declare_gilstate=False) - -def cython_view_utility_code(): - from . import MemoryView - return MemoryView.view_utility_code - - + +def cython_view_utility_code(): + from . import MemoryView + return MemoryView.view_utility_code + + utility_code_for_cimports = { # utility code (or inlining c) in a pxd (or pyx) file. # TODO: Consider a generic user-level mechanism for importing - 'cpython.array' : lambda : UtilityCode.load_cached("ArrayAPI", "arrayarray.h"), - 'cpython.array.array' : lambda : UtilityCode.load_cached("ArrayAPI", "arrayarray.h"), - 'cython.view' : cython_view_utility_code, + 'cpython.array' : lambda : UtilityCode.load_cached("ArrayAPI", "arrayarray.h"), + 'cpython.array.array' : lambda : UtilityCode.load_cached("ArrayAPI", "arrayarray.h"), + 'cython.view' : cython_view_utility_code, +} + +utility_code_for_imports = { + # utility code used when special modules are imported. + # TODO: Consider a generic user-level mechanism for importing + 'asyncio': ("__Pyx_patch_asyncio", "PatchAsyncIO", "Coroutine.c"), + 'inspect': ("__Pyx_patch_inspect", "PatchInspect", "Coroutine.c"), } -utility_code_for_imports = { - # utility code used when special modules are imported. 
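# GILStatNode above wraps its body in this try/finally machinery: the GIL
# state flips on entry and is restored on every exit path. A minimal
# Cython sketch (assuming a .pyx module with a typed memoryview argument):
#
#     def total(double[:] xs):
#         cdef double s = 0
#         cdef Py_ssize_t i
#         with nogil:                      # put_release_gil()
#             for i in range(xs.shape[0]):
#                 s += xs[i]
#         return s                         # GIL re-acquired via the finally path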
- # TODO: Consider a generic user-level mechanism for importing - 'asyncio': ("__Pyx_patch_asyncio", "PatchAsyncIO", "Coroutine.c"), - 'inspect': ("__Pyx_patch_inspect", "PatchInspect", "Coroutine.c"), -} - - + class CImportStatNode(StatNode): # cimport statement # # module_name string Qualified name of module being imported # as_name string or None Name specified in "as" clause, if any - # is_absolute bool True for absolute imports, False otherwise + # is_absolute bool True for absolute imports, False otherwise child_attrs = [] - is_absolute = False + is_absolute = False def analyse_declarations(self, env): if not env.is_module_scope: error(self.pos, "cimport only allowed at module level") return - module_scope = env.find_module( - self.module_name, self.pos, relative_level=0 if self.is_absolute else -1) + module_scope = env.find_module( + self.module_name, self.pos, relative_level=0 if self.is_absolute else -1) if "." in self.module_name: names = [EncodedString(name) for name in self.module_name.split(".")] top_name = names[0] @@ -7959,7 +7959,7 @@ class CImportStatNode(StatNode): name = self.as_name or self.module_name env.declare_module(name, module_scope, self.pos) if self.module_name in utility_code_for_cimports: - env.use_utility_code(utility_code_for_cimports[self.module_name]()) + env.use_utility_code(utility_code_for_cimports[self.module_name]()) def analyse_expressions(self, env): return self @@ -7986,13 +7986,13 @@ class FromCImportStatNode(StatNode): return if self.relative_level and self.relative_level > env.qualified_name.count('.'): error(self.pos, "relative cimport beyond main package is not allowed") - return + return module_scope = env.find_module(self.module_name, self.pos, relative_level=self.relative_level) module_name = module_scope.qualified_name env.add_imported_module(module_scope) for pos, name, as_name, kind in self.imported_names: if name == "*": - for local_name, entry in list(module_scope.entries.items()): + for local_name, entry in list(module_scope.entries.items()): env.add_imported_entry(local_name, entry, pos) else: entry = module_scope.lookup(name) @@ -8007,8 +8007,8 @@ class FromCImportStatNode(StatNode): elif kind == 'class': entry = module_scope.declare_c_class(name, pos=pos, module_name=module_name) else: - submodule_scope = env.context.find_module( - name, relative_to=module_scope, pos=self.pos, absolute_fallback=False) + submodule_scope = env.context.find_module( + name, relative_to=module_scope, pos=self.pos, absolute_fallback=False) if submodule_scope.parent_module is module_scope: env.declare_module(as_name or name, submodule_scope, self.pos) else: @@ -8018,13 +8018,13 @@ class FromCImportStatNode(StatNode): local_name = as_name or name env.add_imported_entry(local_name, entry, pos) - if module_name.startswith('cpython') or module_name.startswith('cython'): # enough for now + if module_name.startswith('cpython') or module_name.startswith('cython'): # enough for now if module_name in utility_code_for_cimports: - env.use_utility_code(utility_code_for_cimports[module_name]()) + env.use_utility_code(utility_code_for_cimports[module_name]()) for _, name, _, _ in self.imported_names: fqname = '%s.%s' % (module_name, name) if fqname in utility_code_for_cimports: - env.use_utility_code(utility_code_for_cimports[fqname]()) + env.use_utility_code(utility_code_for_cimports[fqname]()) def declaration_matches(self, entry, kind): if not entry.is_type: @@ -8106,7 +8106,7 @@ class FromImportStatNode(StatNode): return self def generate_execution_code(self, 
code): - code.mark_pos(self.pos) + code.mark_pos(self.pos) self.module.generate_evaluation_code(code) if self.import_star: code.putln( @@ -8253,13 +8253,13 @@ class ParallelStatNode(StatNode, ParallelNode): try: self.kwargs = self.kwargs.compile_time_value(env) - except Exception as e: + except Exception as e: error(self.kwargs.pos, "Only compile-time values may be " "supplied as keyword arguments") else: self.kwargs = {} - for kw, val in self.kwargs.items(): + for kw, val in self.kwargs.items(): if kw not in self.valid_keyword_arguments: error(self.pos, "Invalid keyword argument: %s" % kw) else: @@ -8276,14 +8276,14 @@ class ParallelStatNode(StatNode, ParallelNode): self.analyse_sharing_attributes(env) if self.num_threads is not None: - if self.parent and self.parent.num_threads is not None and not self.parent.is_prange: - error(self.pos, "num_threads already declared in outer section") + if self.parent and self.parent.num_threads is not None and not self.parent.is_prange: + error(self.pos, "num_threads already declared in outer section") elif self.parent and not self.parent.is_prange: - error(self.pos, "num_threads must be declared in the parent parallel section") + error(self.pos, "num_threads must be declared in the parent parallel section") elif (self.num_threads.type.is_int and - self.num_threads.is_literal and - self.num_threads.compile_time_value(env) <= 0): - error(self.pos, "argument to num_threads must be greater than 0") + self.num_threads.is_literal and + self.num_threads.compile_time_value(env) <= 0): + error(self.pos, "argument to num_threads must be greater than 0") if not self.num_threads.is_simple() or self.num_threads.type.is_pyobject: self.num_threads = self.num_threads.coerce_to( @@ -8296,14 +8296,14 @@ class ParallelStatNode(StatNode, ParallelNode): This should be called in a post-order fashion during the analyse_expressions phase """ - for entry, (pos, op) in self.assignments.items(): + for entry, (pos, op) in self.assignments.items(): if self.is_prange and not self.is_parallel: # closely nested prange in a with parallel block, disallow # assigning to privates in the with parallel block (we # consider it too implicit and magicky for users) if entry in self.parent.assignments: - error(pos, "Cannot assign to private of outer parallel block") + error(pos, "Cannot assign to private of outer parallel block") continue if not self.is_prange and op: @@ -8412,7 +8412,7 @@ class ParallelStatNode(StatNode, ParallelNode): def initialize_privates_to_nan(self, code, exclude=None): first = True - for entry, (op, lastprivate) in sorted(self.privates.items()): + for entry, (op, lastprivate) in sorted(self.privates.items()): if not op and (not exclude or entry != exclude): invalid_value = entry.type.invalid_value() @@ -8441,7 +8441,7 @@ class ParallelStatNode(StatNode, ParallelNode): Write self.num_threads if set as the num_threads OpenMP directive """ if self.num_threads is not None: - code.put(" num_threads(%s)" % self.evaluate_before_block(code, self.num_threads)) + code.put(" num_threads(%s)" % self.evaluate_before_block(code, self.num_threads)) def declare_closure_privates(self, code): @@ -8453,7 +8453,7 @@ class ParallelStatNode(StatNode, ParallelNode): """ self.modified_entries = [] - for entry in sorted(self.assignments): + for entry in sorted(self.assignments): if entry.from_closure or entry.in_closure: self._allocate_closure_temp(code, entry) @@ -8473,13 +8473,13 @@ class ParallelStatNode(StatNode, ParallelNode): Make any used temporaries private. 
Before the relevant code block code.start_collecting_temps() should have been called. """ - c = self.privatization_insertion_point - self.privatization_insertion_point = None - + c = self.privatization_insertion_point + self.privatization_insertion_point = None + if self.is_parallel: self.temps = temps = code.funcstate.stop_collecting_temps() privates, firstprivates = [], [] - for temp, type in sorted(temps): + for temp, type in sorted(temps): if type.is_pyobject or type.is_memoryviewslice: firstprivates.append(temp) else: @@ -8502,7 +8502,7 @@ class ParallelStatNode(StatNode, ParallelNode): # Now clean up any memoryview slice and object temporaries if self.is_parallel and not self.is_nested_prange: code.putln("/* Clean up any temporaries */") - for temp, type in sorted(self.temps): + for temp, type in sorted(self.temps): if type.is_memoryviewslice: code.put_xdecref_memoryviewslice(temp, have_gil=False) elif type.is_pyobject: @@ -8567,9 +8567,9 @@ class ParallelStatNode(StatNode, ParallelNode): If compiled without OpenMP support (at the C level), then we still have to acquire the GIL to decref any object temporaries. """ - begin_code = self.begin_of_parallel_block - self.begin_of_parallel_block = None - + begin_code = self.begin_of_parallel_block + self.begin_of_parallel_block = None + if self.error_label_used: end_code = code @@ -8666,7 +8666,7 @@ class ParallelStatNode(StatNode, ParallelNode): that the breaking thread has well-defined values of the lastprivate variables, so we keep those values. """ - section_name = "__pyx_parallel_lastprivates%d" % self.critical_section_counter + section_name = "__pyx_parallel_lastprivates%d" % self.critical_section_counter code.putln_openmp("#pragma omp critical(%s)" % section_name) ParallelStatNode.critical_section_counter += 1 @@ -8675,11 +8675,11 @@ class ParallelStatNode(StatNode, ParallelNode): c = self.begin_of_parallel_control_block_point temp_count = 0 - for entry, (op, lastprivate) in sorted(self.privates.items()): + for entry, (op, lastprivate) in sorted(self.privates.items()): if not lastprivate or entry.type.is_pyobject: continue - type_decl = entry.type.empty_declaration_code() + type_decl = entry.type.empty_declaration_code() temp_cname = "__pyx_parallel_temp%d" % temp_count private_cname = entry.cname @@ -8734,7 +8734,7 @@ class ParallelStatNode(StatNode, ParallelNode): code.putln( "if (!%s) {" % Naming.parallel_exc_type) - code.putln("__Pyx_ErrFetchWithState(&%s, &%s, &%s);" % self.parallel_exc) + code.putln("__Pyx_ErrFetchWithState(&%s, &%s, &%s);" % self.parallel_exc) pos_info = chain(*zip(self.parallel_pos_info, self.pos_info)) code.funcstate.uses_error_indicator = True code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info)) @@ -8752,7 +8752,7 @@ class ParallelStatNode(StatNode, ParallelNode): code.put_ensure_gil(declare_gilstate=True) code.put_giveref(Naming.parallel_exc_type) - code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % self.parallel_exc) + code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % self.parallel_exc) pos_info = chain(*zip(self.pos_info, self.parallel_pos_info)) code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info)) @@ -8767,8 +8767,8 @@ class ParallelStatNode(StatNode, ParallelNode): code.set_all_labels(self.old_loop_labels + (self.old_return_label, self.old_error_label)) - def end_parallel_control_flow_block( - self, code, break_=False, continue_=False, return_=False): + def end_parallel_control_flow_block( + self, code, break_=False, continue_=False, return_=False): """ This ends the parallel control 
flow block and based on how the parallel section was exited, takes the corresponding action. The break_ and @@ -8783,8 +8783,8 @@ class ParallelStatNode(StatNode, ParallelNode): the for loop. """ c = self.begin_of_parallel_control_block_point - self.begin_of_parallel_control_block_point = None - self.begin_of_parallel_control_block_point_after_decls = None + self.begin_of_parallel_control_block_point = None + self.begin_of_parallel_control_block_point_after_decls = None if self.num_threads is not None: # FIXME: is it the right place? should not normally produce code. @@ -8793,8 +8793,8 @@ class ParallelStatNode(StatNode, ParallelNode): # Firstly, always prefer errors over returning, continue or break if self.error_label_used: - c.putln("const char *%s = NULL; int %s = 0, %s = 0;" % self.parallel_pos_info) - c.putln("PyObject *%s = NULL, *%s = NULL, *%s = NULL;" % self.parallel_exc) + c.putln("const char *%s = NULL; int %s = 0, %s = 0;" % self.parallel_pos_info) + c.putln("PyObject *%s = NULL, *%s = NULL, *%s = NULL;" % self.parallel_exc) code.putln( "if (%s) {" % Naming.parallel_exc_type) @@ -8829,9 +8829,9 @@ class ParallelStatNode(StatNode, ParallelNode): code.put(" case 2: ") code.put_goto(code.break_label) - if return_: - code.put(" case 3: ") - code.put_goto(code.return_label) + if return_: + code.put(" case 3: ") + code.put_goto(code.return_label) if self.error_label_used: code.globalstate.use_utility_code(restore_exception_utility_code) @@ -8888,8 +8888,8 @@ class ParallelWithBlockNode(ParallelStatNode): if self.privates: privates = [e.cname for e in self.privates - if not e.type.is_pyobject] - code.put('private(%s)' % ', '.join(sorted(privates))) + if not e.type.is_pyobject] + code.put('private(%s)' % ', '.join(sorted(privates))) self.privatization_insertion_point = code.insertion_point() self.put_num_threads(code) @@ -8897,7 +8897,7 @@ class ParallelWithBlockNode(ParallelStatNode): code.putln("#endif /* _OPENMP */") - code.begin_block() # parallel block + code.begin_block() # parallel block self.begin_parallel_block(code) self.initialize_privates_to_nan(code) code.funcstate.start_collecting_temps() @@ -8905,16 +8905,16 @@ class ParallelWithBlockNode(ParallelStatNode): self.trap_parallel_exit(code) self.privatize_temps(code) self.end_parallel_block(code) - code.end_block() # end parallel block + code.end_block() # end parallel block continue_ = code.label_used(code.continue_label) break_ = code.label_used(code.break_label) - return_ = code.label_used(code.return_label) + return_ = code.label_used(code.return_label) self.restore_labels(code) self.end_parallel_control_flow_block(code, break_=break_, - continue_=continue_, - return_=return_) + continue_=continue_, + return_=return_) self.release_closure_privates(code) @@ -8965,8 +8965,8 @@ class ParallelRangeNode(ParallelStatNode): if hasattr(self.schedule, 'decode'): self.schedule = self.schedule.decode('ascii') - if self.schedule not in (None, 'static', 'dynamic', 'guided', 'runtime'): - error(self.pos, "Invalid schedule argument to prange: %s" % (self.schedule,)) + if self.schedule not in (None, 'static', 'dynamic', 'guided', 'runtime'): + error(self.pos, "Invalid schedule argument to prange: %s" % (self.schedule,)) def analyse_expressions(self, env): was_nogil = env.nogil @@ -9009,7 +9009,7 @@ class ParallelRangeNode(ParallelStatNode): # As we range from 0 to nsteps, computing the index along the # way, we need a fitting type for 'i' and 'nsteps' self.index_type = PyrexTypes.widest_numeric_type( - self.index_type, node.type) + 
self.index_type, node.type) if self.else_clause is not None: self.else_clause = self.else_clause.analyse_expressions(env) @@ -9110,7 +9110,7 @@ class ParallelRangeNode(ParallelStatNode): # the start, stop , step, temps and target cnames fmt_dict = { 'target': target_index_cname, - 'target_type': self.target.type.empty_declaration_code() + 'target_type': self.target.type.empty_declaration_code() } # Setup start, stop and step, allocating temps if needed @@ -9137,7 +9137,7 @@ class ParallelRangeNode(ParallelStatNode): self.setup_parallel_control_flow_block(code) # parallel control flow block # Note: nsteps is private in an outer scope if present - code.putln("%(nsteps)s = (%(stop)s - %(start)s + %(step)s - %(step)s/abs(%(step)s)) / %(step)s;" % fmt_dict) + code.putln("%(nsteps)s = (%(stop)s - %(start)s + %(step)s - %(step)s/abs(%(step)s)) / %(step)s;" % fmt_dict) # The target iteration variable might not be initialized, do it only if # we are executing at least 1 iteration, otherwise we should leave the @@ -9204,7 +9204,7 @@ class ParallelRangeNode(ParallelStatNode): code.putln("#ifdef _OPENMP") code.put("#pragma omp for") - for entry, (op, lastprivate) in sorted(self.privates.items()): + for entry, (op, lastprivate) in sorted(self.privates.items()): # Don't declare the index variable as a reduction if op and op in "+*-&^|" and entry != self.target.entry: if entry.type.is_pyobject: @@ -9230,7 +9230,7 @@ class ParallelRangeNode(ParallelStatNode): if self.schedule: if self.chunksize: - chunksize = ", %s" % self.evaluate_before_block(code, self.chunksize) + chunksize = ", %s" % self.evaluate_before_block(code, self.chunksize) else: chunksize = "" @@ -9242,7 +9242,7 @@ class ParallelRangeNode(ParallelStatNode): code.putln("#endif /* _OPENMP */") code.put("for (%(i)s = 0; %(i)s < %(nsteps)s; %(i)s++)" % fmt_dict) - code.begin_block() # for loop block + code.begin_block() # for loop block guard_around_body_codepoint = code.insertion_point() @@ -9250,7 +9250,7 @@ class ParallelRangeNode(ParallelStatNode): # at least it doesn't spoil indentation code.begin_block() - code.putln("%(target)s = (%(target_type)s)(%(start)s + %(step)s * %(i)s);" % fmt_dict) + code.putln("%(target)s = (%(target_type)s)(%(start)s + %(step)s * %(i)s);" % fmt_dict) self.initialize_privates_to_nan(code, exclude=self.target.entry) if self.is_parallel and not self.is_nested_prange: @@ -9268,13 +9268,13 @@ class ParallelRangeNode(ParallelStatNode): # exceptions might be used guard_around_body_codepoint.putln("if (%s < 2)" % Naming.parallel_why) - code.end_block() # end guard around loop body - code.end_block() # end for loop block + code.end_block() # end guard around loop body + code.end_block() # end for loop block if self.is_parallel: # Release the GIL and deallocate the thread state self.end_parallel_block(code) - code.end_block() # pragma omp parallel end block + code.end_block() # pragma omp parallel end block class CnameDecoratorNode(StatNode): @@ -9301,7 +9301,7 @@ class CnameDecoratorNode(StatNode): node = node.body.stats[0] self.is_function = isinstance(node, FuncDefNode) - is_struct_or_enum = isinstance(node, (CStructOrUnionDefNode, CEnumDefNode)) + is_struct_or_enum = isinstance(node, (CStructOrUnionDefNode, CEnumDefNode)) e = node.entry if self.is_function: @@ -9321,11 +9321,11 @@ class CnameDecoratorNode(StatNode): e.type.typeptr_cname = self.cname + '_type' e.type.scope.namespace_cname = e.type.typeptr_cname - e.as_variable.cname = e.type.typeptr_cname + e.as_variable.cname = e.type.typeptr_cname scope.scope_prefix 
= self.cname + "_" - for name, entry in scope.entries.items(): + for name, entry in scope.entries.items(): if entry.func_cname: entry.func_cname = self.mangle(entry.cname) if entry.pyfunc_cname: @@ -9349,7 +9349,7 @@ class CnameDecoratorNode(StatNode): if isinstance(self.node, DefNode): self.node.generate_function_header( - h_code, with_pymethdef=False, proto_only=True) + h_code, with_pymethdef=False, proto_only=True) else: from . import ModuleNode entry = self.node.entry @@ -9357,10 +9357,10 @@ class CnameDecoratorNode(StatNode): entry.cname = entry.func_cname ModuleNode.generate_cfunction_declaration( - entry, - env.global_scope(), - h_code, - definition=True) + entry, + env.global_scope(), + h_code, + definition=True) entry.cname = cname @@ -9417,16 +9417,16 @@ traceback_utility_code = UtilityCode.load_cached("AddTraceback", "Exceptions.c") #------------------------------------------------------------------------------------ -get_exception_tuple_utility_code = UtilityCode( - proto=""" -static PyObject *__Pyx_GetExceptionTuple(PyThreadState *__pyx_tstate); /*proto*/ +get_exception_tuple_utility_code = UtilityCode( + proto=""" +static PyObject *__Pyx_GetExceptionTuple(PyThreadState *__pyx_tstate); /*proto*/ """, - # I doubt that calling __Pyx_GetException() here is correct as it moves - # the exception from tstate->curexc_* to tstate->exc_*, which prevents - # exception handlers later on from receiving it. - # NOTE: "__pyx_tstate" may be used by __Pyx_GetException() macro - impl = """ -static PyObject *__Pyx_GetExceptionTuple(CYTHON_UNUSED PyThreadState *__pyx_tstate) { + # I doubt that calling __Pyx_GetException() here is correct as it moves + # the exception from tstate->curexc_* to tstate->exc_*, which prevents + # exception handlers later on from receiving it. + # NOTE: "__pyx_tstate" may be used by __Pyx_GetException() macro + impl = """ +static PyObject *__Pyx_GetExceptionTuple(CYTHON_UNUSED PyThreadState *__pyx_tstate) { PyObject *type = NULL, *value = NULL, *tb = NULL; if (__Pyx_GetException(&type, &value, &tb) == 0) { PyObject* exc_info = PyTuple_New(3); @@ -9443,4 +9443,4 @@ static PyObject *__Pyx_GetExceptionTuple(CYTHON_UNUSED PyThreadState *__pyx_tsta return NULL; } """, - requires=[get_exception_utility_code]) + requires=[get_exception_utility_code]) diff --git a/contrib/tools/cython/Cython/Compiler/Optimize.py b/contrib/tools/cython/Cython/Compiler/Optimize.py index da4556a9f6..3cb77efe2c 100644 --- a/contrib/tools/cython/Cython/Compiler/Optimize.py +++ b/contrib/tools/cython/Cython/Compiler/Optimize.py @@ -1,25 +1,25 @@ from __future__ import absolute_import import re -import sys -import copy -import codecs -import itertools - +import sys +import copy +import codecs +import itertools + from . import TypeSlots from .ExprNodes import not_a_constant import cython cython.declare(UtilityCode=object, EncodedString=object, bytes_literal=object, encoded_string=object, Nodes=object, ExprNodes=object, PyrexTypes=object, Builtin=object, - UtilNodes=object, _py_int_types=object) + UtilNodes=object, _py_int_types=object) -if sys.version_info[0] >= 3: - _py_int_types = int +if sys.version_info[0] >= 3: + _py_int_types = int _py_string_types = (bytes, str) -else: - _py_int_types = (int, long) +else: + _py_int_types = (int, long) _py_string_types = (bytes, unicode) - + from . import Nodes from . import ExprNodes from . import PyrexTypes @@ -28,7 +28,7 @@ from . import Builtin from . import UtilNodes from . 
import Options -from .Code import UtilityCode, TempitaUtilityCode +from .Code import UtilityCode, TempitaUtilityCode from .StringEncoding import EncodedString, bytes_literal, encoded_string from .Errors import error, warning from .ParseTreeTransforms import SkipDeclarations @@ -43,23 +43,23 @@ try: except ImportError: basestring = str # Python 3 - + def load_c_utility(name): return UtilityCode.load_cached(name, "Optimize.c") - + def unwrap_coerced_node(node, coercion_nodes=(ExprNodes.CoerceToPyTypeNode, ExprNodes.CoerceFromPyTypeNode)): if isinstance(node, coercion_nodes): return node.arg return node - + def unwrap_node(node): while isinstance(node, UtilNodes.ResultRefNode): node = node.expression return node - + def is_common_value(a, b): a = unwrap_node(a) b = unwrap_node(b) @@ -69,66 +69,66 @@ def is_common_value(a, b): return not a.is_py_attr and is_common_value(a.obj, b.obj) and a.attribute == b.attribute return False - + def filter_none_node(node): if node is not None and node.constant_result is None: return None return node - -class _YieldNodeCollector(Visitor.TreeVisitor): - """ - YieldExprNode finder for generator expressions. - """ - def __init__(self): - Visitor.TreeVisitor.__init__(self) - self.yield_stat_nodes = {} - self.yield_nodes = [] - - visit_Node = Visitor.TreeVisitor.visitchildren - - def visit_YieldExprNode(self, node): - self.yield_nodes.append(node) - self.visitchildren(node) - - def visit_ExprStatNode(self, node): - self.visitchildren(node) - if node.expr in self.yield_nodes: - self.yield_stat_nodes[node.expr] = node - - # everything below these nodes is out of scope: - - def visit_GeneratorExpressionNode(self, node): - pass - - def visit_LambdaNode(self, node): - pass - - def visit_FuncDefNode(self, node): - pass - - -def _find_single_yield_expression(node): - yield_statements = _find_yield_statements(node) - if len(yield_statements) != 1: - return None, None - return yield_statements[0] - - -def _find_yield_statements(node): - collector = _YieldNodeCollector() - collector.visitchildren(node) - try: - yield_statements = [ - (yield_node.arg, collector.yield_stat_nodes[yield_node]) - for yield_node in collector.yield_nodes - ] - except KeyError: - # found YieldExprNode without ExprStatNode (i.e. a non-statement usage of 'yield') - yield_statements = [] - return yield_statements - - + +class _YieldNodeCollector(Visitor.TreeVisitor): + """ + YieldExprNode finder for generator expressions. 
+ """ + def __init__(self): + Visitor.TreeVisitor.__init__(self) + self.yield_stat_nodes = {} + self.yield_nodes = [] + + visit_Node = Visitor.TreeVisitor.visitchildren + + def visit_YieldExprNode(self, node): + self.yield_nodes.append(node) + self.visitchildren(node) + + def visit_ExprStatNode(self, node): + self.visitchildren(node) + if node.expr in self.yield_nodes: + self.yield_stat_nodes[node.expr] = node + + # everything below these nodes is out of scope: + + def visit_GeneratorExpressionNode(self, node): + pass + + def visit_LambdaNode(self, node): + pass + + def visit_FuncDefNode(self, node): + pass + + +def _find_single_yield_expression(node): + yield_statements = _find_yield_statements(node) + if len(yield_statements) != 1: + return None, None + return yield_statements[0] + + +def _find_yield_statements(node): + collector = _YieldNodeCollector() + collector.visitchildren(node) + try: + yield_statements = [ + (yield_node.arg, collector.yield_stat_nodes[yield_node]) + for yield_node in collector.yield_nodes + ] + except KeyError: + # found YieldExprNode without ExprStatNode (i.e. a non-statement usage of 'yield') + yield_statements = [] + return yield_statements + + class IterationTransform(Visitor.EnvTransform): """Transform some common for-in loop patterns into efficient C loops: @@ -148,7 +148,7 @@ class IterationTransform(Visitor.EnvTransform): pos = node.pos result_ref = UtilNodes.ResultRefNode(node) - if node.operand2.is_subscript: + if node.operand2.is_subscript: base_type = node.operand2.base.type.base_type else: base_type = node.operand2.type.base_type @@ -250,7 +250,7 @@ class IterationTransform(Visitor.EnvTransform): if not is_safe_iter and method in ('keys', 'values', 'items'): # try to reduce this to the corresponding .iter*() methods - if isinstance(base_obj, ExprNodes.CallNode): + if isinstance(base_obj, ExprNodes.CallNode): inner_function = base_obj.function if (inner_function.is_name and inner_function.name == 'dict' and inner_function.entry @@ -391,7 +391,7 @@ class IterationTransform(Visitor.EnvTransform): if slice_node.is_literal: # try to reduce to byte iteration for plain Latin-1 strings try: - bytes_value = bytes_literal(slice_node.value.encode('latin1'), 'iso8859-1') + bytes_value = bytes_literal(slice_node.value.encode('latin1'), 'iso8859-1') except UnicodeEncodeError: pass else: @@ -400,8 +400,8 @@ class IterationTransform(Visitor.EnvTransform): base=ExprNodes.BytesNode( slice_node.pos, value=bytes_value, constant_result=bytes_value, - type=PyrexTypes.c_const_char_ptr_type).coerce_to( - PyrexTypes.c_const_uchar_ptr_type, self.current_env()), + type=PyrexTypes.c_const_char_ptr_type).coerce_to( + PyrexTypes.c_const_uchar_ptr_type, self.current_env()), start=None, stop=ExprNodes.IntNode( slice_node.pos, value=str(len(bytes_value)), @@ -491,7 +491,7 @@ class IterationTransform(Visitor.EnvTransform): error(slice_node.pos, "C array iteration requires known end index") return node - elif slice_node.is_subscript: + elif slice_node.is_subscript: assert isinstance(slice_node.index, ExprNodes.SliceNode) slice_base = slice_node.base index = slice_node.index @@ -499,7 +499,7 @@ class IterationTransform(Visitor.EnvTransform): stop = filter_none_node(index.stop) step = filter_none_node(index.step) if step: - if not isinstance(step.constant_result, _py_int_types) \ + if not isinstance(step.constant_result, _py_int_types) \ or step.constant_result == 0 \ or step.constant_result > 0 and not stop \ or step.constant_result < 0 and not start: @@ -733,19 +733,19 @@ class 
IterationTransform(Visitor.EnvTransform): if len(args) < 3: step_pos = range_function.pos step_value = 1 - step = ExprNodes.IntNode(step_pos, value='1', constant_result=1) + step = ExprNodes.IntNode(step_pos, value='1', constant_result=1) else: step = args[2] step_pos = step.pos - if not isinstance(step.constant_result, _py_int_types): + if not isinstance(step.constant_result, _py_int_types): # cannot determine step direction return node step_value = step.constant_result if step_value == 0: # will lead to an error elsewhere return node - step = ExprNodes.IntNode(step_pos, value=str(step_value), - constant_result=step_value) + step = ExprNodes.IntNode(step_pos, value=str(step_value), + constant_result=step_value) if len(args) == 1: bound1 = ExprNodes.IntNode(range_function.pos, value='0', @@ -757,34 +757,34 @@ class IterationTransform(Visitor.EnvTransform): relation1, relation2 = self._find_for_from_node_relations(step_value < 0, reversed) - bound2_ref_node = None + bound2_ref_node = None if reversed: bound1, bound2 = bound2, bound1 - abs_step = abs(step_value) - if abs_step != 1: - if (isinstance(bound1.constant_result, _py_int_types) and - isinstance(bound2.constant_result, _py_int_types)): - # calculate final bounds now - if step_value < 0: - begin_value = bound2.constant_result - end_value = bound1.constant_result - bound1_value = begin_value - abs_step * ((begin_value - end_value - 1) // abs_step) - 1 - else: - begin_value = bound1.constant_result - end_value = bound2.constant_result - bound1_value = end_value + abs_step * ((begin_value - end_value - 1) // abs_step) + 1 - - bound1 = ExprNodes.IntNode( - bound1.pos, value=str(bound1_value), constant_result=bound1_value, - type=PyrexTypes.spanning_type(bound1.type, bound2.type)) - else: - # evaluate the same expression as above at runtime - bound2_ref_node = UtilNodes.LetRefNode(bound2) - bound1 = self._build_range_step_calculation( - bound1, bound2_ref_node, step, step_value) - - if step_value < 0: - step_value = -step_value + abs_step = abs(step_value) + if abs_step != 1: + if (isinstance(bound1.constant_result, _py_int_types) and + isinstance(bound2.constant_result, _py_int_types)): + # calculate final bounds now + if step_value < 0: + begin_value = bound2.constant_result + end_value = bound1.constant_result + bound1_value = begin_value - abs_step * ((begin_value - end_value - 1) // abs_step) - 1 + else: + begin_value = bound1.constant_result + end_value = bound2.constant_result + bound1_value = end_value + abs_step * ((begin_value - end_value - 1) // abs_step) + 1 + + bound1 = ExprNodes.IntNode( + bound1.pos, value=str(bound1_value), constant_result=bound1_value, + type=PyrexTypes.spanning_type(bound1.type, bound2.type)) + else: + # evaluate the same expression as above at runtime + bound2_ref_node = UtilNodes.LetRefNode(bound2) + bound1 = self._build_range_step_calculation( + bound1, bound2_ref_node, step, step_value) + + if step_value < 0: + step_value = -step_value step.value = str(step_value) step.constant_result = step_value step = step.coerce_to_integer(self.current_env()) @@ -792,7 +792,7 @@ class IterationTransform(Visitor.EnvTransform): if not bound2.is_literal: # stop bound must be immutable => keep it in a temp var bound2_is_temp = True - bound2 = bound2_ref_node or UtilNodes.LetRefNode(bound2) + bound2 = bound2_ref_node or UtilNodes.LetRefNode(bound2) else: bound2_is_temp = False @@ -811,70 +811,70 @@ class IterationTransform(Visitor.EnvTransform): return for_node - def _build_range_step_calculation(self, bound1, 
bound2_ref_node, step, step_value): - abs_step = abs(step_value) - spanning_type = PyrexTypes.spanning_type(bound1.type, bound2_ref_node.type) - if step.type.is_int and abs_step < 0x7FFF: - # Avoid loss of integer precision warnings. - spanning_step_type = PyrexTypes.spanning_type(spanning_type, PyrexTypes.c_int_type) - else: - spanning_step_type = PyrexTypes.spanning_type(spanning_type, step.type) - if step_value < 0: - begin_value = bound2_ref_node - end_value = bound1 - final_op = '-' - else: - begin_value = bound1 - end_value = bound2_ref_node - final_op = '+' - - step_calculation_node = ExprNodes.binop_node( - bound1.pos, - operand1=ExprNodes.binop_node( - bound1.pos, - operand1=bound2_ref_node, - operator=final_op, # +/- - operand2=ExprNodes.MulNode( - bound1.pos, - operand1=ExprNodes.IntNode( - bound1.pos, - value=str(abs_step), - constant_result=abs_step, - type=spanning_step_type), - operator='*', - operand2=ExprNodes.DivNode( - bound1.pos, - operand1=ExprNodes.SubNode( - bound1.pos, - operand1=ExprNodes.SubNode( - bound1.pos, - operand1=begin_value, - operator='-', - operand2=end_value, - type=spanning_type), - operator='-', - operand2=ExprNodes.IntNode( - bound1.pos, - value='1', - constant_result=1), - type=spanning_step_type), - operator='//', - operand2=ExprNodes.IntNode( - bound1.pos, - value=str(abs_step), - constant_result=abs_step, - type=spanning_step_type), - type=spanning_step_type), - type=spanning_step_type), - type=spanning_step_type), - operator=final_op, # +/- - operand2=ExprNodes.IntNode( - bound1.pos, - value='1', - constant_result=1), - type=spanning_type) - return step_calculation_node - + def _build_range_step_calculation(self, bound1, bound2_ref_node, step, step_value): + abs_step = abs(step_value) + spanning_type = PyrexTypes.spanning_type(bound1.type, bound2_ref_node.type) + if step.type.is_int and abs_step < 0x7FFF: + # Avoid loss of integer precision warnings. 
+ spanning_step_type = PyrexTypes.spanning_type(spanning_type, PyrexTypes.c_int_type) + else: + spanning_step_type = PyrexTypes.spanning_type(spanning_type, step.type) + if step_value < 0: + begin_value = bound2_ref_node + end_value = bound1 + final_op = '-' + else: + begin_value = bound1 + end_value = bound2_ref_node + final_op = '+' + + step_calculation_node = ExprNodes.binop_node( + bound1.pos, + operand1=ExprNodes.binop_node( + bound1.pos, + operand1=bound2_ref_node, + operator=final_op, # +/- + operand2=ExprNodes.MulNode( + bound1.pos, + operand1=ExprNodes.IntNode( + bound1.pos, + value=str(abs_step), + constant_result=abs_step, + type=spanning_step_type), + operator='*', + operand2=ExprNodes.DivNode( + bound1.pos, + operand1=ExprNodes.SubNode( + bound1.pos, + operand1=ExprNodes.SubNode( + bound1.pos, + operand1=begin_value, + operator='-', + operand2=end_value, + type=spanning_type), + operator='-', + operand2=ExprNodes.IntNode( + bound1.pos, + value='1', + constant_result=1), + type=spanning_step_type), + operator='//', + operand2=ExprNodes.IntNode( + bound1.pos, + value=str(abs_step), + constant_result=abs_step, + type=spanning_step_type), + type=spanning_step_type), + type=spanning_step_type), + type=spanning_step_type), + operator=final_op, # +/- + operand2=ExprNodes.IntNode( + bound1.pos, + value='1', + constant_result=1), + type=spanning_type) + return step_calculation_node + def _transform_dict_iteration(self, node, dict_obj, method, keys, values): temps = [] temp = UtilNodes.TempHandle(PyrexTypes.py_object_type) @@ -1192,9 +1192,9 @@ class SwitchTransform(Visitor.EnvTransform): if common_var is None: self.visitchildren(node) return node - cases.append(Nodes.SwitchCaseNode(pos=if_clause.pos, - conditions=conditions, - body=if_clause.body)) + cases.append(Nodes.SwitchCaseNode(pos=if_clause.pos, + conditions=conditions, + body=if_clause.body)) condition_values = [ cond for case in cases for cond in case.conditions] @@ -1205,16 +1205,16 @@ class SwitchTransform(Visitor.EnvTransform): self.visitchildren(node) return node - # Recurse into body subtrees that we left untouched so far. - self.visitchildren(node, 'else_clause') - for case in cases: - self.visitchildren(case, 'body') - + # Recurse into body subtrees that we left untouched so far. 
+ self.visitchildren(node, 'else_clause') + for case in cases: + self.visitchildren(case, 'body') + common_var = unwrap_node(common_var) - switch_node = Nodes.SwitchStatNode(pos=node.pos, - test=common_var, - cases=cases, - else_clause=node.else_clause) + switch_node = Nodes.SwitchStatNode(pos=node.pos, + test=common_var, + cases=cases, + else_clause=node.else_clause) return switch_node def visit_CondExprNode(self, node): @@ -1225,11 +1225,11 @@ class SwitchTransform(Visitor.EnvTransform): not_in, common_var, conditions = self.extract_common_conditions( None, node.test, True) if common_var is None \ - or len(conditions) < 2 \ - or self.has_duplicate_values(conditions): + or len(conditions) < 2 \ + or self.has_duplicate_values(conditions): self.visitchildren(node) return node - + return self.build_simple_switch_statement( node, common_var, conditions, not_in, node.true_val, node.false_val) @@ -1242,8 +1242,8 @@ class SwitchTransform(Visitor.EnvTransform): not_in, common_var, conditions = self.extract_common_conditions( None, node, True) if common_var is None \ - or len(conditions) < 2 \ - or self.has_duplicate_values(conditions): + or len(conditions) < 2 \ + or self.has_duplicate_values(conditions): self.visitchildren(node) node.wrap_operands(self.current_env()) # in case we changed the operands return node @@ -1261,8 +1261,8 @@ class SwitchTransform(Visitor.EnvTransform): not_in, common_var, conditions = self.extract_common_conditions( None, node, True) if common_var is None \ - or len(conditions) < 2 \ - or self.has_duplicate_values(conditions): + or len(conditions) < 2 \ + or self.has_duplicate_values(conditions): self.visitchildren(node) return node @@ -1477,20 +1477,20 @@ class DropRefcountingTransform(Visitor.VisitorTransform): node = node.arg name_path = [] obj_node = node - while obj_node.is_attribute: + while obj_node.is_attribute: if obj_node.is_py_attr: return False name_path.append(obj_node.member) obj_node = obj_node.obj - if obj_node.is_name: + if obj_node.is_name: name_path.append(obj_node.name) names.append( ('.'.join(name_path[::-1]), node) ) - elif node.is_subscript: + elif node.is_subscript: if node.base.type != Builtin.list_type: return False if not node.index.type.is_int: return False - if not node.base.is_name: + if not node.base.is_name: return False indices.append(node) else: @@ -1618,60 +1618,60 @@ class EarlyReplaceBuiltinCalls(Visitor.EnvTransform): stop=stop, step=step or ExprNodes.NoneNode(node.pos)) - def _handle_simple_function_ord(self, node, pos_args): - """Unpack ord('X'). - """ - if len(pos_args) != 1: - return node - arg = pos_args[0] - if isinstance(arg, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)): - if len(arg.value) == 1: - return ExprNodes.IntNode( - arg.pos, type=PyrexTypes.c_long_type, - value=str(ord(arg.value)), - constant_result=ord(arg.value) - ) - elif isinstance(arg, ExprNodes.StringNode): - if arg.unicode_value and len(arg.unicode_value) == 1 \ - and ord(arg.unicode_value) <= 255: # Py2/3 portability - return ExprNodes.IntNode( - arg.pos, type=PyrexTypes.c_int_type, - value=str(ord(arg.unicode_value)), - constant_result=ord(arg.unicode_value) - ) - return node - - # sequence processing + def _handle_simple_function_ord(self, node, pos_args): + """Unpack ord('X'). 
+ """ + if len(pos_args) != 1: + return node + arg = pos_args[0] + if isinstance(arg, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)): + if len(arg.value) == 1: + return ExprNodes.IntNode( + arg.pos, type=PyrexTypes.c_long_type, + value=str(ord(arg.value)), + constant_result=ord(arg.value) + ) + elif isinstance(arg, ExprNodes.StringNode): + if arg.unicode_value and len(arg.unicode_value) == 1 \ + and ord(arg.unicode_value) <= 255: # Py2/3 portability + return ExprNodes.IntNode( + arg.pos, type=PyrexTypes.c_int_type, + value=str(ord(arg.unicode_value)), + constant_result=ord(arg.unicode_value) + ) + return node + + # sequence processing def _handle_simple_function_all(self, node, pos_args): """Transform - _result = all(p(x) for L in LL for x in L) + _result = all(p(x) for L in LL for x in L) into for L in LL: for x in L: - if not p(x): - return False + if not p(x): + return False else: - return True + return True """ return self._transform_any_all(node, pos_args, False) def _handle_simple_function_any(self, node, pos_args): """Transform - _result = any(p(x) for L in LL for x in L) + _result = any(p(x) for L in LL for x in L) into for L in LL: for x in L: - if p(x): - return True + if p(x): + return True else: - return False + return False """ return self._transform_any_all(node, pos_args, True) @@ -1681,40 +1681,40 @@ class EarlyReplaceBuiltinCalls(Visitor.EnvTransform): if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode): return node gen_expr_node = pos_args[0] - generator_body = gen_expr_node.def_node.gbody - loop_node = generator_body.body - yield_expression, yield_stat_node = _find_single_yield_expression(loop_node) + generator_body = gen_expr_node.def_node.gbody + loop_node = generator_body.body + yield_expression, yield_stat_node = _find_single_yield_expression(loop_node) if yield_expression is None: return node if is_any: condition = yield_expression else: - condition = ExprNodes.NotNode(yield_expression.pos, operand=yield_expression) + condition = ExprNodes.NotNode(yield_expression.pos, operand=yield_expression) test_node = Nodes.IfStatNode( - yield_expression.pos, else_clause=None, if_clauses=[ - Nodes.IfClauseNode( - yield_expression.pos, - condition=condition, - body=Nodes.ReturnStatNode( - node.pos, - value=ExprNodes.BoolNode(yield_expression.pos, value=is_any, constant_result=is_any)) - )] - ) - loop_node.else_clause = Nodes.ReturnStatNode( + yield_expression.pos, else_clause=None, if_clauses=[ + Nodes.IfClauseNode( + yield_expression.pos, + condition=condition, + body=Nodes.ReturnStatNode( + node.pos, + value=ExprNodes.BoolNode(yield_expression.pos, value=is_any, constant_result=is_any)) + )] + ) + loop_node.else_clause = Nodes.ReturnStatNode( node.pos, - value=ExprNodes.BoolNode(yield_expression.pos, value=not is_any, constant_result=not is_any)) + value=ExprNodes.BoolNode(yield_expression.pos, value=not is_any, constant_result=not is_any)) - Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, test_node) + Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, test_node) return ExprNodes.InlinedGeneratorExpressionNode( - gen_expr_node.pos, gen=gen_expr_node, orig_func='any' if is_any else 'all') + gen_expr_node.pos, gen=gen_expr_node, orig_func='any' if is_any else 'all') + + PySequence_List_func_type = PyrexTypes.CFuncType( + Builtin.list_type, + [PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)]) - PySequence_List_func_type = PyrexTypes.CFuncType( - Builtin.list_type, - [PyrexTypes.CFuncTypeArg("it", 
PyrexTypes.py_object_type, None)]) - def _handle_simple_function_sorted(self, node, pos_args): """Transform sorted(genexpr) and sorted([listcomp]) into [listcomp].sort(). CPython just reads the iterable into a @@ -1724,62 +1724,62 @@ class EarlyReplaceBuiltinCalls(Visitor.EnvTransform): """ if len(pos_args) != 1: return node - - arg = pos_args[0] - if isinstance(arg, ExprNodes.ComprehensionNode) and arg.type is Builtin.list_type: - list_node = pos_args[0] - loop_node = list_node.loop - - elif isinstance(arg, ExprNodes.GeneratorExpressionNode): - gen_expr_node = arg + + arg = pos_args[0] + if isinstance(arg, ExprNodes.ComprehensionNode) and arg.type is Builtin.list_type: + list_node = pos_args[0] + loop_node = list_node.loop + + elif isinstance(arg, ExprNodes.GeneratorExpressionNode): + gen_expr_node = arg loop_node = gen_expr_node.loop - yield_statements = _find_yield_statements(loop_node) - if not yield_statements: + yield_statements = _find_yield_statements(loop_node) + if not yield_statements: return node - list_node = ExprNodes.InlinedGeneratorExpressionNode( - node.pos, gen_expr_node, orig_func='sorted', - comprehension_type=Builtin.list_type) - - for yield_expression, yield_stat_node in yield_statements: - append_node = ExprNodes.ComprehensionAppendNode( - yield_expression.pos, - expr=yield_expression, - target=list_node.target) - Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node) - - elif arg.is_sequence_constructor: - # sorted([a, b, c]) or sorted((a, b, c)). The result is always a list, - # so starting off with a fresh one is more efficient. - list_node = loop_node = arg.as_list() - + list_node = ExprNodes.InlinedGeneratorExpressionNode( + node.pos, gen_expr_node, orig_func='sorted', + comprehension_type=Builtin.list_type) + + for yield_expression, yield_stat_node in yield_statements: + append_node = ExprNodes.ComprehensionAppendNode( + yield_expression.pos, + expr=yield_expression, + target=list_node.target) + Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node) + + elif arg.is_sequence_constructor: + # sorted([a, b, c]) or sorted((a, b, c)). The result is always a list, + # so starting off with a fresh one is more efficient. + list_node = loop_node = arg.as_list() + else: - # Interestingly, PySequence_List works on a lot of non-sequence - # things as well. - list_node = loop_node = ExprNodes.PythonCapiCallNode( - node.pos, "PySequence_List", self.PySequence_List_func_type, - args=pos_args, is_temp=True) + # Interestingly, PySequence_List works on a lot of non-sequence + # things as well. + list_node = loop_node = ExprNodes.PythonCapiCallNode( + node.pos, "PySequence_List", self.PySequence_List_func_type, + args=pos_args, is_temp=True) result_node = UtilNodes.ResultRefNode( - pos=loop_node.pos, type=Builtin.list_type, may_hold_none=False) - list_assign_node = Nodes.SingleAssignmentNode( - node.pos, lhs=result_node, rhs=list_node, first=True) + pos=loop_node.pos, type=Builtin.list_type, may_hold_none=False) + list_assign_node = Nodes.SingleAssignmentNode( + node.pos, lhs=result_node, rhs=list_node, first=True) sort_method = ExprNodes.AttributeNode( - node.pos, obj=result_node, attribute=EncodedString('sort'), + node.pos, obj=result_node, attribute=EncodedString('sort'), # entry ? type ? 
- needs_none_check=False) + needs_none_check=False) sort_node = Nodes.ExprStatNode( - node.pos, expr=ExprNodes.SimpleCallNode( - node.pos, function=sort_method, args=[])) + node.pos, expr=ExprNodes.SimpleCallNode( + node.pos, function=sort_method, args=[])) sort_node.analyse_declarations(self.current_env()) return UtilNodes.TempResultFromStatNode( result_node, - Nodes.StatListNode(node.pos, stats=[list_assign_node, sort_node])) + Nodes.StatListNode(node.pos, stats=[list_assign_node, sort_node])) - def __handle_simple_function_sum(self, node, pos_args): + def __handle_simple_function_sum(self, node, pos_args): """Transform sum(genexpr) into an equivalent inlined aggregation loop. """ if len(pos_args) not in (1,2): @@ -1791,12 +1791,12 @@ class EarlyReplaceBuiltinCalls(Visitor.EnvTransform): loop_node = gen_expr_node.loop if isinstance(gen_expr_node, ExprNodes.GeneratorExpressionNode): - yield_expression, yield_stat_node = _find_single_yield_expression(loop_node) - # FIXME: currently nonfunctional - yield_expression = None + yield_expression, yield_stat_node = _find_single_yield_expression(loop_node) + # FIXME: currently nonfunctional + yield_expression = None if yield_expression is None: return node - else: # ComprehensionNode + else: # ComprehensionNode yield_stat_node = gen_expr_node.append yield_expression = yield_stat_node.expr try: @@ -1819,7 +1819,7 @@ class EarlyReplaceBuiltinCalls(Visitor.EnvTransform): rhs = ExprNodes.binop_node(node.pos, '+', result_ref, yield_expression) ) - Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, add_node) + Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, add_node) exec_code = Nodes.StatListNode( node.pos, @@ -1849,7 +1849,7 @@ class EarlyReplaceBuiltinCalls(Visitor.EnvTransform): if len(args) <= 1: if len(args) == 1 and args[0].is_sequence_constructor: args = args[0].args - if len(args) <= 1: + if len(args) <= 1: # leave this to Python return node @@ -1876,8 +1876,8 @@ class EarlyReplaceBuiltinCalls(Visitor.EnvTransform): return last_result - # builtin type creation - + # builtin type creation + def _DISABLED_handle_simple_function_tuple(self, node, pos_args): if not pos_args: return ExprNodes.TupleNode(node.pos, args=[], constant_result=()) @@ -1915,7 +1915,7 @@ class EarlyReplaceBuiltinCalls(Visitor.EnvTransform): return self._transform_list_set_genexpr(node, pos_args, Builtin.set_type) def _transform_list_set_genexpr(self, node, pos_args, target_type): - """Replace set(genexpr) and list(genexpr) by an inlined comprehension. + """Replace set(genexpr) and list(genexpr) by an inlined comprehension. 
""" if len(pos_args) > 1: return node @@ -1924,26 +1924,26 @@ class EarlyReplaceBuiltinCalls(Visitor.EnvTransform): gen_expr_node = pos_args[0] loop_node = gen_expr_node.loop - yield_statements = _find_yield_statements(loop_node) - if not yield_statements: + yield_statements = _find_yield_statements(loop_node) + if not yield_statements: return node - result_node = ExprNodes.InlinedGeneratorExpressionNode( - node.pos, gen_expr_node, - orig_func='set' if target_type is Builtin.set_type else 'list', - comprehension_type=target_type) - - for yield_expression, yield_stat_node in yield_statements: - append_node = ExprNodes.ComprehensionAppendNode( - yield_expression.pos, - expr=yield_expression, - target=result_node.target) - Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node) + result_node = ExprNodes.InlinedGeneratorExpressionNode( + node.pos, gen_expr_node, + orig_func='set' if target_type is Builtin.set_type else 'list', + comprehension_type=target_type) - return result_node + for yield_expression, yield_stat_node in yield_statements: + append_node = ExprNodes.ComprehensionAppendNode( + yield_expression.pos, + expr=yield_expression, + target=result_node.target) + Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node) + + return result_node def _handle_simple_function_dict(self, node, pos_args): - """Replace dict( (a,b) for ... ) by an inlined { a:b for ... } + """Replace dict( (a,b) for ... ) by an inlined { a:b for ... } """ if len(pos_args) == 0: return ExprNodes.DictNode(node.pos, key_value_pairs=[], constant_result={}) @@ -1954,29 +1954,29 @@ class EarlyReplaceBuiltinCalls(Visitor.EnvTransform): gen_expr_node = pos_args[0] loop_node = gen_expr_node.loop - yield_statements = _find_yield_statements(loop_node) - if not yield_statements: + yield_statements = _find_yield_statements(loop_node) + if not yield_statements: return node - for yield_expression, _ in yield_statements: - if not isinstance(yield_expression, ExprNodes.TupleNode): - return node - if len(yield_expression.args) != 2: - return node + for yield_expression, _ in yield_statements: + if not isinstance(yield_expression, ExprNodes.TupleNode): + return node + if len(yield_expression.args) != 2: + return node + + result_node = ExprNodes.InlinedGeneratorExpressionNode( + node.pos, gen_expr_node, orig_func='dict', + comprehension_type=Builtin.dict_type) - result_node = ExprNodes.InlinedGeneratorExpressionNode( - node.pos, gen_expr_node, orig_func='dict', - comprehension_type=Builtin.dict_type) - - for yield_expression, yield_stat_node in yield_statements: - append_node = ExprNodes.DictComprehensionAppendNode( - yield_expression.pos, - key_expr=yield_expression.args[0], - value_expr=yield_expression.args[1], - target=result_node.target) - Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node) + for yield_expression, yield_stat_node in yield_statements: + append_node = ExprNodes.DictComprehensionAppendNode( + yield_expression.pos, + key_expr=yield_expression.args[0], + value_expr=yield_expression.args[1], + target=result_node.target) + Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node) - return result_node + return result_node # specific handlers for general call nodes @@ -2024,8 +2024,8 @@ class InlineDefNodeCalls(Visitor.NodeRefCleanupMixin, Visitor.EnvTransform): return node -class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, - Visitor.MethodDispatcherTransform): +class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, + 
Visitor.MethodDispatcherTransform): """Optimize some common methods calls and instantiation patterns for builtin types *after* the type analysis phase. @@ -2080,33 +2080,33 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, return arg.arg.coerce_to_boolean(self.current_env()) return node - PyNumber_Float_func_type = PyrexTypes.CFuncType( - PyrexTypes.py_object_type, [ - PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None) - ]) - - def visit_CoerceToPyTypeNode(self, node): - """Drop redundant conversion nodes after tree changes.""" - self.visitchildren(node) - arg = node.arg - if isinstance(arg, ExprNodes.CoerceFromPyTypeNode): - arg = arg.arg - if isinstance(arg, ExprNodes.PythonCapiCallNode): - if arg.function.name == 'float' and len(arg.args) == 1: - # undo redundant Py->C->Py coercion - func_arg = arg.args[0] - if func_arg.type is Builtin.float_type: - return func_arg.as_none_safe_node("float() argument must be a string or a number, not 'NoneType'") - elif func_arg.type.is_pyobject: - return ExprNodes.PythonCapiCallNode( - node.pos, '__Pyx_PyNumber_Float', self.PyNumber_Float_func_type, - args=[func_arg], - py_name='float', - is_temp=node.is_temp, - result_is_used=node.result_is_used, - ).coerce_to(node.type, self.current_env()) - return node - + PyNumber_Float_func_type = PyrexTypes.CFuncType( + PyrexTypes.py_object_type, [ + PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None) + ]) + + def visit_CoerceToPyTypeNode(self, node): + """Drop redundant conversion nodes after tree changes.""" + self.visitchildren(node) + arg = node.arg + if isinstance(arg, ExprNodes.CoerceFromPyTypeNode): + arg = arg.arg + if isinstance(arg, ExprNodes.PythonCapiCallNode): + if arg.function.name == 'float' and len(arg.args) == 1: + # undo redundant Py->C->Py coercion + func_arg = arg.args[0] + if func_arg.type is Builtin.float_type: + return func_arg.as_none_safe_node("float() argument must be a string or a number, not 'NoneType'") + elif func_arg.type.is_pyobject: + return ExprNodes.PythonCapiCallNode( + node.pos, '__Pyx_PyNumber_Float', self.PyNumber_Float_func_type, + args=[func_arg], + py_name='float', + is_temp=node.is_temp, + result_is_used=node.result_is_used, + ).coerce_to(node.type, self.current_env()) + return node + def visit_CoerceFromPyTypeNode(self, node): """Drop redundant conversion nodes after tree changes. 
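
The visit_CoerceToPyTypeNode handler above undoes a redundant Py->C->Py round trip around float(): an argument that is already a Python float keeps only its None check, while any other Python object collapses into a direct __Pyx_PyNumber_Float call. A minimal Python sketch of the runtime behaviour this rewrite preserves (pyx_pynumber_float is an illustrative stand-in, not Cython's actual C helper):

    # Stand-in for the __Pyx_PyNumber_Float fast path (illustrative only).
    def pyx_pynumber_float(obj):
        if type(obj) is float:   # exact float: no conversion work needed
            return obj
        if obj is None:          # the None check kept by as_none_safe_node
            raise TypeError("float() argument must be a string or a number, "
                            "not 'NoneType'")
        return float(obj)        # generic fallback (PyNumber_Float)

    assert pyx_pynumber_float(1.5) == 1.5
    assert pyx_pynumber_float("2.5") == 2.5
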
@@ -2118,9 +2118,9 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, arg = node.arg if not arg.type.is_pyobject: # no Python conversion left at all, just do a C coercion instead - if node.type != arg.type: - arg = arg.coerce_to(node.type, self.current_env()) - return arg + if node.type != arg.type: + arg = arg.coerce_to(node.type, self.current_env()) + return arg if isinstance(arg, ExprNodes.PyTypeTestNode): arg = arg.arg if arg.is_literal: @@ -2133,13 +2133,13 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, if node.type.assignable_from(arg.arg.type): # completely redundant C->Py->C coercion return arg.arg.coerce_to(node.type, self.current_env()) - elif arg.type is Builtin.unicode_type: - if arg.arg.type.is_unicode_char and node.type.is_unicode_char: - return arg.arg.coerce_to(node.type, self.current_env()) + elif arg.type is Builtin.unicode_type: + if arg.arg.type.is_unicode_char and node.type.is_unicode_char: + return arg.arg.coerce_to(node.type, self.current_env()) elif isinstance(arg, ExprNodes.SimpleCallNode): if node.type.is_int or node.type.is_float: return self._optimise_numeric_cast_call(node, arg) - elif arg.is_subscript: + elif arg.is_subscript: index_node = arg.index if isinstance(index_node, ExprNodes.CoerceToPyTypeNode): index_node = index_node.arg @@ -2181,51 +2181,51 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, return node return coerce_node - float_float_func_types = dict( - (float_type, PyrexTypes.CFuncType( - float_type, [ - PyrexTypes.CFuncTypeArg("arg", float_type, None) - ])) - for float_type in (PyrexTypes.c_float_type, PyrexTypes.c_double_type, PyrexTypes.c_longdouble_type)) - + float_float_func_types = dict( + (float_type, PyrexTypes.CFuncType( + float_type, [ + PyrexTypes.CFuncTypeArg("arg", float_type, None) + ])) + for float_type in (PyrexTypes.c_float_type, PyrexTypes.c_double_type, PyrexTypes.c_longdouble_type)) + def _optimise_numeric_cast_call(self, node, arg): function = arg.function - args = None - if isinstance(arg, ExprNodes.PythonCapiCallNode): - args = arg.args - elif isinstance(function, ExprNodes.NameNode): - if function.type.is_builtin_type and isinstance(arg.arg_tuple, ExprNodes.TupleNode): - args = arg.arg_tuple.args - - if args is None or len(args) != 1: + args = None + if isinstance(arg, ExprNodes.PythonCapiCallNode): + args = arg.args + elif isinstance(function, ExprNodes.NameNode): + if function.type.is_builtin_type and isinstance(arg.arg_tuple, ExprNodes.TupleNode): + args = arg.arg_tuple.args + + if args is None or len(args) != 1: return node func_arg = args[0] if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode): func_arg = func_arg.arg elif func_arg.type.is_pyobject: - # play it safe: Python conversion might work on all sorts of things + # play it safe: Python conversion might work on all sorts of things return node - + if function.name == 'int': if func_arg.type.is_int or node.type.is_int: if func_arg.type == node.type: return func_arg elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float: - return ExprNodes.TypecastNode(node.pos, operand=func_arg, type=node.type) - elif func_arg.type.is_float and node.type.is_numeric: - if func_arg.type.math_h_modifier == 'l': - # Work around missing Cygwin definition. 
- truncl = '__Pyx_truncl' - else: - truncl = 'trunc' + func_arg.type.math_h_modifier - return ExprNodes.PythonCapiCallNode( - node.pos, truncl, - func_type=self.float_float_func_types[func_arg.type], - args=[func_arg], - py_name='int', - is_temp=node.is_temp, - result_is_used=node.result_is_used, - ).coerce_to(node.type, self.current_env()) + return ExprNodes.TypecastNode(node.pos, operand=func_arg, type=node.type) + elif func_arg.type.is_float and node.type.is_numeric: + if func_arg.type.math_h_modifier == 'l': + # Work around missing Cygwin definition. + truncl = '__Pyx_truncl' + else: + truncl = 'trunc' + func_arg.type.math_h_modifier + return ExprNodes.PythonCapiCallNode( + node.pos, truncl, + func_type=self.float_float_func_types[func_arg.type], + args=[func_arg], + py_name='int', + is_temp=node.is_temp, + result_is_used=node.result_is_used, + ).coerce_to(node.type, self.current_env()) elif function.name == 'float': if func_arg.type.is_float or node.type.is_float: if func_arg.type == node.type: @@ -2281,7 +2281,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, entry=type_entry, type=type_entry.type), attribute=attr_name, - is_called=True).analyse_as_type_attribute(self.current_env()) + is_called=True).analyse_as_type_attribute(self.current_env()) if method is None: return self._optimise_generic_builtin_method_call( node, attr_name, function, arg_list, is_unbound_method) @@ -2376,41 +2376,41 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, ) return node - PySequence_List_func_type = PyrexTypes.CFuncType( - Builtin.list_type, - [PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)]) - - def _handle_simple_function_list(self, node, function, pos_args): - """Turn list(ob) into PySequence_List(ob). - """ - if len(pos_args) != 1: - return node - arg = pos_args[0] - return ExprNodes.PythonCapiCallNode( - node.pos, "PySequence_List", self.PySequence_List_func_type, - args=pos_args, is_temp=node.is_temp) - + PySequence_List_func_type = PyrexTypes.CFuncType( + Builtin.list_type, + [PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)]) + + def _handle_simple_function_list(self, node, function, pos_args): + """Turn list(ob) into PySequence_List(ob). + """ + if len(pos_args) != 1: + return node + arg = pos_args[0] + return ExprNodes.PythonCapiCallNode( + node.pos, "PySequence_List", self.PySequence_List_func_type, + args=pos_args, is_temp=node.is_temp) + PyList_AsTuple_func_type = PyrexTypes.CFuncType( Builtin.tuple_type, [ PyrexTypes.CFuncTypeArg("list", Builtin.list_type, None) ]) def _handle_simple_function_tuple(self, node, function, pos_args): - """Replace tuple([...]) by PyList_AsTuple or PySequence_Tuple. + """Replace tuple([...]) by PyList_AsTuple or PySequence_Tuple. 
""" if len(pos_args) != 1 or not node.is_temp: return node arg = pos_args[0] if arg.type is Builtin.tuple_type and not arg.may_be_none(): return arg - if arg.type is Builtin.list_type: - pos_args[0] = arg.as_none_safe_node( - "'NoneType' object is not iterable") - - return ExprNodes.PythonCapiCallNode( - node.pos, "PyList_AsTuple", self.PyList_AsTuple_func_type, - args=pos_args, is_temp=node.is_temp) - else: + if arg.type is Builtin.list_type: + pos_args[0] = arg.as_none_safe_node( + "'NoneType' object is not iterable") + + return ExprNodes.PythonCapiCallNode( + node.pos, "PyList_AsTuple", self.PyList_AsTuple_func_type, + args=pos_args, is_temp=node.is_temp) + else: return ExprNodes.AsTupleNode(node.pos, arg=arg, type=Builtin.tuple_type) PySet_New_func_type = PyrexTypes.CFuncType( @@ -2435,18 +2435,18 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, temps.append(arg) args.append(arg) result = ExprNodes.SetNode(node.pos, is_temp=1, args=args) - self.replace(node, result) + self.replace(node, result) for temp in temps[::-1]: result = UtilNodes.EvalWithTempExprNode(temp, result) return result else: # PySet_New(it) is better than a generic Python call to set(it) - return self.replace(node, ExprNodes.PythonCapiCallNode( + return self.replace(node, ExprNodes.PythonCapiCallNode( node.pos, "PySet_New", self.PySet_New_func_type, args=pos_args, is_temp=node.is_temp, - py_name="set")) + py_name="set")) PyFrozenSet_New_func_type = PyrexTypes.CFuncType( Builtin.frozenset_type, [ @@ -2510,11 +2510,11 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None) ]) - PyInt_FromDouble_func_type = PyrexTypes.CFuncType( - PyrexTypes.py_object_type, [ - PyrexTypes.CFuncTypeArg("value", PyrexTypes.c_double_type, None) - ]) - + PyInt_FromDouble_func_type = PyrexTypes.CFuncType( + PyrexTypes.py_object_type, [ + PyrexTypes.CFuncTypeArg("value", PyrexTypes.c_double_type, None) + ]) + def _handle_simple_function_int(self, node, function, pos_args): """Transform int() into a faster C function call. 
""" @@ -2525,17 +2525,17 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, return node # int(x, base) func_arg = pos_args[0] if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode): - if func_arg.arg.type.is_float: - return ExprNodes.PythonCapiCallNode( - node.pos, "__Pyx_PyInt_FromDouble", self.PyInt_FromDouble_func_type, - args=[func_arg.arg], is_temp=True, py_name='int', - utility_code=UtilityCode.load_cached("PyIntFromDouble", "TypeConversion.c")) - else: - return node # handled in visit_CoerceFromPyTypeNode() + if func_arg.arg.type.is_float: + return ExprNodes.PythonCapiCallNode( + node.pos, "__Pyx_PyInt_FromDouble", self.PyInt_FromDouble_func_type, + args=[func_arg.arg], is_temp=True, py_name='int', + utility_code=UtilityCode.load_cached("PyIntFromDouble", "TypeConversion.c")) + else: + return node # handled in visit_CoerceFromPyTypeNode() if func_arg.type.is_pyobject and node.type.is_pyobject: return ExprNodes.PythonCapiCallNode( - node.pos, "__Pyx_PyNumber_Int", self.PyNumber_Int_func_type, - args=pos_args, is_temp=True, py_name='int') + node.pos, "__Pyx_PyNumber_Int", self.PyNumber_Int_func_type, + args=pos_args, is_temp=True, py_name='int') return node def _handle_simple_function_bool(self, node, function, pos_args): @@ -2560,30 +2560,30 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, Pyx_strlen_func_type = PyrexTypes.CFuncType( PyrexTypes.c_size_t_type, [ - PyrexTypes.CFuncTypeArg("bytes", PyrexTypes.c_const_char_ptr_type, None) - ]) + PyrexTypes.CFuncTypeArg("bytes", PyrexTypes.c_const_char_ptr_type, None) + ]) Pyx_Py_UNICODE_strlen_func_type = PyrexTypes.CFuncType( PyrexTypes.c_size_t_type, [ - PyrexTypes.CFuncTypeArg("unicode", PyrexTypes.c_const_py_unicode_ptr_type, None) - ]) + PyrexTypes.CFuncTypeArg("unicode", PyrexTypes.c_const_py_unicode_ptr_type, None) + ]) PyObject_Size_func_type = PyrexTypes.CFuncType( PyrexTypes.c_py_ssize_t_type, [ PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None) - ], + ], exception_value="-1") _map_to_capi_len_function = { - Builtin.unicode_type: "__Pyx_PyUnicode_GET_LENGTH", - Builtin.bytes_type: "PyBytes_GET_SIZE", + Builtin.unicode_type: "__Pyx_PyUnicode_GET_LENGTH", + Builtin.bytes_type: "PyBytes_GET_SIZE", Builtin.bytearray_type: 'PyByteArray_GET_SIZE', - Builtin.list_type: "PyList_GET_SIZE", - Builtin.tuple_type: "PyTuple_GET_SIZE", - Builtin.set_type: "PySet_GET_SIZE", - Builtin.frozenset_type: "PySet_GET_SIZE", - Builtin.dict_type: "PyDict_Size", - }.get + Builtin.list_type: "PyList_GET_SIZE", + Builtin.tuple_type: "PyTuple_GET_SIZE", + Builtin.set_type: "PySet_GET_SIZE", + Builtin.frozenset_type: "PySet_GET_SIZE", + Builtin.dict_type: "PyDict_Size", + }.get _ext_types_with_pysize = set(["cpython.array.array"]) @@ -2668,14 +2668,14 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, if len(pos_args) != 2: return node arg, types = pos_args - temps = [] + temps = [] if isinstance(types, ExprNodes.TupleNode): types = types.args - if len(types) == 1 and not types[0].type is Builtin.type_type: - return node # nothing to improve here + if len(types) == 1 and not types[0].type is Builtin.type_type: + return node # nothing to improve here if arg.is_attribute or not arg.is_simple(): - arg = UtilNodes.ResultRefNode(arg) - temps.append(arg) + arg = UtilNodes.ResultRefNode(arg) + temps.append(arg) elif types.type is Builtin.type_type: types = [types] else: @@ -2706,17 +2706,17 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, type_check_function = '__Pyx_TypeCheck' type_check_args = [arg, 
test_type_node] else: - if not test_type_node.is_literal: - test_type_node = UtilNodes.ResultRefNode(test_type_node) - temps.append(test_type_node) - type_check_function = 'PyObject_IsInstance' - type_check_args = [arg, test_type_node] + if not test_type_node.is_literal: + test_type_node = UtilNodes.ResultRefNode(test_type_node) + temps.append(test_type_node) + type_check_function = 'PyObject_IsInstance' + type_check_args = [arg, test_type_node] test_nodes.append( ExprNodes.PythonCapiCallNode( test_type_node.pos, type_check_function, self.Py_type_check_func_type, - args=type_check_args, - is_temp=True, - )) + args=type_check_args, + is_temp=True, + )) def join_with_or(a, b, make_binop_node=ExprNodes.binop_node): or_node = make_binop_node(node.pos, 'or', a, b) @@ -2725,7 +2725,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, return or_node test_node = reduce(join_with_or, test_nodes).coerce_to(node.type, env) - for temp in temps[::-1]: + for temp in temps[::-1]: test_node = UtilNodes.EvalWithTempExprNode(temp, test_node) return test_node @@ -2738,7 +2738,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, if isinstance(arg, ExprNodes.CoerceToPyTypeNode): if arg.arg.type.is_unicode_char: return ExprNodes.TypecastNode( - arg.pos, operand=arg.arg, type=PyrexTypes.c_long_type + arg.pos, operand=arg.arg, type=PyrexTypes.c_long_type ).coerce_to(node.type, self.current_env()) elif isinstance(arg, ExprNodes.UnicodeNode): if len(arg.value) == 1: @@ -2990,8 +2990,8 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, PyObject_PopIndex_func_type = PyrexTypes.CFuncType( PyrexTypes.py_object_type, [ PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None), - PyrexTypes.CFuncTypeArg("py_index", PyrexTypes.py_object_type, None), - PyrexTypes.CFuncTypeArg("c_index", PyrexTypes.c_py_ssize_t_type, None), + PyrexTypes.CFuncTypeArg("py_index", PyrexTypes.py_object_type, None), + PyrexTypes.CFuncTypeArg("c_index", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("is_signed", PyrexTypes.c_int_type, None), ], has_varargs=True) # to fake the additional macro args that lack a proper C type @@ -3026,23 +3026,23 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, ) elif len(args) == 2: index = unwrap_coerced_node(args[1]) - py_index = ExprNodes.NoneNode(index.pos) + py_index = ExprNodes.NoneNode(index.pos) orig_index_type = index.type if not index.type.is_int: - if isinstance(index, ExprNodes.IntNode): - py_index = index.coerce_to_pyobject(self.current_env()) + if isinstance(index, ExprNodes.IntNode): + py_index = index.coerce_to_pyobject(self.current_env()) + index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env()) + elif is_list: + if index.type.is_pyobject: + py_index = index.coerce_to_simple(self.current_env()) + index = ExprNodes.CloneNode(py_index) index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env()) - elif is_list: - if index.type.is_pyobject: - py_index = index.coerce_to_simple(self.current_env()) - index = ExprNodes.CloneNode(py_index) - index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env()) else: return node elif not PyrexTypes.numeric_type_fits(index.type, PyrexTypes.c_py_ssize_t_type): return node - elif isinstance(index, ExprNodes.IntNode): - py_index = index.coerce_to_pyobject(self.current_env()) + elif isinstance(index, ExprNodes.IntNode): + py_index = index.coerce_to_pyobject(self.current_env()) # real type might still be larger at runtime if not orig_index_type.is_int: orig_index_type = 
index.type @@ -3054,12 +3054,12 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, return ExprNodes.PythonCapiCallNode( node.pos, "__Pyx_Py%s_PopIndex" % type_name, self.PyObject_PopIndex_func_type, - args=[obj, py_index, index, + args=[obj, py_index, index, ExprNodes.IntNode(index.pos, value=str(orig_index_type.signed and 1 or 0), constant_result=orig_index_type.signed and 1 or 0, type=PyrexTypes.c_int_type), ExprNodes.RawCNameExprNode(index.pos, PyrexTypes.c_void_type, - orig_index_type.empty_declaration_code()), + orig_index_type.empty_declaration_code()), ExprNodes.RawCNameExprNode(index.pos, conversion_type, convert_func)], may_return_none=True, is_temp=node.is_temp, @@ -3163,184 +3163,184 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, may_return_none=True, utility_code=load_c_utility('py_dict_pop')) - Pyx_BinopInt_func_types = dict( - ((ctype, ret_type), PyrexTypes.CFuncType( - ret_type, [ - PyrexTypes.CFuncTypeArg("op1", PyrexTypes.py_object_type, None), - PyrexTypes.CFuncTypeArg("op2", PyrexTypes.py_object_type, None), - PyrexTypes.CFuncTypeArg("cval", ctype, None), - PyrexTypes.CFuncTypeArg("inplace", PyrexTypes.c_bint_type, None), - PyrexTypes.CFuncTypeArg("zerodiv_check", PyrexTypes.c_bint_type, None), - ], exception_value=None if ret_type.is_pyobject else ret_type.exception_value)) - for ctype in (PyrexTypes.c_long_type, PyrexTypes.c_double_type) - for ret_type in (PyrexTypes.py_object_type, PyrexTypes.c_bint_type) - ) - - def _handle_simple_method_object___add__(self, node, function, args, is_unbound_method): - return self._optimise_num_binop('Add', node, function, args, is_unbound_method) - - def _handle_simple_method_object___sub__(self, node, function, args, is_unbound_method): - return self._optimise_num_binop('Subtract', node, function, args, is_unbound_method) - - def _handle_simple_method_object___eq__(self, node, function, args, is_unbound_method): - return self._optimise_num_binop('Eq', node, function, args, is_unbound_method) - - def _handle_simple_method_object___ne__(self, node, function, args, is_unbound_method): - return self._optimise_num_binop('Ne', node, function, args, is_unbound_method) - - def _handle_simple_method_object___and__(self, node, function, args, is_unbound_method): - return self._optimise_num_binop('And', node, function, args, is_unbound_method) - - def _handle_simple_method_object___or__(self, node, function, args, is_unbound_method): - return self._optimise_num_binop('Or', node, function, args, is_unbound_method) - - def _handle_simple_method_object___xor__(self, node, function, args, is_unbound_method): - return self._optimise_num_binop('Xor', node, function, args, is_unbound_method) - - def _handle_simple_method_object___rshift__(self, node, function, args, is_unbound_method): - if len(args) != 2 or not isinstance(args[1], ExprNodes.IntNode): - return node - if not args[1].has_constant_result() or not (1 <= args[1].constant_result <= 63): - return node - return self._optimise_num_binop('Rshift', node, function, args, is_unbound_method) - - def _handle_simple_method_object___lshift__(self, node, function, args, is_unbound_method): - if len(args) != 2 or not isinstance(args[1], ExprNodes.IntNode): - return node - if not args[1].has_constant_result() or not (1 <= args[1].constant_result <= 63): - return node - return self._optimise_num_binop('Lshift', node, function, args, is_unbound_method) - - def _handle_simple_method_object___mod__(self, node, function, args, is_unbound_method): - return 
self._optimise_num_div('Remainder', node, function, args, is_unbound_method) - - def _handle_simple_method_object___floordiv__(self, node, function, args, is_unbound_method): - return self._optimise_num_div('FloorDivide', node, function, args, is_unbound_method) - - def _handle_simple_method_object___truediv__(self, node, function, args, is_unbound_method): - return self._optimise_num_div('TrueDivide', node, function, args, is_unbound_method) - - def _handle_simple_method_object___div__(self, node, function, args, is_unbound_method): - return self._optimise_num_div('Divide', node, function, args, is_unbound_method) - - def _optimise_num_div(self, operator, node, function, args, is_unbound_method): - if len(args) != 2 or not args[1].has_constant_result() or args[1].constant_result == 0: - return node - if isinstance(args[1], ExprNodes.IntNode): - if not (-2**30 <= args[1].constant_result <= 2**30): - return node - elif isinstance(args[1], ExprNodes.FloatNode): - if not (-2**53 <= args[1].constant_result <= 2**53): - return node - else: - return node - return self._optimise_num_binop(operator, node, function, args, is_unbound_method) - - def _handle_simple_method_float___add__(self, node, function, args, is_unbound_method): - return self._optimise_num_binop('Add', node, function, args, is_unbound_method) - - def _handle_simple_method_float___sub__(self, node, function, args, is_unbound_method): - return self._optimise_num_binop('Subtract', node, function, args, is_unbound_method) - - def _handle_simple_method_float___truediv__(self, node, function, args, is_unbound_method): - return self._optimise_num_binop('TrueDivide', node, function, args, is_unbound_method) - - def _handle_simple_method_float___div__(self, node, function, args, is_unbound_method): - return self._optimise_num_binop('Divide', node, function, args, is_unbound_method) - - def _handle_simple_method_float___mod__(self, node, function, args, is_unbound_method): - return self._optimise_num_binop('Remainder', node, function, args, is_unbound_method) - - def _handle_simple_method_float___eq__(self, node, function, args, is_unbound_method): - return self._optimise_num_binop('Eq', node, function, args, is_unbound_method) - - def _handle_simple_method_float___ne__(self, node, function, args, is_unbound_method): - return self._optimise_num_binop('Ne', node, function, args, is_unbound_method) - - def _optimise_num_binop(self, operator, node, function, args, is_unbound_method): - """ - Optimise math operators for (likely) float or small integer operations. - """ - if len(args) != 2: - return node - - if node.type.is_pyobject: - ret_type = PyrexTypes.py_object_type - elif node.type is PyrexTypes.c_bint_type and operator in ('Eq', 'Ne'): - ret_type = PyrexTypes.c_bint_type - else: - return node - - # When adding IntNode/FloatNode to something else, assume other operand is also numeric. - # Prefer constants on RHS as they allow better size control for some operators.
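The cutoffs in _optimise_num_div above deserve a gloss: 2**30 keeps the constant safely inside a C long even on 32-bit targets, while 2**53 is the largest magnitude a C double represents exactly. A minimal plain-Python restatement of those guards (the function name here is illustrative, not part of Cython):

    def divisor_is_optimisable(value):
        # Mirrors the guards of _optimise_num_div: reject a zero divisor
        # outright, then accept only constants in the numerically safe range.
        if isinstance(value, bool):
            return False  # bool is an int subtype in Python; keep the sketch strict
        if isinstance(value, int):
            return value != 0 and -2**30 <= value <= 2**30
        if isinstance(value, float):
            return value != 0.0 and -2**53 <= value <= 2**53
        return False

    assert divisor_is_optimisable(7)
    assert divisor_is_optimisable(-0.5)
    assert not divisor_is_optimisable(0)         # division by zero stays generic
    assert not divisor_is_optimisable(2**31)     # unsafe for a 32-bit C long
    assert not divisor_is_optimisable(2.0**60)   # outside the optimised float range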
- num_nodes = (ExprNodes.IntNode, ExprNodes.FloatNode) - if isinstance(args[1], num_nodes): - if args[0].type is not PyrexTypes.py_object_type: - return node - numval = args[1] - arg_order = 'ObjC' - elif isinstance(args[0], num_nodes): - if args[1].type is not PyrexTypes.py_object_type: - return node - numval = args[0] - arg_order = 'CObj' - else: - return node - - if not numval.has_constant_result(): - return node - - is_float = isinstance(numval, ExprNodes.FloatNode) - num_type = PyrexTypes.c_double_type if is_float else PyrexTypes.c_long_type - if is_float: - if operator not in ('Add', 'Subtract', 'Remainder', 'TrueDivide', 'Divide', 'Eq', 'Ne'): - return node - elif operator == 'Divide': - # mixed old-/new-style division is not currently optimised for integers - return node - elif abs(numval.constant_result) > 2**30: - # Cut off at an integer border that is still safe for all operations. - return node - - if operator in ('TrueDivide', 'FloorDivide', 'Divide', 'Remainder'): - if args[1].constant_result == 0: - # Don't optimise division by 0. :) - return node - - args = list(args) - args.append((ExprNodes.FloatNode if is_float else ExprNodes.IntNode)( - numval.pos, value=numval.value, constant_result=numval.constant_result, - type=num_type)) - inplace = node.inplace if isinstance(node, ExprNodes.NumBinopNode) else False - args.append(ExprNodes.BoolNode(node.pos, value=inplace, constant_result=inplace)) - if is_float or operator not in ('Eq', 'Ne'): - # "PyFloatBinop" and "PyIntBinop" take an additional "check for zero division" argument. - zerodivision_check = arg_order == 'CObj' and ( - not node.cdivision if isinstance(node, ExprNodes.DivNode) else False) - args.append(ExprNodes.BoolNode(node.pos, value=zerodivision_check, constant_result=zerodivision_check)) - - utility_code = TempitaUtilityCode.load_cached( - "PyFloatBinop" if is_float else "PyIntCompare" if operator in ('Eq', 'Ne') else "PyIntBinop", - "Optimize.c", - context=dict(op=operator, order=arg_order, ret_type=ret_type)) - - call_node = self._substitute_method_call( - node, function, - "__Pyx_Py%s_%s%s%s" % ( - 'Float' if is_float else 'Int', - '' if ret_type.is_pyobject else 'Bool', - operator, - arg_order), - self.Pyx_BinopInt_func_types[(num_type, ret_type)], - '__%s__' % operator[:3].lower(), is_unbound_method, args, - may_return_none=True, - with_none_check=False, - utility_code=utility_code) - - if node.type.is_pyobject and not ret_type.is_pyobject: - call_node = ExprNodes.CoerceToPyTypeNode(call_node, self.current_env(), node.type) - return call_node - + Pyx_BinopInt_func_types = dict( + ((ctype, ret_type), PyrexTypes.CFuncType( + ret_type, [ + PyrexTypes.CFuncTypeArg("op1", PyrexTypes.py_object_type, None), + PyrexTypes.CFuncTypeArg("op2", PyrexTypes.py_object_type, None), + PyrexTypes.CFuncTypeArg("cval", ctype, None), + PyrexTypes.CFuncTypeArg("inplace", PyrexTypes.c_bint_type, None), + PyrexTypes.CFuncTypeArg("zerodiv_check", PyrexTypes.c_bint_type, None), + ], exception_value=None if ret_type.is_pyobject else ret_type.exception_value)) + for ctype in (PyrexTypes.c_long_type, PyrexTypes.c_double_type) + for ret_type in (PyrexTypes.py_object_type, PyrexTypes.c_bint_type) + ) + + def _handle_simple_method_object___add__(self, node, function, args, is_unbound_method): + return self._optimise_num_binop('Add', node, function, args, is_unbound_method) + + def _handle_simple_method_object___sub__(self, node, function, args, is_unbound_method): + return self._optimise_num_binop('Subtract', node, function, args, 
is_unbound_method) + + def _handle_simple_method_object___eq__(self, node, function, args, is_unbound_method): + return self._optimise_num_binop('Eq', node, function, args, is_unbound_method) + + def _handle_simple_method_object___ne__(self, node, function, args, is_unbound_method): + return self._optimise_num_binop('Ne', node, function, args, is_unbound_method) + + def _handle_simple_method_object___and__(self, node, function, args, is_unbound_method): + return self._optimise_num_binop('And', node, function, args, is_unbound_method) + + def _handle_simple_method_object___or__(self, node, function, args, is_unbound_method): + return self._optimise_num_binop('Or', node, function, args, is_unbound_method) + + def _handle_simple_method_object___xor__(self, node, function, args, is_unbound_method): + return self._optimise_num_binop('Xor', node, function, args, is_unbound_method) + + def _handle_simple_method_object___rshift__(self, node, function, args, is_unbound_method): + if len(args) != 2 or not isinstance(args[1], ExprNodes.IntNode): + return node + if not args[1].has_constant_result() or not (1 <= args[1].constant_result <= 63): + return node + return self._optimise_num_binop('Rshift', node, function, args, is_unbound_method) + + def _handle_simple_method_object___lshift__(self, node, function, args, is_unbound_method): + if len(args) != 2 or not isinstance(args[1], ExprNodes.IntNode): + return node + if not args[1].has_constant_result() or not (1 <= args[1].constant_result <= 63): + return node + return self._optimise_num_binop('Lshift', node, function, args, is_unbound_method) + + def _handle_simple_method_object___mod__(self, node, function, args, is_unbound_method): + return self._optimise_num_div('Remainder', node, function, args, is_unbound_method) + + def _handle_simple_method_object___floordiv__(self, node, function, args, is_unbound_method): + return self._optimise_num_div('FloorDivide', node, function, args, is_unbound_method) + + def _handle_simple_method_object___truediv__(self, node, function, args, is_unbound_method): + return self._optimise_num_div('TrueDivide', node, function, args, is_unbound_method) + + def _handle_simple_method_object___div__(self, node, function, args, is_unbound_method): + return self._optimise_num_div('Divide', node, function, args, is_unbound_method) + + def _optimise_num_div(self, operator, node, function, args, is_unbound_method): + if len(args) != 2 or not args[1].has_constant_result() or args[1].constant_result == 0: + return node + if isinstance(args[1], ExprNodes.IntNode): + if not (-2**30 <= args[1].constant_result <= 2**30): + return node + elif isinstance(args[1], ExprNodes.FloatNode): + if not (-2**53 <= args[1].constant_result <= 2**53): + return node + else: + return node + return self._optimise_num_binop(operator, node, function, args, is_unbound_method) + + def _handle_simple_method_float___add__(self, node, function, args, is_unbound_method): + return self._optimise_num_binop('Add', node, function, args, is_unbound_method) + + def _handle_simple_method_float___sub__(self, node, function, args, is_unbound_method): + return self._optimise_num_binop('Subtract', node, function, args, is_unbound_method) + + def _handle_simple_method_float___truediv__(self, node, function, args, is_unbound_method): + return self._optimise_num_binop('TrueDivide', node, function, args, is_unbound_method) + + def _handle_simple_method_float___div__(self, node, function, args, is_unbound_method): + return self._optimise_num_binop('Divide', node, function, 
args, is_unbound_method) + + def _handle_simple_method_float___mod__(self, node, function, args, is_unbound_method): + return self._optimise_num_binop('Remainder', node, function, args, is_unbound_method) + + def _handle_simple_method_float___eq__(self, node, function, args, is_unbound_method): + return self._optimise_num_binop('Eq', node, function, args, is_unbound_method) + + def _handle_simple_method_float___ne__(self, node, function, args, is_unbound_method): + return self._optimise_num_binop('Ne', node, function, args, is_unbound_method) + + def _optimise_num_binop(self, operator, node, function, args, is_unbound_method): + """ + Optimise math operators for (likely) float or small integer operations. + """ + if len(args) != 2: + return node + + if node.type.is_pyobject: + ret_type = PyrexTypes.py_object_type + elif node.type is PyrexTypes.c_bint_type and operator in ('Eq', 'Ne'): + ret_type = PyrexTypes.c_bint_type + else: + return node + + # When adding IntNode/FloatNode to something else, assume other operand is also numeric. + # Prefer constants on RHS as they allow better size control for some operators. + num_nodes = (ExprNodes.IntNode, ExprNodes.FloatNode) + if isinstance(args[1], num_nodes): + if args[0].type is not PyrexTypes.py_object_type: + return node + numval = args[1] + arg_order = 'ObjC' + elif isinstance(args[0], num_nodes): + if args[1].type is not PyrexTypes.py_object_type: + return node + numval = args[0] + arg_order = 'CObj' + else: + return node + + if not numval.has_constant_result(): + return node + + is_float = isinstance(numval, ExprNodes.FloatNode) + num_type = PyrexTypes.c_double_type if is_float else PyrexTypes.c_long_type + if is_float: + if operator not in ('Add', 'Subtract', 'Remainder', 'TrueDivide', 'Divide', 'Eq', 'Ne'): + return node + elif operator == 'Divide': + # mixed old-/new-style division is not currently optimised for integers + return node + elif abs(numval.constant_result) > 2**30: + # Cut off at an integer border that is still safe for all operations. + return node + + if operator in ('TrueDivide', 'FloorDivide', 'Divide', 'Remainder'): + if args[1].constant_result == 0: + # Don't optimise division by 0. :) + return node + + args = list(args) + args.append((ExprNodes.FloatNode if is_float else ExprNodes.IntNode)( + numval.pos, value=numval.value, constant_result=numval.constant_result, + type=num_type)) + inplace = node.inplace if isinstance(node, ExprNodes.NumBinopNode) else False + args.append(ExprNodes.BoolNode(node.pos, value=inplace, constant_result=inplace)) + if is_float or operator not in ('Eq', 'Ne'): + # "PyFloatBinop" and "PyIntBinop" take an additional "check for zero division" argument.
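One detail that is easy to miss here: the substituted C helpers follow a fixed naming scheme built from the operand type (Float or Int), an optional Bool marker for C-boolean results, the operator, and the argument order, ObjC when the constant sits on the right and CObj when it sits on the left. A short reproduction of that mangling, matching the "__Pyx_Py%s_%s%s%s" format string used in the substitution code below (the standalone function here is a sketch, not Cython API):

    def pyx_binop_helper_name(is_float, ret_is_pyobject, operator, arg_order):
        # Same composition as the format string in _substitute_method_call
        # below; ret_is_pyobject mirrors ret_type.is_pyobject.
        return "__Pyx_Py%s_%s%s%s" % (
            'Float' if is_float else 'Int',
            '' if ret_is_pyobject else 'Bool',
            operator,
            arg_order)

    # obj + 1 (constant on the right, Python object result):
    assert pyx_binop_helper_name(False, True, 'Add', 'ObjC') == "__Pyx_PyInt_AddObjC"
    # 1.0 == obj compared into a C bint (constant on the left):
    assert pyx_binop_helper_name(True, False, 'Eq', 'CObj') == "__Pyx_PyFloat_BoolEqCObj"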
+ zerodivision_check = arg_order == 'CObj' and ( + not node.cdivision if isinstance(node, ExprNodes.DivNode) else False) + args.append(ExprNodes.BoolNode(node.pos, value=zerodivision_check, constant_result=zerodivision_check)) + + utility_code = TempitaUtilityCode.load_cached( + "PyFloatBinop" if is_float else "PyIntCompare" if operator in ('Eq', 'Ne') else "PyIntBinop", + "Optimize.c", + context=dict(op=operator, order=arg_order, ret_type=ret_type)) + + call_node = self._substitute_method_call( + node, function, + "__Pyx_Py%s_%s%s%s" % ( + 'Float' if is_float else 'Int', + '' if ret_type.is_pyobject else 'Bool', + operator, + arg_order), + self.Pyx_BinopInt_func_types[(num_type, ret_type)], + '__%s__' % operator[:3].lower(), is_unbound_method, args, + may_return_none=True, + with_none_check=False, + utility_code=utility_code) + + if node.type.is_pyobject and not ret_type.is_pyobject: + call_node = ExprNodes.CoerceToPyTypeNode(call_node, self.current_env(), node.type) + return call_node + ### unicode type methods PyUnicode_uchar_predicate_func_type = PyrexTypes.CFuncType( @@ -3456,44 +3456,44 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, "PyUnicode_Split", self.PyUnicode_Split_func_type, 'split', is_unbound_method, args) - PyUnicode_Join_func_type = PyrexTypes.CFuncType( - Builtin.unicode_type, [ - PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None), - PyrexTypes.CFuncTypeArg("seq", PyrexTypes.py_object_type, None), - ]) - - def _handle_simple_method_unicode_join(self, node, function, args, is_unbound_method): - """ - unicode.join() builds a list first => see if we can do this more efficiently - """ - if len(args) != 2: - self._error_wrong_arg_count('unicode.join', node, args, "2") - return node - if isinstance(args[1], ExprNodes.GeneratorExpressionNode): - gen_expr_node = args[1] - loop_node = gen_expr_node.loop - - yield_statements = _find_yield_statements(loop_node) - if yield_statements: - inlined_genexpr = ExprNodes.InlinedGeneratorExpressionNode( - node.pos, gen_expr_node, orig_func='list', - comprehension_type=Builtin.list_type) - - for yield_expression, yield_stat_node in yield_statements: - append_node = ExprNodes.ComprehensionAppendNode( - yield_expression.pos, - expr=yield_expression, - target=inlined_genexpr.target) - - Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node) - - args[1] = inlined_genexpr - - return self._substitute_method_call( - node, function, - "PyUnicode_Join", self.PyUnicode_Join_func_type, - 'join', is_unbound_method, args) - + PyUnicode_Join_func_type = PyrexTypes.CFuncType( + Builtin.unicode_type, [ + PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None), + PyrexTypes.CFuncTypeArg("seq", PyrexTypes.py_object_type, None), + ]) + + def _handle_simple_method_unicode_join(self, node, function, args, is_unbound_method): + """ + unicode.join() builds a list first => see if we can do this more efficiently + """ + if len(args) != 2: + self._error_wrong_arg_count('unicode.join', node, args, "2") + return node + if isinstance(args[1], ExprNodes.GeneratorExpressionNode): + gen_expr_node = args[1] + loop_node = gen_expr_node.loop + + yield_statements = _find_yield_statements(loop_node) + if yield_statements: + inlined_genexpr = ExprNodes.InlinedGeneratorExpressionNode( + node.pos, gen_expr_node, orig_func='list', + comprehension_type=Builtin.list_type) + + for yield_expression, yield_stat_node in yield_statements: + append_node = ExprNodes.ComprehensionAppendNode( + yield_expression.pos, + expr=yield_expression, + 
target=inlined_genexpr.target) + + Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node) + + args[1] = inlined_genexpr + + return self._substitute_method_call( + node, function, + "PyUnicode_Join", self.PyUnicode_Join_func_type, + 'join', is_unbound_method, args) + PyString_Tailmatch_func_type = PyrexTypes.CFuncType( PyrexTypes.c_bint_type, [ PyrexTypes.CFuncTypeArg("str", PyrexTypes.py_object_type, None), # bytes/str/unicode @@ -3626,8 +3626,8 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, PyUnicode_AsEncodedString_func_type = PyrexTypes.CFuncType( Builtin.bytes_type, [ PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None), - PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None), - PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None), + PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None), + PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None), ]) PyUnicode_AsXyzString_func_type = PyrexTypes.CFuncType( @@ -3671,8 +3671,8 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, # well, looks like we can't pass else: - value = bytes_literal(value, encoding) - return ExprNodes.BytesNode(string_node.pos, value=value, type=Builtin.bytes_type) + value = bytes_literal(value, encoding) + return ExprNodes.BytesNode(string_node.pos, value=value, type=Builtin.bytes_type) if encoding and error_handling == 'strict': # try to find a specific encoder function @@ -3692,30 +3692,30 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, PyUnicode_DecodeXyz_func_ptr_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType( Builtin.unicode_type, [ - PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_const_char_ptr_type, None), + PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_const_char_ptr_type, None), PyrexTypes.CFuncTypeArg("size", PyrexTypes.c_py_ssize_t_type, None), - PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None), - ])) + PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None), + ])) _decode_c_string_func_type = PyrexTypes.CFuncType( Builtin.unicode_type, [ - PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_const_char_ptr_type, None), + PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_const_char_ptr_type, None), PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None), - PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None), - PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None), + PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None), + PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None), PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None), - ]) + ]) _decode_bytes_func_type = PyrexTypes.CFuncType( Builtin.unicode_type, [ PyrexTypes.CFuncTypeArg("string", PyrexTypes.py_object_type, None), PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None), - PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None), - PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None), + PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None), + PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None), PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None), - ]) + ]) _decode_cpp_string_func_type = 
None # lazy init @@ -3810,8 +3810,8 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, PyrexTypes.CFuncTypeArg("string", string_type, None), PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None), PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None), - PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None), - PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None), + PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None), + PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None), PyrexTypes.CFuncTypeArg("decode_func", self.PyUnicode_DecodeXyz_func_ptr_type, None), ]) helper_func_type = self._decode_cpp_string_func_type @@ -3882,14 +3882,14 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, if isinstance(node, ExprNodes.UnicodeNode): encoding = node.value node = ExprNodes.BytesNode( - node.pos, value=encoding.as_utf8_string(), type=PyrexTypes.c_const_char_ptr_type) + node.pos, value=encoding.as_utf8_string(), type=PyrexTypes.c_const_char_ptr_type) elif isinstance(node, (ExprNodes.StringNode, ExprNodes.BytesNode)): encoding = node.value.decode('ISO-8859-1') node = ExprNodes.BytesNode( - node.pos, value=node.value, type=PyrexTypes.c_const_char_ptr_type) + node.pos, value=node.value, type=PyrexTypes.c_const_char_ptr_type) elif node.type is Builtin.bytes_type: encoding = None - node = node.coerce_to(PyrexTypes.c_const_char_ptr_type, self.current_env()) + node = node.coerce_to(PyrexTypes.c_const_char_ptr_type, self.current_env()) elif node.type.is_string: encoding = None else: @@ -3933,8 +3933,8 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin, def _substitute_method_call(self, node, function, name, func_type, attr_name, is_unbound_method, args=(), utility_code=None, is_temp=None, - may_return_none=ExprNodes.PythonCapiCallNode.may_return_none, - with_none_check=True): + may_return_none=ExprNodes.PythonCapiCallNode.may_return_none, + with_none_check=True): args = list(args) if with_none_check and args: args[0] = self._wrap_self_arg(args[0], function, is_unbound_method, attr_name) @@ -4210,15 +4210,15 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations): bytes_value = None if str1.bytes_value is not None and str2.bytes_value is not None: if str1.bytes_value.encoding == str2.bytes_value.encoding: - bytes_value = bytes_literal( - str1.bytes_value + str2.bytes_value, - str1.bytes_value.encoding) + bytes_value = bytes_literal( + str1.bytes_value + str2.bytes_value, + str1.bytes_value.encoding) string_value = EncodedString(node.constant_result) return ExprNodes.UnicodeNode( str1.pos, value=string_value, constant_result=node.constant_result, bytes_value=bytes_value) elif isinstance(str1, ExprNodes.BytesNode) and isinstance(str2, ExprNodes.BytesNode): if str1.value.encoding == str2.value.encoding: - bytes_value = bytes_literal(node.constant_result, str1.value.encoding) + bytes_value = bytes_literal(node.constant_result, str1.value.encoding) return ExprNodes.BytesNode(str1.pos, value=bytes_value, constant_result=node.constant_result) # all other combinations are rather complicated # to get right in Py2/3: encodings, unicode escapes, ... 
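As background for the literal-concatenation folding in the hunk above: CPython's own compiler applies the same fold to constant string operands, so the effect is easy to observe from plain Python (checked against recent CPython 3.x; this is an illustration, not Cython code):

    import dis

    # 'a' + 'b' is folded into the single constant 'ab' at compile time,
    # much as ConstantFolding merges adjacent literal nodes above; Cython
    # additionally requires the two source encodings to match.
    code = compile("'a' + 'b'", "<example>", "eval")
    assert 'ab' in code.co_consts
    dis.dis(code)  # the disassembly contains the folded constant 'ab'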
@@ -4275,12 +4275,12 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations): def _calculate_constant_seq(self, node, sequence_node, factor): if factor.constant_result != 1 and sequence_node.args: - if isinstance(factor.constant_result, _py_int_types) and factor.constant_result <= 0: + if isinstance(factor.constant_result, _py_int_types) and factor.constant_result <= 0: del sequence_node.args[:] sequence_node.mult_factor = None elif sequence_node.mult_factor is not None: - if (isinstance(factor.constant_result, _py_int_types) and - isinstance(sequence_node.mult_factor.constant_result, _py_int_types)): + if (isinstance(factor.constant_result, _py_int_types) and + isinstance(sequence_node.mult_factor.constant_result, _py_int_types)): value = sequence_node.mult_factor.constant_result * factor.constant_result sequence_node.mult_factor = ExprNodes.IntNode( sequence_node.mult_factor.pos, @@ -4332,16 +4332,16 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations): warning(pos, "Too few arguments for format placeholders", level=1) can_be_optimised = False break - if arg.is_starred: - can_be_optimised = False - break - if format_type in u'asrfdoxX': + if arg.is_starred: + can_be_optimised = False + break + if format_type in u'asrfdoxX': format_spec = s[1:] conversion_char = None if format_type in u'doxX' and u'.' in format_spec: # Precision is not allowed for integers in format(), but ok in %-formatting. can_be_optimised = False - elif format_type in u'ars': + elif format_type in u'ars': format_spec = format_spec[:-1] conversion_char = format_type if format_spec.startswith('0'): @@ -4363,7 +4363,7 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations): else: # keep it simple for now ... can_be_optimised = False - break + break if not can_be_optimised: # Print all warnings we can find before finally giving up here. @@ -4379,11 +4379,11 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations): node = ExprNodes.JoinedStrNode(pos, values=substrings) return self.visit_JoinedStrNode(node) - def visit_FormattedValueNode(self, node): - self.visitchildren(node) + def visit_FormattedValueNode(self, node): + self.visitchildren(node) conversion_char = node.conversion_char or 's' - if isinstance(node.format_spec, ExprNodes.UnicodeNode) and not node.format_spec.value: - node.format_spec = None + if isinstance(node.format_spec, ExprNodes.UnicodeNode) and not node.format_spec.value: + node.format_spec = None if node.format_spec is None and isinstance(node.value, ExprNodes.IntNode): value = EncodedString(node.value.value) if value.isdigit(): @@ -4396,130 +4396,130 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations): value = node.value.unicode_value if value is not None: return ExprNodes.UnicodeNode(node.value.pos, value=value, constant_result=value) - return node - - def visit_JoinedStrNode(self, node): - """ - Clean up after the parser by discarding empty Unicode strings and merging - substring sequences. Empty or single-value join lists are not uncommon - because f-string format specs are always parsed into JoinedStrNodes. 
- """ - self.visitchildren(node) - unicode_node = ExprNodes.UnicodeNode - - values = [] - for is_unode_group, substrings in itertools.groupby(node.values, lambda v: isinstance(v, unicode_node)): - if is_unode_group: - substrings = list(substrings) - unode = substrings[0] - if len(substrings) > 1: + return node + + def visit_JoinedStrNode(self, node): + """ + Clean up after the parser by discarding empty Unicode strings and merging + substring sequences. Empty or single-value join lists are not uncommon + because f-string format specs are always parsed into JoinedStrNodes. + """ + self.visitchildren(node) + unicode_node = ExprNodes.UnicodeNode + + values = [] + for is_unode_group, substrings in itertools.groupby(node.values, lambda v: isinstance(v, unicode_node)): + if is_unode_group: + substrings = list(substrings) + unode = substrings[0] + if len(substrings) > 1: value = EncodedString(u''.join(value.value for value in substrings)) unode = ExprNodes.UnicodeNode(unode.pos, value=value, constant_result=value) - # ignore empty Unicode strings - if unode.value: - values.append(unode) - else: - values.extend(substrings) - - if not values: + # ignore empty Unicode strings + if unode.value: + values.append(unode) + else: + values.extend(substrings) + + if not values: value = EncodedString('') node = ExprNodes.UnicodeNode(node.pos, value=value, constant_result=value) - elif len(values) == 1: - node = values[0] - elif len(values) == 2: - # reduce to string concatenation - node = ExprNodes.binop_node(node.pos, '+', *values) - else: - node.values = values - return node - - def visit_MergedDictNode(self, node): - """Unpack **args in place if we can.""" - self.visitchildren(node) - args = [] - items = [] - - def add(arg): - if arg.is_dict_literal: - if items: - items[0].key_value_pairs.extend(arg.key_value_pairs) - else: - items.append(arg) - elif isinstance(arg, ExprNodes.MergedDictNode): - for child_arg in arg.keyword_args: - add(child_arg) - else: - if items: - args.append(items[0]) - del items[:] - args.append(arg) - - for arg in node.keyword_args: - add(arg) - if items: - args.append(items[0]) - - if len(args) == 1: - arg = args[0] - if arg.is_dict_literal or isinstance(arg, ExprNodes.MergedDictNode): - return arg - node.keyword_args[:] = args - self._calculate_const(node) - return node - - def visit_MergedSequenceNode(self, node): - """Unpack *args in place if we can.""" - self.visitchildren(node) - - is_set = node.type is Builtin.set_type - args = [] - values = [] - - def add(arg): - if (is_set and arg.is_set_literal) or (arg.is_sequence_constructor and not arg.mult_factor): - if values: - values[0].args.extend(arg.args) - else: - values.append(arg) - elif isinstance(arg, ExprNodes.MergedSequenceNode): - for child_arg in arg.args: - add(child_arg) - else: - if values: - args.append(values[0]) - del values[:] - args.append(arg) - - for arg in node.args: - add(arg) - if values: - args.append(values[0]) - - if len(args) == 1: - arg = args[0] - if ((is_set and arg.is_set_literal) or - (arg.is_sequence_constructor and arg.type is node.type) or - isinstance(arg, ExprNodes.MergedSequenceNode)): - return arg - node.args[:] = args - self._calculate_const(node) - return node - - def visit_SequenceNode(self, node): - """Unpack *args in place if we can.""" - self.visitchildren(node) - args = [] - for arg in node.args: - if not arg.is_starred: - args.append(arg) - elif arg.target.is_sequence_constructor and not arg.target.mult_factor: - args.extend(arg.target.args) - else: - args.append(arg) - node.args[:] = 
args - self._calculate_const(node) - return node - + elif len(values) == 1: + node = values[0] + elif len(values) == 2: + # reduce to string concatenation + node = ExprNodes.binop_node(node.pos, '+', *values) + else: + node.values = values + return node + + def visit_MergedDictNode(self, node): + """Unpack **args in place if we can.""" + self.visitchildren(node) + args = [] + items = [] + + def add(arg): + if arg.is_dict_literal: + if items: + items[0].key_value_pairs.extend(arg.key_value_pairs) + else: + items.append(arg) + elif isinstance(arg, ExprNodes.MergedDictNode): + for child_arg in arg.keyword_args: + add(child_arg) + else: + if items: + args.append(items[0]) + del items[:] + args.append(arg) + + for arg in node.keyword_args: + add(arg) + if items: + args.append(items[0]) + + if len(args) == 1: + arg = args[0] + if arg.is_dict_literal or isinstance(arg, ExprNodes.MergedDictNode): + return arg + node.keyword_args[:] = args + self._calculate_const(node) + return node + + def visit_MergedSequenceNode(self, node): + """Unpack *args in place if we can.""" + self.visitchildren(node) + + is_set = node.type is Builtin.set_type + args = [] + values = [] + + def add(arg): + if (is_set and arg.is_set_literal) or (arg.is_sequence_constructor and not arg.mult_factor): + if values: + values[0].args.extend(arg.args) + else: + values.append(arg) + elif isinstance(arg, ExprNodes.MergedSequenceNode): + for child_arg in arg.args: + add(child_arg) + else: + if values: + args.append(values[0]) + del values[:] + args.append(arg) + + for arg in node.args: + add(arg) + if values: + args.append(values[0]) + + if len(args) == 1: + arg = args[0] + if ((is_set and arg.is_set_literal) or + (arg.is_sequence_constructor and arg.type is node.type) or + isinstance(arg, ExprNodes.MergedSequenceNode)): + return arg + node.args[:] = args + self._calculate_const(node) + return node + + def visit_SequenceNode(self, node): + """Unpack *args in place if we can.""" + self.visitchildren(node) + args = [] + for arg in node.args: + if not arg.is_starred: + args.append(arg) + elif arg.target.is_sequence_constructor and not arg.target.mult_factor: + args.extend(arg.target.args) + else: + args.append(arg) + node.args[:] = args + self._calculate_const(node) + return node + def visit_PrimaryCmpNode(self, node): # calculate constant partial results in the comparison cascade self.visitchildren(node, ['operand1']) @@ -4759,30 +4759,30 @@ class FinalOptimizePhase(Visitor.EnvTransform, Visitor.NodeRefCleanupMixin): else "optimize.unpack_method_calls")): # optimise simple Python methods calls if isinstance(node.arg_tuple, ExprNodes.TupleNode) and not ( - node.arg_tuple.mult_factor or (node.arg_tuple.is_literal and len(node.arg_tuple.args) > 1)): + node.arg_tuple.mult_factor or (node.arg_tuple.is_literal and len(node.arg_tuple.args) > 1)): # simple call, now exclude calls to objects that are definitely not methods may_be_a_method = True if function.type is Builtin.type_type: may_be_a_method = False - elif function.is_attribute: - if function.entry and function.entry.type.is_cfunction: - # optimised builtin method - may_be_a_method = False + elif function.is_attribute: + if function.entry and function.entry.type.is_cfunction: + # optimised builtin method + may_be_a_method = False elif function.is_name: - entry = function.entry - if entry.is_builtin or entry.type.is_cfunction: + entry = function.entry + if entry.is_builtin or entry.type.is_cfunction: may_be_a_method = False - elif entry.cf_assignments: + elif entry.cf_assignments: # 
local functions/classes are definitely not methods non_method_nodes = (ExprNodes.PyCFunctionNode, ExprNodes.ClassNode, ExprNodes.Py3ClassNode) may_be_a_method = any( assignment.rhs and not isinstance(assignment.rhs, non_method_nodes) - for assignment in entry.cf_assignments) if may_be_a_method: - if (node.self and function.is_attribute and - isinstance(function.obj, ExprNodes.CloneNode) and function.obj.arg is node.self): - # function self object was moved into a CloneNode => undo - function.obj = function.obj.arg + if (node.self and function.is_attribute and + isinstance(function.obj, ExprNodes.CloneNode) and function.obj.arg is node.self): + # function self object was moved into a CloneNode => undo + function.obj = function.obj.arg node = self.replace(node, ExprNodes.PyMethodCallNode.from_node( node, function=function, arg_tuple=node.arg_tuple, type=node.type)) return node diff --git a/contrib/tools/cython/Cython/Compiler/Options.py b/contrib/tools/cython/Cython/Compiler/Options.py index a90813e954..b3ffbcd927 100644 --- a/contrib/tools/cython/Cython/Compiler/Options.py +++ b/contrib/tools/cython/Cython/Compiler/Options.py @@ -4,182 +4,182 @@ from __future__ import absolute_import - -class ShouldBeFromDirective(object): - known_directives = [] +class ShouldBeFromDirective(object): + + known_directives = [] def __init__(self, options_name, directive_name=None, disallow=False): - self.options_name = options_name - self.directive_name = directive_name or options_name + self.options_name = options_name + self.directive_name = directive_name or options_name self.disallow = disallow - self.known_directives.append(self) - - def __nonzero__(self): - self._bad_access() - - def __int__(self): - self._bad_access() - - def _bad_access(self): - raise RuntimeError(repr(self)) - - def __repr__(self): - return ( - "Illegal access of '%s' from Options module rather than directive '%s'" - % (self.options_name, self.directive_name)) - - -""" -The members of this module are documented using autodata in -Cython/docs/src/reference/compilation.rst. -See http://www.sphinx-doc.org/en/master/ext/autodoc.html#directive-autoattribute -for how autodata works. -Descriptions of those members should start with a #: -Don't forget to keep the docs in sync by removing and adding -the members in both this file and the .rst file. -""" - -#: Whether or not to include docstrings in the Python extension. If False, the binary size -#: will be smaller, but the ``__doc__`` attribute of any class or function will be an -#: empty string. + self.known_directives.append(self) + + def __nonzero__(self): + self._bad_access() + + def __int__(self): + self._bad_access() + + def _bad_access(self): + raise RuntimeError(repr(self)) + + def __repr__(self): + return ( + "Illegal access of '%s' from Options module rather than directive '%s'" + % (self.options_name, self.directive_name)) + + +""" +The members of this module are documented using autodata in +Cython/docs/src/reference/compilation.rst. +See http://www.sphinx-doc.org/en/master/ext/autodoc.html#directive-autoattribute +for how autodata works. +Descriptions of those members should start with a #: +Don't forget to keep the docs in sync by removing and adding +the members in both this file and the .rst file. +""" + +#: Whether or not to include docstrings in the Python extension. If False, the binary size +#: will be smaller, but the ``__doc__`` attribute of any class or function will be an +#: empty string.
docstrings = True -#: Embed the source code position in the docstrings of functions and classes. -embed_pos_in_docstring = False - -#: Copy the original source code line by line into C code comments -#: in the generated code file to help with understanding the output. -#: This is also required for coverage analysis. -emit_code_comments = True - -# undocumented -pre_import = None - -#: Decref global variables in each module on exit for garbage collection. -#: 0: None, 1+: interned objects, 2+: cdef globals, 3+: type objects -#: Mostly for reducing noise in Valgrind as it typically executes at process exit -#: (when all memory will be reclaimed anyway). -#: Note that directly or indirectly executed cleanup code that makes use of global -#: variables or types may no longer be safe when enabling the respective level since -#: there is no guaranteed order in which the (reference counted) objects will -#: be cleaned up. The order can change due to live references and reference cycles. +#: Embed the source code position in the docstrings of functions and classes. +embed_pos_in_docstring = False + +#: Copy the original source code line by line into C code comments +#: in the generated code file to help with understanding the output. +#: This is also required for coverage analysis. +emit_code_comments = True + +# undocumented +pre_import = None + +#: Decref global variables in each module on exit for garbage collection. +#: 0: None, 1+: interned objects, 2+: cdef globals, 3+: type objects +#: Mostly for reducing noise in Valgrind as it typically executes at process exit +#: (when all memory will be reclaimed anyway). +#: Note that directly or indirectly executed cleanup code that makes use of global +#: variables or types may no longer be safe when enabling the respective level since +#: there is no guaranteed order in which the (reference counted) objects will +#: be cleaned up. The order can change due to live references and reference cycles. generate_cleanup_code = False -#: Should tp_clear() set object fields to None instead of clearing them to NULL? -clear_to_none = True - -#: Generate an annotated HTML version of the input source files for debugging and optimisation purposes. -#: This has the same effect as the ``annotate`` argument in :func:`cythonize`. +#: Should tp_clear() set object fields to None instead of clearing them to NULL? +clear_to_none = True + +#: Generate an annotated HTML version of the input source files for debugging and optimisation purposes. +#: This has the same effect as the ``annotate`` argument in :func:`cythonize`. annotate = False -# When annotating source files in HTML, include coverage information from -# this file. -annotate_coverage_xml = None - -#: This will abort the compilation on the first error encountered rather than trying -#: to keep going and printing further error messages. +# When annotating source files in HTML, include coverage information from +# this file. +annotate_coverage_xml = None + +#: This will abort the compilation on the first error encountered rather than trying +#: to keep going and printing further error messages. fast_fail = False -#: Turn all warnings into errors. +#: Turn all warnings into errors. warning_errors = False -#: Make unknown names an error.
Python raises a NameError when +#: encountering unknown names at runtime, whereas this option makes +#: them a compile time error. If you want full Python compatibility, +#: you should disable this option and also 'cache_builtins'. error_on_unknown_names = True -#: Make uninitialized local variable reference a compile time error. -#: Python raises UnboundLocalError at runtime, whereas this option makes -#: them a compile time error. Note that this option affects only variables -#: of "python object" type. +#: Make uninitialized local variable reference a compile time error. +#: Python raises UnboundLocalError at runtime, whereas this option makes +#: them a compile time error. Note that this option affects only variables +#: of "python object" type. error_on_uninitialized = True -#: This will convert statements of the form ``for i in range(...)`` -#: to ``for i from ...`` when ``i`` is a C integer type, and the direction -#: (i.e. sign of step) can be determined. -#: WARNING: This may change the semantics if the range causes assignment to -#: i to overflow. Specifically, if this option is set, an error will be -#: raised before the loop is entered, whereas without this option the loop -#: will execute until an overflowing value is encountered. +#: This will convert statements of the form ``for i in range(...)`` +#: to ``for i from ...`` when ``i`` is a C integer type, and the direction +#: (i.e. sign of step) can be determined. +#: WARNING: This may change the semantics if the range causes assignment to +#: i to overflow. Specifically, if this option is set, an error will be +#: raised before the loop is entered, whereas without this option the loop +#: will execute until an overflowing value is encountered. convert_range = True -#: Perform lookups on builtin names only once, at module initialisation -#: time. This will prevent the module from getting imported if a -#: builtin name that it uses cannot be found during initialisation. -#: Default is True. -#: Note that some legacy builtins are automatically remapped -#: from their Python 2 names to their Python 3 names by Cython -#: when building in Python 3.x, -#: so that they do not get in the way even if this option is enabled. -cache_builtins = True - -#: Generate branch prediction hints to speed up error handling etc. -gcc_branch_hints = True - -#: Enable this to allow one to write ``your_module.foo = ...`` to overwrite the -#: definition of the cpdef function foo, at the cost of an extra dictionary -#: lookup on every call. -#: If this is false it generates only the Python wrapper and no override check. +#: Perform lookups on builtin names only once, at module initialisation +#: time. This will prevent the module from getting imported if a +#: builtin name that it uses cannot be found during initialisation. +#: Default is True. +#: Note that some legacy builtins are automatically remapped +#: from their Python 2 names to their Python 3 names by Cython +#: when building in Python 3.x, +#: so that they do not get in the way even if this option is enabled. +cache_builtins = True + +#: Generate branch prediction hints to speed up error handling etc. +gcc_branch_hints = True + +#: Enable this to allow one to write ``your_module.foo = ...`` to overwrite the +#: definition of the cpdef function foo, at the cost of an extra dictionary +#: lookup on every call. +#: If this is false it generates only the Python wrapper and no override check.
lookup_module_cpdef = False -#: Whether or not to embed the Python interpreter, for use in making a -#: standalone executable or calling from external libraries. -#: This will provide a C function which initialises the interpreter and -#: executes the body of this module. -#: See `this demo <https://github.com/cython/cython/tree/master/Demos/embed>`_ -#: for a concrete example. -#: If true, the initialisation function is the C main() function, but -#: this option can also be set to a non-empty string to provide a function name explicitly. -#: Default is False. +#: Whether or not to embed the Python interpreter, for use in making a +#: standalone executable or calling from external libraries. +#: This will provide a C function which initialises the interpreter and +#: executes the body of this module. +#: See `this demo <https://github.com/cython/cython/tree/master/Demos/embed>`_ +#: for a concrete example. +#: If true, the initialisation function is the C main() function, but +#: this option can also be set to a non-empty string to provide a function name explicitly. +#: Default is False. embed = None # In previous iterations of Cython, globals() gave the first non-Cython module # globals in the call stack. Sage relies on this behavior for variable injection. -old_style_globals = ShouldBeFromDirective('old_style_globals') +old_style_globals = ShouldBeFromDirective('old_style_globals') -#: Allows cimporting from a pyx file without a pxd file. +#: Allows cimporting from a pyx file without a pxd file. cimport_from_pyx = False -#: Maximum number of dimensions for buffers -- set lower than number of -#: dimensions in numpy, as -#: slices are passed by value and involve a lot of copying. +#: Maximum number of dimensions for buffers -- set lower than number of +#: dimensions in numpy, as +#: slices are passed by value and involve a lot of copying. buffer_max_dims = 8 -#: Number of function closure instances to keep in a freelist (0: no freelists) +#: Number of function closure instances to keep in a freelist (0: no freelists) closure_freelist_size = 8 # Arcadia specific source_root = None -def get_directive_defaults(): - # To add an item to this list, all accesses should be changed to use the new - # directive, and the global option itself should be set to an instance of - # ShouldBeFromDirective. - for old_option in ShouldBeFromDirective.known_directives: - value = globals().get(old_option.options_name) - assert old_option.directive_name in _directive_defaults - if not isinstance(value, ShouldBeFromDirective): - if old_option.disallow: - raise RuntimeError( - "Option '%s' must be set from directive '%s'" % ( - old_option.options_name, old_option.directive_name)) - else: - # Warn? - _directive_defaults[old_option.directive_name] = value - return _directive_defaults +def get_directive_defaults(): + # To add an item to this list, all accesses should be changed to use the new + # directive, and the global option itself should be set to an instance of + # ShouldBeFromDirective. + for old_option in ShouldBeFromDirective.known_directives: + value = globals().get(old_option.options_name) + assert old_option.directive_name in _directive_defaults + if not isinstance(value, ShouldBeFromDirective): + if old_option.disallow: + raise RuntimeError( + "Option '%s' must be set from directive '%s'" % ( + old_option.options_name, old_option.directive_name)) + else: + # Warn?
+ _directive_defaults[old_option.directive_name] = value + return _directive_defaults # Declare compiler directives -_directive_defaults = { +_directive_defaults = { 'boundscheck' : True, 'nonecheck' : False, 'initializedcheck' : True, 'embedsignature' : False, 'auto_cpdef': False, 'auto_pickle': None, - 'cdivision': False, # was True before 0.12 + 'cdivision': False, # was True before 0.12 'cdivision_warnings': False, 'c_api_binop_methods': True, 'overflowcheck': False, @@ -187,28 +187,28 @@ _directive_defaults = { 'always_allow_keywords': False, 'allow_none_for_extension_args': True, 'wraparound' : True, - 'ccomplex' : False, # use C99/C++ for complex types and arith + 'ccomplex' : False, # use C99/C++ for complex types and arith 'callspec' : "", - 'nogil' : False, + 'nogil' : False, 'profile': False, 'linetrace': False, - 'emit_code_comments': True, # copy original source code into C code comments - 'annotation_typing': True, # read type declarations from Python function annotations + 'emit_code_comments': True, # copy original source code into C code comments + 'annotation_typing': True, # read type declarations from Python function annotations 'infer_types': None, 'infer_types.verbose': False, 'autotestdict': True, 'autotestdict.cdef': False, 'autotestdict.all': False, - 'language_level': None, - 'fast_getattr': False, # Undocumented until we come up with a better way to handle this everywhere. - 'py2_import': False, # For backward compatibility of Cython's source code in Py3 source mode + 'language_level': None, + 'fast_getattr': False, # Undocumented until we come up with a better way to handle this everywhere. + 'py2_import': False, # For backward compatibility of Cython's source code in Py3 source mode 'preliminary_late_includes_cy28': False, # Temporary directive in 0.28, to be removed in a later version (see GH#2079). 'iterable_coroutine': False, # Make async coroutines backwards compatible with the old asyncio yield-from syntax. 
'c_string_type': 'bytes', 'c_string_encoding': '', - 'type_version_tag': True, # enables Py_TPFLAGS_HAVE_VERSION_TAG on extension types + 'type_version_tag': True, # enables Py_TPFLAGS_HAVE_VERSION_TAG on extension types 'unraisable_tracebacks': True, - 'old_style_globals': False, + 'old_style_globals': False, 'np_pythran': False, 'fast_gil': False, @@ -226,16 +226,16 @@ _directive_defaults = { # optimizations 'optimize.inline_defnode_calls': True, - 'optimize.unpack_method_calls': True, # increases code size when True - 'optimize.unpack_method_calls_in_pyinit': False, # uselessly increases code size when True + 'optimize.unpack_method_calls': True, # increases code size when True + 'optimize.unpack_method_calls_in_pyinit': False, # uselessly increases code size when True 'optimize.use_switch': True, # remove unreachable code 'remove_unreachable': True, # control flow debug directives - 'control_flow.dot_output': "", # Graphviz output filename - 'control_flow.dot_annotate_defs': False, # Annotate definitions + 'control_flow.dot_output': "", # Graphviz output filename + 'control_flow.dot_annotate_defs': False, # Annotate definitions # test support 'test_assert_path_exists' : [], @@ -243,8 +243,8 @@ _directive_defaults = { # experimental, subject to change 'binding': None, - - 'formal_grammar': False, + + 'formal_grammar': False, } # Extra warning directives @@ -300,49 +300,49 @@ def normalise_encoding_name(option_name, encoding): # Override types possibilities above, if needed directive_types = { - 'language_level': str, # values can be None/2/3/'3str', where None == 2+warning + 'language_level': str, # values can be None/2/3/'3str', where None == 2+warning 'auto_pickle': bool, - 'locals': dict, + 'locals': dict, 'final' : bool, # final cdef classes and methods - 'nogil' : bool, + 'nogil' : bool, 'internal' : bool, # cdef class visibility in the module dict - 'infer_types' : bool, # values can be True/None/False + 'infer_types' : bool, # values can be True/None/False 'binding' : bool, - 'cfunc' : None, # decorators do not take directive value + 'cfunc' : None, # decorators do not take directive value 'ccall' : None, 'inline' : None, 'staticmethod' : None, 'cclass' : None, - 'no_gc_clear' : bool, - 'no_gc' : bool, + 'no_gc_clear' : bool, + 'no_gc' : bool, 'returns' : type, - 'exceptval': type, # actually (type, check=True/False), but has its own parser + 'exceptval': type, # actually (type, check=True/False), but has its own parser 'set_initial_path': str, 'freelist': int, 'c_string_type': one_of('bytes', 'bytearray', 'str', 'unicode'), 'c_string_encoding': normalise_encoding_name, } -for key, val in _directive_defaults.items(): +for key, val in _directive_defaults.items(): if key not in directive_types: directive_types[key] = type(val) -directive_scopes = { # defaults to available everywhere +directive_scopes = { # defaults to available everywhere # 'module', 'function', 'class', 'with statement' 'auto_pickle': ('module', 'cclass'), 'final' : ('cclass', 'function'), - 'nogil' : ('function', 'with statement'), + 'nogil' : ('function', 'with statement'), 'inline' : ('function',), - 'cfunc' : ('function', 'with statement'), - 'ccall' : ('function', 'with statement'), + 'cfunc' : ('function', 'with statement'), + 'ccall' : ('function', 'with statement'), 'returns' : ('function',), 'exceptval' : ('function',), 'locals' : ('function',), 'staticmethod' : ('function',), # FIXME: analysis currently lacks more specific function scope 'no_gc_clear' : ('cclass',), - 'no_gc' : ('cclass',), + 'no_gc' : 
('cclass',), 'internal' : ('cclass',), - 'cclass' : ('class', 'cclass', 'with statement'), + 'cclass' : ('class', 'cclass', 'with statement'), 'autotestdict' : ('module',), 'autotestdict.all' : ('module',), 'autotestdict.cdef' : ('module',), @@ -350,28 +350,28 @@ directive_scopes = { # defaults to available everywhere 'test_assert_path_exists' : ('function', 'class', 'cclass'), 'test_fail_if_path_exists' : ('function', 'class', 'cclass'), 'freelist': ('cclass',), - 'emit_code_comments': ('module',), + 'emit_code_comments': ('module',), 'annotation_typing': ('module',), # FIXME: analysis currently lacks more specific function scope # Avoid scope-specific to/from_py_functions for c_string. 'c_string_type': ('module',), 'c_string_encoding': ('module',), 'type_version_tag': ('module', 'cclass'), - 'language_level': ('module',), - # globals() could conceivably be controlled at a finer granularity, - # but that would complicate the implementation - 'old_style_globals': ('module',), + 'language_level': ('module',), + # globals() could conceivably be controlled at a finer granularity, + # but that would complicate the implementation + 'old_style_globals': ('module',), 'np_pythran': ('module',), 'fast_gil': ('module',), 'iterable_coroutine': ('module', 'function'), } - + def parse_directive_value(name, value, relaxed_bool=False): """ Parses value as an option value for the given name and returns the interpreted value. None is returned if the option does not exist. - >>> print(parse_directive_value('nonexisting', 'asdf asdfd')) + >>> print(parse_directive_value('nonexisting', 'asdf asdfd')) None >>> parse_directive_value('boundscheck', 'True') True @@ -395,21 +395,21 @@ def parse_directive_value(name, value, relaxed_bool=False): ValueError: c_string_type directive must be one of ('bytes', 'bytearray', 'str', 'unicode'), got 'unnicode' """ type = directive_types.get(name) - if not type: - return None + if not type: + return None orig_value = value if type is bool: value = str(value) - if value == 'True': - return True - if value == 'False': - return False + if value == 'True': + return True + if value == 'False': + return False if relaxed_bool: value = value.lower() - if value in ("true", "yes"): - return True - elif value in ("false", "no"): - return False + if value in ("true", "yes"): + return True + elif value in ("false", "no"): + return False raise ValueError("%s directive must be set to True or False, got '%s'" % ( name, orig_value)) elif type is int: @@ -425,7 +425,7 @@ def parse_directive_value(name, value, relaxed_bool=False): else: assert False - + def parse_directive_list(s, relaxed_bool=False, ignore_unknown=False, current_settings=None): """ @@ -461,16 +461,16 @@ def parse_directive_list(s, relaxed_bool=False, ignore_unknown=False, result = current_settings for item in s.split(','): item = item.strip() - if not item: - continue - if '=' not in item: - raise ValueError('Expected "=" in option "%s"' % item) - name, value = [s.strip() for s in item.strip().split('=', 1)] - if name not in _directive_defaults: + if not item: + continue + if '=' not in item: + raise ValueError('Expected "=" in option "%s"' % item) + name, value = [s.strip() for s in item.strip().split('=', 1)] + if name not in _directive_defaults: found = False if name.endswith('.all'): prefix = name[:-3] - for directive in _directive_defaults: + for directive in _directive_defaults: if directive.startswith(prefix): found = True parsed_value = parse_directive_value(directive, value, relaxed_bool=relaxed_bool) @@ -481,73 
+481,73 @@ def parse_directive_list(s, relaxed_bool=False, ignore_unknown=False, parsed_value = parse_directive_value(name, value, relaxed_bool=relaxed_bool) result[name] = parsed_value return result - - -def parse_variable_value(value): - """ - Parses a value string as an option value and returns - the interpreted value. - - >>> parse_variable_value('True') - True - >>> parse_variable_value('true') - 'true' - >>> parse_variable_value('us-ascii') - 'us-ascii' - >>> parse_variable_value('str') - 'str' - >>> parse_variable_value('123') - 123 - >>> parse_variable_value('1.23') - 1.23 - - """ - if value == "True": - return True - elif value == "False": - return False - elif value == "None": - return None - elif value.isdigit(): - return int(value) - else: - try: - value = float(value) - except Exception: - # Not a float - pass - return value - - -def parse_compile_time_env(s, current_settings=None): - """ - Parses a comma-separated list of pragma options. Whitespace - is ignored. - - >>> parse_compile_time_env(' ') - {} - >>> (parse_compile_time_env('HAVE_OPENMP=True') == - ... {'HAVE_OPENMP': True}) - True - >>> parse_compile_time_env(' asdf') - Traceback (most recent call last): - ... - ValueError: Expected "=" in option "asdf" - >>> parse_compile_time_env('NUM_THREADS=4') == {'NUM_THREADS': 4} - True - >>> parse_compile_time_env('unknown=anything') == {'unknown': 'anything'} - True - """ - if current_settings is None: - result = {} - else: - result = current_settings - for item in s.split(','): - item = item.strip() - if not item: - continue - if '=' not in item: - raise ValueError('Expected "=" in option "%s"' % item) - name, value = [s.strip() for s in item.split('=', 1)] - result[name] = parse_variable_value(value) - return result + + +def parse_variable_value(value): + """ + Parses a value string as an option value and returns + the interpreted value. + + >>> parse_variable_value('True') + True + >>> parse_variable_value('true') + 'true' + >>> parse_variable_value('us-ascii') + 'us-ascii' + >>> parse_variable_value('str') + 'str' + >>> parse_variable_value('123') + 123 + >>> parse_variable_value('1.23') + 1.23 + + """ + if value == "True": + return True + elif value == "False": + return False + elif value == "None": + return None + elif value.isdigit(): + return int(value) + else: + try: + value = float(value) + except Exception: + # Not a float + pass + return value + + +def parse_compile_time_env(s, current_settings=None): + """ + Parses a comma-separated list of pragma options. Whitespace + is ignored. + + >>> parse_compile_time_env(' ') + {} + >>> (parse_compile_time_env('HAVE_OPENMP=True') == + ... {'HAVE_OPENMP': True}) + True + >>> parse_compile_time_env(' asdf') + Traceback (most recent call last): + ...
+ ValueError: Expected "=" in option "asdf" + >>> parse_compile_time_env('NUM_THREADS=4') == {'NUM_THREADS': 4} + True + >>> parse_compile_time_env('unknown=anything') == {'unknown': 'anything'} + True + """ + if current_settings is None: + result = {} + else: + result = current_settings + for item in s.split(','): + item = item.strip() + if not item: + continue + if '=' not in item: + raise ValueError('Expected "=" in option "%s"' % item) + name, value = [s.strip() for s in item.split('=', 1)] + result[name] = parse_variable_value(value) + return result diff --git a/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.pxd b/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.pxd index c71407c748..2c17901fa4 100644 --- a/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.pxd +++ b/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.pxd @@ -70,9 +70,9 @@ cdef class CreateClosureClasses(CythonTransform): cdef create_class_from_scope(self, node, target_module_scope, inner_node=*) cdef find_entries_used_in_closures(self, node) -#cdef class InjectGilHandling(VisitorTransform, SkipDeclarations): -# cdef bint nogil - +#cdef class InjectGilHandling(VisitorTransform, SkipDeclarations): +# cdef bint nogil + cdef class GilCheck(VisitorTransform): cdef list env_stack cdef bint nogil diff --git a/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py b/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py index 32bc736e00..0da3670cae 100644 --- a/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py +++ b/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py @@ -4,11 +4,11 @@ import cython cython.declare(PyrexTypes=object, Naming=object, ExprNodes=object, Nodes=object, Options=object, UtilNodes=object, LetNode=object, LetRefNode=object, TreeFragment=object, EncodedString=object, - error=object, warning=object, copy=object, _unicode=object) + error=object, warning=object, copy=object, _unicode=object) -import copy +import copy import hashlib - + from . import PyrexTypes from . import Naming from . import ExprNodes @@ -19,9 +19,9 @@ from . 
import Errors from .Visitor import VisitorTransform, TreeVisitor from .Visitor import CythonTransform, EnvTransform, ScopeTrackingTransform -from .UtilNodes import LetNode, LetRefNode +from .UtilNodes import LetNode, LetRefNode from .TreeFragment import TreeFragment -from .StringEncoding import EncodedString, _unicode +from .StringEncoding import EncodedString, _unicode from .Errors import error, warning, CompileError, InternalError from .Code import UtilityCode @@ -52,7 +52,7 @@ class SkipDeclarations(object): def visit_CStructOrUnionDefNode(self, node): return node - + class NormalizeTree(CythonTransform): """ This transform fixes up a few things after parsing @@ -187,7 +187,7 @@ class PostParse(ScopeTrackingTransform): body = Nodes.ReturnStatNode( node.result_expr.pos, value=node.result_expr) node.def_node = Nodes.DefNode( - node.pos, name=node.name, + node.pos, name=node.name, args=node.args, star_arg=node.star_arg, starstar_arg=node.starstar_arg, body=body, doc=None) @@ -254,7 +254,7 @@ class PostParse(ScopeTrackingTransform): newdecls.append(decl) node.declarators = newdecls return stats - except PostParseError as e: + except PostParseError as e: # An error in a cdef clause is ok, simply remove the declaration # and try to move on to report more errors self.context.nonfatal_error(e) @@ -419,11 +419,11 @@ def sort_common_subsequences(items): for pos, item in enumerate(items): key = item[1] # the ResultRefNode which has already been injected into the sequences new_pos = pos - for i in range(pos-1, -1, -1): + for i in range(pos-1, -1, -1): if lower_than(key, items[i][0]): new_pos = i if new_pos != pos: - for i in range(pos, new_pos, -1): + for i in range(pos, new_pos, -1): items[i] = items[i-1] items[new_pos] = item @@ -459,7 +459,7 @@ def flatten_parallel_assignments(input, output): rhs_args = unpack_string_to_character_literals(rhs) rhs_size = len(rhs_args) - lhs_targets = [[] for _ in range(rhs_size)] + lhs_targets = [[] for _ in range(rhs_size)] starred_assignments = [] for lhs in input[:-1]: if not lhs.is_sequence_constructor: @@ -613,8 +613,8 @@ class TrackNumpyAttributes(VisitorTransform, SkipDeclarations): def visit_AttributeNode(self, node): self.visitchildren(node) - obj = node.obj - if (obj.is_name and obj.name in self.numpy_module_names) or obj.is_numpy_attribute: + obj = node.obj + if (obj.is_name and obj.name in self.numpy_module_names) or obj.is_numpy_attribute: node.is_numpy_attribute = True return node @@ -656,9 +656,9 @@ class InterpretCompilerDirectives(CythonTransform): 'operator.predecrement' : ExprNodes.inc_dec_constructor(True, '--'), 'operator.postincrement': ExprNodes.inc_dec_constructor(False, '++'), 'operator.postdecrement': ExprNodes.inc_dec_constructor(False, '--'), - 'operator.typeid' : ExprNodes.TypeidNode, + 'operator.typeid' : ExprNodes.TypeidNode, - # For backwards compatibility. + # For backwards compatibility. 
'address': ExprNodes.AmpersandNode, } @@ -669,13 +669,13 @@ class InterpretCompilerDirectives(CythonTransform): special_methods = set(['declare', 'union', 'struct', 'typedef', 'sizeof', 'cast', 'pointer', 'compiled', 'NULL', 'fused_type', 'parallel']) - special_methods.update(unop_method_nodes) + special_methods.update(unop_method_nodes) valid_parallel_directives = set([ "parallel", "prange", "threadid", - #"threadsavailable", + #"threadsavailable", ]) def __init__(self, context, compilation_directive_defaults): @@ -683,9 +683,9 @@ class InterpretCompilerDirectives(CythonTransform): self.cython_module_names = set() self.directive_names = {'staticmethod': 'staticmethod'} self.parallel_directives = {} - directives = copy.deepcopy(Options.get_directive_defaults()) + directives = copy.deepcopy(Options.get_directive_defaults()) for key, value in compilation_directive_defaults.items(): - directives[_unicode(key)] = copy.deepcopy(value) + directives[_unicode(key)] = copy.deepcopy(value) self.directives = directives def check_directive_scope(self, pos, directive, scope): @@ -695,13 +695,13 @@ class InterpretCompilerDirectives(CythonTransform): 'is not allowed in %s scope' % (directive, scope))) return False else: - if directive not in Options.directive_types: + if directive not in Options.directive_types: error(pos, "Invalid directive: '%s'." % (directive,)) return True # Set up processing and handle the cython: comments. def visit_ModuleNode(self, node): - for key in sorted(node.directive_comments): + for key in sorted(node.directive_comments): if not self.check_directive_scope(node.pos, key, 'module'): self.wrong_scope_error(node.pos, key, 'module') del node.directive_comments[key] @@ -917,7 +917,7 @@ class InterpretCompilerDirectives(CythonTransform): directivetype = Options.directive_types.get(optname) if len(args) == 1 and isinstance(args[0], ExprNodes.NoneNode): - return optname, Options.get_directive_defaults()[optname] + return optname, Options.get_directive_defaults()[optname] elif directivetype is bool: if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.BoolNode): raise PostParseError(pos, @@ -958,34 +958,34 @@ class InterpretCompilerDirectives(CythonTransform): else: assert False - def visit_with_directives(self, node, directives): - if not directives: - return self.visit_Node(node) - - old_directives = self.directives - new_directives = dict(old_directives) - new_directives.update(directives) - - if new_directives == old_directives: - return self.visit_Node(node) - - self.directives = new_directives - retbody = self.visit_Node(node) - self.directives = old_directives - - if not isinstance(retbody, Nodes.StatListNode): - retbody = Nodes.StatListNode(node.pos, stats=[retbody]) - return Nodes.CompilerDirectivesNode( - pos=retbody.pos, body=retbody, directives=new_directives) - + def visit_with_directives(self, node, directives): + if not directives: + return self.visit_Node(node) + + old_directives = self.directives + new_directives = dict(old_directives) + new_directives.update(directives) + + if new_directives == old_directives: + return self.visit_Node(node) + + self.directives = new_directives + retbody = self.visit_Node(node) + self.directives = old_directives + + if not isinstance(retbody, Nodes.StatListNode): + retbody = Nodes.StatListNode(node.pos, stats=[retbody]) + return Nodes.CompilerDirectivesNode( + pos=retbody.pos, body=retbody, directives=new_directives) + # Handle decorators def visit_FuncDefNode(self, node): directives = 
self._extract_directives(node, 'function') - return self.visit_with_directives(node, directives) + return self.visit_with_directives(node, directives) def visit_CVarDefNode(self, node): directives = self._extract_directives(node, 'function') - for name, value in directives.items(): + for name, value in directives.items(): if name == 'locals': node.directive_locals = value elif name not in ('final', 'staticmethod'): @@ -993,19 +993,19 @@ class InterpretCompilerDirectives(CythonTransform): node.pos, "Cdef functions can only take cython.locals(), " "staticmethod, or final decorators, got %s." % name)) - return self.visit_with_directives(node, directives) + return self.visit_with_directives(node, directives) def visit_CClassDefNode(self, node): directives = self._extract_directives(node, 'cclass') - return self.visit_with_directives(node, directives) + return self.visit_with_directives(node, directives) def visit_CppClassNode(self, node): directives = self._extract_directives(node, 'cppclass') - return self.visit_with_directives(node, directives) + return self.visit_with_directives(node, directives) def visit_PyClassDefNode(self, node): directives = self._extract_directives(node, 'class') - return self.visit_with_directives(node, directives) + return self.visit_with_directives(node, directives) def _extract_directives(self, node, scope_name): if not node.decorators: @@ -1052,7 +1052,7 @@ class InterpretCompilerDirectives(CythonTransform): optdict[name] = value return optdict - # Handle with-statements + # Handle with-statements def visit_WithStatNode(self, node): directive_dict = {} for directive in self.try_to_parse_directives(node.manager) or []: @@ -1252,19 +1252,19 @@ class WithTransform(CythonTransform, SkipDeclarations): def visit_WithStatNode(self, node): self.visitchildren(node, 'body') pos = node.pos - is_async = node.is_async + is_async = node.is_async body, target, manager = node.body, node.target, node.manager node.enter_call = ExprNodes.SimpleCallNode( pos, function=ExprNodes.AttributeNode( pos, obj=ExprNodes.CloneNode(manager), - attribute=EncodedString('__aenter__' if is_async else '__enter__'), + attribute=EncodedString('__aenter__' if is_async else '__enter__'), is_special_lookup=True), args=[], is_temp=True) - if is_async: - node.enter_call = ExprNodes.AwaitExprNode(pos, arg=node.enter_call) - + if is_async: + node.enter_call = ExprNodes.AwaitExprNode(pos, arg=node.enter_call) + if target is not None: body = Nodes.StatListNode( pos, stats=[ @@ -1282,7 +1282,7 @@ class WithTransform(CythonTransform, SkipDeclarations): pos, operand=ExprNodes.WithExitCallNode( pos, with_stat=node, test_if_run=False, - args=excinfo_target, + args=excinfo_target, await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)), body=Nodes.ReraiseStatNode(pos), ), @@ -1304,7 +1304,7 @@ class WithTransform(CythonTransform, SkipDeclarations): pos, with_stat=node, test_if_run=True, args=ExprNodes.TupleNode( - pos, args=[ExprNodes.NoneNode(pos) for _ in range(3)]), + pos, args=[ExprNodes.NoneNode(pos) for _ in range(3)]), await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)), handle_error_case=False, ) @@ -1316,76 +1316,76 @@ class WithTransform(CythonTransform, SkipDeclarations): class DecoratorTransform(ScopeTrackingTransform, SkipDeclarations): - """ - Transforms method decorators in cdef classes into nested calls or properties. + """ + Transforms method decorators in cdef classes into nested calls or properties. 
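# --- Editorial sketch (not part of this diff): driving the directive
# parsing shown above. parse_directive_list() splits a "name=value,..."
# string and performs the '.all' prefix expansion; relaxed_bool additionally
# accepts "true"/"yes"/"false"/"no". Assumes a Cython checkout is importable.
from Cython.Compiler import Options

settings = Options.parse_directive_list(
    'boundscheck=False, warn.all=true', relaxed_bool=True)
assert settings['boundscheck'] is False
assert settings['warn.unused'] is True   # one of the expanded 'warn.*' directives

# In user code, the same directives attach at the scopes validated by
# check_directive_scope(), e.g. per function (decorator) or per block
# (with-statement):
import cython

@cython.boundscheck(False)
def first(x):
    with cython.wraparound(False):
        return x[0]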
- Python-style decorator properties are transformed into a PropertyNode - with up to the three getter, setter and deleter DefNodes. - The functional style isn't supported yet. + Python-style decorator properties are transformed into a PropertyNode + with up to the three getter, setter and deleter DefNodes. + The functional style isn't supported yet. """ - _properties = None - - _map_property_attribute = { - 'getter': '__get__', - 'setter': '__set__', - 'deleter': '__del__', - }.get - - def visit_CClassDefNode(self, node): - if self._properties is None: - self._properties = [] - self._properties.append({}) - super(DecoratorTransform, self).visit_CClassDefNode(node) - self._properties.pop() - return node - - def visit_PropertyNode(self, node): - # Low-level warning for other code until we can convert all our uses over. - level = 2 if isinstance(node.pos[0], str) else 0 - warning(node.pos, "'property %s:' syntax is deprecated, use '@property'" % node.name, level) - return node - - def visit_DefNode(self, node): + _properties = None + + _map_property_attribute = { + 'getter': '__get__', + 'setter': '__set__', + 'deleter': '__del__', + }.get + + def visit_CClassDefNode(self, node): + if self._properties is None: + self._properties = [] + self._properties.append({}) + super(DecoratorTransform, self).visit_CClassDefNode(node) + self._properties.pop() + return node + + def visit_PropertyNode(self, node): + # Low-level warning for other code until we can convert all our uses over. + level = 2 if isinstance(node.pos[0], str) else 0 + warning(node.pos, "'property %s:' syntax is deprecated, use '@property'" % node.name, level) + return node + + def visit_DefNode(self, node): scope_type = self.scope_type - node = self.visit_FuncDefNode(node) - if scope_type != 'cclass' or not node.decorators: - return node - - # transform @property decorators - properties = self._properties[-1] - for decorator_node in node.decorators[::-1]: - decorator = decorator_node.decorator - if decorator.is_name and decorator.name == 'property': - if len(node.decorators) > 1: - return self._reject_decorated_property(node, decorator_node) - name = node.name - node.name = EncodedString('__get__') - node.decorators.remove(decorator_node) - stat_list = [node] - if name in properties: - prop = properties[name] - prop.pos = node.pos - prop.doc = node.doc - prop.body.stats = stat_list - return [] - prop = Nodes.PropertyNode(node.pos, name=name) - prop.doc = node.doc - prop.body = Nodes.StatListNode(node.pos, stats=stat_list) - properties[name] = prop - return [prop] - elif decorator.is_attribute and decorator.obj.name in properties: - handler_name = self._map_property_attribute(decorator.attribute) - if handler_name: + node = self.visit_FuncDefNode(node) + if scope_type != 'cclass' or not node.decorators: + return node + + # transform @property decorators + properties = self._properties[-1] + for decorator_node in node.decorators[::-1]: + decorator = decorator_node.decorator + if decorator.is_name and decorator.name == 'property': + if len(node.decorators) > 1: + return self._reject_decorated_property(node, decorator_node) + name = node.name + node.name = EncodedString('__get__') + node.decorators.remove(decorator_node) + stat_list = [node] + if name in properties: + prop = properties[name] + prop.pos = node.pos + prop.doc = node.doc + prop.body.stats = stat_list + return [] + prop = Nodes.PropertyNode(node.pos, name=name) + prop.doc = node.doc + prop.body = Nodes.StatListNode(node.pos, stats=stat_list) + properties[name] = prop + 
return [prop] + elif decorator.is_attribute and decorator.obj.name in properties: + handler_name = self._map_property_attribute(decorator.attribute) + if handler_name: if decorator.obj.name != node.name: # CPython does not generate an error or warning, but not something useful either. error(decorator_node.pos, "Mismatching property names, expected '%s', got '%s'" % ( decorator.obj.name, node.name)) elif len(node.decorators) > 1: - return self._reject_decorated_property(node, decorator_node) + return self._reject_decorated_property(node, decorator_node) else: return self._add_to_property(properties, node, handler_name, decorator_node) - + # we clear node.decorators, so we need to set the # is_staticmethod/is_classmethod attributes now for decorator in node.decorators: @@ -1394,61 +1394,61 @@ class DecoratorTransform(ScopeTrackingTransform, SkipDeclarations): node.is_classmethod |= func.name == 'classmethod' node.is_staticmethod |= func.name == 'staticmethod' - # transform normal decorators + # transform normal decorators decs = node.decorators node.decorators = None return self.chain_decorators(node, decs, node.name) - - @staticmethod - def _reject_decorated_property(node, decorator_node): - # restrict transformation to outermost decorator as wrapped properties will probably not work - for deco in node.decorators: - if deco != decorator_node: - error(deco.pos, "Property methods with additional decorators are not supported") - return node - - @staticmethod - def _add_to_property(properties, node, name, decorator): - prop = properties[node.name] - node.name = name - node.decorators.remove(decorator) - stats = prop.body.stats - for i, stat in enumerate(stats): - if stat.name == name: - stats[i] = node - break - else: - stats.append(node) - return [] - - @staticmethod - def chain_decorators(node, decorators, name): - """ - Decorators are applied directly in DefNode and PyClassDefNode to avoid - reassignments to the function/class name - except for cdef class methods. - For those, the reassignment is required as methods are originally - defined in the PyMethodDef struct. - - The IndirectionNode allows DefNode to override the decorator. - """ - decorator_result = ExprNodes.NameNode(node.pos, name=name) + + @staticmethod + def _reject_decorated_property(node, decorator_node): + # restrict transformation to outermost decorator as wrapped properties will probably not work + for deco in node.decorators: + if deco != decorator_node: + error(deco.pos, "Property methods with additional decorators are not supported") + return node + + @staticmethod + def _add_to_property(properties, node, name, decorator): + prop = properties[node.name] + node.name = name + node.decorators.remove(decorator) + stats = prop.body.stats + for i, stat in enumerate(stats): + if stat.name == name: + stats[i] = node + break + else: + stats.append(node) + return [] + + @staticmethod + def chain_decorators(node, decorators, name): + """ + Decorators are applied directly in DefNode and PyClassDefNode to avoid + reassignments to the function/class name - except for cdef class methods. + For those, the reassignment is required as methods are originally + defined in the PyMethodDef struct. + + The IndirectionNode allows DefNode to override the decorator. 
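# --- Editorial sketch (not part of this diff): the cdef-class property
# pattern that DecoratorTransform implements above. '@property' turns the
# method into a PropertyNode '__get__'; '@width.setter' / '@width.deleter'
# are mapped through _map_property_attribute to '__set__' / '__del__'.
# 'Rectangle' is a made-up example class.
cdef class Rectangle:
    cdef double _width

    @property
    def width(self):
        return self._width

    @width.setter
    def width(self, value):
        self._width = value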
+ """ + decorator_result = ExprNodes.NameNode(node.pos, name=name) for decorator in decorators[::-1]: decorator_result = ExprNodes.SimpleCallNode( decorator.pos, - function=decorator.decorator, - args=[decorator_result]) + function=decorator.decorator, + args=[decorator_result]) - name_node = ExprNodes.NameNode(node.pos, name=name) + name_node = ExprNodes.NameNode(node.pos, name=name) reassignment = Nodes.SingleAssignmentNode( node.pos, - lhs=name_node, - rhs=decorator_result) + lhs=name_node, + rhs=decorator_result) reassignment = Nodes.IndirectionNode([reassignment]) node.decorator_indirection = reassignment return [node, reassignment] - + class CnameDirectivesTransform(CythonTransform, SkipDeclarations): """ Only part of the CythonUtilityCode pipeline. Must be run before @@ -1482,7 +1482,7 @@ class CnameDirectivesTransform(CythonTransform, SkipDeclarations): raise AssertionError( "argument to cname decorator must be a string literal") - cname = args[0].compile_time_value(None) + cname = args[0].compile_time_value(None) del node.decorators[i] node = Nodes.CnameDecoratorNode(pos=node.pos, node=node, cname=cname) @@ -1708,8 +1708,8 @@ if VALUE is not None: # so it can be pickled *after* self is memoized. unpickle_func = TreeFragment(u""" def %(unpickle_func_name)s(__pyx_type, long __pyx_checksum, __pyx_state): - cdef object __pyx_PickleError - cdef object __pyx_result + cdef object __pyx_PickleError + cdef object __pyx_result if __pyx_checksum != %(checksum)s: from pickle import PickleError as __pyx_PickleError raise __pyx_PickleError("Incompatible checksums (%%s vs %(checksum)s = (%(members)s))" %% __pyx_checksum) @@ -1738,8 +1738,8 @@ if VALUE is not None: pickle_func = TreeFragment(u""" def __reduce_cython__(self): - cdef tuple state - cdef object _dict + cdef tuple state + cdef object _dict cdef bint use_setstate state = (%(members)s) _dict = getattr(self, '__dict__', None) @@ -1788,7 +1788,7 @@ if VALUE is not None: if decorators: transform = DecoratorTransform(self.context) def_node = node.node - _, reassignments = transform.chain_decorators( + _, reassignments = transform.chain_decorators( def_node, decorators, def_node.name) reassignments.analyse_declarations(env) node = [node, reassignments] @@ -1801,7 +1801,7 @@ if VALUE is not None: node.stats.insert(0, node.py_func) node.py_func = self.visit(node.py_func) node.update_fused_defnode_entry(env) - pycfunc = ExprNodes.PyCFunctionNode.from_defnode(node.py_func, binding=True) + pycfunc = ExprNodes.PyCFunctionNode.from_defnode(node.py_func, binding=True) pycfunc = ExprNodes.ProxyNode(pycfunc.coerce_to_temp(env)) node.resulting_fused_function = pycfunc # Create assignment node for our def function @@ -1853,8 +1853,8 @@ if VALUE is not None: node.body = Nodes.NogilTryFinallyStatNode( node.body.pos, body=node.body, - finally_clause=Nodes.EnsureGILNode(node.body.pos), - finally_except_clause=Nodes.EnsureGILNode(node.body.pos)) + finally_clause=Nodes.EnsureGILNode(node.body.pos), + finally_except_clause=Nodes.EnsureGILNode(node.body.pos)) def _handle_fused(self, node): if node.is_generator and node.has_fused_arguments: @@ -1905,8 +1905,8 @@ if VALUE is not None: def visit_DefNode(self, node): node = self.visit_FuncDefNode(node) env = self.current_env() - if isinstance(node, Nodes.DefNode) and node.is_wrapper: - env = env.parent_scope + if isinstance(node, Nodes.DefNode) and node.is_wrapper: + env = env.parent_scope if (not isinstance(node, Nodes.DefNode) or node.fused_py_func or node.is_generator_body or not 
node.needs_assignment_synthesis(env)): @@ -1930,7 +1930,7 @@ if VALUE is not None: else: binding = self.current_directives.get('binding') rhs = ExprNodes.PyCFunctionNode.from_defnode(node, binding) - node.code_object = rhs.code_object + node.code_object = rhs.code_object if node.is_generator: node.gbody.code_object = node.code_object @@ -2161,28 +2161,28 @@ class CalculateQualifiedNamesTransform(EnvTransform): return node def visit_PyCFunctionNode(self, node): - orig_qualified_name = self.qualified_name[:] - if node.def_node.is_wrapper and self.qualified_name and self.qualified_name[-1] == '<locals>': - self.qualified_name.pop() - self._set_qualname(node) - else: - self._set_qualname(node, node.def_node.name) + orig_qualified_name = self.qualified_name[:] + if node.def_node.is_wrapper and self.qualified_name and self.qualified_name[-1] == '<locals>': + self.qualified_name.pop() + self._set_qualname(node) + else: + self._set_qualname(node, node.def_node.name) self.visitchildren(node) - self.qualified_name = orig_qualified_name + self.qualified_name = orig_qualified_name return node def visit_DefNode(self, node): - if node.is_wrapper and self.qualified_name: - assert self.qualified_name[-1] == '<locals>', self.qualified_name - orig_qualified_name = self.qualified_name[:] - self.qualified_name.pop() - self._set_qualname(node) - self._super_visit_FuncDefNode(node) - self.qualified_name = orig_qualified_name - else: - self._set_qualname(node, node.name) - self.visit_FuncDefNode(node) - return node + if node.is_wrapper and self.qualified_name: + assert self.qualified_name[-1] == '<locals>', self.qualified_name + orig_qualified_name = self.qualified_name[:] + self.qualified_name.pop() + self._set_qualname(node) + self._super_visit_FuncDefNode(node) + self.qualified_name = orig_qualified_name + else: + self._set_qualname(node, node.name) + self.visit_FuncDefNode(node) + return node def visit_FuncDefNode(self, node): orig_qualified_name = self.qualified_name[:] @@ -2273,26 +2273,26 @@ class ExpandInplaceOperators(EnvTransform): if lhs.type.is_cpp_class: # No getting around this exact operator here. 
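# --- Editorial sketch (not part of this diff): what ExpandInplaceOperators'
# side_effect_free_reference() guarantees, written schematically as plain
# Python. For "a[f()] += g()", the base and index are bound to temporaries
# (LetRefNodes) so each side effect is evaluated exactly once:
def expand_inplace_add(a, f, g):
    tmp_base = a           # temporary holding the base object
    tmp_index = f()        # temporary holding the index expression
    tmp_base[tmp_index] = tmp_base[tmp_index] + g()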
return node - if isinstance(lhs, ExprNodes.BufferIndexNode): - # There is code to handle this case in InPlaceAssignmentNode + if isinstance(lhs, ExprNodes.BufferIndexNode): + # There is code to handle this case in InPlaceAssignmentNode return node env = self.current_env() def side_effect_free_reference(node, setting=False): - if node.is_name: + if node.is_name: return node, [] elif node.type.is_pyobject and not setting: node = LetRefNode(node) return node, [node] - elif node.is_subscript: + elif node.is_subscript: base, temps = side_effect_free_reference(node.base) index = LetRefNode(node.index) return ExprNodes.IndexNode(node.pos, base=base, index=index), temps + [index] - elif node.is_attribute: + elif node.is_attribute: obj, temps = side_effect_free_reference(node.obj) return ExprNodes.AttributeNode(node.pos, obj=obj, attribute=node.attribute), temps - elif isinstance(node, ExprNodes.BufferIndexNode): - raise ValueError("Don't allow things like attributes of buffer indexing operations") + elif isinstance(node, ExprNodes.BufferIndexNode): + raise ValueError("Don't allow things like attributes of buffer indexing operations") else: node = LetRefNode(node) return node, [node] @@ -2333,7 +2333,7 @@ class AdjustDefByDirectives(CythonTransform, SkipDeclarations): @cython.cclass @cython.ccall @cython.inline - @cython.nogil + @cython.nogil """ def visit_ModuleNode(self, node): @@ -2353,7 +2353,7 @@ class AdjustDefByDirectives(CythonTransform, SkipDeclarations): modifiers = [] if 'inline' in self.directives: modifiers.append('inline') - nogil = self.directives.get('nogil') + nogil = self.directives.get('nogil') except_val = self.directives.get('exceptval') return_type_node = self.directives.get('returns') if return_type_node is None and self.directives['annotation_typing']: @@ -2366,7 +2366,7 @@ class AdjustDefByDirectives(CythonTransform, SkipDeclarations): except_val = (None, False) if 'ccall' in self.directives: node = node.as_cfunction( - overridable=True, modifiers=modifiers, nogil=nogil, + overridable=True, modifiers=modifiers, nogil=nogil, returns=return_type_node, except_val=except_val) return self.visit(node) if 'cfunc' in self.directives: @@ -2374,21 +2374,21 @@ class AdjustDefByDirectives(CythonTransform, SkipDeclarations): error(node.pos, "cfunc directive is not allowed here") else: node = node.as_cfunction( - overridable=False, modifiers=modifiers, nogil=nogil, + overridable=False, modifiers=modifiers, nogil=nogil, returns=return_type_node, except_val=except_val) return self.visit(node) if 'inline' in modifiers: error(node.pos, "Python functions cannot be declared 'inline'") - if nogil: - # TODO: turn this into a "with gil" declaration. - error(node.pos, "Python functions cannot be declared 'nogil'") + if nogil: + # TODO: turn this into a "with gil" declaration. + error(node.pos, "Python functions cannot be declared 'nogil'") self.visitchildren(node) return node - def visit_LambdaNode(self, node): - # No directives should modify lambdas or generator expressions (and also nothing in them). - return node - + def visit_LambdaNode(self, node): + # No directives should modify lambdas or generator expressions (and also nothing in them). 
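# --- Editorial sketch (not part of this diff): the pure-Python-mode
# decorators that AdjustDefByDirectives consumes above. @cython.cfunc turns
# a def into a cdef function, @cython.ccall into a cpdef one, and
# @cython.inline adds the 'inline' modifier; plain Python defs reject
# 'inline'/'nogil', and lambdas are deliberately left untouched.
import cython

@cython.cfunc
@cython.inline
def _add(x: cython.int, y: cython.int) -> cython.int:
    return x + y

@cython.ccall
def add(x: cython.int, y: cython.int) -> cython.int:
    return _add(x, y)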
+ return node + def visit_PyClassDefNode(self, node): if 'cclass' in self.directives: node = node.as_cclass() @@ -2437,8 +2437,8 @@ class AlignFunctionDefinitions(CythonTransform): if pxd_def is None: pxd_def = self.scope.lookup(node.class_name) if pxd_def: - if not pxd_def.defined_in_pxd: - return node + if not pxd_def.defined_in_pxd: + return node outer_scope = self.scope self.scope = pxd_def.type.scope self.visitchildren(node) @@ -2518,13 +2518,13 @@ class RemoveUnreachableCode(CythonTransform): node.else_clause = None return node - def visit_TryFinallyStatNode(self, node): - self.visitchildren(node) - if node.finally_clause.is_terminator: - node.is_terminator = True - return node + def visit_TryFinallyStatNode(self, node): + self.visitchildren(node) + if node.finally_clause.is_terminator: + node.is_terminator = True + return node + - class YieldNodeCollector(TreeVisitor): def __init__(self): @@ -2545,11 +2545,11 @@ class YieldNodeCollector(TreeVisitor): self.has_yield = True self.visitchildren(node) - def visit_AwaitExprNode(self, node): + def visit_AwaitExprNode(self, node): self.yields.append(node) self.has_await = True - self.visitchildren(node) - + self.visitchildren(node) + def visit_ReturnStatNode(self, node): self.visitchildren(node) if node.value: @@ -2576,12 +2576,12 @@ class YieldNodeCollector(TreeVisitor): def visit_GeneratorExpressionNode(self, node): pass - def visit_CArgDeclNode(self, node): - # do not look into annotations - # FIXME: support (yield) in default arguments (currently crashes) - pass + def visit_CArgDeclNode(self, node): + # do not look into annotations + # FIXME: support (yield) in default arguments (currently crashes) + pass + - class MarkClosureVisitor(CythonTransform): def visit_ModuleNode(self, node): @@ -2598,7 +2598,7 @@ class MarkClosureVisitor(CythonTransform): collector = YieldNodeCollector() collector.visitchildren(node) - if node.is_async_def: + if node.is_async_def: coroutine_type = Nodes.AsyncDefNode if collector.has_yield: coroutine_type = Nodes.AsyncGenNode @@ -2612,30 +2612,30 @@ class MarkClosureVisitor(CythonTransform): return node elif collector.has_yield: coroutine_type = Nodes.GeneratorDefNode - else: - return node + else: + return node for i, yield_expr in enumerate(collector.yields, 1): - yield_expr.label_num = i + yield_expr.label_num = i for retnode in collector.returns + collector.finallys + collector.excepts: - retnode.in_generator = True + retnode.in_generator = True - gbody = Nodes.GeneratorBodyDefNode( + gbody = Nodes.GeneratorBodyDefNode( pos=node.pos, name=node.name, body=node.body, is_async_gen_body=node.is_async_def and collector.has_yield) coroutine = coroutine_type( - pos=node.pos, name=node.name, args=node.args, - star_arg=node.star_arg, starstar_arg=node.starstar_arg, - doc=node.doc, decorators=node.decorators, - gbody=gbody, lambda_name=node.lambda_name, - return_type_annotation=node.return_type_annotation) - return coroutine - + pos=node.pos, name=node.name, args=node.args, + star_arg=node.star_arg, starstar_arg=node.starstar_arg, + doc=node.doc, decorators=node.decorators, + gbody=gbody, lambda_name=node.lambda_name, + return_type_annotation=node.return_type_annotation) + return coroutine + def visit_CFuncDefNode(self, node): - self.needs_closure = False - self.visitchildren(node) - node.needs_closure = self.needs_closure - self.needs_closure = True + self.needs_closure = False + self.visitchildren(node) + node.needs_closure = self.needs_closure + self.needs_closure = True if node.needs_closure and node.overridable: 
error(node.pos, "closures inside cpdef functions not yet supported") return node @@ -2652,7 +2652,7 @@ class MarkClosureVisitor(CythonTransform): self.needs_closure = True return node - + class CreateClosureClasses(CythonTransform): # Output closure classes in module scope for all functions # that really need it. @@ -2685,7 +2685,7 @@ class CreateClosureClasses(CythonTransform): if node.is_generator: for scope in node.local_scope.iter_local_scopes(): for entry in scope.entries.values(): - if not (entry.from_closure or entry.is_pyglobal or entry.is_cglobal): + if not (entry.from_closure or entry.is_pyglobal or entry.is_cglobal): entry.in_closure = True from_closure, in_closure = self.find_entries_used_in_closures(node) @@ -2718,12 +2718,12 @@ class CreateClosureClasses(CythonTransform): node.needs_outer_scope = True return - # entry.cname can contain periods (eg. a derived C method of a class). - # We want to use the cname as part of a C struct name, so we replace - # periods with double underscores. + # entry.cname can contain periods (eg. a derived C method of a class). + # We want to use the cname as part of a C struct name, so we replace + # periods with double underscores. as_name = '%s_%s' % ( target_module_scope.next_id(Naming.closure_class_prefix), - node.entry.cname.replace('.','__')) + node.entry.cname.replace('.','__')) entry = target_module_scope.declare_c_class( name=as_name, pos=node.pos, defining=True, @@ -2796,60 +2796,60 @@ class CreateClosureClasses(CythonTransform): return node -class InjectGilHandling(VisitorTransform, SkipDeclarations): - """ - Allow certain Python operations inside of nogil blocks by implicitly acquiring the GIL. - - Must run before the AnalyseDeclarationsTransform to make sure the GILStatNodes get - set up, parallel sections know that the GIL is acquired inside of them, etc. - """ - def __call__(self, root): - self.nogil = False - return super(InjectGilHandling, self).__call__(root) - - # special node handling - - def visit_RaiseStatNode(self, node): - """Allow raising exceptions in nogil sections by wrapping them in a 'with gil' block.""" - if self.nogil: - node = Nodes.GILStatNode(node.pos, state='gil', body=node) - return node - - # further candidates: - # def visit_AssertStatNode(self, node): - # def visit_ReraiseStatNode(self, node): - - # nogil tracking - - def visit_GILStatNode(self, node): - was_nogil = self.nogil - self.nogil = (node.state == 'nogil') - self.visitchildren(node) - self.nogil = was_nogil - return node - - def visit_CFuncDefNode(self, node): - was_nogil = self.nogil - if isinstance(node.declarator, Nodes.CFuncDeclaratorNode): - self.nogil = node.declarator.nogil and not node.declarator.with_gil - self.visitchildren(node) - self.nogil = was_nogil - return node - - def visit_ParallelRangeNode(self, node): - was_nogil = self.nogil - self.nogil = node.nogil - self.visitchildren(node) - self.nogil = was_nogil - return node - - def visit_ExprNode(self, node): - # No special GIL handling inside of expressions for now. - return node - - visit_Node = VisitorTransform.recurse_to_children - - +class InjectGilHandling(VisitorTransform, SkipDeclarations): + """ + Allow certain Python operations inside of nogil blocks by implicitly acquiring the GIL. + + Must run before the AnalyseDeclarationsTransform to make sure the GILStatNodes get + set up, parallel sections know that the GIL is acquired inside of them, etc. 
+ """ + def __call__(self, root): + self.nogil = False + return super(InjectGilHandling, self).__call__(root) + + # special node handling + + def visit_RaiseStatNode(self, node): + """Allow raising exceptions in nogil sections by wrapping them in a 'with gil' block.""" + if self.nogil: + node = Nodes.GILStatNode(node.pos, state='gil', body=node) + return node + + # further candidates: + # def visit_AssertStatNode(self, node): + # def visit_ReraiseStatNode(self, node): + + # nogil tracking + + def visit_GILStatNode(self, node): + was_nogil = self.nogil + self.nogil = (node.state == 'nogil') + self.visitchildren(node) + self.nogil = was_nogil + return node + + def visit_CFuncDefNode(self, node): + was_nogil = self.nogil + if isinstance(node.declarator, Nodes.CFuncDeclaratorNode): + self.nogil = node.declarator.nogil and not node.declarator.with_gil + self.visitchildren(node) + self.nogil = was_nogil + return node + + def visit_ParallelRangeNode(self, node): + was_nogil = self.nogil + self.nogil = node.nogil + self.visitchildren(node) + self.nogil = was_nogil + return node + + def visit_ExprNode(self, node): + # No special GIL handling inside of expressions for now. + return node + + visit_Node = VisitorTransform.recurse_to_children + + class GilCheck(VisitorTransform): """ Call `node.gil_check(env)` on each node to make sure we hold the @@ -2869,28 +2869,28 @@ class GilCheck(VisitorTransform): self.nogil_declarator_only = False return super(GilCheck, self).__call__(root) - def _visit_scoped_children(self, node, gil_state): - was_nogil = self.nogil - outer_attrs = node.outer_attrs - if outer_attrs and len(self.env_stack) > 1: - self.nogil = self.env_stack[-2].nogil - self.visitchildren(node, outer_attrs) - - self.nogil = gil_state + def _visit_scoped_children(self, node, gil_state): + was_nogil = self.nogil + outer_attrs = node.outer_attrs + if outer_attrs and len(self.env_stack) > 1: + self.nogil = self.env_stack[-2].nogil + self.visitchildren(node, outer_attrs) + + self.nogil = gil_state self.visitchildren(node, attrs=None, exclude=outer_attrs) - self.nogil = was_nogil - + self.nogil = was_nogil + def visit_FuncDefNode(self, node): self.env_stack.append(node.local_scope) - inner_nogil = node.local_scope.nogil + inner_nogil = node.local_scope.nogil - if inner_nogil: + if inner_nogil: self.nogil_declarator_only = True - if inner_nogil and node.nogil_check: + if inner_nogil and node.nogil_check: node.nogil_check(node.local_scope) - self._visit_scoped_children(node, inner_nogil) + self._visit_scoped_children(node, inner_nogil) # This cannot be nested, so it doesn't need backup/restore self.nogil_declarator_only = False @@ -2903,9 +2903,9 @@ class GilCheck(VisitorTransform): node.nogil_check() was_nogil = self.nogil - is_nogil = (node.state == 'nogil') + is_nogil = (node.state == 'nogil') - if was_nogil == is_nogil and not self.nogil_declarator_only: + if was_nogil == is_nogil and not self.nogil_declarator_only: if not was_nogil: error(node.pos, "Trying to acquire the GIL while it is " "already held.") @@ -2918,7 +2918,7 @@ class GilCheck(VisitorTransform): # which is wrapped in a StatListNode. Just unpack that. 
node.finally_clause, = node.finally_clause.stats - self._visit_scoped_children(node, is_nogil) + self._visit_scoped_children(node, is_nogil) return node def visit_ParallelRangeNode(self, node): @@ -2965,19 +2965,19 @@ class GilCheck(VisitorTransform): def visit_Node(self, node): if self.env_stack and self.nogil and node.nogil_check: node.nogil_check(self.env_stack[-1]) - if node.outer_attrs: - self._visit_scoped_children(node, self.nogil) - else: - self.visitchildren(node) - if self.nogil: - node.in_nogil_context = True + if node.outer_attrs: + self._visit_scoped_children(node, self.nogil) + else: + self.visitchildren(node) + if self.nogil: + node.in_nogil_context = True return node class TransformBuiltinMethods(EnvTransform): - """ - Replace Cython's own cython.* builtins by the corresponding tree nodes. - """ + """ + Replace Cython's own cython.* builtins by the corresponding tree nodes. + """ def visit_SingleAssignmentNode(self, node): if node.declaration_only: @@ -3137,13 +3137,13 @@ class TransformBuiltinMethods(EnvTransform): node.function.pos, operand1=node.args[0], operand2=node.args[1]) elif function == u'cast': if len(node.args) != 2: - error(node.function.pos, - u"cast() takes exactly two arguments and an optional typecheck keyword") + error(node.function.pos, + u"cast() takes exactly two arguments and an optional typecheck keyword") else: type = node.args[0].analyse_as_type(self.current_env()) if type: - node = ExprNodes.TypecastNode( - node.function.pos, type=type, operand=node.args[1], typecheck=False) + node = ExprNodes.TypecastNode( + node.function.pos, type=type, operand=node.args[1], typecheck=False) else: error(node.args[0].pos, "Not a type") elif function == u'sizeof': @@ -3189,12 +3189,12 @@ class TransformBuiltinMethods(EnvTransform): return self._inject_super(node, func_name) return node - def visit_GeneralCallNode(self, node): - function = node.function.as_cython_attribute() + def visit_GeneralCallNode(self, node): + function = node.function.as_cython_attribute() if function == u'cast': # NOTE: assuming simple tuple/dict nodes for positional_args and keyword_args - args = node.positional_args.args - kwargs = node.keyword_args.compile_time_value(None) + args = node.positional_args.args + kwargs = node.keyword_args.compile_time_value(None) if (len(args) != 2 or len(kwargs) > 1 or (len(kwargs) == 1 and 'typecheck' not in kwargs)): error(node.function.pos, @@ -3205,13 +3205,13 @@ class TransformBuiltinMethods(EnvTransform): typecheck = kwargs.get('typecheck', False) node = ExprNodes.TypecastNode( node.function.pos, type=type, operand=args[1], typecheck=typecheck) - else: + else: error(args[0].pos, "Not a type") - self.visitchildren(node) - return node - - + self.visitchildren(node) + return node + + class ReplaceFusedTypeChecks(VisitorTransform): """ This is not a transform in the pipeline. 
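# --- Editorial sketch (not part of this diff): the cython.cast() call that
# TransformBuiltinMethods rewrites into a TypecastNode; the 'typecheck'
# keyword path is the GeneralCallNode case handled above:
import cython

i = cython.cast(cython.int, 3.7)   # plain C-style cast
# obj = cython.cast(SomeExtType, x, typecheck=True)  # checked cast ('SomeExtType' is made up)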
It is invoked on the specific @@ -3348,11 +3348,11 @@ class DebugTransform(CythonTransform): self.tb.start('Globals') entries = {} - for k, v in node.scope.entries.items(): + for k, v in node.scope.entries.items(): if (v.qualified_name not in self.visited and not - v.name.startswith('__pyx_') and not - v.type.is_cfunction and not - v.type.is_extension_type): + v.name.startswith('__pyx_') and not + v.type.is_cfunction and not + v.type.is_extension_type): entries[k]= v self.serialize_local_variables(entries) @@ -3407,7 +3407,7 @@ class DebugTransform(CythonTransform): def visit_NameNode(self, node): if (self.register_stepinto and - node.type is not None and + node.type is not None and node.type.is_cfunction and getattr(node, 'is_called', False) and node.entry.func_cname is not None): diff --git a/contrib/tools/cython/Cython/Compiler/Parsing.pxd b/contrib/tools/cython/Cython/Compiler/Parsing.pxd index 8117e05c1f..25453b39ab 100644 --- a/contrib/tools/cython/Cython/Compiler/Parsing.pxd +++ b/contrib/tools/cython/Cython/Compiler/Parsing.pxd @@ -44,12 +44,12 @@ cdef p_typecast(PyrexScanner s) cdef p_sizeof(PyrexScanner s) cdef p_yield_expression(PyrexScanner s) cdef p_yield_statement(PyrexScanner s) -cdef p_async_statement(PyrexScanner s, ctx, decorators) +cdef p_async_statement(PyrexScanner s, ctx, decorators) cdef p_power(PyrexScanner s) cdef p_new_expr(PyrexScanner s) cdef p_trailer(PyrexScanner s, node1) cdef p_call_parse_args(PyrexScanner s, bint allow_genexp = *) -cdef p_call_build_packed_args(pos, positional_args, keyword_args) +cdef p_call_build_packed_args(pos, positional_args, keyword_args) cdef p_call(PyrexScanner s, function) cdef p_index(PyrexScanner s, base) cdef tuple p_subscript_list(PyrexScanner s) @@ -72,7 +72,7 @@ cdef _append_escape_sequence(kind, builder, unicode escape_sequence, PyrexScanne cdef tuple _f_string_error_pos(pos, string, Py_ssize_t i) @cython.locals(i=Py_ssize_t, size=Py_ssize_t, c=Py_UCS4, next_start=Py_ssize_t) cdef list p_f_string(PyrexScanner s, unicode_value, pos, bint is_raw) -@cython.locals(i=Py_ssize_t, size=Py_ssize_t, c=Py_UCS4, quote_char=Py_UCS4, NO_CHAR=Py_UCS4) +@cython.locals(i=Py_ssize_t, size=Py_ssize_t, c=Py_UCS4, quote_char=Py_UCS4, NO_CHAR=Py_UCS4) cdef tuple p_f_string_expr(PyrexScanner s, unicode_value, pos, Py_ssize_t starting_index, bint is_raw) cdef p_list_maker(PyrexScanner s) cdef p_comp_iter(PyrexScanner s, body) @@ -114,18 +114,18 @@ cdef p_if_statement(PyrexScanner s) cdef p_if_clause(PyrexScanner s) cdef p_else_clause(PyrexScanner s) cdef p_while_statement(PyrexScanner s) -cdef p_for_statement(PyrexScanner s, bint is_async=*) -cdef dict p_for_bounds(PyrexScanner s, bint allow_testlist=*, bint is_async=*) +cdef p_for_statement(PyrexScanner s, bint is_async=*) +cdef dict p_for_bounds(PyrexScanner s, bint allow_testlist=*, bint is_async=*) cdef p_for_from_relation(PyrexScanner s) cdef p_for_from_step(PyrexScanner s) cdef p_target(PyrexScanner s, terminator) cdef p_for_target(PyrexScanner s) -cdef p_for_iterator(PyrexScanner s, bint allow_testlist=*, bint is_async=*) +cdef p_for_iterator(PyrexScanner s, bint allow_testlist=*, bint is_async=*) cdef p_try_statement(PyrexScanner s) cdef p_except_clause(PyrexScanner s) cdef p_include_statement(PyrexScanner s, ctx) cdef p_with_statement(PyrexScanner s) -cdef p_with_items(PyrexScanner s, bint is_async=*) +cdef p_with_items(PyrexScanner s, bint is_async=*) cdef p_with_template(PyrexScanner s) cdef p_simple_statement(PyrexScanner s, bint first_statement = *) cdef 
p_simple_statement_list(PyrexScanner s, ctx, bint first_statement = *) @@ -135,7 +135,7 @@ cdef p_IF_statement(PyrexScanner s, ctx) cdef p_statement(PyrexScanner s, ctx, bint first_statement = *) cdef p_statement_list(PyrexScanner s, ctx, bint first_statement = *) cdef p_suite(PyrexScanner s, ctx = *) -cdef tuple p_suite_with_docstring(PyrexScanner s, ctx, bint with_doc_only=*) +cdef tuple p_suite_with_docstring(PyrexScanner s, ctx, bint with_doc_only=*) cdef tuple _extract_docstring(node) cdef p_positional_and_keyword_args(PyrexScanner s, end_sy_set, templates = *) @@ -183,17 +183,17 @@ cdef p_c_modifiers(PyrexScanner s) cdef p_c_func_or_var_declaration(PyrexScanner s, pos, ctx) cdef p_ctypedef_statement(PyrexScanner s, ctx) cdef p_decorators(PyrexScanner s) -cdef _reject_cdef_modifier_in_py(PyrexScanner s, name) -cdef p_def_statement(PyrexScanner s, list decorators=*, bint is_async_def=*) +cdef _reject_cdef_modifier_in_py(PyrexScanner s, name) +cdef p_def_statement(PyrexScanner s, list decorators=*, bint is_async_def=*) cdef p_varargslist(PyrexScanner s, terminator=*, bint annotated = *) cdef p_py_arg_decl(PyrexScanner s, bint annotated = *) cdef p_class_statement(PyrexScanner s, decorators) cdef p_c_class_definition(PyrexScanner s, pos, ctx) -cdef tuple p_c_class_options(PyrexScanner s) +cdef tuple p_c_class_options(PyrexScanner s) cdef p_property_decl(PyrexScanner s) cdef p_doc_string(PyrexScanner s) cdef p_ignorable_statement(PyrexScanner s) -cdef dict p_compiler_directive_comments(PyrexScanner s) -cdef p_template_definition(PyrexScanner s) +cdef dict p_compiler_directive_comments(PyrexScanner s) +cdef p_template_definition(PyrexScanner s) cdef p_cpp_class_definition(PyrexScanner s, pos, ctx) cdef p_cpp_class_attribute(PyrexScanner s, ctx) diff --git a/contrib/tools/cython/Cython/Compiler/Parsing.py b/contrib/tools/cython/Cython/Compiler/Parsing.py index 14110fcc06..4d2f12a24a 100644 --- a/contrib/tools/cython/Cython/Compiler/Parsing.py +++ b/contrib/tools/cython/Cython/Compiler/Parsing.py @@ -8,37 +8,37 @@ from __future__ import absolute_import # This should be done automatically import cython cython.declare(Nodes=object, ExprNodes=object, EncodedString=object, - bytes_literal=object, StringEncoding=object, + bytes_literal=object, StringEncoding=object, FileSourceDescriptor=object, lookup_unicodechar=object, unicode_category=object, Future=object, Options=object, error=object, warning=object, Builtin=object, ModuleNode=object, Utils=object, _unicode=object, _bytes=object, re=object, sys=object, _parse_escape_sequences=object, _parse_escape_sequences_raw=object, - partial=object, reduce=object, _IS_PY3=cython.bint, _IS_2BYTE_UNICODE=cython.bint, - _CDEF_MODIFIERS=tuple) + partial=object, reduce=object, _IS_PY3=cython.bint, _IS_2BYTE_UNICODE=cython.bint, + _CDEF_MODIFIERS=tuple) -from io import StringIO +from io import StringIO import re -import sys +import sys from unicodedata import lookup as lookup_unicodechar, category as unicode_category -from functools import partial, reduce +from functools import partial, reduce -from .Scanning import PyrexScanner, FileSourceDescriptor, StringSourceDescriptor +from .Scanning import PyrexScanner, FileSourceDescriptor, StringSourceDescriptor from . import Nodes from . import ExprNodes from . import Builtin from . 
import StringEncoding -from .StringEncoding import EncodedString, bytes_literal, _unicode, _bytes +from .StringEncoding import EncodedString, bytes_literal, _unicode, _bytes from .ModuleNode import ModuleNode from .Errors import error, warning from .. import Utils from . import Future from . import Options -_IS_PY3 = sys.version_info[0] >= 3 +_IS_PY3 = sys.version_info[0] >= 3 _IS_2BYTE_UNICODE = sys.maxunicode == 0xffff -_CDEF_MODIFIERS = ('inline', 'nogil', 'api') +_CDEF_MODIFIERS = ('inline', 'nogil', 'api') + - class Ctx(object): # Parsing context level = 'other' @@ -62,8 +62,8 @@ class Ctx(object): d.update(kwds) return ctx - -def p_ident(s, message="Expected an identifier"): + +def p_ident(s, message="Expected an identifier"): if s.sy == 'IDENT': name = s.systring s.next() @@ -218,7 +218,7 @@ def p_starred_expr(s): starred = False expr = p_bit_expr(s) if starred: - expr = ExprNodes.StarredUnpackingNode(pos, expr) + expr = ExprNodes.StarredUnpackingNode(pos, expr) return expr def p_cascaded_cmp(s): @@ -358,7 +358,7 @@ def p_sizeof(s): s.expect(')') return node - + def p_yield_expression(s): # s.sy == "yield" pos = s.position() @@ -368,8 +368,8 @@ def p_yield_expression(s): is_yield_from = True s.next() if s.sy != ')' and s.sy not in statement_terminators: - # "yield from" does not support implicit tuples, but "yield" does ("yield 1,2") - arg = p_test(s) if is_yield_from else p_testlist(s) + # "yield from" does not support implicit tuples, but "yield" does ("yield 1,2") + arg = p_test(s) if is_yield_from else p_testlist(s) else: if is_yield_from: s.error("'yield from' requires a source argument", @@ -380,47 +380,47 @@ def p_yield_expression(s): else: return ExprNodes.YieldExprNode(pos, arg=arg) - + def p_yield_statement(s): # s.sy == "yield" yield_expr = p_yield_expression(s) return Nodes.ExprStatNode(yield_expr.pos, expr=yield_expr) -def p_async_statement(s, ctx, decorators): - # s.sy >> 'async' ... - if s.sy == 'def': - # 'async def' statements aren't allowed in pxd files - if 'pxd' in ctx.level: - s.error('def statement not allowed here') - s.level = ctx.level - return p_def_statement(s, decorators, is_async_def=True) - elif decorators: - s.error("Decorators can only be followed by functions or classes") - elif s.sy == 'for': - return p_for_statement(s, is_async=True) - elif s.sy == 'with': - s.next() - return p_with_items(s, is_async=True) - else: - s.error("expected one of 'def', 'for', 'with' after 'async'") - - -#power: atom_expr ('**' factor)* -#atom_expr: ['await'] atom trailer* - +def p_async_statement(s, ctx, decorators): + # s.sy >> 'async' ... 
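# --- Editorial sketch (not part of this diff): the three statement forms
# p_async_statement() dispatches on after 'async', plus the 'await' prefix
# consumed in p_power(). 'fetch'/'source' are made-up names.
async def fetch(source):
    async with source.connect() as conn:
        async for item in conn:
            await item.process()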
+ if s.sy == 'def': + # 'async def' statements aren't allowed in pxd files + if 'pxd' in ctx.level: + s.error('def statement not allowed here') + s.level = ctx.level + return p_def_statement(s, decorators, is_async_def=True) + elif decorators: + s.error("Decorators can only be followed by functions or classes") + elif s.sy == 'for': + return p_for_statement(s, is_async=True) + elif s.sy == 'with': + s.next() + return p_with_items(s, is_async=True) + else: + s.error("expected one of 'def', 'for', 'with' after 'async'") + + +#power: atom_expr ('**' factor)* +#atom_expr: ['await'] atom trailer* + def p_power(s): if s.systring == 'new' and s.peek()[0] == 'IDENT': return p_new_expr(s) - await_pos = None - if s.sy == 'await': - await_pos = s.position() - s.next() + await_pos = None + if s.sy == 'await': + await_pos = s.position() + s.next() n1 = p_atom(s) while s.sy in ('(', '[', '.'): n1 = p_trailer(s, n1) - if await_pos: - n1 = ExprNodes.AwaitExprNode(await_pos, arg=n1) + if await_pos: + n1 = ExprNodes.AwaitExprNode(await_pos, arg=n1) if s.sy == '**': pos = s.position() s.next() @@ -428,7 +428,7 @@ def p_power(s): n1 = ExprNodes.binop_node(pos, '**', n1, n2) return n1 - + def p_new_expr(s): # s.systring == 'new'. pos = s.position() @@ -446,39 +446,39 @@ def p_trailer(s, node1): return p_index(s, node1) else: # s.sy == '.' s.next() - name = p_ident(s) + name = p_ident(s) return ExprNodes.AttributeNode(pos, - obj=node1, attribute=name) + obj=node1, attribute=name) + - # arglist: argument (',' argument)* [','] # argument: [test '='] test # Really [keyword '='] test -# since PEP 448: -# argument: ( test [comp_for] | -# test '=' test | -# '**' expr | -# star_expr ) - -def p_call_parse_args(s, allow_genexp=True): +# since PEP 448: +# argument: ( test [comp_for] | +# test '=' test | +# '**' expr | +# star_expr ) + +def p_call_parse_args(s, allow_genexp=True): # s.sy == '(' pos = s.position() s.next() positional_args = [] keyword_args = [] - starstar_seen = False - last_was_tuple_unpack = False - while s.sy != ')': + starstar_seen = False + last_was_tuple_unpack = False + while s.sy != ')': if s.sy == '*': - if starstar_seen: - s.error("Non-keyword arg following keyword arg", pos=s.position()) + if starstar_seen: + s.error("Non-keyword arg following keyword arg", pos=s.position()) + s.next() + positional_args.append(p_test(s)) + last_was_tuple_unpack = True + elif s.sy == '**': s.next() - positional_args.append(p_test(s)) - last_was_tuple_unpack = True - elif s.sy == '**': - s.next() - keyword_args.append(p_test(s)) - starstar_seen = True + keyword_args.append(p_test(s)) + starstar_seen = True else: arg = p_test(s) if s.sy == '=': @@ -486,86 +486,86 @@ def p_call_parse_args(s, allow_genexp=True): if not arg.is_name: s.error("Expected an identifier before '='", pos=arg.pos) - encoded_name = s.context.intern_ustring(arg.name) + encoded_name = s.context.intern_ustring(arg.name) keyword = ExprNodes.IdentifierStringNode( arg.pos, value=encoded_name) arg = p_test(s) keyword_args.append((keyword, arg)) else: if keyword_args: - s.error("Non-keyword arg following keyword arg", pos=arg.pos) - if positional_args and not last_was_tuple_unpack: - positional_args[-1].append(arg) - else: - positional_args.append([arg]) - last_was_tuple_unpack = False + s.error("Non-keyword arg following keyword arg", pos=arg.pos) + if positional_args and not last_was_tuple_unpack: + positional_args[-1].append(arg) + else: + positional_args.append([arg]) + last_was_tuple_unpack = False if s.sy != ',': break s.next() if s.sy in 
('for', 'async'): - if not keyword_args and not last_was_tuple_unpack: - if len(positional_args) == 1 and len(positional_args[0]) == 1: - positional_args = [[p_genexp(s, positional_args[0][0])]] + if not keyword_args and not last_was_tuple_unpack: + if len(positional_args) == 1 and len(positional_args[0]) == 1: + positional_args = [[p_genexp(s, positional_args[0][0])]] s.expect(')') - return positional_args or [[]], keyword_args + return positional_args or [[]], keyword_args + - -def p_call_build_packed_args(pos, positional_args, keyword_args): +def p_call_build_packed_args(pos, positional_args, keyword_args): keyword_dict = None - - subtuples = [ - ExprNodes.TupleNode(pos, args=arg) if isinstance(arg, list) else ExprNodes.AsTupleNode(pos, arg=arg) - for arg in positional_args - ] - # TODO: implement a faster way to join tuples than creating each one and adding them - arg_tuple = reduce(partial(ExprNodes.binop_node, pos, '+'), subtuples) - - if keyword_args: - kwargs = [] - dict_items = [] - for item in keyword_args: - if isinstance(item, tuple): - key, value = item - dict_items.append(ExprNodes.DictItemNode(pos=key.pos, key=key, value=value)) - elif item.is_dict_literal: - # unpack "**{a:b}" directly - dict_items.extend(item.key_value_pairs) - else: - if dict_items: - kwargs.append(ExprNodes.DictNode( - dict_items[0].pos, key_value_pairs=dict_items, reject_duplicates=True)) - dict_items = [] - kwargs.append(item) - - if dict_items: - kwargs.append(ExprNodes.DictNode( - dict_items[0].pos, key_value_pairs=dict_items, reject_duplicates=True)) - - if kwargs: - if len(kwargs) == 1 and kwargs[0].is_dict_literal: - # only simple keyword arguments found -> one dict - keyword_dict = kwargs[0] - else: - # at least one **kwargs - keyword_dict = ExprNodes.MergedDictNode(pos, keyword_args=kwargs) - + + subtuples = [ + ExprNodes.TupleNode(pos, args=arg) if isinstance(arg, list) else ExprNodes.AsTupleNode(pos, arg=arg) + for arg in positional_args + ] + # TODO: implement a faster way to join tuples than creating each one and adding them + arg_tuple = reduce(partial(ExprNodes.binop_node, pos, '+'), subtuples) + + if keyword_args: + kwargs = [] + dict_items = [] + for item in keyword_args: + if isinstance(item, tuple): + key, value = item + dict_items.append(ExprNodes.DictItemNode(pos=key.pos, key=key, value=value)) + elif item.is_dict_literal: + # unpack "**{a:b}" directly + dict_items.extend(item.key_value_pairs) + else: + if dict_items: + kwargs.append(ExprNodes.DictNode( + dict_items[0].pos, key_value_pairs=dict_items, reject_duplicates=True)) + dict_items = [] + kwargs.append(item) + + if dict_items: + kwargs.append(ExprNodes.DictNode( + dict_items[0].pos, key_value_pairs=dict_items, reject_duplicates=True)) + + if kwargs: + if len(kwargs) == 1 and kwargs[0].is_dict_literal: + # only simple keyword arguments found -> one dict + keyword_dict = kwargs[0] + else: + # at least one **kwargs + keyword_dict = ExprNodes.MergedDictNode(pos, keyword_args=kwargs) + return arg_tuple, keyword_dict - + def p_call(s, function): # s.sy == '(' pos = s.position() - positional_args, keyword_args = p_call_parse_args(s) + positional_args, keyword_args = p_call_parse_args(s) - if not keyword_args and len(positional_args) == 1 and isinstance(positional_args[0], list): - return ExprNodes.SimpleCallNode(pos, function=function, args=positional_args[0]) + if not keyword_args and len(positional_args) == 1 and isinstance(positional_args[0], list): + return ExprNodes.SimpleCallNode(pos, function=function, args=positional_args[0]) 
else: - arg_tuple, keyword_dict = p_call_build_packed_args(pos, positional_args, keyword_args) - return ExprNodes.GeneralCallNode( - pos, function=function, positional_args=arg_tuple, keyword_args=keyword_dict) + arg_tuple, keyword_dict = p_call_build_packed_args(pos, positional_args, keyword_args) + return ExprNodes.GeneralCallNode( + pos, function=function, positional_args=arg_tuple, keyword_args=keyword_dict) + - #lambdef: 'lambda' [varargslist] ':' test #subscriptlist: subscript (',' subscript)* [','] @@ -697,14 +697,14 @@ def p_atom(s): return ExprNodes.UnicodeNode(pos, value = unicode_value, bytes_value = bytes_value) elif kind == 'b': return ExprNodes.BytesNode(pos, value = bytes_value) - elif kind == 'f': - return ExprNodes.JoinedStrNode(pos, values = unicode_value) - elif kind == '': - return ExprNodes.StringNode(pos, value = bytes_value, unicode_value = unicode_value) + elif kind == 'f': + return ExprNodes.JoinedStrNode(pos, values = unicode_value) + elif kind == '': + return ExprNodes.StringNode(pos, value = bytes_value, unicode_value = unicode_value) else: - s.error("invalid string kind '%s'" % kind) + s.error("invalid string kind '%s'" % kind) elif sy == 'IDENT': - name = s.systring + name = s.systring if name == "None": result = ExprNodes.NoneNode(pos) elif name == "True": @@ -771,9 +771,9 @@ def wrap_compile_time_constant(pos, value): elif isinstance(value, bool): return ExprNodes.BoolNode(pos, value=value) elif isinstance(value, int): - return ExprNodes.IntNode(pos, value=rep, constant_result=value) + return ExprNodes.IntNode(pos, value=rep, constant_result=value) elif isinstance(value, float): - return ExprNodes.FloatNode(pos, value=rep, constant_result=value) + return ExprNodes.FloatNode(pos, value=rep, constant_result=value) elif isinstance(value, complex): node = ExprNodes.ImagNode(pos, value=repr(value.imag), constant_result=complex(0.0, value.imag)) if value.real: @@ -786,8 +786,8 @@ def wrap_compile_time_constant(pos, value): elif isinstance(value, _unicode): return ExprNodes.UnicodeNode(pos, value=EncodedString(value)) elif isinstance(value, _bytes): - bvalue = bytes_literal(value, 'ascii') # actually: unknown encoding, but BytesLiteral requires one - return ExprNodes.BytesNode(pos, value=bvalue, constant_result=value) + bvalue = bytes_literal(value, 'ascii') # actually: unknown encoding, but BytesLiteral requires one + return ExprNodes.BytesNode(pos, value=bvalue, constant_result=value) elif isinstance(value, tuple): args = [wrap_compile_time_constant(pos, arg) for arg in value] @@ -796,8 +796,8 @@ def wrap_compile_time_constant(pos, value): else: # error already reported return None - elif not _IS_PY3 and isinstance(value, long): - return ExprNodes.IntNode(pos, value=rep.rstrip('L'), constant_result=value) + elif not _IS_PY3 and isinstance(value, long): + return ExprNodes.IntNode(pos, value=rep.rstrip('L'), constant_result=value) error(pos, "Invalid type for compile-time constant: %r (type %s)" % (value, value.__class__.__name__)) return None @@ -806,84 +806,84 @@ def wrap_compile_time_constant(pos, value): def p_cat_string_literal(s): # A sequence of one or more adjacent string literals. 
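# --- illustrative annotation (editor's sketch, not part of the upstream diff) ---
# wrap_compile_time_constant above maps a compile-time Python value to a
# literal node, using repr(value) ('rep') as the source text and keeping the
# object itself as constant_result. A rough stand-in ('wrap_const' and its
# tuple results are hypothetical; the node names are the real ones):
def wrap_const(value):
    rep = repr(value)
    if isinstance(value, bool):       # must precede int: bool subclasses int
        return ('BoolNode', value)
    if isinstance(value, int):
        return ('IntNode', rep, value)
    if isinstance(value, float):
        return ('FloatNode', rep, value)
    return None  # the real function also covers complex, str, bytes, tuple

assert wrap_const(3.5) == ('FloatNode', '3.5', 3.5)
# --- end annotation ---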
# Returns (kind, bytes_value, unicode_value) - # where kind in ('b', 'c', 'u', 'f', '') - pos = s.position() + # where kind in ('b', 'c', 'u', 'f', '') + pos = s.position() kind, bytes_value, unicode_value = p_string_literal(s) if kind == 'c' or s.sy != 'BEGIN_STRING': return kind, bytes_value, unicode_value - bstrings, ustrings, positions = [bytes_value], [unicode_value], [pos] + bstrings, ustrings, positions = [bytes_value], [unicode_value], [pos] bytes_value = unicode_value = None while s.sy == 'BEGIN_STRING': pos = s.position() next_kind, next_bytes_value, next_unicode_value = p_string_literal(s) if next_kind == 'c': error(pos, "Cannot concatenate char literal with another string or char literal") - continue + continue elif next_kind != kind: - # concatenating f strings and normal strings is allowed and leads to an f string - if set([kind, next_kind]) in (set(['f', 'u']), set(['f', ''])): - kind = 'f' - else: + # concatenating f strings and normal strings is allowed and leads to an f string + if set([kind, next_kind]) in (set(['f', 'u']), set(['f', ''])): + kind = 'f' + else: error(pos, "Cannot mix string literals of different types, expected %s'', got %s''" % ( kind, next_kind)) - continue - bstrings.append(next_bytes_value) - ustrings.append(next_unicode_value) - positions.append(pos) + continue + bstrings.append(next_bytes_value) + ustrings.append(next_unicode_value) + positions.append(pos) # join and rewrap the partial literals if kind in ('b', 'c', '') or kind == 'u' and None not in bstrings: # Py3 enforced unicode literals are parsed as bytes/unicode combination - bytes_value = bytes_literal(StringEncoding.join_bytes(bstrings), s.source_encoding) + bytes_value = bytes_literal(StringEncoding.join_bytes(bstrings), s.source_encoding) if kind in ('u', ''): - unicode_value = EncodedString(u''.join([u for u in ustrings if u is not None])) - if kind == 'f': - unicode_value = [] - for u, pos in zip(ustrings, positions): - if isinstance(u, list): - unicode_value += u - else: - # non-f-string concatenated into the f-string - unicode_value.append(ExprNodes.UnicodeNode(pos, value=EncodedString(u))) + unicode_value = EncodedString(u''.join([u for u in ustrings if u is not None])) + if kind == 'f': + unicode_value = [] + for u, pos in zip(ustrings, positions): + if isinstance(u, list): + unicode_value += u + else: + # non-f-string concatenated into the f-string + unicode_value.append(ExprNodes.UnicodeNode(pos, value=EncodedString(u))) return kind, bytes_value, unicode_value - + def p_opt_string_literal(s, required_type='u'): - if s.sy != 'BEGIN_STRING': - return None - pos = s.position() - kind, bytes_value, unicode_value = p_string_literal(s, required_type) - if required_type == 'u': - if kind == 'f': - s.error("f-string not allowed here", pos) - return unicode_value - elif required_type == 'b': - return bytes_value - else: - s.error("internal parser configuration error") - - + if s.sy != 'BEGIN_STRING': + return None + pos = s.position() + kind, bytes_value, unicode_value = p_string_literal(s, required_type) + if required_type == 'u': + if kind == 'f': + s.error("f-string not allowed here", pos) + return unicode_value + elif required_type == 'b': + return bytes_value + else: + s.error("internal parser configuration error") + + def check_for_non_ascii_characters(string): for c in string: if c >= u'\x80': return True return False - + def p_string_literal(s, kind_override=None): # A single string or char literal. Returns (kind, bvalue, uvalue) - # where kind in ('b', 'c', 'u', 'f', ''). 
The 'bvalue' is the source + # where kind in ('b', 'c', 'u', 'f', ''). The 'bvalue' is the source # code byte sequence of the string literal, 'uvalue' is the # decoded Unicode string. Either of the two may be None depending # on the 'kind' of string, only unprefixed strings have both - # representations. In f-strings, the uvalue is a list of the Unicode - # strings and f-string expressions that make up the f-string. + # representations. In f-strings, the uvalue is a list of the Unicode + # strings and f-string expressions that make up the f-string. # s.sy == 'BEGIN_STRING' pos = s.position() is_python3_source = s.context.language_level >= 3 - has_non_ascii_literal_characters = False + has_non_ascii_literal_characters = False string_start_pos = (pos[0], pos[1], pos[2] + len(s.systring)) - kind_string = s.systring.rstrip('"\'').lower() + kind_string = s.systring.rstrip('"\'').lower() if len(kind_string) > 1: if len(set(kind_string)) != len(kind_string): error(pos, 'Duplicate string prefix character') @@ -893,32 +893,32 @@ def p_string_literal(s, kind_override=None): error(pos, 'String prefixes b and f cannot be combined') if 'u' in kind_string and 'f' in kind_string: error(pos, 'String prefixes u and f cannot be combined') - - is_raw = 'r' in kind_string - - if 'c' in kind_string: - # this should never happen, since the lexer does not allow combining c - # with other prefix characters - if len(kind_string) != 1: + + is_raw = 'r' in kind_string + + if 'c' in kind_string: + # this should never happen, since the lexer does not allow combining c + # with other prefix characters + if len(kind_string) != 1: error(pos, 'Invalid string prefix for character literal') - kind = 'c' - elif 'f' in kind_string: + kind = 'c' + elif 'f' in kind_string: kind = 'f' # u is ignored is_raw = True # postpone the escape resolution - elif 'b' in kind_string: - kind = 'b' - elif 'u' in kind_string: - kind = 'u' - else: + elif 'b' in kind_string: + kind = 'b' + elif 'u' in kind_string: + kind = 'u' + else: kind = '' - + if kind == '' and kind_override is None and Future.unicode_literals in s.context.future_directives: chars = StringEncoding.StrLiteralBuilder(s.source_encoding) kind = 'u' else: if kind_override is not None and kind_override in 'ub': kind = kind_override - if kind in ('u', 'f'): # f-strings are scanned exactly like Unicode literals, but are parsed further later + if kind in ('u', 'f'): # f-strings are scanned exactly like Unicode literals, but are parsed further later chars = StringEncoding.UnicodeLiteralBuilder() elif kind == '': chars = StringEncoding.StrLiteralBuilder(s.source_encoding) @@ -929,17 +929,17 @@ def p_string_literal(s, kind_override=None): s.next() sy = s.sy systr = s.systring - # print "p_string_literal: sy =", sy, repr(s.systring) ### + # print "p_string_literal: sy =", sy, repr(s.systring) ### if sy == 'CHARS': chars.append(systr) - if is_python3_source and not has_non_ascii_literal_characters and check_for_non_ascii_characters(systr): - has_non_ascii_literal_characters = True + if is_python3_source and not has_non_ascii_literal_characters and check_for_non_ascii_characters(systr): + has_non_ascii_literal_characters = True elif sy == 'ESCAPE': # in Py2, 'ur' raw unicode strings resolve unicode escapes but nothing else if is_raw and (is_python3_source or kind != 'u' or systr[1] not in u'Uu'): chars.append(systr) if is_python3_source and not has_non_ascii_literal_characters and check_for_non_ascii_characters(systr): - has_non_ascii_literal_characters = True + 
has_non_ascii_literal_characters = True else: _append_escape_sequence(kind, chars, systr, s) elif sy == 'NEWLINE': @@ -959,18 +959,18 @@ def p_string_literal(s, kind_override=None): error(pos, u"invalid character literal: %r" % bytes_value) else: bytes_value, unicode_value = chars.getstrings() - if (has_non_ascii_literal_characters - and is_python3_source and Future.unicode_literals in s.context.future_directives): + if (has_non_ascii_literal_characters + and is_python3_source and Future.unicode_literals in s.context.future_directives): # Python 3 forbids literal non-ASCII characters in byte strings - if kind == 'b': + if kind == 'b': s.error("bytes can only contain ASCII literal characters.", pos=pos) bytes_value = None - if kind == 'f': + if kind == 'f': unicode_value = p_f_string(s, unicode_value, string_start_pos, is_raw='r' in kind_string) s.next() return (kind, bytes_value, unicode_value) - + def _append_escape_sequence(kind, builder, escape_sequence, s): c = escape_sequence[1] if c in u"01234567": @@ -1043,11 +1043,11 @@ def _f_string_error_pos(pos, string, i): def p_f_string(s, unicode_value, pos, is_raw): - # Parses a PEP 498 f-string literal into a list of nodes. Nodes are either UnicodeNodes - # or FormattedValueNodes. - values = [] + # Parses a PEP 498 f-string literal into a list of nodes. Nodes are either UnicodeNodes + # or FormattedValueNodes. + values = [] next_start = 0 - size = len(unicode_value) + size = len(unicode_value) builder = StringEncoding.UnicodeLiteralBuilder() _parse_seq = _parse_escape_sequences_raw if is_raw else _parse_escape_sequences @@ -1063,7 +1063,7 @@ def p_f_string(s, unicode_value, pos, is_raw): if c == '\\': if not is_raw and len(part) > 1: _append_escape_sequence('f', builder, part, s) - else: + else: builder.append(part) elif c == '{': if part == '{{': @@ -1074,150 +1074,150 @@ def p_f_string(s, unicode_value, pos, is_raw): values.append(ExprNodes.UnicodeNode(pos, value=builder.getstring())) builder = StringEncoding.UnicodeLiteralBuilder() next_start, expr_node = p_f_string_expr(s, unicode_value, pos, next_start, is_raw) - values.append(expr_node) + values.append(expr_node) elif c == '}': if part == '}}': builder.append('}') else: error(_f_string_error_pos(pos, unicode_value, end), "f-string: single '}' is not allowed") - else: + else: builder.append(part) - + if builder.chars: values.append(ExprNodes.UnicodeNode(pos, value=builder.getstring())) - return values - - + return values + + def p_f_string_expr(s, unicode_value, pos, starting_index, is_raw): - # Parses a {}-delimited expression inside an f-string. Returns a FormattedValueNode - # and the index in the string that follows the expression. - i = starting_index - size = len(unicode_value) - conversion_char = terminal_char = format_spec = None - format_spec_str = None - NO_CHAR = 2**30 - - nested_depth = 0 - quote_char = NO_CHAR - in_triple_quotes = False + # Parses a {}-delimited expression inside an f-string. Returns a FormattedValueNode + # and the index in the string that follows the expression. 
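# --- illustrative annotation (editor's sketch, not part of the upstream diff) ---
# The scan below walks the f-string expression tracking quote state and
# bracket depth, stopping at the first top-level '!', ':' or '}'. A
# simplified stand-in ('find_terminal' is hypothetical; it omits triple
# quotes, backslash errors and the '!=' special case handled below):
def find_terminal(text):
    depth, quote = 0, None
    for i, c in enumerate(text):
        if quote:
            if c == quote:
                quote = None
        elif c in '\'"':
            quote = c
        elif c in '{[(':
            depth += 1
        elif depth and c in '}])':
            depth -= 1
        elif depth == 0 and c in '!:}':
            return i, c
    return len(text), None

assert find_terminal("f(x, {1: 2})!r}") == (12, '!')
# --- end annotation ---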
+ i = starting_index + size = len(unicode_value) + conversion_char = terminal_char = format_spec = None + format_spec_str = None + NO_CHAR = 2**30 + + nested_depth = 0 + quote_char = NO_CHAR + in_triple_quotes = False backslash_reported = False - - while True: - if i >= size: + + while True: + if i >= size: break # error will be reported below - c = unicode_value[i] - - if quote_char != NO_CHAR: - if c == '\\': + c = unicode_value[i] + + if quote_char != NO_CHAR: + if c == '\\': # avoid redundant error reports along '\' sequences if not backslash_reported: error(_f_string_error_pos(pos, unicode_value, i), "backslashes not allowed in f-strings") backslash_reported = True - elif c == quote_char: - if in_triple_quotes: - if i + 2 < size and unicode_value[i + 1] == c and unicode_value[i + 2] == c: - in_triple_quotes = False - quote_char = NO_CHAR - i += 2 - else: - quote_char = NO_CHAR - elif c in '\'"': - quote_char = c - if i + 2 < size and unicode_value[i + 1] == c and unicode_value[i + 2] == c: - in_triple_quotes = True - i += 2 - elif c in '{[(': - nested_depth += 1 - elif nested_depth != 0 and c in '}])': - nested_depth -= 1 - elif c == '#': + elif c == quote_char: + if in_triple_quotes: + if i + 2 < size and unicode_value[i + 1] == c and unicode_value[i + 2] == c: + in_triple_quotes = False + quote_char = NO_CHAR + i += 2 + else: + quote_char = NO_CHAR + elif c in '\'"': + quote_char = c + if i + 2 < size and unicode_value[i + 1] == c and unicode_value[i + 2] == c: + in_triple_quotes = True + i += 2 + elif c in '{[(': + nested_depth += 1 + elif nested_depth != 0 and c in '}])': + nested_depth -= 1 + elif c == '#': error(_f_string_error_pos(pos, unicode_value, i), "format string cannot include #") - elif nested_depth == 0 and c in '!:}': - # allow != as a special case - if c == '!' and i + 1 < size and unicode_value[i + 1] == '=': - i += 1 - continue - - terminal_char = c - break - i += 1 - - # normalise line endings as the parser expects that - expr_str = unicode_value[starting_index:i].replace('\r\n', '\n').replace('\r', '\n') - expr_pos = (pos[0], pos[1], pos[2] + starting_index + 2) # TODO: find exact code position (concat, multi-line, ...) - - if not expr_str.strip(): + elif nested_depth == 0 and c in '!:}': + # allow != as a special case + if c == '!' and i + 1 < size and unicode_value[i + 1] == '=': + i += 1 + continue + + terminal_char = c + break + i += 1 + + # normalise line endings as the parser expects that + expr_str = unicode_value[starting_index:i].replace('\r\n', '\n').replace('\r', '\n') + expr_pos = (pos[0], pos[1], pos[2] + starting_index + 2) # TODO: find exact code position (concat, multi-line, ...) 
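# --- illustrative annotation (editor's sketch, not part of the upstream diff) ---
# The line-ending normalization above matters because the parser expects
# '\n' line ends. A one-line check of that exact transformation:
assert "a +\r\n b\r".replace('\r\n', '\n').replace('\r', '\n') == "a +\n b\n"
# --- end annotation ---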
+ + if not expr_str.strip(): error(_f_string_error_pos(pos, unicode_value, starting_index), "empty expression not allowed in f-string") - - if terminal_char == '!': - i += 1 - if i + 2 > size: + + if terminal_char == '!': + i += 1 + if i + 2 > size: pass # error will be reported below else: conversion_char = unicode_value[i] i += 1 terminal_char = unicode_value[i] - - if terminal_char == ':': - in_triple_quotes = False - in_string = False - nested_depth = 0 - start_format_spec = i + 1 - while True: - if i >= size: + + if terminal_char == ':': + in_triple_quotes = False + in_string = False + nested_depth = 0 + start_format_spec = i + 1 + while True: + if i >= size: break # error will be reported below - c = unicode_value[i] - if not in_triple_quotes and not in_string: - if c == '{': - nested_depth += 1 - elif c == '}': - if nested_depth > 0: - nested_depth -= 1 - else: - terminal_char = c - break - if c in '\'"': - if not in_string and i + 2 < size and unicode_value[i + 1] == c and unicode_value[i + 2] == c: - in_triple_quotes = not in_triple_quotes - i += 2 - elif not in_triple_quotes: - in_string = not in_string - i += 1 - - format_spec_str = unicode_value[start_format_spec:i] - - if terminal_char != '}': + c = unicode_value[i] + if not in_triple_quotes and not in_string: + if c == '{': + nested_depth += 1 + elif c == '}': + if nested_depth > 0: + nested_depth -= 1 + else: + terminal_char = c + break + if c in '\'"': + if not in_string and i + 2 < size and unicode_value[i + 1] == c and unicode_value[i + 2] == c: + in_triple_quotes = not in_triple_quotes + i += 2 + elif not in_triple_quotes: + in_string = not in_string + i += 1 + + format_spec_str = unicode_value[start_format_spec:i] + + if terminal_char != '}': error(_f_string_error_pos(pos, unicode_value, i), "missing '}' in format string expression" + ( ", found '%s'" % terminal_char if terminal_char else "")) - - # parse the expression as if it was surrounded by parentheses - buf = StringIO('(%s)' % expr_str) - scanner = PyrexScanner(buf, expr_pos[0], parent_scanner=s, source_encoding=s.source_encoding, initial_pos=expr_pos) - expr = p_testlist(scanner) # TODO is testlist right here? - - # validate the conversion char - if conversion_char is not None and not ExprNodes.FormattedValueNode.find_conversion_func(conversion_char): + + # parse the expression as if it was surrounded by parentheses + buf = StringIO('(%s)' % expr_str) + scanner = PyrexScanner(buf, expr_pos[0], parent_scanner=s, source_encoding=s.source_encoding, initial_pos=expr_pos) + expr = p_testlist(scanner) # TODO is testlist right here? 
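# --- illustrative annotation (editor's sketch, not part of the upstream diff) ---
# The '(%s)' wrapping above lets any expression -- including tuples and
# multi-line text -- parse as a single unit. Illustrated with the stdlib
# 'ast' module standing in for PyrexScanner/p_testlist:
import ast

expr_str = "a,\n b"
tree = ast.parse('(%s)' % expr_str, mode='eval')
assert isinstance(tree.body, ast.Tuple)  # one parenthesized expression
# --- end annotation ---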
+ + # validate the conversion char + if conversion_char is not None and not ExprNodes.FormattedValueNode.find_conversion_func(conversion_char): error(expr_pos, "invalid conversion character '%s'" % conversion_char) - - # the format spec is itself treated like an f-string - if format_spec_str: + + # the format spec is itself treated like an f-string + if format_spec_str: format_spec = ExprNodes.JoinedStrNode(pos, values=p_f_string(s, format_spec_str, pos, is_raw)) - - return i + 1, ExprNodes.FormattedValueNode( + + return i + 1, ExprNodes.FormattedValueNode( pos, value=expr, conversion_char=conversion_char, format_spec=format_spec) - - -# since PEP 448: -# list_display ::= "[" [listmaker] "]" -# listmaker ::= (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) + + +# since PEP 448: +# list_display ::= "[" [listmaker] "]" +# listmaker ::= (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) # comp_iter ::= comp_for | comp_if # comp_for ::= ["async"] "for" expression_list "in" testlist [comp_iter] -# comp_if ::= "if" test [comp_iter] +# comp_if ::= "if" test [comp_iter] def p_list_maker(s): # s.sy == '[' @@ -1225,30 +1225,30 @@ def p_list_maker(s): s.next() if s.sy == ']': s.expect(']') - return ExprNodes.ListNode(pos, args=[]) - - expr = p_test_or_starred_expr(s) + return ExprNodes.ListNode(pos, args=[]) + + expr = p_test_or_starred_expr(s) if s.sy in ('for', 'async'): - if expr.is_starred: - s.error("iterable unpacking cannot be used in comprehension") + if expr.is_starred: + s.error("iterable unpacking cannot be used in comprehension") append = ExprNodes.ComprehensionAppendNode(pos, expr=expr) loop = p_comp_for(s, append) s.expect(']') return ExprNodes.ComprehensionNode( - pos, loop=loop, append=append, type=Builtin.list_type, + pos, loop=loop, append=append, type=Builtin.list_type, # list comprehensions leak their loop variable in Py2 - has_local_scope=s.context.language_level >= 3) - - # (merged) list literal - if s.sy == ',': - s.next() - exprs = p_test_or_starred_expr_list(s, expr) - else: - exprs = [expr] - s.expect(']') - return ExprNodes.ListNode(pos, args=exprs) - - + has_local_scope=s.context.language_level >= 3) + + # (merged) list literal + if s.sy == ',': + s.next() + exprs = p_test_or_starred_expr_list(s, expr) + else: + exprs = [expr] + s.expect(']') + return ExprNodes.ListNode(pos, args=exprs) + + def p_comp_iter(s, body): if s.sy in ('for', 'async'): return p_comp_for(s, body) @@ -1283,121 +1283,121 @@ def p_comp_if(s, body): else_clause = None ) -# since PEP 448: -#dictorsetmaker: ( ((test ':' test | '**' expr) -# (comp_for | (',' (test ':' test | '**' expr))* [','])) | -# ((test | star_expr) -# (comp_for | (',' (test | star_expr))* [','])) ) - +# since PEP 448: +#dictorsetmaker: ( ((test ':' test | '**' expr) +# (comp_for | (',' (test ':' test | '**' expr))* [','])) | +# ((test | star_expr) +# (comp_for | (',' (test | star_expr))* [','])) ) + def p_dict_or_set_maker(s): # s.sy == '{' pos = s.position() s.next() if s.sy == '}': s.next() - return ExprNodes.DictNode(pos, key_value_pairs=[]) - - parts = [] - target_type = 0 - last_was_simple_item = False - while True: - if s.sy in ('*', '**'): - # merged set/dict literal - if target_type == 0: - target_type = 1 if s.sy == '*' else 2 # 'stars' - elif target_type != len(s.sy): - s.error("unexpected %sitem found in %s literal" % ( - s.sy, 'set' if target_type == 1 else 'dict')) + return ExprNodes.DictNode(pos, key_value_pairs=[]) + + parts = [] + target_type = 0 + last_was_simple_item = False + while True: + 
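# --- illustrative annotation (editor's sketch, not part of the upstream diff) ---
# In the dict/set-maker loop below, target_type starts undecided (0) and is
# fixed by the first item: 1 for set items, 2 for dict items. Note the
# source's trick 'target_type != len(s.sy)': len('*') == 1 and len('**') == 2
# line up with the set/dict codes. A toy classifier ('classify' is hypothetical):
def classify(first_sep_is_colon, star_kind=None):
    if star_kind is not None:
        return len(star_kind)              # '*' -> 1 (set), '**' -> 2 (dict)
    return 2 if first_sep_is_colon else 1

assert classify(True) == 2        # {key: value, ...} is a dict
assert classify(False, '*') == 1  # {*iterable, ...} is a set
# --- end annotation ---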
if s.sy in ('*', '**'): + # merged set/dict literal + if target_type == 0: + target_type = 1 if s.sy == '*' else 2 # 'stars' + elif target_type != len(s.sy): + s.error("unexpected %sitem found in %s literal" % ( + s.sy, 'set' if target_type == 1 else 'dict')) + s.next() + if s.sy == '*': + s.error("expected expression, found '*'") + item = p_starred_expr(s) + parts.append(item) + last_was_simple_item = False + else: + item = p_test(s) + if target_type == 0: + target_type = 2 if s.sy == ':' else 1 # dict vs. set + if target_type == 2: + # dict literal + s.expect(':') + key = item + value = p_test(s) + item = ExprNodes.DictItemNode(key.pos, key=key, value=value) + if last_was_simple_item: + parts[-1].append(item) + else: + parts.append([item]) + last_was_simple_item = True + + if s.sy == ',': s.next() - if s.sy == '*': - s.error("expected expression, found '*'") - item = p_starred_expr(s) - parts.append(item) - last_was_simple_item = False - else: - item = p_test(s) - if target_type == 0: - target_type = 2 if s.sy == ':' else 1 # dict vs. set - if target_type == 2: - # dict literal - s.expect(':') - key = item - value = p_test(s) - item = ExprNodes.DictItemNode(key.pos, key=key, value=value) - if last_was_simple_item: - parts[-1].append(item) - else: - parts.append([item]) - last_was_simple_item = True - - if s.sy == ',': - s.next() if s.sy == '}': break - else: - break - + else: + break + if s.sy in ('for', 'async'): - # dict/set comprehension - if len(parts) == 1 and isinstance(parts[0], list) and len(parts[0]) == 1: - item = parts[0][0] - if target_type == 2: - assert isinstance(item, ExprNodes.DictItemNode), type(item) - comprehension_type = Builtin.dict_type - append = ExprNodes.DictComprehensionAppendNode( - item.pos, key_expr=item.key, value_expr=item.value) - else: - comprehension_type = Builtin.set_type - append = ExprNodes.ComprehensionAppendNode(item.pos, expr=item) + # dict/set comprehension + if len(parts) == 1 and isinstance(parts[0], list) and len(parts[0]) == 1: + item = parts[0][0] + if target_type == 2: + assert isinstance(item, ExprNodes.DictItemNode), type(item) + comprehension_type = Builtin.dict_type + append = ExprNodes.DictComprehensionAppendNode( + item.pos, key_expr=item.key, value_expr=item.value) + else: + comprehension_type = Builtin.set_type + append = ExprNodes.ComprehensionAppendNode(item.pos, expr=item) loop = p_comp_for(s, append) s.expect('}') - return ExprNodes.ComprehensionNode(pos, loop=loop, append=append, type=comprehension_type) + return ExprNodes.ComprehensionNode(pos, loop=loop, append=append, type=comprehension_type) else: - # syntax error, try to find a good error message - if len(parts) == 1 and not isinstance(parts[0], list): - s.error("iterable unpacking cannot be used in comprehension") - else: - # e.g. "{1,2,3 for ..." 
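# --- illustrative annotation (editor's sketch, not part of the upstream diff) ---
# The merge step below groups runs of simple items into SetNode/DictNode and
# splices starred parts between them, producing a MergedSequenceNode or
# MergedDictNode. A stand-in over plain sets ('merge_set_parts' is
# hypothetical; lists play item runs, sets play *starred parts):
def merge_set_parts(parts):
    result = set()
    for part in parts:
        result |= set(part) if isinstance(part, list) else part
    return result

assert merge_set_parts([[1, 2], {3, 4}, [5]]) == {1, 2, 3, 4, 5}
# --- end annotation ---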
- s.expect('}') - return ExprNodes.DictNode(pos, key_value_pairs=[]) - - s.expect('}') - if target_type == 1: - # (merged) set literal - items = [] - set_items = [] - for part in parts: - if isinstance(part, list): - set_items.extend(part) - else: - if set_items: - items.append(ExprNodes.SetNode(set_items[0].pos, args=set_items)) - set_items = [] - items.append(part) - if set_items: - items.append(ExprNodes.SetNode(set_items[0].pos, args=set_items)) - if len(items) == 1 and items[0].is_set_literal: - return items[0] - return ExprNodes.MergedSequenceNode(pos, args=items, type=Builtin.set_type) - else: - # (merged) dict literal - items = [] - dict_items = [] - for part in parts: - if isinstance(part, list): - dict_items.extend(part) - else: - if dict_items: - items.append(ExprNodes.DictNode(dict_items[0].pos, key_value_pairs=dict_items)) - dict_items = [] - items.append(part) - if dict_items: - items.append(ExprNodes.DictNode(dict_items[0].pos, key_value_pairs=dict_items)) - if len(items) == 1 and items[0].is_dict_literal: - return items[0] - return ExprNodes.MergedDictNode(pos, keyword_args=items, reject_duplicates=False) - - + # syntax error, try to find a good error message + if len(parts) == 1 and not isinstance(parts[0], list): + s.error("iterable unpacking cannot be used in comprehension") + else: + # e.g. "{1,2,3 for ..." + s.expect('}') + return ExprNodes.DictNode(pos, key_value_pairs=[]) + + s.expect('}') + if target_type == 1: + # (merged) set literal + items = [] + set_items = [] + for part in parts: + if isinstance(part, list): + set_items.extend(part) + else: + if set_items: + items.append(ExprNodes.SetNode(set_items[0].pos, args=set_items)) + set_items = [] + items.append(part) + if set_items: + items.append(ExprNodes.SetNode(set_items[0].pos, args=set_items)) + if len(items) == 1 and items[0].is_set_literal: + return items[0] + return ExprNodes.MergedSequenceNode(pos, args=items, type=Builtin.set_type) + else: + # (merged) dict literal + items = [] + dict_items = [] + for part in parts: + if isinstance(part, list): + dict_items.extend(part) + else: + if dict_items: + items.append(ExprNodes.DictNode(dict_items[0].pos, key_value_pairs=dict_items)) + dict_items = [] + items.append(part) + if dict_items: + items.append(ExprNodes.DictNode(dict_items[0].pos, key_value_pairs=dict_items)) + if len(items) == 1 and items[0].is_dict_literal: + return items[0] + return ExprNodes.MergedDictNode(pos, keyword_args=items, reject_duplicates=False) + + # NOTE: no longer in Py3 :) def p_backquote_expr(s): # s.sy == '`' @@ -1423,11 +1423,11 @@ def p_simple_expr_list(s, expr=None): s.next() return exprs - + def p_test_or_starred_expr_list(s, expr=None): exprs = expr is not None and [expr] or [] while s.sy not in expr_terminators: - exprs.append(p_test_or_starred_expr(s)) + exprs.append(p_test_or_starred_expr(s)) if s.sy != ',': break s.next() @@ -1481,7 +1481,7 @@ def p_genexp(s, expr): expr_terminators = cython.declare(set, set([ ')', ']', '}', ':', '=', 'NEWLINE'])) - + #------------------------------------------------------- # # Statements @@ -1495,14 +1495,14 @@ def p_global_statement(s): names = p_ident_list(s) return Nodes.GlobalNode(pos, names = names) - + def p_nonlocal_statement(s): pos = s.position() s.next() names = p_ident_list(s) return Nodes.NonlocalNode(pos, names = names) - + def p_expression_or_assignment(s): expr = p_testlist_star_expr(s) if s.sy == ':' and (expr.is_name or expr.is_subscript or expr.is_attribute): @@ -1523,7 +1523,7 @@ def p_expression_or_assignment(s): expr = 
p_testlist_star_expr(s) expr_list.append(expr) if len(expr_list) == 1: - if re.match(r"([-+*/%^&|]|<<|>>|\*\*|//|@)=", s.sy): + if re.match(r"([-+*/%^&|]|<<|>>|\*\*|//|@)=", s.sy): lhs = expr_list[0] if isinstance(lhs, ExprNodes.SliceIndexNode): # implementation requires IndexNode @@ -1531,7 +1531,7 @@ def p_expression_or_assignment(s): lhs.pos, base=lhs.base, index=make_slice_node(lhs.pos, lhs.start, lhs.stop)) - elif not isinstance(lhs, (ExprNodes.AttributeNode, ExprNodes.IndexNode, ExprNodes.NameNode)): + elif not isinstance(lhs, (ExprNodes.AttributeNode, ExprNodes.IndexNode, ExprNodes.NameNode)): error(lhs.pos, "Illegal operand for inplace operation.") operator = s.sy[:-1] s.next() @@ -1539,17 +1539,17 @@ def p_expression_or_assignment(s): rhs = p_yield_expression(s) else: rhs = p_testlist(s) - return Nodes.InPlaceAssignmentNode(lhs.pos, operator=operator, lhs=lhs, rhs=rhs) + return Nodes.InPlaceAssignmentNode(lhs.pos, operator=operator, lhs=lhs, rhs=rhs) expr = expr_list[0] return Nodes.ExprStatNode(expr.pos, expr=expr) rhs = expr_list[-1] if len(expr_list) == 2: - return Nodes.SingleAssignmentNode(rhs.pos, lhs=expr_list[0], rhs=rhs) + return Nodes.SingleAssignmentNode(rhs.pos, lhs=expr_list[0], rhs=rhs) else: - return Nodes.CascadedAssignmentNode(rhs.pos, lhs_list=expr_list[:-1], rhs=rhs) + return Nodes.CascadedAssignmentNode(rhs.pos, lhs_list=expr_list[:-1], rhs=rhs) + - def p_print_statement(s): # s.sy == 'print' pos = s.position() @@ -1572,12 +1572,12 @@ def p_print_statement(s): ends_with_comma = 1 break args.append(p_test(s)) - arg_tuple = ExprNodes.TupleNode(pos, args=args) + arg_tuple = ExprNodes.TupleNode(pos, args=args) return Nodes.PrintStatNode(pos, - arg_tuple=arg_tuple, stream=stream, - append_newline=not ends_with_comma) + arg_tuple=arg_tuple, stream=stream, + append_newline=not ends_with_comma) + - def p_exec_statement(s): # s.sy == 'exec' pos = s.position() @@ -1670,43 +1670,43 @@ def p_raise_statement(s): else: return Nodes.ReraiseStatNode(pos) - + def p_import_statement(s): # s.sy in ('import', 'cimport') pos = s.position() kind = s.sy s.next() - items = [p_dotted_name(s, as_allowed=1)] + items = [p_dotted_name(s, as_allowed=1)] while s.sy == ',': s.next() - items.append(p_dotted_name(s, as_allowed=1)) + items.append(p_dotted_name(s, as_allowed=1)) stats = [] - is_absolute = Future.absolute_import in s.context.future_directives + is_absolute = Future.absolute_import in s.context.future_directives for pos, target_name, dotted_name, as_name in items: if kind == 'cimport': - stat = Nodes.CImportStatNode( - pos, - module_name=dotted_name, - as_name=as_name, - is_absolute=is_absolute) + stat = Nodes.CImportStatNode( + pos, + module_name=dotted_name, + as_name=as_name, + is_absolute=is_absolute) else: if as_name and "." 
in dotted_name: - name_list = ExprNodes.ListNode(pos, args=[ - ExprNodes.IdentifierStringNode(pos, value=s.context.intern_ustring("*"))]) + name_list = ExprNodes.ListNode(pos, args=[ + ExprNodes.IdentifierStringNode(pos, value=s.context.intern_ustring("*"))]) else: name_list = None - stat = Nodes.SingleAssignmentNode( - pos, - lhs=ExprNodes.NameNode(pos, name=as_name or target_name), - rhs=ExprNodes.ImportNode( - pos, - module_name=ExprNodes.IdentifierStringNode(pos, value=dotted_name), - level=0 if is_absolute else None, - name_list=name_list)) + stat = Nodes.SingleAssignmentNode( + pos, + lhs=ExprNodes.NameNode(pos, name=as_name or target_name), + rhs=ExprNodes.ImportNode( + pos, + module_name=ExprNodes.IdentifierStringNode(pos, value=dotted_name), + level=0 if is_absolute else None, + name_list=name_list)) stats.append(stat) - return Nodes.StatListNode(pos, stats=stats) + return Nodes.StatListNode(pos, stats=stats) + - def p_from_import_statement(s, first_statement = 0): # s.sy == 'from' pos = s.position() @@ -1721,7 +1721,7 @@ def p_from_import_statement(s, first_statement = 0): level = None if level is not None and s.sy in ('import', 'cimport'): # we are dealing with "from .. import foo, bar" - dotted_name_pos, dotted_name = s.position(), s.context.intern_ustring('') + dotted_name_pos, dotted_name = s.position(), s.context.intern_ustring('') else: if level is None and Future.absolute_import in s.context.future_directives: level = 0 @@ -1734,7 +1734,7 @@ def p_from_import_statement(s, first_statement = 0): is_cimport = kind == 'cimport' is_parenthesized = False if s.sy == '*': - imported_names = [(s.position(), s.context.intern_ustring("*"), None, None)] + imported_names = [(s.position(), s.context.intern_ustring("*"), None, None)] s.next() else: if s.sy == '(': @@ -1775,11 +1775,11 @@ def p_from_import_statement(s, first_statement = 0): items = [] for (name_pos, name, as_name, kind) in imported_names: imported_name_strings.append( - ExprNodes.IdentifierStringNode(name_pos, value=name)) + ExprNodes.IdentifierStringNode(name_pos, value=name)) items.append( - (name, ExprNodes.NameNode(name_pos, name=as_name or name))) + (name, ExprNodes.NameNode(name_pos, name=as_name or name))) import_list = ExprNodes.ListNode( - imported_names[0][0], args=imported_name_strings) + imported_names[0][0], args=imported_name_strings) return Nodes.FromImportStatNode(pos, module = ExprNodes.ImportNode(dotted_name_pos, module_name = ExprNodes.IdentifierStringNode(pos, value = dotted_name), @@ -1788,8 +1788,8 @@ def p_from_import_statement(s, first_statement = 0): items = items) -imported_name_kinds = cython.declare(set, set(['class', 'struct', 'union'])) - +imported_name_kinds = cython.declare(set, set(['class', 'struct', 'union'])) + def p_imported_name(s, is_cimport): pos = s.position() kind = None @@ -1800,7 +1800,7 @@ def p_imported_name(s, is_cimport): as_name = p_as_name(s) return (pos, name, as_name, kind) - + def p_dotted_name(s, as_allowed): pos = s.position() target_name = p_ident(s) @@ -1811,9 +1811,9 @@ def p_dotted_name(s, as_allowed): names.append(p_ident(s)) if as_allowed: as_name = p_as_name(s) - return (pos, target_name, s.context.intern_ustring(u'.'.join(names)), as_name) + return (pos, target_name, s.context.intern_ustring(u'.'.join(names)), as_name) + - def p_as_name(s): if s.sy == 'IDENT' and s.systring == 'as': s.next() @@ -1821,7 +1821,7 @@ def p_as_name(s): else: return None - + def p_assert_statement(s): # s.sy == 'assert' pos = s.position() @@ -1834,7 +1834,7 @@ def 
p_assert_statement(s): value = None return Nodes.AssertStatNode(pos, cond = cond, value = value) - + statement_terminators = cython.declare(set, set([';', 'NEWLINE', 'EOF'])) def p_if_statement(s): @@ -1874,25 +1874,25 @@ def p_while_statement(s): condition = test, body = body, else_clause = else_clause) - -def p_for_statement(s, is_async=False): + +def p_for_statement(s, is_async=False): # s.sy == 'for' pos = s.position() s.next() - kw = p_for_bounds(s, allow_testlist=True, is_async=is_async) + kw = p_for_bounds(s, allow_testlist=True, is_async=is_async) body = p_suite(s) else_clause = p_else_clause(s) - kw.update(body=body, else_clause=else_clause, is_async=is_async) + kw.update(body=body, else_clause=else_clause, is_async=is_async) return Nodes.ForStatNode(pos, **kw) - -def p_for_bounds(s, allow_testlist=True, is_async=False): + +def p_for_bounds(s, allow_testlist=True, is_async=False): target = p_for_target(s) if s.sy == 'in': s.next() - iterator = p_for_iterator(s, allow_testlist, is_async=is_async) - return dict(target=target, iterator=iterator) - elif not s.in_python_file and not is_async: + iterator = p_for_iterator(s, allow_testlist, is_async=is_async) + return dict(target=target, iterator=iterator) + elif not s.in_python_file and not is_async: if s.sy == 'from': s.next() bound1 = p_bit_expr(s) @@ -1962,20 +1962,20 @@ def p_target(s, terminator): else: return expr - + def p_for_target(s): return p_target(s, 'in') - -def p_for_iterator(s, allow_testlist=True, is_async=False): + +def p_for_iterator(s, allow_testlist=True, is_async=False): pos = s.position() if allow_testlist: expr = p_testlist(s) else: expr = p_or_test(s) - return (ExprNodes.AsyncIteratorNode if is_async else ExprNodes.IteratorNode)(pos, sequence=expr) + return (ExprNodes.AsyncIteratorNode if is_async else ExprNodes.IteratorNode)(pos, sequence=expr) + - def p_try_statement(s): # s.sy == 'try' pos = s.position() @@ -2043,14 +2043,14 @@ def p_include_statement(s, ctx): include_file_path = s.context.find_include_file(include_file_name, pos) if include_file_path: s.included_files.append(include_file_name) - with Utils.open_source_file(include_file_path) as f: + with Utils.open_source_file(include_file_path) as f: if Options.source_root: import os rel_path = os.path.relpath(include_file_path, Options.source_root) else: rel_path = None source_desc = FileSourceDescriptor(include_file_path, rel_path) - s2 = PyrexScanner(f, source_desc, s, source_encoding=f.encoding, parse_comments=s.parse_comments) + s2 = PyrexScanner(f, source_desc, s, source_encoding=f.encoding, parse_comments=s.parse_comments) tree = p_statement_list(s2, ctx) return tree else: @@ -2058,21 +2058,21 @@ def p_include_statement(s, ctx): else: return Nodes.PassStatNode(pos) - + def p_with_statement(s): - s.next() # 'with' + s.next() # 'with' if s.systring == 'template' and not s.in_python_file: node = p_with_template(s) else: node = p_with_items(s) return node - -def p_with_items(s, is_async=False): + +def p_with_items(s, is_async=False): pos = s.position() if not s.in_python_file and s.sy == 'IDENT' and s.systring in ('nogil', 'gil'): - if is_async: - s.error("with gil/nogil cannot be async") + if is_async: + s.error("with gil/nogil cannot be async") state = s.systring s.next() if s.sy == ',': @@ -2080,7 +2080,7 @@ def p_with_items(s, is_async=False): body = p_with_items(s) else: body = p_suite(s) - return Nodes.GILStatNode(pos, state=state, body=body) + return Nodes.GILStatNode(pos, state=state, body=body) else: manager = p_test(s) target = None @@ -2089,12 
+2089,12 @@ def p_with_items(s, is_async=False): target = p_starred_expr(s) if s.sy == ',': s.next() - body = p_with_items(s, is_async=is_async) + body = p_with_items(s, is_async=is_async) else: body = p_suite(s) - return Nodes.WithStatNode(pos, manager=manager, target=target, body=body, is_async=is_async) + return Nodes.WithStatNode(pos, manager=manager, target=target, body=body, is_async=is_async) + - def p_with_template(s): pos = s.position() templates = [] @@ -2255,13 +2255,13 @@ def p_statement(s, ctx, first_statement = 0): s.level = ctx.level decorators = p_decorators(s) if not ctx.allow_struct_enum_decorator and s.sy not in ('def', 'cdef', 'cpdef', 'class', 'async'): - if s.sy == 'IDENT' and s.systring == 'async': - pass # handled below - else: - s.error("Decorators can only be followed by functions or classes") + if s.sy == 'IDENT' and s.systring == 'async': + pass # handled below + else: + s.error("Decorators can only be followed by functions or classes") elif s.sy == 'pass' and cdef_flag: # empty cdef block - return p_pass_statement(s, with_newline=1) + return p_pass_statement(s, with_newline=1) overridable = 0 if s.sy == 'cdef': @@ -2275,11 +2275,11 @@ def p_statement(s, ctx, first_statement = 0): if ctx.level not in ('module', 'module_pxd', 'function', 'c_class', 'c_class_pxd'): s.error('cdef statement not allowed here') s.level = ctx.level - node = p_cdef_statement(s, ctx(overridable=overridable)) + node = p_cdef_statement(s, ctx(overridable=overridable)) if decorators is not None: - tup = (Nodes.CFuncDefNode, Nodes.CVarDefNode, Nodes.CClassDefNode) + tup = (Nodes.CFuncDefNode, Nodes.CVarDefNode, Nodes.CClassDefNode) if ctx.allow_struct_enum_decorator: - tup += (Nodes.CStructOrUnionDefNode, Nodes.CEnumDefNode) + tup += (Nodes.CStructOrUnionDefNode, Nodes.CEnumDefNode) if not isinstance(node, tup): s.error("Decorators can only be followed by functions or classes") node.decorators = decorators @@ -2322,22 +2322,22 @@ def p_statement(s, ctx, first_statement = 0): return p_try_statement(s) elif s.sy == 'with': return p_with_statement(s) - elif s.sy == 'async': - s.next() - return p_async_statement(s, ctx, decorators) + elif s.sy == 'async': + s.next() + return p_async_statement(s, ctx, decorators) else: - if s.sy == 'IDENT' and s.systring == 'async': - ident_name = s.systring - # PEP 492 enables the async/await keywords when it spots "async def ..." - s.next() - if s.sy == 'def': - return p_async_statement(s, ctx, decorators) - elif decorators: - s.error("Decorators can only be followed by functions or classes") - s.put_back('IDENT', ident_name) # re-insert original token - return p_simple_statement_list(s, ctx, first_statement=first_statement) - - + if s.sy == 'IDENT' and s.systring == 'async': + ident_name = s.systring + # PEP 492 enables the async/await keywords when it spots "async def ..." + s.next() + if s.sy == 'def': + return p_async_statement(s, ctx, decorators) + elif decorators: + s.error("Decorators can only be followed by functions or classes") + s.put_back('IDENT', ident_name) # re-insert original token + return p_simple_statement_list(s, ctx, first_statement=first_statement) + + def p_statement_list(s, ctx, first_statement = 0): # Parse a series of statements separated by newlines. 
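# --- illustrative annotation (editor's sketch, not part of the upstream diff) ---
# The 'async' handling above peeks one token: if "async" is not followed by
# "def", s.put_back() re-inserts the IDENT so the statement reparses
# normally. A toy one-slot push-back buffer ('ToyScanner' is hypothetical):
class ToyScanner:
    def __init__(self, tokens):
        self.tokens = list(tokens)
        self.pushed = None
    def next(self):
        if self.pushed is not None:
            tok, self.pushed = self.pushed, None
            return tok
        return self.tokens.pop(0)
    def put_back(self, tok):
        self.pushed = tok

sc = ToyScanner(['async', 'x', '='])
tok = sc.next()
if tok == 'async' and sc.tokens[0] != 'def':
    sc.put_back(tok)        # not "async def": restore the token
assert sc.next() == 'async'
# --- end annotation ---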
pos = s.position() @@ -2412,7 +2412,7 @@ def p_positional_and_keyword_args(s, end_sy_set, templates = None): arg = Nodes.CComplexBaseTypeNode(base_type.pos, base_type = base_type, declarator = declarator) parsed_type = True - keyword_node = ExprNodes.IdentifierStringNode(arg.pos, value=ident) + keyword_node = ExprNodes.IdentifierStringNode(arg.pos, value=ident) keyword_args.append((keyword_node, arg)) was_keyword = True @@ -2455,31 +2455,31 @@ def p_calling_convention(s): else: return "" - + calling_convention_words = cython.declare( set, set(["__stdcall", "__cdecl", "__fastcall"])) - + def p_c_complex_base_type(s, templates = None): # s.sy == '(' pos = s.position() s.next() - base_type = p_c_base_type(s, templates=templates) - declarator = p_c_declarator(s, empty=True) - type_node = Nodes.CComplexBaseTypeNode( - pos, base_type=base_type, declarator=declarator) - if s.sy == ',': - components = [type_node] - while s.sy == ',': - s.next() - if s.sy == ')': - break - base_type = p_c_base_type(s, templates=templates) - declarator = p_c_declarator(s, empty=True) - components.append(Nodes.CComplexBaseTypeNode( - pos, base_type=base_type, declarator=declarator)) - type_node = Nodes.CTupleBaseTypeNode(pos, components = components) - + base_type = p_c_base_type(s, templates=templates) + declarator = p_c_declarator(s, empty=True) + type_node = Nodes.CComplexBaseTypeNode( + pos, base_type=base_type, declarator=declarator) + if s.sy == ',': + components = [type_node] + while s.sy == ',': + s.next() + if s.sy == ')': + break + base_type = p_c_base_type(s, templates=templates) + declarator = p_c_declarator(s, empty=True) + components.append(Nodes.CComplexBaseTypeNode( + pos, base_type=base_type, declarator=declarator)) + type_node = Nodes.CTupleBaseTypeNode(pos, components = components) + s.expect(')') if s.sy == '[': if is_memoryviewslice_access(s): @@ -2728,7 +2728,7 @@ special_basic_c_types = cython.declare(dict, { # name : (signed, longness) "Py_UNICODE" : (0, 0), "Py_UCS4" : (0, 0), - "Py_hash_t" : (2, 0), + "Py_hash_t" : (2, 0), "Py_ssize_t" : (2, 0), "ssize_t" : (2, 0), "size_t" : (0, 0), @@ -2783,7 +2783,7 @@ def p_c_declarator(s, ctx = Ctx(), empty = 0, is_type = 0, cmethod_flag = 0, if s.sy == '(': s.next() if s.sy == ')' or looking_at_name(s): - base = Nodes.CNameDeclaratorNode(pos, name=s.context.intern_ustring(u""), cname=None) + base = Nodes.CNameDeclaratorNode(pos, name=s.context.intern_ustring(u""), cname=None) result = p_c_func_declarator(s, pos, ctx, base, cmethod_flag) else: result = p_c_declarator(s, ctx, empty = empty, is_type = is_type, @@ -2835,8 +2835,8 @@ supported_overloaded_operators = cython.declare(set, set([ '+', '-', '*', '/', '%', '++', '--', '~', '|', '&', '^', '<<', '>>', ',', '==', '!=', '>=', '>', '<=', '<', - '[]', '()', '!', '=', - 'bool', + '[]', '()', '!', '=', + 'bool', ])) def p_c_simple_declarator(s, ctx, empty, is_type, cmethod_flag, @@ -2877,7 +2877,7 @@ def p_c_simple_declarator(s, ctx, empty, is_type, cmethod_flag, else: rhs = None if s.sy == 'IDENT': - name = s.systring + name = s.systring if empty: error(s.position(), "Declarator should be empty") s.next() @@ -2913,13 +2913,13 @@ def p_c_simple_declarator(s, ctx, empty, is_type, cmethod_flag, s.error("Overloading operator '%s' not yet supported." % op, fatal=False) name += op - elif op == 'IDENT': - op = s.systring; - if op not in supported_overloaded_operators: - s.error("Overloading operator '%s' not yet supported." 
% op, - fatal=False) - name = name + ' ' + op - s.next() + elif op == 'IDENT': + op = s.systring; + if op not in supported_overloaded_operators: + s.error("Overloading operator '%s' not yet supported." % op, + fatal=False) + name = name + ' ' + op + s.next() result = Nodes.CNameDeclaratorNode(pos, name = name, cname = cname, default = rhs) result.calling_convention = calling_convention @@ -2955,9 +2955,9 @@ def p_exception_value_clause(s): name = s.systring s.next() exc_val = p_name(s, name) - elif s.sy == '*': - exc_val = ExprNodes.CharNode(s.position(), value=u'*') - s.next() + elif s.sy == '*': + exc_val = ExprNodes.CharNode(s.position(), value=u'*') + s.next() else: if s.sy == '?': exc_check = 1 @@ -2965,7 +2965,7 @@ def p_exception_value_clause(s): exc_val = p_test(s) return exc_val, exc_check -c_arg_list_terminators = cython.declare(set, set(['*', '**', '.', ')', ':'])) +c_arg_list_terminators = cython.declare(set, set(['*', '**', '.', ')', ':'])) def p_c_arg_list(s, ctx = Ctx(), in_pyfunc = 0, cmethod_flag = 0, nonempty_declarators = 0, kw_only = 0, annotated = 1): @@ -3278,14 +3278,14 @@ def p_c_func_or_var_declaration(s, pos, ctx): is_const_method = 1 else: is_const_method = 0 - if s.sy == '->': - # Special enough to give a better error message and keep going. - s.error( - "Return type annotation is not allowed in cdef/cpdef signatures. " - "Please define it before the function name, as in C signatures.", - fatal=False) - s.next() - p_test(s) # Keep going, but ignore result. + if s.sy == '->': + # Special enough to give a better error message and keep going. + s.error( + "Return type annotation is not allowed in cdef/cpdef signatures. " + "Please define it before the function name, as in C signatures.", + fatal=False) + s.next() + p_test(s) # Keep going, but ignore result. if s.sy == ':': if ctx.level not in ('module', 'c_class', 'module_pxd', 'c_class_pxd', 'cpp_class') and not ctx.templates: s.error("C function definition not allowed here") @@ -3362,59 +3362,59 @@ def p_decorators(s): s.next() decstring = p_dotted_name(s, as_allowed=0)[2] names = decstring.split('.') - decorator = ExprNodes.NameNode(pos, name=s.context.intern_ustring(names[0])) + decorator = ExprNodes.NameNode(pos, name=s.context.intern_ustring(names[0])) for name in names[1:]: - decorator = ExprNodes.AttributeNode( - pos, attribute=s.context.intern_ustring(name), obj=decorator) + decorator = ExprNodes.AttributeNode( + pos, attribute=s.context.intern_ustring(name), obj=decorator) if s.sy == '(': decorator = p_call(s, decorator) decorators.append(Nodes.DecoratorNode(pos, decorator=decorator)) s.expect_newline("Expected a newline after decorator") return decorators - -def _reject_cdef_modifier_in_py(s, name): - """Step over incorrectly placed cdef modifiers (@see _CDEF_MODIFIERS) to provide a good error message for them. - """ - if s.sy == 'IDENT' and name in _CDEF_MODIFIERS: - # Special enough to provide a good error message. - s.error("Cannot use cdef modifier '%s' in Python function signature. Use a decorator instead." % name, fatal=False) - return p_ident(s) # Keep going, in case there are other errors. - return name - - -def p_def_statement(s, decorators=None, is_async_def=False): + +def _reject_cdef_modifier_in_py(s, name): + """Step over incorrectly placed cdef modifiers (@see _CDEF_MODIFIERS) to provide a good error message for them. + """ + if s.sy == 'IDENT' and name in _CDEF_MODIFIERS: + # Special enough to provide a good error message. 
+ s.error("Cannot use cdef modifier '%s' in Python function signature. Use a decorator instead." % name, fatal=False) + return p_ident(s) # Keep going, in case there are other errors. + return name + + +def p_def_statement(s, decorators=None, is_async_def=False): # s.sy == 'def' pos = s.position() - # PEP 492 switches the async/await keywords on in "async def" functions - if is_async_def: - s.enter_async() + # PEP 492 switches the async/await keywords on in "async def" functions + if is_async_def: + s.enter_async() s.next() - name = _reject_cdef_modifier_in_py(s, p_ident(s)) - s.expect( - '(', - "Expected '(', found '%s'. Did you use cdef syntax in a Python declaration? " - "Use decorators and Python type annotations instead." % ( - s.systring if s.sy == 'IDENT' else s.sy)) + name = _reject_cdef_modifier_in_py(s, p_ident(s)) + s.expect( + '(', + "Expected '(', found '%s'. Did you use cdef syntax in a Python declaration? " + "Use decorators and Python type annotations instead." % ( + s.systring if s.sy == 'IDENT' else s.sy)) args, star_arg, starstar_arg = p_varargslist(s, terminator=')') s.expect(')') - _reject_cdef_modifier_in_py(s, s.systring) + _reject_cdef_modifier_in_py(s, s.systring) return_type_annotation = None if s.sy == '->': s.next() return_type_annotation = p_test(s) - _reject_cdef_modifier_in_py(s, s.systring) - + _reject_cdef_modifier_in_py(s, s.systring) + doc, body = p_suite_with_docstring(s, Ctx(level='function')) - if is_async_def: - s.exit_async() - - return Nodes.DefNode( - pos, name=name, args=args, star_arg=star_arg, starstar_arg=starstar_arg, - doc=doc, body=body, decorators=decorators, is_async_def=is_async_def, - return_type_annotation=return_type_annotation) - - + if is_async_def: + s.exit_async() + + return Nodes.DefNode( + pos, name=name, args=args, star_arg=star_arg, starstar_arg=starstar_arg, + doc=doc, body=body, decorators=decorators, is_async_def=is_async_def, + return_type_annotation=return_type_annotation) + + def p_varargslist(s, terminator=')', annotated=1): args = p_c_arg_list(s, in_pyfunc = 1, nonempty_declarators = 1, annotated = annotated) @@ -3433,8 +3433,8 @@ def p_varargslist(s, terminator=')', annotated=1): if s.sy == '**': s.next() starstar_arg = p_py_arg_decl(s, annotated=annotated) - if s.sy == ',': - s.next() + if s.sy == ',': + s.next() return (args, star_arg, starstar_arg) def p_py_arg_decl(s, annotated = 1): @@ -3446,18 +3446,18 @@ def p_py_arg_decl(s, annotated = 1): annotation = p_test(s) return Nodes.PyArgDeclNode(pos, name = name, annotation = annotation) - + def p_class_statement(s, decorators): # s.sy == 'class' pos = s.position() s.next() - class_name = EncodedString(p_ident(s)) - class_name.encoding = s.source_encoding # FIXME: why is this needed? + class_name = EncodedString(p_ident(s)) + class_name.encoding = s.source_encoding # FIXME: why is this needed? 
arg_tuple = None keyword_dict = None if s.sy == '(': - positional_args, keyword_args = p_call_parse_args(s, allow_genexp=False) - arg_tuple, keyword_dict = p_call_build_packed_args(pos, positional_args, keyword_args) + positional_args, keyword_args = p_call_parse_args(s, allow_genexp=False) + arg_tuple, keyword_dict = p_call_build_packed_args(pos, positional_args, keyword_args) if arg_tuple is None: # XXX: empty arg_tuple arg_tuple = ExprNodes.TupleNode(pos, args=[]) @@ -3469,7 +3469,7 @@ def p_class_statement(s, decorators): doc=doc, body=body, decorators=decorators, force_py3_semantics=s.context.language_level >= 3) - + def p_c_class_definition(s, pos, ctx): # s.sy == 'class' s.next() @@ -3489,7 +3489,7 @@ def p_c_class_definition(s, pos, ctx): objstruct_name = None typeobj_name = None bases = None - check_size = None + check_size = None if s.sy == '(': positional_args, keyword_args = p_call_parse_args(s, allow_genexp=False) if keyword_args: @@ -3501,7 +3501,7 @@ def p_c_class_definition(s, pos, ctx): if s.sy == '[': if ctx.visibility not in ('public', 'extern') and not ctx.api: error(s.position(), "Name options only allowed for 'public', 'api', or 'extern' C class") - objstruct_name, typeobj_name, check_size = p_c_class_options(s) + objstruct_name, typeobj_name, check_size = p_c_class_options(s) if s.sy == ':': if ctx.level == 'module_pxd': body_level = 'c_class_pxd' @@ -3540,16 +3540,16 @@ def p_c_class_definition(s, pos, ctx): bases = bases, objstruct_name = objstruct_name, typeobj_name = typeobj_name, - check_size = check_size, + check_size = check_size, in_pxd = ctx.level == 'module_pxd', doc = doc, body = body) - + def p_c_class_options(s): objstruct_name = None typeobj_name = None - check_size = None + check_size = None s.expect('[') while 1: if s.sy != 'IDENT': @@ -3560,16 +3560,16 @@ def p_c_class_options(s): elif s.systring == 'type': s.next() typeobj_name = p_ident(s) - elif s.systring == 'check_size': - s.next() - check_size = p_ident(s) - if check_size not in ('ignore', 'warn', 'error'): - s.error("Expected one of ignore, warn or error, found %r" % check_size) + elif s.systring == 'check_size': + s.next() + check_size = p_ident(s) + if check_size not in ('ignore', 'warn', 'error'): + s.error("Expected one of ignore, warn or error, found %r" % check_size) if s.sy != ',': break s.next() - s.expect(']', "Expected 'object', 'type' or 'check_size'") - return objstruct_name, typeobj_name, check_size + s.expect(']', "Expected 'object', 'type' or 'check_size'") + return objstruct_name, typeobj_name, check_size def p_property_decl(s): @@ -3648,64 +3648,64 @@ def p_code(s, level=None, ctx=Ctx): repr(s.sy), repr(s.systring))) return body - + _match_compiler_directive_comment = cython.declare(object, re.compile( r"^#\s*cython\s*:\s*((\w|[.])+\s*=.*)$").match) - + def p_compiler_directive_comments(s): result = {} while s.sy == 'commentline': - pos = s.position() + pos = s.position() m = _match_compiler_directive_comment(s.systring) if m: - directives_string = m.group(1).strip() + directives_string = m.group(1).strip() try: - new_directives = Options.parse_directive_list(directives_string, ignore_unknown=True) - except ValueError as e: + new_directives = Options.parse_directive_list(directives_string, ignore_unknown=True) + except ValueError as e: s.error(e.args[0], fatal=False) - s.next() - continue - - for name in new_directives: - if name not in result: - pass - elif new_directives[name] == result[name]: - warning(pos, "Duplicate directive found: %s" % (name,)) - else: - 
s.error("Conflicting settings found for top-level directive %s: %r and %r" % ( - name, result[name], new_directives[name]), pos=pos) - - if 'language_level' in new_directives: - # Make sure we apply the language level already to the first token that follows the comments. - s.context.set_language_level(new_directives['language_level']) - - result.update(new_directives) - + s.next() + continue + + for name in new_directives: + if name not in result: + pass + elif new_directives[name] == result[name]: + warning(pos, "Duplicate directive found: %s" % (name,)) + else: + s.error("Conflicting settings found for top-level directive %s: %r and %r" % ( + name, result[name], new_directives[name]), pos=pos) + + if 'language_level' in new_directives: + # Make sure we apply the language level already to the first token that follows the comments. + s.context.set_language_level(new_directives['language_level']) + + result.update(new_directives) + s.next() return result - + def p_module(s, pxd, full_module_name, ctx=Ctx): pos = s.position() directive_comments = p_compiler_directive_comments(s) s.parse_comments = False - if s.context.language_level is None: - s.context.set_language_level(2) # Arcadia default. - - if s.context.language_level is None: - s.context.set_language_level(2) - if pos[0].filename: - import warnings - warnings.warn( - "Cython directive 'language_level' not set, using 2 for now (Py2). " - "This will change in a later release! File: %s" % pos[0].filename, - FutureWarning, - stacklevel=1 if cython.compiled else 2, - ) - + if s.context.language_level is None: + s.context.set_language_level(2) # Arcadia default. + + if s.context.language_level is None: + s.context.set_language_level(2) + if pos[0].filename: + import warnings + warnings.warn( + "Cython directive 'language_level' not set, using 2 for now (Py2). " + "This will change in a later release! 
File: %s" % pos[0].filename, + FutureWarning, + stacklevel=1 if cython.compiled else 2, + ) + doc = p_doc_string(s) if pxd: level = 'module_pxd' @@ -3720,16 +3720,16 @@ def p_module(s, pxd, full_module_name, ctx=Ctx): full_module_name = full_module_name, directive_comments = directive_comments) -def p_template_definition(s): - name = p_ident(s) - if s.sy == '=': - s.expect('=') - s.expect('*') - required = False - else: - required = True - return name, required - +def p_template_definition(s): + name = p_ident(s) + if s.sy == '=': + s.expect('=') + s.expect('*') + required = False + else: + required = True + return name, required + def p_cpp_class_definition(s, pos, ctx): # s.sy == 'cppclass' s.next() @@ -3742,21 +3742,21 @@ def p_cpp_class_definition(s, pos, ctx): error(pos, "Qualified class name not allowed C++ class") if s.sy == '[': s.next() - templates = [p_template_definition(s)] + templates = [p_template_definition(s)] while s.sy == ',': s.next() - templates.append(p_template_definition(s)) + templates.append(p_template_definition(s)) s.expect(']') - template_names = [name for name, required in templates] + template_names = [name for name, required in templates] else: templates = None - template_names = None + template_names = None if s.sy == '(': s.next() - base_classes = [p_c_base_type(s, templates = template_names)] + base_classes = [p_c_base_type(s, templates = template_names)] while s.sy == ',': s.next() - base_classes.append(p_c_base_type(s, templates = template_names)) + base_classes.append(p_c_base_type(s, templates = template_names)) s.expect(')') else: base_classes = [] @@ -3769,7 +3769,7 @@ def p_cpp_class_definition(s, pos, ctx): s.expect_indent() attributes = [] body_ctx = Ctx(visibility = ctx.visibility, level='cpp_class', nogil=nogil or ctx.nogil) - body_ctx.templates = template_names + body_ctx.templates = template_names while s.sy != 'DEDENT': if s.sy != 'pass': attributes.append(p_cpp_class_attribute(s, body_ctx)) @@ -3795,13 +3795,13 @@ def p_cpp_class_attribute(s, ctx): decorators = p_decorators(s) if s.systring == 'cppclass': return p_cpp_class_definition(s, s.position(), ctx) - elif s.systring == 'ctypedef': - return p_ctypedef_statement(s, ctx) - elif s.sy == 'IDENT' and s.systring in struct_enum_union: - if s.systring != 'enum': - return p_cpp_class_definition(s, s.position(), ctx) - else: - return p_struct_enum(s, s.position(), ctx) + elif s.systring == 'ctypedef': + return p_ctypedef_statement(s, ctx) + elif s.sy == 'IDENT' and s.systring in struct_enum_union: + if s.systring != 'enum': + return p_cpp_class_definition(s, s.position(), ctx) + else: + return p_struct_enum(s, s.position(), ctx) else: node = p_c_func_or_var_declaration(s, s.position(), ctx) if decorators is not None: @@ -3829,7 +3829,7 @@ def print_parse_tree(f, node, level, key = None): t = type(node) if t is tuple: f.write("(%s @ %s\n" % (node[0], node[1])) - for i in range(2, len(node)): + for i in range(2, len(node)): print_parse_tree(f, node[i], level+1) f.write("%s)\n" % ind) return @@ -3845,7 +3845,7 @@ def print_parse_tree(f, node, level, key = None): return elif t is list: f.write("[\n") - for i in range(len(node)): + for i in range(len(node)): print_parse_tree(f, node[i], level+1) f.write("%s]\n" % ind) return diff --git a/contrib/tools/cython/Cython/Compiler/Pipeline.py b/contrib/tools/cython/Cython/Compiler/Pipeline.py index 891937248b..5194c3e49b 100644 --- a/contrib/tools/cython/Cython/Compiler/Pipeline.py +++ b/contrib/tools/cython/Cython/Compiler/Pipeline.py @@ -14,7 +14,7 @@ 
from . import Naming # def dumptree(t): # For quick debugging in pipelines - print(t.dump()) + print(t.dump()) return t def abort_on_errors(node): @@ -29,7 +29,7 @@ def parse_stage_factory(context): full_module_name = compsrc.full_module_name initial_pos = (source_desc, 1, 0) saved_cimport_from_pyx, Options.cimport_from_pyx = Options.cimport_from_pyx, False - scope = context.find_module(full_module_name, pos = initial_pos, need_pxd = 0) + scope = context.find_module(full_module_name, pos = initial_pos, need_pxd = 0) Options.cimport_from_pyx = saved_cimport_from_pyx tree = context.parse(source_desc, scope, pxd = 0, full_module_name = full_module_name) tree.compilation_source = compsrc @@ -54,20 +54,20 @@ def generate_pyx_code_stage_factory(options, result): return result return generate_pyx_code_stage - + def inject_pxd_code_stage_factory(context): def inject_pxd_code_stage(module_node): - for name, (statlistnode, scope) in context.pxds.items(): + for name, (statlistnode, scope) in context.pxds.items(): module_node.merge_in(statlistnode, scope) return module_node return inject_pxd_code_stage - + def use_utility_code_definitions(scope, target, seen=None): if seen is None: seen = set() - for entry in scope.entries.values(): + for entry in scope.entries.values(): if entry in seen: continue @@ -79,54 +79,54 @@ def use_utility_code_definitions(scope, target, seen=None): elif entry.as_module: use_utility_code_definitions(entry.as_module, target, seen) - -def sort_utility_codes(utilcodes): - ranks = {} - def get_rank(utilcode): - if utilcode not in ranks: - ranks[utilcode] = 0 # prevent infinite recursion on circular dependencies - original_order = len(ranks) - ranks[utilcode] = 1 + min([get_rank(dep) for dep in utilcode.requires or ()] or [-1]) + original_order * 1e-8 - return ranks[utilcode] - for utilcode in utilcodes: - get_rank(utilcode) - return [utilcode for utilcode, _ in sorted(ranks.items(), key=lambda kv: kv[1])] - - -def normalize_deps(utilcodes): - deps = {} - for utilcode in utilcodes: - deps[utilcode] = utilcode - - def unify_dep(dep): - if dep in deps: - return deps[dep] - else: - deps[dep] = dep - return dep - - for utilcode in utilcodes: - utilcode.requires = [unify_dep(dep) for dep in utilcode.requires or ()] - - + +def sort_utility_codes(utilcodes): + ranks = {} + def get_rank(utilcode): + if utilcode not in ranks: + ranks[utilcode] = 0 # prevent infinite recursion on circular dependencies + original_order = len(ranks) + ranks[utilcode] = 1 + min([get_rank(dep) for dep in utilcode.requires or ()] or [-1]) + original_order * 1e-8 + return ranks[utilcode] + for utilcode in utilcodes: + get_rank(utilcode) + return [utilcode for utilcode, _ in sorted(ranks.items(), key=lambda kv: kv[1])] + + +def normalize_deps(utilcodes): + deps = {} + for utilcode in utilcodes: + deps[utilcode] = utilcode + + def unify_dep(dep): + if dep in deps: + return deps[dep] + else: + deps[dep] = dep + return dep + + for utilcode in utilcodes: + utilcode.requires = [unify_dep(dep) for dep in utilcode.requires or ()] + + def inject_utility_code_stage_factory(context): def inject_utility_code_stage(module_node): - module_node.prepare_utility_code() + module_node.prepare_utility_code() use_utility_code_definitions(context.cython_scope, module_node.scope) - module_node.scope.utility_code_list = sort_utility_codes(module_node.scope.utility_code_list) - normalize_deps(module_node.scope.utility_code_list) + module_node.scope.utility_code_list = sort_utility_codes(module_node.scope.utility_code_list) + 
normalize_deps(module_node.scope.utility_code_list) added = [] # Note: the list might be extended inside the loop (if some utility code # pulls in other utility code, explicitly or implicitly) for utilcode in module_node.scope.utility_code_list: - if utilcode in added: - continue + if utilcode in added: + continue added.append(utilcode) if utilcode.requires: for dep in utilcode.requires: - if dep not in added and dep not in module_node.scope.utility_code_list: + if dep not in added and dep not in module_node.scope.utility_code_list: module_node.scope.utility_code_list.append(dep) - tree = utilcode.get_tree(cython_scope=context.cython_scope) + tree = utilcode.get_tree(cython_scope=context.cython_scope) if tree: module_node.merge_in(tree.body, tree.scope, merge_scope=True) return module_node @@ -141,7 +141,7 @@ def create_pipeline(context, mode, exclude_classes=()): assert mode in ('pyx', 'py', 'pxd') from .Visitor import PrintTree from .ParseTreeTransforms import WithTransform, NormalizeTree, PostParse, PxdPostParse - from .ParseTreeTransforms import ForwardDeclareTypes, InjectGilHandling, AnalyseDeclarationsTransform + from .ParseTreeTransforms import ForwardDeclareTypes, InjectGilHandling, AnalyseDeclarationsTransform from .ParseTreeTransforms import AnalyseExpressionsTransform, FindInvalidUseOfFusedTypes from .ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform from .ParseTreeTransforms import TrackNumpyAttributes, InterpretCompilerDirectives, TransformBuiltinMethods @@ -186,7 +186,7 @@ def create_pipeline(context, mode, exclude_classes=()): InterpretCompilerDirectives(context, context.compiler_directives), ParallelRangeTransform(context), AdjustDefByDirectives(context), - WithTransform(context), + WithTransform(context), MarkClosureVisitor(context), _align_function_definitions, RemoveUnreachableCode(context), @@ -194,12 +194,12 @@ def create_pipeline(context, mode, exclude_classes=()): FlattenInListTransform(), DecoratorTransform(context), ForwardDeclareTypes(context), - InjectGilHandling(), + InjectGilHandling(), AnalyseDeclarationsTransform(context), AutoTestDictTransform(context), EmbedSignature(context), EarlyReplaceBuiltinCalls(context), ## Necessary? - TransformBuiltinMethods(context), + TransformBuiltinMethods(context), MarkParallelAssignments(context), ControlFlowAnalysis(context), RemoveUnreachableCode(context), @@ -211,8 +211,8 @@ def create_pipeline(context, mode, exclude_classes=()): AnalyseExpressionsTransform(context), FindInvalidUseOfFusedTypes(context), ExpandInplaceOperators(context), - IterationTransform(context), - SwitchTransform(context), + IterationTransform(context), + SwitchTransform(context), OptimizeBuiltinCalls(context), ## Necessary? 
CreateClosureClasses(context), ## After all lookups and type inference CalculateQualifiedNamesTransform(context), @@ -344,7 +344,7 @@ def run_pipeline(pipeline, source, printtree=True): continue if DebugFlags.debug_verbose_pipeline: t = time() - print("Entering pipeline phase %r" % phase) + print("Entering pipeline phase %r" % phase) # create a new wrapper for each step to show the name in profiles phase_name = getattr(phase, '__name__', type(phase).__name__) try: @@ -354,16 +354,16 @@ def run_pipeline(pipeline, source, printtree=True): run = _pipeline_entry_points[phase_name] = exec_ns[phase_name] data = run(phase, data) if DebugFlags.debug_verbose_pipeline: - print(" %.3f seconds" % (time() - t)) - except CompileError as err: + print(" %.3f seconds" % (time() - t)) + except CompileError as err: # err is set Errors.report_error(err, use_stack=False) error = err - except InternalError as err: + except InternalError as err: # Only raise if there was not an earlier error if Errors.num_errors == 0: raise error = err - except AbortError as err: + except AbortError as err: error = err return (error, data) diff --git a/contrib/tools/cython/Cython/Compiler/PyrexTypes.py b/contrib/tools/cython/Cython/Compiler/PyrexTypes.py index 913d163597..3d4931cea6 100644 --- a/contrib/tools/cython/Cython/Compiler/PyrexTypes.py +++ b/contrib/tools/cython/Cython/Compiler/PyrexTypes.py @@ -6,19 +6,19 @@ from __future__ import absolute_import import copy import hashlib -import re - -try: - reduce -except NameError: - from functools import reduce - -from Cython.Utils import cached_function +import re + +try: + reduce +except NameError: + from functools import reduce + +from Cython.Utils import cached_function from .Code import UtilityCode, LazyUtilityCode, TempitaUtilityCode from . import StringEncoding from . import Naming -from .Errors import error, warning +from .Errors import error, warning class BaseType(object): @@ -27,9 +27,9 @@ class BaseType(object): # List of attribute names of any subtypes subtypes = [] - _empty_declaration = None + _empty_declaration = None _specialization_name = None - default_format_spec = None + default_format_spec = None def can_coerce_to_pyobject(self, env): return False @@ -37,20 +37,20 @@ class BaseType(object): def can_coerce_from_pyobject(self, env): return False - def can_coerce_to_pystring(self, env, format_spec=None): - return False - - def convert_to_pystring(self, cvalue, code, format_spec=None): - raise NotImplementedError("C types that support string formatting must override this method") - + def can_coerce_to_pystring(self, env, format_spec=None): + return False + + def convert_to_pystring(self, cvalue, code, format_spec=None): + raise NotImplementedError("C types that support string formatting must override this method") + def cast_code(self, expr_code): - return "((%s)%s)" % (self.empty_declaration_code(), expr_code) + return "((%s)%s)" % (self.empty_declaration_code(), expr_code) + + def empty_declaration_code(self): + if self._empty_declaration is None: + self._empty_declaration = self.declaration_code('') + return self._empty_declaration - def empty_declaration_code(self): - if self._empty_declaration is None: - self._empty_declaration = self.declaration_code('') - return self._empty_declaration - def specialization_name(self): if self._specialization_name is None: # This is not entirely robust. 
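The Pipeline.py hunks above restore sort_utility_codes(), which orders a module's utility-code snippets so that dependencies are emitted before the code that requires them: a snippet's rank is 1 plus the minimum rank of its requirements (-1 standing in for "no requirements"), seeded with 0 first to stop recursion on dependency cycles, with a tiny fraction of the discovery order added as a tie-breaker. A minimal standalone sketch of that ranking, using a hypothetical UtilCode class in place of Cython's real UtilityCode objects:

    class UtilCode(object):
        def __init__(self, name, requires=None):
            self.name = name
            self.requires = requires or []

    def sort_utility_codes(utilcodes):
        ranks = {}
        def get_rank(utilcode):
            if utilcode not in ranks:
                ranks[utilcode] = 0  # seed: prevents infinite recursion on circular dependencies
                original_order = len(ranks)
                ranks[utilcode] = (1 + min([get_rank(dep) for dep in utilcode.requires] or [-1])
                                   + original_order * 1e-8)  # keep input order among equal ranks
            return ranks[utilcode]
        for utilcode in utilcodes:
            get_rank(utilcode)
        return [utilcode for utilcode, _ in sorted(ranks.items(), key=lambda kv: kv[1])]

    base = UtilCode("base")
    helper = UtilCode("helper", requires=[base])
    main = UtilCode("main", requires=[helper])
    print([u.name for u in sort_utility_codes([main, helper, base])])
    # prints ['base', 'helper', 'main']: requirements sort before their dependents

normalize_deps(), restored alongside it, then maps equal dependencies onto a single shared instance, so the injection loop that follows sees each utility code only once.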
@@ -117,7 +117,7 @@ class BaseType(object): http://en.cppreference.com/w/cpp/language/function_template#Template_argument_deduction """ - return {} + return {} def __lt__(self, other): """ @@ -249,7 +249,7 @@ class PyrexType(BaseType): is_returncode = 0 is_error = 0 is_buffer = 0 - is_ctuple = 0 + is_ctuple = 0 is_memoryviewslice = 0 is_pythran_expr = 0 is_numpy_buffer = 0 @@ -338,7 +338,7 @@ def public_decl(base_code, dll_linkage): else: return base_code -def create_typedef_type(name, base_type, cname, is_external=0, namespace=None): +def create_typedef_type(name, base_type, cname, is_external=0, namespace=None): is_fused = base_type.is_fused if base_type.is_complex or is_fused: if is_external: @@ -351,7 +351,7 @@ def create_typedef_type(name, base_type, cname, is_external=0, namespace=None): return base_type else: - return CTypedefType(name, base_type, cname, is_external, namespace) + return CTypedefType(name, base_type, cname, is_external, namespace) class CTypedefType(BaseType): @@ -375,13 +375,13 @@ class CTypedefType(BaseType): subtypes = ['typedef_base_type'] - def __init__(self, name, base_type, cname, is_external=0, namespace=None): + def __init__(self, name, base_type, cname, is_external=0, namespace=None): assert not base_type.is_complex self.typedef_name = name self.typedef_cname = cname self.typedef_base_type = base_type self.typedef_is_external = is_external - self.typedef_namespace = namespace + self.typedef_namespace = namespace def invalid_value(self): return self.typedef_base_type.invalid_value() @@ -395,8 +395,8 @@ class CTypedefType(BaseType): base_code = self.typedef_name else: base_code = public_decl(self.typedef_cname, dll_linkage) - if self.typedef_namespace is not None and not pyrex: - base_code = "%s::%s" % (self.typedef_namespace.empty_declaration_code(), base_code) + if self.typedef_namespace is not None and not pyrex: + base_code = "%s::%s" % (self.typedef_namespace.empty_declaration_code(), base_code) return self.base_declaration_code(base_code, entity_code) def as_argument_type(self): @@ -411,15 +411,15 @@ class CTypedefType(BaseType): else: return BaseType.cast_code(self, expr_code) - def specialize(self, values): - base_type = self.typedef_base_type.specialize(values) - namespace = self.typedef_namespace.specialize(values) if self.typedef_namespace else None - if base_type is self.typedef_base_type and namespace is self.typedef_namespace: - return self - else: - return create_typedef_type(self.typedef_name, base_type, self.typedef_cname, - 0, namespace) - + def specialize(self, values): + base_type = self.typedef_base_type.specialize(values) + namespace = self.typedef_namespace.specialize(values) if self.typedef_namespace else None + if base_type is self.typedef_base_type and namespace is self.typedef_namespace: + return self + else: + return create_typedef_type(self.typedef_name, base_type, self.typedef_cname, + 0, namespace) + def __repr__(self): return "<CTypedefType %s>" % self.typedef_cname @@ -428,7 +428,7 @@ class CTypedefType(BaseType): def _create_utility_code(self, template_utility_code, template_function_name): - type_name = type_identifier(self.typedef_cname) + type_name = type_identifier(self.typedef_cname) utility_code = template_utility_code.specialize( type = self.typedef_cname, TypeName = type_name) @@ -441,9 +441,9 @@ class CTypedefType(BaseType): base_type = self.typedef_base_type if type(base_type) is CIntType: self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name() - env.use_utility_code(TempitaUtilityCode.load_cached( + 
env.use_utility_code(TempitaUtilityCode.load_cached( "CIntToPy", "TypeConversion.c", - context={"TYPE": self.empty_declaration_code(), + context={"TYPE": self.empty_declaration_code(), "TO_PY_FUNCTION": self.to_py_function})) return True elif base_type.is_float: @@ -451,17 +451,17 @@ class CTypedefType(BaseType): elif base_type.is_complex: pass # XXX implement! pass - elif base_type.is_cpp_string: - cname = "__pyx_convert_PyObject_string_to_py_%s" % type_identifier(self) - context = { - 'cname': cname, - 'type': self.typedef_cname, - } - from .UtilityCode import CythonUtilityCode - env.use_utility_code(CythonUtilityCode.load( - "string.to_py", "CppConvert.pyx", context=context)) - self.to_py_function = cname - return True + elif base_type.is_cpp_string: + cname = "__pyx_convert_PyObject_string_to_py_%s" % type_identifier(self) + context = { + 'cname': cname, + 'type': self.typedef_cname, + } + from .UtilityCode import CythonUtilityCode + env.use_utility_code(CythonUtilityCode.load( + "string.to_py", "CppConvert.pyx", context=context)) + self.to_py_function = cname + return True if self.to_py_utility_code: env.use_utility_code(self.to_py_utility_code) return True @@ -474,62 +474,62 @@ class CTypedefType(BaseType): base_type = self.typedef_base_type if type(base_type) is CIntType: self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name() - env.use_utility_code(TempitaUtilityCode.load_cached( + env.use_utility_code(TempitaUtilityCode.load_cached( "CIntFromPy", "TypeConversion.c", - context={"TYPE": self.empty_declaration_code(), + context={"TYPE": self.empty_declaration_code(), "FROM_PY_FUNCTION": self.from_py_function})) return True elif base_type.is_float: pass # XXX implement! elif base_type.is_complex: pass # XXX implement! - elif base_type.is_cpp_string: - cname = '__pyx_convert_string_from_py_%s' % type_identifier(self) - context = { - 'cname': cname, - 'type': self.typedef_cname, - } - from .UtilityCode import CythonUtilityCode - env.use_utility_code(CythonUtilityCode.load( - "string.from_py", "CppConvert.pyx", context=context)) - self.from_py_function = cname - return True + elif base_type.is_cpp_string: + cname = '__pyx_convert_string_from_py_%s' % type_identifier(self) + context = { + 'cname': cname, + 'type': self.typedef_cname, + } + from .UtilityCode import CythonUtilityCode + env.use_utility_code(CythonUtilityCode.load( + "string.from_py", "CppConvert.pyx", context=context)) + self.from_py_function = cname + return True if self.from_py_utility_code: env.use_utility_code(self.from_py_utility_code) return True # delegation return self.typedef_base_type.create_from_py_utility_code(env) - def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): - if to_py_function is None: - to_py_function = self.to_py_function - return self.typedef_base_type.to_py_call_code( - source_code, result_code, result_type, to_py_function) - - def from_py_call_code(self, source_code, result_code, error_pos, code, - from_py_function=None, error_condition=None): - return self.typedef_base_type.from_py_call_code( + def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): + if to_py_function is None: + to_py_function = self.to_py_function + return self.typedef_base_type.to_py_call_code( + source_code, result_code, result_type, to_py_function) + + def from_py_call_code(self, source_code, result_code, error_pos, code, + from_py_function=None, error_condition=None): + return self.typedef_base_type.from_py_call_code( source_code, result_code, 
error_pos, code, from_py_function or self.from_py_function, error_condition or self.error_condition(result_code) ) - + def overflow_check_binop(self, binop, env, const_rhs=False): env.use_utility_code(UtilityCode.load("Common", "Overflow.c")) - type = self.empty_declaration_code() + type = self.empty_declaration_code() name = self.specialization_name() if binop == "lshift": - env.use_utility_code(TempitaUtilityCode.load_cached( + env.use_utility_code(TempitaUtilityCode.load_cached( "LeftShift", "Overflow.c", context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed})) else: if const_rhs: binop += "_const" _load_overflow_base(env) - env.use_utility_code(TempitaUtilityCode.load_cached( + env.use_utility_code(TempitaUtilityCode.load_cached( "SizeCheck", "Overflow.c", context={'TYPE': type, 'NAME': name})) - env.use_utility_code(TempitaUtilityCode.load_cached( + env.use_utility_code(TempitaUtilityCode.load_cached( "Binop", "Overflow.c", context={'TYPE': type, 'NAME': name, 'BINOP': binop})) return "__Pyx_%s_%s_checking_overflow" % (binop, name) @@ -537,8 +537,8 @@ class CTypedefType(BaseType): def error_condition(self, result_code): if self.typedef_is_external: if self.exception_value: - condition = "(%s == %s)" % ( - result_code, self.cast_code(self.exception_value)) + condition = "(%s == %s)" % ( + result_code, self.cast_code(self.exception_value)) if self.exception_check: condition += " && PyErr_Occurred()" return condition @@ -594,9 +594,9 @@ class MemoryViewSliceType(PyrexType): the packing specifiers specify how the array elements are layed-out in memory. - 'contig' -- The data is contiguous in memory along this dimension. + 'contig' -- The data is contiguous in memory along this dimension. At most one dimension may be specified as 'contig'. - 'strided' -- The data isn't contiguous along this dimension. + 'strided' -- The data isn't contiguous along this dimension. 'follow' -- Used for C/Fortran contiguous arrays, a 'follow' dimension has its stride automatically computed from extents of the other dimensions to ensure C or Fortran memory layout. @@ -608,7 +608,7 @@ class MemoryViewSliceType(PyrexType): the *first* axis' packing spec and 'follow' for all other packing specs. """ - from . import Buffer, MemoryView + from . import Buffer, MemoryView self.dtype = base_dtype self.axes = axes @@ -622,17 +622,17 @@ class MemoryViewSliceType(PyrexType): self.writable_needed = False if not self.dtype.is_fused: - self.dtype_name = Buffer.mangle_dtype_name(self.dtype) - - def __hash__(self): - return hash(self.__class__) ^ hash(self.dtype) ^ hash(tuple(self.axes)) - - def __eq__(self, other): - if isinstance(other, BaseType): - return self.same_as_resolved_type(other) - else: - return False - + self.dtype_name = Buffer.mangle_dtype_name(self.dtype) + + def __hash__(self): + return hash(self.__class__) ^ hash(self.dtype) ^ hash(tuple(self.axes)) + + def __eq__(self, other): + if isinstance(other, BaseType): + return self.same_as_resolved_type(other) + else: + return False + def same_as_resolved_type(self, other_type): return ((other_type.is_memoryviewslice and #self.writable_needed == other_type.writable_needed and # FIXME: should be only uni-directional @@ -653,9 +653,9 @@ class MemoryViewSliceType(PyrexType): assert not pyrex assert not dll_linkage from . 
import MemoryView - base_code = str(self) if for_display else MemoryView.memviewslice_cname + base_code = str(self) if for_display else MemoryView.memviewslice_cname return self.base_declaration_code( - base_code, + base_code, entity_code) def attributes_known(self): @@ -707,33 +707,33 @@ class MemoryViewSliceType(PyrexType): elif attribute in ("copy", "copy_fortran"): ndim = len(self.axes) - follow_dim = [('direct', 'follow')] - contig_dim = [('direct', 'contig')] - to_axes_c = follow_dim * (ndim - 1) + contig_dim - to_axes_f = contig_dim + follow_dim * (ndim -1) + follow_dim = [('direct', 'follow')] + contig_dim = [('direct', 'contig')] + to_axes_c = follow_dim * (ndim - 1) + contig_dim + to_axes_f = contig_dim + follow_dim * (ndim -1) + + dtype = self.dtype + if dtype.is_const: + dtype = dtype.const_base_type - dtype = self.dtype - if dtype.is_const: - dtype = dtype.const_base_type + to_memview_c = MemoryViewSliceType(dtype, to_axes_c) + to_memview_f = MemoryViewSliceType(dtype, to_axes_f) - to_memview_c = MemoryViewSliceType(dtype, to_axes_c) - to_memview_f = MemoryViewSliceType(dtype, to_axes_f) - for to_memview, cython_name in [(to_memview_c, "copy"), (to_memview_f, "copy_fortran")]: - copy_func_type = CFuncType( - to_memview, - [CFuncTypeArg("memviewslice", self, None)]) - copy_cname = MemoryView.copy_c_or_fortran_cname(to_memview) - - entry = scope.declare_cfunction( - cython_name, - copy_func_type, pos=pos, defining=1, - cname=copy_cname) - - utility = MemoryView.get_copy_new_utility(pos, self, to_memview) - env.use_utility_code(utility) - + copy_func_type = CFuncType( + to_memview, + [CFuncTypeArg("memviewslice", self, None)]) + copy_cname = MemoryView.copy_c_or_fortran_cname(to_memview) + + entry = scope.declare_cfunction( + cython_name, + copy_func_type, pos=pos, defining=1, + cname=copy_cname) + + utility = MemoryView.get_copy_new_utility(pos, self, to_memview) + env.use_utility_code(utility) + MemoryView.use_cython_array_utility_code(env) elif attribute in ("is_c_contig", "is_f_contig"): @@ -758,35 +758,35 @@ class MemoryViewSliceType(PyrexType): return True - def get_entry(self, node, cname=None, type=None): - from . import MemoryView, Symtab - - if cname is None: - assert node.is_simple() or node.is_temp or node.is_elemental - cname = node.result() - - if type is None: - type = node.type - - entry = Symtab.Entry(cname, cname, type, node.pos) - return MemoryView.MemoryViewSliceBufferEntry(entry) - - def conforms_to(self, dst, broadcast=False, copying=False): - """ - Returns True if src conforms to dst, False otherwise. - - If conformable, the types are the same, the ndims are equal, and each axis spec is conformable. - - Any packing/access spec is conformable to itself. - - 'direct' and 'ptr' are conformable to 'full'. - 'contig' and 'follow' are conformable to 'strided'. - Any other combo is not conformable. - """ - from . import MemoryView - - src = self - + def get_entry(self, node, cname=None, type=None): + from . import MemoryView, Symtab + + if cname is None: + assert node.is_simple() or node.is_temp or node.is_elemental + cname = node.result() + + if type is None: + type = node.type + + entry = Symtab.Entry(cname, cname, type, node.pos) + return MemoryView.MemoryViewSliceBufferEntry(entry) + + def conforms_to(self, dst, broadcast=False, copying=False): + """ + Returns True if src conforms to dst, False otherwise. + + If conformable, the types are the same, the ndims are equal, and each axis spec is conformable. + + Any packing/access spec is conformable to itself. 
+ + 'direct' and 'ptr' are conformable to 'full'. + 'contig' and 'follow' are conformable to 'strided'. + Any other combo is not conformable. + """ + from . import MemoryView + + src = self + #if not copying and self.writable_needed and not dst.writable_needed: # return False @@ -802,73 +802,73 @@ class MemoryViewSliceType(PyrexType): src_dtype = src_dtype.const_base_type if src_dtype != dst_dtype: - return False - - if src.ndim != dst.ndim: - if broadcast: - src, dst = MemoryView.broadcast_types(src, dst) - else: - return False - - for src_spec, dst_spec in zip(src.axes, dst.axes): - src_access, src_packing = src_spec - dst_access, dst_packing = dst_spec - if src_access != dst_access and dst_access != 'full': - return False - if src_packing != dst_packing and dst_packing != 'strided' and not copying: - return False - - return True - - def valid_dtype(self, dtype, i=0): - """ - Return whether type dtype can be used as the base type of a - memoryview slice. - - We support structs, numeric types and objects - """ - if dtype.is_complex and dtype.real_type.is_int: - return False - - if dtype.is_struct and dtype.kind == 'struct': - for member in dtype.scope.var_entries: - if not self.valid_dtype(member.type): - return False - - return True - - return ( - dtype.is_error or - # Pointers are not valid (yet) - # (dtype.is_ptr and valid_memslice_dtype(dtype.base_type)) or - (dtype.is_array and i < 8 and self.valid_dtype(dtype.base_type, i + 1)) or - dtype.is_numeric or - dtype.is_pyobject or - dtype.is_fused or # accept this as it will be replaced by specializations later - (dtype.is_typedef and self.valid_dtype(dtype.typedef_base_type)) - ) - - def validate_memslice_dtype(self, pos): - if not self.valid_dtype(self.dtype): - error(pos, "Invalid base type for memoryview slice: %s" % self.dtype) - - def assert_direct_dims(self, pos): - for access, packing in self.axes: - if access != 'direct': - error(pos, "All dimensions must be direct") - return False - return True - - def transpose(self, pos): - if not self.assert_direct_dims(pos): - return error_type - return MemoryViewSliceType(self.dtype, self.axes[::-1]) - - def specialization_name(self): - return '%s_%s' % ( - super(MemoryViewSliceType,self).specialization_name(), - self.specialization_suffix()) - + return False + + if src.ndim != dst.ndim: + if broadcast: + src, dst = MemoryView.broadcast_types(src, dst) + else: + return False + + for src_spec, dst_spec in zip(src.axes, dst.axes): + src_access, src_packing = src_spec + dst_access, dst_packing = dst_spec + if src_access != dst_access and dst_access != 'full': + return False + if src_packing != dst_packing and dst_packing != 'strided' and not copying: + return False + + return True + + def valid_dtype(self, dtype, i=0): + """ + Return whether type dtype can be used as the base type of a + memoryview slice. 
+ + We support structs, numeric types and objects + """ + if dtype.is_complex and dtype.real_type.is_int: + return False + + if dtype.is_struct and dtype.kind == 'struct': + for member in dtype.scope.var_entries: + if not self.valid_dtype(member.type): + return False + + return True + + return ( + dtype.is_error or + # Pointers are not valid (yet) + # (dtype.is_ptr and valid_memslice_dtype(dtype.base_type)) or + (dtype.is_array and i < 8 and self.valid_dtype(dtype.base_type, i + 1)) or + dtype.is_numeric or + dtype.is_pyobject or + dtype.is_fused or # accept this as it will be replaced by specializations later + (dtype.is_typedef and self.valid_dtype(dtype.typedef_base_type)) + ) + + def validate_memslice_dtype(self, pos): + if not self.valid_dtype(self.dtype): + error(pos, "Invalid base type for memoryview slice: %s" % self.dtype) + + def assert_direct_dims(self, pos): + for access, packing in self.axes: + if access != 'direct': + error(pos, "All dimensions must be direct") + return False + return True + + def transpose(self, pos): + if not self.assert_direct_dims(pos): + return error_type + return MemoryViewSliceType(self.dtype, self.axes[::-1]) + + def specialization_name(self): + return '%s_%s' % ( + super(MemoryViewSliceType,self).specialization_name(), + self.specialization_suffix()) + def specialization_suffix(self): return "%s_%s" % (self.axes_to_name(), self.dtype_name) @@ -886,9 +886,9 @@ class MemoryViewSliceType(PyrexType): # We don't have 'code', so use a LazyUtilityCode with a callback. def lazy_utility_callback(code): - context['dtype_typeinfo'] = Buffer.get_type_information_cname(code, self.dtype) + context['dtype_typeinfo'] = Buffer.get_type_information_cname(code, self.dtype) return TempitaUtilityCode.load( - "ObjectToMemviewSlice", "MemoryView_C.c", context=context) + "ObjectToMemviewSlice", "MemoryView_C.c", context=context) env.use_utility_code(MemoryView.memviewslice_init_code) env.use_utility_code(LazyUtilityCode(lazy_utility_callback)) @@ -908,7 +908,7 @@ class MemoryViewSliceType(PyrexType): buf_flag = self.flags, ndim = self.ndim, axes_specs = ', '.join(self.axes_to_code()), - dtype_typedecl = self.dtype.empty_declaration_code(), + dtype_typedecl = self.dtype.empty_declaration_code(), struct_nesting_depth = self.dtype.struct_nesting_depth(), c_or_f_flag = c_or_f_flag, funcname = funcname, @@ -917,29 +917,29 @@ class MemoryViewSliceType(PyrexType): self.from_py_function = funcname return True - def from_py_call_code(self, source_code, result_code, error_pos, code, - from_py_function=None, error_condition=None): + def from_py_call_code(self, source_code, result_code, error_pos, code, + from_py_function=None, error_condition=None): # NOTE: auto-detection of readonly buffers is disabled: # writable = self.writable_needed or not self.dtype.is_const writable = not self.dtype.is_const return self._assign_from_py_code( source_code, result_code, error_pos, code, from_py_function, error_condition, extra_args=['PyBUF_WRITABLE' if writable else '0']) - + def create_to_py_utility_code(self, env): - self._dtype_to_py_func, self._dtype_from_py_func = self.dtype_object_conversion_funcs(env) + self._dtype_to_py_func, self._dtype_from_py_func = self.dtype_object_conversion_funcs(env) return True - def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): - assert self._dtype_to_py_func - assert self._dtype_from_py_func + def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): + assert self._dtype_to_py_func + assert 
self._dtype_from_py_func + + to_py_func = "(PyObject *(*)(char *)) " + self._dtype_to_py_func + from_py_func = "(int (*)(char *, PyObject *)) " + self._dtype_from_py_func - to_py_func = "(PyObject *(*)(char *)) " + self._dtype_to_py_func - from_py_func = "(int (*)(char *, PyObject *)) " + self._dtype_from_py_func + tup = (result_code, source_code, self.ndim, to_py_func, from_py_func, self.dtype.is_pyobject) + return "%s = __pyx_memoryview_fromslice(%s, %s, %s, %s, %d);" % tup - tup = (result_code, source_code, self.ndim, to_py_func, from_py_func, self.dtype.is_pyobject) - return "%s = __pyx_memoryview_fromslice(%s, %s, %s, %s, %d);" % tup - def dtype_object_conversion_funcs(self, env): get_function = "__pyx_memview_get_%s" % self.dtype_name set_function = "__pyx_memview_set_%s" % self.dtype_name @@ -977,8 +977,8 @@ class MemoryViewSliceType(PyrexType): error_condition=error_condition, ) - utility = TempitaUtilityCode.load_cached( - utility_name, "MemoryView_C.c", context=context) + utility = TempitaUtilityCode.load_cached( + utility_name, "MemoryView_C.c", context=context) env.use_utility_code(utility) return get_function, set_function @@ -1061,9 +1061,9 @@ class BufferType(BaseType): self.cast = cast self.is_numpy_buffer = self.base.name == "ndarray" - def can_coerce_to_pyobject(self,env): - return True - + def can_coerce_to_pyobject(self,env): + return True + def can_coerce_from_pyobject(self,env): return True @@ -1077,11 +1077,11 @@ class BufferType(BaseType): self.negative_indices, self.cast) return self - def get_entry(self, node): - from . import Buffer - assert node.is_name - return Buffer.BufferEntry(node.entry) - + def get_entry(self, node): + from . import Buffer + assert node.is_name + return Buffer.BufferEntry(node.entry) + def __getattr__(self, name): return getattr(self.base, name) @@ -1195,7 +1195,7 @@ class BuiltinObjectType(PyObjectType): has_attributes = 1 base_type = None module_name = '__builtin__' - require_exact = 1 + require_exact = 1 # fields that let it look like an extension type vtabslot_cname = None @@ -1203,7 +1203,7 @@ class BuiltinObjectType(PyObjectType): vtabptr_cname = None typedef_flag = True is_external = True - decl_type = 'PyObject' + decl_type = 'PyObject' def __init__(self, name, cname, objstruct_cname=None): self.name = name @@ -1211,12 +1211,12 @@ class BuiltinObjectType(PyObjectType): self.typeptr_cname = "(&%s)" % cname self.objstruct_cname = objstruct_cname self.is_gc_simple = name in builtin_types_that_cannot_create_refcycles - if name == 'type': - # Special case the type type, as many C API calls (and other - # libraries) actually expect a PyTypeObject* for type arguments. - self.decl_type = objstruct_cname - if name == 'Exception': - self.require_exact = 0 + if name == 'type': + # Special case the type type, as many C API calls (and other + # libraries) actually expect a PyTypeObject* for type arguments. 
+ self.decl_type = objstruct_cname + if name == 'Exception': + self.require_exact = 0 def set_scope(self, scope): self.scope = scope @@ -1270,15 +1270,15 @@ class BuiltinObjectType(PyObjectType): type_check = 'PyString_Check' elif type_name == 'basestring': type_check = '__Pyx_PyBaseString_Check' - elif type_name == 'Exception': - type_check = '__Pyx_PyException_Check' + elif type_name == 'Exception': + type_check = '__Pyx_PyException_Check' elif type_name == 'bytearray': type_check = 'PyByteArray_Check' elif type_name == 'frozenset': type_check = 'PyFrozenSet_Check' else: type_check = 'Py%s_Check' % type_name.capitalize() - if exact and type_name not in ('bool', 'slice', 'Exception'): + if exact and type_name not in ('bool', 'slice', 'Exception'): type_check += 'Exact' return type_check @@ -1306,19 +1306,19 @@ class BuiltinObjectType(PyObjectType): if pyrex or for_display: base_code = self.name else: - base_code = public_decl(self.decl_type, dll_linkage) + base_code = public_decl(self.decl_type, dll_linkage) entity_code = "*%s" % entity_code return self.base_declaration_code(base_code, entity_code) - def as_pyobject(self, cname): - if self.decl_type == 'PyObject': - return cname - else: - return "(PyObject *)" + cname - + def as_pyobject(self, cname): + if self.decl_type == 'PyObject': + return cname + else: + return "(PyObject *)" + cname + def cast_code(self, expr_code, to_object_struct = False): return "((%s*)%s)" % ( - to_object_struct and self.objstruct_cname or self.decl_type, # self.objstruct_cname may be None + to_object_struct and self.objstruct_cname or self.decl_type, # self.objstruct_cname may be None expr_code) def py_type_name(self): @@ -1346,7 +1346,7 @@ class PyExtensionType(PyObjectType): # vtable_cname string Name of C method table definition # early_init boolean Whether to initialize early (as opposed to during module execution). 
# defered_declarations [thunk] Used to declare class hierarchies in order - # check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match + # check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match is_extension_type = 1 has_attributes = 1 @@ -1354,7 +1354,7 @@ class PyExtensionType(PyObjectType): objtypedef_cname = None - def __init__(self, name, typedef_flag, base_type, is_external=0, check_size=None): + def __init__(self, name, typedef_flag, base_type, is_external=0, check_size=None): self.name = name self.scope = None self.typedef_flag = typedef_flag @@ -1370,7 +1370,7 @@ class PyExtensionType(PyObjectType): self.vtabptr_cname = None self.vtable_cname = None self.is_external = is_external - self.check_size = check_size or 'warn' + self.check_size = check_size or 'warn' self.defered_declarations = [] def set_scope(self, scope): @@ -1494,31 +1494,31 @@ class CType(PyrexType): else: return 0 - def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): - func = self.to_py_function if to_py_function is None else to_py_function - assert func - if self.is_string or self.is_cpp_string: - if result_type.is_builtin_type: - result_type_name = result_type.name - if result_type_name in ('bytes', 'str', 'unicode'): - func = func.replace("Object", result_type_name.title(), 1) - elif result_type_name == 'bytearray': - func = func.replace("Object", "ByteArray", 1) - return '%s = %s(%s)' % ( - result_code, - func, - source_code or 'NULL') - - def from_py_call_code(self, source_code, result_code, error_pos, code, - from_py_function=None, error_condition=None): + def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): + func = self.to_py_function if to_py_function is None else to_py_function + assert func + if self.is_string or self.is_cpp_string: + if result_type.is_builtin_type: + result_type_name = result_type.name + if result_type_name in ('bytes', 'str', 'unicode'): + func = func.replace("Object", result_type_name.title(), 1) + elif result_type_name == 'bytearray': + func = func.replace("Object", "ByteArray", 1) + return '%s = %s(%s)' % ( + result_code, + func, + source_code or 'NULL') + + def from_py_call_code(self, source_code, result_code, error_pos, code, + from_py_function=None, error_condition=None): return self._assign_from_py_code( source_code, result_code, error_pos, code, from_py_function, error_condition) - + class PythranExpr(CType): # Pythran object of a given type - + to_py_function = "__Pyx_pythran_to_python" is_pythran_expr = True writable = True @@ -1576,10 +1576,10 @@ class CConstType(BaseType): def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0): - if for_display or pyrex: - return "const " + self.const_base_type.declaration_code(entity_code, for_display, dll_linkage, pyrex) - else: - return self.const_base_type.declaration_code("const %s" % entity_code, for_display, dll_linkage, pyrex) + if for_display or pyrex: + return "const " + self.const_base_type.declaration_code(entity_code, for_display, dll_linkage, pyrex) + else: + return self.const_base_type.declaration_code("const %s" % entity_code, for_display, dll_linkage, pyrex) def specialize(self, values): base_type = self.const_base_type.specialize(values) @@ -1591,9 +1591,9 @@ class CConstType(BaseType): def deduce_template_params(self, actual): return self.const_base_type.deduce_template_params(actual) - def can_coerce_to_pyobject(self, env): - return 
self.const_base_type.can_coerce_to_pyobject(env) - + def can_coerce_to_pyobject(self, env): + return self.const_base_type.can_coerce_to_pyobject(env) + def can_coerce_from_pyobject(self, env): return self.const_base_type.can_coerce_from_pyobject(env) @@ -1628,17 +1628,17 @@ class FusedType(CType): exception_check = 0 def __init__(self, types, name=None): - # Use list rather than set to preserve order (list should be short). - flattened_types = [] - for t in types: - if t.is_fused: - # recursively merge in subtypes - for subtype in t.types: - if subtype not in flattened_types: - flattened_types.append(subtype) - elif t not in flattened_types: - flattened_types.append(t) - self.types = flattened_types + # Use list rather than set to preserve order (list should be short). + flattened_types = [] + for t in types: + if t.is_fused: + # recursively merge in subtypes + for subtype in t.types: + if subtype not in flattened_types: + flattened_types.append(subtype) + elif t not in flattened_types: + flattened_types.append(t) + self.types = flattened_types self.name = name def declaration_code(self, entity_code, for_display = 0, @@ -1669,7 +1669,7 @@ class CVoidType(CType): # is_void = 1 - to_py_function = "__Pyx_void_to_None" + to_py_function = "__Pyx_void_to_None" def __repr__(self): return "<CVoidType>" @@ -1716,10 +1716,10 @@ class CNumericType(CType): def __init__(self, rank, signed = 1): self.rank = rank - if rank > 0 and signed == SIGNED: - # Signed is meaningless for anything but char, and complicates - # type promotion. - signed = 1 + if rank > 0 and signed == SIGNED: + # Signed is meaningless for anything but char, and complicates + # type promotion. + signed = 1 self.signed = signed def sign_and_name(self): @@ -1783,12 +1783,12 @@ class CIntLike(object): """ to_py_function = None from_py_function = None - to_pyunicode_utility = None - default_format_spec = 'd' + to_pyunicode_utility = None + default_format_spec = 'd' + + def can_coerce_to_pyobject(self, env): + return True - def can_coerce_to_pyobject(self, env): - return True - def can_coerce_from_pyobject(self, env): return True @@ -1810,48 +1810,48 @@ class CIntLike(object): "FROM_PY_FUNCTION": self.from_py_function})) return True - @staticmethod - def _parse_format(format_spec): - padding = ' ' - if not format_spec: - return ('d', 0, padding) - format_type = format_spec[-1] - if format_type in ('o', 'd', 'x', 'X'): - prefix = format_spec[:-1] - elif format_type.isdigit(): - format_type = 'd' - prefix = format_spec - else: - return (None, 0, padding) - if not prefix: - return (format_type, 0, padding) - if prefix[0] == '-': - prefix = prefix[1:] - if prefix and prefix[0] == '0': - padding = '0' - prefix = prefix.lstrip('0') - if prefix.isdigit(): - return (format_type, int(prefix), padding) - return (None, 0, padding) - - def can_coerce_to_pystring(self, env, format_spec=None): - format_type, width, padding = self._parse_format(format_spec) - return format_type is not None and width <= 2**30 - - def convert_to_pystring(self, cvalue, code, format_spec=None): - if self.to_pyunicode_utility is None: - utility_code_name = "__Pyx_PyUnicode_From_" + self.specialization_name() - to_pyunicode_utility = TempitaUtilityCode.load_cached( - "CIntToPyUnicode", "TypeConversion.c", - context={"TYPE": self.empty_declaration_code(), - "TO_PY_FUNCTION": utility_code_name}) - self.to_pyunicode_utility = (utility_code_name, to_pyunicode_utility) - else: - utility_code_name, to_pyunicode_utility = self.to_pyunicode_utility - 
code.globalstate.use_utility_code(to_pyunicode_utility) - format_type, width, padding_char = self._parse_format(format_spec) - return "%s(%s, %d, '%s', '%s')" % (utility_code_name, cvalue, width, padding_char, format_type) - + @staticmethod + def _parse_format(format_spec): + padding = ' ' + if not format_spec: + return ('d', 0, padding) + format_type = format_spec[-1] + if format_type in ('o', 'd', 'x', 'X'): + prefix = format_spec[:-1] + elif format_type.isdigit(): + format_type = 'd' + prefix = format_spec + else: + return (None, 0, padding) + if not prefix: + return (format_type, 0, padding) + if prefix[0] == '-': + prefix = prefix[1:] + if prefix and prefix[0] == '0': + padding = '0' + prefix = prefix.lstrip('0') + if prefix.isdigit(): + return (format_type, int(prefix), padding) + return (None, 0, padding) + + def can_coerce_to_pystring(self, env, format_spec=None): + format_type, width, padding = self._parse_format(format_spec) + return format_type is not None and width <= 2**30 + + def convert_to_pystring(self, cvalue, code, format_spec=None): + if self.to_pyunicode_utility is None: + utility_code_name = "__Pyx_PyUnicode_From_" + self.specialization_name() + to_pyunicode_utility = TempitaUtilityCode.load_cached( + "CIntToPyUnicode", "TypeConversion.c", + context={"TYPE": self.empty_declaration_code(), + "TO_PY_FUNCTION": utility_code_name}) + self.to_pyunicode_utility = (utility_code_name, to_pyunicode_utility) + else: + utility_code_name, to_pyunicode_utility = self.to_pyunicode_utility + code.globalstate.use_utility_code(to_pyunicode_utility) + format_type, width, padding_char = self._parse_format(format_spec) + return "%s(%s, %d, '%s', '%s')" % (utility_code_name, cvalue, width, padding_char, format_type) + class CIntType(CIntLike, CNumericType): @@ -1890,21 +1890,21 @@ class CIntType(CIntLike, CNumericType): def overflow_check_binop(self, binop, env, const_rhs=False): env.use_utility_code(UtilityCode.load("Common", "Overflow.c")) - type = self.empty_declaration_code() + type = self.empty_declaration_code() name = self.specialization_name() if binop == "lshift": - env.use_utility_code(TempitaUtilityCode.load_cached( + env.use_utility_code(TempitaUtilityCode.load_cached( "LeftShift", "Overflow.c", context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed})) else: if const_rhs: binop += "_const" if type in ('int', 'long', 'long long'): - env.use_utility_code(TempitaUtilityCode.load_cached( + env.use_utility_code(TempitaUtilityCode.load_cached( "BaseCaseSigned", "Overflow.c", context={'INT': type, 'NAME': name})) elif type in ('unsigned int', 'unsigned long', 'unsigned long long'): - env.use_utility_code(TempitaUtilityCode.load_cached( + env.use_utility_code(TempitaUtilityCode.load_cached( "BaseCaseUnsigned", "Overflow.c", context={'UINT': type, 'NAME': name})) elif self.rank <= 1: @@ -1912,23 +1912,23 @@ class CIntType(CIntLike, CNumericType): return "__Pyx_%s_%s_no_overflow" % (binop, name) else: _load_overflow_base(env) - env.use_utility_code(TempitaUtilityCode.load_cached( + env.use_utility_code(TempitaUtilityCode.load_cached( "SizeCheck", "Overflow.c", context={'TYPE': type, 'NAME': name})) - env.use_utility_code(TempitaUtilityCode.load_cached( + env.use_utility_code(TempitaUtilityCode.load_cached( "Binop", "Overflow.c", context={'TYPE': type, 'NAME': name, 'BINOP': binop})) return "__Pyx_%s_%s_checking_overflow" % (binop, name) - + def _load_overflow_base(env): env.use_utility_code(UtilityCode.load("Common", "Overflow.c")) for type in ('int', 'long', 'long long'): - 
env.use_utility_code(TempitaUtilityCode.load_cached( + env.use_utility_code(TempitaUtilityCode.load_cached( "BaseCaseSigned", "Overflow.c", context={'INT': type, 'NAME': type.replace(' ', '_')})) for type in ('unsigned int', 'unsigned long', 'unsigned long long'): - env.use_utility_code(TempitaUtilityCode.load_cached( + env.use_utility_code(TempitaUtilityCode.load_cached( "BaseCaseUnsigned", "Overflow.c", context={'UINT': type, 'NAME': type.replace(' ', '_')})) @@ -1947,45 +1947,45 @@ class CReturnCodeType(CIntType): is_returncode = True exception_check = False - default_format_spec = '' + default_format_spec = '' + + def can_coerce_to_pystring(self, env, format_spec=None): + return not format_spec + + def convert_to_pystring(self, cvalue, code, format_spec=None): + return "__Pyx_NewRef(%s)" % code.globalstate.get_py_string_const(StringEncoding.EncodedString("None")).cname - def can_coerce_to_pystring(self, env, format_spec=None): - return not format_spec - def convert_to_pystring(self, cvalue, code, format_spec=None): - return "__Pyx_NewRef(%s)" % code.globalstate.get_py_string_const(StringEncoding.EncodedString("None")).cname - - class CBIntType(CIntType): to_py_function = "__Pyx_PyBool_FromLong" from_py_function = "__Pyx_PyObject_IsTrue" - exception_check = 1 # for C++ bool - default_format_spec = '' - - def can_coerce_to_pystring(self, env, format_spec=None): - return not format_spec or super(CBIntType, self).can_coerce_to_pystring(env, format_spec) - - def convert_to_pystring(self, cvalue, code, format_spec=None): - if format_spec: - return super(CBIntType, self).convert_to_pystring(cvalue, code, format_spec) - # NOTE: no caching here as the string constant cnames depend on the current module - utility_code_name = "__Pyx_PyUnicode_FromBInt_" + self.specialization_name() - to_pyunicode_utility = TempitaUtilityCode.load_cached( - "CBIntToPyUnicode", "TypeConversion.c", context={ - "TRUE_CONST": code.globalstate.get_py_string_const(StringEncoding.EncodedString("True")).cname, - "FALSE_CONST": code.globalstate.get_py_string_const(StringEncoding.EncodedString("False")).cname, - "TO_PY_FUNCTION": utility_code_name, - }) - code.globalstate.use_utility_code(to_pyunicode_utility) - return "%s(%s)" % (utility_code_name, cvalue) - + exception_check = 1 # for C++ bool + default_format_spec = '' + + def can_coerce_to_pystring(self, env, format_spec=None): + return not format_spec or super(CBIntType, self).can_coerce_to_pystring(env, format_spec) + + def convert_to_pystring(self, cvalue, code, format_spec=None): + if format_spec: + return super(CBIntType, self).convert_to_pystring(cvalue, code, format_spec) + # NOTE: no caching here as the string constant cnames depend on the current module + utility_code_name = "__Pyx_PyUnicode_FromBInt_" + self.specialization_name() + to_pyunicode_utility = TempitaUtilityCode.load_cached( + "CBIntToPyUnicode", "TypeConversion.c", context={ + "TRUE_CONST": code.globalstate.get_py_string_const(StringEncoding.EncodedString("True")).cname, + "FALSE_CONST": code.globalstate.get_py_string_const(StringEncoding.EncodedString("False")).cname, + "TO_PY_FUNCTION": utility_code_name, + }) + code.globalstate.use_utility_code(to_pyunicode_utility) + return "%s(%s)" % (utility_code_name, cvalue) + def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0): - if for_display: + if for_display: base_code = 'bool' - elif pyrex: - base_code = 'bint' + elif pyrex: + base_code = 'bint' else: base_code = public_decl('int', dll_linkage) return 
self.base_declaration_code(base_code, entity_code) @@ -2014,9 +2014,9 @@ class CPyUCS4IntType(CIntType): to_py_function = "PyUnicode_FromOrdinal" from_py_function = "__Pyx_PyObject_AsPy_UCS4" - def can_coerce_to_pystring(self, env, format_spec=None): - return False # does the right thing anyway - + def can_coerce_to_pystring(self, env, format_spec=None): + return False # does the right thing anyway + def create_from_py_utility_code(self, env): env.use_utility_code(UtilityCode.load_cached("ObjectAsUCS4", "TypeConversion.c")) return True @@ -2038,9 +2038,9 @@ class CPyUnicodeIntType(CIntType): to_py_function = "PyUnicode_FromOrdinal" from_py_function = "__Pyx_PyObject_AsPy_UNICODE" - def can_coerce_to_pystring(self, env, format_spec=None): - return False # does the right thing anyway - + def can_coerce_to_pystring(self, env, format_spec=None): + return False # does the right thing anyway + def create_from_py_utility_code(self, env): env.use_utility_code(UtilityCode.load_cached("ObjectAsPyUnicode", "TypeConversion.c")) return True @@ -2116,11 +2116,11 @@ class CComplexType(CNumericType): def __init__(self, real_type): while real_type.is_typedef and not real_type.typedef_is_external: real_type = real_type.typedef_base_type - self.funcsuffix = "_%s" % real_type.specialization_name() - if real_type.is_float: - self.math_h_modifier = real_type.math_h_modifier + self.funcsuffix = "_%s" % real_type.specialization_name() + if real_type.is_float: + self.math_h_modifier = real_type.math_h_modifier else: - self.math_h_modifier = "_UNUSED" + self.math_h_modifier = "_UNUSED" self.real_type = real_type CNumericType.__init__(self, real_type.rank + 0.5, real_type.signed) @@ -2201,40 +2201,40 @@ class CComplexType(CNumericType): return True - def _utility_code_context(self): - return { - 'type': self.empty_declaration_code(), - 'type_name': self.specialization_name(), - 'real_type': self.real_type.empty_declaration_code(), - 'func_suffix': self.funcsuffix, - 'm': self.math_h_modifier, - 'is_float': int(self.real_type.is_float) - } - + def _utility_code_context(self): + return { + 'type': self.empty_declaration_code(), + 'type_name': self.specialization_name(), + 'real_type': self.real_type.empty_declaration_code(), + 'func_suffix': self.funcsuffix, + 'm': self.math_h_modifier, + 'is_float': int(self.real_type.is_float) + } + def create_declaration_utility_code(self, env): # This must always be run, because a single CComplexType instance can be shared # across multiple compilations (the one created in the module scope) - env.use_utility_code(UtilityCode.load_cached('Header', 'Complex.c')) - env.use_utility_code(UtilityCode.load_cached('RealImag', 'Complex.c')) - env.use_utility_code(TempitaUtilityCode.load_cached( - 'Declarations', 'Complex.c', self._utility_code_context())) - env.use_utility_code(TempitaUtilityCode.load_cached( - 'Arithmetic', 'Complex.c', self._utility_code_context())) + env.use_utility_code(UtilityCode.load_cached('Header', 'Complex.c')) + env.use_utility_code(UtilityCode.load_cached('RealImag', 'Complex.c')) + env.use_utility_code(TempitaUtilityCode.load_cached( + 'Declarations', 'Complex.c', self._utility_code_context())) + env.use_utility_code(TempitaUtilityCode.load_cached( + 'Arithmetic', 'Complex.c', self._utility_code_context())) + return True + + def can_coerce_to_pyobject(self, env): return True - def can_coerce_to_pyobject(self, env): - return True - def can_coerce_from_pyobject(self, env): return True def create_to_py_utility_code(self, env): - 
env.use_utility_code(UtilityCode.load_cached('ToPy', 'Complex.c')) + env.use_utility_code(UtilityCode.load_cached('ToPy', 'Complex.c')) return True def create_from_py_utility_code(self, env): - env.use_utility_code(TempitaUtilityCode.load_cached( - 'FromPy', 'Complex.c', self._utility_code_context())) + env.use_utility_code(TempitaUtilityCode.load_cached( + 'FromPy', 'Complex.c', self._utility_code_context())) self.from_py_function = "__Pyx_PyComplex_As_" + self.specialization_name() return True @@ -2269,7 +2269,7 @@ complex_ops = { (2, '-'): 'diff', (2, '*'): 'prod', (2, '/'): 'quot', - (2, '**'): 'pow', + (2, '**'): 'pow', (2, '=='): 'eq', } @@ -2302,8 +2302,8 @@ class CPointerBaseType(CType): def __init__(self, base_type): self.base_type = base_type - if base_type.is_const: - base_type = base_type.const_base_type + if base_type.is_const: + base_type = base_type.const_base_type for char_type in (c_char_type, c_uchar_type, c_schar_type): if base_type.same_as(char_type): self.is_string = 1 @@ -2313,16 +2313,16 @@ class CPointerBaseType(CType): self.is_pyunicode_ptr = 1 if self.is_string and not base_type.is_error: - if base_type.signed == 2: - self.to_py_function = "__Pyx_PyObject_FromCString" - if self.is_ptr: + if base_type.signed == 2: + self.to_py_function = "__Pyx_PyObject_FromCString" + if self.is_ptr: self.from_py_function = "__Pyx_PyObject_As%sSString" - elif base_type.signed: + elif base_type.signed: self.to_py_function = "__Pyx_PyObject_FromString" if self.is_ptr: self.from_py_function = "__Pyx_PyObject_As%sString" else: - self.to_py_function = "__Pyx_PyObject_FromCString" + self.to_py_function = "__Pyx_PyObject_FromCString" if self.is_ptr: self.from_py_function = "__Pyx_PyObject_As%sUString" if self.is_ptr: @@ -2353,7 +2353,7 @@ class CArrayType(CPointerBaseType): # size integer or None Number of elements is_array = 1 - to_tuple_function = None + to_tuple_function = None def __init__(self, base_type, size): super(CArrayType, self).__init__(base_type) @@ -2376,12 +2376,12 @@ class CArrayType(CPointerBaseType): or other_type is error_type) def assignable_from_resolved_type(self, src_type): - # C arrays are assigned by value, either Python containers or C arrays/pointers - if src_type.is_pyobject: - return True - if src_type.is_ptr or src_type.is_array: - return self.base_type.assignable_from(src_type.base_type) - return False + # C arrays are assigned by value, either Python containers or C arrays/pointers + if src_type.is_pyobject: + return True + if src_type.is_ptr or src_type.is_array: + return self.base_type.assignable_from(src_type.base_type) + return False def element_ptr_type(self): return c_ptr_type(self.base_type) @@ -2409,7 +2409,7 @@ class CArrayType(CPointerBaseType): if base_type == self.base_type: return self else: - return CArrayType(base_type, self.size) + return CArrayType(base_type, self.size) def deduce_template_params(self, actual): if isinstance(actual, CArrayType): @@ -2417,79 +2417,79 @@ class CArrayType(CPointerBaseType): else: return {} - def can_coerce_to_pyobject(self, env): - return self.base_type.can_coerce_to_pyobject(env) + def can_coerce_to_pyobject(self, env): + return self.base_type.can_coerce_to_pyobject(env) def can_coerce_from_pyobject(self, env): return self.base_type.can_coerce_from_pyobject(env) - def create_to_py_utility_code(self, env): - if self.to_py_function is not None: - return self.to_py_function - if not self.base_type.create_to_py_utility_code(env): - return False - + def create_to_py_utility_code(self, env): + if 
self.to_py_function is not None: + return self.to_py_function + if not self.base_type.create_to_py_utility_code(env): + return False + safe_typename = self.base_type.specialization_name() - to_py_function = "__Pyx_carray_to_py_%s" % safe_typename - to_tuple_function = "__Pyx_carray_to_tuple_%s" % safe_typename - - from .UtilityCode import CythonUtilityCode - context = { - 'cname': to_py_function, - 'to_tuple_cname': to_tuple_function, + to_py_function = "__Pyx_carray_to_py_%s" % safe_typename + to_tuple_function = "__Pyx_carray_to_tuple_%s" % safe_typename + + from .UtilityCode import CythonUtilityCode + context = { + 'cname': to_py_function, + 'to_tuple_cname': to_tuple_function, 'base_type': self.base_type, - } - env.use_utility_code(CythonUtilityCode.load( - "carray.to_py", "CConvert.pyx", - outer_module_scope=env.global_scope(), # need access to types declared in module - context=context, compiler_directives=dict(env.global_scope().directives))) - self.to_tuple_function = to_tuple_function - self.to_py_function = to_py_function - return True - - def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): - func = self.to_py_function if to_py_function is None else to_py_function - if self.is_string or self.is_pyunicode_ptr: - return '%s = %s(%s)' % ( - result_code, - func, - source_code) - target_is_tuple = result_type.is_builtin_type and result_type.name == 'tuple' - return '%s = %s(%s, %s)' % ( - result_code, - self.to_tuple_function if target_is_tuple else func, - source_code, - self.size) - - def create_from_py_utility_code(self, env): - if self.from_py_function is not None: - return self.from_py_function - if not self.base_type.create_from_py_utility_code(env): - return False - + } + env.use_utility_code(CythonUtilityCode.load( + "carray.to_py", "CConvert.pyx", + outer_module_scope=env.global_scope(), # need access to types declared in module + context=context, compiler_directives=dict(env.global_scope().directives))) + self.to_tuple_function = to_tuple_function + self.to_py_function = to_py_function + return True + + def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): + func = self.to_py_function if to_py_function is None else to_py_function + if self.is_string or self.is_pyunicode_ptr: + return '%s = %s(%s)' % ( + result_code, + func, + source_code) + target_is_tuple = result_type.is_builtin_type and result_type.name == 'tuple' + return '%s = %s(%s, %s)' % ( + result_code, + self.to_tuple_function if target_is_tuple else func, + source_code, + self.size) + + def create_from_py_utility_code(self, env): + if self.from_py_function is not None: + return self.from_py_function + if not self.base_type.create_from_py_utility_code(env): + return False + from_py_function = "__Pyx_carray_from_py_%s" % self.base_type.specialization_name() - - from .UtilityCode import CythonUtilityCode - context = { - 'cname': from_py_function, + + from .UtilityCode import CythonUtilityCode + context = { + 'cname': from_py_function, 'base_type': self.base_type, - } - env.use_utility_code(CythonUtilityCode.load( - "carray.from_py", "CConvert.pyx", - outer_module_scope=env.global_scope(), # need access to types declared in module - context=context, compiler_directives=dict(env.global_scope().directives))) - self.from_py_function = from_py_function - return True - - def from_py_call_code(self, source_code, result_code, error_pos, code, - from_py_function=None, error_condition=None): + } + env.use_utility_code(CythonUtilityCode.load( + 
"carray.from_py", "CConvert.pyx", + outer_module_scope=env.global_scope(), # need access to types declared in module + context=context, compiler_directives=dict(env.global_scope().directives))) + self.from_py_function = from_py_function + return True + + def from_py_call_code(self, source_code, result_code, error_pos, code, + from_py_function=None, error_condition=None): assert not error_condition, '%s: %s' % (error_pos, error_condition) - call_code = "%s(%s, %s, %s)" % ( - from_py_function or self.from_py_function, - source_code, result_code, self.size) - return code.error_goto_if_neg(call_code, error_pos) - - + call_code = "%s(%s, %s, %s)" % ( + from_py_function or self.from_py_function, + source_code, result_code, self.size) + return code.error_goto_if_neg(call_code, error_pos) + + class CPtrType(CPointerBaseType): # base_type CType Reference type @@ -2564,7 +2564,7 @@ class CPtrType(CPointerBaseType): return self.base_type.find_cpp_operation_type(operator, operand_type) return None - + class CNullPtrType(CPtrType): is_null_ptr = 1 @@ -2573,7 +2573,7 @@ class CNullPtrType(CPtrType): class CReferenceType(BaseType): is_reference = 1 - is_fake_reference = 0 + is_fake_reference = 0 def __init__(self, base_type): self.ref_base_type = base_type @@ -2596,7 +2596,7 @@ class CReferenceType(BaseType): if base_type == self.ref_base_type: return self else: - return type(self)(base_type) + return type(self)(base_type) def deduce_template_params(self, actual): return self.ref_base_type.deduce_template_params(actual) @@ -2605,22 +2605,22 @@ class CReferenceType(BaseType): return getattr(self.ref_base_type, name) -class CFakeReferenceType(CReferenceType): - - is_fake_reference = 1 - - def __repr__(self): - return "<CFakeReferenceType %s>" % repr(self.ref_base_type) - - def __str__(self): - return "%s [&]" % self.ref_base_type - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - #print "CReferenceType.declaration_code: pointer to", self.base_type ### - return "__Pyx_FakeReference<%s> %s" % (self.ref_base_type.empty_declaration_code(), entity_code) - - +class CFakeReferenceType(CReferenceType): + + is_fake_reference = 1 + + def __repr__(self): + return "<CFakeReferenceType %s>" % repr(self.ref_base_type) + + def __str__(self): + return "%s [&]" % self.ref_base_type + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + #print "CReferenceType.declaration_code: pointer to", self.base_type ### + return "__Pyx_FakeReference<%s> %s" % (self.ref_base_type.empty_declaration_code(), entity_code) + + class CFuncType(CType): # return_type CType # args [CFuncTypeArg] @@ -2668,7 +2668,7 @@ class CFuncType(CType): self.is_strict_signature = is_strict_signature def __repr__(self): - arg_reprs = list(map(repr, self.args)) + arg_reprs = list(map(repr, self.args)) if self.has_varargs: arg_reprs.append("...") if self.exception_value: @@ -2730,8 +2730,8 @@ class CFuncType(CType): # is exempt from compatibility checking (the proper check # is performed elsewhere). 
for i in range(as_cmethod, nargs): - if not self.args[i].type.same_as(other_type.args[i].type): - return 0 + if not self.args[i].type.same_as(other_type.args[i].type): + return 0 if self.has_varargs != other_type.has_varargs: return 0 if self.optional_arg_count != other_type.optional_arg_count: @@ -2751,25 +2751,25 @@ class CFuncType(CType): if not self._same_exception_value(other_type.exception_value): return 0 elif not self._is_exception_compatible_with(other_type): - return 0 + return 0 + return 1 + + def _same_exception_value(self, other_exc_value): + if self.exception_value == other_exc_value: + return 1 + if self.exception_check != '+': + return 0 + if not self.exception_value or not other_exc_value: + return 0 + if self.exception_value.type != other_exc_value.type: + return 0 + if self.exception_value.entry and other_exc_value.entry: + if self.exception_value.entry.cname != other_exc_value.entry.cname: + return 0 + if self.exception_value.name != other_exc_value.name: + return 0 return 1 - def _same_exception_value(self, other_exc_value): - if self.exception_value == other_exc_value: - return 1 - if self.exception_check != '+': - return 0 - if not self.exception_value or not other_exc_value: - return 0 - if self.exception_value.type != other_exc_value.type: - return 0 - if self.exception_value.entry and other_exc_value.entry: - if self.exception_value.entry.cname != other_exc_value.entry.cname: - return 0 - if self.exception_value.name != other_exc_value.name: - return 0 - return 1 - def compatible_signature_with(self, other_type, as_cmethod = 0): return self.compatible_signature_with_resolved_type(other_type.resolve(), as_cmethod) @@ -2803,7 +2803,7 @@ class CFuncType(CType): if self.nogil != other_type.nogil: return 0 if not self._is_exception_compatible_with(other_type): - return 0 + return 0 self.original_sig = other_type.original_sig or other_type return 1 @@ -2844,11 +2844,11 @@ class CFuncType(CType): return 0 if not self.return_type.subtype_of_resolved_type(other_type.return_type): return 0 - if not self.exception_check and other_type.exception_check: - # a redundant exception check doesn't make functions incompatible, but a missing one does - return 0 - if not self._same_exception_value(other_type.exception_value): - return 0 + if not self.exception_check and other_type.exception_check: + # a redundant exception check doesn't make functions incompatible, but a missing one does + return 0 + if not self._same_exception_value(other_type.exception_value): + return 0 return 1 def same_calling_convention_as(self, other): @@ -2902,9 +2902,9 @@ class CFuncType(CType): trailer = " except %s" % self.exception_value elif self.exception_check == '+': trailer = " except +" - elif self.exception_check and for_display: - # not spelled out by default, unless for human eyes - trailer = " except *" + elif self.exception_check and for_display: + # not spelled out by default, unless for human eyes + trailer = " except *" if self.nogil: trailer += " nogil" if not with_calling_convention: @@ -2929,7 +2929,7 @@ class CFuncType(CType): func_name, arg_code, trailer) def signature_string(self): - s = self.empty_declaration_code() + s = self.empty_declaration_code() return s def signature_cast_string(self): @@ -3029,86 +3029,86 @@ class CFuncType(CType): assert not self.is_fused specialize_entry(entry, cname) - def can_coerce_to_pyobject(self, env): - # duplicating the decisions from create_to_py_utility_code() here avoids writing out unused code - if self.has_varargs or self.optional_arg_count: - 
return False - if self.to_py_function is not None: - return self.to_py_function - for arg in self.args: - if not arg.type.is_pyobject and not arg.type.can_coerce_to_pyobject(env): - return False - if not self.return_type.is_pyobject and not self.return_type.can_coerce_to_pyobject(env): - return False - return True - - def create_to_py_utility_code(self, env): - # FIXME: it seems we're trying to coerce in more cases than we should - if self.to_py_function is not None: - return self.to_py_function - if not self.can_coerce_to_pyobject(env): - return False - from .UtilityCode import CythonUtilityCode - safe_typename = re.sub('[^a-zA-Z0-9]', '__', self.declaration_code("", pyrex=1)) - to_py_function = "__Pyx_CFunc_%s_to_py" % safe_typename - - for arg in self.args: - if not arg.type.is_pyobject and not arg.type.create_from_py_utility_code(env): - return False - if not self.return_type.is_pyobject and not self.return_type.create_to_py_utility_code(env): - return False - - def declared_type(ctype): - type_displayname = str(ctype.declaration_code("", for_display=True)) - if ctype.is_pyobject: - arg_ctype = type_name = type_displayname - if ctype.is_builtin_type: - arg_ctype = ctype.name - elif not ctype.is_extension_type: - type_name = 'object' - type_displayname = None - else: - type_displayname = repr(type_displayname) - elif ctype is c_bint_type: - type_name = arg_ctype = 'bint' - else: - type_name = arg_ctype = type_displayname - if ctype is c_double_type: - type_displayname = 'float' - else: - type_displayname = repr(type_displayname) - return type_name, arg_ctype, type_displayname - - class Arg(object): - def __init__(self, arg_name, arg_type): - self.name = arg_name - self.type = arg_type - self.type_cname, self.ctype, self.type_displayname = declared_type(arg_type) - - if self.return_type.is_void: - except_clause = 'except *' - elif self.return_type.is_pyobject: - except_clause = '' - elif self.exception_value: - except_clause = ('except? 
%s' if self.exception_check else 'except %s') % self.exception_value - else: - except_clause = 'except *' - - context = { - 'cname': to_py_function, - 'args': [Arg(arg.name or 'arg%s' % ix, arg.type) for ix, arg in enumerate(self.args)], - 'return_type': Arg('return', self.return_type), - 'except_clause': except_clause, - } - # FIXME: directives come from first defining environment and do not adapt for reuse - env.use_utility_code(CythonUtilityCode.load( - "cfunc.to_py", "CConvert.pyx", - outer_module_scope=env.global_scope(), # need access to types declared in module - context=context, compiler_directives=dict(env.global_scope().directives))) - self.to_py_function = to_py_function - return True - - + def can_coerce_to_pyobject(self, env): + # duplicating the decisions from create_to_py_utility_code() here avoids writing out unused code + if self.has_varargs or self.optional_arg_count: + return False + if self.to_py_function is not None: + return self.to_py_function + for arg in self.args: + if not arg.type.is_pyobject and not arg.type.can_coerce_to_pyobject(env): + return False + if not self.return_type.is_pyobject and not self.return_type.can_coerce_to_pyobject(env): + return False + return True + + def create_to_py_utility_code(self, env): + # FIXME: it seems we're trying to coerce in more cases than we should + if self.to_py_function is not None: + return self.to_py_function + if not self.can_coerce_to_pyobject(env): + return False + from .UtilityCode import CythonUtilityCode + safe_typename = re.sub('[^a-zA-Z0-9]', '__', self.declaration_code("", pyrex=1)) + to_py_function = "__Pyx_CFunc_%s_to_py" % safe_typename + + for arg in self.args: + if not arg.type.is_pyobject and not arg.type.create_from_py_utility_code(env): + return False + if not self.return_type.is_pyobject and not self.return_type.create_to_py_utility_code(env): + return False + + def declared_type(ctype): + type_displayname = str(ctype.declaration_code("", for_display=True)) + if ctype.is_pyobject: + arg_ctype = type_name = type_displayname + if ctype.is_builtin_type: + arg_ctype = ctype.name + elif not ctype.is_extension_type: + type_name = 'object' + type_displayname = None + else: + type_displayname = repr(type_displayname) + elif ctype is c_bint_type: + type_name = arg_ctype = 'bint' + else: + type_name = arg_ctype = type_displayname + if ctype is c_double_type: + type_displayname = 'float' + else: + type_displayname = repr(type_displayname) + return type_name, arg_ctype, type_displayname + + class Arg(object): + def __init__(self, arg_name, arg_type): + self.name = arg_name + self.type = arg_type + self.type_cname, self.ctype, self.type_displayname = declared_type(arg_type) + + if self.return_type.is_void: + except_clause = 'except *' + elif self.return_type.is_pyobject: + except_clause = '' + elif self.exception_value: + except_clause = ('except? 
%s' if self.exception_check else 'except %s') % self.exception_value + else: + except_clause = 'except *' + + context = { + 'cname': to_py_function, + 'args': [Arg(arg.name or 'arg%s' % ix, arg.type) for ix, arg in enumerate(self.args)], + 'return_type': Arg('return', self.return_type), + 'except_clause': except_clause, + } + # FIXME: directives come from first defining environment and do not adapt for reuse + env.use_utility_code(CythonUtilityCode.load( + "cfunc.to_py", "CConvert.pyx", + outer_module_scope=env.global_scope(), # need access to types declared in module + context=context, compiler_directives=dict(env.global_scope().directives))) + self.to_py_function = to_py_function + return True + + def specialize_entry(entry, cname): """ Specialize an entry of a copied fused function or method @@ -3195,15 +3195,15 @@ def specialization_signature_string(fused_compound_type, fused_to_specific): return fused_type.specialize(fused_to_specific).typeof_name() - + def get_specialized_types(type): """ - Return a list of specialized types in their declared order. + Return a list of specialized types in their declared order. """ assert type.is_fused if isinstance(type, FusedType): - result = list(type.types) + result = list(type.types) for specialized_type in result: specialized_type.specialization_string = specialized_type.typeof_name() else: @@ -3214,7 +3214,7 @@ def get_specialized_types(type): specialization_signature_string(type, f2s)) result.append(specialized_type) - return result + return result class CFuncTypeArg(BaseType): @@ -3258,12 +3258,12 @@ class ToPyStructUtilityCode(object): requires = None - def __init__(self, type, forward_decl, env): + def __init__(self, type, forward_decl, env): self.type = type self.header = "static PyObject* %s(%s)" % (type.to_py_function, type.declaration_code('s')) self.forward_decl = forward_decl - self.env = env + self.env = env def __eq__(self, other): return isinstance(other, ToPyStructUtilityCode) and self.header == other.header @@ -3271,7 +3271,7 @@ class ToPyStructUtilityCode(object): def __hash__(self): return hash(self.header) - def get_tree(self, **kwargs): + def get_tree(self, **kwargs): pass def put_code(self, output): @@ -3285,9 +3285,9 @@ class ToPyStructUtilityCode(object): len(self.type.scope.var_entries)) for member in self.type.scope.var_entries: nameconst_cname = code.get_py_string_const(member.name, identifier=True) - code.putln("%s; if (unlikely(!member)) goto bad;" % ( - member.type.to_py_call_code('s.%s' % member.cname, 'member', member.type))) - code.putln("if (unlikely(PyDict_SetItem(res, %s, member) < 0)) goto bad;" % nameconst_cname) + code.putln("%s; if (unlikely(!member)) goto bad;" % ( + member.type.to_py_call_code('s.%s' % member.cname, 'member', member.type))) + code.putln("if (unlikely(PyDict_SetItem(res, %s, member) < 0)) goto bad;" % nameconst_cname) code.putln("Py_DECREF(member);") code.putln("return res;") code.putln("bad:") @@ -3299,7 +3299,7 @@ class ToPyStructUtilityCode(object): # This is a bit of a hack, we need a forward declaration # due to the way things are ordered in the module... 
if self.forward_decl: - proto.putln(self.type.empty_declaration_code() + ';') + proto.putln(self.type.empty_declaration_code() + ';') proto.putln(self.header + ";") def inject_tree_and_scope_into(self, module_node): @@ -3327,10 +3327,10 @@ class CStructOrUnionType(CType): self.scope = scope self.typedef_flag = typedef_flag self.is_struct = kind == 'struct' - self.to_py_function = "%s_to_py_%s" % ( - Naming.convert_func_prefix, self.specialization_name()) - self.from_py_function = "%s_from_py_%s" % ( - Naming.convert_func_prefix, self.specialization_name()) + self.to_py_function = "%s_to_py_%s" % ( + Naming.convert_func_prefix, self.specialization_name()) + self.from_py_function = "%s_from_py_%s" % ( + Naming.convert_func_prefix, self.specialization_name()) self.exception_check = True self._convert_to_py_code = None self._convert_from_py_code = None @@ -3344,27 +3344,27 @@ class CStructOrUnionType(CType): return False if self._convert_to_py_code is None: - is_union = not self.is_struct - unsafe_union_types = set() - safe_union_types = set() + is_union = not self.is_struct + unsafe_union_types = set() + safe_union_types = set() for member in self.scope.var_entries: - member_type = member.type + member_type = member.type if not member_type.can_coerce_to_pyobject(env): self.to_py_function = None self._convert_to_py_code = False return False - if is_union: - if member_type.is_ptr or member_type.is_cpp_class: - unsafe_union_types.add(member_type) - else: - safe_union_types.add(member_type) - - if unsafe_union_types and (safe_union_types or len(unsafe_union_types) > 1): - # unsafe mix of safe and unsafe to convert types - self.from_py_function = None - self._convert_from_py_code = False - return False - + if is_union: + if member_type.is_ptr or member_type.is_cpp_class: + unsafe_union_types.add(member_type) + else: + safe_union_types.add(member_type) + + if unsafe_union_types and (safe_union_types or len(unsafe_union_types) > 1): + # unsafe mix of safe and unsafe to convert types + self.from_py_function = None + self._convert_from_py_code = False + return False + return True def create_to_py_utility_code(self, env): @@ -3374,9 +3374,9 @@ class CStructOrUnionType(CType): if self._convert_to_py_code is None: for member in self.scope.var_entries: member.type.create_to_py_utility_code(env) - forward_decl = self.entry.visibility != 'extern' and not self.typedef_flag - self._convert_to_py_code = ToPyStructUtilityCode(self, forward_decl, env) - + forward_decl = self.entry.visibility != 'extern' and not self.typedef_flag + self._convert_to_py_code = ToPyStructUtilityCode(self, forward_decl, env) + env.use_utility_code(self._convert_to_py_code) return True @@ -3412,12 +3412,12 @@ class CStructOrUnionType(CType): var_entries=self.scope.var_entries, funcname=self.from_py_function, ) - from .UtilityCode import CythonUtilityCode - self._convert_from_py_code = CythonUtilityCode.load( - "FromPyStructUtility" if self.is_struct else "FromPyUnionUtility", - "CConvert.pyx", - outer_module_scope=env.global_scope(), # need access to types declared in module - context=context) + from .UtilityCode import CythonUtilityCode + self._convert_from_py_code = CythonUtilityCode.load( + "FromPyStructUtility" if self.is_struct else "FromPyUnionUtility", + "CConvert.pyx", + outer_module_scope=env.global_scope(), # need access to types declared in module + context=context) env.use_utility_code(self._convert_from_py_code) return True @@ -3469,8 +3469,8 @@ class CStructOrUnionType(CType): if len(fields) != 2: return False a, b = 
fields return (a.type.is_float and b.type.is_float and - a.type.empty_declaration_code() == - b.type.empty_declaration_code()) + a.type.empty_declaration_code() == + b.type.empty_declaration_code()) def struct_nesting_depth(self): child_depths = [x.type.struct_nesting_depth() @@ -3484,22 +3484,22 @@ class CStructOrUnionType(CType): cpp_string_conversions = ("std::string", "TString", "TStringBuf") -builtin_cpp_conversions = { - # type element template params - "std::pair": 2, - "std::vector": 1, - "std::list": 1, - "std::set": 1, - "std::unordered_set": 1, - "std::map": 2, - "std::unordered_map": 2, - "std::complex": 1, - # arcadia_cpp_conversions - "TMaybe": 1, +builtin_cpp_conversions = { + # type element template params + "std::pair": 2, + "std::vector": 1, + "std::list": 1, + "std::set": 1, + "std::unordered_set": 1, + "std::map": 2, + "std::unordered_map": 2, + "std::complex": 1, + # arcadia_cpp_conversions + "TMaybe": 1, "TVector": 1, "THashMap": 2, "TMap": 2, -} +} class CppClassType(CType): # name string @@ -3519,7 +3519,7 @@ class CppClassType(CType): subtypes = ['templates'] - def __init__(self, name, scope, cname, base_classes, templates=None, template_type=None): + def __init__(self, name, scope, cname, base_classes, templates=None, template_type=None): self.name = name self.cname = cname self.scope = scope @@ -3527,11 +3527,11 @@ class CppClassType(CType): self.operators = [] self.templates = templates self.template_type = template_type - self.num_optional_templates = sum(is_optional_template_param(T) for T in templates or ()) - if templates and False: # https://github.com/cython/cython/issues/1868 - self.specializations = {tuple(zip(templates, templates)): self} - else: - self.specializations = {} + self.num_optional_templates = sum(is_optional_template_param(T) for T in templates or ()) + if templates and False: # https://github.com/cython/cython/issues/1868 + self.specializations = {tuple(zip(templates, templates)): self} + else: + self.specializations = {} self.is_cpp_string = cname in cpp_string_conversions def use_conversion_utility(self, from_or_to): @@ -3559,13 +3559,13 @@ class CppClassType(CType): def create_from_py_utility_code(self, env): if self.from_py_function is not None: return True - if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions: + if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions: X = "XYZABC" tags = [] context = {} for ix, T in enumerate(self.templates or []): - if ix >= builtin_cpp_conversions[self.cname]: - break + if ix >= builtin_cpp_conversions[self.cname]: + break if T.is_pyobject or not T.create_from_py_utility_code(env): return False tags.append(T.specialization_name()) @@ -3573,19 +3573,19 @@ class CppClassType(CType): if self.cname in cpp_string_conversions: cls = 'string' - tags = type_identifier(self), + tags = type_identifier(self), elif self.cname.startswith('std::'): cls = self.cname[5:] else: cls = 'arcadia_' + self.cname - cname = '__pyx_convert_%s_from_py_%s' % (cls, '__and_'.join(tags)) + cname = '__pyx_convert_%s_from_py_%s' % (cls, '__and_'.join(tags)) context.update({ 'cname': cname, 'maybe_unordered': self.maybe_unordered(), 'type': self.cname, }) from .UtilityCode import CythonUtilityCode - env.use_utility_code(CythonUtilityCode.load( + env.use_utility_code(CythonUtilityCode.load( cls.replace('unordered_', '') + ".from_py", "CppConvert.pyx", context=context, compiler_directives=env.directives)) self.from_py_function = cname @@ -3603,13 +3603,13 @@ class 
CppClassType(CType): def create_to_py_utility_code(self, env): if self.to_py_function is not None: return True - if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions: + if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions: X = "XYZABC" tags = [] context = {} for ix, T in enumerate(self.templates or []): - if ix >= builtin_cpp_conversions[self.cname]: - break + if ix >= builtin_cpp_conversions[self.cname]: + break if not T.create_to_py_utility_code(env): return False tags.append(T.specialization_name()) @@ -3617,60 +3617,60 @@ class CppClassType(CType): if self.cname in cpp_string_conversions: cls = 'string' - prefix = 'PyObject_' # gets specialised by explicit type casts in CoerceToPyTypeNode - tags = type_identifier(self), + prefix = 'PyObject_' # gets specialised by explicit type casts in CoerceToPyTypeNode + tags = type_identifier(self), elif self.cname.startswith('std::'): cls = self.cname[5:] - prefix = '' + prefix = '' else: cls = 'arcadia_' + self.cname prefix = '' - cname = "__pyx_convert_%s%s_to_py_%s" % (prefix, cls, "____".join(tags)) + cname = "__pyx_convert_%s%s_to_py_%s" % (prefix, cls, "____".join(tags)) context.update({ 'cname': cname, 'maybe_unordered': self.maybe_unordered(), 'type': self.cname, }) from .UtilityCode import CythonUtilityCode - env.use_utility_code(CythonUtilityCode.load( + env.use_utility_code(CythonUtilityCode.load( cls.replace('unordered_', '') + ".to_py", "CppConvert.pyx", context=context, compiler_directives=env.directives)) self.to_py_function = cname return True - def is_template_type(self): - return self.templates is not None and self.template_type is None - - def get_fused_types(self, result=None, seen=None): - if result is None: - result = [] - seen = set() - if self.namespace: - self.namespace.get_fused_types(result, seen) - if self.templates: - for T in self.templates: - T.get_fused_types(result, seen) - return result - - def specialize_here(self, pos, template_values=None): - if not self.is_template_type(): + def is_template_type(self): + return self.templates is not None and self.template_type is None + + def get_fused_types(self, result=None, seen=None): + if result is None: + result = [] + seen = set() + if self.namespace: + self.namespace.get_fused_types(result, seen) + if self.templates: + for T in self.templates: + T.get_fused_types(result, seen) + return result + + def specialize_here(self, pos, template_values=None): + if not self.is_template_type(): error(pos, "'%s' type is not a template" % self) return error_type - if len(self.templates) - self.num_optional_templates <= len(template_values) < len(self.templates): - num_defaults = len(self.templates) - len(template_values) - partial_specialization = self.declaration_code('', template_params=template_values) - # Most of the time we don't need to declare anything typed to these - # default template arguments, but when we do there's no way in C++ - # to reference this directly. However, it is common convention to - # provide a typedef in the template class that resolves to each - # template type. For now, allow the user to specify this name as - # the template parameter. - # TODO: Allow typedefs in cpp classes and search for it in this - # classes scope as a concrete name we could use. 
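# A minimal reconstruction of the conversion-name scheme used above,
# assuming specialization_name() yields plain tags like "int" or "double"
# (the cpp_string_conversions types take the separate 'string' branch):
def sketch_from_py_cname(cpp_name, element_tags):
    if cpp_name.startswith('std::'):
        cls = cpp_name[5:]
    else:
        cls = 'arcadia_' + cpp_name   # TMaybe, TVector, THashMap, TMap
    return '__pyx_convert_%s_from_py_%s' % (cls, '__and_'.join(element_tags))

# sketch_from_py_cname('std::vector', ['int'])
#   -> '__pyx_convert_vector_from_py_int'
# sketch_from_py_cname('std::map', ['int', 'double'])
#   -> '__pyx_convert_map_from_py_int__and_double'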
- template_values = template_values + [ - TemplatePlaceholderType( - "%s::%s" % (partial_specialization, param.name), True) - for param in self.templates[-num_defaults:]] + if len(self.templates) - self.num_optional_templates <= len(template_values) < len(self.templates): + num_defaults = len(self.templates) - len(template_values) + partial_specialization = self.declaration_code('', template_params=template_values) + # Most of the time we don't need to declare anything typed to these + # default template arguments, but when we do there's no way in C++ + # to reference this directly. However, it is common convention to + # provide a typedef in the template class that resolves to each + # template type. For now, allow the user to specify this name as + # the template parameter. + # TODO: Allow typedefs in cpp classes and search for it in this + # classes scope as a concrete name we could use. + template_values = template_values + [ + TemplatePlaceholderType( + "%s::%s" % (partial_specialization, param.name), True) + for param in self.templates[-num_defaults:]] if len(self.templates) != len(template_values): error(pos, "%s templated type receives %d arguments, got %d" % (self.name, len(self.templates), len(template_values))) @@ -3701,20 +3701,20 @@ class CppClassType(CType): specialized.base_classes = [b.specialize(values) for b in self.base_classes] if self.namespace is not None: specialized.namespace = self.namespace.specialize(values) - specialized.scope = self.scope.specialize(values, specialized) - if self.cname == 'std::vector': - # vector<bool> is special cased in the C++ standard, and its - # accessors do not necessarily return references to the underlying - # elements (which may be bit-packed). - # http://www.cplusplus.com/reference/vector/vector-bool/ - # Here we pretend that the various methods return bool values - # (as the actual returned values are coercable to such, and - # we don't support call expressions as lvalues). - T = values.get(self.templates[0], None) - if T and not T.is_fused and T.empty_declaration_code() == 'bool': - for bit_ref_returner in ('at', 'back', 'front'): - if bit_ref_returner in specialized.scope.entries: - specialized.scope.entries[bit_ref_returner].type.return_type = T + specialized.scope = self.scope.specialize(values, specialized) + if self.cname == 'std::vector': + # vector<bool> is special cased in the C++ standard, and its + # accessors do not necessarily return references to the underlying + # elements (which may be bit-packed). + # http://www.cplusplus.com/reference/vector/vector-bool/ + # Here we pretend that the various methods return bool values + # (as the actual returned values are coercable to such, and + # we don't support call expressions as lvalues). 
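# Background for the vector<bool> special case handled just below:
# std::vector<bool> is bit-packed, so at()/back()/front() return proxy
# references rather than bool&; pretending they return plain bool keeps
# coercions working while ruling out call expressions as lvalues.
# Minimal sketch of the patch applied to the specialized scope:
def sketch_patch_bool_vector(scope_entries, bool_type):
    # Rebind the element accessors' return type to the deduced bool type.
    for accessor in ('at', 'back', 'front'):
        if accessor in scope_entries:
            scope_entries[accessor].type.return_type = bool_type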
+ T = values.get(self.templates[0], None) + if T and not T.is_fused and T.empty_declaration_code() == 'bool': + for bit_ref_returner in ('at', 'back', 'front'): + if bit_ref_returner in specialized.scope.entries: + specialized.scope.entries[bit_ref_returner].type.return_type = T return specialized def deduce_template_params(self, actual): @@ -3724,16 +3724,16 @@ class CppClassType(CType): actual = actual.ref_base_type if self == actual: return {} - elif actual.is_cpp_class: + elif actual.is_cpp_class: self_template_type = self while getattr(self_template_type, 'template_type', None): self_template_type = self_template_type.template_type - def all_bases(cls): - yield cls - for parent in cls.base_classes: - for base in all_bases(parent): - yield base - for actual_base in all_bases(actual): + def all_bases(cls): + yield cls + for parent in cls.base_classes: + for base in all_bases(parent): + yield base + for actual_base in all_bases(actual): template_type = actual_base while getattr(template_type, 'template_type', None): template_type = template_type.template_type @@ -3746,17 +3746,17 @@ class CppClassType(CType): in zip(self.templates, actual_base.templates)], {}) else: - return {} + return {} def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0, - template_params = None): - if template_params is None: - template_params = self.templates + for_display = 0, dll_linkage = None, pyrex = 0, + template_params = None): + if template_params is None: + template_params = self.templates if self.templates: template_strings = [param.declaration_code('', for_display, None, pyrex) - for param in template_params - if not is_optional_template_param(param) and not param.is_fused] + for param in template_params + if not is_optional_template_param(param) and not param.is_fused] if for_display: brackets = "[%s]" else: @@ -3769,7 +3769,7 @@ class CppClassType(CType): else: base_code = "%s%s" % (self.cname, templates) if self.namespace is not None: - base_code = "%s::%s" % (self.namespace.empty_declaration_code(), base_code) + base_code = "%s::%s" % (self.namespace.empty_declaration_code(), base_code) base_code = public_decl(base_code, dll_linkage) return self.base_declaration_code(base_code, entity_code) @@ -3801,7 +3801,7 @@ class CppClassType(CType): if self.templates == other_type.templates: return 1 for t1, t2 in zip(self.templates, other_type.templates): - if is_optional_template_param(t1) and is_optional_template_param(t2): + if is_optional_template_param(t1) and is_optional_template_param(t2): break if not t1.same_as_resolved_type(t2): return 0 @@ -3858,9 +3858,9 @@ class CppClassType(CType): class TemplatePlaceholderType(CType): - def __init__(self, name, optional=False): + def __init__(self, name, optional=False): self.name = name - self.optional = optional + self.optional = optional def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0): @@ -3899,27 +3899,27 @@ class TemplatePlaceholderType(CType): else: return False -def is_optional_template_param(type): - return isinstance(type, TemplatePlaceholderType) and type.optional - - +def is_optional_template_param(type): + return isinstance(type, TemplatePlaceholderType) and type.optional + + class CEnumType(CIntLike, CType): # name string # cname string or None # typedef_flag boolean - # values [string], populated during declaration analysis + # values [string], populated during declaration analysis is_enum = 1 signed = 1 rank = -1 # Ranks below any integer type - def __init__(self, name, 
cname, typedef_flag, namespace=None): + def __init__(self, name, cname, typedef_flag, namespace=None): self.name = name self.cname = cname self.values = [] self.typedef_flag = typedef_flag - self.namespace = namespace - self.default_value = "(%s) 0" % self.empty_declaration_code() + self.namespace = namespace + self.default_value = "(%s) 0" % self.empty_declaration_code() def __str__(self): return self.name @@ -3933,127 +3933,127 @@ class CEnumType(CIntLike, CType): if pyrex or for_display: base_code = self.name else: - if self.namespace: - base_code = "%s::%s" % ( - self.namespace.empty_declaration_code(), self.cname) - elif self.typedef_flag: + if self.namespace: + base_code = "%s::%s" % ( + self.namespace.empty_declaration_code(), self.cname) + elif self.typedef_flag: base_code = self.cname else: base_code = "enum %s" % self.cname base_code = public_decl(base_code, dll_linkage) return self.base_declaration_code(base_code, entity_code) - def specialize(self, values): - if self.namespace: - namespace = self.namespace.specialize(values) - if namespace != self.namespace: - return CEnumType( - self.name, self.cname, self.typedef_flag, namespace) - return self - - def create_type_wrapper(self, env): - from .UtilityCode import CythonUtilityCode - env.use_utility_code(CythonUtilityCode.load( - "EnumType", "CpdefEnums.pyx", - context={"name": self.name, - "items": tuple(self.values)}, - outer_module_scope=env.global_scope())) - - -class CTupleType(CType): - # components [PyrexType] - - is_ctuple = True - - def __init__(self, cname, components): - self.cname = cname - self.components = components - self.size = len(components) - self.to_py_function = "%s_to_py_%s" % (Naming.convert_func_prefix, self.cname) - self.from_py_function = "%s_from_py_%s" % (Naming.convert_func_prefix, self.cname) - self.exception_check = True - self._convert_to_py_code = None - self._convert_from_py_code = None - - def __str__(self): - return "(%s)" % ", ".join(str(c) for c in self.components) - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - if pyrex or for_display: - return str(self) - else: - return self.base_declaration_code(self.cname, entity_code) - - def can_coerce_to_pyobject(self, env): - for component in self.components: - if not component.can_coerce_to_pyobject(env): - return False - return True - + def specialize(self, values): + if self.namespace: + namespace = self.namespace.specialize(values) + if namespace != self.namespace: + return CEnumType( + self.name, self.cname, self.typedef_flag, namespace) + return self + + def create_type_wrapper(self, env): + from .UtilityCode import CythonUtilityCode + env.use_utility_code(CythonUtilityCode.load( + "EnumType", "CpdefEnums.pyx", + context={"name": self.name, + "items": tuple(self.values)}, + outer_module_scope=env.global_scope())) + + +class CTupleType(CType): + # components [PyrexType] + + is_ctuple = True + + def __init__(self, cname, components): + self.cname = cname + self.components = components + self.size = len(components) + self.to_py_function = "%s_to_py_%s" % (Naming.convert_func_prefix, self.cname) + self.from_py_function = "%s_from_py_%s" % (Naming.convert_func_prefix, self.cname) + self.exception_check = True + self._convert_to_py_code = None + self._convert_from_py_code = None + + def __str__(self): + return "(%s)" % ", ".join(str(c) for c in self.components) + + def declaration_code(self, entity_code, + for_display = 0, dll_linkage = None, pyrex = 0): + if pyrex or for_display: + return str(self) + 
else: + return self.base_declaration_code(self.cname, entity_code) + + def can_coerce_to_pyobject(self, env): + for component in self.components: + if not component.can_coerce_to_pyobject(env): + return False + return True + def can_coerce_from_pyobject(self, env): for component in self.components: if not component.can_coerce_from_pyobject(env): return False return True - def create_to_py_utility_code(self, env): - if self._convert_to_py_code is False: - return None # tri-state-ish - - if self._convert_to_py_code is None: - for component in self.components: - if not component.create_to_py_utility_code(env): - self.to_py_function = None - self._convert_to_py_code = False - return False - - context = dict( - struct_type_decl=self.empty_declaration_code(), - components=self.components, - funcname=self.to_py_function, - size=len(self.components) - ) - self._convert_to_py_code = TempitaUtilityCode.load( - "ToPyCTupleUtility", "TypeConversion.c", context=context) - - env.use_utility_code(self._convert_to_py_code) - return True - - def create_from_py_utility_code(self, env): - if self._convert_from_py_code is False: - return None # tri-state-ish - - if self._convert_from_py_code is None: - for component in self.components: - if not component.create_from_py_utility_code(env): - self.from_py_function = None - self._convert_from_py_code = False - return False - - context = dict( - struct_type_decl=self.empty_declaration_code(), - components=self.components, - funcname=self.from_py_function, - size=len(self.components) - ) - self._convert_from_py_code = TempitaUtilityCode.load( - "FromPyCTupleUtility", "TypeConversion.c", context=context) - - env.use_utility_code(self._convert_from_py_code) - return True - + def create_to_py_utility_code(self, env): + if self._convert_to_py_code is False: + return None # tri-state-ish + + if self._convert_to_py_code is None: + for component in self.components: + if not component.create_to_py_utility_code(env): + self.to_py_function = None + self._convert_to_py_code = False + return False + + context = dict( + struct_type_decl=self.empty_declaration_code(), + components=self.components, + funcname=self.to_py_function, + size=len(self.components) + ) + self._convert_to_py_code = TempitaUtilityCode.load( + "ToPyCTupleUtility", "TypeConversion.c", context=context) + + env.use_utility_code(self._convert_to_py_code) + return True + + def create_from_py_utility_code(self, env): + if self._convert_from_py_code is False: + return None # tri-state-ish + + if self._convert_from_py_code is None: + for component in self.components: + if not component.create_from_py_utility_code(env): + self.from_py_function = None + self._convert_from_py_code = False + return False + + context = dict( + struct_type_decl=self.empty_declaration_code(), + components=self.components, + funcname=self.from_py_function, + size=len(self.components) + ) + self._convert_from_py_code = TempitaUtilityCode.load( + "FromPyCTupleUtility", "TypeConversion.c", context=context) + + env.use_utility_code(self._convert_from_py_code) + return True + def cast_code(self, expr_code): return expr_code - - -def c_tuple_type(components): - components = tuple(components) - cname = Naming.ctuple_type_prefix + type_list_identifier(components) - tuple_type = CTupleType(cname, components) - return tuple_type - - + + +def c_tuple_type(components): + components = tuple(components) + cname = Naming.ctuple_type_prefix + type_list_identifier(components) + tuple_type = CTupleType(cname, components) + return tuple_type + + class 
UnspecifiedType(PyrexType): # Used as a placeholder until the type can be determined. @@ -4159,13 +4159,13 @@ c_null_ptr_type = CNullPtrType(c_void_type) c_void_ptr_type = CPtrType(c_void_type) c_void_ptr_ptr_type = CPtrType(c_void_ptr_type) c_char_ptr_type = CPtrType(c_char_type) -c_const_char_ptr_type = CPtrType(CConstType(c_char_type)) +c_const_char_ptr_type = CPtrType(CConstType(c_char_type)) c_uchar_ptr_type = CPtrType(c_uchar_type) -c_const_uchar_ptr_type = CPtrType(CConstType(c_uchar_type)) +c_const_uchar_ptr_type = CPtrType(CConstType(c_uchar_type)) c_char_ptr_ptr_type = CPtrType(c_char_ptr_type) c_int_ptr_type = CPtrType(c_int_type) c_py_unicode_ptr_type = CPtrType(c_py_unicode_type) -c_const_py_unicode_ptr_type = CPtrType(CConstType(c_py_unicode_type)) +c_const_py_unicode_ptr_type = CPtrType(CConstType(c_py_unicode_type)) c_py_ssize_t_ptr_type = CPtrType(c_py_ssize_t_type) c_ssize_t_ptr_type = CPtrType(c_ssize_t_type) c_size_t_ptr_type = CPtrType(c_size_t_type) @@ -4270,7 +4270,7 @@ def is_promotion(src_type, dst_type): return src_type.is_float and src_type.rank <= dst_type.rank return False -def best_match(arg_types, functions, pos=None, env=None, args=None): +def best_match(arg_types, functions, pos=None, env=None, args=None): """ Given a list args of arguments and a list of functions, choose one to call which seems to be the "best" fit for this list of arguments. @@ -4293,7 +4293,7 @@ def best_match(arg_types, functions, pos=None, env=None, args=None): is not None, we also generate an error. """ # TODO: args should be a list of types, not a list of Nodes. - actual_nargs = len(arg_types) + actual_nargs = len(arg_types) candidates = [] errors = [] @@ -4338,7 +4338,7 @@ def best_match(arg_types, functions, pos=None, env=None, args=None): from .Symtab import Entry specialization = Entry( name = func.name + "[%s]" % ",".join([str(t) for t in type_list]), - cname = func.cname + "<%s>" % ",".join([t.empty_declaration_code() for t in type_list]), + cname = func.cname + "<%s>" % ",".join([t.empty_declaration_code() for t in type_list]), type = func_type.specialize(deductions), pos = func.pos) candidates.append((specialization, specialization.type)) @@ -4363,13 +4363,13 @@ def best_match(arg_types, functions, pos=None, env=None, args=None): for index, (func, func_type) in enumerate(candidates): score = [0,0,0,0,0,0,0] - for i in range(min(actual_nargs, len(func_type.args))): - src_type = arg_types[i] + for i in range(min(actual_nargs, len(func_type.args))): + src_type = arg_types[i] dst_type = func_type.args[i].type assignable = dst_type.assignable_from(src_type) - # Now take care of unprefixed string literals. So when you call a cdef + # Now take care of unprefixed string literals. So when you call a cdef # function that takes a char *, the coercion will mean that the # type will simply become bytes. 
We need to do this coercion # manually for overloaded and fused functions @@ -4387,13 +4387,13 @@ def best_match(arg_types, functions, pos=None, env=None, args=None): assignable = dst_type.assignable_from(c_src_type) if assignable: src_type = c_src_type - needed_coercions[func] = (i, dst_type) + needed_coercions[func] = (i, dst_type) if assignable: if src_type == dst_type or dst_type.same_as(src_type): - pass # score 0 + pass # score 0 elif func_type.is_strict_signature: - break # exact match requested but not found + break # exact match requested but not found elif is_promotion(src_type, dst_type): score[2] += 1 elif ((src_type.is_int and dst_type.is_int) or @@ -4412,11 +4412,11 @@ def best_match(arg_types, functions, pos=None, env=None, args=None): else: score[0] += 1 else: - error_mesg = "Invalid conversion from '%s' to '%s'" % (src_type, dst_type) + error_mesg = "Invalid conversion from '%s' to '%s'" % (src_type, dst_type) bad_types.append((func, error_mesg)) break else: - possibilities.append((score, index, func)) # so we can sort it + possibilities.append((score, index, func)) # so we can sort it if possibilities: possibilities.sort() @@ -4448,7 +4448,7 @@ def merge_template_deductions(a, b): if a is None or b is None: return None all = a - for param, value in b.items(): + for param, value in b.items(): if param in all: if a[param] != b[param]: return None @@ -4458,12 +4458,12 @@ def merge_template_deductions(a, b): def widest_numeric_type(type1, type2): - """Given two numeric types, return the narrowest type encompassing both of them. - """ - if type1.is_reference: - type1 = type1.ref_base_type - if type2.is_reference: - type2 = type2.ref_base_type + """Given two numeric types, return the narrowest type encompassing both of them. + """ + if type1.is_reference: + type1 = type1.ref_base_type + if type2.is_reference: + type2 = type2.ref_base_type if type1.is_const: type1 = type1.const_base_type if type2.is_const: @@ -4487,10 +4487,10 @@ def widest_numeric_type(type1, type2): widest_type = type1 elif type1.signed < type2.signed: widest_type = type1 - elif type1.signed > type2.signed: - widest_type = type2 - elif type1.is_typedef > type2.is_typedef: - widest_type = type1 + elif type1.signed > type2.signed: + widest_type = type2 + elif type1.is_typedef > type2.is_typedef: + widest_type = type1 else: widest_type = type2 return widest_type @@ -4508,11 +4508,11 @@ def independent_spanning_type(type1, type2): # whereas "x = True or 2" must evaluate to a type that can hold # both a boolean value and an integer, so this function works # better. 
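# Shape of the overload scoring above, as a self-contained illustration:
# each candidate accumulates a 7-slot score (exact matches add nothing;
# promotions and numeric conversions land in later slots, Python-object
# coercions in earlier ones), and lexicographic tuple sorting makes the
# cheapest conversion profile win. The scores below are made up.
possibilities = [
    ([1, 0, 0, 0, 0, 0, 0], 0, 'f(object)'),   # needs a Python coercion
    ([0, 0, 1, 0, 0, 0, 0], 1, 'f(long)'),     # one integer promotion
]
possibilities.sort()
assert possibilities[0][-1] == 'f(long)'       # promotion beats coercion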
- if type1.is_reference ^ type2.is_reference: - if type1.is_reference: - type1 = type1.ref_base_type - else: - type2 = type2.ref_base_type + if type1.is_reference ^ type2.is_reference: + if type1.is_reference: + type1 = type1.ref_base_type + else: + type2 = type2.ref_base_type if type1 == type2: return type1 elif (type1 is c_bint_type or type2 is c_bint_type) and (type1.is_numeric and type2.is_numeric): @@ -4565,10 +4565,10 @@ def _spanning_type(type1, type2): return py_object_type return type2 elif type1.is_ptr and type2.is_ptr: - if type1.base_type.is_cpp_class and type2.base_type.is_cpp_class: - common_base = widest_cpp_type(type1.base_type, type2.base_type) - if common_base: - return CPtrType(common_base) + if type1.base_type.is_cpp_class and type2.base_type.is_cpp_class: + common_base = widest_cpp_type(type1.base_type, type2.base_type) + if common_base: + return CPtrType(common_base) # incompatible pointers, void* will do as a result return c_void_ptr_type else: @@ -4586,24 +4586,24 @@ def widest_extension_type(type1, type2): if type1 is None or type2 is None: return py_object_type -def widest_cpp_type(type1, type2): - @cached_function - def bases(type): - all = set() - for base in type.base_classes: - all.add(base) - all.update(bases(base)) - return all - common_bases = bases(type1).intersection(bases(type2)) - common_bases_bases = reduce(set.union, [bases(b) for b in common_bases], set()) - candidates = [b for b in common_bases if b not in common_bases_bases] - if len(candidates) == 1: - return candidates[0] - else: - # Fall back to void* for now. - return None - - +def widest_cpp_type(type1, type2): + @cached_function + def bases(type): + all = set() + for base in type.base_classes: + all.add(base) + all.update(bases(base)) + return all + common_bases = bases(type1).intersection(bases(type2)) + common_bases_bases = reduce(set.union, [bases(b) for b in common_bases], set()) + candidates = [b for b in common_bases if b not in common_bases_bases] + if len(candidates) == 1: + return candidates[0] + else: + # Fall back to void* for now. + return None + + def simple_c_type(signed, longness, name): # Find type descriptor for simple type given name and modifiers. # Returns None if arguments don't make sense. @@ -4668,8 +4668,8 @@ def c_ptr_type(base_type): # Construct a C pointer type. 
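# The common-base search in widest_cpp_type() above, restated in plain
# Python: intersect the ancestor sets of both classes, then drop any
# common base that is itself an ancestor of another common base; a
# unique survivor wins. bases_of is assumed to return a class's full
# ancestor set (excluding the class itself), like the cached bases().
def sketch_widest_common_base(bases_of, type1, type2):
    common = bases_of(type1) & bases_of(type2)
    inherited = set()
    for base in common:
        inherited |= bases_of(base)
    candidates = [base for base in common if base not in inherited]
    return candidates[0] if len(candidates) == 1 else None  # else: void*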
if base_type is error_type: return error_type - elif base_type.is_reference: - return CPtrType(base_type.ref_base_type) + elif base_type.is_reference: + return CPtrType(base_type.ref_base_type) else: return CPtrType(base_type) @@ -4707,38 +4707,38 @@ def typecast(to_type, from_type, expr_code): else: #print "typecast: to", to_type, "from", from_type ### return to_type.cast_code(expr_code) - -def type_list_identifier(types): - return cap_length('__and_'.join(type_identifier(type) for type in types)) - -_type_identifier_cache = {} -def type_identifier(type): - decl = type.empty_declaration_code() - safe = _type_identifier_cache.get(decl) - if safe is None: - safe = decl - safe = re.sub(' +', ' ', safe) - safe = re.sub(' ([^a-zA-Z0-9_])', r'\1', safe) - safe = re.sub('([^a-zA-Z0-9_]) ', r'\1', safe) - safe = (safe.replace('__', '__dunder') - .replace('const ', '__const_') - .replace(' ', '__space_') - .replace('*', '__ptr') - .replace('&', '__ref') - .replace('[', '__lArr') - .replace(']', '__rArr') - .replace('<', '__lAng') - .replace('>', '__rAng') - .replace('(', '__lParen') - .replace(')', '__rParen') - .replace(',', '__comma_') - .replace('::', '__in_')) - safe = cap_length(re.sub('[^a-zA-Z0-9_]', lambda x: '__%X' % ord(x.group(0)), safe)) - _type_identifier_cache[decl] = safe - return safe - -def cap_length(s, max_prefix=63, max_len=1024): - if len(s) <= max_prefix: - return s + +def type_list_identifier(types): + return cap_length('__and_'.join(type_identifier(type) for type in types)) + +_type_identifier_cache = {} +def type_identifier(type): + decl = type.empty_declaration_code() + safe = _type_identifier_cache.get(decl) + if safe is None: + safe = decl + safe = re.sub(' +', ' ', safe) + safe = re.sub(' ([^a-zA-Z0-9_])', r'\1', safe) + safe = re.sub('([^a-zA-Z0-9_]) ', r'\1', safe) + safe = (safe.replace('__', '__dunder') + .replace('const ', '__const_') + .replace(' ', '__space_') + .replace('*', '__ptr') + .replace('&', '__ref') + .replace('[', '__lArr') + .replace(']', '__rArr') + .replace('<', '__lAng') + .replace('>', '__rAng') + .replace('(', '__lParen') + .replace(')', '__rParen') + .replace(',', '__comma_') + .replace('::', '__in_')) + safe = cap_length(re.sub('[^a-zA-Z0-9_]', lambda x: '__%X' % ord(x.group(0)), safe)) + _type_identifier_cache[decl] = safe + return safe + +def cap_length(s, max_prefix=63, max_len=1024): + if len(s) <= max_prefix: + return s hash_prefix = hashlib.sha256(s.encode('ascii')).hexdigest()[:6] return '%s__%s__etc' % (hash_prefix, s[:max_len-17]) diff --git a/contrib/tools/cython/Cython/Compiler/Pythran.py b/contrib/tools/cython/Cython/Compiler/Pythran.py index 8828c90c80..c02704a918 100644 --- a/contrib/tools/cython/Cython/Compiler/Pythran.py +++ b/contrib/tools/cython/Cython/Compiler/Pythran.py @@ -6,28 +6,28 @@ from .PyrexTypes import CType, CTypedefType, CStructOrUnionType import cython -try: - import pythran - pythran_is_pre_0_9 = tuple(map(int, pythran.__version__.split('.')[0:2])) < (0, 9) +try: + import pythran + pythran_is_pre_0_9 = tuple(map(int, pythran.__version__.split('.')[0:2])) < (0, 9) pythran_is_pre_0_9_6 = tuple(map(int, pythran.__version__.split('.')[0:3])) < (0, 9, 6) -except ImportError: - pythran = None - pythran_is_pre_0_9 = True +except ImportError: + pythran = None + pythran_is_pre_0_9 = True pythran_is_pre_0_9_6 = True if pythran_is_pre_0_9_6: pythran_builtins = '__builtin__' else: pythran_builtins = 'builtins' - + # Pythran/Numpy specific operations def has_np_pythran(env): - if env is None: - return False - directives = 
getattr(env, 'directives', None) - return (directives and directives.get('np_pythran', False)) + if env is None: + return False + directives = getattr(env, 'directives', None) + return (directives and directives.get('np_pythran', False)) @cython.ccall def is_pythran_supported_dtype(type_): @@ -47,10 +47,10 @@ def pythran_type(Ty, ptype="ndarray"): ctype = dtype.typedef_cname else: raise ValueError("unsupported type %s!" % dtype) - if pythran_is_pre_0_9: - return "pythonic::types::%s<%s,%d>" % (ptype,ctype, ndim) - else: - return "pythonic::types::%s<%s,pythonic::types::pshape<%s>>" % (ptype,ctype, ",".join(("long",)*ndim)) + if pythran_is_pre_0_9: + return "pythonic::types::%s<%s,%d>" % (ptype,ctype, ndim) + else: + return "pythonic::types::%s<%s,pythonic::types::pshape<%s>>" % (ptype,ctype, ",".join(("long",)*ndim)) if Ty.is_pythran_expr: return Ty.pythran_type #if Ty.is_none: @@ -66,12 +66,12 @@ def type_remove_ref(ty): def pythran_binop_type(op, tA, tB): - if op == '**': - return 'decltype(pythonic::numpy::functor::power{}(std::declval<%s>(), std::declval<%s>()))' % ( - pythran_type(tA), pythran_type(tB)) - else: - return "decltype(std::declval<%s>() %s std::declval<%s>())" % ( - pythran_type(tA), op, pythran_type(tB)) + if op == '**': + return 'decltype(pythonic::numpy::functor::power{}(std::declval<%s>(), std::declval<%s>()))' % ( + pythran_type(tA), pythran_type(tB)) + else: + return "decltype(std::declval<%s>() %s std::declval<%s>())" % ( + pythran_type(tA), op, pythran_type(tB)) def pythran_unaryop_type(op, type_): @@ -88,7 +88,7 @@ def _index_access(index_code, indices): def _index_type_code(index_with_type): idx, index_type = index_with_type if idx.is_slice: - n = 2 + int(not idx.step.is_none) + n = 2 + int(not idx.step.is_none) return "pythonic::%s::functor::slice{}(%s)" % ( pythran_builtins, ",".join(["0"]*n)) @@ -126,32 +126,32 @@ def pythran_indexing_type(type_, indices): def pythran_indexing_code(indices): return _index_access(_index_code, indices) -def np_func_to_list(func): - if not func.is_numpy_attribute: - return [] - return np_func_to_list(func.obj) + [func.attribute] - -if pythran is None: - def pythran_is_numpy_func_supported(name): - return False -else: - def pythran_is_numpy_func_supported(func): - CurF = pythran.tables.MODULES['numpy'] - FL = np_func_to_list(func) - for F in FL: - CurF = CurF.get(F, None) - if CurF is None: - return False - return True - -def pythran_functor(func): - func = np_func_to_list(func) - submodules = "::".join(func[:-1] + ["functor"]) - return "pythonic::numpy::%s::%s" % (submodules, func[-1]) - +def np_func_to_list(func): + if not func.is_numpy_attribute: + return [] + return np_func_to_list(func.obj) + [func.attribute] + +if pythran is None: + def pythran_is_numpy_func_supported(name): + return False +else: + def pythran_is_numpy_func_supported(func): + CurF = pythran.tables.MODULES['numpy'] + FL = np_func_to_list(func) + for F in FL: + CurF = CurF.get(F, None) + if CurF is None: + return False + return True + +def pythran_functor(func): + func = np_func_to_list(func) + submodules = "::".join(func[:-1] + ["functor"]) + return "pythonic::numpy::%s::%s" % (submodules, func[-1]) + def pythran_func_type(func, args): args = ",".join(("std::declval<%s>()" % pythran_type(a.type) for a in args)) - return "decltype(%s{}(%s))" % (pythran_functor(func), args) + return "decltype(%s{}(%s))" % (pythran_functor(func), args) @cython.ccall @@ -205,9 +205,9 @@ def is_pythran_buffer(type_): return (type_.is_numpy_buffer and 
is_pythran_supported_dtype(type_.dtype) and type_.mode in ("c", "strided") and not type_.cast) -def pythran_get_func_include_file(func): - func = np_func_to_list(func) - return "pythonic/numpy/%s.hpp" % "/".join(func) +def pythran_get_func_include_file(func): + func = np_func_to_list(func) + return "pythonic/numpy/%s.hpp" % "/".join(func) def include_pythran_generic(env): # Generic files @@ -215,7 +215,7 @@ def include_pythran_generic(env): env.add_include_file("pythonic/python/core.hpp") env.add_include_file("pythonic/types/bool.hpp") env.add_include_file("pythonic/types/ndarray.hpp") - env.add_include_file("pythonic/numpy/power.hpp") + env.add_include_file("pythonic/numpy/power.hpp") env.add_include_file("pythonic/%s/slice.hpp" % pythran_builtins) env.add_include_file("<new>") # for placement new @@ -223,5 +223,5 @@ def include_pythran_generic(env): env.add_include_file("pythonic/types/uint%d.hpp" % i) env.add_include_file("pythonic/types/int%d.hpp" % i) for t in ("float", "float32", "float64", "set", "slice", "tuple", "int", - "complex", "complex64", "complex128"): + "complex", "complex64", "complex128"): env.add_include_file("pythonic/types/%s.hpp" % t) diff --git a/contrib/tools/cython/Cython/Compiler/Scanning.pxd b/contrib/tools/cython/Cython/Compiler/Scanning.pxd index 8eba7f068e..59593f88a2 100644 --- a/contrib/tools/cython/Cython/Compiler/Scanning.pxd +++ b/contrib/tools/cython/Cython/Compiler/Scanning.pxd @@ -4,27 +4,27 @@ import cython from ..Plex.Scanners cimport Scanner -cdef unicode any_string_prefix, IDENT - -cdef get_lexicon() -cdef initial_compile_time_env() - +cdef unicode any_string_prefix, IDENT + +cdef get_lexicon() +cdef initial_compile_time_env() + cdef class Method: cdef object name - cdef dict kwargs - cdef readonly object __name__ # for tracing the scanner + cdef dict kwargs + cdef readonly object __name__ # for tracing the scanner + +## methods commented with '##' out are used by Parsing.py when compiled. -## methods commented with '##' out are used by Parsing.py when compiled. 
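# The attribute-chain helpers in the Pythran section above reduce a NumPy
# attribute expression to a list of names, then format both the C++
# functor and its header. Standalone restatement of the two format
# strings (np.linalg.norm would yield the path ["linalg", "norm"]):
def sketch_pythran_functor(path):
    submodules = "::".join(path[:-1] + ["functor"])
    return "pythonic::numpy::%s::%s" % (submodules, path[-1])

def sketch_pythran_include(path):
    return "pythonic/numpy/%s.hpp" % "/".join(path)

# sketch_pythran_functor(["linalg", "norm"])
#   -> "pythonic::numpy::linalg::functor::norm"
# sketch_pythran_include(["linalg", "norm"])
#   -> "pythonic/numpy/linalg/norm.hpp"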
- -@cython.final +@cython.final cdef class CompileTimeScope: cdef public dict entries cdef public CompileTimeScope outer - ##cdef declare(self, name, value) - ##cdef lookup_here(self, name) - ##cpdef lookup(self, name) + ##cdef declare(self, name, value) + ##cdef lookup_here(self, name) + ##cpdef lookup(self, name) -@cython.final +@cython.final cdef class PyrexScanner(Scanner): cdef public context cdef public list included_files @@ -53,15 +53,15 @@ cdef class PyrexScanner(Scanner): @cython.locals(current_level=cython.long, new_level=cython.long) cpdef indentation_action(self, text) #cpdef eof_action(self, text) - ##cdef next(self) - ##cdef peek(self) + ##cdef next(self) + ##cdef peek(self) #cpdef put_back(self, sy, systring) #cdef unread(self, token, value) - ##cdef bint expect(self, what, message = *) except -2 - ##cdef expect_keyword(self, what, message = *) - ##cdef expected(self, what, message = *) - ##cdef expect_indent(self) - ##cdef expect_dedent(self) - ##cdef expect_newline(self, message=*, bint ignore_semicolon=*) - ##cdef int enter_async(self) except -1 - ##cdef int exit_async(self) except -1 + ##cdef bint expect(self, what, message = *) except -2 + ##cdef expect_keyword(self, what, message = *) + ##cdef expected(self, what, message = *) + ##cdef expect_indent(self) + ##cdef expect_dedent(self) + ##cdef expect_newline(self, message=*, bint ignore_semicolon=*) + ##cdef int enter_async(self) except -1 + ##cdef int exit_async(self) except -1 diff --git a/contrib/tools/cython/Cython/Compiler/Scanning.py b/contrib/tools/cython/Cython/Compiler/Scanning.py index 78ad205d04..c721bba69b 100644 --- a/contrib/tools/cython/Cython/Compiler/Scanning.py +++ b/contrib/tools/cython/Cython/Compiler/Scanning.py @@ -5,11 +5,11 @@ from __future__ import absolute_import -import cython -cython.declare(make_lexicon=object, lexicon=object, - print_function=object, error=object, warning=object, - os=object, platform=object) - +import cython +cython.declare(make_lexicon=object, lexicon=object, + print_function=object, error=object, warning=object, + os=object, platform=object) + import os import platform @@ -27,14 +27,14 @@ scanner_dump_file = None lexicon = None - + def get_lexicon(): global lexicon if not lexicon: lexicon = make_lexicon() return lexicon - + #------------------------------------------------------------------ py_reserved_words = [ @@ -50,22 +50,22 @@ pyx_reserved_words = py_reserved_words + [ "cimport", "DEF", "IF", "ELIF", "ELSE" ] - + class Method(object): - def __init__(self, name, **kwargs): + def __init__(self, name, **kwargs): self.name = name - self.kwargs = kwargs or None - self.__name__ = name # for Plex tracing + self.kwargs = kwargs or None + self.__name__ = name # for Plex tracing def __call__(self, stream, text): - method = getattr(stream, self.name) - # self.kwargs is almost always unused => avoid call overhead - return method(text, **self.kwargs) if self.kwargs is not None else method(text) + method = getattr(stream, self.name) + # self.kwargs is almost always unused => avoid call overhead + return method(text, **self.kwargs) if self.kwargs is not None else method(text) def __copy__(self): return self # immutable, no need to copy - + def __deepcopy__(self, memo): return self # immutable, no need to copy @@ -74,7 +74,7 @@ class Method(object): class CompileTimeScope(object): - def __init__(self, outer=None): + def __init__(self, outer=None): self.entries = {} self.outer = outer @@ -100,10 +100,10 @@ class CompileTimeScope(object): else: raise - + def 
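# Method above is a small late-bound dispatcher used by the lexicon: instead
# of storing bound methods, it stores an action name and looks it up on the
# scanner at call time; kwargs is normalised to None so the common no-kwargs
# call skips **-unpacking. A self-contained sketch with a fake scanner (the
# class and attribute names below are stand-ins, not the real PyrexScanner):

class _FakeScanner(object):
    def produce_comment(self, text, strip=False):
        return text.strip() if strip else text

class _Method(object):
    def __init__(self, name, **kwargs):
        self.name = name
        self.kwargs = kwargs or None   # {} -> None enables the fast path
        self.__name__ = name           # gives Plex tracing a printable name

    def __call__(self, stream, text):
        method = getattr(stream, self.name)
        return method(text, **self.kwargs) if self.kwargs is not None else method(text)

assert _Method('produce_comment')(_FakeScanner(), ' # hi ') == ' # hi '
assert _Method('produce_comment', strip=True)(_FakeScanner(), ' # hi ') == '# hi'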
initial_compile_time_env(): benv = CompileTimeScope() - names = ('UNAME_SYSNAME', 'UNAME_NODENAME', 'UNAME_RELEASE', 'UNAME_VERSION', 'UNAME_MACHINE') + names = ('UNAME_SYSNAME', 'UNAME_NODENAME', 'UNAME_RELEASE', 'UNAME_VERSION', 'UNAME_MACHINE') for name, value in zip(names, platform.uname()): benv.declare(name, value) try: @@ -111,17 +111,17 @@ def initial_compile_time_env(): except ImportError: import builtins - names = ( - 'False', 'True', - 'abs', 'all', 'any', 'ascii', 'bin', 'bool', 'bytearray', 'bytes', - 'chr', 'cmp', 'complex', 'dict', 'divmod', 'enumerate', 'filter', - 'float', 'format', 'frozenset', 'hash', 'hex', 'int', 'len', - 'list', 'map', 'max', 'min', 'oct', 'ord', 'pow', 'range', - 'repr', 'reversed', 'round', 'set', 'slice', 'sorted', 'str', - 'sum', 'tuple', 'zip', - ### defined below in a platform independent way - # 'long', 'unicode', 'reduce', 'xrange' - ) + names = ( + 'False', 'True', + 'abs', 'all', 'any', 'ascii', 'bin', 'bool', 'bytearray', 'bytes', + 'chr', 'cmp', 'complex', 'dict', 'divmod', 'enumerate', 'filter', + 'float', 'format', 'frozenset', 'hash', 'hex', 'int', 'len', + 'list', 'map', 'max', 'min', 'oct', 'ord', 'pow', 'range', + 'repr', 'reversed', 'round', 'set', 'slice', 'sorted', 'str', + 'sum', 'tuple', 'zip', + ### defined below in a platform independent way + # 'long', 'unicode', 'reduce', 'xrange' + ) for name in names: try: @@ -129,26 +129,26 @@ def initial_compile_time_env(): except AttributeError: # ignore, likely Py3 pass - - # Py2/3 adaptations - from functools import reduce - benv.declare('reduce', reduce) - benv.declare('unicode', getattr(builtins, 'unicode', getattr(builtins, 'str'))) - benv.declare('long', getattr(builtins, 'long', getattr(builtins, 'int'))) - benv.declare('xrange', getattr(builtins, 'xrange', getattr(builtins, 'range'))) - + + # Py2/3 adaptations + from functools import reduce + benv.declare('reduce', reduce) + benv.declare('unicode', getattr(builtins, 'unicode', getattr(builtins, 'str'))) + benv.declare('long', getattr(builtins, 'long', getattr(builtins, 'int'))) + benv.declare('xrange', getattr(builtins, 'xrange', getattr(builtins, 'range'))) + denv = CompileTimeScope(benv) return denv - + #------------------------------------------------------------------ class SourceDescriptor(object): """ A SourceDescriptor should be considered immutable. """ - filename = None - + filename = None + _file_type = 'pyx' _escaped_description = None @@ -168,11 +168,11 @@ class SourceDescriptor(object): def get_escaped_description(self): if self._escaped_description is None: - esc_desc = \ + esc_desc = \ self.get_description().encode('ASCII', 'replace').decode("ASCII") # Use forward slashes on Windows since these paths - # will be used in the #line directives in the C/C++ files. - self._escaped_description = esc_desc.replace('\\', '/') + # will be used in the #line directives in the C/C++ files. 
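# initial_compile_time_env() above chains two scopes: a builtin scope holding
# the UNAME_* values and whitelisted builtins, and an empty user scope (for
# DEF constants) whose failed lookups fall through to the outer scope. A
# reduced stand-in for that chaining (not the real CompileTimeScope):

import platform

class _CTScope(object):
    def __init__(self, outer=None):
        self.entries = {}
        self.outer = outer
    def declare(self, name, value):
        self.entries[name] = value
    def lookup(self, name):
        try:
            return self.entries[name]
        except KeyError:
            if self.outer is not None:
                return self.outer.lookup(name)   # fall through to builtins
            raise

_benv = _CTScope()
_benv.declare('UNAME_SYSNAME', platform.uname()[0])
_denv = _CTScope(_benv)        # user DEF constants land in the inner scope
_denv.declare('DEBUG', 1)
assert _denv.lookup('UNAME_SYSNAME') == platform.uname()[0]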
+ self._escaped_description = esc_desc.replace('\\', '/') return self._escaped_description def __gt__(self, other): @@ -198,7 +198,7 @@ class SourceDescriptor(object): def __copy__(self): return self # immutable, no need to copy - + def __deepcopy__(self, memo): return self # immutable, no need to copy @@ -232,10 +232,10 @@ class FileSourceDescriptor(SourceDescriptor): return lines except KeyError: pass - - with Utils.open_source_file(self.filename, encoding=encoding, error_handling=error_handling) as f: + + with Utils.open_source_file(self.filename, encoding=encoding, error_handling=error_handling) as f: lines = list(f) - + if key in self._lines: self._lines[key] = lines else: @@ -272,7 +272,7 @@ class FileSourceDescriptor(SourceDescriptor): def __repr__(self): return "<FileSourceDescriptor:%s>" % self.filename - + class StringSourceDescriptor(SourceDescriptor): """ Instances of this class can be used instead of a filenames if the @@ -288,8 +288,8 @@ class StringSourceDescriptor(SourceDescriptor): if not encoding: return self.codelines else: - return [line.encode(encoding, error_handling).decode(encoding) - for line in self.codelines] + return [line.encode(encoding, error_handling).decode(encoding) + for line in self.codelines] def get_description(self): return self.name @@ -311,7 +311,7 @@ class StringSourceDescriptor(SourceDescriptor): def __repr__(self): return "<StringSourceDescriptor:%s>" % self.name - + #------------------------------------------------------------------ class PyrexScanner(Scanner): @@ -321,8 +321,8 @@ class PyrexScanner(Scanner): # compile_time_eval boolean In a true conditional compilation context # compile_time_expr boolean In a compile-time expression context - def __init__(self, file, filename, parent_scanner=None, - scope=None, context=None, source_encoding=None, parse_comments=True, initial_pos=None): + def __init__(self, file, filename, parent_scanner=None, + scope=None, context=None, source_encoding=None, parse_comments=True, initial_pos=None): Scanner.__init__(self, get_lexicon(), file, filename, initial_pos) if filename.is_python_file(): @@ -349,7 +349,7 @@ class PyrexScanner(Scanner): self.compile_time_env = initial_compile_time_env() self.compile_time_eval = 1 self.compile_time_expr = 0 - if getattr(context.options, 'compile_time_env', None): + if getattr(context.options, 'compile_time_env', None): self.compile_time_env.update(context.options.compile_time_env) self.parse_comments = parse_comments self.source_encoding = source_encoding @@ -366,18 +366,18 @@ class PyrexScanner(Scanner): if self.parse_comments: self.produce('commentline', text) - def strip_underscores(self, text, symbol): - self.produce(symbol, text.replace('_', '')) - + def strip_underscores(self, text, symbol): + self.produce(symbol, text.replace('_', '')) + def current_level(self): return self.indentation_stack[-1] def open_bracket_action(self, text): - self.bracket_nesting_level += 1 + self.bracket_nesting_level += 1 return text def close_bracket_action(self, text): - self.bracket_nesting_level -= 1 + self.bracket_nesting_level -= 1 return text def newline_action(self, text): @@ -453,7 +453,7 @@ class PyrexScanner(Scanner): sy, systring = self.read() except UnrecognizedInput: self.error("Unrecognized character") - return # just a marker, error() always raises + return # just a marker, error() always raises if sy == IDENT: if systring in self.keywords: if systring == u'print' and print_function in self.context.future_directives: @@ -462,7 +462,7 @@ class PyrexScanner(Scanner): 
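# strip_underscores() above is the PEP-515 half of numeric literal scanning:
# the lexicon validates where underscores may appear, and the action then
# re-emits the token text with the separators removed so later stages only
# see plain digits (invalid placements are rejected earlier; see the
# TestGrammar cases further down in this diff). A trivial sketch:

def sketch_strip_underscores(text):
    return text.replace('_', '')

assert sketch_strip_underscores('1_000_000') == '1000000'
assert int(sketch_strip_underscores('0xffff_ffff'), 16) == 0xffffffff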
self.keywords.discard('exec') else: sy = systring - systring = self.context.intern_ustring(systring) + systring = self.context.intern_ustring(systring) self.sy = sy self.systring = systring if False: # debug_scanner: @@ -490,27 +490,27 @@ class PyrexScanner(Scanner): # This method should be added to Plex self.queue.insert(0, (token, value)) - def error(self, message, pos=None, fatal=True): + def error(self, message, pos=None, fatal=True): if pos is None: pos = self.position() if self.sy == 'INDENT': - error(pos, "Possible inconsistent indentation") + error(pos, "Possible inconsistent indentation") err = error(pos, message) if fatal: raise err - def expect(self, what, message=None): + def expect(self, what, message=None): if self.sy == what: self.next() else: self.expected(what, message) - def expect_keyword(self, what, message=None): + def expect_keyword(self, what, message=None): if self.sy == IDENT and self.systring == what: self.next() else: self.expected(what, message) - def expected(self, what, message=None): + def expected(self, what, message=None): if message: self.error(message) else: @@ -521,10 +521,10 @@ class PyrexScanner(Scanner): self.error("Expected '%s', found '%s'" % (what, found)) def expect_indent(self): - self.expect('INDENT', "Expected an increase in indentation level") + self.expect('INDENT', "Expected an increase in indentation level") def expect_dedent(self): - self.expect('DEDENT', "Expected a decrease in indentation level") + self.expect('DEDENT', "Expected a decrease in indentation level") def expect_newline(self, message="Expected a newline", ignore_semicolon=False): # Expect either a newline or end of file @@ -536,18 +536,18 @@ class PyrexScanner(Scanner): self.expect('NEWLINE', message) if useless_trailing_semicolon is not None: warning(useless_trailing_semicolon, "useless trailing semicolon") - - def enter_async(self): - self.async_enabled += 1 - if self.async_enabled == 1: - self.keywords.add('async') - self.keywords.add('await') - - def exit_async(self): - assert self.async_enabled > 0 - self.async_enabled -= 1 - if not self.async_enabled: - self.keywords.discard('await') - self.keywords.discard('async') - if self.sy in ('async', 'await'): - self.sy, self.systring = IDENT, self.context.intern_ustring(self.sy) + + def enter_async(self): + self.async_enabled += 1 + if self.async_enabled == 1: + self.keywords.add('async') + self.keywords.add('await') + + def exit_async(self): + assert self.async_enabled > 0 + self.async_enabled -= 1 + if not self.async_enabled: + self.keywords.discard('await') + self.keywords.discard('async') + if self.sy in ('async', 'await'): + self.sy, self.systring = IDENT, self.context.intern_ustring(self.sy) diff --git a/contrib/tools/cython/Cython/Compiler/StringEncoding.py b/contrib/tools/cython/Cython/Compiler/StringEncoding.py index ee2df88a2f..c37e8aab79 100644 --- a/contrib/tools/cython/Cython/Compiler/StringEncoding.py +++ b/contrib/tools/cython/Cython/Compiler/StringEncoding.py @@ -8,10 +8,10 @@ import re import sys if sys.version_info[0] >= 3: - _unicode, _str, _bytes, _unichr = str, str, bytes, chr + _unicode, _str, _bytes, _unichr = str, str, bytes, chr IS_PYTHON3 = True else: - _unicode, _str, _bytes, _unichr = unicode, str, str, unichr + _unicode, _str, _bytes, _unichr = unicode, str, str, unichr IS_PYTHON3 = False empty_bytes = _bytes() @@ -39,13 +39,13 @@ class UnicodeLiteralBuilder(object): # wide Unicode character on narrow platform => replace # by surrogate pair char_number -= 0x10000 - self.chars.append( 
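# enter_async()/exit_async() above make 'async' and 'await' context-sensitive
# keywords: they are only treated as keywords inside 'async def' bodies,
# tracked with a nesting counter, and on the outermost exit the current
# lookahead token is demoted back to a plain identifier. The counter logic,
# reduced to a standalone sketch:

class _AsyncKeywords(object):
    def __init__(self):
        self.keywords = set()
        self.async_enabled = 0

    def enter_async(self):
        self.async_enabled += 1
        if self.async_enabled == 1:       # only the outermost entry adds them
            self.keywords.update(('async', 'await'))

    def exit_async(self):
        assert self.async_enabled > 0
        self.async_enabled -= 1
        if not self.async_enabled:        # nested exits keep them enabled
            self.keywords.difference_update(('async', 'await'))

_s = _AsyncKeywords()
_s.enter_async(); _s.enter_async(); _s.exit_async()
assert 'await' in _s.keywords             # still inside an async function
_s.exit_async()
assert 'await' not in _s.keywords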
_unichr((char_number // 1024) + 0xD800) ) - self.chars.append( _unichr((char_number % 1024) + 0xDC00) ) + self.chars.append( _unichr((char_number // 1024) + 0xD800) ) + self.chars.append( _unichr((char_number % 1024) + 0xDC00) ) else: - self.chars.append( _unichr(char_number) ) + self.chars.append( _unichr(char_number) ) else: def append_charval(self, char_number): - self.chars.append( _unichr(char_number) ) + self.chars.append( _unichr(char_number) ) def append_uescape(self, char_number, escape_string): self.append_charval(char_number) @@ -71,14 +71,14 @@ class BytesLiteralBuilder(object): self.chars.append(characters) def append_charval(self, char_number): - self.chars.append( _unichr(char_number).encode('ISO-8859-1') ) + self.chars.append( _unichr(char_number).encode('ISO-8859-1') ) def append_uescape(self, char_number, escape_string): self.append(escape_string) def getstring(self): # this *must* return a byte string! - return bytes_literal(join_bytes(self.chars), self.target_encoding) + return bytes_literal(join_bytes(self.chars), self.target_encoding) def getchar(self): # this *must* return a byte string! @@ -135,10 +135,10 @@ class EncodedString(_unicode): def contains_surrogates(self): return string_contains_surrogates(self) - def as_utf8_string(self): - return bytes_literal(self.utf8encode(), 'utf8') + def as_utf8_string(self): + return bytes_literal(self.utf8encode(), 'utf8') + - def string_contains_surrogates(ustring): """ Check if the unicode string contains surrogate code points @@ -207,18 +207,18 @@ class BytesLiteral(_bytes): is_unicode = False - def as_c_string_literal(self): - value = split_string_literal(escape_byte_string(self)) - return '"%s"' % value - - -def bytes_literal(s, encoding): - assert isinstance(s, bytes) - s = BytesLiteral(s) - s.encoding = encoding - return s - - + def as_c_string_literal(self): + value = split_string_literal(escape_byte_string(self)) + return '"%s"' % value + + +def bytes_literal(s, encoding): + assert isinstance(s, bytes) + s = BytesLiteral(s) + s.encoding = encoding + return s + + def encoded_string(s, encoding): assert isinstance(s, (_unicode, bytes)) s = EncodedString(s) @@ -338,7 +338,7 @@ def split_string_literal(s, limit=2000): def encode_pyunicode_string(s): """Create Py_UNICODE[] representation of a given unicode string. """ - s = list(map(ord, s)) + [0] + s = list(map(ord, s)) + [0] if sys.maxunicode >= 0x10000: # Wide build or Py3.3 utf16, utf32 = [], s @@ -360,4 +360,4 @@ def encode_pyunicode_string(s): if utf16 == utf32: utf16 = [] - return ",".join(map(_unicode, utf16)), ",".join(map(_unicode, utf32)) + return ",".join(map(_unicode, utf16)), ",".join(map(_unicode, utf32)) diff --git a/contrib/tools/cython/Cython/Compiler/Symtab.py b/contrib/tools/cython/Cython/Compiler/Symtab.py index dabe67f573..7361a55aea 100644 --- a/contrib/tools/cython/Cython/Compiler/Symtab.py +++ b/contrib/tools/cython/Cython/Compiler/Symtab.py @@ -7,12 +7,12 @@ from __future__ import absolute_import import re import copy import operator - -try: - import __builtin__ as builtins -except ImportError: # Py3 - import builtins - + +try: + import __builtin__ as builtins +except ImportError: # Py3 + import builtins + from .Errors import warning, error, InternalError from .StringEncoding import EncodedString from . 
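# The narrow-build branch of append_charval() above stores characters beyond
# U+FFFF as UTF-16 surrogate pairs; the //1024 and %1024 arithmetic is the
# standard surrogate encoding. Spelled out and checked on a known code point:

def sketch_surrogate_pair(char_number):
    assert 0x10000 <= char_number <= 0x10FFFF
    char_number -= 0x10000
    high = 0xD800 + (char_number // 1024)   # lead surrogate
    low = 0xDC00 + (char_number % 1024)     # trail surrogate
    return high, low

# U+1F600 is encoded as the pair D83D DE00 in UTF-16
assert sketch_surrogate_pair(0x1F600) == (0xD83D, 0xDE00)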
import Options, Naming @@ -21,8 +21,8 @@ from .PyrexTypes import py_object_type, unspecified_type from .TypeSlots import ( pyfunction_signature, pymethod_signature, richcmp_special_methods, get_special_method_signature, get_property_accessor_signature) -from . import Future - +from . import Future + from . import Code iso_c99_keywords = set( @@ -232,13 +232,13 @@ class Entry(object): def all_entries(self): return [self] + self.inner_entries - def __lt__(left, right): - if isinstance(left, Entry) and isinstance(right, Entry): - return (left.name, left.cname) < (right.name, right.cname) - else: - return NotImplemented + def __lt__(left, right): + if isinstance(left, Entry) and isinstance(right, Entry): + return (left.name, left.cname) < (right.name, right.cname) + else: + return NotImplemented + - class InnerEntry(Entry): """ An entry in a closure scope that represents the real outer Entry. @@ -326,7 +326,7 @@ class Scope(object): self.name = name self.outer_scope = outer_scope self.parent_scope = parent_scope - mangled_name = "%d%s_" % (len(name), name.replace('.', '_dot_')) + mangled_name = "%d%s_" % (len(name), name.replace('.', '_dot_')) qual_scope = self.qualifying_scope() if qual_scope: self.qualified_name = qual_scope.qualify_name(name) @@ -361,7 +361,7 @@ class Scope(object): def merge_in(self, other, merge_unused=True, whitelist=None): # Use with care... entries = [] - for name, entry in other.entries.items(): + for name, entry in other.entries.items(): if not whitelist or name in whitelist: if entry.used or merge_unused: entries.append((name, entry)) @@ -463,11 +463,11 @@ class Scope(object): if cpp_override_allowed: # C++ function/method overrides with different signatures are ok. - pass + pass elif self.is_cpp_class_scope and entries[name].is_inherited: # Likewise ignore inherited classes. pass - elif visibility == 'extern': + elif visibility == 'extern': # Silenced outside of "cdef extern" blocks, until we have a safe way to # prevent pxd-defined cpdef functions from ending up here. 
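# The mangled_name computed in Scope.__init__ above is length-prefixed, much
# like C++ name mangling: the leading decimal length keeps concatenated scope
# prefixes unambiguous, and '.' (not valid in C identifiers) is rewritten to
# '_dot_'. Note the length is taken before the replacement. A sketch:

def sketch_mangled_scope_name(name):
    return "%d%s_" % (len(name), name.replace('.', '_dot_'))

assert sketch_mangled_scope_name('spam') == '4spam_'
assert sketch_mangled_scope_name('pkg.mod') == '7pkg_dot_mod_'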
warning(pos, "'%s' redeclared " % name, 1 if self.in_cinclude else 0) @@ -531,19 +531,19 @@ class Scope(object): def declare_typedef(self, name, base_type, pos, cname = None, visibility = 'private', api = 0): if not cname: - if self.in_cinclude or (visibility != 'private' or api): + if self.in_cinclude or (visibility != 'private' or api): cname = name else: cname = self.mangle(Naming.type_prefix, name) try: - if self.is_cpp_class_scope: - namespace = self.outer_scope.lookup(self.name).type - else: - namespace = None + if self.is_cpp_class_scope: + namespace = self.outer_scope.lookup(self.name).type + else: + namespace = None type = PyrexTypes.create_typedef_type(name, base_type, cname, - (visibility == 'extern'), - namespace) - except ValueError as e: + (visibility == 'extern'), + namespace) + except ValueError as e: error(pos, e.args[0]) type = PyrexTypes.error_type entry = self.declare_type(name, type, pos, cname, @@ -582,8 +582,8 @@ class Scope(object): if scope: entry.type.scope = scope self.type_entries.append(entry) - if self.is_cpp_class_scope: - entry.type.namespace = self.outer_scope.lookup(self.name).type + if self.is_cpp_class_scope: + entry.type.namespace = self.outer_scope.lookup(self.name).type return entry def declare_cpp_class(self, name, scope, @@ -633,7 +633,7 @@ class Scope(object): else: declare_inherited_attributes(entry, base_class.base_classes) entry.type.scope.declare_inherited_cpp_attributes(base_class) - if scope: + if scope: declare_inherited_attributes(entry, base_classes) scope.declare_var(name="this", cname="this", type=PyrexTypes.CPtrType(entry.type), pos=entry.pos) if self.is_cpp_class_scope: @@ -654,16 +654,16 @@ class Scope(object): visibility = 'private', api = 0, create_wrapper = 0): if name: if not cname: - if (self.in_cinclude or visibility == 'public' - or visibility == 'extern' or api): + if (self.in_cinclude or visibility == 'public' + or visibility == 'extern' or api): cname = name else: cname = self.mangle(Naming.type_prefix, name) - if self.is_cpp_class_scope: - namespace = self.outer_scope.lookup(self.name).type - else: - namespace = None - type = PyrexTypes.CEnumType(name, cname, typedef_flag, namespace) + if self.is_cpp_class_scope: + namespace = self.outer_scope.lookup(self.name).type + else: + namespace = None + type = PyrexTypes.CEnumType(name, cname, typedef_flag, namespace) else: type = PyrexTypes.c_anon_enum_type entry = self.declare_type(name, type, pos, cname = cname, @@ -673,9 +673,9 @@ class Scope(object): self.sue_entries.append(entry) return entry - def declare_tuple_type(self, pos, components): - return self.outer_scope.declare_tuple_type(pos, components) - + def declare_tuple_type(self, pos, components): + return self.outer_scope.declare_tuple_type(pos, components) + def declare_var(self, name, type, pos, cname = None, visibility = 'private', api = 0, in_pxd = 0, is_cdef = 0): @@ -751,8 +751,8 @@ class Scope(object): self.pyfunc_entries.append(entry) def declare_cfunction(self, name, type, pos, - cname=None, visibility='private', api=0, in_pxd=0, - defining=0, modifiers=(), utility_code=None, overridable=False): + cname=None, visibility='private', api=0, in_pxd=0, + defining=0, modifiers=(), utility_code=None, overridable=False): # Add an entry for a C function. 
if not cname: if visibility != 'private' or api: @@ -761,18 +761,18 @@ class Scope(object): cname = self.mangle(Naming.func_prefix, name) entry = self.lookup_here(name) if entry: - if not in_pxd and visibility != entry.visibility and visibility == 'extern': - # Previously declared, but now extern => treat this - # as implementing the function, using the new cname - defining = True - visibility = entry.visibility - entry.cname = cname - entry.func_cname = cname + if not in_pxd and visibility != entry.visibility and visibility == 'extern': + # Previously declared, but now extern => treat this + # as implementing the function, using the new cname + defining = True + visibility = entry.visibility + entry.cname = cname + entry.func_cname = cname if visibility != 'private' and visibility != entry.visibility: - warning(pos, "Function '%s' previously declared as '%s', now as '%s'" % (name, entry.visibility, visibility), 1) - if overridable != entry.is_overridable: - warning(pos, "Function '%s' previously declared as '%s'" % ( - name, 'cpdef' if overridable else 'cdef'), 1) + warning(pos, "Function '%s' previously declared as '%s', now as '%s'" % (name, entry.visibility, visibility), 1) + if overridable != entry.is_overridable: + warning(pos, "Function '%s' previously declared as '%s'" % ( + name, 'cpdef' if overridable else 'cdef'), 1) if entry.type.same_as(type): # Fix with_gil vs nogil. entry.type = entry.type.with_with_gil(type.with_gil) @@ -805,7 +805,7 @@ class Scope(object): else: entry = self.add_cfunction(name, type, pos, cname, visibility, modifiers) entry.func_cname = cname - entry.is_overridable = overridable + entry.is_overridable = overridable if in_pxd and visibility != 'extern': entry.defined_in_pxd = 1 if api: @@ -819,14 +819,14 @@ class Scope(object): if utility_code: assert not entry.utility_code, "duplicate utility code definition in entry %s (%s)" % (name, cname) entry.utility_code = utility_code - if overridable: - # names of cpdef functions can be used as variables and can be assigned to - var_entry = Entry(name, cname, py_object_type) # FIXME: cname? + if overridable: + # names of cpdef functions can be used as variables and can be assigned to + var_entry = Entry(name, cname, py_object_type) # FIXME: cname? 
var_entry.qualified_name = self.qualify_name(name) - var_entry.is_variable = 1 - var_entry.is_pyglobal = 1 - var_entry.scope = entry.scope - entry.as_variable = var_entry + var_entry.is_variable = 1 + var_entry.is_pyglobal = 1 + var_entry.scope = entry.scope + entry.as_variable = var_entry type.entry = entry return entry @@ -903,34 +903,34 @@ class Scope(object): obj_type = operands[0].type method = obj_type.scope.lookup("operator%s" % operator) if method is not None: - arg_types = [arg.type for arg in operands[1:]] - res = PyrexTypes.best_match([arg.type for arg in operands[1:]], - method.all_alternatives()) + arg_types = [arg.type for arg in operands[1:]] + res = PyrexTypes.best_match([arg.type for arg in operands[1:]], + method.all_alternatives()) if res is not None: return res function = self.lookup("operator%s" % operator) - function_alternatives = [] - if function is not None: - function_alternatives = function.all_alternatives() - - # look-up nonmember methods listed within a class - method_alternatives = [] - if len(operands)==2: # binary operators only - for n in range(2): - if operands[n].type.is_cpp_class: - obj_type = operands[n].type - method = obj_type.scope.lookup("operator%s" % operator) - if method is not None: - method_alternatives += method.all_alternatives() - - if (not method_alternatives) and (not function_alternatives): + function_alternatives = [] + if function is not None: + function_alternatives = function.all_alternatives() + + # look-up nonmember methods listed within a class + method_alternatives = [] + if len(operands)==2: # binary operators only + for n in range(2): + if operands[n].type.is_cpp_class: + obj_type = operands[n].type + method = obj_type.scope.lookup("operator%s" % operator) + if method is not None: + method_alternatives += method.all_alternatives() + + if (not method_alternatives) and (not function_alternatives): return None - - # select the unique alternatives - all_alternatives = list(set(method_alternatives + function_alternatives)) - - return PyrexTypes.best_match([arg.type for arg in operands], - all_alternatives) + + # select the unique alternatives + all_alternatives = list(set(method_alternatives + function_alternatives)) + + return PyrexTypes.best_match([arg.type for arg in operands], + all_alternatives) def lookup_operator_for_types(self, pos, operator, types): from .Nodes import Node @@ -942,9 +942,9 @@ class Scope(object): def use_utility_code(self, new_code): self.global_scope().use_utility_code(new_code) - def use_entry_utility_code(self, entry): - self.global_scope().use_entry_utility_code(entry) - + def use_entry_utility_code(self, entry): + self.global_scope().use_entry_utility_code(entry) + def defines_any(self, names): # Test whether any of the given names are defined in this scope. 
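# lookup_operator() above gathers C++ operator candidates from three places:
# member operators on the first operand's class, free functions named
# 'operator<op>', and, for binary operators, members on either operand; the
# deduplicated union then goes through PyrexTypes.best_match overload
# resolution. A schematic of the gathering step, with stand-in lookup
# callables (lookup_member/lookup_free are assumptions, not the real API):

def sketch_gather_operator_alternatives(operator, operand_types,
                                        lookup_member, lookup_free):
    alternatives = list(lookup_free('operator%s' % operator))
    if len(operand_types) == 2:                 # binary operators only
        for operand_type in operand_types:
            alternatives += lookup_member(operand_type, 'operator%s' % operator)
    return sorted(set(alternatives))            # unique candidates

assert sketch_gather_operator_alternatives(
    '+', ['Vec', 'Vec'],
    lookup_member=lambda t, n: ['Vec::operator+(Vec)'],
    lookup_free=lambda n: ['operator+(Vec, Vec)'],
) == ['Vec::operator+(Vec)', 'operator+(Vec, Vec)']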
for name in names: @@ -970,8 +970,8 @@ class Scope(object): else: return outer.is_cpp() - def add_include_file(self, filename, verbatim_include=None, late=False): - self.outer_scope.add_include_file(filename, verbatim_include, late) + def add_include_file(self, filename, verbatim_include=None, late=False): + self.outer_scope.add_include_file(filename, verbatim_include, late) class PreImportScope(Scope): @@ -1000,16 +1000,16 @@ class BuiltinScope(Scope): Scope.__init__(self, "__builtin__", PreImportScope(), None) self.type_names = {} - for name, definition in sorted(self.builtin_entries.items()): + for name, definition in sorted(self.builtin_entries.items()): cname, type = definition self.declare_var(name, type, None, cname) - def lookup(self, name, language_level=None, str_is_str=None): - # 'language_level' and 'str_is_str' are passed by ModuleScope - if name == 'str': - if str_is_str is None: - str_is_str = language_level in (None, 2) - if not str_is_str: + def lookup(self, name, language_level=None, str_is_str=None): + # 'language_level' and 'str_is_str' are passed by ModuleScope + if name == 'str': + if str_is_str is None: + str_is_str = language_level in (None, 2) + if not str_is_str: name = 'unicode' return Scope.lookup(self, name) @@ -1023,12 +1023,12 @@ class BuiltinScope(Scope): else: warning(pos, "undeclared name not builtin: %s" % name, 2) - def declare_builtin_cfunction(self, name, type, cname, python_equiv=None, utility_code=None): + def declare_builtin_cfunction(self, name, type, cname, python_equiv=None, utility_code=None): # If python_equiv == "*", the Python equivalent has the same name # as the entry, otherwise it has the name specified by python_equiv. name = EncodedString(name) entry = self.declare_cfunction(name, type, None, cname, visibility='extern', - utility_code=utility_code) + utility_code=utility_code) if python_equiv: if python_equiv == "*": python_equiv = name @@ -1058,14 +1058,14 @@ class BuiltinScope(Scope): var_entry = Entry(name = entry.name, type = self.lookup('type').type, # make sure "type" is the first type declared... pos = entry.pos, - cname = entry.type.typeptr_cname) + cname = entry.type.typeptr_cname) var_entry.qualified_name = self.qualify_name(name) var_entry.is_variable = 1 var_entry.is_cglobal = 1 var_entry.is_readonly = 1 var_entry.is_builtin = 1 var_entry.utility_code = utility_code - var_entry.scope = self + var_entry.scope = self if Options.cache_builtins: var_entry.is_const = True entry.as_variable = var_entry @@ -1134,7 +1134,7 @@ class ModuleScope(Scope): is_module_scope = 1 has_import_star = 0 is_cython_builtin = 0 - old_style_globals = 0 + old_style_globals = 0 def __init__(self, name, parent_module, context): from . 
import Builtin @@ -1168,7 +1168,7 @@ class ModuleScope(Scope): self.cached_builtins = [] self.undeclared_cached_builtins = [] self.namespace_cname = self.module_cname - self._cached_tuple_types = {} + self._cached_tuple_types = {} for var_name in ['__builtins__', '__name__', '__file__', '__doc__', '__path__', '__spec__', '__loader__', '__package__', '__cached__']: self.declare_var(EncodedString(var_name), py_object_type, None) @@ -1180,39 +1180,39 @@ class ModuleScope(Scope): def global_scope(self): return self - def lookup(self, name, language_level=None, str_is_str=None): + def lookup(self, name, language_level=None, str_is_str=None): entry = self.lookup_here(name) if entry is not None: return entry - if language_level is None: - language_level = self.context.language_level if self.context is not None else 3 - if str_is_str is None: - str_is_str = language_level == 2 or ( - self.context is not None and Future.unicode_literals not in self.context.future_directives) - - return self.outer_scope.lookup(name, language_level=language_level, str_is_str=str_is_str) - - def declare_tuple_type(self, pos, components): - components = tuple(components) - try: - ttype = self._cached_tuple_types[components] - except KeyError: - ttype = self._cached_tuple_types[components] = PyrexTypes.c_tuple_type(components) - cname = ttype.cname - entry = self.lookup_here(cname) - if not entry: - scope = StructOrUnionScope(cname) - for ix, component in enumerate(components): - scope.declare_var(name="f%s" % ix, type=component, pos=pos) - struct_entry = self.declare_struct_or_union( - cname + '_struct', 'struct', scope, typedef_flag=True, pos=pos, cname=cname) - self.type_entries.remove(struct_entry) - ttype.struct_entry = struct_entry - entry = self.declare_type(cname, ttype, pos, cname) - ttype.entry = entry - return entry - + if language_level is None: + language_level = self.context.language_level if self.context is not None else 3 + if str_is_str is None: + str_is_str = language_level == 2 or ( + self.context is not None and Future.unicode_literals not in self.context.future_directives) + + return self.outer_scope.lookup(name, language_level=language_level, str_is_str=str_is_str) + + def declare_tuple_type(self, pos, components): + components = tuple(components) + try: + ttype = self._cached_tuple_types[components] + except KeyError: + ttype = self._cached_tuple_types[components] = PyrexTypes.c_tuple_type(components) + cname = ttype.cname + entry = self.lookup_here(cname) + if not entry: + scope = StructOrUnionScope(cname) + for ix, component in enumerate(components): + scope.declare_var(name="f%s" % ix, type=component, pos=pos) + struct_entry = self.declare_struct_or_union( + cname + '_struct', 'struct', scope, typedef_flag=True, pos=pos, cname=cname) + self.type_entries.remove(struct_entry) + ttype.struct_entry = struct_entry + entry = self.declare_type(cname, ttype, pos, cname) + ttype.entry = entry + return entry + def declare_builtin(self, name, pos): if not hasattr(builtins, name) \ and name not in Code.non_portable_builtins_map \ @@ -1233,10 +1233,10 @@ class ModuleScope(Scope): for entry in self.cached_builtins: if entry.name == name: return entry - if name == 'globals' and not self.old_style_globals: - return self.outer_scope.lookup('__Pyx_Globals') - else: - entry = self.declare(None, None, py_object_type, pos, 'private') + if name == 'globals' and not self.old_style_globals: + return self.outer_scope.lookup('__Pyx_Globals') + else: + entry = self.declare(None, None, py_object_type, pos, 'private') if 
Options.cache_builtins and name not in Code.uncachable_builtins: entry.is_builtin = 1 entry.is_const = 1 # cached @@ -1255,49 +1255,49 @@ class ModuleScope(Scope): # relative imports relative to this module's parent. # Finds and parses the module's .pxd file if the module # has not been referenced before. - relative_to = None - absolute_fallback = False - if relative_level is not None and relative_level > 0: - # explicit relative cimport - # error of going beyond top-level is handled in cimport node - relative_to = self - while relative_level > 0 and relative_to: - relative_to = relative_to.parent_module - relative_level -= 1 - elif relative_level != 0: - # -1 or None: try relative cimport first, then absolute - relative_to = self.parent_module - absolute_fallback = True - + relative_to = None + absolute_fallback = False + if relative_level is not None and relative_level > 0: + # explicit relative cimport + # error of going beyond top-level is handled in cimport node + relative_to = self + while relative_level > 0 and relative_to: + relative_to = relative_to.parent_module + relative_level -= 1 + elif relative_level != 0: + # -1 or None: try relative cimport first, then absolute + relative_to = self.parent_module + absolute_fallback = True + module_scope = self.global_scope() return module_scope.context.find_module( - module_name, relative_to=relative_to, pos=pos, absolute_fallback=absolute_fallback) + module_name, relative_to=relative_to, pos=pos, absolute_fallback=absolute_fallback) def find_submodule(self, name): # Find and return scope for a submodule of this module, # creating a new empty one if necessary. Doesn't parse .pxd. - if '.' in name: - name, submodule = name.split('.', 1) - else: - submodule = None + if '.' in name: + name, submodule = name.split('.', 1) + else: + submodule = None scope = self.lookup_submodule(name) if not scope: - scope = ModuleScope(name, parent_module=self, context=self.context) + scope = ModuleScope(name, parent_module=self, context=self.context) self.module_entries[name] = scope - if submodule: - scope = scope.find_submodule(submodule) + if submodule: + scope = scope.find_submodule(submodule) return scope def lookup_submodule(self, name): # Return scope for submodule of this module, or None. - if '.' in name: - name, submodule = name.split('.', 1) - else: - submodule = None - module = self.module_entries.get(name, None) - if submodule and module is not None: - module = module.lookup_submodule(submodule) - return module + if '.' in name: + name, submodule = name.split('.', 1) + else: + submodule = None + module = self.module_entries.get(name, None) + if submodule and module is not None: + module = module.lookup_submodule(submodule) + return module def add_include_file(self, filename, verbatim_include=None, late=False): """ @@ -1438,8 +1438,8 @@ class ModuleScope(Scope): return entry def declare_cfunction(self, name, type, pos, - cname=None, visibility='private', api=0, in_pxd=0, - defining=0, modifiers=(), utility_code=None, overridable=False): + cname=None, visibility='private', api=0, in_pxd=0, + defining=0, modifiers=(), utility_code=None, overridable=False): if not defining and 'inline' in modifiers: # TODO(github/1736): Make this an error. 
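# find_module() above resolves relative cimports by walking parent_module
# links: an explicit level ('from .. cimport x' has level 2) climbs that many
# packages before lookup, while -1/None keeps the legacy behaviour of trying
# a relative lookup first and falling back to an absolute one. A sketch of
# the walk over a toy parent chain (a list index stands in for parent_module):

def sketch_walk_relative(module_chain, relative_level):
    # module_chain[0] is the importing module, [1] its package, and so on
    relative_to = 0
    while relative_level > 0 and relative_to is not None:
        relative_to = relative_to + 1 if relative_to + 1 < len(module_chain) else None
        relative_level -= 1
    return None if relative_to is None else module_chain[relative_to]

chain = ['pkg.sub.mod', 'pkg.sub', 'pkg']
assert sketch_walk_relative(chain, 1) == 'pkg.sub'   # from . cimport ...
assert sketch_walk_relative(chain, 2) == 'pkg'       # from .. cimport ...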
warning(pos, "Declarations should not be declared inline.", 1) @@ -1449,8 +1449,8 @@ class ModuleScope(Scope): cname = name else: cname = self.mangle(Naming.func_prefix, name) - if visibility == 'extern' and type.optional_arg_count: - error(pos, "Extern functions cannot have default arguments values.") + if visibility == 'extern' and type.optional_arg_count: + error(pos, "Extern functions cannot have default arguments values.") entry = self.lookup_here(name) if entry and entry.defined_in_pxd: if entry.visibility != "private": @@ -1461,9 +1461,9 @@ class ModuleScope(Scope): entry.func_cname = cname entry = Scope.declare_cfunction( self, name, type, pos, - cname=cname, visibility=visibility, api=api, in_pxd=in_pxd, - defining=defining, modifiers=modifiers, utility_code=utility_code, - overridable=overridable) + cname=cname, visibility=visibility, api=api, in_pxd=in_pxd, + defining=defining, modifiers=modifiers, utility_code=utility_code, + overridable=overridable) return entry def declare_global(self, name, pos): @@ -1475,19 +1475,19 @@ class ModuleScope(Scope): if new_code is not None: self.utility_code_list.append(new_code) - def use_entry_utility_code(self, entry): - if entry is None: - return - if entry.utility_code: - self.utility_code_list.append(entry.utility_code) - if entry.utility_code_definition: - self.utility_code_list.append(entry.utility_code_definition) - - def declare_c_class(self, name, pos, defining=0, implementing=0, - module_name=None, base_type=None, objstruct_cname=None, - typeobj_cname=None, typeptr_cname=None, visibility='private', - typedef_flag=0, api=0, check_size=None, - buffer_defaults=None, shadow=0): + def use_entry_utility_code(self, entry): + if entry is None: + return + if entry.utility_code: + self.utility_code_list.append(entry.utility_code) + if entry.utility_code_definition: + self.utility_code_list.append(entry.utility_code_definition) + + def declare_c_class(self, name, pos, defining=0, implementing=0, + module_name=None, base_type=None, objstruct_cname=None, + typeobj_cname=None, typeptr_cname=None, visibility='private', + typedef_flag=0, api=0, check_size=None, + buffer_defaults=None, shadow=0): # If this is a non-extern typedef class, expose the typedef, but use # the non-typedef struct internally to avoid needing forward # declarations for anonymous structs. @@ -1519,8 +1519,8 @@ class ModuleScope(Scope): # Make a new entry if needed # if not entry or shadow: - type = PyrexTypes.PyExtensionType( - name, typedef_flag, base_type, visibility == 'extern', check_size=check_size) + type = PyrexTypes.PyExtensionType( + name, typedef_flag, base_type, visibility == 'extern', check_size=check_size) type.pos = pos type.buffer_defaults = buffer_defaults if objtypedef_cname is not None: @@ -1710,12 +1710,12 @@ class ModuleScope(Scope): var_entry = Entry(name = entry.name, type = Builtin.type_type, pos = entry.pos, - cname = entry.type.typeptr_cname) + cname = entry.type.typeptr_cname) var_entry.qualified_name = entry.qualified_name var_entry.is_variable = 1 var_entry.is_cglobal = 1 var_entry.is_readonly = 1 - var_entry.scope = entry.scope + var_entry.scope = entry.scope entry.as_variable = var_entry def is_cpp(self): @@ -1937,10 +1937,10 @@ class StructOrUnionScope(Scope): return entry def declare_cfunction(self, name, type, pos, - cname=None, visibility='private', api=0, in_pxd=0, - defining=0, modifiers=(), overridable=False): # currently no utility code ... 
- if overridable: - error(pos, "C struct/union member cannot be declared 'cpdef'") + cname=None, visibility='private', api=0, in_pxd=0, + defining=0, modifiers=(), overridable=False): # currently no utility code ... + if overridable: + error(pos, "C struct/union member cannot be declared 'cpdef'") return self.declare_var(name, type, pos, cname=cname, visibility=visibility) @@ -1975,7 +1975,7 @@ class ClassScope(Scope): py_object_type, [PyrexTypes.CFuncTypeArg("", py_object_type, None)], 0, 0)) entry.utility_code_definition = Code.UtilityCode.load_cached("ClassMethod", "CythonFunction.c") - self.use_entry_utility_code(entry) + self.use_entry_utility_code(entry) entry.is_cfunction = 1 return entry @@ -2050,7 +2050,7 @@ class CClassScope(ClassScope): # getset_table_cname string # has_pyobject_attrs boolean Any PyObject attributes? # has_memoryview_attrs boolean Any memory view attributes? - # has_cpp_class_attrs boolean Any (non-pointer) C++ attributes? + # has_cpp_class_attrs boolean Any (non-pointer) C++ attributes? # has_cyclic_pyobject_attrs boolean Any PyObject attributes that may need GC? # property_entries [Entry] # defined boolean Defined in .pxd file @@ -2062,7 +2062,7 @@ class CClassScope(ClassScope): has_pyobject_attrs = False has_memoryview_attrs = False - has_cpp_class_attrs = False + has_cpp_class_attrs = False has_cyclic_pyobject_attrs = False defined = False implemented = False @@ -2078,7 +2078,7 @@ class CClassScope(ClassScope): def needs_gc(self): # If the type or any of its base types have Python-valued # C attributes, then it needs to participate in GC. - if self.has_cyclic_pyobject_attrs and not self.directives.get('no_gc', False): + if self.has_cyclic_pyobject_attrs and not self.directives.get('no_gc', False): return True base_type = self.parent_type.base_type if base_type and base_type.scope is not None: @@ -2138,8 +2138,8 @@ class CClassScope(ClassScope): self.var_entries.append(entry) if type.is_memoryviewslice: self.has_memoryview_attrs = True - elif type.is_cpp_class: - self.has_cpp_class_attrs = True + elif type.is_cpp_class: + self.has_cpp_class_attrs = True elif type.is_pyobject and (self.is_closure_class_scope or name != '__weakref__'): self.has_pyobject_attrs = True if (not type.is_builtin_type @@ -2155,9 +2155,9 @@ class CClassScope(ClassScope): entry.needs_property = True if not self.is_closure_class_scope and name == "__weakref__": error(pos, "Special attribute __weakref__ cannot be exposed to Python") - if not (type.is_pyobject or type.can_coerce_to_pyobject(self)): - # we're not testing for coercion *from* Python here - that would fail later - error(pos, "C attribute of type '%s' cannot be accessed from Python" % type) + if not (type.is_pyobject or type.can_coerce_to_pyobject(self)): + # we're not testing for coercion *from* Python here - that would fail later + error(pos, "C attribute of type '%s' cannot be accessed from Python" % type) else: entry.needs_property = False return entry @@ -2218,8 +2218,8 @@ class CClassScope(ClassScope): return entry def declare_cfunction(self, name, type, pos, - cname=None, visibility='private', api=0, in_pxd=0, - defining=0, modifiers=(), utility_code=None, overridable=False): + cname=None, visibility='private', api=0, in_pxd=0, + defining=0, modifiers=(), utility_code=None, overridable=False): if get_special_method_signature(name) and not self.parent_type.is_builtin_type: error(pos, "Special methods must be declared with 'def', not 'cdef'") args = type.args @@ -2250,9 +2250,9 @@ class CClassScope(ClassScope): # 
TODO(robertwb): Make this an error. warning(pos, "Compatible but non-identical C method '%s' not redeclared " - "in definition part of extension type '%s'. " - "This may cause incorrect vtables to be generated." % ( - name, self.class_name), 2) + "in definition part of extension type '%s'. " + "This may cause incorrect vtables to be generated." % ( + name, self.class_name), 2) warning(entry.pos, "Previous declaration is here", 2) entry = self.add_cfunction(name, type, pos, cname, visibility='ignore', modifiers=modifiers) else: @@ -2263,7 +2263,7 @@ class CClassScope(ClassScope): error(pos, "C method '%s' not previously declared in definition part of" " extension type '%s'" % (name, self.class_name)) - entry = self.add_cfunction(name, type, pos, cname, visibility, modifiers) + entry = self.add_cfunction(name, type, pos, cname, visibility, modifiers) if defining: entry.func_cname = self.mangle(Naming.func_prefix, name) entry.utility_code = utility_code @@ -2293,13 +2293,13 @@ class CClassScope(ClassScope): # equivalent that must be accessible to support bound methods name = EncodedString(name) entry = self.declare_cfunction(name, type, None, cname, visibility='extern', - utility_code=utility_code) + utility_code=utility_code) var_entry = Entry(name, name, py_object_type) var_entry.qualified_name = name var_entry.is_variable = 1 var_entry.is_builtin = 1 var_entry.utility_code = utility_code - var_entry.scope = entry.scope + var_entry.scope = entry.scope entry.as_variable = var_entry return entry @@ -2397,15 +2397,15 @@ class CppClassScope(Scope): entry = self.declare(name, cname, type, pos, visibility) entry.is_variable = 1 if type.is_cfunction and self.type: - if not self.type.get_fused_types(): - entry.func_cname = "%s::%s" % (self.type.empty_declaration_code(), cname) + if not self.type.get_fused_types(): + entry.func_cname = "%s::%s" % (self.type.empty_declaration_code(), cname) if name != "this" and (defining or name != "<init>"): self.var_entries.append(entry) return entry def declare_cfunction(self, name, type, pos, - cname=None, visibility='extern', api=0, in_pxd=0, - defining=0, modifiers=(), utility_code=None, overridable=False): + cname=None, visibility='extern', api=0, in_pxd=0, + defining=0, modifiers=(), utility_code=None, overridable=False): class_name = self.name.split('::')[-1] if name in (class_name, '__init__') and cname is None: cname = "%s__init__%s" % (Naming.func_prefix, class_name) @@ -2469,9 +2469,9 @@ class CppClassScope(Scope): for base_entry in base_scope.cfunc_entries: entry = self.declare_cfunction(base_entry.name, base_entry.type, base_entry.pos, base_entry.cname, - base_entry.visibility, api=0, - modifiers=base_entry.func_modifiers, - utility_code=base_entry.utility_code) + base_entry.visibility, api=0, + modifiers=base_entry.func_modifiers, + utility_code=base_entry.utility_code) entry.is_inherited = 1 for base_entry in base_scope.type_entries: if base_entry.name not in base_templates: @@ -2496,7 +2496,7 @@ class CppClassScope(Scope): e.type.specialize(values), e.pos, e.cname, - utility_code=e.utility_code) + utility_code=e.utility_code) else: scope.declare_var(entry.name, entry.type.specialize(values), diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestCmdLine.py b/contrib/tools/cython/Cython/Compiler/Tests/TestCmdLine.py index f8862d064a..abc7c0a892 100644 --- a/contrib/tools/cython/Cython/Compiler/Tests/TestCmdLine.py +++ b/contrib/tools/cython/Cython/Compiler/Tests/TestCmdLine.py @@ -1,118 +1,118 @@ - -import sys -from unittest import TestCase 
-try: - from StringIO import StringIO -except ImportError: - from io import StringIO # doesn't accept 'str' in Py2 - -from .. import Options -from ..CmdLine import parse_command_line - - -class CmdLineParserTest(TestCase): - def setUp(self): - backup = {} - for name, value in vars(Options).items(): - backup[name] = value - self._options_backup = backup - - def tearDown(self): - no_value = object() - for name, orig_value in self._options_backup.items(): - if getattr(Options, name, no_value) != orig_value: - setattr(Options, name, orig_value) - - def test_short_options(self): - options, sources = parse_command_line([ - '-V', '-l', '-+', '-t', '-v', '-v', '-v', '-p', '-D', '-a', '-3', - ]) - self.assertFalse(sources) - self.assertTrue(options.show_version) - self.assertTrue(options.use_listing_file) - self.assertTrue(options.cplus) - self.assertTrue(options.timestamps) - self.assertTrue(options.verbose >= 3) - self.assertTrue(Options.embed_pos_in_docstring) - self.assertFalse(Options.docstrings) - self.assertTrue(Options.annotate) - self.assertEqual(options.language_level, 3) - - options, sources = parse_command_line([ - '-f', '-2', 'source.pyx', - ]) - self.assertTrue(sources) - self.assertTrue(len(sources) == 1) - self.assertFalse(options.timestamps) - self.assertEqual(options.language_level, 2) - - def test_long_options(self): - options, sources = parse_command_line([ - '--version', '--create-listing', '--cplus', '--embed', '--timestamps', - '--verbose', '--verbose', '--verbose', - '--embed-positions', '--no-docstrings', '--annotate', '--lenient', - ]) - self.assertFalse(sources) - self.assertTrue(options.show_version) - self.assertTrue(options.use_listing_file) - self.assertTrue(options.cplus) - self.assertEqual(Options.embed, 'main') - self.assertTrue(options.timestamps) - self.assertTrue(options.verbose >= 3) - self.assertTrue(Options.embed_pos_in_docstring) - self.assertFalse(Options.docstrings) - self.assertTrue(Options.annotate) - self.assertFalse(Options.error_on_unknown_names) - self.assertFalse(Options.error_on_uninitialized) - - options, sources = parse_command_line([ - '--force', 'source.pyx', - ]) - self.assertTrue(sources) - self.assertTrue(len(sources) == 1) - self.assertFalse(options.timestamps) - - def test_options_with_values(self): - options, sources = parse_command_line([ - '--embed=huhu', - '-I/test/include/dir1', '--include-dir=/test/include/dir2', - '--include-dir', '/test/include/dir3', - '--working=/work/dir', - 'source.pyx', - '--output-file=/output/dir', - '--pre-import=/pre/import', - '--cleanup=3', - '--annotate-coverage=cov.xml', - '--gdb-outdir=/gdb/outdir', - '--directive=wraparound=false', - ]) - self.assertEqual(sources, ['source.pyx']) - self.assertEqual(Options.embed, 'huhu') - self.assertEqual(options.include_path, ['/test/include/dir1', '/test/include/dir2', '/test/include/dir3']) - self.assertEqual(options.working_path, '/work/dir') - self.assertEqual(options.output_file, '/output/dir') - self.assertEqual(Options.pre_import, '/pre/import') - self.assertEqual(Options.generate_cleanup_code, 3) - self.assertTrue(Options.annotate) - self.assertEqual(Options.annotate_coverage_xml, 'cov.xml') - self.assertTrue(options.gdb_debug) - self.assertEqual(options.output_dir, '/gdb/outdir') - - def test_errors(self): - def error(*args): - old_stderr = sys.stderr - stderr = sys.stderr = StringIO() - try: - self.assertRaises(SystemExit, parse_command_line, list(args)) - finally: - sys.stderr = old_stderr - self.assertTrue(stderr.getvalue()) - - error('-1') - 
error('-I') - error('--version=-a') - error('--version=--annotate=true') - error('--working') - error('--verbose=1') - error('--verbose=1') - error('--cleanup') + +import sys +from unittest import TestCase +try: + from StringIO import StringIO +except ImportError: + from io import StringIO # doesn't accept 'str' in Py2 + +from .. import Options +from ..CmdLine import parse_command_line + + +class CmdLineParserTest(TestCase): + def setUp(self): + backup = {} + for name, value in vars(Options).items(): + backup[name] = value + self._options_backup = backup + + def tearDown(self): + no_value = object() + for name, orig_value in self._options_backup.items(): + if getattr(Options, name, no_value) != orig_value: + setattr(Options, name, orig_value) + + def test_short_options(self): + options, sources = parse_command_line([ + '-V', '-l', '-+', '-t', '-v', '-v', '-v', '-p', '-D', '-a', '-3', + ]) + self.assertFalse(sources) + self.assertTrue(options.show_version) + self.assertTrue(options.use_listing_file) + self.assertTrue(options.cplus) + self.assertTrue(options.timestamps) + self.assertTrue(options.verbose >= 3) + self.assertTrue(Options.embed_pos_in_docstring) + self.assertFalse(Options.docstrings) + self.assertTrue(Options.annotate) + self.assertEqual(options.language_level, 3) + + options, sources = parse_command_line([ + '-f', '-2', 'source.pyx', + ]) + self.assertTrue(sources) + self.assertTrue(len(sources) == 1) + self.assertFalse(options.timestamps) + self.assertEqual(options.language_level, 2) + + def test_long_options(self): + options, sources = parse_command_line([ + '--version', '--create-listing', '--cplus', '--embed', '--timestamps', + '--verbose', '--verbose', '--verbose', + '--embed-positions', '--no-docstrings', '--annotate', '--lenient', + ]) + self.assertFalse(sources) + self.assertTrue(options.show_version) + self.assertTrue(options.use_listing_file) + self.assertTrue(options.cplus) + self.assertEqual(Options.embed, 'main') + self.assertTrue(options.timestamps) + self.assertTrue(options.verbose >= 3) + self.assertTrue(Options.embed_pos_in_docstring) + self.assertFalse(Options.docstrings) + self.assertTrue(Options.annotate) + self.assertFalse(Options.error_on_unknown_names) + self.assertFalse(Options.error_on_uninitialized) + + options, sources = parse_command_line([ + '--force', 'source.pyx', + ]) + self.assertTrue(sources) + self.assertTrue(len(sources) == 1) + self.assertFalse(options.timestamps) + + def test_options_with_values(self): + options, sources = parse_command_line([ + '--embed=huhu', + '-I/test/include/dir1', '--include-dir=/test/include/dir2', + '--include-dir', '/test/include/dir3', + '--working=/work/dir', + 'source.pyx', + '--output-file=/output/dir', + '--pre-import=/pre/import', + '--cleanup=3', + '--annotate-coverage=cov.xml', + '--gdb-outdir=/gdb/outdir', + '--directive=wraparound=false', + ]) + self.assertEqual(sources, ['source.pyx']) + self.assertEqual(Options.embed, 'huhu') + self.assertEqual(options.include_path, ['/test/include/dir1', '/test/include/dir2', '/test/include/dir3']) + self.assertEqual(options.working_path, '/work/dir') + self.assertEqual(options.output_file, '/output/dir') + self.assertEqual(Options.pre_import, '/pre/import') + self.assertEqual(Options.generate_cleanup_code, 3) + self.assertTrue(Options.annotate) + self.assertEqual(Options.annotate_coverage_xml, 'cov.xml') + self.assertTrue(options.gdb_debug) + self.assertEqual(options.output_dir, '/gdb/outdir') + + def test_errors(self): + def error(*args): + old_stderr = sys.stderr + 
stderr = sys.stderr = StringIO() + try: + self.assertRaises(SystemExit, parse_command_line, list(args)) + finally: + sys.stderr = old_stderr + self.assertTrue(stderr.getvalue()) + + error('-1') + error('-I') + error('--version=-a') + error('--version=--annotate=true') + error('--working') + error('--verbose=1') + error('--verbose=1') + error('--cleanup') diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestFlowControl.py b/contrib/tools/cython/Cython/Compiler/Tests/TestFlowControl.py index cad428607a..443551ab88 100644 --- a/contrib/tools/cython/Cython/Compiler/Tests/TestFlowControl.py +++ b/contrib/tools/cython/Cython/Compiler/Tests/TestFlowControl.py @@ -1,68 +1,68 @@ - -from __future__ import absolute_import - -from copy import deepcopy -from unittest import TestCase - -from Cython.Compiler.FlowControl import ( - NameAssignment, StaticAssignment, Argument, NameDeletion) - - -class FakeType(object): - is_pyobject = True - - -class FakeNode(object): - pos = ('filename.pyx', 1, 2) - cf_state = None - type = FakeType() - - def infer_type(self, scope): - return self.type - - -class FakeEntry(object): - type = FakeType() - - -class TestGraph(TestCase): - def test_deepcopy(self): - lhs, rhs = FakeNode(), FakeNode() - entry = FakeEntry() - entry.pos = lhs.pos - - name_ass = NameAssignment(lhs, rhs, entry) - ass = deepcopy(name_ass) - self.assertTrue(ass.lhs) - self.assertTrue(ass.rhs) - self.assertTrue(ass.entry) - self.assertEqual(ass.pos, name_ass.pos) - self.assertFalse(ass.is_arg) - self.assertFalse(ass.is_deletion) - - static_ass = StaticAssignment(entry) - ass = deepcopy(static_ass) - self.assertTrue(ass.lhs) - self.assertTrue(ass.rhs) - self.assertTrue(ass.entry) - self.assertEqual(ass.pos, static_ass.pos) - self.assertFalse(ass.is_arg) - self.assertFalse(ass.is_deletion) - - arg_ass = Argument(lhs, rhs, entry) - ass = deepcopy(arg_ass) - self.assertTrue(ass.lhs) - self.assertTrue(ass.rhs) - self.assertTrue(ass.entry) - self.assertEqual(ass.pos, arg_ass.pos) - self.assertTrue(ass.is_arg) - self.assertFalse(ass.is_deletion) - - name_del = NameDeletion(lhs, entry) - ass = deepcopy(name_del) - self.assertTrue(ass.lhs) - self.assertTrue(ass.rhs) - self.assertTrue(ass.entry) - self.assertEqual(ass.pos, name_del.pos) - self.assertFalse(ass.is_arg) - self.assertTrue(ass.is_deletion) + +from __future__ import absolute_import + +from copy import deepcopy +from unittest import TestCase + +from Cython.Compiler.FlowControl import ( + NameAssignment, StaticAssignment, Argument, NameDeletion) + + +class FakeType(object): + is_pyobject = True + + +class FakeNode(object): + pos = ('filename.pyx', 1, 2) + cf_state = None + type = FakeType() + + def infer_type(self, scope): + return self.type + + +class FakeEntry(object): + type = FakeType() + + +class TestGraph(TestCase): + def test_deepcopy(self): + lhs, rhs = FakeNode(), FakeNode() + entry = FakeEntry() + entry.pos = lhs.pos + + name_ass = NameAssignment(lhs, rhs, entry) + ass = deepcopy(name_ass) + self.assertTrue(ass.lhs) + self.assertTrue(ass.rhs) + self.assertTrue(ass.entry) + self.assertEqual(ass.pos, name_ass.pos) + self.assertFalse(ass.is_arg) + self.assertFalse(ass.is_deletion) + + static_ass = StaticAssignment(entry) + ass = deepcopy(static_ass) + self.assertTrue(ass.lhs) + self.assertTrue(ass.rhs) + self.assertTrue(ass.entry) + self.assertEqual(ass.pos, static_ass.pos) + self.assertFalse(ass.is_arg) + self.assertFalse(ass.is_deletion) + + arg_ass = Argument(lhs, rhs, entry) + ass = deepcopy(arg_ass) + self.assertTrue(ass.lhs) + 
self.assertTrue(ass.rhs) + self.assertTrue(ass.entry) + self.assertEqual(ass.pos, arg_ass.pos) + self.assertTrue(ass.is_arg) + self.assertFalse(ass.is_deletion) + + name_del = NameDeletion(lhs, entry) + ass = deepcopy(name_del) + self.assertTrue(ass.lhs) + self.assertTrue(ass.rhs) + self.assertTrue(ass.entry) + self.assertEqual(ass.pos, name_del.pos) + self.assertFalse(ass.is_arg) + self.assertTrue(ass.is_deletion) diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestGrammar.py b/contrib/tools/cython/Cython/Compiler/Tests/TestGrammar.py index a0ece5c1b5..3dddc960b3 100644 --- a/contrib/tools/cython/Cython/Compiler/Tests/TestGrammar.py +++ b/contrib/tools/cython/Cython/Compiler/Tests/TestGrammar.py @@ -1,129 +1,129 @@ -# mode: run -# tag: syntax - -""" -Uses TreeFragment to test invalid syntax. -""" - -from __future__ import absolute_import - -from ...TestUtils import CythonTest -from ..Errors import CompileError -from .. import ExprNodes - -# Copied from CPython's test_grammar.py -VALID_UNDERSCORE_LITERALS = [ - '0_0_0', - '4_2', - '1_0000_0000', - '0b1001_0100', - '0xffff_ffff', - '0o5_7_7', - '1_00_00.5', - '1_00_00.5j', - '1_00_00.5e5', - '1_00_00j', - '1_00_00e5_1', - '1e1_0', - '.1_4', - '.1_4e1', - '.1_4j', -] - -# Copied from CPython's test_grammar.py -INVALID_UNDERSCORE_LITERALS = [ - # Trailing underscores: - '0_', - '42_', - '1.4j_', - '0b1_', - '0xf_', - '0o5_', - # Underscores in the base selector: - '0_b0', - '0_xf', - '0_o5', - # Underscore right after the base selector: - '0b_0', - '0x_f', - '0o_5', - # Old-style octal, still disallowed: - #'0_7', - #'09_99', - # Special case with exponent: - '0 if 1_Else 1', - # Underscore right before a dot: - '1_.4', - '1_.4j', - # Underscore right after a dot: - '1._4', - '1._4j', - '._5', - # Underscore right after a sign: - '1.0e+_1', - # Multiple consecutive underscores: - '4_______2', - '0.1__4', - '0b1001__0100', - '0xffff__ffff', - '0o5__77', - '1e1__0', - # Underscore right before j: - '1.4_j', - '1.4e5_j', - # Underscore right before e: - '1_e1', - '1.4_e1', - # Underscore right after e: - '1e_1', - '1.4e_1', - # Whitespace in literals - '1_ 2', - '1 _2', - '1_2.2_ 1', - '1_2.2 _1', - '1_2e _1', - '1_2e2 _1', - '1_2e 2_1', -] - - -class TestGrammar(CythonTest): - - def test_invalid_number_literals(self): - for literal in INVALID_UNDERSCORE_LITERALS: - for expression in ['%s', '1 + %s', '%s + 1', '2 * %s', '%s * 2']: - code = 'x = ' + expression % literal - try: - self.fragment(u'''\ - # cython: language_level=3 - ''' + code) - except CompileError as exc: - assert code in [s.strip() for s in str(exc).splitlines()], str(exc) - else: - assert False, "Invalid Cython code '%s' failed to raise an exception" % code - - def test_valid_number_literals(self): - for literal in VALID_UNDERSCORE_LITERALS: - for i, expression in enumerate(['%s', '1 + %s', '%s + 1', '2 * %s', '%s * 2']): - code = 'x = ' + expression % literal - node = self.fragment(u'''\ - # cython: language_level=3 - ''' + code).root - assert node is not None - - literal_node = node.stats[0].rhs # StatListNode([SingleAssignmentNode('x', expr)]) - if i > 0: - # Add/MulNode() -> literal is first or second operand - literal_node = literal_node.operand2 if i % 2 else literal_node.operand1 - if 'j' in literal or 'J' in literal: - assert isinstance(literal_node, ExprNodes.ImagNode) - elif '.' 
in literal or 'e' in literal or 'E' in literal and not ('0x' in literal or '0X' in literal): - assert isinstance(literal_node, ExprNodes.FloatNode) - else: - assert isinstance(literal_node, ExprNodes.IntNode) - - -if __name__ == "__main__": - import unittest - unittest.main() +# mode: run +# tag: syntax + +""" +Uses TreeFragment to test invalid syntax. +""" + +from __future__ import absolute_import + +from ...TestUtils import CythonTest +from ..Errors import CompileError +from .. import ExprNodes + +# Copied from CPython's test_grammar.py +VALID_UNDERSCORE_LITERALS = [ + '0_0_0', + '4_2', + '1_0000_0000', + '0b1001_0100', + '0xffff_ffff', + '0o5_7_7', + '1_00_00.5', + '1_00_00.5j', + '1_00_00.5e5', + '1_00_00j', + '1_00_00e5_1', + '1e1_0', + '.1_4', + '.1_4e1', + '.1_4j', +] + +# Copied from CPython's test_grammar.py +INVALID_UNDERSCORE_LITERALS = [ + # Trailing underscores: + '0_', + '42_', + '1.4j_', + '0b1_', + '0xf_', + '0o5_', + # Underscores in the base selector: + '0_b0', + '0_xf', + '0_o5', + # Underscore right after the base selector: + '0b_0', + '0x_f', + '0o_5', + # Old-style octal, still disallowed: + #'0_7', + #'09_99', + # Special case with exponent: + '0 if 1_Else 1', + # Underscore right before a dot: + '1_.4', + '1_.4j', + # Underscore right after a dot: + '1._4', + '1._4j', + '._5', + # Underscore right after a sign: + '1.0e+_1', + # Multiple consecutive underscores: + '4_______2', + '0.1__4', + '0b1001__0100', + '0xffff__ffff', + '0o5__77', + '1e1__0', + # Underscore right before j: + '1.4_j', + '1.4e5_j', + # Underscore right before e: + '1_e1', + '1.4_e1', + # Underscore right after e: + '1e_1', + '1.4e_1', + # Whitespace in literals + '1_ 2', + '1 _2', + '1_2.2_ 1', + '1_2.2 _1', + '1_2e _1', + '1_2e2 _1', + '1_2e 2_1', +] + + +class TestGrammar(CythonTest): + + def test_invalid_number_literals(self): + for literal in INVALID_UNDERSCORE_LITERALS: + for expression in ['%s', '1 + %s', '%s + 1', '2 * %s', '%s * 2']: + code = 'x = ' + expression % literal + try: + self.fragment(u'''\ + # cython: language_level=3 + ''' + code) + except CompileError as exc: + assert code in [s.strip() for s in str(exc).splitlines()], str(exc) + else: + assert False, "Invalid Cython code '%s' failed to raise an exception" % code + + def test_valid_number_literals(self): + for literal in VALID_UNDERSCORE_LITERALS: + for i, expression in enumerate(['%s', '1 + %s', '%s + 1', '2 * %s', '%s * 2']): + code = 'x = ' + expression % literal + node = self.fragment(u'''\ + # cython: language_level=3 + ''' + code).root + assert node is not None + + literal_node = node.stats[0].rhs # StatListNode([SingleAssignmentNode('x', expr)]) + if i > 0: + # Add/MulNode() -> literal is first or second operand + literal_node = literal_node.operand2 if i % 2 else literal_node.operand1 + if 'j' in literal or 'J' in literal: + assert isinstance(literal_node, ExprNodes.ImagNode) + elif '.' 
in literal or 'e' in literal or 'E' in literal and not ('0x' in literal or '0X' in literal): + assert isinstance(literal_node, ExprNodes.FloatNode) + else: + assert isinstance(literal_node, ExprNodes.IntNode) + + +if __name__ == "__main__": + import unittest + unittest.main() diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestSignatureMatching.py b/contrib/tools/cython/Cython/Compiler/Tests/TestSignatureMatching.py index 4b0bef8175..166bb225b9 100644 --- a/contrib/tools/cython/Cython/Compiler/Tests/TestSignatureMatching.py +++ b/contrib/tools/cython/Cython/Compiler/Tests/TestSignatureMatching.py @@ -16,7 +16,7 @@ class SignatureMatcherTest(unittest.TestCase): Test the signature matching algorithm for overloaded signatures. """ def assertMatches(self, expected_type, arg_types, functions): - match = pt.best_match(arg_types, functions) + match = pt.best_match(arg_types, functions) if expected_type is not None: self.assertNotEqual(None, match) self.assertEqual(expected_type, match.type) diff --git a/contrib/tools/cython/Cython/Compiler/TreeFragment.py b/contrib/tools/cython/Cython/Compiler/TreeFragment.py index 5d2150f347..b85da8191a 100644 --- a/contrib/tools/cython/Cython/Compiler/TreeFragment.py +++ b/contrib/tools/cython/Cython/Compiler/TreeFragment.py @@ -9,7 +9,7 @@ Support for parsing strings into code trees. from __future__ import absolute_import import re -from io import StringIO +from io import StringIO from .Scanning import PyrexScanner, StringSourceDescriptor from .Symtab import ModuleScope @@ -17,7 +17,7 @@ from . import PyrexTypes from .Visitor import VisitorTransform from .Nodes import Node, StatListNode from .ExprNodes import NameNode -from .StringEncoding import _unicode +from .StringEncoding import _unicode from . import Parsing from . import Main from . import UtilNodes @@ -25,21 +25,21 @@ from . import UtilNodes class StringParseContext(Main.Context): def __init__(self, name, include_directories=None, compiler_directives=None, cpp=False): - if include_directories is None: - include_directories = [] - if compiler_directives is None: - compiler_directives = {} - # TODO: see if "language_level=3" also works for our internal code here. - Main.Context.__init__(self, include_directories, compiler_directives, cpp=cpp, language_level=2) + if include_directories is None: + include_directories = [] + if compiler_directives is None: + compiler_directives = {} + # TODO: see if "language_level=3" also works for our internal code here. + Main.Context.__init__(self, include_directories, compiler_directives, cpp=cpp, language_level=2) self.module_name = name - def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1, absolute_fallback=True): + def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1, absolute_fallback=True): if module_name not in (self.module_name, 'cython'): raise AssertionError("Not yet supporting any cimports/includes from string code snippets") - return ModuleScope(module_name, parent_module=None, context=self) + return ModuleScope(module_name, parent_module=None, context=self) -def parse_from_strings(name, code, pxds=None, level=None, initial_pos=None, +def parse_from_strings(name, code, pxds=None, level=None, initial_pos=None, context=None, allow_struct_enum_decorator=False): """ Utility method to parse a (unicode) string of code. This is mostly @@ -60,7 +60,7 @@ def parse_from_strings(name, code, pxds=None, level=None, initial_pos=None, # to use a unicode string so that code fragments don't have to bother # with encoding. 
This means that test code passed in should not have an # encoding header. - assert isinstance(code, _unicode), "unicode code snippets only please" + assert isinstance(code, _unicode), "unicode code snippets only please" encoding = "UTF-8" module_name = name @@ -68,7 +68,7 @@ def parse_from_strings(name, code, pxds=None, level=None, initial_pos=None, initial_pos = (name, 1, 0) code_source = StringSourceDescriptor(name, code) - scope = context.find_module(module_name, pos=initial_pos, need_pxd=False) + scope = context.find_module(module_name, pos=initial_pos, need_pxd=False) buf = StringIO(code) @@ -86,7 +86,7 @@ def parse_from_strings(name, code, pxds=None, level=None, initial_pos=None, tree.scope = scope return tree - + class TreeCopier(VisitorTransform): def visit_Node(self, node): if node is None: @@ -96,7 +96,7 @@ class TreeCopier(VisitorTransform): self.visitchildren(c) return c - + class ApplyPositionAndCopy(TreeCopier): def __init__(self, pos): super(ApplyPositionAndCopy, self).__init__() @@ -107,7 +107,7 @@ class ApplyPositionAndCopy(TreeCopier): copy.pos = self.pos return copy - + class TemplateTransform(VisitorTransform): """ Makes a copy of a template tree while doing substitutions. @@ -197,16 +197,16 @@ class TemplateTransform(VisitorTransform): else: return self.visit_Node(node) - + def copy_code_tree(node): return TreeCopier()(node) - -_match_indent = re.compile(u"^ *").match - - + +_match_indent = re.compile(u"^ *").match + + def strip_common_indent(lines): - """Strips empty lines and common indentation from the list of strings given in lines""" + """Strips empty lines and common indentation from the list of strings given in lines""" # TODO: Facilitate textwrap.indent instead lines = [x for x in lines if x.strip() != u""] if lines: @@ -214,24 +214,24 @@ def strip_common_indent(lines): lines = [x[minindent:] for x in lines] return lines - + class TreeFragment(object): - def __init__(self, code, name=None, pxds=None, temps=None, pipeline=None, level=None, initial_pos=None): - if pxds is None: - pxds = {} - if temps is None: - temps = [] - if pipeline is None: - pipeline = [] - if not name: - name = "(tree fragment)" - - if isinstance(code, _unicode): + def __init__(self, code, name=None, pxds=None, temps=None, pipeline=None, level=None, initial_pos=None): + if pxds is None: + pxds = {} + if temps is None: + temps = [] + if pipeline is None: + pipeline = [] + if not name: + name = "(tree fragment)" + + if isinstance(code, _unicode): def fmt(x): return u"\n".join(strip_common_indent(x.split(u"\n"))) fmt_code = fmt(code) fmt_pxds = {} - for key, value in pxds.items(): + for key, value in pxds.items(): fmt_pxds[key] = fmt(value) mod = t = parse_from_strings(name, fmt_code, fmt_pxds, level=level, initial_pos=initial_pos) if level is None: @@ -244,8 +244,8 @@ class TreeFragment(object): t = transform(t) self.root = t elif isinstance(code, Node): - if pxds: - raise NotImplementedError() + if pxds: + raise NotImplementedError() self.root = code else: raise ValueError("Unrecognized code format (accepts unicode and Node)") @@ -254,16 +254,16 @@ class TreeFragment(object): def copy(self): return copy_code_tree(self.root) - def substitute(self, nodes=None, temps=None, pos = None): - if nodes is None: - nodes = {} - if temps is None: - temps = [] + def substitute(self, nodes=None, temps=None, pos = None): + if nodes is None: + nodes = {} + if temps is None: + temps = [] return TemplateTransform()(self.root, substitutions = nodes, temps = self.temps + temps, pos = pos) - + class 
SetPosTransform(VisitorTransform): def __init__(self, pos): super(SetPosTransform, self).__init__() diff --git a/contrib/tools/cython/Cython/Compiler/TreePath.py b/contrib/tools/cython/Cython/Compiler/TreePath.py index 20db8aae26..8585905557 100644 --- a/contrib/tools/cython/Cython/Compiler/TreePath.py +++ b/contrib/tools/cython/Cython/Compiler/TreePath.py @@ -18,14 +18,14 @@ else: _unicode = unicode path_tokenizer = re.compile( - r"(" - r"'[^']*'|\"[^\"]*\"|" - r"//?|" - r"\(\)|" - r"==?|" - r"[/.*\[\]()@])|" - r"([^/\[\]()@=\s]+)|" - r"\s+" + r"(" + r"'[^']*'|\"[^\"]*\"|" + r"//?|" + r"\(\)|" + r"==?|" + r"[/.*\[\]()@])|" + r"([^/\[\]()@=\s]+)|" + r"\s+" ).findall def iterchildren(node, attr_name): diff --git a/contrib/tools/cython/Cython/Compiler/TypeInference.py b/contrib/tools/cython/Cython/Compiler/TypeInference.py index 1edda5bbfa..c7ffee7d24 100644 --- a/contrib/tools/cython/Cython/Compiler/TypeInference.py +++ b/contrib/tools/cython/Cython/Compiler/TypeInference.py @@ -9,19 +9,19 @@ from .. import Utils from .PyrexTypes import py_object_type, unspecified_type from .Visitor import CythonTransform, EnvTransform -try: - reduce -except NameError: - from functools import reduce +try: + reduce +except NameError: + from functools import reduce + - class TypedExprNode(ExprNodes.ExprNode): # Used for declaring assignments of a specified type without a known entry. - subexprs = [] + subexprs = [] + + def __init__(self, type, pos=None): + super(TypedExprNode, self).__init__(pos, type=type) - def __init__(self, type, pos=None): - super(TypedExprNode, self).__init__(pos, type=type) - object_expr = TypedExprNode(py_object_type) @@ -68,12 +68,12 @@ class MarkParallelAssignments(EnvTransform): parallel_node.assigned_nodes.append(lhs) elif isinstance(lhs, ExprNodes.SequenceNode): - for i, arg in enumerate(lhs.args): - if not rhs or arg.is_starred: - item_node = None - else: - item_node = rhs.inferable_item_node(i) - self.mark_assignment(arg, item_node) + for i, arg in enumerate(lhs.args): + if not rhs or arg.is_starred: + item_node = None + else: + item_node = rhs.inferable_item_node(i) + self.mark_assignment(arg, item_node) else: # Could use this info to infer cdef class attributes... 
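The SequenceNode branch just above marks one assignment per unpacking target, pairing each left-hand target with the right-hand item at the same index, and falls back to None for starred targets, where no single item can be inferred. A plain-Python analogue of that positional pairing:

    # a, b, c = 1, 2, 3 pairs targets and items by index
    lhs_targets = ["a", "b", "c"]
    rhs_items = [1, 2, 3]
    bindings = dict(zip(lhs_targets, rhs_items))
    assert bindings == {"a": 1, "b": 2, "c": 3}
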
pass @@ -191,10 +191,10 @@ class MarkParallelAssignments(EnvTransform): # use fake expressions with the right result type if node.star_arg: self.mark_assignment( - node.star_arg, TypedExprNode(Builtin.tuple_type, node.pos)) + node.star_arg, TypedExprNode(Builtin.tuple_type, node.pos)) if node.starstar_arg: self.mark_assignment( - node.starstar_arg, TypedExprNode(Builtin.dict_type, node.pos)) + node.starstar_arg, TypedExprNode(Builtin.dict_type, node.pos)) EnvTransform.visit_FuncDefNode(self, node) return node @@ -404,7 +404,7 @@ class SimpleAssignmentTypeInferer(object): else: entry = node.entry node_type = spanning_type( - types, entry.might_overflow, entry.pos, scope) + types, entry.might_overflow, entry.pos, scope) node.inferred_type = node_type def infer_name_node_type_partial(node): @@ -413,7 +413,7 @@ class SimpleAssignmentTypeInferer(object): if not types: return entry = node.entry - return spanning_type(types, entry.might_overflow, entry.pos, scope) + return spanning_type(types, entry.might_overflow, entry.pos, scope) def inferred_types(entry): has_none = False @@ -488,7 +488,7 @@ class SimpleAssignmentTypeInferer(object): types = inferred_types(entry) if types and all(types): entry_type = spanning_type( - types, entry.might_overflow, entry.pos, scope) + types, entry.might_overflow, entry.pos, scope) inferred.add(entry) self.set_entry_type(entry, entry_type) @@ -498,7 +498,7 @@ class SimpleAssignmentTypeInferer(object): for assmt in entry.cf_assignments: assmt.infer_type() types = inferred_types(entry) - new_type = spanning_type(types, entry.might_overflow, entry.pos, scope) + new_type = spanning_type(types, entry.might_overflow, entry.pos, scope) if new_type != entry.type: self.set_entry_type(entry, new_type) dirty = True @@ -530,22 +530,22 @@ def find_spanning_type(type1, type2): return PyrexTypes.c_double_type return result_type -def simply_type(result_type, pos): +def simply_type(result_type, pos): if result_type.is_reference: result_type = result_type.ref_base_type if result_type.is_const: result_type = result_type.const_base_type if result_type.is_cpp_class: result_type.check_nullary_constructor(pos) - if result_type.is_array: - result_type = PyrexTypes.c_ptr_type(result_type.base_type) + if result_type.is_array: + result_type = PyrexTypes.c_ptr_type(result_type.base_type) return result_type -def aggressive_spanning_type(types, might_overflow, pos, scope): - return simply_type(reduce(find_spanning_type, types), pos) - -def safe_spanning_type(types, might_overflow, pos, scope): - result_type = simply_type(reduce(find_spanning_type, types), pos) +def aggressive_spanning_type(types, might_overflow, pos, scope): + return simply_type(reduce(find_spanning_type, types), pos) + +def safe_spanning_type(types, might_overflow, pos, scope): + result_type = simply_type(reduce(find_spanning_type, types), pos) if result_type.is_pyobject: # In theory, any specific Python type is always safe to # infer. However, inferring str can cause some existing code @@ -581,9 +581,9 @@ def safe_spanning_type(types, might_overflow, pos, scope): # to make sure everything is supported. 
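Both aggressive_spanning_type() and safe_spanning_type() above fold find_spanning_type() over every type observed for an entry via reduce(). A toy version of that fold, assuming a three-step widening order (Cython's real type lattice is far richer than this):

    from functools import reduce

    WIDTH = {"int": 0, "long": 1, "double": 2}   # assumed toy ordering

    def span(t1, t2):
        # keep the wider of the two candidates
        return t1 if WIDTH[t1] >= WIDTH[t2] else t2

    assert reduce(span, ["int", "double", "long"]) == "double"
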
elif (result_type.is_int or result_type.is_enum) and not might_overflow: return result_type - elif (not result_type.can_coerce_to_pyobject(scope) - and not result_type.is_error): - return result_type + elif (not result_type.can_coerce_to_pyobject(scope) + and not result_type.is_error): + return result_type return py_object_type diff --git a/contrib/tools/cython/Cython/Compiler/TypeSlots.py b/contrib/tools/cython/Cython/Compiler/TypeSlots.py index 34e31df0af..0b4ff67042 100644 --- a/contrib/tools/cython/Cython/Compiler/TypeSlots.py +++ b/contrib/tools/cython/Cython/Compiler/TypeSlots.py @@ -7,7 +7,7 @@ from __future__ import absolute_import from . import Naming from . import PyrexTypes -from .Errors import error +from .Errors import error invisible = ['__cinit__', '__dealloc__', '__richcmp__', '__nonzero__', '__bool__'] @@ -73,7 +73,7 @@ class Signature(object): } type_to_format_map = dict( - (type_, format_) for format_, type_ in format_map.items()) + (type_, format_) for format_, type_ in format_map.items()) error_value_map = { 'O': "NULL", @@ -101,12 +101,12 @@ class Signature(object): self.exception_check = ret_format != 'r' and self.error_value is not None self.is_staticmethod = False - def __repr__(self): - return '<Signature[%s(%s%s)]>' % ( - self.ret_format, - ', '.join(self.fixed_arg_format), - '*' if self.has_generic_args else '') - + def __repr__(self): + return '<Signature[%s(%s%s)]>' % ( + self.ret_format, + ', '.join(self.fixed_arg_format), + '*' if self.has_generic_args else '') + def num_fixed_args(self): return len(self.fixed_arg_format) @@ -135,7 +135,7 @@ class Signature(object): def function_type(self, self_arg_override=None): # Construct a C function type descriptor for this signature args = [] - for i in range(self.num_fixed_args()): + for i in range(self.num_fixed_args()): if self_arg_override is not None and self.is_self_arg(i): assert isinstance(self_arg_override, PyrexTypes.CFuncTypeArg) args.append(self_arg_override) @@ -202,10 +202,10 @@ class SlotDescriptor(object): return guard def generate(self, scope, code): - preprocessor_guard = self.preprocessor_guard_code() - if preprocessor_guard: - code.putln(preprocessor_guard) - + preprocessor_guard = self.preprocessor_guard_code() + if preprocessor_guard: + code.putln(preprocessor_guard) + end_pypy_guard = False if self.is_initialised_dynamically: value = "0" @@ -229,12 +229,12 @@ class SlotDescriptor(object): code.putln("%s, /*%s*/" % (inherited_value, self.slot_name)) code.putln("#else") end_pypy_guard = True - + code.putln("%s, /*%s*/" % (value, self.slot_name)) - - if end_pypy_guard: - code.putln("#endif") - + + if end_pypy_guard: + code.putln("#endif") + if self.py3 == '<RESERVED>': code.putln("#else") code.putln("0, /*reserved*/") @@ -370,7 +370,7 @@ class ConstructorSlot(InternalMethodSlot): and scope.parent_type.base_type and not scope.has_pyobject_attrs and not scope.has_memoryview_attrs - and not scope.has_cpp_class_attrs + and not scope.has_cpp_class_attrs and not (entry and entry.is_special)): # if the type does not have object attributes, it can # delegate GC methods to its parent - iff the parent @@ -439,12 +439,12 @@ class DocStringSlot(SlotDescriptor): # Descriptor for the docstring slot. 
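slot_code() below returns either "0" or a C string literal destined for the tp_doc slot; at the Python level, tp_doc is what a compiled extension type reports as __doc__:

    class Documented(object):
        """hello"""

    # for an extension type the same string would be supplied via tp_doc
    assert Documented.__doc__ == "hello"
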
def slot_code(self, scope): - doc = scope.doc - if doc is None: + doc = scope.doc + if doc is None: return "0" - if doc.is_unicode: - doc = doc.as_utf8_string() - return doc.as_c_string_literal() + if doc.is_unicode: + doc = doc.as_utf8_string() + return doc.as_c_string_literal() class SuiteSlot(SlotDescriptor): @@ -452,8 +452,8 @@ class SuiteSlot(SlotDescriptor): # # sub_slots [SlotDescriptor] - def __init__(self, sub_slots, slot_type, slot_name, ifdef=None): - SlotDescriptor.__init__(self, slot_name, ifdef=ifdef) + def __init__(self, sub_slots, slot_type, slot_name, ifdef=None): + SlotDescriptor.__init__(self, slot_name, ifdef=ifdef) self.sub_slots = sub_slots self.slot_type = slot_type substructures.append(self) @@ -475,8 +475,8 @@ class SuiteSlot(SlotDescriptor): def generate_substructure(self, scope, code): if not self.is_empty(scope): code.putln("") - if self.ifdef: - code.putln("#if %s" % self.ifdef) + if self.ifdef: + code.putln("#if %s" % self.ifdef) code.putln( "static %s %s = {" % ( self.slot_type, @@ -484,8 +484,8 @@ class SuiteSlot(SlotDescriptor): for slot in self.sub_slots: slot.generate(scope, code) code.putln("};") - if self.ifdef: - code.putln("#endif") + if self.ifdef: + code.putln("#endif") substructures = [] # List of all SuiteSlot instances @@ -531,27 +531,27 @@ class BaseClassSlot(SlotDescriptor): base_type.typeptr_cname)) -class DictOffsetSlot(SlotDescriptor): - # Slot descriptor for a class' dict offset, for dynamic attributes. - - def slot_code(self, scope): +class DictOffsetSlot(SlotDescriptor): + # Slot descriptor for a class' dict offset, for dynamic attributes. + + def slot_code(self, scope): dict_entry = scope.lookup_here("__dict__") if not scope.is_closure_class_scope else None - if dict_entry and dict_entry.is_variable: - if getattr(dict_entry.type, 'cname', None) != 'PyDict_Type': - error(dict_entry.pos, "__dict__ slot must be of type 'dict'") - return "0" - type = scope.parent_type - if type.typedef_flag: - objstruct = type.objstruct_cname - else: - objstruct = "struct %s" % type.objstruct_cname - return ("offsetof(%s, %s)" % ( - objstruct, - dict_entry.cname)) - else: - return "0" - - + if dict_entry and dict_entry.is_variable: + if getattr(dict_entry.type, 'cname', None) != 'PyDict_Type': + error(dict_entry.pos, "__dict__ slot must be of type 'dict'") + return "0" + type = scope.parent_type + if type.typedef_flag: + objstruct = type.objstruct_cname + else: + objstruct = "struct %s" % type.objstruct_cname + return ("offsetof(%s, %s)" % ( + objstruct, + dict_entry.cname)) + else: + return "0" + + # The following dictionary maps __xxx__ method names to slot descriptors. 
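The table defined just below is keyed by special method name, letting the code generator find the C type-struct slot where a user-defined __add__ or __repr__ must be wired up. Conceptually (an illustrative subset with plain strings, not the real descriptor objects):

    toy_method_to_slot = {
        "__add__": "nb_add",     # PyNumberMethods
        "__repr__": "tp_repr",   # main slot table
        "__len__": "sq_length",  # PySequenceMethods (mp_length also exists)
    }
    assert toy_method_to_slot["__repr__"] == "tp_repr"
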
method_name_to_slot = {} @@ -726,12 +726,12 @@ property_accessor_signatures = { #------------------------------------------------------------------------------------------ PyNumberMethods_Py3_GUARD = "PY_MAJOR_VERSION < 3 || (CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x03050000)" - + PyNumberMethods = ( MethodSlot(binaryfunc, "nb_add", "__add__"), MethodSlot(binaryfunc, "nb_subtract", "__sub__"), MethodSlot(binaryfunc, "nb_multiply", "__mul__"), - MethodSlot(binaryfunc, "nb_divide", "__div__", ifdef = PyNumberMethods_Py3_GUARD), + MethodSlot(binaryfunc, "nb_divide", "__div__", ifdef = PyNumberMethods_Py3_GUARD), MethodSlot(binaryfunc, "nb_remainder", "__mod__"), MethodSlot(binaryfunc, "nb_divmod", "__divmod__"), MethodSlot(ternaryfunc, "nb_power", "__pow__"), @@ -745,18 +745,18 @@ PyNumberMethods = ( MethodSlot(binaryfunc, "nb_and", "__and__"), MethodSlot(binaryfunc, "nb_xor", "__xor__"), MethodSlot(binaryfunc, "nb_or", "__or__"), - EmptySlot("nb_coerce", ifdef = PyNumberMethods_Py3_GUARD), + EmptySlot("nb_coerce", ifdef = PyNumberMethods_Py3_GUARD), MethodSlot(unaryfunc, "nb_int", "__int__", fallback="__long__"), MethodSlot(unaryfunc, "nb_long", "__long__", fallback="__int__", py3 = "<RESERVED>"), MethodSlot(unaryfunc, "nb_float", "__float__"), - MethodSlot(unaryfunc, "nb_oct", "__oct__", ifdef = PyNumberMethods_Py3_GUARD), - MethodSlot(unaryfunc, "nb_hex", "__hex__", ifdef = PyNumberMethods_Py3_GUARD), + MethodSlot(unaryfunc, "nb_oct", "__oct__", ifdef = PyNumberMethods_Py3_GUARD), + MethodSlot(unaryfunc, "nb_hex", "__hex__", ifdef = PyNumberMethods_Py3_GUARD), # Added in release 2.0 MethodSlot(ibinaryfunc, "nb_inplace_add", "__iadd__"), MethodSlot(ibinaryfunc, "nb_inplace_subtract", "__isub__"), MethodSlot(ibinaryfunc, "nb_inplace_multiply", "__imul__"), - MethodSlot(ibinaryfunc, "nb_inplace_divide", "__idiv__", ifdef = PyNumberMethods_Py3_GUARD), + MethodSlot(ibinaryfunc, "nb_inplace_divide", "__idiv__", ifdef = PyNumberMethods_Py3_GUARD), MethodSlot(ibinaryfunc, "nb_inplace_remainder", "__imod__"), MethodSlot(ibinaryfunc, "nb_inplace_power", "__ipow__"), # actually ternaryfunc!!! MethodSlot(ibinaryfunc, "nb_inplace_lshift", "__ilshift__"), @@ -809,13 +809,13 @@ PyBufferProcs = ( MethodSlot(releasebufferproc, "bf_releasebuffer", "__releasebuffer__") ) -PyAsyncMethods = ( - MethodSlot(unaryfunc, "am_await", "__await__"), - MethodSlot(unaryfunc, "am_aiter", "__aiter__"), - MethodSlot(unaryfunc, "am_anext", "__anext__"), +PyAsyncMethods = ( + MethodSlot(unaryfunc, "am_await", "__await__"), + MethodSlot(unaryfunc, "am_aiter", "__aiter__"), + MethodSlot(unaryfunc, "am_anext", "__anext__"), EmptySlot("am_send", ifdef="PY_VERSION_HEX >= 0x030A00A3"), -) - +) + #------------------------------------------------------------------------------------------ # # The main slot table. 
This table contains descriptors for all the @@ -826,15 +826,15 @@ PyAsyncMethods = ( slot_table = ( ConstructorSlot("tp_dealloc", '__dealloc__'), - EmptySlot("tp_print", ifdef="PY_VERSION_HEX < 0x030800b4"), - EmptySlot("tp_vectorcall_offset", ifdef="PY_VERSION_HEX >= 0x030800b4"), + EmptySlot("tp_print", ifdef="PY_VERSION_HEX < 0x030800b4"), + EmptySlot("tp_vectorcall_offset", ifdef="PY_VERSION_HEX >= 0x030800b4"), EmptySlot("tp_getattr"), EmptySlot("tp_setattr"), - - # tp_compare (Py2) / tp_reserved (Py3<3.5) / tp_as_async (Py3.5+) is always used as tp_as_async in Py3 - MethodSlot(cmpfunc, "tp_compare", "__cmp__", ifdef="PY_MAJOR_VERSION < 3"), - SuiteSlot(PyAsyncMethods, "__Pyx_PyAsyncMethodsStruct", "tp_as_async", ifdef="PY_MAJOR_VERSION >= 3"), - + + # tp_compare (Py2) / tp_reserved (Py3<3.5) / tp_as_async (Py3.5+) is always used as tp_as_async in Py3 + MethodSlot(cmpfunc, "tp_compare", "__cmp__", ifdef="PY_MAJOR_VERSION < 3"), + SuiteSlot(PyAsyncMethods, "__Pyx_PyAsyncMethodsStruct", "tp_as_async", ifdef="PY_MAJOR_VERSION >= 3"), + MethodSlot(reprfunc, "tp_repr", "__repr__"), SuiteSlot(PyNumberMethods, "PyNumberMethods", "tp_as_number"), @@ -873,7 +873,7 @@ slot_table = ( SyntheticSlot("tp_descr_get", ["__get__"], "0"), SyntheticSlot("tp_descr_set", ["__set__", "__delete__"], "0"), - DictOffsetSlot("tp_dictoffset"), + DictOffsetSlot("tp_dictoffset"), MethodSlot(initproc, "tp_init", "__init__"), EmptySlot("tp_alloc"), #FixedSlot("tp_alloc", "PyType_GenericAlloc"), diff --git a/contrib/tools/cython/Cython/Compiler/UtilNodes.py b/contrib/tools/cython/Cython/Compiler/UtilNodes.py index 376fb602e1..c41748ace0 100644 --- a/contrib/tools/cython/Cython/Compiler/UtilNodes.py +++ b/contrib/tools/cython/Cython/Compiler/UtilNodes.py @@ -51,15 +51,15 @@ class TempRefNode(AtomicExprNode): def generate_result_code(self, code): pass - def generate_assignment_code(self, rhs, code, overloaded_assignment=False): + def generate_assignment_code(self, rhs, code, overloaded_assignment=False): if self.type.is_pyobject: rhs.make_owned_reference(code) # TODO: analyse control flow to see if this is necessary code.put_xdecref(self.result(), self.ctype()) - code.putln('%s = %s;' % ( - self.result(), - rhs.result() if overloaded_assignment else rhs.result_as(self.ctype()), - )) + code.putln('%s = %s;' % ( + self.result(), + rhs.result() if overloaded_assignment else rhs.result_as(self.ctype()), + )) rhs.generate_post_assignment_code(code) rhs.free_temps(code) @@ -142,15 +142,15 @@ class ResultRefNode(AtomicExprNode): else: return () - def update_expression(self, expression): - self.expression = expression - if hasattr(expression, "type"): - self.type = expression.type - + def update_expression(self, expression): + self.expression = expression + if hasattr(expression, "type"): + self.type = expression.type + def analyse_types(self, env): if self.expression is not None: - if not self.expression.type: - self.expression = self.expression.analyse_types(env) + if not self.expression.type: + self.expression = self.expression.analyse_types(env) self.type = self.expression.type return self @@ -197,15 +197,15 @@ class ResultRefNode(AtomicExprNode): def generate_disposal_code(self, code): pass - def generate_assignment_code(self, rhs, code, overloaded_assignment=False): + def generate_assignment_code(self, rhs, code, overloaded_assignment=False): if self.type.is_pyobject: rhs.make_owned_reference(code) if not self.lhs_of_first_assignment: code.put_decref(self.result(), self.ctype()) - code.putln('%s = %s;' % ( - 
self.result(), - rhs.result() if overloaded_assignment else rhs.result_as(self.ctype()), - )) + code.putln('%s = %s;' % ( + self.result(), + rhs.result() if overloaded_assignment else rhs.result_as(self.ctype()), + )) rhs.generate_post_assignment_code(code) rhs.free_temps(code) @@ -250,7 +250,7 @@ class LetNodeMixin: code.put_decref_clear(self.temp, self.temp_type) code.funcstate.release_temp(self.temp) - + class EvalWithTempExprNode(ExprNodes.ExprNode, LetNodeMixin): # A wrapper around a subexpression that moves an expression into a # temp variable and provides it to the subexpression. @@ -275,7 +275,7 @@ class EvalWithTempExprNode(ExprNodes.ExprNode, LetNodeMixin): def analyse_types(self, env): self.temp_expression = self.temp_expression.analyse_types(env) - self.lazy_temp.update_expression(self.temp_expression) # overwrite in case it changed + self.lazy_temp.update_expression(self.temp_expression) # overwrite in case it changed self.subexpression = self.subexpression.analyse_types(env) self.type = self.subexpression.type return self @@ -291,10 +291,10 @@ class EvalWithTempExprNode(ExprNodes.ExprNode, LetNodeMixin): self.subexpression.generate_evaluation_code(code) self.teardown_temp_expr(code) - + LetRefNode = ResultRefNode - + class LetNode(Nodes.StatNode, LetNodeMixin): # Implements a local temporary variable scope. Imagine this # syntax being present: diff --git a/contrib/tools/cython/Cython/Compiler/UtilityCode.py b/contrib/tools/cython/Cython/Compiler/UtilityCode.py index 91daffe65f..98e9ab5bfb 100644 --- a/contrib/tools/cython/Cython/Compiler/UtilityCode.py +++ b/contrib/tools/cython/Cython/Compiler/UtilityCode.py @@ -16,7 +16,7 @@ class NonManglingModuleScope(Symtab.ModuleScope): def add_imported_entry(self, name, entry, pos): entry.used = True - return super(NonManglingModuleScope, self).add_imported_entry(name, entry, pos) + return super(NonManglingModuleScope, self).add_imported_entry(name, entry, pos) def mangle(self, prefix, name=None): if name: @@ -28,13 +28,13 @@ class NonManglingModuleScope(Symtab.ModuleScope): else: return Symtab.ModuleScope.mangle(self, prefix) - + class CythonUtilityCodeContext(StringParseContext): scope = None - def find_module(self, module_name, relative_to=None, pos=None, need_pxd=True, absolute_fallback=True): - if relative_to: - raise AssertionError("Relative imports not supported in utility code.") + def find_module(self, module_name, relative_to=None, pos=None, need_pxd=True, absolute_fallback=True): + if relative_to: + raise AssertionError("Relative imports not supported in utility code.") if module_name != self.module_name: if module_name not in self.modules: raise AssertionError("Only the cython cimport is supported.") @@ -42,7 +42,7 @@ class CythonUtilityCodeContext(StringParseContext): return self.modules[module_name] if self.scope is None: - self.scope = NonManglingModuleScope( + self.scope = NonManglingModuleScope( self.prefix, module_name, parent_module=None, context=self, cpp=self.cpp) return self.scope @@ -68,8 +68,8 @@ class CythonUtilityCode(Code.UtilityCodeBase): is_cython_utility = True def __init__(self, impl, name="__pyxutil", prefix="", requires=None, - file=None, from_scope=None, context=None, compiler_directives=None, - outer_module_scope=None): + file=None, from_scope=None, context=None, compiler_directives=None, + outer_module_scope=None): # 1) We need to delay the parsing/processing, so that all modules can be # imported without import loops # 2) The same utility code object can be used for multiple source files; @@ -90,25 
+90,25 @@ class CythonUtilityCode(Code.UtilityCodeBase): self.prefix = prefix self.requires = requires or [] self.from_scope = from_scope - self.outer_module_scope = outer_module_scope - self.compiler_directives = compiler_directives + self.outer_module_scope = outer_module_scope + self.compiler_directives = compiler_directives self.context_types = context_types - def __eq__(self, other): - if isinstance(other, CythonUtilityCode): - return self._equality_params() == other._equality_params() - else: - return False - - def _equality_params(self): - outer_scope = self.outer_module_scope - while isinstance(outer_scope, NonManglingModuleScope): - outer_scope = outer_scope.outer_scope - return self.impl, outer_scope, self.compiler_directives - - def __hash__(self): - return hash(self.impl) - + def __eq__(self, other): + if isinstance(other, CythonUtilityCode): + return self._equality_params() == other._equality_params() + else: + return False + + def _equality_params(self): + outer_scope = self.outer_module_scope + while isinstance(outer_scope, NonManglingModuleScope): + outer_scope = outer_scope.outer_scope + return self.impl, outer_scope, self.compiler_directives + + def __hash__(self): + return hash(self.impl) + def get_tree(self, entries_only=False, cython_scope=None): from .AnalysedTreeTransforms import AutoTestDictTransform # The AutoTestDictTransform creates the statement "__test__ = {}", @@ -117,14 +117,14 @@ class CythonUtilityCode(Code.UtilityCodeBase): excludes = [AutoTestDictTransform] from . import Pipeline, ParseTreeTransforms - context = CythonUtilityCodeContext( + context = CythonUtilityCodeContext( self.name, compiler_directives=self.compiler_directives, cpp=cython_scope.is_cpp() if cython_scope else False) context.prefix = self.prefix context.cython_scope = cython_scope #context = StringParseContext(self.name) - tree = parse_from_strings( - self.name, self.impl, context=context, allow_struct_enum_decorator=True) + tree = parse_from_strings( + self.name, self.impl, context=context, allow_struct_enum_decorator=True) pipeline = Pipeline.create_pipeline(context, 'pyx', exclude_classes=excludes) if entries_only: @@ -143,33 +143,33 @@ class CythonUtilityCode(Code.UtilityCodeBase): pipeline = Pipeline.insert_into_pipeline(pipeline, transform, before=before) - def merge_scope(scope): - def merge_scope_transform(module_node): - module_node.scope.merge_in(scope) + def merge_scope(scope): + def merge_scope_transform(module_node): + module_node.scope.merge_in(scope) return module_node - return merge_scope_transform - - if self.from_scope: - pipeline = Pipeline.insert_into_pipeline( - pipeline, merge_scope(self.from_scope), - before=ParseTreeTransforms.AnalyseDeclarationsTransform) - - for dep in self.requires: - if isinstance(dep, CythonUtilityCode) and hasattr(dep, 'tree') and not cython_scope: - pipeline = Pipeline.insert_into_pipeline( - pipeline, merge_scope(dep.tree.scope), - before=ParseTreeTransforms.AnalyseDeclarationsTransform) - - if self.outer_module_scope: - # inject outer module between utility code module and builtin module - def scope_transform(module_node): - module_node.scope.outer_scope = self.outer_module_scope - return module_node - - pipeline = Pipeline.insert_into_pipeline( - pipeline, scope_transform, - before=ParseTreeTransforms.AnalyseDeclarationsTransform) - + return merge_scope_transform + + if self.from_scope: + pipeline = Pipeline.insert_into_pipeline( + pipeline, merge_scope(self.from_scope), + before=ParseTreeTransforms.AnalyseDeclarationsTransform) + + for 
dep in self.requires: + if isinstance(dep, CythonUtilityCode) and hasattr(dep, 'tree') and not cython_scope: + pipeline = Pipeline.insert_into_pipeline( + pipeline, merge_scope(dep.tree.scope), + before=ParseTreeTransforms.AnalyseDeclarationsTransform) + + if self.outer_module_scope: + # inject outer module between utility code module and builtin module + def scope_transform(module_node): + module_node.scope.outer_scope = self.outer_module_scope + return module_node + + pipeline = Pipeline.insert_into_pipeline( + pipeline, scope_transform, + before=ParseTreeTransforms.AnalyseDeclarationsTransform) + if self.context_types: # inject types into module scope def scope_transform(module_node): @@ -184,7 +184,7 @@ class CythonUtilityCode(Code.UtilityCodeBase): (err, tree) = Pipeline.run_pipeline(pipeline, tree, printtree=False) assert not err, err - self.tree = tree + self.tree = tree return tree def put_code(self, output): @@ -213,12 +213,12 @@ class CythonUtilityCode(Code.UtilityCodeBase): entries.pop('__builtins__') entries.pop('__doc__') - for entry in entries.values(): + for entry in entries.values(): entry.utility_code_definition = self entry.used = used original_scope = tree.scope - dest_scope.merge_in(original_scope, merge_unused=True, whitelist=whitelist) + dest_scope.merge_in(original_scope, merge_unused=True, whitelist=whitelist) tree.scope = dest_scope for dep in self.requires: @@ -227,7 +227,7 @@ class CythonUtilityCode(Code.UtilityCodeBase): return original_scope - + def declare_declarations_in_scope(declaration_string, env, private_type=True, *args, **kwargs): """ diff --git a/contrib/tools/cython/Cython/Compiler/Visitor.pxd b/contrib/tools/cython/Cython/Compiler/Visitor.pxd index 36fe5b1885..d5d5692aa7 100644 --- a/contrib/tools/cython/Cython/Compiler/Visitor.pxd +++ b/contrib/tools/cython/Cython/Compiler/Visitor.pxd @@ -12,13 +12,13 @@ cdef class TreeVisitor: cdef _visitchild(self, child, parent, attrname, idx) cdef dict _visitchildren(self, parent, attrs) cpdef visitchildren(self, parent, attrs=*) - cdef _raise_compiler_error(self, child, e) + cdef _raise_compiler_error(self, child, e) cdef class VisitorTransform(TreeVisitor): - cdef dict _process_children(self, parent, attrs=*) - cpdef visitchildren(self, parent, attrs=*, exclude=*) - cdef list _flatten_list(self, list orig_list) - cdef list _select_attrs(self, attrs, exclude) + cdef dict _process_children(self, parent, attrs=*) + cpdef visitchildren(self, parent, attrs=*, exclude=*) + cdef list _flatten_list(self, list orig_list) + cdef list _select_attrs(self, attrs, exclude) cdef class CythonTransform(VisitorTransform): cdef public context diff --git a/contrib/tools/cython/Cython/Compiler/Visitor.py b/contrib/tools/cython/Cython/Compiler/Visitor.py index ad5525fe01..a35d13e1d0 100644 --- a/contrib/tools/cython/Cython/Compiler/Visitor.py +++ b/contrib/tools/cython/Cython/Compiler/Visitor.py @@ -1,14 +1,14 @@ # cython: infer_types=True -# cython: language_level=3 -# cython: auto_pickle=False +# cython: language_level=3 +# cython: auto_pickle=False # # Tree visitor and transform framework # -from __future__ import absolute_import, print_function +from __future__ import absolute_import, print_function -import sys +import sys import inspect from . import TypeSlots @@ -17,19 +17,19 @@ from . import Nodes from . import ExprNodes from . import Errors from . import DebugFlags -from . import Future +from . 
import Future import cython -cython.declare(_PRINTABLE=tuple) - -if sys.version_info[0] >= 3: - _PRINTABLE = (bytes, str, int, float) -else: - _PRINTABLE = (str, unicode, long, int, float) - - +cython.declare(_PRINTABLE=tuple) + +if sys.version_info[0] >= 3: + _PRINTABLE = (bytes, str, int, float) +else: + _PRINTABLE = (str, unicode, long, int, float) + + class TreeVisitor(object): """ Base class for writing visitors for a Cython tree, contains utilities for @@ -59,9 +59,9 @@ class TreeVisitor(object): >>> tree = SampleNode(0, SampleNode(1), [SampleNode(2), SampleNode(3)]) >>> class MyVisitor(TreeVisitor): ... def visit_SampleNode(self, node): - ... print("in %s %s" % (node.value, self.access_path)) + ... print("in %s %s" % (node.value, self.access_path)) ... self.visitchildren(node) - ... print("out %s" % node.value) + ... print("out %s" % node.value) ... >>> MyVisitor().visit(tree) in 0 [] @@ -78,9 +78,9 @@ class TreeVisitor(object): self.dispatch_table = {} self.access_path = [] - def dump_node(self, node): - ignored = list(node.child_attrs or []) + [ - u'child_attrs', u'pos', u'gil_message', u'cpp_message', u'subexprs'] + def dump_node(self, node): + ignored = list(node.child_attrs or []) + [ + u'child_attrs', u'pos', u'gil_message', u'cpp_message', u'subexprs'] values = [] pos = getattr(node, 'pos', None) if pos: @@ -93,7 +93,7 @@ class TreeVisitor(object): for attr in attribute_names: if attr in ignored: continue - if attr.startswith('_') or attr.endswith('_'): + if attr.startswith('_') or attr.endswith('_'): continue try: value = getattr(node, attr) @@ -103,12 +103,12 @@ class TreeVisitor(object): continue elif isinstance(value, list): value = u'[...]/%d' % len(value) - elif not isinstance(value, _PRINTABLE): + elif not isinstance(value, _PRINTABLE): continue else: value = repr(value) values.append(u'%s = %s' % (attr, value)) - return u'%s(%s)' % (node.__class__.__name__, u',\n '.join(values)) + return u'%s(%s)' % (node.__class__.__name__, u',\n '.join(values)) def _find_node_path(self, stacktrace): import os.path @@ -159,11 +159,11 @@ class TreeVisitor(object): handler_method = getattr(self, pattern % mro_cls.__name__, None) if handler_method is not None: return handler_method - print(type(self), cls) + print(type(self), cls) if self.access_path: - print(self.access_path) - print(self.access_path[-1][0].pos) - print(self.access_path[-1][0].__dict__) + print(self.access_path) + print(self.access_path[-1][0].pos) + print(self.access_path[-1][0].__dict__) raise RuntimeError("Visitor %r does not accept object: %s" % (self, obj)) def visit(self, obj): @@ -182,7 +182,7 @@ class TreeVisitor(object): raise except Errors.AbortError: raise - except Exception as e: + except Exception as e: if DebugFlags.debug_no_exception_intercept: raise self._raise_compiler_error(obj, e) @@ -244,46 +244,46 @@ class VisitorTransform(TreeVisitor): was not, an exception will be raised. (Typically you want to ensure that you are within a StatListNode or similar before doing this.) 
""" - def visitchildren(self, parent, attrs=None, exclude=None): - # generic def entry point for calls from Python subclasses - if exclude is not None: - attrs = self._select_attrs(parent.child_attrs if attrs is None else attrs, exclude) - return self._process_children(parent, attrs) - - @cython.final - def _select_attrs(self, attrs, exclude): - return [name for name in attrs if name not in exclude] - - @cython.final - def _process_children(self, parent, attrs=None): - # fast cdef entry point for calls from Cython subclasses + def visitchildren(self, parent, attrs=None, exclude=None): + # generic def entry point for calls from Python subclasses + if exclude is not None: + attrs = self._select_attrs(parent.child_attrs if attrs is None else attrs, exclude) + return self._process_children(parent, attrs) + + @cython.final + def _select_attrs(self, attrs, exclude): + return [name for name in attrs if name not in exclude] + + @cython.final + def _process_children(self, parent, attrs=None): + # fast cdef entry point for calls from Cython subclasses result = self._visitchildren(parent, attrs) - for attr, newnode in result.items(): - if type(newnode) is list: - newnode = self._flatten_list(newnode) - setattr(parent, attr, newnode) + for attr, newnode in result.items(): + if type(newnode) is list: + newnode = self._flatten_list(newnode) + setattr(parent, attr, newnode) return result - @cython.final - def _flatten_list(self, orig_list): - # Flatten the list one level and remove any None - newlist = [] - for x in orig_list: - if x is not None: - if type(x) is list: - newlist.extend(x) - else: - newlist.append(x) - return newlist - + @cython.final + def _flatten_list(self, orig_list): + # Flatten the list one level and remove any None + newlist = [] + for x in orig_list: + if x is not None: + if type(x) is list: + newlist.extend(x) + else: + newlist.append(x) + return newlist + def recurse_to_children(self, node): - self._process_children(node) + self._process_children(node) return node def __call__(self, root): return self._visit(root) - + class CythonTransform(VisitorTransform): """ Certain common conventions and utilities for Cython transforms. 
@@ -304,15 +304,15 @@ class CythonTransform(VisitorTransform): def visit_CompilerDirectivesNode(self, node): old = self.current_directives self.current_directives = node.directives - self._process_children(node) + self._process_children(node) self.current_directives = old return node def visit_Node(self, node): - self._process_children(node) + self._process_children(node) return node - + class ScopeTrackingTransform(CythonTransform): # Keeps track of type of scopes #scope_type: can be either of 'module', 'function', 'cclass', 'pyclass', 'struct' @@ -321,14 +321,14 @@ class ScopeTrackingTransform(CythonTransform): def visit_ModuleNode(self, node): self.scope_type = 'module' self.scope_node = node - self._process_children(node) + self._process_children(node) return node def visit_scope(self, node, scope_type): prev = self.scope_type, self.scope_node self.scope_type = scope_type self.scope_node = node - self._process_children(node) + self._process_children(node) self.scope_type, self.scope_node = prev return node @@ -371,45 +371,45 @@ class EnvTransform(CythonTransform): def visit_FuncDefNode(self, node): self.enter_scope(node, node.local_scope) - self._process_children(node) + self._process_children(node) self.exit_scope() return node def visit_GeneratorBodyDefNode(self, node): - self._process_children(node) + self._process_children(node) return node def visit_ClassDefNode(self, node): self.enter_scope(node, node.scope) - self._process_children(node) + self._process_children(node) self.exit_scope() return node def visit_CStructOrUnionDefNode(self, node): self.enter_scope(node, node.scope) - self._process_children(node) + self._process_children(node) self.exit_scope() return node def visit_ScopedExprNode(self, node): if node.expr_scope: self.enter_scope(node, node.expr_scope) - self._process_children(node) + self._process_children(node) self.exit_scope() else: - self._process_children(node) + self._process_children(node) return node def visit_CArgDeclNode(self, node): # default arguments are evaluated in the outer scope if node.default: - attrs = [attr for attr in node.child_attrs if attr != 'default'] - self._process_children(node, attrs) + attrs = [attr for attr in node.child_attrs if attr != 'default'] + self._process_children(node, attrs) self.enter_scope(node, self.current_env().outer_scope) self.visitchildren(node, ('default',)) self.exit_scope() else: - self._process_children(node) + self._process_children(node) return node @@ -430,7 +430,7 @@ class NodeRefCleanupMixin(object): def visit_CloneNode(self, node): arg = node.arg if arg not in self._replacements: - self.visitchildren(arg) + self.visitchildren(arg) node.arg = self._replacements.get(arg, arg) return node @@ -457,7 +457,7 @@ find_special_method_for_binary_operator = { '>': '__gt__', '+': '__add__', '&': '__and__', - '/': '__div__', + '/': '__div__', '//': '__floordiv__', '<<': '__lshift__', '%': '__mod__', @@ -494,7 +494,7 @@ class MethodDispatcherTransform(EnvTransform): """ # only visit call nodes and Python operations def visit_GeneralCallNode(self, node): - self._process_children(node) + self._process_children(node) function = node.function if not function.type.is_pyobject: return node @@ -509,7 +509,7 @@ class MethodDispatcherTransform(EnvTransform): return self._dispatch_to_handler(node, function, args, keyword_args) def visit_SimpleCallNode(self, node): - self._process_children(node) + self._process_children(node) function = node.function if function.type.is_pyobject: arg_tuple = node.arg_tuple @@ -523,7 +523,7 @@ class 
MethodDispatcherTransform(EnvTransform): def visit_PrimaryCmpNode(self, node): if node.cascade: # not currently handled below - self._process_children(node) + self._process_children(node) return node return self._visit_binop_node(node) @@ -531,16 +531,16 @@ class MethodDispatcherTransform(EnvTransform): return self._visit_binop_node(node) def _visit_binop_node(self, node): - self._process_children(node) + self._process_children(node) # FIXME: could special case 'not_in' special_method_name = find_special_method_for_binary_operator(node.operator) if special_method_name: operand1, operand2 = node.operand1, node.operand2 if special_method_name == '__contains__': operand1, operand2 = operand2, operand1 - elif special_method_name == '__div__': - if Future.division in self.current_env().global_scope().context.future_directives: - special_method_name = '__truediv__' + elif special_method_name == '__div__': + if Future.division in self.current_env().global_scope().context.future_directives: + special_method_name = '__truediv__' obj_type = operand1.type if obj_type.is_builtin_type: type_name = obj_type.name @@ -552,7 +552,7 @@ class MethodDispatcherTransform(EnvTransform): return node def visit_UnopNode(self, node): - self._process_children(node) + self._process_children(node) special_method_name = find_special_method_for_unary_operator(node.operator) if special_method_name: operand = node.operand @@ -624,19 +624,19 @@ class MethodDispatcherTransform(EnvTransform): return function_handler(node, function, arg_list, kwargs) else: return function_handler(node, function, arg_list) - elif function.is_attribute: + elif function.is_attribute: attr_name = function.attribute - if function.type.is_pyobject: - self_arg = function.obj - elif node.self and function.entry: - entry = function.entry.as_variable - if not entry or not entry.is_builtin: - return node - # C implementation of a Python builtin method - see if we find further matches - self_arg = node.self - arg_list = arg_list[1:] # drop CloneNode of self argument - else: - return node + if function.type.is_pyobject: + self_arg = function.obj + elif node.self and function.entry: + entry = function.entry.as_variable + if not entry or not entry.is_builtin: + return node + # C implementation of a Python builtin method - see if we find further matches + self_arg = node.self + arg_list = arg_list[1:] # drop CloneNode of self argument + else: + return node obj_type = self_arg.type is_unbound_method = False if obj_type.is_builtin_type: @@ -673,12 +673,12 @@ class MethodDispatcherTransform(EnvTransform): if self_arg is not None: arg_list = [self_arg] + list(arg_list) if kwargs: - result = method_handler( + result = method_handler( node, function, arg_list, is_unbound_method, kwargs) else: - result = method_handler( + result = method_handler( node, function, arg_list, is_unbound_method) - return result + return result def _handle_function(self, node, function_name, function, arg_list, kwargs): """Fallback handler""" @@ -699,15 +699,15 @@ class RecursiveNodeReplacer(VisitorTransform): super(RecursiveNodeReplacer, self).__init__() self.orig_node, self.new_node = orig_node, new_node - def visit_CloneNode(self, node): - if node is self.orig_node: - return self.new_node - if node.arg is self.orig_node: - node.arg = self.new_node - return node - + def visit_CloneNode(self, node): + if node is self.orig_node: + return self.new_node + if node.arg is self.orig_node: + node.arg = self.new_node + return node + def visit_Node(self, node): - self._process_children(node) + 
self._process_children(node) if node is self.orig_node: return self.new_node else: @@ -752,22 +752,22 @@ def replace_node(ptr, value): else: getattr(parent, attrname)[listidx] = value - + class PrintTree(TreeVisitor): """Prints a representation of the tree to standard output. Subclass and override repr_of to provide more information about nodes. """ - def __init__(self, start=None, end=None): + def __init__(self, start=None, end=None): TreeVisitor.__init__(self) self._indent = "" - if start is not None or end is not None: - self._line_range = (start or 0, end or 2**30) - else: - self._line_range = None + if start is not None or end is not None: + self._line_range = (start or 0, end or 2**30) + else: + self._line_range = None def indent(self): self._indent += " " - + def unindent(self): self._indent = self._indent[:-2] @@ -781,37 +781,37 @@ class PrintTree(TreeVisitor): # under the parent-node, not displaying the list itself in # the hierarchy. def visit_Node(self, node): - self._print_node(node) + self._print_node(node) self.indent() self.visitchildren(node) self.unindent() return node - def visit_CloneNode(self, node): - self._print_node(node) - self.indent() - line = node.pos[1] - if self._line_range is None or self._line_range[0] <= line <= self._line_range[1]: - print("%s- %s: %s" % (self._indent, 'arg', self.repr_of(node.arg))) - self.indent() - self.visitchildren(node.arg) - self.unindent() - self.unindent() - return node - - def _print_node(self, node): - line = node.pos[1] - if self._line_range is None or self._line_range[0] <= line <= self._line_range[1]: - if len(self.access_path) == 0: - name = "(root)" - else: - parent, attr, idx = self.access_path[-1] - if idx is not None: - name = "%s[%d]" % (attr, idx) - else: - name = attr - print("%s- %s: %s" % (self._indent, name, self.repr_of(node))) - + def visit_CloneNode(self, node): + self._print_node(node) + self.indent() + line = node.pos[1] + if self._line_range is None or self._line_range[0] <= line <= self._line_range[1]: + print("%s- %s: %s" % (self._indent, 'arg', self.repr_of(node.arg))) + self.indent() + self.visitchildren(node.arg) + self.unindent() + self.unindent() + return node + + def _print_node(self, node): + line = node.pos[1] + if self._line_range is None or self._line_range[0] <= line <= self._line_range[1]: + if len(self.access_path) == 0: + name = "(root)" + else: + parent, attr, idx = self.access_path[-1] + if idx is not None: + name = "%s[%d]" % (attr, idx) + else: + name = attr + print("%s- %s: %s" % (self._indent, name, self.repr_of(node))) + def repr_of(self, node): if node is None: return "(none)" diff --git a/contrib/tools/cython/Cython/Coverage.py b/contrib/tools/cython/Cython/Coverage.py index 1795790e42..5aa9df2ce0 100644 --- a/contrib/tools/cython/Cython/Coverage.py +++ b/contrib/tools/cython/Cython/Coverage.py @@ -1,181 +1,181 @@ -""" -A Cython plugin for coverage.py - -Requires the coverage package at least in version 4.0 (which added the plugin API). -""" - -from __future__ import absolute_import - -import re -import os.path -import sys -from collections import defaultdict - -from coverage.plugin import CoveragePlugin, FileTracer, FileReporter # requires coverage.py 4.0+ +""" +A Cython plugin for coverage.py + +Requires the coverage package at least in version 4.0 (which added the plugin API). 
+""" + +from __future__ import absolute_import + +import re +import os.path +import sys +from collections import defaultdict + +from coverage.plugin import CoveragePlugin, FileTracer, FileReporter # requires coverage.py 4.0+ from coverage.files import canonical_filename - -from .Utils import find_root_package_dir, is_package_dir, open_source_file - - -from . import __version__ - - -C_FILE_EXTENSIONS = ['.c', '.cpp', '.cc', '.cxx'] -MODULE_FILE_EXTENSIONS = set(['.py', '.pyx', '.pxd'] + C_FILE_EXTENSIONS) - - -def _find_c_source(base_path): - file_exists = os.path.exists - for ext in C_FILE_EXTENSIONS: - file_name = base_path + ext - if file_exists(file_name): - return file_name - return None - - + +from .Utils import find_root_package_dir, is_package_dir, open_source_file + + +from . import __version__ + + +C_FILE_EXTENSIONS = ['.c', '.cpp', '.cc', '.cxx'] +MODULE_FILE_EXTENSIONS = set(['.py', '.pyx', '.pxd'] + C_FILE_EXTENSIONS) + + +def _find_c_source(base_path): + file_exists = os.path.exists + for ext in C_FILE_EXTENSIONS: + file_name = base_path + ext + if file_exists(file_name): + return file_name + return None + + def _find_dep_file_path(main_file, file_path, relative_path_search=False): - abs_path = os.path.abspath(file_path) + abs_path = os.path.abspath(file_path) if not os.path.exists(abs_path) and (file_path.endswith('.pxi') or relative_path_search): # files are looked up relative to the main source file rel_file_path = os.path.join(os.path.dirname(main_file), file_path) if os.path.exists(rel_file_path): abs_path = os.path.abspath(rel_file_path) - # search sys.path for external locations if a valid file hasn't been found - if not os.path.exists(abs_path): - for sys_path in sys.path: - test_path = os.path.realpath(os.path.join(sys_path, file_path)) - if os.path.exists(test_path): + # search sys.path for external locations if a valid file hasn't been found + if not os.path.exists(abs_path): + for sys_path in sys.path: + test_path = os.path.realpath(os.path.join(sys_path, file_path)) + if os.path.exists(test_path): return canonical_filename(test_path) return canonical_filename(abs_path) - - -class Plugin(CoveragePlugin): - # map from traced file paths to absolute file paths - _file_path_map = None - # map from traced file paths to corresponding C files - _c_files_map = None - # map from parsed C files to their content - _parsed_c_files = None - - def sys_info(self): - return [('Cython version', __version__)] - - def file_tracer(self, filename): - """ - Try to find a C source file for a file path found by the tracer. - """ + + +class Plugin(CoveragePlugin): + # map from traced file paths to absolute file paths + _file_path_map = None + # map from traced file paths to corresponding C files + _c_files_map = None + # map from parsed C files to their content + _parsed_c_files = None + + def sys_info(self): + return [('Cython version', __version__)] + + def file_tracer(self, filename): + """ + Try to find a C source file for a file path found by the tracer. + """ # TODO We need to pxd-files to the include map. For more info see pybuild.py # Currently skip such files, because they are not supported in Arcadia pybuild with coverage. 
if os.path.splitext(filename)[-1] not in ('.pyx', '.pxi'): return None - if filename.startswith('<') or filename.startswith('memory:'): - return None - c_file = py_file = None + if filename.startswith('<') or filename.startswith('memory:'): + return None + c_file = py_file = None filename = canonical_filename(filename) - if self._c_files_map and filename in self._c_files_map: - c_file = self._c_files_map[filename][0] - - if c_file is None: - c_file, py_file = self._find_source_files(filename) - if not c_file: - return None # unknown file - - # parse all source file paths and lines from C file - # to learn about all relevant source files right away (pyx/pxi/pxd) - # FIXME: this might already be too late if the first executed line - # is not from the main .pyx file but a file with a different - # name than the .c file (which prevents us from finding the - # .c file) - _, code = self._read_source_lines(c_file, filename) - if code is None: - return None # no source found - - if self._file_path_map is None: - self._file_path_map = {} - return CythonModuleTracer(filename, py_file, c_file, self._c_files_map, self._file_path_map) - - def file_reporter(self, filename): - # TODO: let coverage.py handle .py files itself - #ext = os.path.splitext(filename)[1].lower() - #if ext == '.py': - # from coverage.python import PythonFileReporter - # return PythonFileReporter(filename) - + if self._c_files_map and filename in self._c_files_map: + c_file = self._c_files_map[filename][0] + + if c_file is None: + c_file, py_file = self._find_source_files(filename) + if not c_file: + return None # unknown file + + # parse all source file paths and lines from C file + # to learn about all relevant source files right away (pyx/pxi/pxd) + # FIXME: this might already be too late if the first executed line + # is not from the main .pyx file but a file with a different + # name than the .c file (which prevents us from finding the + # .c file) + _, code = self._read_source_lines(c_file, filename) + if code is None: + return None # no source found + + if self._file_path_map is None: + self._file_path_map = {} + return CythonModuleTracer(filename, py_file, c_file, self._c_files_map, self._file_path_map) + + def file_reporter(self, filename): + # TODO: let coverage.py handle .py files itself + #ext = os.path.splitext(filename)[1].lower() + #if ext == '.py': + # from coverage.python import PythonFileReporter + # return PythonFileReporter(filename) + filename = canonical_filename(filename) - if self._c_files_map and filename in self._c_files_map: - c_file, rel_file_path, code = self._c_files_map[filename] - else: - c_file, _ = self._find_source_files(filename) - if not c_file: + if self._c_files_map and filename in self._c_files_map: + c_file, rel_file_path, code = self._c_files_map[filename] + else: + c_file, _ = self._find_source_files(filename) + if not c_file: if standalone(): raise AssertionError(filename) - return None # unknown file - rel_file_path, code = self._read_source_lines(c_file, filename) + return None # unknown file + rel_file_path, code = self._read_source_lines(c_file, filename) if code is None: if standalone(): raise AssertionError(filename) return None # no source found - return CythonModuleReporter(c_file, filename, rel_file_path, code) - - def _find_source_files(self, filename): - basename, ext = os.path.splitext(filename) - ext = ext.lower() - if ext in MODULE_FILE_EXTENSIONS: - pass + return CythonModuleReporter(c_file, filename, rel_file_path, code) + + def _find_source_files(self, filename): + 
basename, ext = os.path.splitext(filename) + ext = ext.lower() + if ext in MODULE_FILE_EXTENSIONS: + pass elif ext == '.pyd': # Windows extension module platform_suffix = re.search(r'[.]cp[0-9]+-win[_a-z0-9]*$', basename, re.I) - if platform_suffix: - basename = basename[:platform_suffix.start()] + if platform_suffix: + basename = basename[:platform_suffix.start()] elif ext == '.so': # Linux/Unix/Mac extension module platform_suffix = re.search(r'[.](?:cpython|pypy)-[0-9]+[-_a-z0-9]*$', basename, re.I) if platform_suffix: basename = basename[:platform_suffix.start()] - elif ext == '.pxi': - # if we get here, it means that the first traced line of a Cython module was - # not in the main module but in an include file, so try a little harder to - # find the main source file - self._find_c_source_files(os.path.dirname(filename), filename) - if filename in self._c_files_map: - return self._c_files_map[filename][0], None + elif ext == '.pxi': + # if we get here, it means that the first traced line of a Cython module was + # not in the main module but in an include file, so try a little harder to + # find the main source file + self._find_c_source_files(os.path.dirname(filename), filename) + if filename in self._c_files_map: + return self._c_files_map[filename][0], None if standalone(): raise AssertionError(filename) - else: - # none of our business - return None, None - - c_file = filename if ext in C_FILE_EXTENSIONS else _find_c_source(basename) - if c_file is None: - # a module "pkg/mod.so" can have a source file "pkg/pkg.mod.c" - package_root = find_root_package_dir.uncached(filename) - package_path = os.path.relpath(basename, package_root).split(os.path.sep) - if len(package_path) > 1: - test_basepath = os.path.join(os.path.dirname(filename), '.'.join(package_path)) - c_file = _find_c_source(test_basepath) - - py_source_file = None - if c_file: - py_source_file = os.path.splitext(c_file)[0] + '.py' - if not os.path.exists(py_source_file): - py_source_file = None - - try: + else: + # none of our business + return None, None + + c_file = filename if ext in C_FILE_EXTENSIONS else _find_c_source(basename) + if c_file is None: + # a module "pkg/mod.so" can have a source file "pkg/pkg.mod.c" + package_root = find_root_package_dir.uncached(filename) + package_path = os.path.relpath(basename, package_root).split(os.path.sep) + if len(package_path) > 1: + test_basepath = os.path.join(os.path.dirname(filename), '.'.join(package_path)) + c_file = _find_c_source(test_basepath) + + py_source_file = None + if c_file: + py_source_file = os.path.splitext(c_file)[0] + '.py' + if not os.path.exists(py_source_file): + py_source_file = None + + try: with OpenFile(c_file) as f: if '/* Generated by Cython ' not in f.read(30): - return None, None # not a Cython file - except (IOError, OSError): - c_file = None - - return c_file, py_source_file - - def _find_c_source_files(self, dir_path, source_file): - """ - Desperately parse all C files in the directory or its package parents - (not re-descending) to find the (included) source file in one of them. - """ + return None, None # not a Cython file + except (IOError, OSError): + c_file = None + + return c_file, py_source_file + + def _find_c_source_files(self, dir_path, source_file): + """ + Desperately parse all C files in the directory or its package parents + (not re-descending) to find the (included) source file in one of them. 
+ """ if standalone(): if os.environ.get('PYTHON_COVERAGE_CYTHON_BUILD_ROOT'): broot = os.environ['PYTHON_COVERAGE_CYTHON_BUILD_ROOT'] @@ -190,188 +190,188 @@ class Plugin(CoveragePlugin): return raise AssertionError((source_file, os.environ.get('PYTHON_COVERAGE_CYTHON_BUILD_ROOT'))) - if not os.path.isdir(dir_path): - return - splitext = os.path.splitext - for filename in os.listdir(dir_path): - ext = splitext(filename)[1].lower() - if ext in C_FILE_EXTENSIONS: - self._read_source_lines(os.path.join(dir_path, filename), source_file) - if source_file in self._c_files_map: - return - # not found? then try one package up - if is_package_dir(dir_path): - self._find_c_source_files(os.path.dirname(dir_path), source_file) - - def _read_source_lines(self, c_file, sourcefile): - """ - Parse a Cython generated C/C++ source file and find the executable lines. - Each executable line starts with a comment header that states source file - and line number, as well as the surrounding range of source code lines. - """ - if self._parsed_c_files is None: - self._parsed_c_files = {} - if c_file in self._parsed_c_files: - code_lines = self._parsed_c_files[c_file] - else: - code_lines = self._parse_cfile_lines(c_file) - self._parsed_c_files[c_file] = code_lines - - if self._c_files_map is None: - self._c_files_map = {} - - for filename, code in code_lines.items(): + if not os.path.isdir(dir_path): + return + splitext = os.path.splitext + for filename in os.listdir(dir_path): + ext = splitext(filename)[1].lower() + if ext in C_FILE_EXTENSIONS: + self._read_source_lines(os.path.join(dir_path, filename), source_file) + if source_file in self._c_files_map: + return + # not found? then try one package up + if is_package_dir(dir_path): + self._find_c_source_files(os.path.dirname(dir_path), source_file) + + def _read_source_lines(self, c_file, sourcefile): + """ + Parse a Cython generated C/C++ source file and find the executable lines. + Each executable line starts with a comment header that states source file + and line number, as well as the surrounding range of source code lines. + """ + if self._parsed_c_files is None: + self._parsed_c_files = {} + if c_file in self._parsed_c_files: + code_lines = self._parsed_c_files[c_file] + else: + code_lines = self._parse_cfile_lines(c_file) + self._parsed_c_files[c_file] = code_lines + + if self._c_files_map is None: + self._c_files_map = {} + + for filename, code in code_lines.items(): abs_path = _find_dep_file_path(c_file, filename, relative_path_search=True) - self._c_files_map[abs_path] = (c_file, filename, code) - - if sourcefile not in self._c_files_map: - return (None,) * 2 # e.g. shared library file - return self._c_files_map[sourcefile][1:] - - def _parse_cfile_lines(self, c_file): - """ - Parse a C file and extract all source file lines that generated executable code. - """ - match_source_path_line = re.compile(r' */[*] +"(.*)":([0-9]+)$').match - match_current_code_line = re.compile(r' *[*] (.*) # <<<<<<+$').match - match_comment_end = re.compile(r' *[*]/$').match - match_trace_line = re.compile(r' *__Pyx_TraceLine\(([0-9]+),').match - not_executable = re.compile( - r'\s*c(?:type)?def\s+' - r'(?:(?:public|external)\s+)?' - r'(?:struct|union|enum|class)' - r'(\s+[^:]+|)\s*:' - ).match - - code_lines = defaultdict(dict) - executable_lines = defaultdict(set) - current_filename = None - + self._c_files_map[abs_path] = (c_file, filename, code) + + if sourcefile not in self._c_files_map: + return (None,) * 2 # e.g. 
shared library file + return self._c_files_map[sourcefile][1:] + + def _parse_cfile_lines(self, c_file): + """ + Parse a C file and extract all source file lines that generated executable code. + """ + match_source_path_line = re.compile(r' */[*] +"(.*)":([0-9]+)$').match + match_current_code_line = re.compile(r' *[*] (.*) # <<<<<<+$').match + match_comment_end = re.compile(r' *[*]/$').match + match_trace_line = re.compile(r' *__Pyx_TraceLine\(([0-9]+),').match + not_executable = re.compile( + r'\s*c(?:type)?def\s+' + r'(?:(?:public|external)\s+)?' + r'(?:struct|union|enum|class)' + r'(\s+[^:]+|)\s*:' + ).match + + code_lines = defaultdict(dict) + executable_lines = defaultdict(set) + current_filename = None + with OpenFile(c_file) as lines: - lines = iter(lines) - for line in lines: - match = match_source_path_line(line) - if not match: - if '__Pyx_TraceLine(' in line and current_filename is not None: - trace_line = match_trace_line(line) - if trace_line: - executable_lines[current_filename].add(int(trace_line.group(1))) - continue - filename, lineno = match.groups() - current_filename = filename - lineno = int(lineno) - for comment_line in lines: - match = match_current_code_line(comment_line) - if match: - code_line = match.group(1).rstrip() - if not_executable(code_line): - break - code_lines[filename][lineno] = code_line - break - elif match_comment_end(comment_line): - # unexpected comment format - false positive? - break - - # Remove lines that generated code but are not traceable. - for filename, lines in code_lines.items(): - dead_lines = set(lines).difference(executable_lines.get(filename, ())) - for lineno in dead_lines: - del lines[lineno] - return code_lines - - -class CythonModuleTracer(FileTracer): - """ - Find the Python/Cython source file for a Cython module. - """ - def __init__(self, module_file, py_file, c_file, c_files_map, file_path_map): - super(CythonModuleTracer, self).__init__() - self.module_file = module_file - self.py_file = py_file - self.c_file = c_file - self._c_files_map = c_files_map - self._file_path_map = file_path_map - - def has_dynamic_source_filename(self): - return True - - def dynamic_source_filename(self, filename, frame): - """ - Determine source file path. Called by the function call tracer. - """ - source_file = frame.f_code.co_filename - try: - return self._file_path_map[source_file] - except KeyError: - pass + lines = iter(lines) + for line in lines: + match = match_source_path_line(line) + if not match: + if '__Pyx_TraceLine(' in line and current_filename is not None: + trace_line = match_trace_line(line) + if trace_line: + executable_lines[current_filename].add(int(trace_line.group(1))) + continue + filename, lineno = match.groups() + current_filename = filename + lineno = int(lineno) + for comment_line in lines: + match = match_current_code_line(comment_line) + if match: + code_line = match.group(1).rstrip() + if not_executable(code_line): + break + code_lines[filename][lineno] = code_line + break + elif match_comment_end(comment_line): + # unexpected comment format - false positive? + break + + # Remove lines that generated code but are not traceable. + for filename, lines in code_lines.items(): + dead_lines = set(lines).difference(executable_lines.get(filename, ())) + for lineno in dead_lines: + del lines[lineno] + return code_lines + + +class CythonModuleTracer(FileTracer): + """ + Find the Python/Cython source file for a Cython module. 
+ """ + def __init__(self, module_file, py_file, c_file, c_files_map, file_path_map): + super(CythonModuleTracer, self).__init__() + self.module_file = module_file + self.py_file = py_file + self.c_file = c_file + self._c_files_map = c_files_map + self._file_path_map = file_path_map + + def has_dynamic_source_filename(self): + return True + + def dynamic_source_filename(self, filename, frame): + """ + Determine source file path. Called by the function call tracer. + """ + source_file = frame.f_code.co_filename + try: + return self._file_path_map[source_file] + except KeyError: + pass if standalone(): abs_path = self.module_file else: abs_path = _find_dep_file_path(filename, source_file) - - if self.py_file and source_file[-3:].lower() == '.py': - # always let coverage.py handle this case itself - self._file_path_map[source_file] = self.py_file - return self.py_file - - assert self._c_files_map is not None - if abs_path not in self._c_files_map: - self._c_files_map[abs_path] = (self.c_file, source_file, None) - self._file_path_map[source_file] = abs_path - return abs_path - - -class CythonModuleReporter(FileReporter): - """ - Provide detailed trace information for one source file to coverage.py. - """ - def __init__(self, c_file, source_file, rel_file_path, code): - super(CythonModuleReporter, self).__init__(source_file) - self.name = rel_file_path - self.c_file = c_file - self._code = code + + if self.py_file and source_file[-3:].lower() == '.py': + # always let coverage.py handle this case itself + self._file_path_map[source_file] = self.py_file + return self.py_file + + assert self._c_files_map is not None + if abs_path not in self._c_files_map: + self._c_files_map[abs_path] = (self.c_file, source_file, None) + self._file_path_map[source_file] = abs_path + return abs_path + + +class CythonModuleReporter(FileReporter): + """ + Provide detailed trace information for one source file to coverage.py. + """ + def __init__(self, c_file, source_file, rel_file_path, code): + super(CythonModuleReporter, self).__init__(source_file) + self.name = rel_file_path + self.c_file = c_file + self._code = code self._abs_filename = self._find_abs_filename() - - def lines(self): - """ - Return set of line numbers that are possibly executable. - """ - return set(self._code) - - def _iter_source_tokens(self): - current_line = 1 - for line_no, code_line in sorted(self._code.items()): - while line_no > current_line: - yield [] - current_line += 1 - yield [('txt', code_line)] - current_line += 1 - - def source(self): - """ - Return the source code of the file as a string. - """ + + def lines(self): + """ + Return set of line numbers that are possibly executable. + """ + return set(self._code) + + def _iter_source_tokens(self): + current_line = 1 + for line_no, code_line in sorted(self._code.items()): + while line_no > current_line: + yield [] + current_line += 1 + yield [('txt', code_line)] + current_line += 1 + + def source(self): + """ + Return the source code of the file as a string. + """ if os.path.exists(self._abs_filename): with open_source_file(self._abs_filename) as f: - return f.read() - else: - return '\n'.join( - (tokens[0][1] if tokens else '') - for tokens in self._iter_source_tokens()) - - def source_token_lines(self): - """ - Iterate over the source code tokens. - """ + return f.read() + else: + return '\n'.join( + (tokens[0][1] if tokens else '') + for tokens in self._iter_source_tokens()) + + def source_token_lines(self): + """ + Iterate over the source code tokens. 
+ """ if os.path.exists(self._abs_filename): with open_source_file(self._abs_filename) as f: - for line in f: - yield [('txt', line.rstrip('\n'))] - else: - for line in self._iter_source_tokens(): + for line in f: + yield [('txt', line.rstrip('\n'))] + else: + for line in self._iter_source_tokens(): yield line - + def _find_abs_filename(self): for root in [ os.environ.get('PYTHON_COVERAGE_ARCADIA_SOURCE_ROOT'), @@ -382,10 +382,10 @@ class CythonModuleReporter(FileReporter): if root and os.path.exists(abs_path): return abs_path return self.filename - -def coverage_init(reg, options): - reg.add_file_tracer(Plugin()) + +def coverage_init(reg, options): + reg.add_file_tracer(Plugin()) # ========================== Arcadia specific ================================= diff --git a/contrib/tools/cython/Cython/Debugger/Cygdb.py b/contrib/tools/cython/Cython/Debugger/Cygdb.py index 029adb1742..45f31ce6f7 100644 --- a/contrib/tools/cython/Cython/Debugger/Cygdb.py +++ b/contrib/tools/cython/Cython/Debugger/Cygdb.py @@ -126,7 +126,7 @@ def main(path_to_debug_info=None, gdb_argv=None, no_import=False): logging_level = logging.WARN if options.verbosity == 1: logging_level = logging.INFO - if options.verbosity >= 2: + if options.verbosity >= 2: logging_level = logging.DEBUG logging.basicConfig(level=logging_level) diff --git a/contrib/tools/cython/Cython/Debugger/DebugWriter.py b/contrib/tools/cython/Cython/Debugger/DebugWriter.py index a5f30e8850..876a3a2169 100644 --- a/contrib/tools/cython/Cython/Debugger/DebugWriter.py +++ b/contrib/tools/cython/Cython/Debugger/DebugWriter.py @@ -1,4 +1,4 @@ -from __future__ import absolute_import +from __future__ import absolute_import import os import sys @@ -15,9 +15,9 @@ except ImportError: try: from xml.etree import ElementTree as etree except ImportError: - etree = None + etree = None -from ..Compiler import Errors +from ..Compiler import Errors class CythonDebugWriter(object): @@ -32,7 +32,7 @@ class CythonDebugWriter(object): if etree is None: raise Errors.NoElementTreeInstalledException() - self.output_dir = os.path.join(output_dir or os.curdir, 'cython_debug') + self.output_dir = os.path.join(output_dir or os.curdir, 'cython_debug') self.tb = etree.TreeBuilder() # set by Cython.Compiler.ParseTreeTransforms.DebugTransform self.module_name = None @@ -44,10 +44,10 @@ class CythonDebugWriter(object): def end(self, name): self.tb.end(name) - def add_entry(self, name, **attrs): - self.tb.start(name, attrs) - self.tb.end(name) - + def add_entry(self, name, **attrs): + self.tb.start(name, attrs) + self.tb.end(name) + def serialize(self): self.tb.end('Module') self.tb.end('cython_debug') @@ -55,7 +55,7 @@ class CythonDebugWriter(object): try: os.makedirs(self.output_dir) - except OSError as e: + except OSError as e: if e.errno != errno.EEXIST: raise diff --git a/contrib/tools/cython/Cython/Debugger/Tests/TestLibCython.py b/contrib/tools/cython/Cython/Debugger/Tests/TestLibCython.py index a23d31f534..13560646ff 100644 --- a/contrib/tools/cython/Cython/Debugger/Tests/TestLibCython.py +++ b/contrib/tools/cython/Cython/Debugger/Tests/TestLibCython.py @@ -14,7 +14,7 @@ from distutils import ccompiler import runtests import Cython.Distutils.extension -import Cython.Distutils.old_build_ext as build_ext +import Cython.Distutils.old_build_ext as build_ext from Cython.Debugger import Cygdb as cygdb root = os.path.dirname(os.path.abspath(__file__)) @@ -31,38 +31,38 @@ def test_gdb(): if have_gdb is not None: return have_gdb - have_gdb = False + have_gdb = False try: - p = 
subprocess.Popen(['gdb', '-nx', '--version'], stdout=subprocess.PIPE) + p = subprocess.Popen(['gdb', '-nx', '--version'], stdout=subprocess.PIPE) except OSError: - # gdb not found - gdb_version = None + # gdb not found + gdb_version = None else: - stdout, _ = p.communicate() + stdout, _ = p.communicate() # Based on Lib/test/test_gdb.py - regex = r"GNU gdb [^\d]*(\d+)\.(\d+)" - gdb_version = re.match(regex, stdout.decode('ascii', 'ignore')) + regex = r"GNU gdb [^\d]*(\d+)\.(\d+)" + gdb_version = re.match(regex, stdout.decode('ascii', 'ignore')) - if gdb_version: - gdb_version_number = list(map(int, gdb_version.groups())) + if gdb_version: + gdb_version_number = list(map(int, gdb_version.groups())) if gdb_version_number >= [7, 2]: - have_gdb = True - with tempfile.NamedTemporaryFile(mode='w+') as python_version_script: + have_gdb = True + with tempfile.NamedTemporaryFile(mode='w+') as python_version_script: python_version_script.write( 'python import sys; print("%s %s" % sys.version_info[:2])') python_version_script.flush() p = subprocess.Popen(['gdb', '-batch', '-x', python_version_script.name], stdout=subprocess.PIPE) - stdout, _ = p.communicate() + stdout, _ = p.communicate() try: - internal_python_version = list(map(int, stdout.decode('ascii', 'ignore').split())) - if internal_python_version < [2, 6]: - have_gdb = False + internal_python_version = list(map(int, stdout.decode('ascii', 'ignore').split())) + if internal_python_version < [2, 6]: + have_gdb = False except ValueError: have_gdb = False - if not have_gdb: - warnings.warn('Skipping gdb tests, need gdb >= 7.2 with Python >= 2.6') + if not have_gdb: + warnings.warn('Skipping gdb tests, need gdb >= 7.2 with Python >= 2.6') return have_gdb diff --git a/contrib/tools/cython/Cython/Debugger/Tests/test_libcython_in_gdb.py b/contrib/tools/cython/Cython/Debugger/Tests/test_libcython_in_gdb.py index e798e7564f..bd7608d607 100644 --- a/contrib/tools/cython/Cython/Debugger/Tests/test_libcython_in_gdb.py +++ b/contrib/tools/cython/Cython/Debugger/Tests/test_libcython_in_gdb.py @@ -5,8 +5,8 @@ Note: debug information is already imported by the file generated by Cython.Debugger.Cygdb.make_command_file() """ -from __future__ import absolute_import - +from __future__ import absolute_import + import os import re import sys @@ -23,10 +23,10 @@ import itertools import gdb -from .. import libcython -from .. import libpython -from . import TestLibCython as test_libcython -from ...Utils import add_metaclass +from .. import libcython +from .. import libpython +from . import TestLibCython as test_libcython +from ...Utils import add_metaclass # for some reason sys.argv is missing in gdb sys.argv = ['gdb'] @@ -48,12 +48,12 @@ def print_on_call_decorator(func): class TraceMethodCallMeta(type): def __init__(self, name, bases, dict): - for func_name, func in dict.items(): + for func_name, func in dict.items(): if inspect.isfunction(func): setattr(self, func_name, print_on_call_decorator(func)) -@add_metaclass(TraceMethodCallMeta) +@add_metaclass(TraceMethodCallMeta) class DebugTestCase(unittest.TestCase): """ Base class for test cases. 
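# The gdb capability probe above can be reduced to a small helper,
# assuming a gdb binary on PATH; the version regex follows
# Lib/test/test_gdb.py, and gdb_version() is an illustrative name.
import re
import subprocess

def gdb_version():
    try:
        out = subprocess.check_output(['gdb', '-nx', '--version'])
    except OSError:
        return None  # gdb not installed
    match = re.match(r'GNU gdb [^\d]*(\d+)\.(\d+)', out.decode('ascii', 'ignore'))
    return tuple(map(int, match.groups())) if match else None

# Usage: run the debugger tests only when gdb_version() is not None and
# gdb_version() >= (7, 2), matching the check above.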
On teardown it kills the inferior and unsets diff --git a/contrib/tools/cython/Cython/Debugger/Tests/test_libpython_in_gdb.py b/contrib/tools/cython/Cython/Debugger/Tests/test_libpython_in_gdb.py index d5e01187ed..6f34cee47b 100644 --- a/contrib/tools/cython/Cython/Debugger/Tests/test_libpython_in_gdb.py +++ b/contrib/tools/cython/Cython/Debugger/Tests/test_libpython_in_gdb.py @@ -14,8 +14,8 @@ import gdb from Cython.Debugger import libcython from Cython.Debugger import libpython -from . import test_libcython_in_gdb -from .test_libcython_in_gdb import _debug, inferior_python_version +from . import test_libcython_in_gdb +from .test_libcython_in_gdb import _debug, inferior_python_version class TestPrettyPrinters(test_libcython_in_gdb.DebugTestCase): diff --git a/contrib/tools/cython/Cython/Debugger/libcython.py b/contrib/tools/cython/Cython/Debugger/libcython.py index 238d578853..23153789b6 100644 --- a/contrib/tools/cython/Cython/Debugger/libcython.py +++ b/contrib/tools/cython/Cython/Debugger/libcython.py @@ -4,11 +4,11 @@ GDB extension that adds Cython support. from __future__ import print_function -try: - input = raw_input -except NameError: - pass - +try: + input = raw_input +except NameError: + pass + import sys import textwrap import traceback @@ -346,12 +346,12 @@ class CythonBase(object): except RuntimeError: func_address = 0 else: - func_address = gdb_value.address - if not isinstance(func_address, int): - # Seriously? Why is the address not an int? - if not isinstance(func_address, (str, bytes)): - func_address = str(func_address) - func_address = int(func_address.split()[0], 0) + func_address = gdb_value.address + if not isinstance(func_address, int): + # Seriously? Why is the address not an int? + if not isinstance(func_address, (str, bytes)): + func_address = str(func_address) + func_address = int(func_address.split()[0], 0) a = ', '.join('%s=%s' % (name, val) for name, val in func_args) sys.stdout.write('#%-2d 0x%016x in %s(%s)' % (index, func_address, func_name, a)) @@ -392,7 +392,7 @@ class CythonBase(object): result = {} seen = set() - for k, v in pyobject_dict.items(): + for k, v in pyobject_dict.items(): result[k.proxyval(seen)] = v return result @@ -416,7 +416,7 @@ class CythonBase(object): # Closed over free variable if cur_lineno > cython_func.lineno: if cyvar.type == PythonObject: - return int(gdb.parse_and_eval(cyvar.cname)) + return int(gdb.parse_and_eval(cyvar.cname)) return True return False @@ -743,7 +743,7 @@ class CyImport(CythonCommand): for marker in module.find('LineNumberMapping'): cython_lineno = int(marker.attrib['cython_lineno']) - c_linenos = list(map(int, marker.attrib['c_linenos'].split())) + c_linenos = list(map(int, marker.attrib['c_linenos'].split())) cython_module.lineno_cy2c[cython_lineno] = min(c_linenos) for c_lineno in c_linenos: cython_module.lineno_c2cy[c_lineno] = cython_lineno @@ -816,7 +816,7 @@ class CyBreak(CythonCommand): while True: try: - result = input( + result = input( "Select a function, press 'a' for all " "functions or press 'q' or '^D' to quit: ") except EOFError: @@ -863,10 +863,10 @@ class CyBreak(CythonCommand): def complete(self, text, word): # Filter init-module functions (breakpoints can be set using # modulename:linenumber). 
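# The address coercion above ("Why is the address not an int?") guards
# against gdb.Value addresses that stringify as "0x401230 <symbol>"; a
# standalone sketch (parse_addr is an illustrative name, and the string
# handling itself needs no gdb import):
def parse_addr(value):
    if isinstance(value, int):
        return value
    text = value if isinstance(value, (str, bytes)) else str(value)
    if isinstance(text, bytes):
        text = text.decode()
    # Base 0 lets int() honour the 0x prefix.
    return int(text.split()[0], 0)

assert parse_addr('0x401230 <main+16>') == 0x401230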
- names = [n for n, L in self.cy.functions_by_name.items() - if any(not f.is_initmodule_function for f in L)] - qnames = [n for n, f in self.cy.functions_by_qualified_name.items() - if not f.is_initmodule_function] + names = [n for n, L in self.cy.functions_by_name.items() + if any(not f.is_initmodule_function for f in L)] + qnames = [n for n, f in self.cy.functions_by_qualified_name.items() + if not f.is_initmodule_function] if parameters.complete_unqualified: all_names = itertools.chain(qnames, names) @@ -1156,7 +1156,7 @@ class CyLocals(CythonCommand): local_cython_vars = cython_function.locals max_name_length = len(max(local_cython_vars, key=len)) - for name, cyvar in sorted(local_cython_vars.items(), key=sortkey): + for name, cyvar in sorted(local_cython_vars.items(), key=sortkey): if self.is_initialized(self.get_cython_function(), cyvar.name): value = gdb.parse_and_eval(cyvar.cname) if not value.is_optimized_out: @@ -1189,13 +1189,13 @@ class CyGlobals(CyLocals): seen = set() print('Python globals:') - for k, v in sorted(global_python_dict.items(), key=sortkey): + for k, v in sorted(global_python_dict.items(), key=sortkey): v = v.get_truncated_repr(libpython.MAX_OUTPUT_LEN) seen.add(k) print(' %-*s = %s' % (max_name_length, k, v)) print('C globals:') - for name, cyvar in sorted(module_globals.items(), key=sortkey): + for name, cyvar in sorted(module_globals.items(), key=sortkey): if name not in seen: try: value = gdb.parse_and_eval(cyvar.cname) @@ -1218,8 +1218,8 @@ class EvaluateOrExecuteCodeMixin(object): "Fill a remotely allocated dict with values from the Cython C stack" cython_func = self.get_cython_function() - for name, cyvar in cython_func.locals.items(): - if cyvar.type == PythonObject and self.is_initialized(cython_func, name): + for name, cyvar in cython_func.locals.items(): + if cyvar.type == PythonObject and self.is_initialized(cython_func, name): try: val = gdb.parse_and_eval(cyvar.cname) except RuntimeError: diff --git a/contrib/tools/cython/Cython/Debugger/libpython.py b/contrib/tools/cython/Cython/Debugger/libpython.py index 213a60de5a..fea626dd73 100644 --- a/contrib/tools/cython/Cython/Debugger/libpython.py +++ b/contrib/tools/cython/Cython/Debugger/libpython.py @@ -46,10 +46,10 @@ the type names are known to the debugger The module also extends gdb with some python-specific commands. ''' - + # NOTE: some gdbs are linked with Python 3, so this file should be dual-syntax # compatible (2.6+ and 3.0+). See #19308. - + from __future__ import print_function import gdb import os @@ -2486,17 +2486,17 @@ class PyCont(ExecutionControlCommandBase): def _pointervalue(gdbval): """ - Return the value of the pointer as a Python int. + Return the value of the pointer as a Python int. 
gdbval.type must be a pointer type """ # don't convert with int() as it will raise a RuntimeError if gdbval.address is not None: - return int(gdbval.address) + return int(gdbval.address) else: # the address attribute is None sometimes, in which case we can # still convert the pointer to an int - return int(gdbval) + return int(gdbval) def pointervalue(gdbval): @@ -2688,7 +2688,7 @@ class FixGdbCommand(gdb.Command): warnings.filterwarnings('ignore', r'.*', RuntimeWarning, re.escape(__name__)) try: - int(gdb.parse_and_eval("(void *) 0")) == 0 + int(gdb.parse_and_eval("(void *) 0")) == 0 except RuntimeError: pass # warnings.resetwarnings() @@ -2726,7 +2726,7 @@ class PyExec(gdb.Command): lines = [] while True: try: - line = input('>') + line = input('>') except EOFError: break else: diff --git a/contrib/tools/cython/Cython/Distutils/build_ext.py b/contrib/tools/cython/Cython/Distutils/build_ext.py index c499bb525a..598bb4a89b 100644 --- a/contrib/tools/cython/Cython/Distutils/build_ext.py +++ b/contrib/tools/cython/Cython/Distutils/build_ext.py @@ -1,25 +1,25 @@ import sys -if 'setuptools' in sys.modules: - try: - from setuptools.command.build_ext import build_ext as _build_ext - except ImportError: - # We may be in the process of importing setuptools, which tries - # to import this. - from distutils.command.build_ext import build_ext as _build_ext -else: - from distutils.command.build_ext import build_ext as _build_ext - +if 'setuptools' in sys.modules: + try: + from setuptools.command.build_ext import build_ext as _build_ext + except ImportError: + # We may be in the process of importing setuptools, which tries + # to import this. + from distutils.command.build_ext import build_ext as _build_ext +else: + from distutils.command.build_ext import build_ext as _build_ext -class new_build_ext(_build_ext, object): - def finalize_options(self): - if self.distribution.ext_modules: + +class new_build_ext(_build_ext, object): + def finalize_options(self): + if self.distribution.ext_modules: nthreads = getattr(self, 'parallel', None) # -j option in Py3.5+ nthreads = int(nthreads) if nthreads else None - from Cython.Build.Dependencies import cythonize - self.distribution.ext_modules[:] = cythonize( + from Cython.Build.Dependencies import cythonize + self.distribution.ext_modules[:] = cythonize( self.distribution.ext_modules, nthreads=nthreads, force=self.force) super(new_build_ext, self).finalize_options() -# This will become new_build_ext in the future. -from .old_build_ext import old_build_ext as build_ext +# This will become new_build_ext in the future. +from .old_build_ext import old_build_ext as build_ext diff --git a/contrib/tools/cython/Cython/Distutils/extension.py b/contrib/tools/cython/Cython/Distutils/extension.py index bf36ac0e11..d8bdbf0f5b 100644 --- a/contrib/tools/cython/Cython/Distutils/extension.py +++ b/contrib/tools/cython/Cython/Distutils/extension.py @@ -45,7 +45,7 @@ class Extension(_Extension.Extension): # Translate pyrex_X to cython_X for backwards compatibility. 
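# The keyword translation that follows can be sketched as a plain dict
# rewrite; kw stands in for the **kwargs of Extension.__init__, and
# translate_pyrex_options is an illustrative name:
def translate_pyrex_options(kw):
    had_pyrex = False
    for key in list(kw):  # list() so the dict can be mutated while iterating
        if key.startswith('pyrex_'):
            had_pyrex = True
            kw['cython' + key[5:]] = kw.pop(key)
    return had_pyrex

opts = {'pyrex_gdb': True, 'language': 'c++'}
assert translate_pyrex_options(opts)
assert opts == {'cython_gdb': True, 'language': 'c++'}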
had_pyrex_options = False - for key in list(kw): + for key in list(kw): if key.startswith('pyrex_'): had_pyrex_options = True kw['cython' + key[5:]] = kw.pop(key) diff --git a/contrib/tools/cython/Cython/Distutils/old_build_ext.py b/contrib/tools/cython/Cython/Distutils/old_build_ext.py index ad3b157855..aa2a1cf229 100644 --- a/contrib/tools/cython/Cython/Distutils/old_build_ext.py +++ b/contrib/tools/cython/Cython/Distutils/old_build_ext.py @@ -1,355 +1,355 @@ -"""Cython.Distutils.old_build_ext - -Implements a version of the Distutils 'build_ext' command, for +"""Cython.Distutils.old_build_ext + +Implements a version of the Distutils 'build_ext' command, for building Cython extension modules. - + Note that this module is deprecated. Use cythonize() instead. """ - -__revision__ = "$Id:$" - -import inspect -import sys -import os -from distutils.errors import DistutilsPlatformError -from distutils.dep_util import newer, newer_group -from distutils import log -from distutils.command import build_ext as _build_ext -from distutils import sysconfig -import warnings - - -try: - from __builtin__ import basestring -except ImportError: - basestring = str - - -def _check_stack(path): - try: - for frame in inspect.getouterframes(inspect.currentframe(), 0): - if path in frame[1].replace(os.sep, '/'): - return True - except Exception: - pass - return False - -if (not _check_stack('setuptools/extensions.py') - and not _check_stack('pyximport/pyxbuild.py') - and not _check_stack('Cython/Distutils/build_ext.py')): - warnings.warn( - "Cython.Distutils.old_build_ext does not properly handle dependencies " - "and is deprecated.") - - -extension_name_re = _build_ext.extension_name_re - -show_compilers = _build_ext.show_compilers - -class Optimization(object): - def __init__(self): - self.flags = ( - 'OPT', - 'CFLAGS', - 'CPPFLAGS', - 'EXTRA_CFLAGS', - 'BASECFLAGS', - 'PY_CFLAGS', - ) - self.state = sysconfig.get_config_vars(*self.flags) - self.config_vars = sysconfig.get_config_vars() - - - def disable_optimization(self): - "disable optimization for the C or C++ compiler" - badoptions = ('-O1', '-O2', '-O3') - - for flag, option in zip(self.flags, self.state): - if option is not None: - L = [opt for opt in option.split() if opt not in badoptions] - self.config_vars[flag] = ' '.join(L) - - def restore_state(self): - "restore the original state" - for flag, option in zip(self.flags, self.state): - if option is not None: - self.config_vars[flag] = option - - -optimization = Optimization() - - -class old_build_ext(_build_ext.build_ext): - - description = "build C/C++ and Cython extensions (compile/link to build directory)" - - sep_by = _build_ext.build_ext.sep_by + +__revision__ = "$Id:$" + +import inspect +import sys +import os +from distutils.errors import DistutilsPlatformError +from distutils.dep_util import newer, newer_group +from distutils import log +from distutils.command import build_ext as _build_ext +from distutils import sysconfig +import warnings + + +try: + from __builtin__ import basestring +except ImportError: + basestring = str + + +def _check_stack(path): + try: + for frame in inspect.getouterframes(inspect.currentframe(), 0): + if path in frame[1].replace(os.sep, '/'): + return True + except Exception: + pass + return False + +if (not _check_stack('setuptools/extensions.py') + and not _check_stack('pyximport/pyxbuild.py') + and not _check_stack('Cython/Distutils/build_ext.py')): + warnings.warn( + "Cython.Distutils.old_build_ext does not properly handle dependencies " + "and is deprecated.") + 
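# The caller-detection idiom used by _check_stack() above, in isolation:
# walk the outer frames and look for a path fragment in each frame's
# filename (index 1 of the legacy frame tuple), normalising os.sep.
# called_from is an illustrative name.
import inspect
import os

def called_from(path_fragment):
    for frame in inspect.getouterframes(inspect.currentframe(), 0):
        if path_fragment in frame[1].replace(os.sep, '/'):
            return True
    return False

# old_build_ext stays quiet only when reached from one of the known
# entry points, e.g. called_from('pyximport/pyxbuild.py').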
+ +extension_name_re = _build_ext.extension_name_re + +show_compilers = _build_ext.show_compilers + +class Optimization(object): + def __init__(self): + self.flags = ( + 'OPT', + 'CFLAGS', + 'CPPFLAGS', + 'EXTRA_CFLAGS', + 'BASECFLAGS', + 'PY_CFLAGS', + ) + self.state = sysconfig.get_config_vars(*self.flags) + self.config_vars = sysconfig.get_config_vars() + + + def disable_optimization(self): + "disable optimization for the C or C++ compiler" + badoptions = ('-O1', '-O2', '-O3') + + for flag, option in zip(self.flags, self.state): + if option is not None: + L = [opt for opt in option.split() if opt not in badoptions] + self.config_vars[flag] = ' '.join(L) + + def restore_state(self): + "restore the original state" + for flag, option in zip(self.flags, self.state): + if option is not None: + self.config_vars[flag] = option + + +optimization = Optimization() + + +class old_build_ext(_build_ext.build_ext): + + description = "build C/C++ and Cython extensions (compile/link to build directory)" + + sep_by = _build_ext.build_ext.sep_by user_options = _build_ext.build_ext.user_options[:] boolean_options = _build_ext.build_ext.boolean_options[:] help_options = _build_ext.build_ext.help_options[:] - - # Add the pyrex specific data. - user_options.extend([ - ('cython-cplus', None, - "generate C++ source files"), - ('cython-create-listing', None, - "write errors to a listing file"), - ('cython-line-directives', None, - "emit source line directives"), - ('cython-include-dirs=', None, - "path to the Cython include files" + sep_by), - ('cython-c-in-temp', None, - "put generated C files in temp directory"), - ('cython-gen-pxi', None, - "generate .pxi file for public declarations"), - ('cython-directives=', None, - "compiler directive overrides"), - ('cython-gdb', None, - "generate debug information for cygdb"), - ('cython-compile-time-env', None, - "cython compile time environment"), - - # For backwards compatibility. - ('pyrex-cplus', None, - "generate C++ source files"), - ('pyrex-create-listing', None, - "write errors to a listing file"), - ('pyrex-line-directives', None, - "emit source line directives"), - ('pyrex-include-dirs=', None, - "path to the Cython include files" + sep_by), - ('pyrex-c-in-temp', None, - "put generated C files in temp directory"), - ('pyrex-gen-pxi', None, - "generate .pxi file for public declarations"), - ('pyrex-directives=', None, - "compiler directive overrides"), - ('pyrex-gdb', None, - "generate debug information for cygdb"), - ]) - - boolean_options.extend([ - 'cython-cplus', 'cython-create-listing', 'cython-line-directives', - 'cython-c-in-temp', 'cython-gdb', - - # For backwards compatibility. 
- 'pyrex-cplus', 'pyrex-create-listing', 'pyrex-line-directives', - 'pyrex-c-in-temp', 'pyrex-gdb', - ]) - - def initialize_options(self): - _build_ext.build_ext.initialize_options(self) - self.cython_cplus = 0 - self.cython_create_listing = 0 - self.cython_line_directives = 0 - self.cython_include_dirs = None - self.cython_directives = None - self.cython_c_in_temp = 0 - self.cython_gen_pxi = 0 - self.cython_gdb = False - self.no_c_in_traceback = 0 - self.cython_compile_time_env = None - - def __getattr__(self, name): - if name[:6] == 'pyrex_': - return getattr(self, 'cython_' + name[6:]) - else: - return _build_ext.build_ext.__getattr__(self, name) - - def __setattr__(self, name, value): - if name[:6] == 'pyrex_': - return setattr(self, 'cython_' + name[6:], value) - else: - # _build_ext.build_ext.__setattr__(self, name, value) - self.__dict__[name] = value - - def finalize_options (self): - _build_ext.build_ext.finalize_options(self) - if self.cython_include_dirs is None: - self.cython_include_dirs = [] - elif isinstance(self.cython_include_dirs, basestring): - self.cython_include_dirs = \ - self.cython_include_dirs.split(os.pathsep) - if self.cython_directives is None: - self.cython_directives = {} - # finalize_options () - - def run(self): - # We have one shot at this before build_ext initializes the compiler. - # If --pyrex-gdb is in effect as a command line option or as option - # of any Extension module, disable optimization for the C or C++ - # compiler. - if self.cython_gdb or [1 for ext in self.extensions - if getattr(ext, 'cython_gdb', False)]: - optimization.disable_optimization() - - _build_ext.build_ext.run(self) - - def build_extensions(self): - # First, sanity-check the 'extensions' list - self.check_extensions_list(self.extensions) - - for ext in self.extensions: - ext.sources = self.cython_sources(ext.sources, ext) + + # Add the pyrex specific data. + user_options.extend([ + ('cython-cplus', None, + "generate C++ source files"), + ('cython-create-listing', None, + "write errors to a listing file"), + ('cython-line-directives', None, + "emit source line directives"), + ('cython-include-dirs=', None, + "path to the Cython include files" + sep_by), + ('cython-c-in-temp', None, + "put generated C files in temp directory"), + ('cython-gen-pxi', None, + "generate .pxi file for public declarations"), + ('cython-directives=', None, + "compiler directive overrides"), + ('cython-gdb', None, + "generate debug information for cygdb"), + ('cython-compile-time-env', None, + "cython compile time environment"), + + # For backwards compatibility. + ('pyrex-cplus', None, + "generate C++ source files"), + ('pyrex-create-listing', None, + "write errors to a listing file"), + ('pyrex-line-directives', None, + "emit source line directives"), + ('pyrex-include-dirs=', None, + "path to the Cython include files" + sep_by), + ('pyrex-c-in-temp', None, + "put generated C files in temp directory"), + ('pyrex-gen-pxi', None, + "generate .pxi file for public declarations"), + ('pyrex-directives=', None, + "compiler directive overrides"), + ('pyrex-gdb', None, + "generate debug information for cygdb"), + ]) + + boolean_options.extend([ + 'cython-cplus', 'cython-create-listing', 'cython-line-directives', + 'cython-c-in-temp', 'cython-gdb', + + # For backwards compatibility. 
+ 'pyrex-cplus', 'pyrex-create-listing', 'pyrex-line-directives', + 'pyrex-c-in-temp', 'pyrex-gdb', + ]) + + def initialize_options(self): + _build_ext.build_ext.initialize_options(self) + self.cython_cplus = 0 + self.cython_create_listing = 0 + self.cython_line_directives = 0 + self.cython_include_dirs = None + self.cython_directives = None + self.cython_c_in_temp = 0 + self.cython_gen_pxi = 0 + self.cython_gdb = False + self.no_c_in_traceback = 0 + self.cython_compile_time_env = None + + def __getattr__(self, name): + if name[:6] == 'pyrex_': + return getattr(self, 'cython_' + name[6:]) + else: + return _build_ext.build_ext.__getattr__(self, name) + + def __setattr__(self, name, value): + if name[:6] == 'pyrex_': + return setattr(self, 'cython_' + name[6:], value) + else: + # _build_ext.build_ext.__setattr__(self, name, value) + self.__dict__[name] = value + + def finalize_options (self): + _build_ext.build_ext.finalize_options(self) + if self.cython_include_dirs is None: + self.cython_include_dirs = [] + elif isinstance(self.cython_include_dirs, basestring): + self.cython_include_dirs = \ + self.cython_include_dirs.split(os.pathsep) + if self.cython_directives is None: + self.cython_directives = {} + # finalize_options () + + def run(self): + # We have one shot at this before build_ext initializes the compiler. + # If --pyrex-gdb is in effect as a command line option or as option + # of any Extension module, disable optimization for the C or C++ + # compiler. + if self.cython_gdb or [1 for ext in self.extensions + if getattr(ext, 'cython_gdb', False)]: + optimization.disable_optimization() + + _build_ext.build_ext.run(self) + + def build_extensions(self): + # First, sanity-check the 'extensions' list + self.check_extensions_list(self.extensions) + + for ext in self.extensions: + ext.sources = self.cython_sources(ext.sources, ext) # Call original build_extensions _build_ext.build_ext.build_extensions(self) - - def cython_sources(self, sources, extension): - """ - Walk the list of source files in 'sources', looking for Cython - source files (.pyx and .py). Run Cython on all that are - found, and return a modified 'sources' list with Cython source - files replaced by the generated C (or C++) files. - """ - try: - from Cython.Compiler.Main \ - import CompilationOptions, \ - default_options as cython_default_options, \ - compile as cython_compile - from Cython.Compiler.Errors import PyrexError - except ImportError: - e = sys.exc_info()[1] - print("failed to import Cython: %s" % e) - raise DistutilsPlatformError("Cython does not appear to be installed") - - new_sources = [] - cython_sources = [] - cython_targets = {} - - # Setup create_list and cplus from the extension options if - # Cython.Distutils.extension.Extension is used, otherwise just - # use what was parsed from the command-line or the configuration file. - # cplus will also be set to true is extension.language is equal to - # 'C++' or 'c++'. 
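# The source-list rewrite described in the cython_sources() docstring
# above, reduced to its core; target_ext is assumed to have been chosen
# from the extension language ('.cpp' for C++, else '.c'), and .pxi/.pxd
# files, which feed the dependency check instead, are omitted here.
import os

def replace_cython_sources(sources, target_ext='.c'):
    new_sources, to_cythonize = [], []
    for source in sources:
        base, ext = os.path.splitext(source)
        if ext in ('.pyx', '.py'):
            new_sources.append(base + target_ext)
            to_cythonize.append(source)
        else:
            new_sources.append(source)
    return new_sources, to_cythonize

assert replace_cython_sources(['a.pyx', 'b.c']) == (['a.c', 'b.c'], ['a.pyx'])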
- #try: - # create_listing = self.cython_create_listing or \ - # extension.cython_create_listing - # cplus = self.cython_cplus or \ - # extension.cython_cplus or \ - # (extension.language != None and \ - # extension.language.lower() == 'c++') - #except AttributeError: - # create_listing = self.cython_create_listing - # cplus = self.cython_cplus or \ - # (extension.language != None and \ - # extension.language.lower() == 'c++') - - create_listing = self.cython_create_listing or \ - getattr(extension, 'cython_create_listing', 0) - line_directives = self.cython_line_directives or \ - getattr(extension, 'cython_line_directives', 0) - no_c_in_traceback = self.no_c_in_traceback or \ - getattr(extension, 'no_c_in_traceback', 0) - cplus = self.cython_cplus or getattr(extension, 'cython_cplus', 0) or \ - (extension.language and extension.language.lower() == 'c++') - cython_gen_pxi = self.cython_gen_pxi or getattr(extension, 'cython_gen_pxi', 0) - cython_gdb = self.cython_gdb or getattr(extension, 'cython_gdb', False) - cython_compile_time_env = self.cython_compile_time_env or \ - getattr(extension, 'cython_compile_time_env', None) - - # Set up the include_path for the Cython compiler: - # 1. Start with the command line option. - # 2. Add in any (unique) paths from the extension - # cython_include_dirs (if Cython.Distutils.extension is used). - # 3. Add in any (unique) paths from the extension include_dirs - includes = self.cython_include_dirs - try: - for i in extension.cython_include_dirs: - if not i in includes: - includes.append(i) - except AttributeError: - pass - - # In case extension.include_dirs is a generator, evaluate it and keep - # result - extension.include_dirs = list(extension.include_dirs) - for i in extension.include_dirs: - if not i in includes: - includes.append(i) - - # Set up Cython compiler directives: - # 1. Start with the command line option. - # 2. Add in any (unique) entries from the extension - # cython_directives (if Cython.Distutils.extension is used). - directives = self.cython_directives - if hasattr(extension, "cython_directives"): - directives.update(extension.cython_directives) - - # Set the target_ext to '.c'. Cython will change this to '.cpp' if - # needed. - if cplus: - target_ext = '.cpp' - else: - target_ext = '.c' - - # Decide whether to drop the generated C files into the temp dir - # or the source tree. 
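# The include-path assembly above (command-line dirs first, then the
# extension's cython_include_dirs, then its include_dirs) is an
# order-preserving de-duplication; merge_unique is an illustrative name:
def merge_unique(*path_lists):
    merged = []
    for paths in path_lists:
        for p in paths or ():
            if p not in merged:
                merged.append(p)
    return merged

assert merge_unique(['a'], ['b', 'a'], ['c']) == ['a', 'b', 'c']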
- - if not self.inplace and (self.cython_c_in_temp - or getattr(extension, 'cython_c_in_temp', 0)): - target_dir = os.path.join(self.build_temp, "pyrex") - for package_name in extension.name.split('.')[:-1]: - target_dir = os.path.join(target_dir, package_name) - else: - target_dir = None - - newest_dependency = None - for source in sources: - (base, ext) = os.path.splitext(os.path.basename(source)) - if ext == ".py": - # FIXME: we might want to special case this some more - ext = '.pyx' - if ext == ".pyx": # Cython source file - output_dir = target_dir or os.path.dirname(source) - new_sources.append(os.path.join(output_dir, base + target_ext)) - cython_sources.append(source) - cython_targets[source] = new_sources[-1] - elif ext == '.pxi' or ext == '.pxd': - if newest_dependency is None \ - or newer(source, newest_dependency): - newest_dependency = source - else: - new_sources.append(source) - - if not cython_sources: - return new_sources - - module_name = extension.name - - for source in cython_sources: - target = cython_targets[source] - depends = [source] + list(extension.depends or ()) - if(source[-4:].lower()==".pyx" and os.path.isfile(source[:-3]+"pxd")): - depends += [source[:-3]+"pxd"] - rebuild = self.force or newer_group(depends, target, 'newer') - if not rebuild and newest_dependency is not None: - rebuild = newer(newest_dependency, target) - if rebuild: - log.info("cythoning %s to %s", source, target) - self.mkpath(os.path.dirname(target)) - if self.inplace: - output_dir = os.curdir - else: - output_dir = self.build_lib - options = CompilationOptions(cython_default_options, - use_listing_file = create_listing, - include_path = includes, - compiler_directives = directives, - output_file = target, - cplus = cplus, - emit_linenums = line_directives, - c_line_in_traceback = not no_c_in_traceback, - generate_pxi = cython_gen_pxi, - output_dir = output_dir, - gdb_debug = cython_gdb, - compile_time_env = cython_compile_time_env) - result = cython_compile(source, options=options, - full_module_name=module_name) - else: - log.info("skipping '%s' Cython extension (up-to-date)", target) - - return new_sources - - # cython_sources () - -# class build_ext + + def cython_sources(self, sources, extension): + """ + Walk the list of source files in 'sources', looking for Cython + source files (.pyx and .py). Run Cython on all that are + found, and return a modified 'sources' list with Cython source + files replaced by the generated C (or C++) files. + """ + try: + from Cython.Compiler.Main \ + import CompilationOptions, \ + default_options as cython_default_options, \ + compile as cython_compile + from Cython.Compiler.Errors import PyrexError + except ImportError: + e = sys.exc_info()[1] + print("failed to import Cython: %s" % e) + raise DistutilsPlatformError("Cython does not appear to be installed") + + new_sources = [] + cython_sources = [] + cython_targets = {} + + # Setup create_list and cplus from the extension options if + # Cython.Distutils.extension.Extension is used, otherwise just + # use what was parsed from the command-line or the configuration file. + # cplus will also be set to true is extension.language is equal to + # 'C++' or 'c++'. 
+ #try: + # create_listing = self.cython_create_listing or \ + # extension.cython_create_listing + # cplus = self.cython_cplus or \ + # extension.cython_cplus or \ + # (extension.language != None and \ + # extension.language.lower() == 'c++') + #except AttributeError: + # create_listing = self.cython_create_listing + # cplus = self.cython_cplus or \ + # (extension.language != None and \ + # extension.language.lower() == 'c++') + + create_listing = self.cython_create_listing or \ + getattr(extension, 'cython_create_listing', 0) + line_directives = self.cython_line_directives or \ + getattr(extension, 'cython_line_directives', 0) + no_c_in_traceback = self.no_c_in_traceback or \ + getattr(extension, 'no_c_in_traceback', 0) + cplus = self.cython_cplus or getattr(extension, 'cython_cplus', 0) or \ + (extension.language and extension.language.lower() == 'c++') + cython_gen_pxi = self.cython_gen_pxi or getattr(extension, 'cython_gen_pxi', 0) + cython_gdb = self.cython_gdb or getattr(extension, 'cython_gdb', False) + cython_compile_time_env = self.cython_compile_time_env or \ + getattr(extension, 'cython_compile_time_env', None) + + # Set up the include_path for the Cython compiler: + # 1. Start with the command line option. + # 2. Add in any (unique) paths from the extension + # cython_include_dirs (if Cython.Distutils.extension is used). + # 3. Add in any (unique) paths from the extension include_dirs + includes = self.cython_include_dirs + try: + for i in extension.cython_include_dirs: + if not i in includes: + includes.append(i) + except AttributeError: + pass + + # In case extension.include_dirs is a generator, evaluate it and keep + # result + extension.include_dirs = list(extension.include_dirs) + for i in extension.include_dirs: + if not i in includes: + includes.append(i) + + # Set up Cython compiler directives: + # 1. Start with the command line option. + # 2. Add in any (unique) entries from the extension + # cython_directives (if Cython.Distutils.extension is used). + directives = self.cython_directives + if hasattr(extension, "cython_directives"): + directives.update(extension.cython_directives) + + # Set the target_ext to '.c'. Cython will change this to '.cpp' if + # needed. + if cplus: + target_ext = '.cpp' + else: + target_ext = '.c' + + # Decide whether to drop the generated C files into the temp dir + # or the source tree. 
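# The staleness test further below (mirrored in the removed block above)
# rests on distutils' newer()/newer_group(); a reduced form, with
# needs_rebuild as an illustrative name:
from distutils.dep_util import newer, newer_group

def needs_rebuild(source, target, depends, newest_dependency=None, force=False):
    # Rebuild when forced, when any direct dependency beats the target,
    # or when the newest shared .pxi/.pxd dependency does.
    if force or newer_group([source] + list(depends), target, missing='newer'):
        return True
    return newest_dependency is not None and newer(newest_dependency, target)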
+ + if not self.inplace and (self.cython_c_in_temp + or getattr(extension, 'cython_c_in_temp', 0)): + target_dir = os.path.join(self.build_temp, "pyrex") + for package_name in extension.name.split('.')[:-1]: + target_dir = os.path.join(target_dir, package_name) + else: + target_dir = None + + newest_dependency = None + for source in sources: + (base, ext) = os.path.splitext(os.path.basename(source)) + if ext == ".py": + # FIXME: we might want to special case this some more + ext = '.pyx' + if ext == ".pyx": # Cython source file + output_dir = target_dir or os.path.dirname(source) + new_sources.append(os.path.join(output_dir, base + target_ext)) + cython_sources.append(source) + cython_targets[source] = new_sources[-1] + elif ext == '.pxi' or ext == '.pxd': + if newest_dependency is None \ + or newer(source, newest_dependency): + newest_dependency = source + else: + new_sources.append(source) + + if not cython_sources: + return new_sources + + module_name = extension.name + + for source in cython_sources: + target = cython_targets[source] + depends = [source] + list(extension.depends or ()) + if(source[-4:].lower()==".pyx" and os.path.isfile(source[:-3]+"pxd")): + depends += [source[:-3]+"pxd"] + rebuild = self.force or newer_group(depends, target, 'newer') + if not rebuild and newest_dependency is not None: + rebuild = newer(newest_dependency, target) + if rebuild: + log.info("cythoning %s to %s", source, target) + self.mkpath(os.path.dirname(target)) + if self.inplace: + output_dir = os.curdir + else: + output_dir = self.build_lib + options = CompilationOptions(cython_default_options, + use_listing_file = create_listing, + include_path = includes, + compiler_directives = directives, + output_file = target, + cplus = cplus, + emit_linenums = line_directives, + c_line_in_traceback = not no_c_in_traceback, + generate_pxi = cython_gen_pxi, + output_dir = output_dir, + gdb_debug = cython_gdb, + compile_time_env = cython_compile_time_env) + result = cython_compile(source, options=options, + full_module_name=module_name) + else: + log.info("skipping '%s' Cython extension (up-to-date)", target) + + return new_sources + + # cython_sources () + +# class build_ext diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python.pxd index 3b3f7f49ff..56236e925c 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_bool.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_bool.pxd index 01e0463713..9a6d253f45 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_bool.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_bool.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.bool cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_buffer.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_buffer.pxd index 6a1f84ef6f..2baeaae00c 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_buffer.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_buffer.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.buffer cimport * diff --git 
a/contrib/tools/cython/Cython/Includes/Deprecated/python_bytes.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_bytes.pxd index e52d2c9bfa..87af662de0 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_bytes.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_bytes.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.bytes cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_cobject.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_cobject.pxd index a8539c3290..ed32c6b878 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_cobject.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_cobject.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.cobject cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_complex.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_complex.pxd index cb944a8a59..0a780b3b2d 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_complex.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_complex.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.complex cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_dict.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_dict.pxd index 231c358cde..05b5f4796a 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_dict.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_dict.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.dict cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_exc.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_exc.pxd index 58a31ca473..6eb236bccb 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_exc.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_exc.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.exc cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_float.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_float.pxd index 64dcff8c83..7e133ef9bb 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_float.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_float.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.float cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_function.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_function.pxd index d81960d768..1461c4e635 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_function.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_function.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.function cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_getargs.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_getargs.pxd index 80e99d3198..3852d6a6a1 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_getargs.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_getargs.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards 
compatibility from cpython.getargs cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_instance.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_instance.pxd index f0640b0786..99cb5a9091 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_instance.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_instance.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.instance cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_int.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_int.pxd index b83e4c2798..c1fd5178d6 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_int.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_int.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.int cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_iterator.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_iterator.pxd index 0efcc4176b..e09aad2790 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_iterator.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_iterator.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.iterator cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_list.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_list.pxd index d70ef91ecf..64febcf969 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_list.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_list.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.list cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_long.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_long.pxd index 34f2d50c85..1a24380c4c 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_long.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_long.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.long cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_mapping.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_mapping.pxd index a1883e9e20..cd01bee015 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_mapping.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_mapping.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.mapping cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_mem.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_mem.pxd index 9d6ec7d281..d74429ea36 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_mem.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_mem.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.mem cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_method.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_method.pxd index 781a7299d1..e7da5154e4 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_method.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_method.pxd @@ -1,2 +1,2 @@ -# Present for backwards 
compatibility +# Present for backwards compatibility from cpython.method cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_module.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_module.pxd index adc4e8086f..6310c0247d 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_module.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_module.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.module cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_number.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_number.pxd index 30a852328d..ae67da1c38 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_number.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_number.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.number cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_object.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_object.pxd index a60aec203a..3981bfa44e 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_object.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_object.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.object cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_oldbuffer.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_oldbuffer.pxd index c5a7853486..e03e66a2e2 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_oldbuffer.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_oldbuffer.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.oldbuffer cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_pycapsule.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_pycapsule.pxd index 8789099181..fe9cf8f8d9 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_pycapsule.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_pycapsule.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.pycapsule cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_ref.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_ref.pxd index d8cbae574d..9447418198 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_ref.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_ref.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.ref cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_sequence.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_sequence.pxd index c5c422098d..fdef5b63eb 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_sequence.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_sequence.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.sequence cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_set.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_set.pxd index 63626aa3b3..a2feb93712 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_set.pxd +++ 
b/contrib/tools/cython/Cython/Includes/Deprecated/python_set.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.set cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_string.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_string.pxd index 501e18cf32..24c818338e 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_string.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_string.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.string cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_tuple.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_tuple.pxd index 6101315435..190713b020 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_tuple.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_tuple.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.tuple cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_type.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_type.pxd index 94436aa819..3ac47d1b3f 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_type.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_type.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.type cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_unicode.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_unicode.pxd index 9d383ec0b2..2b488b2dc8 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_unicode.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_unicode.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.unicode cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_version.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_version.pxd index 0e9c887b9d..c27ca4df95 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_version.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_version.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.version cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/python_weakref.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/python_weakref.pxd index c70f44cbe5..1f84f1a179 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/python_weakref.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/python_weakref.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from cpython.weakref cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/stdio.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/stdio.pxd index cd77b499fc..41a4aebf1d 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/stdio.pxd +++ b/contrib/tools/cython/Cython/Includes/Deprecated/stdio.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from libc.stdio cimport * diff --git a/contrib/tools/cython/Cython/Includes/Deprecated/stdlib.pxd b/contrib/tools/cython/Cython/Includes/Deprecated/stdlib.pxd index 5f48fbe2c6..499511cde9 100644 --- a/contrib/tools/cython/Cython/Includes/Deprecated/stdlib.pxd +++ 
b/contrib/tools/cython/Cython/Includes/Deprecated/stdlib.pxd @@ -1,2 +1,2 @@ -# Present for backwards compatibility +# Present for backwards compatibility from libc.stdlib cimport * diff --git a/contrib/tools/cython/Cython/Includes/cpython/array.pxd b/contrib/tools/cython/Cython/Includes/cpython/array.pxd index 5d37c007df..19230a0a82 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/array.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/array.pxd @@ -52,14 +52,14 @@ from libc.string cimport strcat, strncat, \ from cpython.object cimport Py_SIZE from cpython.ref cimport PyTypeObject, Py_TYPE from cpython.exc cimport PyErr_BadArgument -from cpython.mem cimport PyObject_Malloc, PyObject_Free +from cpython.mem cimport PyObject_Malloc, PyObject_Free cdef extern from *: # Hard-coded utility code hack. ctypedef class array.array [object arrayobject] ctypedef object GETF(array a, Py_ssize_t ix) ctypedef object SETF(array a, Py_ssize_t ix, object o) ctypedef struct arraydescr: # [object arraydescr]: - char typecode + char typecode int itemsize GETF getitem # PyObject * (*getitem)(struct arrayobject *, Py_ssize_t); SETF setitem # int (*setitem)(struct arrayobject *, Py_ssize_t, PyObject *); @@ -104,7 +104,7 @@ cdef extern from *: # Hard-coded utility code hack. info.itemsize = self.ob_descr.itemsize # e.g. sizeof(float) info.len = info.itemsize * item_count - info.shape = <Py_ssize_t*> PyObject_Malloc(sizeof(Py_ssize_t) + 2) + info.shape = <Py_ssize_t*> PyObject_Malloc(sizeof(Py_ssize_t) + 2) if not info.shape: raise MemoryError() info.shape[0] = item_count # constant regardless of resizing @@ -116,7 +116,7 @@ cdef extern from *: # Hard-coded utility code hack. info.obj = self def __releasebuffer__(self, Py_buffer* info): - PyObject_Free(info.shape) + PyObject_Free(info.shape) array newarrayobject(PyTypeObject* type, Py_ssize_t size, arraydescr *descr) diff --git a/contrib/tools/cython/Cython/Includes/cpython/buffer.pxd b/contrib/tools/cython/Cython/Includes/cpython/buffer.pxd index 2f6158f1f1..3f1ada774a 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/buffer.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/buffer.pxd @@ -8,7 +8,7 @@ cdef extern from "Python.h": cdef enum: PyBUF_SIMPLE, PyBUF_WRITABLE, - PyBUF_WRITEABLE, # backwards compatibility + PyBUF_WRITEABLE, # backwards compatibility PyBUF_FORMAT, PyBUF_ND, PyBUF_STRIDES, diff --git a/contrib/tools/cython/Cython/Includes/cpython/bytearray.pxd b/contrib/tools/cython/Cython/Includes/cpython/bytearray.pxd index 4da41c4a0a..1af4a6c427 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/bytearray.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/bytearray.pxd @@ -1,33 +1,33 @@ -from .object cimport PyObject - -cdef extern from "Python.h": - bint PyByteArray_Check(object o) - # Return true if the object o is a bytearray object or an instance of a subtype of the bytearray type. - - bint PyByteArray_CheckExact(object o) - # Return true if the object o is a bytearray object, but not an instance of a subtype of the bytearray type. - - bytearray PyByteArray_FromObject(object o) - # Return a new bytearray object from any object, o, that implements the buffer protocol. - - bytearray PyByteArray_FromStringAndSize(char *string, Py_ssize_t len) - # Create a new bytearray object from string and its length, len. On failure, NULL is returned. - - bytearray PyByteArray_Concat(object a, object b) - # Concat bytearrays a and b and return a new bytearray with the result. 
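A small sketch of how the PyByteArray_* declarations in this bytearray.pxd are typically used from a .pyx module; the function name and contents are illustrative only:

    from cpython.bytearray cimport (PyByteArray_FromStringAndSize,
                                    PyByteArray_AS_STRING, PyByteArray_GET_SIZE)

    def shout():
        ba = PyByteArray_FromStringAndSize(b"abc", 3)   # new bytearray from a C buffer
        cdef char* p = PyByteArray_AS_STRING(ba)        # internal buffer, extra NUL appended
        p[0] = c'A'                                     # mutate in place
        return ba, PyByteArray_GET_SIZE(ba)             # (bytearray(b'Abc'), 3)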
- - Py_ssize_t PyByteArray_Size(object bytearray) - # Return the size of bytearray after checking for a NULL pointer. - - char* PyByteArray_AsString(object bytearray) - # Return the contents of bytearray as a char array after checking for a NULL pointer. - # The returned array always has an extra null byte appended. - - int PyByteArray_Resize(object bytearray, Py_ssize_t len) - # Resize the internal buffer of bytearray to len. - - char* PyByteArray_AS_STRING(object bytearray) - # Macro version of PyByteArray_AsString(). - - Py_ssize_t PyByteArray_GET_SIZE(object bytearray) - # Macro version of PyByteArray_Size(). +from .object cimport PyObject + +cdef extern from "Python.h": + bint PyByteArray_Check(object o) + # Return true if the object o is a bytearray object or an instance of a subtype of the bytearray type. + + bint PyByteArray_CheckExact(object o) + # Return true if the object o is a bytearray object, but not an instance of a subtype of the bytearray type. + + bytearray PyByteArray_FromObject(object o) + # Return a new bytearray object from any object, o, that implements the buffer protocol. + + bytearray PyByteArray_FromStringAndSize(char *string, Py_ssize_t len) + # Create a new bytearray object from string and its length, len. On failure, NULL is returned. + + bytearray PyByteArray_Concat(object a, object b) + # Concat bytearrays a and b and return a new bytearray with the result. + + Py_ssize_t PyByteArray_Size(object bytearray) + # Return the size of bytearray after checking for a NULL pointer. + + char* PyByteArray_AsString(object bytearray) + # Return the contents of bytearray as a char array after checking for a NULL pointer. + # The returned array always has an extra null byte appended. + + int PyByteArray_Resize(object bytearray, Py_ssize_t len) + # Resize the internal buffer of bytearray to len. + + char* PyByteArray_AS_STRING(object bytearray) + # Macro version of PyByteArray_AsString(). + + Py_ssize_t PyByteArray_GET_SIZE(object bytearray) + # Macro version of PyByteArray_Size(). diff --git a/contrib/tools/cython/Cython/Includes/cpython/bytes.pxd b/contrib/tools/cython/Cython/Includes/cpython/bytes.pxd index a21cc3baf6..ea72c6aae7 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/bytes.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/bytes.pxd @@ -1,4 +1,4 @@ -from .object cimport PyObject +from .object cimport PyObject cdef extern from "Python.h": ctypedef struct va_list diff --git a/contrib/tools/cython/Cython/Includes/cpython/ceval.pxd b/contrib/tools/cython/Cython/Includes/cpython/ceval.pxd index 51631b0a26..f22191f9fe 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/ceval.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/ceval.pxd @@ -1,8 +1,8 @@ - -cdef extern from "Python.h": - - void PyEval_InitThreads() - # Initialize and acquire the global interpreter lock. - - int PyEval_ThreadsInitialized() - # Returns a non-zero value if PyEval_InitThreads() has been called. + +cdef extern from "Python.h": + + void PyEval_InitThreads() + # Initialize and acquire the global interpreter lock. + + int PyEval_ThreadsInitialized() + # Returns a non-zero value if PyEval_InitThreads() has been called. 
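The two PyEval_* declarations in ceval.pxd above were the traditional way to make sure the GIL machinery is up before spawning native threads; a sketch (note that in CPython 3.7+ threads are always initialized, and PyEval_InitThreads is deprecated from 3.9):

    from cpython.ceval cimport PyEval_InitThreads, PyEval_ThreadsInitialized

    def ensure_threads():
        if not PyEval_ThreadsInitialized():
            PyEval_InitThreads()              # also acquires the GIL for the caller
        return PyEval_ThreadsInitialized()    # non-zero afterwards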
diff --git a/contrib/tools/cython/Cython/Includes/cpython/datetime.pxd b/contrib/tools/cython/Cython/Includes/cpython/datetime.pxd index 4dbf40d677..cd0f90719b 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/datetime.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/datetime.pxd @@ -1,4 +1,4 @@ -from cpython.object cimport PyObject +from cpython.object cimport PyObject cdef extern from "Python.h": ctypedef struct PyTypeObject: diff --git a/contrib/tools/cython/Cython/Includes/cpython/dict.pxd b/contrib/tools/cython/Cython/Includes/cpython/dict.pxd index fc531ab7af..16dd5e1458 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/dict.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/dict.pxd @@ -1,4 +1,4 @@ -from .object cimport PyObject +from .object cimport PyObject cdef extern from "Python.h": @@ -52,7 +52,7 @@ cdef extern from "Python.h": # be hashable; if it isn't, TypeError will be raised. Return 0 on # success or -1 on failure. - int PyDict_SetItemString(object p, const char *key, object val) except -1 + int PyDict_SetItemString(object p, const char *key, object val) except -1 # Insert value into the dictionary p using key as a key. key # should be a char*. The key object is created using # PyString_FromString(key). Return 0 on success or -1 on failure. @@ -62,7 +62,7 @@ cdef extern from "Python.h": # hashable; if it isn't, TypeError is raised. Return 0 on success # or -1 on failure. - int PyDict_DelItemString(object p, const char *key) except -1 + int PyDict_DelItemString(object p, const char *key) except -1 # Remove the entry in dictionary p which has a key specified by # the string key. Return 0 on success or -1 on failure. @@ -72,7 +72,7 @@ cdef extern from "Python.h": # NULL if the key key is not present, but without setting an # exception. - PyObject* PyDict_GetItemString(object p, const char *key) + PyObject* PyDict_GetItemString(object p, const char *key) # Return value: Borrowed reference. # This is the same as PyDict_GetItem(), but key is specified as a # char*, rather than a PyObject*. diff --git a/contrib/tools/cython/Cython/Includes/cpython/exc.pxd b/contrib/tools/cython/Cython/Includes/cpython/exc.pxd index 608fe3e560..bc57c0e571 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/exc.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/exc.pxd @@ -1,4 +1,4 @@ -from .object cimport PyObject +from .object cimport PyObject cdef extern from "Python.h": @@ -50,7 +50,7 @@ cdef extern from "Python.h": # return value to a specific exception; use # PyErr_ExceptionMatches() instead, shown below. (The comparison # could easily fail since the exception may be an instance instead - # of a class, in the case of a class exception, or it may be a + # of a class, in the case of a class exception, or it may be a # subclass of the expected exception.) bint PyErr_ExceptionMatches(object exc) @@ -153,13 +153,13 @@ cdef extern from "Python.h": # PyErr_SetFromErrno(type);" when the system call returns an # error. - PyObject* PyErr_SetFromErrnoWithFilenameObject(object type, object filenameObject) except NULL - # Similar to PyErr_SetFromErrno(), with the additional behavior - # that if filenameObject is not NULL, it is passed to the - # constructor of type as a third parameter. - # In the case of OSError exception, this is used to define - # the filename attribute of the exception instance. 
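PyDict_GetItemString in the dict.pxd hunk above returns a borrowed reference, or NULL without setting an exception, which is why it needs the explicit NULL check that plain indexing hides; a hedged sketch (names are illustrative):

    from cpython.dict cimport PyDict_SetItemString, PyDict_GetItemString
    from cpython.object cimport PyObject

    def lookup_demo():
        cdef PyObject* item
        d = {}
        PyDict_SetItemString(d, b"answer", 42)      # 0 on success, -1 raises
        item = PyDict_GetItemString(d, b"answer")   # borrowed ref, NULL if missing
        if item != NULL:
            return <object>item                     # 42; no Py_DECREF needed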
- + PyObject* PyErr_SetFromErrnoWithFilenameObject(object type, object filenameObject) except NULL + # Similar to PyErr_SetFromErrno(), with the additional behavior + # that if filenameObject is not NULL, it is passed to the + # constructor of type as a third parameter. + # In the case of OSError exception, this is used to define + # the filename attribute of the exception instance. + PyObject* PyErr_SetFromErrnoWithFilename(object type, char *filename) except NULL # Return value: Always NULL. Similar to PyErr_SetFromErrno(), # with the additional behavior that if filename is not NULL, it is diff --git a/contrib/tools/cython/Cython/Includes/cpython/function.pxd b/contrib/tools/cython/Cython/Includes/cpython/function.pxd index 8818e1e560..0002a3f6cb 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/function.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/function.pxd @@ -1,4 +1,4 @@ -from .object cimport PyObject +from .object cimport PyObject cdef extern from "Python.h": diff --git a/contrib/tools/cython/Cython/Includes/cpython/int.pxd b/contrib/tools/cython/Cython/Includes/cpython/int.pxd index e70cb3698f..50babff615 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/int.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/int.pxd @@ -46,12 +46,12 @@ cdef extern from "Python.h": object PyInt_FromSsize_t(Py_ssize_t ival) # Return value: New reference. # Create a new integer object with a value of ival. If the value - # is larger than LONG_MAX or smaller than LONG_MIN, a long integer - # object is returned. - - object PyInt_FromSize_t(size_t ival) - # Return value: New reference. - # Create a new integer object with a value of ival. If the value + # is larger than LONG_MAX or smaller than LONG_MIN, a long integer + # object is returned. + + object PyInt_FromSize_t(size_t ival) + # Return value: New reference. + # Create a new integer object with a value of ival. If the value # exceeds LONG_MAX, a long integer object is returned. long PyInt_AsLong(object io) except? -1 @@ -83,7 +83,7 @@ cdef extern from "Python.h": long PyInt_GetMax() # Return the system's idea of the largest integer it can handle # (LONG_MAX, as defined in the system header files). - - int PyInt_ClearFreeList() - # Clear the integer free list. Return the number of items that could not be freed. - # New in version 2.6. + + int PyInt_ClearFreeList() + # Clear the integer free list. Return the number of items that could not be freed. + # New in version 2.6. diff --git a/contrib/tools/cython/Cython/Includes/cpython/list.pxd b/contrib/tools/cython/Cython/Includes/cpython/list.pxd index 0d3d2c95a7..c6a29535c9 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/list.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/list.pxd @@ -1,4 +1,4 @@ -from .object cimport PyObject +from .object cimport PyObject cdef extern from "Python.h": diff --git a/contrib/tools/cython/Cython/Includes/cpython/long.pxd b/contrib/tools/cython/Cython/Includes/cpython/long.pxd index 2b70f9bbb1..eb8140d417 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/long.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/long.pxd @@ -146,4 +146,4 @@ cdef extern from "Python.h": # pointer. If pylong cannot be converted, an OverflowError will be # raised. This is only assured to produce a usable void pointer # for values created with PyLong_FromVoidPtr(). For values outside - # 0..LONG_MAX, both signed and unsigned integers are accepted. + # 0..LONG_MAX, both signed and unsigned integers are accepted. 
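The PyLong_FromVoidPtr()/PyLong_AsVoidPtr() pair that the end of long.pxd describes round-trips a C pointer through a Python int; a minimal sketch, with the module-level slot variable purely illustrative:

    from cpython.long cimport PyLong_FromVoidPtr, PyLong_AsVoidPtr

    cdef int slot = 42

    def roundtrip():
        cdef void* p = &slot
        obj = PyLong_FromVoidPtr(p)           # wrap the address in a Python int
        assert PyLong_AsVoidPtr(obj) == p     # recover the same pointer
        return obj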
diff --git a/contrib/tools/cython/Cython/Includes/cpython/longintrepr.pxd b/contrib/tools/cython/Cython/Includes/cpython/longintrepr.pxd index 12fb7dc0f5..c38c1bff88 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/longintrepr.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/longintrepr.pxd @@ -1,19 +1,19 @@ -# Internals of the "long" type (Python 2) or "int" type (Python 3). - +# Internals of the "long" type (Python 2) or "int" type (Python 3). + cdef extern from "Python.h": """ #if PY_MAJOR_VERSION < 3 #include "longintrepr.h" #endif """ - ctypedef unsigned int digit - ctypedef int sdigit # Python >= 2.7 only - - ctypedef class __builtin__.py_long [object PyLongObject]: - cdef digit* ob_digit - - cdef py_long _PyLong_New(Py_ssize_t s) - - cdef long PyLong_SHIFT - cdef digit PyLong_BASE - cdef digit PyLong_MASK + ctypedef unsigned int digit + ctypedef int sdigit # Python >= 2.7 only + + ctypedef class __builtin__.py_long [object PyLongObject]: + cdef digit* ob_digit + + cdef py_long _PyLong_New(Py_ssize_t s) + + cdef long PyLong_SHIFT + cdef digit PyLong_BASE + cdef digit PyLong_MASK diff --git a/contrib/tools/cython/Cython/Includes/cpython/mem.pxd b/contrib/tools/cython/Cython/Includes/cpython/mem.pxd index 57b12c9224..af820f2ee0 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/mem.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/mem.pxd @@ -27,7 +27,7 @@ cdef extern from "Python.h": # available for allocating and releasing memory from the Python # heap: - void* PyMem_RawMalloc(size_t n) nogil + void* PyMem_RawMalloc(size_t n) nogil void* PyMem_Malloc(size_t n) # Allocates n bytes and returns a pointer of type void* to the # allocated memory, or NULL if the request fails. Requesting zero @@ -35,7 +35,7 @@ cdef extern from "Python.h": # PyMem_Malloc(1) had been called instead. The memory will not # have been initialized in any way. - void* PyMem_RawRealloc(void *p, size_t n) nogil + void* PyMem_RawRealloc(void *p, size_t n) nogil void* PyMem_Realloc(void *p, size_t n) # Resizes the memory block pointed to by p to n bytes. The # contents will be unchanged to the minimum of the old and the new @@ -45,7 +45,7 @@ cdef extern from "Python.h": # NULL, it must have been returned by a previous call to # PyMem_Malloc() or PyMem_Realloc(). - void PyMem_RawFree(void *p) nogil + void PyMem_RawFree(void *p) nogil void PyMem_Free(void *p) # Frees the memory block pointed to by p, which must have been # returned by a previous call to PyMem_Malloc() or @@ -76,36 +76,36 @@ cdef extern from "Python.h": # PyMem_MALLOC(), PyMem_REALLOC(), PyMem_FREE(). # PyMem_NEW(), PyMem_RESIZE(), PyMem_DEL(). - - - ##################################################################### - # Raw object memory interface - ##################################################################### - - # Functions to call the same malloc/realloc/free as used by Python's - # object allocator. If WITH_PYMALLOC is enabled, these may differ from - # the platform malloc/realloc/free. The Python object allocator is - # designed for fast, cache-conscious allocation of many "small" objects, - # and with low hidden memory overhead. - # - # PyObject_Malloc(0) returns a unique non-NULL pointer if possible. - # - # PyObject_Realloc(NULL, n) acts like PyObject_Malloc(n). - # PyObject_Realloc(p != NULL, 0) does not return NULL, or free the memory - # at p. 
- # - # Returned pointers must be checked for NULL explicitly; no action is - # performed on failure other than to return NULL (no warning it printed, no - # exception is set, etc). - # - # For allocating objects, use PyObject_{New, NewVar} instead whenever - # possible. The PyObject_{Malloc, Realloc, Free} family is exposed - # so that you can exploit Python's small-block allocator for non-object - # uses. If you must use these routines to allocate object memory, make sure - # the object gets initialized via PyObject_{Init, InitVar} after obtaining - # the raw memory. - - void* PyObject_Malloc(size_t size) - void* PyObject_Calloc(size_t nelem, size_t elsize) - void* PyObject_Realloc(void *ptr, size_t new_size) - void PyObject_Free(void *ptr) + + + ##################################################################### + # Raw object memory interface + ##################################################################### + + # Functions to call the same malloc/realloc/free as used by Python's + # object allocator. If WITH_PYMALLOC is enabled, these may differ from + # the platform malloc/realloc/free. The Python object allocator is + # designed for fast, cache-conscious allocation of many "small" objects, + # and with low hidden memory overhead. + # + # PyObject_Malloc(0) returns a unique non-NULL pointer if possible. + # + # PyObject_Realloc(NULL, n) acts like PyObject_Malloc(n). + # PyObject_Realloc(p != NULL, 0) does not return NULL, or free the memory + # at p. + # + # Returned pointers must be checked for NULL explicitly; no action is + # performed on failure other than to return NULL (no warning it printed, no + # exception is set, etc). + # + # For allocating objects, use PyObject_{New, NewVar} instead whenever + # possible. The PyObject_{Malloc, Realloc, Free} family is exposed + # so that you can exploit Python's small-block allocator for non-object + # uses. If you must use these routines to allocate object memory, make sure + # the object gets initialized via PyObject_{Init, InitVar} after obtaining + # the raw memory. + + void* PyObject_Malloc(size_t size) + void* PyObject_Calloc(size_t nelem, size_t elsize) + void* PyObject_Realloc(void *ptr, size_t new_size) + void PyObject_Free(void *ptr) diff --git a/contrib/tools/cython/Cython/Includes/cpython/method.pxd b/contrib/tools/cython/Cython/Includes/cpython/method.pxd index 7866562605..f51ebcc7c7 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/method.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/method.pxd @@ -1,5 +1,5 @@ -from .object cimport PyObject - +from .object cimport PyObject + cdef extern from "Python.h": ############################################################################ # 7.5.4 Method Objects diff --git a/contrib/tools/cython/Cython/Includes/cpython/module.pxd b/contrib/tools/cython/Cython/Includes/cpython/module.pxd index d804eea5d1..8eb323b010 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/module.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/module.pxd @@ -1,4 +1,4 @@ -from .object cimport PyObject +from .object cimport PyObject cdef extern from "Python.h": ctypedef struct _inittab @@ -6,7 +6,7 @@ cdef extern from "Python.h": ##################################################################### # 5.3 Importing Modules ##################################################################### - object PyImport_ImportModule(const char *name) + object PyImport_ImportModule(const char *name) # Return value: New reference. 
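The canonical allocate/check/free pattern for the PyMem_* functions restored in mem.pxd above looks like this from Cython (function name and payload are illustrative):

    from cpython.mem cimport PyMem_Malloc, PyMem_Free

    def total(data):
        cdef Py_ssize_t i, n = len(data)
        cdef double s = 0.0
        cdef double* buf = <double*> PyMem_Malloc(n * sizeof(double))
        if not buf:
            raise MemoryError()
        try:
            for i in range(n):
                buf[i] = data[i]
                s += buf[i]
            return s
        finally:
            PyMem_Free(buf)    # must pair with PyMem_Malloc/PyMem_Realloc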
# This is a simplified interface to PyImport_ImportModuleEx() # below, leaving the globals and locals arguments set to @@ -20,7 +20,7 @@ cdef extern from "Python.h": # loaded.) Return a new reference to the imported module, or NULL # with an exception set on failure. - object PyImport_ImportModuleEx(const char *name, object globals, object locals, object fromlist) + object PyImport_ImportModuleEx(const char *name, object globals, object locals, object fromlist) # Return value: New reference. # Import a module. This is best described by referring to the @@ -64,7 +64,7 @@ cdef extern from "Python.h": # the reloaded module, or NULL with an exception set on failure # (the module still exists in this case). - PyObject* PyImport_AddModule(const char *name) except NULL + PyObject* PyImport_AddModule(const char *name) except NULL # Return value: Borrowed reference. # Return the module object corresponding to a module name. The # name argument may be of the form package.module. First check the @@ -145,7 +145,7 @@ cdef extern from "Python.h": bint PyModule_CheckExact(object p) # Return true if p is a module object, but not a subtype of PyModule_Type. - object PyModule_New(const char *name) + object PyModule_New(const char *name) # Return value: New reference. # Return a new module object with the __name__ attribute set to # name. Only the module's __doc__ and __name__ attributes are @@ -170,18 +170,18 @@ cdef extern from "Python.h": # module's __file__ attribute. If this is not defined, or if it is # not a string, raise SystemError and return NULL. - int PyModule_AddObject(object module, const char *name, object value) except -1 + int PyModule_AddObject(object module, const char *name, object value) except -1 # Add an object to module as name. This is a convenience function # which can be used from the module's initialization # function. This steals a reference to value. Return -1 on error, # 0 on success. - int PyModule_AddIntConstant(object module, const char *name, long value) except -1 + int PyModule_AddIntConstant(object module, const char *name, long value) except -1 # Add an integer constant to module as name. This convenience # function can be used from the module's initialization # function. Return -1 on error, 0 on success. - int PyModule_AddStringConstant(object module, const char *name, const char *value) except -1 + int PyModule_AddStringConstant(object module, const char *name, const char *value) except -1 # Add a string constant to module as name. This convenience # function can be used from the module's initialization # function. The string value must be null-terminated. 
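A sketch of the module.pxd import helpers from Cython; the choice of the math module and the added constant are arbitrary:

    from cpython.module cimport PyImport_ImportModule, PyModule_AddIntConstant

    def import_math():
        m = PyImport_ImportModule(b"math")         # new reference, ~ importlib.import_module("math")
        PyModule_AddIntConstant(m, b"answer", 42)  # convenience setter, -1 on error
        return m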
Return -1 on diff --git a/contrib/tools/cython/Cython/Includes/cpython/number.pxd b/contrib/tools/cython/Cython/Includes/cpython/number.pxd index 4e926fc3c5..ded35c292a 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/number.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/number.pxd @@ -1,4 +1,4 @@ -from .object cimport PyObject +from .object cimport PyObject cdef extern from "Python.h": diff --git a/contrib/tools/cython/Cython/Includes/cpython/object.pxd b/contrib/tools/cython/Cython/Includes/cpython/object.pxd index 891efb847f..5a81166393 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/object.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/object.pxd @@ -1,78 +1,78 @@ from libc.stdio cimport FILE -cimport cpython.type +cimport cpython.type cdef extern from "Python.h": - ctypedef struct PyObject # forward declaration - - ctypedef object (*newfunc)(cpython.type.type, object, object) # (type, args, kwargs) - - ctypedef object (*unaryfunc)(object) - ctypedef object (*binaryfunc)(object, object) - ctypedef object (*ternaryfunc)(object, object, object) + ctypedef struct PyObject # forward declaration + + ctypedef object (*newfunc)(cpython.type.type, object, object) # (type, args, kwargs) + + ctypedef object (*unaryfunc)(object) + ctypedef object (*binaryfunc)(object, object) + ctypedef object (*ternaryfunc)(object, object, object) ctypedef int (*inquiry)(object) except -1 ctypedef Py_ssize_t (*lenfunc)(object) except -1 - ctypedef object (*ssizeargfunc)(object, Py_ssize_t) - ctypedef object (*ssizessizeargfunc)(object, Py_ssize_t, Py_ssize_t) + ctypedef object (*ssizeargfunc)(object, Py_ssize_t) + ctypedef object (*ssizessizeargfunc)(object, Py_ssize_t, Py_ssize_t) ctypedef int (*ssizeobjargproc)(object, Py_ssize_t, object) except -1 ctypedef int (*ssizessizeobjargproc)(object, Py_ssize_t, Py_ssize_t, object) except -1 ctypedef int (*objobjargproc)(object, object, object) except -1 ctypedef int (*objobjproc)(object, object) except -1 - + ctypedef Py_hash_t (*hashfunc)(object) except -1 - ctypedef object (*reprfunc)(object) - + ctypedef object (*reprfunc)(object) + ctypedef int (*cmpfunc)(object, object) except -2 - ctypedef object (*richcmpfunc)(object, object, int) - - # The following functions use 'PyObject*' as first argument instead of 'object' to prevent - # accidental reference counting when calling them during a garbage collection run. - ctypedef void (*destructor)(PyObject*) + ctypedef object (*richcmpfunc)(object, object, int) + + # The following functions use 'PyObject*' as first argument instead of 'object' to prevent + # accidental reference counting when calling them during a garbage collection run. 
+ ctypedef void (*destructor)(PyObject*) ctypedef int (*visitproc)(PyObject*, void *) except -1 ctypedef int (*traverseproc)(PyObject*, visitproc, void*) except -1 ctypedef void (*freefunc)(void*) - - ctypedef object (*descrgetfunc)(object, object, object) - ctypedef int (*descrsetfunc)(object, object, object) except -1 - - ctypedef struct PyTypeObject: - const char* tp_name - const char* tp_doc - Py_ssize_t tp_basicsize - Py_ssize_t tp_itemsize - Py_ssize_t tp_dictoffset - unsigned long tp_flags - - newfunc tp_new - destructor tp_dealloc - traverseproc tp_traverse - inquiry tp_clear + + ctypedef object (*descrgetfunc)(object, object, object) + ctypedef int (*descrsetfunc)(object, object, object) except -1 + + ctypedef struct PyTypeObject: + const char* tp_name + const char* tp_doc + Py_ssize_t tp_basicsize + Py_ssize_t tp_itemsize + Py_ssize_t tp_dictoffset + unsigned long tp_flags + + newfunc tp_new + destructor tp_dealloc + traverseproc tp_traverse + inquiry tp_clear freefunc tp_free - - ternaryfunc tp_call - hashfunc tp_hash - reprfunc tp_str - reprfunc tp_repr - - cmpfunc tp_compare - richcmpfunc tp_richcompare - - PyTypeObject* tp_base - PyObject* tp_dict - - descrgetfunc tp_descr_get - descrsetfunc tp_descr_set - - ctypedef struct PyObject: - Py_ssize_t ob_refcnt - PyTypeObject *ob_type - - cdef PyTypeObject *Py_TYPE(object) - - void* PyObject_Malloc(size_t) - void* PyObject_Realloc(void *, size_t) - void PyObject_Free(void *) - + + ternaryfunc tp_call + hashfunc tp_hash + reprfunc tp_str + reprfunc tp_repr + + cmpfunc tp_compare + richcmpfunc tp_richcompare + + PyTypeObject* tp_base + PyObject* tp_dict + + descrgetfunc tp_descr_get + descrsetfunc tp_descr_set + + ctypedef struct PyObject: + Py_ssize_t ob_refcnt + PyTypeObject *ob_type + + cdef PyTypeObject *Py_TYPE(object) + + void* PyObject_Malloc(size_t) + void* PyObject_Realloc(void *, size_t) + void PyObject_Free(void *) + ##################################################################### # 6.1 Object Protocol ##################################################################### @@ -82,12 +82,12 @@ cdef extern from "Python.h": # option currently supported is Py_PRINT_RAW; if given, the str() # of the object is written instead of the repr(). - bint PyObject_HasAttrString(object o, const char *attr_name) + bint PyObject_HasAttrString(object o, const char *attr_name) # Returns 1 if o has the attribute attr_name, and 0 # otherwise. This is equivalent to the Python expression # "hasattr(o, attr_name)". This function always succeeds. - object PyObject_GetAttrString(object o, const char *attr_name) + object PyObject_GetAttrString(object o, const char *attr_name) # Return value: New reference. Retrieve an attribute named # attr_name from object o. Returns the attribute value on success, # or NULL on failure. This is the equivalent of the Python @@ -106,7 +106,7 @@ cdef extern from "Python.h": object PyObject_GenericGetAttr(object o, object attr_name) - int PyObject_SetAttrString(object o, const char *attr_name, object v) except -1 + int PyObject_SetAttrString(object o, const char *attr_name, object v) except -1 # Set the value of the attribute named attr_name, for object o, to # the value v. Returns -1 on failure. This is the equivalent of # the Python statement "o.attr_name = v". 
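The *AttrString helpers in this object.pxd hunk mirror hasattr/getattr/setattr with char* keys; a sketch, assuming the object carries a str-valued name attribute:

    from cpython.object cimport (PyObject_HasAttrString,
                                 PyObject_GetAttrString, PyObject_SetAttrString)

    def upper_name(obj):
        if PyObject_HasAttrString(obj, b"name"):           # never raises
            old = PyObject_GetAttrString(obj, b"name")     # new reference
            PyObject_SetAttrString(obj, b"name", old.upper())  # obj.name = old.upper()
        return obj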
@@ -118,7 +118,7 @@ cdef extern from "Python.h": int PyObject_GenericSetAttr(object o, object attr_name, object v) except -1 - int PyObject_DelAttrString(object o, const char *attr_name) except -1 + int PyObject_DelAttrString(object o, const char *attr_name) except -1 # Delete attribute named attr_name, for object o. Returns -1 on # failure. This is the equivalent of the Python statement: "del # o.attr_name". @@ -364,36 +364,36 @@ cdef extern from "Python.h": # Takes an arbitrary object and returns the result of calling # obj.__format__(format_spec). # Added in Py2.6 - - # Type flags (tp_flags of PyTypeObject) - long Py_TPFLAGS_HAVE_GETCHARBUFFER - long Py_TPFLAGS_HAVE_SEQUENCE_IN - long Py_TPFLAGS_HAVE_INPLACEOPS - long Py_TPFLAGS_CHECKTYPES - long Py_TPFLAGS_HAVE_RICHCOMPARE - long Py_TPFLAGS_HAVE_WEAKREFS - long Py_TPFLAGS_HAVE_ITER - long Py_TPFLAGS_HAVE_CLASS - long Py_TPFLAGS_HEAPTYPE - long Py_TPFLAGS_BASETYPE - long Py_TPFLAGS_READY - long Py_TPFLAGS_READYING - long Py_TPFLAGS_HAVE_GC - long Py_TPFLAGS_HAVE_STACKLESS_EXTENSION - long Py_TPFLAGS_HAVE_INDEX - long Py_TPFLAGS_HAVE_VERSION_TAG - long Py_TPFLAGS_VALID_VERSION_TAG - long Py_TPFLAGS_IS_ABSTRACT - long Py_TPFLAGS_HAVE_NEWBUFFER - long Py_TPFLAGS_INT_SUBCLASS - long Py_TPFLAGS_LONG_SUBCLASS - long Py_TPFLAGS_LIST_SUBCLASS - long Py_TPFLAGS_TUPLE_SUBCLASS - long Py_TPFLAGS_STRING_SUBCLASS - long Py_TPFLAGS_UNICODE_SUBCLASS - long Py_TPFLAGS_DICT_SUBCLASS - long Py_TPFLAGS_BASE_EXC_SUBCLASS - long Py_TPFLAGS_TYPE_SUBCLASS - long Py_TPFLAGS_DEFAULT_EXTERNAL - long Py_TPFLAGS_DEFAULT_CORE - long Py_TPFLAGS_DEFAULT + + # Type flags (tp_flags of PyTypeObject) + long Py_TPFLAGS_HAVE_GETCHARBUFFER + long Py_TPFLAGS_HAVE_SEQUENCE_IN + long Py_TPFLAGS_HAVE_INPLACEOPS + long Py_TPFLAGS_CHECKTYPES + long Py_TPFLAGS_HAVE_RICHCOMPARE + long Py_TPFLAGS_HAVE_WEAKREFS + long Py_TPFLAGS_HAVE_ITER + long Py_TPFLAGS_HAVE_CLASS + long Py_TPFLAGS_HEAPTYPE + long Py_TPFLAGS_BASETYPE + long Py_TPFLAGS_READY + long Py_TPFLAGS_READYING + long Py_TPFLAGS_HAVE_GC + long Py_TPFLAGS_HAVE_STACKLESS_EXTENSION + long Py_TPFLAGS_HAVE_INDEX + long Py_TPFLAGS_HAVE_VERSION_TAG + long Py_TPFLAGS_VALID_VERSION_TAG + long Py_TPFLAGS_IS_ABSTRACT + long Py_TPFLAGS_HAVE_NEWBUFFER + long Py_TPFLAGS_INT_SUBCLASS + long Py_TPFLAGS_LONG_SUBCLASS + long Py_TPFLAGS_LIST_SUBCLASS + long Py_TPFLAGS_TUPLE_SUBCLASS + long Py_TPFLAGS_STRING_SUBCLASS + long Py_TPFLAGS_UNICODE_SUBCLASS + long Py_TPFLAGS_DICT_SUBCLASS + long Py_TPFLAGS_BASE_EXC_SUBCLASS + long Py_TPFLAGS_TYPE_SUBCLASS + long Py_TPFLAGS_DEFAULT_EXTERNAL + long Py_TPFLAGS_DEFAULT_CORE + long Py_TPFLAGS_DEFAULT diff --git a/contrib/tools/cython/Cython/Includes/cpython/pylifecycle.pxd b/contrib/tools/cython/Cython/Includes/cpython/pylifecycle.pxd index 07f4edd397..2c71e37163 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/pylifecycle.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/pylifecycle.pxd @@ -1,68 +1,68 @@ -# Interfaces to configure, query, create & destroy the Python runtime - -from libc.stdio cimport FILE -from .pystate cimport PyThreadState - - -cdef extern from "Python.h": - ctypedef int wchar_t - - void Py_SetProgramName(wchar_t *) - wchar_t *Py_GetProgramName() - - void Py_SetPythonHome(wchar_t *) - wchar_t *Py_GetPythonHome() - - # Only used by applications that embed the interpreter and need to - # override the standard encoding determination mechanism - int Py_SetStandardStreamEncoding(const char *encoding, const char *errors) - - void Py_Initialize() - void Py_InitializeEx(int) - 
void _Py_InitializeEx_Private(int, int) - void Py_Finalize() - int Py_FinalizeEx() - int Py_IsInitialized() - PyThreadState *Py_NewInterpreter() - void Py_EndInterpreter(PyThreadState *) - - +# Interfaces to configure, query, create & destroy the Python runtime + +from libc.stdio cimport FILE +from .pystate cimport PyThreadState + + +cdef extern from "Python.h": + ctypedef int wchar_t + + void Py_SetProgramName(wchar_t *) + wchar_t *Py_GetProgramName() + + void Py_SetPythonHome(wchar_t *) + wchar_t *Py_GetPythonHome() + + # Only used by applications that embed the interpreter and need to + # override the standard encoding determination mechanism + int Py_SetStandardStreamEncoding(const char *encoding, const char *errors) + + void Py_Initialize() + void Py_InitializeEx(int) + void _Py_InitializeEx_Private(int, int) + void Py_Finalize() + int Py_FinalizeEx() + int Py_IsInitialized() + PyThreadState *Py_NewInterpreter() + void Py_EndInterpreter(PyThreadState *) + + # _Py_PyAtExit is for the atexit module, Py_AtExit is for low-level - # exit functions. + # exit functions. void _Py_PyAtExit(void (*func)(object), object) - int Py_AtExit(void (*func)()) - - void Py_Exit(int) - - # Restore signals that the interpreter has called SIG_IGN on to SIG_DFL. - void _Py_RestoreSignals() - - int Py_FdIsInteractive(FILE *, const char *) - - # Bootstrap __main__ (defined in Modules/main.c) - int Py_Main(int argc, wchar_t **argv) - - # In getpath.c - wchar_t *Py_GetProgramFullPath() - wchar_t *Py_GetPrefix() - wchar_t *Py_GetExecPrefix() - wchar_t *Py_GetPath() - void Py_SetPath(const wchar_t *) - int _Py_CheckPython3() - - # In their own files - const char *Py_GetVersion() - const char *Py_GetPlatform() - const char *Py_GetCopyright() - const char *Py_GetCompiler() - const char *Py_GetBuildInfo() - const char *_Py_gitidentifier() - const char *_Py_gitversion() - - ctypedef void (*PyOS_sighandler_t)(int) - PyOS_sighandler_t PyOS_getsig(int) - PyOS_sighandler_t PyOS_setsig(int, PyOS_sighandler_t) - - # Random - int _PyOS_URandom(void *buffer, Py_ssize_t size) - int _PyOS_URandomNonblock(void *buffer, Py_ssize_t size) + int Py_AtExit(void (*func)()) + + void Py_Exit(int) + + # Restore signals that the interpreter has called SIG_IGN on to SIG_DFL. 
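Most of pylifecycle.pxd only matters to applications embedding the interpreter, but the introspection calls work from any module; a small sketch:

    from cpython.pylifecycle cimport Py_GetVersion, Py_IsInitialized

    def runtime_info():
        cdef const char* v = Py_GetVersion()
        return (<bytes>v).decode(), Py_IsInitialized()   # e.g. ('3.8.10 ...', 1)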
+ void _Py_RestoreSignals() + + int Py_FdIsInteractive(FILE *, const char *) + + # Bootstrap __main__ (defined in Modules/main.c) + int Py_Main(int argc, wchar_t **argv) + + # In getpath.c + wchar_t *Py_GetProgramFullPath() + wchar_t *Py_GetPrefix() + wchar_t *Py_GetExecPrefix() + wchar_t *Py_GetPath() + void Py_SetPath(const wchar_t *) + int _Py_CheckPython3() + + # In their own files + const char *Py_GetVersion() + const char *Py_GetPlatform() + const char *Py_GetCopyright() + const char *Py_GetCompiler() + const char *Py_GetBuildInfo() + const char *_Py_gitidentifier() + const char *_Py_gitversion() + + ctypedef void (*PyOS_sighandler_t)(int) + PyOS_sighandler_t PyOS_getsig(int) + PyOS_sighandler_t PyOS_setsig(int, PyOS_sighandler_t) + + # Random + int _PyOS_URandom(void *buffer, Py_ssize_t size) + int _PyOS_URandomNonblock(void *buffer, Py_ssize_t size) diff --git a/contrib/tools/cython/Cython/Includes/cpython/pystate.pxd b/contrib/tools/cython/Cython/Includes/cpython/pystate.pxd index 07dcfda048..1af6307931 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/pystate.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/pystate.pxd @@ -1,14 +1,14 @@ # Thread and interpreter state structures and their interfaces -from .object cimport PyObject +from .object cimport PyObject cdef extern from "Python.h": - # We make these an opaque types. If the user wants specific attributes, + # We make these an opaque types. If the user wants specific attributes, # they can be declared manually. - ctypedef long PY_INT64_T # FIXME: Py2.7+, not defined here but used here - + ctypedef long PY_INT64_T # FIXME: Py2.7+, not defined here but used here + ctypedef struct PyInterpreterState: pass @@ -20,8 +20,8 @@ cdef extern from "Python.h": # This is not actually a struct, but make sure it can never be coerced to # an int or used in arithmetic expressions - ctypedef struct PyGILState_STATE: - pass + ctypedef struct PyGILState_STATE: + pass # The type of the trace function registered using PyEval_SetProfile() and # PyEval_SetTrace(). @@ -42,14 +42,14 @@ cdef extern from "Python.h": PyInterpreterState * PyInterpreterState_New() void PyInterpreterState_Clear(PyInterpreterState *) void PyInterpreterState_Delete(PyInterpreterState *) - PY_INT64_T PyInterpreterState_GetID(PyInterpreterState *) + PY_INT64_T PyInterpreterState_GetID(PyInterpreterState *) PyThreadState * PyThreadState_New(PyInterpreterState *) void PyThreadState_Clear(PyThreadState *) void PyThreadState_Delete(PyThreadState *) PyThreadState * PyThreadState_Get() - PyThreadState * PyThreadState_Swap(PyThreadState *) # NOTE: DO NOT USE IN CYTHON CODE ! + PyThreadState * PyThreadState_Swap(PyThreadState *) # NOTE: DO NOT USE IN CYTHON CODE ! PyObject * PyThreadState_GetDict() int PyThreadState_SetAsyncExc(long, PyObject *) diff --git a/contrib/tools/cython/Cython/Includes/cpython/ref.pxd b/contrib/tools/cython/Cython/Includes/cpython/ref.pxd index e586ad5b2b..4bc9a7d7c8 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/ref.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/ref.pxd @@ -1,5 +1,5 @@ -from .object cimport PyObject, PyTypeObject, Py_TYPE # legacy imports for re-export - +from .object cimport PyObject, PyTypeObject, Py_TYPE # legacy imports for re-export + cdef extern from "Python.h": ##################################################################### # 3. 
Reference Counts diff --git a/contrib/tools/cython/Cython/Includes/cpython/sequence.pxd b/contrib/tools/cython/Cython/Includes/cpython/sequence.pxd index 4550d1711b..eb279968d2 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/sequence.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/sequence.pxd @@ -1,4 +1,4 @@ -from .object cimport PyObject +from .object cimport PyObject cdef extern from "Python.h": diff --git a/contrib/tools/cython/Cython/Includes/cpython/slice.pxd b/contrib/tools/cython/Cython/Includes/cpython/slice.pxd index a4b26f0c81..202dea716c 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/slice.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/slice.pxd @@ -1,50 +1,50 @@ -cdef extern from "Python.h": - - # PyTypeObject PySlice_Type - # - # The type object for slice objects. This is the same as slice and types.SliceType - - bint PySlice_Check(object ob) - # - # Return true if ob is a slice object; ob must not be NULL. - - slice PySlice_New(object start, object stop, object step) - # - # Return a new slice object with the given values. The start, stop, and step - # parameters are used as the values of the slice object attributes of the same - # names. Any of the values may be NULL, in which case the None will be used - # for the corresponding attribute. Return NULL if the new object could not be - # allocated. - - int PySlice_GetIndices(object slice, Py_ssize_t length, - Py_ssize_t *start, Py_ssize_t *stop, Py_ssize_t *step) except? -1 - # - # Retrieve the start, stop and step indices from the slice object slice, - # assuming a sequence of length length. Treats indices greater than length - # as errors. - # - # Returns 0 on success and -1 on error with no exception set (unless one - # of the indices was not None and failed to be converted to an integer, - # in which case -1 is returned with an exception set). - # - # You probably do not want to use this function. - # - # Changed in version 3.2: The parameter type for the slice parameter was - # PySliceObject* before. - - int PySlice_GetIndicesEx(object slice, Py_ssize_t length, - Py_ssize_t *start, Py_ssize_t *stop, Py_ssize_t *step, - Py_ssize_t *slicelength) except -1 - # - # Usable replacement for PySlice_GetIndices(). Retrieve the start, stop, and step - # indices from the slice object slice assuming a sequence of length length, and - # store the length of the slice in slicelength. Out of bounds indices are clipped - # in a manner consistent with the handling of normal slices. - # - # Returns 0 on success and -1 on error with exception set. - # - # Changed in version 3.2: The parameter type for the slice parameter was - # PySliceObject* before. +cdef extern from "Python.h": + + # PyTypeObject PySlice_Type + # + # The type object for slice objects. This is the same as slice and types.SliceType + + bint PySlice_Check(object ob) + # + # Return true if ob is a slice object; ob must not be NULL. + + slice PySlice_New(object start, object stop, object step) + # + # Return a new slice object with the given values. The start, stop, and step + # parameters are used as the values of the slice object attributes of the same + # names. Any of the values may be NULL, in which case the None will be used + # for the corresponding attribute. Return NULL if the new object could not be + # allocated. + + int PySlice_GetIndices(object slice, Py_ssize_t length, + Py_ssize_t *start, Py_ssize_t *stop, Py_ssize_t *step) except? 
-1 + # + # Retrieve the start, stop and step indices from the slice object slice, + # assuming a sequence of length length. Treats indices greater than length + # as errors. + # + # Returns 0 on success and -1 on error with no exception set (unless one + # of the indices was not None and failed to be converted to an integer, + # in which case -1 is returned with an exception set). + # + # You probably do not want to use this function. + # + # Changed in version 3.2: The parameter type for the slice parameter was + # PySliceObject* before. + + int PySlice_GetIndicesEx(object slice, Py_ssize_t length, + Py_ssize_t *start, Py_ssize_t *stop, Py_ssize_t *step, + Py_ssize_t *slicelength) except -1 + # + # Usable replacement for PySlice_GetIndices(). Retrieve the start, stop, and step + # indices from the slice object slice assuming a sequence of length length, and + # store the length of the slice in slicelength. Out of bounds indices are clipped + # in a manner consistent with the handling of normal slices. + # + # Returns 0 on success and -1 on error with exception set. + # + # Changed in version 3.2: The parameter type for the slice parameter was + # PySliceObject* before. int PySlice_Unpack(object slice, Py_ssize_t *start, Py_ssize_t *stop, Py_ssize_t *step) except -1 diff --git a/contrib/tools/cython/Cython/Includes/cpython/string.pxd b/contrib/tools/cython/Cython/Includes/cpython/string.pxd index d0476f0fb2..8af78f3dde 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/string.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/string.pxd @@ -1,4 +1,4 @@ -from .object cimport PyObject +from .object cimport PyObject cdef extern from "Python.h": ctypedef struct va_list diff --git a/contrib/tools/cython/Cython/Includes/cpython/tuple.pxd b/contrib/tools/cython/Cython/Includes/cpython/tuple.pxd index 7e30fc796c..09c46e0b4b 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/tuple.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/tuple.pxd @@ -1,4 +1,4 @@ -from .object cimport PyObject +from .object cimport PyObject cdef extern from "Python.h": diff --git a/contrib/tools/cython/Cython/Includes/cpython/unicode.pxd b/contrib/tools/cython/Cython/Includes/cpython/unicode.pxd index a4b6a39b3c..ad01ed64df 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/unicode.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/unicode.pxd @@ -36,60 +36,60 @@ cdef extern from *: char* PyUnicode_AS_DATA(object o) # Return 1 or 0 depending on whether ch is a whitespace character. - bint Py_UNICODE_ISSPACE(Py_UCS4 ch) + bint Py_UNICODE_ISSPACE(Py_UCS4 ch) # Return 1 or 0 depending on whether ch is a lowercase character. - bint Py_UNICODE_ISLOWER(Py_UCS4 ch) + bint Py_UNICODE_ISLOWER(Py_UCS4 ch) # Return 1 or 0 depending on whether ch is an uppercase character. - bint Py_UNICODE_ISUPPER(Py_UCS4 ch) + bint Py_UNICODE_ISUPPER(Py_UCS4 ch) # Return 1 or 0 depending on whether ch is a titlecase character. - bint Py_UNICODE_ISTITLE(Py_UCS4 ch) + bint Py_UNICODE_ISTITLE(Py_UCS4 ch) # Return 1 or 0 depending on whether ch is a linebreak character. - bint Py_UNICODE_ISLINEBREAK(Py_UCS4 ch) + bint Py_UNICODE_ISLINEBREAK(Py_UCS4 ch) # Return 1 or 0 depending on whether ch is a decimal character. - bint Py_UNICODE_ISDECIMAL(Py_UCS4 ch) + bint Py_UNICODE_ISDECIMAL(Py_UCS4 ch) # Return 1 or 0 depending on whether ch is a digit character. - bint Py_UNICODE_ISDIGIT(Py_UCS4 ch) + bint Py_UNICODE_ISDIGIT(Py_UCS4 ch) # Return 1 or 0 depending on whether ch is a numeric character. 
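PySlice_GetIndicesEx above is the usual way to normalize a slice object against a known sequence length, clipping out-of-bounds indices like real slicing does; a sketch:

    from cpython.slice cimport PySlice_Check, PySlice_GetIndicesEx

    def normalize(s, Py_ssize_t length):
        cdef Py_ssize_t start, stop, step, slicelength
        if not PySlice_Check(s):
            raise TypeError("expected a slice")
        PySlice_GetIndicesEx(s, length, &start, &stop, &step, &slicelength)
        return start, stop, step, slicelength
        # e.g. normalize(slice(None, None, 2), 5) -> (0, 5, 2, 3)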
- bint Py_UNICODE_ISNUMERIC(Py_UCS4 ch) + bint Py_UNICODE_ISNUMERIC(Py_UCS4 ch) # Return 1 or 0 depending on whether ch is an alphabetic character. - bint Py_UNICODE_ISALPHA(Py_UCS4 ch) + bint Py_UNICODE_ISALPHA(Py_UCS4 ch) # Return 1 or 0 depending on whether ch is an alphanumeric character. - bint Py_UNICODE_ISALNUM(Py_UCS4 ch) + bint Py_UNICODE_ISALNUM(Py_UCS4 ch) # Return the character ch converted to lower case. - # Used to return a Py_UNICODE value before Py3.3. - Py_UCS4 Py_UNICODE_TOLOWER(Py_UCS4 ch) + # Used to return a Py_UNICODE value before Py3.3. + Py_UCS4 Py_UNICODE_TOLOWER(Py_UCS4 ch) # Return the character ch converted to upper case. - # Used to return a Py_UNICODE value before Py3.3. - Py_UCS4 Py_UNICODE_TOUPPER(Py_UCS4 ch) + # Used to return a Py_UNICODE value before Py3.3. + Py_UCS4 Py_UNICODE_TOUPPER(Py_UCS4 ch) # Return the character ch converted to title case. - # Used to return a Py_UNICODE value before Py3.3. - Py_UCS4 Py_UNICODE_TOTITLE(Py_UCS4 ch) + # Used to return a Py_UNICODE value before Py3.3. + Py_UCS4 Py_UNICODE_TOTITLE(Py_UCS4 ch) # Return the character ch converted to a decimal positive # integer. Return -1 if this is not possible. This macro does not # raise exceptions. - int Py_UNICODE_TODECIMAL(Py_UCS4 ch) + int Py_UNICODE_TODECIMAL(Py_UCS4 ch) # Return the character ch converted to a single digit # integer. Return -1 if this is not possible. This macro does not # raise exceptions. - int Py_UNICODE_TODIGIT(Py_UCS4 ch) + int Py_UNICODE_TODIGIT(Py_UCS4 ch) # Return the character ch converted to a double. Return -1.0 if # this is not possible. This macro does not raise exceptions. - double Py_UNICODE_TONUMERIC(Py_UCS4 ch) + double Py_UNICODE_TONUMERIC(Py_UCS4 ch) # To create Unicode objects and access their basic sequence # properties, use these APIs: @@ -145,131 +145,131 @@ cdef extern from *: #Py_ssize_t PyUnicode_AsWideChar(object o, wchar_t *w, Py_ssize_t size) - -# Unicode Methods - - # Concat two strings giving a new Unicode string. - # Return value: New reference. - unicode PyUnicode_Concat(object left, object right) - - # Split a string giving a list of Unicode strings. If sep is NULL, - # splitting will be done at all whitespace substrings. Otherwise, - # splits occur at the given separator. At most maxsplit splits will - # be done. If negative, no limit is set. Separators are not included - # in the resulting list. - # Return value: New reference. - list PyUnicode_Split(object s, object sep, Py_ssize_t maxsplit) - - # Split a Unicode string at line breaks, returning a list of Unicode - # strings. CRLF is considered to be one line break. If keepend is 0, - # the Line break characters are not included in the resulting strings. - # Return value: New reference. - list PyUnicode_Splitlines(object s, bint keepend) - - # Translate a string by applying a character mapping table to it and - # return the resulting Unicode object. - # - # The mapping table must map Unicode ordinal integers to Unicode ordinal - # integers or None (causing deletion of the character). - # - # Mapping tables need only provide the __getitem__() interface; - # dictionaries and sequences work well. Unmapped character ordinals (ones - # which cause a LookupError) are left untouched and are copied as-is. - # - # errors has the usual meaning for codecs. It may be NULL which indicates - # to use the default error handling. - # Return value: New reference. 
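(Aside: the character-property macros listed above are typically driven from a Py_UCS4-typed loop variable, which lets Cython iterate the string without boxing each character. A hedged sketch with a hypothetical helper name:)

    from cpython.unicode cimport Py_UNICODE_ISDIGIT, Py_UNICODE_TODIGIT

    def digit_sum(unicode s):
        # Sum the values of all decimal digit characters in s.
        cdef Py_UCS4 ch
        cdef long total = 0
        for ch in s:
            if Py_UNICODE_ISDIGIT(ch):
                total += Py_UNICODE_TODIGIT(ch)
        return total

digit_sum(u"a1b2c3") returns 6; non-digit characters are simply skipped, and Py_UNICODE_TODIGIT never sets an exception.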
- unicode PyUnicode_Translate(object str, object table, const char *errors) - - # Join a sequence of strings using the given separator and return the - # resulting Unicode string. - # Return value: New reference. - unicode PyUnicode_Join(object separator, object seq) - - # Return 1 if substr matches str[start:end] at the given tail end - # (direction == -1 means to do a prefix match, direction == 1 a - # suffix match), 0 otherwise. - # Return -1 if an error occurred. - Py_ssize_t PyUnicode_Tailmatch(object str, object substr, - Py_ssize_t start, Py_ssize_t end, int direction) except -1 - - # Return the first position of substr in str[start:end] using the given - # direction (direction == 1 means to do a forward search, direction == -1 - # a backward search). The return value is the index of the first match; - # a value of -1 indicates that no match was found, and -2 indicates that an - # error occurred and an exception has been set. - Py_ssize_t PyUnicode_Find(object str, object substr, Py_ssize_t start, Py_ssize_t end, int direction) except -2 - - # Return the first position of the character ch in str[start:end] using - # the given direction (direction == 1 means to do a forward search, - # direction == -1 a backward search). The return value is the index of - # the first match; a value of -1 indicates that no match was found, and - # -2 indicates that an error occurred and an exception has been set. - # New in version 3.3. - Py_ssize_t PyUnicode_FindChar(object str, Py_UCS4 ch, Py_ssize_t start, Py_ssize_t end, int direction) except -2 - - # Return the number of non-overlapping occurrences of substr in - # str[start:end]. Return -1 if an error occurred. - Py_ssize_t PyUnicode_Count(object str, object substr, Py_ssize_t start, Py_ssize_t end) except -1 - - # Replace at most maxcount occurrences of substr in str with replstr and - # return the resulting Unicode object. maxcount == -1 means replace all - # occurrences. - # Return value: New reference. - unicode PyUnicode_Replace(object str, object substr, object replstr, Py_ssize_t maxcount) - - # Compare two strings and return -1, 0, 1 for less than, - # equal, and greater than, respectively. - int PyUnicode_Compare(object left, object right) except? -1 - - # Compare a unicode object, uni, with string and return -1, 0, 1 for less than, - # equal, and greater than, respectively. It is best to pass only ASCII-encoded - # strings, but the function interprets the input string as ISO-8859-1 if it - # contains non-ASCII characters. + +# Unicode Methods + + # Concat two strings giving a new Unicode string. + # Return value: New reference. + unicode PyUnicode_Concat(object left, object right) + + # Split a string giving a list of Unicode strings. If sep is NULL, + # splitting will be done at all whitespace substrings. Otherwise, + # splits occur at the given separator. At most maxsplit splits will + # be done. If negative, no limit is set. Separators are not included + # in the resulting list. + # Return value: New reference. + list PyUnicode_Split(object s, object sep, Py_ssize_t maxsplit) + + # Split a Unicode string at line breaks, returning a list of Unicode + # strings. CRLF is considered to be one line break. If keepend is 0, + # the Line break characters are not included in the resulting strings. + # Return value: New reference. + list PyUnicode_Splitlines(object s, bint keepend) + + # Translate a string by applying a character mapping table to it and + # return the resulting Unicode object. 
+ # + # The mapping table must map Unicode ordinal integers to Unicode ordinal + # integers or None (causing deletion of the character). + # + # Mapping tables need only provide the __getitem__() interface; + # dictionaries and sequences work well. Unmapped character ordinals (ones + # which cause a LookupError) are left untouched and are copied as-is. + # + # errors has the usual meaning for codecs. It may be NULL which indicates + # to use the default error handling. + # Return value: New reference. + unicode PyUnicode_Translate(object str, object table, const char *errors) + + # Join a sequence of strings using the given separator and return the + # resulting Unicode string. + # Return value: New reference. + unicode PyUnicode_Join(object separator, object seq) + + # Return 1 if substr matches str[start:end] at the given tail end + # (direction == -1 means to do a prefix match, direction == 1 a + # suffix match), 0 otherwise. + # Return -1 if an error occurred. + Py_ssize_t PyUnicode_Tailmatch(object str, object substr, + Py_ssize_t start, Py_ssize_t end, int direction) except -1 + + # Return the first position of substr in str[start:end] using the given + # direction (direction == 1 means to do a forward search, direction == -1 + # a backward search). The return value is the index of the first match; + # a value of -1 indicates that no match was found, and -2 indicates that an + # error occurred and an exception has been set. + Py_ssize_t PyUnicode_Find(object str, object substr, Py_ssize_t start, Py_ssize_t end, int direction) except -2 + + # Return the first position of the character ch in str[start:end] using + # the given direction (direction == 1 means to do a forward search, + # direction == -1 a backward search). The return value is the index of + # the first match; a value of -1 indicates that no match was found, and + # -2 indicates that an error occurred and an exception has been set. + # New in version 3.3. + Py_ssize_t PyUnicode_FindChar(object str, Py_UCS4 ch, Py_ssize_t start, Py_ssize_t end, int direction) except -2 + + # Return the number of non-overlapping occurrences of substr in + # str[start:end]. Return -1 if an error occurred. + Py_ssize_t PyUnicode_Count(object str, object substr, Py_ssize_t start, Py_ssize_t end) except -1 + + # Replace at most maxcount occurrences of substr in str with replstr and + # return the resulting Unicode object. maxcount == -1 means replace all + # occurrences. + # Return value: New reference. + unicode PyUnicode_Replace(object str, object substr, object replstr, Py_ssize_t maxcount) + + # Compare two strings and return -1, 0, 1 for less than, + # equal, and greater than, respectively. + int PyUnicode_Compare(object left, object right) except? -1 + + # Compare a unicode object, uni, with string and return -1, 0, 1 for less than, + # equal, and greater than, respectively. It is best to pass only ASCII-encoded + # strings, but the function interprets the input string as ISO-8859-1 if it + # contains non-ASCII characters. int PyUnicode_CompareWithASCIIString(object uni, const char *string) - - # Rich compare two unicode strings and return one of the following: - # - # NULL in case an exception was raised - # Py_True or Py_False for successful comparisons - # Py_NotImplemented in case the type combination is unknown - # - # Note that Py_EQ and Py_NE comparisons can cause a UnicodeWarning in case - # the conversion of the arguments to Unicode fails with a UnicodeDecodeError. 
- # - # Possible values for op are Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, and Py_LE. - object PyUnicode_RichCompare(object left, object right, int op) - - # Return a new string object from format and args; this is analogous to - # format % args. - # Return value: New reference. - unicode PyUnicode_Format(object format, object args) - - # Check whether element is contained in container and return true or false - # accordingly. - # - # element has to coerce to a one element Unicode string. -1 is returned - # if there was an error. - int PyUnicode_Contains(object container, object element) except -1 - - # Intern the argument *string in place. The argument must be the address - # of a pointer variable pointing to a Python unicode string object. If - # there is an existing interned string that is the same as *string, it sets - # *string to it (decrementing the reference count of the old string object - # and incrementing the reference count of the interned string object), - # otherwise it leaves *string alone and interns it (incrementing its reference - # count). (Clarification: even though there is a lot of talk about reference - # counts, think of this function as reference-count-neutral; you own the object - # after the call if and only if you owned it before the call.) - #void PyUnicode_InternInPlace(PyObject **string) - - # A combination of PyUnicode_FromString() and PyUnicode_InternInPlace(), - # returning either a new unicode string object that has been interned, or - # a new ("owned") reference to an earlier interned string object with the - # same value. - unicode PyUnicode_InternFromString(const char *v) - - + + # Rich compare two unicode strings and return one of the following: + # + # NULL in case an exception was raised + # Py_True or Py_False for successful comparisons + # Py_NotImplemented in case the type combination is unknown + # + # Note that Py_EQ and Py_NE comparisons can cause a UnicodeWarning in case + # the conversion of the arguments to Unicode fails with a UnicodeDecodeError. + # + # Possible values for op are Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, and Py_LE. + object PyUnicode_RichCompare(object left, object right, int op) + + # Return a new string object from format and args; this is analogous to + # format % args. + # Return value: New reference. + unicode PyUnicode_Format(object format, object args) + + # Check whether element is contained in container and return true or false + # accordingly. + # + # element has to coerce to a one element Unicode string. -1 is returned + # if there was an error. + int PyUnicode_Contains(object container, object element) except -1 + + # Intern the argument *string in place. The argument must be the address + # of a pointer variable pointing to a Python unicode string object. If + # there is an existing interned string that is the same as *string, it sets + # *string to it (decrementing the reference count of the old string object + # and incrementing the reference count of the interned string object), + # otherwise it leaves *string alone and interns it (incrementing its reference + # count). (Clarification: even though there is a lot of talk about reference + # counts, think of this function as reference-count-neutral; you own the object + # after the call if and only if you owned it before the call.) 
+ #void PyUnicode_InternInPlace(PyObject **string) + + # A combination of PyUnicode_FromString() and PyUnicode_InternInPlace(), + # returning either a new unicode string object that has been interned, or + # a new ("owned") reference to an earlier interned string object with the + # same value. + unicode PyUnicode_InternFromString(const char *v) + + # Codecs # Create a Unicode object by decoding size bytes of the encoded @@ -300,22 +300,22 @@ cdef extern from *: # Create a Unicode object by decoding size bytes of the UTF-8 # encoded string s. Return NULL if an exception was raised by the # codec. - unicode PyUnicode_DecodeUTF8(char *s, Py_ssize_t size, char *errors) + unicode PyUnicode_DecodeUTF8(char *s, Py_ssize_t size, char *errors) # If consumed is NULL, behave like PyUnicode_DecodeUTF8(). If # consumed is not NULL, trailing incomplete UTF-8 byte sequences # will not be treated as an error. Those bytes will not be decoded # and the number of bytes that have been decoded will be stored in # consumed. New in version 2.4. - unicode PyUnicode_DecodeUTF8Stateful(char *s, Py_ssize_t size, char *errors, Py_ssize_t *consumed) + unicode PyUnicode_DecodeUTF8Stateful(char *s, Py_ssize_t size, char *errors, Py_ssize_t *consumed) # Encode the Py_UNICODE buffer of the given size using UTF-8 and # return a Python string object. Return NULL if an exception was # raised by the codec. - bytes PyUnicode_EncodeUTF8(Py_UNICODE *s, Py_ssize_t size, char *errors) + bytes PyUnicode_EncodeUTF8(Py_UNICODE *s, Py_ssize_t size, char *errors) # Encode a Unicode objects using UTF-8 and return the result as Python string object. Error handling is ``strict''. Return NULL if an exception was raised by the codec. - bytes PyUnicode_AsUTF8String(object unicode) + bytes PyUnicode_AsUTF8String(object unicode) # These are the UTF-16 codec APIs: @@ -337,7 +337,7 @@ cdef extern from *: # order at the. # # If byteorder is NULL, the codec starts in native order mode. - unicode PyUnicode_DecodeUTF16(char *s, Py_ssize_t size, char *errors, int *byteorder) + unicode PyUnicode_DecodeUTF16(char *s, Py_ssize_t size, char *errors, int *byteorder) # If consumed is NULL, behave like PyUnicode_DecodeUTF16(). If # consumed is not NULL, PyUnicode_DecodeUTF16Stateful() will not @@ -345,7 +345,7 @@ cdef extern from *: # number of bytes or a split surrogate pair) as an error. Those # bytes will not be decoded and the number of bytes that have been # decoded will be stored in consumed. New in version 2.4. - unicode PyUnicode_DecodeUTF16Stateful(char *s, Py_ssize_t size, char *errors, int *byteorder, Py_ssize_t *consumed) + unicode PyUnicode_DecodeUTF16Stateful(char *s, Py_ssize_t size, char *errors, int *byteorder, Py_ssize_t *consumed) # Return a Python string object holding the UTF-16 encoded value # of the Unicode data in s. If byteorder is not 0, output is @@ -362,13 +362,13 @@ cdef extern from *: # If Py_UNICODE_WIDE is defined, a single Py_UNICODE value may get # represented as a surrogate pair. If it is not defined, each # Py_UNICODE values is interpreted as an UCS-2 character. - bytes PyUnicode_EncodeUTF16(Py_UNICODE *s, Py_ssize_t size, char *errors, int byteorder) + bytes PyUnicode_EncodeUTF16(Py_UNICODE *s, Py_ssize_t size, char *errors, int byteorder) # Return a Python string using the UTF-16 encoding in native byte # order. The string always starts with a BOM mark. Error handling # is ``strict''. Return NULL if an exception was raised by the # codec. 
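(Aside on the UTF-8 codec entries above: a round-trip sketch. The errors argument is passed as a byte string, since a Python 3 str literal would not coerce to char*; the function name is illustrative only.)

    from cpython.unicode cimport PyUnicode_DecodeUTF8, PyUnicode_AsUTF8String

    def utf8_roundtrip(bytes data):
        # bytes -> unicode -> bytes via the declarations above.
        cdef unicode u = PyUnicode_DecodeUTF8(data, len(data), b"strict")
        return PyUnicode_AsUTF8String(u)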
- bytes PyUnicode_AsUTF16String(object unicode) + bytes PyUnicode_AsUTF16String(object unicode) # These are the ``Unicode Escape'' codec APIs: @@ -409,17 +409,17 @@ cdef extern from *: # Create a Unicode object by decoding size bytes of the Latin-1 # encoded string s. Return NULL if an exception was raised by the # codec. - unicode PyUnicode_DecodeLatin1(char *s, Py_ssize_t size, char *errors) + unicode PyUnicode_DecodeLatin1(char *s, Py_ssize_t size, char *errors) # Encode the Py_UNICODE buffer of the given size using Latin-1 and - # return a Python bytes object. Return NULL if an exception was + # return a Python bytes object. Return NULL if an exception was # raised by the codec. - bytes PyUnicode_EncodeLatin1(Py_UNICODE *s, Py_ssize_t size, char *errors) + bytes PyUnicode_EncodeLatin1(Py_UNICODE *s, Py_ssize_t size, char *errors) # Encode a Unicode objects using Latin-1 and return the result as - # Python bytes object. Error handling is ``strict''. Return NULL + # Python bytes object. Error handling is ``strict''. Return NULL # if an exception was raised by the codec. - bytes PyUnicode_AsLatin1String(object unicode) + bytes PyUnicode_AsLatin1String(object unicode) # These are the ASCII codec APIs. Only 7-bit ASCII data is # accepted. All other codes generate errors. @@ -427,17 +427,17 @@ cdef extern from *: # Create a Unicode object by decoding size bytes of the ASCII # encoded string s. Return NULL if an exception was raised by the # codec. - unicode PyUnicode_DecodeASCII(char *s, Py_ssize_t size, char *errors) + unicode PyUnicode_DecodeASCII(char *s, Py_ssize_t size, char *errors) # Encode the Py_UNICODE buffer of the given size using ASCII and - # return a Python bytes object. Return NULL if an exception was + # return a Python bytes object. Return NULL if an exception was # raised by the codec. - bytes PyUnicode_EncodeASCII(Py_UNICODE *s, Py_ssize_t size, char *errors) + bytes PyUnicode_EncodeASCII(Py_UNICODE *s, Py_ssize_t size, char *errors) # Encode a Unicode objects using ASCII and return the result as - # Python bytes object. Error handling is ``strict''. Return NULL + # Python bytes object. Error handling is ``strict''. Return NULL # if an exception was raised by the codec. - bytes PyUnicode_AsASCIIString(object o) + bytes PyUnicode_AsASCIIString(object o) # These are the mapping codec APIs: # @@ -478,8 +478,8 @@ cdef extern from *: # Encode the Py_UNICODE buffer of the given size using the given # mapping object and return a Python string object. Return NULL if # an exception was raised by the codec. - # - # Deprecated since version 3.3, will be removed in version 4.0. + # + # Deprecated since version 3.3, will be removed in version 4.0. object PyUnicode_EncodeCharmap(Py_UNICODE *s, Py_ssize_t size, object mapping, char *errors) # Encode a Unicode objects using the given mapping object and @@ -500,8 +500,8 @@ cdef extern from *: # dictionaries and sequences work well. Unmapped character # ordinals (ones which cause a LookupError) are left untouched and # are copied as-is. - # - # Deprecated since version 3.3, will be removed in version 4.0. + # + # Deprecated since version 3.3, will be removed in version 4.0. object PyUnicode_TranslateCharmap(Py_UNICODE *s, Py_ssize_t size, object table, char *errors) @@ -514,43 +514,43 @@ cdef extern from *: # Create a Unicode object by decoding size bytes of the MBCS # encoded string s. Return NULL if an exception was raised by the # codec. 
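(Aside on the narrow codecs in this hunk: a sketch that prefers strict 7-bit ASCII and falls back to Latin-1. Both calls raise through Cython's NULL-return handling on failure, so characters above U+00FF still fail; the helper name is hypothetical.)

    from cpython.unicode cimport PyUnicode_AsASCIIString, PyUnicode_AsLatin1String

    def encode_narrow(unicode u):
        # Try 7-bit ASCII first; fall back to 8-bit Latin-1.
        try:
            return PyUnicode_AsASCIIString(u)
        except UnicodeEncodeError:
            return PyUnicode_AsLatin1String(u)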
- unicode PyUnicode_DecodeMBCS(char *s, Py_ssize_t size, char *errors) + unicode PyUnicode_DecodeMBCS(char *s, Py_ssize_t size, char *errors) # If consumed is NULL, behave like PyUnicode_DecodeMBCS(). If # consumed is not NULL, PyUnicode_DecodeMBCSStateful() will not # decode trailing lead byte and the number of bytes that have been # decoded will be stored in consumed. New in version 2.5. # NOTE: Python 2.x uses 'int' values for 'size' and 'consumed' (changed in 3.0) - unicode PyUnicode_DecodeMBCSStateful(char *s, Py_ssize_t size, char *errors, Py_ssize_t *consumed) + unicode PyUnicode_DecodeMBCSStateful(char *s, Py_ssize_t size, char *errors, Py_ssize_t *consumed) # Encode the Py_UNICODE buffer of the given size using MBCS and # return a Python string object. Return NULL if an exception was # raised by the codec. - bytes PyUnicode_EncodeMBCS(Py_UNICODE *s, Py_ssize_t size, char *errors) + bytes PyUnicode_EncodeMBCS(Py_UNICODE *s, Py_ssize_t size, char *errors) # Encode a Unicode objects using MBCS and return the result as # Python string object. Error handling is ``strict''. Return NULL # if an exception was raised by the codec. - bytes PyUnicode_AsMBCSString(object o) - - # Encode the Unicode object using the specified code page and return - # a Python bytes object. Return NULL if an exception was raised by the - # codec. Use CP_ACP code page to get the MBCS encoder. - # - # New in version 3.3. - bytes PyUnicode_EncodeCodePage(int code_page, object unicode, const char *errors) - - -# Py_UCS4 helpers (new in CPython 3.3) - - # These utility functions work on strings of Py_UCS4 characters and - # otherwise behave like the C standard library functions with the same name. - - size_t Py_UCS4_strlen(const Py_UCS4 *u) - Py_UCS4* Py_UCS4_strcpy(Py_UCS4 *s1, const Py_UCS4 *s2) - Py_UCS4* Py_UCS4_strncpy(Py_UCS4 *s1, const Py_UCS4 *s2, size_t n) - Py_UCS4* Py_UCS4_strcat(Py_UCS4 *s1, const Py_UCS4 *s2) - int Py_UCS4_strcmp(const Py_UCS4 *s1, const Py_UCS4 *s2) - int Py_UCS4_strncmp(const Py_UCS4 *s1, const Py_UCS4 *s2, size_t n) - Py_UCS4* Py_UCS4_strchr(const Py_UCS4 *s, Py_UCS4 c) - Py_UCS4* Py_UCS4_strrchr(const Py_UCS4 *s, Py_UCS4 c) + bytes PyUnicode_AsMBCSString(object o) + + # Encode the Unicode object using the specified code page and return + # a Python bytes object. Return NULL if an exception was raised by the + # codec. Use CP_ACP code page to get the MBCS encoder. + # + # New in version 3.3. + bytes PyUnicode_EncodeCodePage(int code_page, object unicode, const char *errors) + + +# Py_UCS4 helpers (new in CPython 3.3) + + # These utility functions work on strings of Py_UCS4 characters and + # otherwise behave like the C standard library functions with the same name. 
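(Aside: the search and replace declarations earlier in this file compose as one would expect. A hedged sketch with hypothetical names; per the comments above, PyUnicode_Find returns -1 for no match and signals errors via -2 with an exception set.)

    from cpython.unicode cimport PyUnicode_Find, PyUnicode_Replace

    def censor(unicode text, unicode word):
        # Mask every occurrence of word, if it occurs at all.
        cdef Py_ssize_t pos = PyUnicode_Find(text, word, 0, len(text), 1)
        if pos == -1:
            return text
        return PyUnicode_Replace(text, word, u"*" * len(word), -1)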
+ + size_t Py_UCS4_strlen(const Py_UCS4 *u) + Py_UCS4* Py_UCS4_strcpy(Py_UCS4 *s1, const Py_UCS4 *s2) + Py_UCS4* Py_UCS4_strncpy(Py_UCS4 *s1, const Py_UCS4 *s2, size_t n) + Py_UCS4* Py_UCS4_strcat(Py_UCS4 *s1, const Py_UCS4 *s2) + int Py_UCS4_strcmp(const Py_UCS4 *s1, const Py_UCS4 *s2) + int Py_UCS4_strncmp(const Py_UCS4 *s1, const Py_UCS4 *s2, size_t n) + Py_UCS4* Py_UCS4_strchr(const Py_UCS4 *s, Py_UCS4 c) + Py_UCS4* Py_UCS4_strrchr(const Py_UCS4 *s, Py_UCS4 c) diff --git a/contrib/tools/cython/Cython/Includes/cpython/weakref.pxd b/contrib/tools/cython/Cython/Includes/cpython/weakref.pxd index de5a28027a..9c4b50f564 100644 --- a/contrib/tools/cython/Cython/Includes/cpython/weakref.pxd +++ b/contrib/tools/cython/Cython/Includes/cpython/weakref.pxd @@ -1,4 +1,4 @@ -from .object cimport PyObject +from .object cimport PyObject cdef extern from "Python.h": diff --git a/contrib/tools/cython/Cython/Includes/libc/errno.pxd b/contrib/tools/cython/Cython/Includes/libc/errno.pxd index a4d0b6951a..191d47b3dc 100644 --- a/contrib/tools/cython/Cython/Includes/libc/errno.pxd +++ b/contrib/tools/cython/Cython/Includes/libc/errno.pxd @@ -1,6 +1,6 @@ # 7.5 Errors <errno.h> -cdef extern from "<errno.h>" nogil: +cdef extern from "<errno.h>" nogil: enum: EPERM ENOENT @@ -61,7 +61,7 @@ cdef extern from "<errno.h>" nogil: EBFONT ENOSTR ENODATA - ENOATTR + ENOATTR ETIME ENOSR ENONET diff --git a/contrib/tools/cython/Cython/Includes/libc/float.pxd b/contrib/tools/cython/Cython/Includes/libc/float.pxd index ce1150d6ca..5e4e12d4f4 100644 --- a/contrib/tools/cython/Cython/Includes/libc/float.pxd +++ b/contrib/tools/cython/Cython/Includes/libc/float.pxd @@ -1,43 +1,43 @@ # 5.2.4.2.2 Characteristics of floating types <float.h> -cdef extern from "<float.h>": +cdef extern from "<float.h>": - const float FLT_RADIX + const float FLT_RADIX - const float FLT_MANT_DIG - const double DBL_MANT_DIG - const long double LDBL_MANT_DIG + const float FLT_MANT_DIG + const double DBL_MANT_DIG + const long double LDBL_MANT_DIG - const double DECIMAL_DIG + const double DECIMAL_DIG - const float FLT_DIG - const double DBL_DIG - const long double LDBL_DIG + const float FLT_DIG + const double DBL_DIG + const long double LDBL_DIG - const float FLT_MIN_EXP - const double DBL_MIN_EXP - const long double LDBL_MIN_EXP + const float FLT_MIN_EXP + const double DBL_MIN_EXP + const long double LDBL_MIN_EXP - const float FLT_MIN_10_EXP - const double DBL_MIN_10_EXP - const long double LDBL_MIN_10_EXP + const float FLT_MIN_10_EXP + const double DBL_MIN_10_EXP + const long double LDBL_MIN_10_EXP - const float FLT_MAX_EXP - const double DBL_MAX_EXP - const long double LDBL_MAX_EXP + const float FLT_MAX_EXP + const double DBL_MAX_EXP + const long double LDBL_MAX_EXP - const float FLT_MAX_10_EXP - const double DBL_MAX_10_EXP - const long double LDBL_MAX_10_EXP + const float FLT_MAX_10_EXP + const double DBL_MAX_10_EXP + const long double LDBL_MAX_10_EXP - const float FLT_MAX - const double DBL_MAX - const long double LDBL_MAX + const float FLT_MAX + const double DBL_MAX + const long double LDBL_MAX - const float FLT_EPSILON - const double DBL_EPSILON - const long double LDBL_EPSILON + const float FLT_EPSILON + const double DBL_EPSILON + const long double LDBL_EPSILON - const float FLT_MIN - const double DBL_MIN - const long double LDBL_MIN + const float FLT_MIN + const double DBL_MIN + const long double LDBL_MIN diff --git a/contrib/tools/cython/Cython/Includes/libc/limits.pxd b/contrib/tools/cython/Cython/Includes/libc/limits.pxd index 
ed035e91fc..39d10a1ff9 100644 --- a/contrib/tools/cython/Cython/Includes/libc/limits.pxd +++ b/contrib/tools/cython/Cython/Includes/libc/limits.pxd @@ -1,6 +1,6 @@ # 5.2.4.2.1 Sizes of integer types <limits.h> -cdef extern from "<limits.h>": +cdef extern from "<limits.h>": const int CHAR_BIT const int MB_LEN_MAX diff --git a/contrib/tools/cython/Cython/Includes/libc/locale.pxd b/contrib/tools/cython/Cython/Includes/libc/locale.pxd index 56af447e4d..5cbec953ef 100644 --- a/contrib/tools/cython/Cython/Includes/libc/locale.pxd +++ b/contrib/tools/cython/Cython/Includes/libc/locale.pxd @@ -4,7 +4,7 @@ from libc.string cimport const_char -cdef extern from "<locale.h>" nogil: +cdef extern from "<locale.h>" nogil: struct lconv: char *decimal_point diff --git a/contrib/tools/cython/Cython/Includes/libc/math.pxd b/contrib/tools/cython/Cython/Includes/libc/math.pxd index ef9ef1a916..b002670b22 100644 --- a/contrib/tools/cython/Cython/Includes/libc/math.pxd +++ b/contrib/tools/cython/Cython/Includes/libc/math.pxd @@ -1,27 +1,27 @@ -cdef extern from "<math.h>" nogil: - const double M_E - const double e "M_E" # as in Python's math module - const double M_LOG2E - const double M_LOG10E - const double M_LN2 - const double M_LN10 - const double M_PI - const double pi "M_PI" # as in Python's math module - const double M_PI_2 - const double M_PI_4 - const double M_1_PI - const double M_2_PI - const double M_2_SQRTPI - const double M_SQRT2 - const double M_SQRT1_2 +cdef extern from "<math.h>" nogil: + const double M_E + const double e "M_E" # as in Python's math module + const double M_LOG2E + const double M_LOG10E + const double M_LN2 + const double M_LN10 + const double M_PI + const double pi "M_PI" # as in Python's math module + const double M_PI_2 + const double M_PI_4 + const double M_1_PI + const double M_2_PI + const double M_2_SQRTPI + const double M_SQRT2 + const double M_SQRT1_2 # C99 constants - const float INFINITY - const float NAN - # note: not providing "nan" and "inf" aliases here as nan() is a function in C - const double HUGE_VAL - const float HUGE_VALF - const long double HUGE_VALL + const float INFINITY + const float NAN + # note: not providing "nan" and "inf" aliases here as nan() is a function in C + const double HUGE_VAL + const float HUGE_VALF + const long double HUGE_VALL double acos(double x) double asin(double x) @@ -91,19 +91,19 @@ cdef extern from "<math.h>" nogil: long double erfcl(long double) double fdim(double x, double y) - double fma(double x, double y, double z) + double fma(double x, double y, double z) double fmax(double x, double y) double fmin(double x, double y) double scalbln(double x, long n) double scalbn(double x, int n) double nan(const char*) - - int isinf(long double) # -1 / 0 / 1 + + int isinf(long double) # -1 / 0 / 1 bint isfinite(long double) - bint isnan(long double) + bint isnan(long double) bint isnormal(long double) - bint signbit(long double) + bint signbit(long double) int fpclassify(long double) const int FP_NAN const int FP_INFINITE diff --git a/contrib/tools/cython/Cython/Includes/libc/setjmp.pxd b/contrib/tools/cython/Cython/Includes/libc/setjmp.pxd index 9e8d47778a..6c11a534d4 100644 --- a/contrib/tools/cython/Cython/Includes/libc/setjmp.pxd +++ b/contrib/tools/cython/Cython/Includes/libc/setjmp.pxd @@ -1,10 +1,10 @@ -cdef extern from "<setjmp.h>" nogil: +cdef extern from "<setjmp.h>" nogil: ctypedef struct jmp_buf: pass int setjmp(jmp_buf state) void longjmp(jmp_buf state, int value) - - ctypedef struct sigjmp_buf: - pass - int 
sigsetjmp(sigjmp_buf state, int savesigs) - void siglongjmp(sigjmp_buf state, int value) + + ctypedef struct sigjmp_buf: + pass + int sigsetjmp(sigjmp_buf state, int savesigs) + void siglongjmp(sigjmp_buf state, int value) diff --git a/contrib/tools/cython/Cython/Includes/libc/signal.pxd b/contrib/tools/cython/Cython/Includes/libc/signal.pxd index d515ad007e..5d34935543 100644 --- a/contrib/tools/cython/Cython/Includes/libc/signal.pxd +++ b/contrib/tools/cython/Cython/Includes/libc/signal.pxd @@ -2,7 +2,7 @@ ctypedef void (*sighandler_t)(int SIGNUM) nogil -cdef extern from "<signal.h>" nogil: +cdef extern from "<signal.h>" nogil: ctypedef int sig_atomic_t diff --git a/contrib/tools/cython/Cython/Includes/libc/stddef.pxd b/contrib/tools/cython/Cython/Includes/libc/stddef.pxd index 61c1e4cc56..9b0f4c5fd2 100644 --- a/contrib/tools/cython/Cython/Includes/libc/stddef.pxd +++ b/contrib/tools/cython/Cython/Includes/libc/stddef.pxd @@ -1,6 +1,6 @@ # 7.17 Common definitions <stddef.h> -cdef extern from "<stddef.h>": +cdef extern from "<stddef.h>": ctypedef signed int ptrdiff_t diff --git a/contrib/tools/cython/Cython/Includes/libc/stdint.pxd b/contrib/tools/cython/Cython/Includes/libc/stdint.pxd index 6140d6e712..ced3d46add 100644 --- a/contrib/tools/cython/Cython/Includes/libc/stdint.pxd +++ b/contrib/tools/cython/Cython/Includes/libc/stdint.pxd @@ -2,7 +2,7 @@ # Actual compile time size used for conversions. # 7.18 Integer types <stdint.h> -cdef extern from "<stdint.h>" nogil: +cdef extern from "<stdint.h>" nogil: # 7.18.1 Integer types # 7.18.1.1 Exact-width integer types @@ -13,7 +13,7 @@ cdef extern from "<stdint.h>" nogil: ctypedef unsigned char uint8_t ctypedef unsigned short uint16_t ctypedef unsigned int uint32_t - ctypedef unsigned long long uint64_t + ctypedef unsigned long long uint64_t # 7.18.1.2 Minimum-width integer types ctypedef signed char int_least8_t ctypedef signed short int_least16_t @@ -22,7 +22,7 @@ cdef extern from "<stdint.h>" nogil: ctypedef unsigned char uint_least8_t ctypedef unsigned short uint_least16_t ctypedef unsigned int uint_least32_t - ctypedef unsigned long long uint_least64_t + ctypedef unsigned long long uint_least64_t # 7.18.1.3 Fastest minimum-width integer types ctypedef signed char int_fast8_t ctypedef signed short int_fast16_t @@ -31,7 +31,7 @@ cdef extern from "<stdint.h>" nogil: ctypedef unsigned char uint_fast8_t ctypedef unsigned short uint_fast16_t ctypedef unsigned int uint_fast32_t - ctypedef unsigned long long uint_fast64_t + ctypedef unsigned long long uint_fast64_t # 7.18.1.4 Integer types capable of holding object pointers ctypedef ssize_t intptr_t ctypedef size_t uintptr_t diff --git a/contrib/tools/cython/Cython/Includes/libc/stdio.pxd b/contrib/tools/cython/Cython/Includes/libc/stdio.pxd index f77418923b..1644a5a0ab 100644 --- a/contrib/tools/cython/Cython/Includes/libc/stdio.pxd +++ b/contrib/tools/cython/Cython/Includes/libc/stdio.pxd @@ -5,7 +5,7 @@ from libc.string cimport const_char, const_void -cdef extern from "<stdio.h>" nogil: +cdef extern from "<stdio.h>" nogil: ctypedef struct FILE cdef FILE *stdin diff --git a/contrib/tools/cython/Cython/Includes/libc/stdlib.pxd b/contrib/tools/cython/Cython/Includes/libc/stdlib.pxd index 24d14a46a9..e6fac821c7 100644 --- a/contrib/tools/cython/Cython/Includes/libc/stdlib.pxd +++ b/contrib/tools/cython/Cython/Includes/libc/stdlib.pxd @@ -4,7 +4,7 @@ from libc.string cimport const_char, const_void -cdef extern from "<stdlib.h>" nogil: +cdef extern from "<stdlib.h>" nogil: # 7.20.1 Numeric 
conversion functions int atoi (const char *string) diff --git a/contrib/tools/cython/Cython/Includes/libc/string.pxd b/contrib/tools/cython/Cython/Includes/libc/string.pxd index af1b9bf8a3..e6d96183f2 100644 --- a/contrib/tools/cython/Cython/Includes/libc/string.pxd +++ b/contrib/tools/cython/Cython/Includes/libc/string.pxd @@ -7,7 +7,7 @@ cdef extern from *: ctypedef const unsigned char const_uchar "const unsigned char" ctypedef const void const_void "const void" -cdef extern from "<string.h>" nogil: +cdef extern from "<string.h>" nogil: void *memcpy (void *pto, const void *pfrom, size_t size) void *memmove (void *pto, const void *pfrom, size_t size) diff --git a/contrib/tools/cython/Cython/Includes/libc/time.pxd b/contrib/tools/cython/Cython/Includes/libc/time.pxd index 7c7ea4a987..3aa15a2eee 100644 --- a/contrib/tools/cython/Cython/Includes/libc/time.pxd +++ b/contrib/tools/cython/Cython/Includes/libc/time.pxd @@ -1,46 +1,46 @@ -# http://en.wikipedia.org/wiki/C_date_and_time_functions - -from libc.stddef cimport wchar_t - -cdef extern from "<time.h>" nogil: - ctypedef long clock_t - ctypedef long time_t - - enum: CLOCKS_PER_SEC - clock_t clock() # CPU time - time_t time(time_t *) # wall clock time since Unix epoch - - cdef struct tm: - int tm_sec - int tm_min - int tm_hour - int tm_mday - int tm_mon - int tm_year - int tm_wday - int tm_yday - int tm_isdst - char *tm_zone - long tm_gmtoff - - int daylight # global state - long timezone - char *tzname[2] - void tzset() - - char *asctime(const tm *) - char *asctime_r(const tm *, char *) - char *ctime(const time_t *) - char *ctime_r(const time_t *, char *) - double difftime(time_t, time_t) - tm *getdate(const char *) - tm *gmtime(const time_t *) - tm *gmtime_r(const time_t *, tm *) - tm *localtime(const time_t *) - tm *localtime_r(const time_t *, tm *) - time_t mktime(tm *) - size_t strftime(char *, size_t, const char *, const tm *) - size_t wcsftime(wchar_t *str, size_t cnt, const wchar_t *fmt, tm *time) - - # POSIX not stdC - char *strptime(const char *, const char *, tm *) +# http://en.wikipedia.org/wiki/C_date_and_time_functions + +from libc.stddef cimport wchar_t + +cdef extern from "<time.h>" nogil: + ctypedef long clock_t + ctypedef long time_t + + enum: CLOCKS_PER_SEC + clock_t clock() # CPU time + time_t time(time_t *) # wall clock time since Unix epoch + + cdef struct tm: + int tm_sec + int tm_min + int tm_hour + int tm_mday + int tm_mon + int tm_year + int tm_wday + int tm_yday + int tm_isdst + char *tm_zone + long tm_gmtoff + + int daylight # global state + long timezone + char *tzname[2] + void tzset() + + char *asctime(const tm *) + char *asctime_r(const tm *, char *) + char *ctime(const time_t *) + char *ctime_r(const time_t *, char *) + double difftime(time_t, time_t) + tm *getdate(const char *) + tm *gmtime(const time_t *) + tm *gmtime_r(const time_t *, tm *) + tm *localtime(const time_t *) + tm *localtime_r(const time_t *, tm *) + time_t mktime(tm *) + size_t strftime(char *, size_t, const char *, const tm *) + size_t wcsftime(wchar_t *str, size_t cnt, const wchar_t *fmt, tm *time) + + # POSIX not stdC + char *strptime(const char *, const char *, tm *) diff --git a/contrib/tools/cython/Cython/Includes/libcpp/__init__.pxd b/contrib/tools/cython/Cython/Includes/libcpp/__init__.pxd index 842d72d1f2..111ea25c2f 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/__init__.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/__init__.pxd @@ -1,4 +1,4 @@ cdef extern from *: ctypedef bint bool - ctypedef void* nullptr_t - 
nullptr_t nullptr + ctypedef void* nullptr_t + nullptr_t nullptr diff --git a/contrib/tools/cython/Cython/Includes/libcpp/algorithm.pxd b/contrib/tools/cython/Cython/Includes/libcpp/algorithm.pxd index 7acc1b3b64..ec7c3835b4 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/algorithm.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/algorithm.pxd @@ -1,12 +1,12 @@ -from libcpp cimport bool - - +from libcpp cimport bool + + cdef extern from "<algorithm>" namespace "std" nogil: - # Sorting and searching - bool binary_search[Iter, T](Iter first, Iter last, const T& value) - bool binary_search[Iter, T, Compare](Iter first, Iter last, const T& value, - Compare comp) - + # Sorting and searching + bool binary_search[Iter, T](Iter first, Iter last, const T& value) + bool binary_search[Iter, T, Compare](Iter first, Iter last, const T& value, + Compare comp) + Iter lower_bound[Iter, T](Iter first, Iter last, const T& value) Iter lower_bound[Iter, T, Compare](Iter first, Iter last, const T& value, Compare comp) @@ -15,18 +15,18 @@ cdef extern from "<algorithm>" namespace "std" nogil: Iter upper_bound[Iter, T, Compare](Iter first, Iter last, const T& value, Compare comp) - void partial_sort[Iter](Iter first, Iter middle, Iter last) - void partial_sort[Iter, Compare](Iter first, Iter middle, Iter last, - Compare comp) - - void sort[Iter](Iter first, Iter last) - void sort[Iter, Compare](Iter first, Iter last, Compare comp) - + void partial_sort[Iter](Iter first, Iter middle, Iter last) + void partial_sort[Iter, Compare](Iter first, Iter middle, Iter last, + Compare comp) + + void sort[Iter](Iter first, Iter last) + void sort[Iter, Compare](Iter first, Iter last, Compare comp) + # Removing duplicates Iter unique[Iter](Iter first, Iter last) Iter unique[Iter, BinaryPredicate](Iter first, Iter last, BinaryPredicate p) - # Binary heaps (priority queues) + # Binary heaps (priority queues) void make_heap[Iter](Iter first, Iter last) void make_heap[Iter, Compare](Iter first, Iter last, Compare comp) @@ -38,6 +38,6 @@ cdef extern from "<algorithm>" namespace "std" nogil: void sort_heap[Iter](Iter first, Iter last) void sort_heap[Iter, Compare](Iter first, Iter last, Compare comp) - - # Copy - OutputIter copy[InputIter,OutputIter](InputIter,InputIter,OutputIter) + + # Copy + OutputIter copy[InputIter,OutputIter](InputIter,InputIter,OutputIter) diff --git a/contrib/tools/cython/Cython/Includes/libcpp/cast.pxd b/contrib/tools/cython/Cython/Includes/libcpp/cast.pxd index b9b2e73ee5..c3a4d8978f 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/cast.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/cast.pxd @@ -5,7 +5,7 @@ # than the standard C cast operator which can be written "<T>(expression)" in # Cython. 
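(Aside on the <algorithm> declarations above: they pair naturally with libcpp.vector, which is not part of this hunk. A minimal sketch, assuming the module is compiled as C++ and the list elements fit in int:)

    # distutils: language = c++
    from libcpp.vector cimport vector
    from libcpp.algorithm cimport sort

    def sorted_ints(xs):
        # Copy a Python list of ints into a C++ vector,
        # sort it in place, and convert back to a Python list.
        cdef vector[int] v = xs
        sort(v.begin(), v.end())
        return v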
-cdef extern from * nogil: +cdef extern from * nogil: cdef T dynamic_cast[T](void *) except + # nullptr may also indicate failure cdef T static_cast[T](void *) cdef T reinterpret_cast[T](void *) diff --git a/contrib/tools/cython/Cython/Includes/libcpp/complex.pxd b/contrib/tools/cython/Cython/Includes/libcpp/complex.pxd index 2c912e5f3a..c875d5e5bd 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/complex.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/complex.pxd @@ -39,63 +39,63 @@ cdef extern from "<complex>" namespace "std" nogil: void imag(T) # Return real part - T real[T](complex[T]&) + T real[T](complex[T]&) long double real(long double) double real(double) float real(float) # Return imaginary part - T imag[T](complex[T]&) + T imag[T](complex[T]&) long double imag(long double) double imag(double) float imag(float) - T abs[T](complex[T]&) - T arg[T](complex[T]&) + T abs[T](complex[T]&) + T arg[T](complex[T]&) long double arg(long double) double arg(double) float arg(float) - T norm[T](complex[T]) + T norm[T](complex[T]) long double norm(long double) double norm(double) float norm(float) - complex[T] conj[T](complex[T]&) + complex[T] conj[T](complex[T]&) complex[long double] conj(long double) complex[double] conj(double) complex[float] conj(float) - complex[T] proj[T](complex[T]) + complex[T] proj[T](complex[T]) complex[long double] proj(long double) complex[double] proj(double) complex[float] proj(float) - complex[T] polar[T](T&, T&) - complex[T] ploar[T](T&) + complex[T] polar[T](T&, T&) + complex[T] ploar[T](T&) - complex[T] exp[T](complex[T]&) - complex[T] log[T](complex[T]&) - complex[T] log10[T](complex[T]&) + complex[T] exp[T](complex[T]&) + complex[T] log[T](complex[T]&) + complex[T] log10[T](complex[T]&) - complex[T] pow[T](complex[T]&, complex[T]&) - complex[T] pow[T](complex[T]&, T&) - complex[T] pow[T](T&, complex[T]&) + complex[T] pow[T](complex[T]&, complex[T]&) + complex[T] pow[T](complex[T]&, T&) + complex[T] pow[T](T&, complex[T]&) # There are some promotion versions too - complex[T] sqrt[T](complex[T]&) + complex[T] sqrt[T](complex[T]&) - complex[T] sin[T](complex[T]&) - complex[T] cos[T](complex[T]&) - complex[T] tan[T](complex[T]&) - complex[T] asin[T](complex[T]&) - complex[T] acos[T](complex[T]&) - complex[T] atan[T](complex[T]&) + complex[T] sin[T](complex[T]&) + complex[T] cos[T](complex[T]&) + complex[T] tan[T](complex[T]&) + complex[T] asin[T](complex[T]&) + complex[T] acos[T](complex[T]&) + complex[T] atan[T](complex[T]&) - complex[T] sinh[T](complex[T]&) - complex[T] cosh[T](complex[T]&) - complex[T] tanh[T](complex[T]&) + complex[T] sinh[T](complex[T]&) + complex[T] cosh[T](complex[T]&) + complex[T] tanh[T](complex[T]&) - complex[T] asinh[T](complex[T]&) - complex[T] acosh[T](complex[T]&) - complex[T] atanh[T](complex[T]&) + complex[T] asinh[T](complex[T]&) + complex[T] acosh[T](complex[T]&) + complex[T] atanh[T](complex[T]&) diff --git a/contrib/tools/cython/Cython/Includes/libcpp/deque.pxd b/contrib/tools/cython/Cython/Includes/libcpp/deque.pxd index 9b5ba89ae3..9e2b2291d0 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/deque.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/deque.pxd @@ -1,11 +1,11 @@ cdef extern from "<deque>" namespace "std" nogil: - cdef cppclass deque[T,ALLOCATOR=*]: + cdef cppclass deque[T,ALLOCATOR=*]: ctypedef T value_type ctypedef ALLOCATOR allocator_type # these should really be allocator_type.size_type and # allocator_type.difference_type to be true to the C++ definition - # but cython doesn't support 
deferred access on template arguments + # but cython doesn't support deferred access on template arguments ctypedef size_t size_type ctypedef ptrdiff_t difference_type @@ -35,8 +35,8 @@ cdef extern from "<deque>" namespace "std" nogil: bint operator>(reverse_iterator) bint operator<=(reverse_iterator) bint operator>=(reverse_iterator) - cppclass const_iterator(iterator): - pass + cppclass const_iterator(iterator): + pass cppclass const_reverse_iterator(reverse_iterator): pass deque() except + @@ -57,11 +57,11 @@ cdef extern from "<deque>" namespace "std" nogil: T& at(size_t) T& back() iterator begin() - const_iterator const_begin "begin"() + const_iterator const_begin "begin"() void clear() bint empty() iterator end() - const_iterator const_end "end"() + const_iterator const_end "end"() iterator erase(iterator) iterator erase(iterator, iterator) T& front() @@ -81,6 +81,6 @@ cdef extern from "<deque>" namespace "std" nogil: void resize(size_t, T&) size_t size() void swap(deque&) - - # C++11 methods - void shrink_to_fit() + + # C++11 methods + void shrink_to_fit() diff --git a/contrib/tools/cython/Cython/Includes/libcpp/forward_list.pxd b/contrib/tools/cython/Cython/Includes/libcpp/forward_list.pxd index b8c7ba19c2..8c3b240d04 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/forward_list.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/forward_list.pxd @@ -1,62 +1,62 @@ -cdef extern from "<forward_list>" namespace "std" nogil: - cdef cppclass forward_list[T,ALLOCATOR=*]: - ctypedef T value_type - ctypedef ALLOCATOR allocator_type - - # these should really be allocator_type.size_type and - # allocator_type.difference_type to be true to the C++ definition - # but cython doesn't support deferred access on template arguments - ctypedef size_t size_type - ctypedef ptrdiff_t difference_type - - cppclass iterator: - iterator() - iterator(iterator &) - T& operator*() - iterator operator++() - bint operator==(iterator) - bint operator!=(iterator) - cppclass const_iterator(iterator): - pass - forward_list() except + - forward_list(forward_list&) except + - forward_list(size_t, T&) except + - #forward_list& operator=(forward_list&) - bint operator==(forward_list&, forward_list&) - bint operator!=(forward_list&, forward_list&) - bint operator<(forward_list&, forward_list&) - bint operator>(forward_list&, forward_list&) - bint operator<=(forward_list&, forward_list&) - bint operator>=(forward_list&, forward_list&) - void assign(size_t, T&) - T& front() - iterator before_begin() - const_iterator const_before_begin "before_begin"() - iterator begin() - const_iterator const_begin "begin"() - iterator end() - const_iterator const_end "end"() - bint empty() - size_t max_size() - void clear() - iterator insert_after(iterator, T&) - void insert_after(iterator, size_t, T&) - iterator erase_after(iterator) - iterator erase_after(iterator, iterator) - void push_front(T&) - void pop_front() - void resize(size_t) - void resize(size_t, T&) - void swap(forward_list&) - void merge(forward_list&) - void merge[Compare](forward_list&, Compare) - void splice_after(iterator, forward_list&) - void splice_after(iterator, forward_list&, iterator) - void splice_after(iterator, forward_list&, iterator, iterator) - void remove(const T&) - void remove_if[Predicate](Predicate) - void reverse() - void unique() - void unique[Predicate](Predicate) - void sort() - void sort[Compare](Compare) +cdef extern from "<forward_list>" namespace "std" nogil: + cdef cppclass forward_list[T,ALLOCATOR=*]: + ctypedef T value_type + 
ctypedef ALLOCATOR allocator_type + + # these should really be allocator_type.size_type and + # allocator_type.difference_type to be true to the C++ definition + # but cython doesn't support deferred access on template arguments + ctypedef size_t size_type + ctypedef ptrdiff_t difference_type + + cppclass iterator: + iterator() + iterator(iterator &) + T& operator*() + iterator operator++() + bint operator==(iterator) + bint operator!=(iterator) + cppclass const_iterator(iterator): + pass + forward_list() except + + forward_list(forward_list&) except + + forward_list(size_t, T&) except + + #forward_list& operator=(forward_list&) + bint operator==(forward_list&, forward_list&) + bint operator!=(forward_list&, forward_list&) + bint operator<(forward_list&, forward_list&) + bint operator>(forward_list&, forward_list&) + bint operator<=(forward_list&, forward_list&) + bint operator>=(forward_list&, forward_list&) + void assign(size_t, T&) + T& front() + iterator before_begin() + const_iterator const_before_begin "before_begin"() + iterator begin() + const_iterator const_begin "begin"() + iterator end() + const_iterator const_end "end"() + bint empty() + size_t max_size() + void clear() + iterator insert_after(iterator, T&) + void insert_after(iterator, size_t, T&) + iterator erase_after(iterator) + iterator erase_after(iterator, iterator) + void push_front(T&) + void pop_front() + void resize(size_t) + void resize(size_t, T&) + void swap(forward_list&) + void merge(forward_list&) + void merge[Compare](forward_list&, Compare) + void splice_after(iterator, forward_list&) + void splice_after(iterator, forward_list&, iterator) + void splice_after(iterator, forward_list&, iterator, iterator) + void remove(const T&) + void remove_if[Predicate](Predicate) + void reverse() + void unique() + void unique[Predicate](Predicate) + void sort() + void sort[Compare](Compare) diff --git a/contrib/tools/cython/Cython/Includes/libcpp/iterator.pxd b/contrib/tools/cython/Cython/Includes/libcpp/iterator.pxd index 8ce29b25e0..e0f8bd8d6e 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/iterator.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/iterator.pxd @@ -1,32 +1,32 @@ -#Basic reference: http://www.cplusplus.com/reference/iterator/ -#Most of these classes are in fact empty structs - -cdef extern from "<iterator>" namespace "std" nogil: - cdef cppclass iterator[Category,T,Distance,Pointer,Reference]: - pass - cdef cppclass output_iterator_tag: - pass - cdef cppclass input_iterator_tag: - pass - cdef cppclass forward_iterator_tag(input_iterator_tag): - pass - cdef cppclass bidirectional_iterator_tag(forward_iterator_tag): - pass - cdef cppclass random_access_iterator_tag(bidirectional_iterator_tag): - pass +#Basic reference: http://www.cplusplus.com/reference/iterator/ +#Most of these classes are in fact empty structs + +cdef extern from "<iterator>" namespace "std" nogil: + cdef cppclass iterator[Category,T,Distance,Pointer,Reference]: + pass + cdef cppclass output_iterator_tag: + pass + cdef cppclass input_iterator_tag: + pass + cdef cppclass forward_iterator_tag(input_iterator_tag): + pass + cdef cppclass bidirectional_iterator_tag(forward_iterator_tag): + pass + cdef cppclass random_access_iterator_tag(bidirectional_iterator_tag): + pass + + cdef cppclass back_insert_iterator[T](iterator[output_iterator_tag,void,void,void,void]): + pass + cdef cppclass front_insert_iterator[T](iterator[output_iterator_tag,void,void,void,void]): + pass + cdef cppclass 
insert_iterator[T](iterator[output_iterator_tag,void,void,void,void]): + pass + back_insert_iterator[CONTAINER] back_inserter[CONTAINER](CONTAINER &) + front_insert_iterator[CONTAINER] front_inserter[CONTAINER](CONTAINER &) + ##Note: this is the C++98 version of inserter. + ##The C++11 versions's prototype relies on typedef members of classes, which Cython doesn't currently support: + ##template <class Container> + ##insert_iterator<Container> inserter (Container& x, typename Container::iterator it) + insert_iterator[CONTAINER] inserter[CONTAINER,ITERATOR](CONTAINER &, ITERATOR) - cdef cppclass back_insert_iterator[T](iterator[output_iterator_tag,void,void,void,void]): - pass - cdef cppclass front_insert_iterator[T](iterator[output_iterator_tag,void,void,void,void]): - pass - cdef cppclass insert_iterator[T](iterator[output_iterator_tag,void,void,void,void]): - pass - back_insert_iterator[CONTAINER] back_inserter[CONTAINER](CONTAINER &) - front_insert_iterator[CONTAINER] front_inserter[CONTAINER](CONTAINER &) - ##Note: this is the C++98 version of inserter. - ##The C++11 versions's prototype relies on typedef members of classes, which Cython doesn't currently support: - ##template <class Container> - ##insert_iterator<Container> inserter (Container& x, typename Container::iterator it) - insert_iterator[CONTAINER] inserter[CONTAINER,ITERATOR](CONTAINER &, ITERATOR) - diff --git a/contrib/tools/cython/Cython/Includes/libcpp/limits.pxd b/contrib/tools/cython/Cython/Includes/libcpp/limits.pxd index a26bb20244..c325263b72 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/limits.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/limits.pxd @@ -1,61 +1,61 @@ -cdef extern from "<limits>" namespace "std" nogil: - enum float_round_style: - round_indeterminate = -1 - round_toward_zero = 0 - round_to_nearest = 1 - round_toward_infinity = 2 - round_toward_neg_infinity = 3 +cdef extern from "<limits>" namespace "std" nogil: + enum float_round_style: + round_indeterminate = -1 + round_toward_zero = 0 + round_to_nearest = 1 + round_toward_infinity = 2 + round_toward_neg_infinity = 3 - enum float_denorm_style: - denorm_indeterminate = -1 - denorm_absent = 0 - denorm_present = 1 - - #The static methods can be called as, e.g. numeric_limits[int].round_error(), etc. - #The const data members should be declared as static. Cython currently doesn't allow that - #and/or I can't figure it out, so you must instantiate an object to access, e.g. - #cdef numeric_limits[double] lm - #print lm.round_style - cdef cppclass numeric_limits[T]: - const bint is_specialized - @staticmethod - T min() - @staticmethod - T max() - const int digits - const int digits10 - const bint is_signed - const bint is_integer - const bint is_exact - const int radix - @staticmethod - T epsilon() - @staticmethod - T round_error() - - const int min_exponent - const int min_exponent10 - const int max_exponent - const int max_exponent10 + enum float_denorm_style: + denorm_indeterminate = -1 + denorm_absent = 0 + denorm_present = 1 - const bint has_infinity - const bint has_quiet_NaN - const bint has_signaling_NaN - const float_denorm_style has_denorm - const bint has_denorm_loss - @staticmethod - T infinity() - @staticmethod - T quiet_NaN() - @staticmethod - T signaling_NaN() - @staticmethod - T denorm_min() - - const bint is_iec559 - const bint is_bounded - const bint is_modulo - - const bint traps - const bint tinyness_before - const float_round_style round_style + #The static methods can be called as, e.g. 
numeric_limits[int].round_error(), etc. + #The const data members should be declared as static. Cython currently doesn't allow that + #and/or I can't figure it out, so you must instantiate an object to access, e.g. + #cdef numeric_limits[double] lm + #print lm.round_style + cdef cppclass numeric_limits[T]: + const bint is_specialized + @staticmethod + T min() + @staticmethod + T max() + const int digits + const int digits10 + const bint is_signed + const bint is_integer + const bint is_exact + const int radix + @staticmethod + T epsilon() + @staticmethod + T round_error() + + const int min_exponent + const int min_exponent10 + const int max_exponent + const int max_exponent10 + + const bint has_infinity + const bint has_quiet_NaN + const bint has_signaling_NaN + const float_denorm_style has_denorm + const bint has_denorm_loss + @staticmethod + T infinity() + @staticmethod + T quiet_NaN() + @staticmethod + T signaling_NaN() + @staticmethod + T denorm_min() + + const bint is_iec559 + const bint is_bounded + const bint is_modulo + + const bint traps + const bint tinyness_before + const float_round_style round_style diff --git a/contrib/tools/cython/Cython/Includes/libcpp/list.pxd b/contrib/tools/cython/Cython/Includes/libcpp/list.pxd index e350694da3..b5b0410ad8 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/list.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/list.pxd @@ -1,14 +1,14 @@ cdef extern from "<list>" namespace "std" nogil: - cdef cppclass list[T,ALLOCATOR=*]: - ctypedef T value_type - ctypedef ALLOCATOR allocator_type - - # these should really be allocator_type.size_type and - # allocator_type.difference_type to be true to the C++ definition - # but cython doesn't support deferred access on template arguments - ctypedef size_t size_type - ctypedef ptrdiff_t difference_type - + cdef cppclass list[T,ALLOCATOR=*]: + ctypedef T value_type + ctypedef ALLOCATOR allocator_type + + # these should really be allocator_type.size_type and + # allocator_type.difference_type to be true to the C++ definition + # but cython doesn't support deferred access on template arguments + ctypedef size_t size_type + ctypedef ptrdiff_t difference_type + cppclass iterator: iterator() iterator(iterator &) @@ -25,10 +25,10 @@ cdef extern from "<list>" namespace "std" nogil: reverse_iterator operator--() bint operator==(reverse_iterator) bint operator!=(reverse_iterator) - cppclass const_iterator(iterator): - pass - cppclass const_reverse_iterator(reverse_iterator): - pass + cppclass const_iterator(iterator): + pass + cppclass const_reverse_iterator(reverse_iterator): + pass list() except + list(list&) except + list(size_t, T&) except + @@ -42,11 +42,11 @@ cdef extern from "<list>" namespace "std" nogil: void assign(size_t, T&) T& back() iterator begin() - const_iterator const_begin "begin"() + const_iterator const_begin "begin"() void clear() bint empty() iterator end() - const_iterator const_end "end"() + const_iterator const_end "end"() iterator erase(iterator) iterator erase(iterator, iterator) T& front() @@ -60,11 +60,11 @@ cdef extern from "<list>" namespace "std" nogil: void push_back(T&) void push_front(T&) reverse_iterator rbegin() - const_reverse_iterator const_rbegin "rbegin"() + const_reverse_iterator const_rbegin "rbegin"() void remove(T&) #void remove_if(UnPred) reverse_iterator rend() - const_reverse_iterator const_rend "rend"() + const_reverse_iterator const_rend "rend"() void resize(size_t, T&) void reverse() size_t size() diff --git 
a/contrib/tools/cython/Cython/Includes/libcpp/map.pxd b/contrib/tools/cython/Cython/Includes/libcpp/map.pxd index 4507baf2a6..624a7ac026 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/map.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/map.pxd @@ -1,12 +1,12 @@ from .utility cimport pair cdef extern from "<map>" namespace "std" nogil: - cdef cppclass map[T, U, COMPARE=*, ALLOCATOR=*]: - ctypedef T key_type - ctypedef U mapped_type - ctypedef pair[const T, U] value_type - ctypedef COMPARE key_compare - ctypedef ALLOCATOR allocator_type + cdef cppclass map[T, U, COMPARE=*, ALLOCATOR=*]: + ctypedef T key_type + ctypedef U mapped_type + ctypedef pair[const T, U] value_type + ctypedef COMPARE key_compare + ctypedef ALLOCATOR allocator_type cppclass iterator: pair[T, U]& operator*() iterator operator++() @@ -19,10 +19,10 @@ cdef extern from "<map>" namespace "std" nogil: iterator operator--() bint operator==(reverse_iterator) bint operator!=(reverse_iterator) - cppclass const_iterator(iterator): - pass - cppclass const_reverse_iterator(reverse_iterator): - pass + cppclass const_iterator(iterator): + pass + cppclass const_reverse_iterator(reverse_iterator): + pass map() except + map(map&) except + #map(key_compare&) @@ -34,35 +34,35 @@ cdef extern from "<map>" namespace "std" nogil: bint operator>(map&, map&) bint operator<=(map&, map&) bint operator>=(map&, map&) - U& at(const T&) except + - const U& const_at "at"(const T&) except + + U& at(const T&) except + + const U& const_at "at"(const T&) except + iterator begin() const_iterator const_begin "begin" () void clear() - size_t count(const T&) + size_t count(const T&) bint empty() iterator end() const_iterator const_end "end" () - pair[iterator, iterator] equal_range(const T&) + pair[iterator, iterator] equal_range(const T&) #pair[const_iterator, const_iterator] equal_range(key_type&) void erase(iterator) void erase(iterator, iterator) - size_t erase(const T&) - iterator find(const T&) - const_iterator const_find "find" (const T&) - pair[iterator, bint] insert(pair[T, U]) except + # XXX pair[T,U]& - iterator insert(iterator, pair[T, U]) except + # XXX pair[T,U]& + size_t erase(const T&) + iterator find(const T&) + const_iterator const_find "find" (const T&) + pair[iterator, bint] insert(pair[T, U]) except + # XXX pair[T,U]& + iterator insert(iterator, pair[T, U]) except + # XXX pair[T,U]& #void insert(input_iterator, input_iterator) #key_compare key_comp() - iterator lower_bound(const T&) - const_iterator const_lower_bound "lower_bound"(const T&) + iterator lower_bound(const T&) + const_iterator const_lower_bound "lower_bound"(const T&) size_t max_size() reverse_iterator rbegin() - const_reverse_iterator const_rbegin "rbegin"() + const_reverse_iterator const_rbegin "rbegin"() reverse_iterator rend() - const_reverse_iterator const_rend "rend"() + const_reverse_iterator const_rend "rend"() size_t size() void swap(map&) - iterator upper_bound(const T&) - const_iterator const_upper_bound "upper_bound"(const T&) + iterator upper_bound(const T&) + const_iterator const_upper_bound "upper_bound"(const T&) #value_compare value_comp() diff --git a/contrib/tools/cython/Cython/Includes/libcpp/memory.pxd b/contrib/tools/cython/Cython/Includes/libcpp/memory.pxd index f8548011e4..2151c1ec7f 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/memory.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/memory.pxd @@ -1,115 +1,115 @@ -from libcpp cimport bool, nullptr_t, nullptr - -cdef extern from "<memory>" namespace "std" nogil: - cdef 
cppclass default_delete[T]: - default_delete() - - cdef cppclass allocator[T]: - allocator() - allocator(const allocator &) - #allocator(const allocator[U] &) #unique_ptr unit tests fail w/this - T * address(T &) - const T * address(const T &) const - T * allocate( size_t n ) # Not to standard. should be a second default argument - void deallocate(T * , size_t) - size_t max_size() const - void construct( T *, const T &) #C++98. The C++11 version is variadic AND perfect-forwarding - void destroy(T *) #C++98 - void destroy[U](U *) #unique_ptr unit tests fail w/this - - - cdef cppclass unique_ptr[T,DELETER=*]: - unique_ptr() - unique_ptr(nullptr_t) - unique_ptr(T*) - unique_ptr(unique_ptr[T]&) - - # Modifiers - T* release() - void reset() - void reset(nullptr_t) - void reset(T*) - void swap(unique_ptr&) - - # Observers - T* get() - T& operator*() - #T* operator->() # Not Supported - bool operator bool() - bool operator!() - - bool operator==(const unique_ptr&) - bool operator!=(const unique_ptr&) - bool operator<(const unique_ptr&) - bool operator>(const unique_ptr&) - bool operator<=(const unique_ptr&) - bool operator>=(const unique_ptr&) - - bool operator==(nullptr_t) - bool operator!=(nullptr_t) - - # Forward Declaration not working ("Compiler crash in AnalyseDeclarationsTransform") - #cdef cppclass weak_ptr[T] - - cdef cppclass shared_ptr[T]: - shared_ptr() - shared_ptr(nullptr_t) - shared_ptr(T*) - shared_ptr(shared_ptr[T]&) - shared_ptr(shared_ptr[T]&, T*) - shared_ptr(unique_ptr[T]&) - #shared_ptr(weak_ptr[T]&) # Not Supported - - # Modifiers - void reset() - void reset(T*) - void swap(shared_ptr&) - - # Observers - T* get() - T& operator*() - #T* operator->() # Not Supported - long use_count() - bool unique() - bool operator bool() - bool operator!() - #bool owner_before[Y](const weak_ptr[Y]&) # Not Supported - bool owner_before[Y](const shared_ptr[Y]&) - - bool operator==(const shared_ptr&) - bool operator!=(const shared_ptr&) - bool operator<(const shared_ptr&) - bool operator>(const shared_ptr&) - bool operator<=(const shared_ptr&) - bool operator>=(const shared_ptr&) - - bool operator==(nullptr_t) - bool operator!=(nullptr_t) - - cdef cppclass weak_ptr[T]: - weak_ptr() - weak_ptr(weak_ptr[T]&) - weak_ptr(shared_ptr[T]&) - - # Modifiers - void reset() - void swap(weak_ptr&) - - # Observers - long use_count() - bool expired() - shared_ptr[T] lock() - bool owner_before[Y](const weak_ptr[Y]&) - bool owner_before[Y](const shared_ptr[Y]&) - - # Smart pointer non-member operations - shared_ptr[T] make_shared[T](...) except + - - # Temporaries used for exception handling break generated code - unique_ptr[T] make_unique[T](...) # except + - - # No checking on the compatibility of T and U. - cdef shared_ptr[T] static_pointer_cast[T, U](const shared_ptr[U]&) - cdef shared_ptr[T] dynamic_pointer_cast[T, U](const shared_ptr[U]&) - cdef shared_ptr[T] const_pointer_cast[T, U](const shared_ptr[U]&) - cdef shared_ptr[T] reinterpret_pointer_cast[T, U](const shared_ptr[U]&) +from libcpp cimport bool, nullptr_t, nullptr + +cdef extern from "<memory>" namespace "std" nogil: + cdef cppclass default_delete[T]: + default_delete() + + cdef cppclass allocator[T]: + allocator() + allocator(const allocator &) + #allocator(const allocator[U] &) #unique_ptr unit tests fail w/this + T * address(T &) + const T * address(const T &) const + T * allocate( size_t n ) # Not to standard. 
should be a second default argument + void deallocate(T * , size_t) + size_t max_size() const + void construct( T *, const T &) #C++98. The C++11 version is variadic AND perfect-forwarding + void destroy(T *) #C++98 + void destroy[U](U *) #unique_ptr unit tests fail w/this + + + cdef cppclass unique_ptr[T,DELETER=*]: + unique_ptr() + unique_ptr(nullptr_t) + unique_ptr(T*) + unique_ptr(unique_ptr[T]&) + + # Modifiers + T* release() + void reset() + void reset(nullptr_t) + void reset(T*) + void swap(unique_ptr&) + + # Observers + T* get() + T& operator*() + #T* operator->() # Not Supported + bool operator bool() + bool operator!() + + bool operator==(const unique_ptr&) + bool operator!=(const unique_ptr&) + bool operator<(const unique_ptr&) + bool operator>(const unique_ptr&) + bool operator<=(const unique_ptr&) + bool operator>=(const unique_ptr&) + + bool operator==(nullptr_t) + bool operator!=(nullptr_t) + + # Forward Declaration not working ("Compiler crash in AnalyseDeclarationsTransform") + #cdef cppclass weak_ptr[T] + + cdef cppclass shared_ptr[T]: + shared_ptr() + shared_ptr(nullptr_t) + shared_ptr(T*) + shared_ptr(shared_ptr[T]&) + shared_ptr(shared_ptr[T]&, T*) + shared_ptr(unique_ptr[T]&) + #shared_ptr(weak_ptr[T]&) # Not Supported + + # Modifiers + void reset() + void reset(T*) + void swap(shared_ptr&) + + # Observers + T* get() + T& operator*() + #T* operator->() # Not Supported + long use_count() + bool unique() + bool operator bool() + bool operator!() + #bool owner_before[Y](const weak_ptr[Y]&) # Not Supported + bool owner_before[Y](const shared_ptr[Y]&) + + bool operator==(const shared_ptr&) + bool operator!=(const shared_ptr&) + bool operator<(const shared_ptr&) + bool operator>(const shared_ptr&) + bool operator<=(const shared_ptr&) + bool operator>=(const shared_ptr&) + + bool operator==(nullptr_t) + bool operator!=(nullptr_t) + + cdef cppclass weak_ptr[T]: + weak_ptr() + weak_ptr(weak_ptr[T]&) + weak_ptr(shared_ptr[T]&) + + # Modifiers + void reset() + void swap(weak_ptr&) + + # Observers + long use_count() + bool expired() + shared_ptr[T] lock() + bool owner_before[Y](const weak_ptr[Y]&) + bool owner_before[Y](const shared_ptr[Y]&) + + # Smart pointer non-member operations + shared_ptr[T] make_shared[T](...) except + + + # Temporaries used for exception handling break generated code + unique_ptr[T] make_unique[T](...) # except + + + # No checking on the compatibility of T and U. 
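
The smart-pointer declarations above are easier to follow with a concrete caller. A minimal usage sketch (not part of the patch; assumes a .pyx module compiled as C++, and uses only names declared in this memory.pxd):

    # distutils: language = c++
    from cython.operator cimport dereference as deref
    from libcpp.memory cimport shared_ptr, make_shared

    def demo():
        # make_shared[T](...) forwards its arguments to T's constructor
        # (declared variadically as '...' above).
        cdef shared_ptr[int] sp = make_shared[int](41)
        cdef shared_ptr[int] sp2 = sp   # copy construction bumps the count
        assert sp.use_count() == 2
        return deref(sp) + 1            # operator* declared above -> 42
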
+ cdef shared_ptr[T] static_pointer_cast[T, U](const shared_ptr[U]&) + cdef shared_ptr[T] dynamic_pointer_cast[T, U](const shared_ptr[U]&) + cdef shared_ptr[T] const_pointer_cast[T, U](const shared_ptr[U]&) + cdef shared_ptr[T] reinterpret_pointer_cast[T, U](const shared_ptr[U]&) diff --git a/contrib/tools/cython/Cython/Includes/libcpp/queue.pxd b/contrib/tools/cython/Cython/Includes/libcpp/queue.pxd index f610249c52..578cbd9159 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/queue.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/queue.pxd @@ -9,9 +9,9 @@ cdef extern from "<queue>" namespace "std" nogil: void pop() void push(T&) size_t size() - # C++11 methods - void swap(queue&) - + # C++11 methods + void swap(queue&) + cdef cppclass priority_queue[T]: priority_queue() except + priority_queue(priority_queue&) except + @@ -21,5 +21,5 @@ cdef extern from "<queue>" namespace "std" nogil: void push(T&) size_t size() T& top() - # C++11 methods - void swap(priority_queue&) + # C++11 methods + void swap(priority_queue&) diff --git a/contrib/tools/cython/Cython/Includes/libcpp/set.pxd b/contrib/tools/cython/Cython/Includes/libcpp/set.pxd index 2de86db1ea..1069be7466 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/set.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/set.pxd @@ -2,7 +2,7 @@ from .utility cimport pair cdef extern from "<set>" namespace "std" nogil: cdef cppclass set[T]: - ctypedef T value_type + ctypedef T value_type cppclass iterator: T& operator*() iterator operator++() @@ -15,10 +15,10 @@ cdef extern from "<set>" namespace "std" nogil: iterator operator--() bint operator==(reverse_iterator) bint operator!=(reverse_iterator) - cppclass const_iterator(iterator): - pass - cppclass const_reverse_iterator(reverse_iterator): - pass + cppclass const_iterator(iterator): + pass + cppclass const_reverse_iterator(reverse_iterator): + pass set() except + set(set&) except + #set(key_compare&) @@ -30,32 +30,32 @@ cdef extern from "<set>" namespace "std" nogil: bint operator<=(set&, set&) bint operator>=(set&, set&) iterator begin() - const_iterator const_begin "begin"() + const_iterator const_begin "begin"() void clear() - size_t count(const T&) + size_t count(const T&) bint empty() iterator end() - const_iterator const_end "end"() - pair[iterator, iterator] equal_range(const T&) + const_iterator const_end "end"() + pair[iterator, iterator] equal_range(const T&) #pair[const_iterator, const_iterator] equal_range(T&) - iterator erase(iterator) - iterator erase(iterator, iterator) + iterator erase(iterator) + iterator erase(iterator, iterator) size_t erase(T&) iterator find(T&) - const_iterator const_find "find"(T&) - pair[iterator, bint] insert(const T&) except + - iterator insert(iterator, const T&) except + - void insert(iterator, iterator) except + + const_iterator const_find "find"(T&) + pair[iterator, bint] insert(const T&) except + + iterator insert(iterator, const T&) except + + void insert(iterator, iterator) except + #key_compare key_comp() iterator lower_bound(T&) - const_iterator const_lower_bound "lower_bound"(T&) + const_iterator const_lower_bound "lower_bound"(T&) size_t max_size() reverse_iterator rbegin() - const_reverse_iterator const_rbegin "rbegin"() + const_reverse_iterator const_rbegin "rbegin"() reverse_iterator rend() - const_reverse_iterator const_rend "rend"() + const_reverse_iterator const_rend "rend"() size_t size() void swap(set&) - iterator upper_bound(const T&) - const_iterator const_upper_bound "upper_bound"(const T&) + iterator 
upper_bound(const T&) + const_iterator const_upper_bound "upper_bound"(const T&) #value_compare value_comp() diff --git a/contrib/tools/cython/Cython/Includes/libcpp/stack.pxd b/contrib/tools/cython/Cython/Includes/libcpp/stack.pxd index 4bfcf0593a..2dc80992b7 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/stack.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/stack.pxd @@ -1,6 +1,6 @@ cdef extern from "<stack>" namespace "std" nogil: cdef cppclass stack[T]: - ctypedef T value_type + ctypedef T value_type stack() except + stack(stack&) except + #stack(Container&) diff --git a/contrib/tools/cython/Cython/Includes/libcpp/string.pxd b/contrib/tools/cython/Cython/Includes/libcpp/string.pxd index 1214a2a7c3..a894144f1f 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/string.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/string.pxd @@ -8,34 +8,34 @@ cdef extern from "<string>" namespace "std::string" nogil: cdef extern from "<string>" namespace "std" nogil: cdef cppclass string: - cppclass iterator: - iterator() - char& operator*() + cppclass iterator: + iterator() + char& operator*() iterator(iterator&) - iterator operator++() - iterator operator--() - bint operator==(iterator) - bint operator!=(iterator) - - cppclass reverse_iterator: - char& operator*() - iterator operator++() - iterator operator--() - iterator operator+(size_t) - iterator operator-(size_t) - bint operator==(reverse_iterator) - bint operator!=(reverse_iterator) - bint operator<(reverse_iterator) - bint operator>(reverse_iterator) - bint operator<=(reverse_iterator) - bint operator>=(reverse_iterator) - - cppclass const_iterator(iterator): - pass - - cppclass const_reverse_iterator(reverse_iterator): - pass - + iterator operator++() + iterator operator--() + bint operator==(iterator) + bint operator!=(iterator) + + cppclass reverse_iterator: + char& operator*() + iterator operator++() + iterator operator--() + iterator operator+(size_t) + iterator operator-(size_t) + bint operator==(reverse_iterator) + bint operator!=(reverse_iterator) + bint operator<(reverse_iterator) + bint operator>(reverse_iterator) + bint operator<=(reverse_iterator) + bint operator>=(reverse_iterator) + + cppclass const_iterator(iterator): + pass + + cppclass const_reverse_iterator(reverse_iterator): + pass + string() except + string(const string& s) except + string(const string& s, size_t pos) except + @@ -45,15 +45,15 @@ cdef extern from "<string>" namespace "std" nogil: string(size_t n, char c) except + string(iterator first, iterator last) except + - iterator begin() - const_iterator const_begin "begin"() - iterator end() - const_iterator const_end "end"() - reverse_iterator rbegin() - const_reverse_iterator const_rbegin "rbegin"() - reverse_iterator rend() - const_reverse_iterator const_rend "rend"() - + iterator begin() + const_iterator const_begin "begin"() + iterator end() + const_iterator const_end "end"() + reverse_iterator rbegin() + const_reverse_iterator const_rbegin "rbegin"() + reverse_iterator rend() + const_reverse_iterator const_rend "rend"() + const char* c_str() const char* data() size_t size() diff --git a/contrib/tools/cython/Cython/Includes/libcpp/typeindex.pxd b/contrib/tools/cython/Cython/Includes/libcpp/typeindex.pxd index 73f94e93a0..d5b7e9149f 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/typeindex.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/typeindex.pxd @@ -1,15 +1,15 @@ -from libcpp cimport bool -from .typeinfo cimport type_info - -# This class is C++11-only -cdef extern 
from "<typeindex>" namespace "std" nogil: - cdef cppclass type_index: - type_index(const type_info &) - const char* name() - size_t hash_code() - bool operator==(const type_index &) - bool operator!=(const type_index &) - bool operator<(const type_index &) - bool operator<=(const type_index &) - bool operator>(const type_index &) - bool operator>=(const type_index &) +from libcpp cimport bool +from .typeinfo cimport type_info + +# This class is C++11-only +cdef extern from "<typeindex>" namespace "std" nogil: + cdef cppclass type_index: + type_index(const type_info &) + const char* name() + size_t hash_code() + bool operator==(const type_index &) + bool operator!=(const type_index &) + bool operator<(const type_index &) + bool operator<=(const type_index &) + bool operator>(const type_index &) + bool operator>=(const type_index &) diff --git a/contrib/tools/cython/Cython/Includes/libcpp/typeinfo.pxd b/contrib/tools/cython/Cython/Includes/libcpp/typeinfo.pxd index 989bce171c..9118e00649 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/typeinfo.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/typeinfo.pxd @@ -1,10 +1,10 @@ -from libcpp cimport bool - -cdef extern from "<typeinfo>" namespace "std" nogil: - cdef cppclass type_info: - const char* name() - int before(const type_info&) - bool operator==(const type_info&) - bool operator!=(const type_info&) - # C++11-only - size_t hash_code() +from libcpp cimport bool + +cdef extern from "<typeinfo>" namespace "std" nogil: + cdef cppclass type_info: + const char* name() + int before(const type_info&) + bool operator==(const type_info&) + bool operator!=(const type_info&) + # C++11-only + size_t hash_code() diff --git a/contrib/tools/cython/Cython/Includes/libcpp/unordered_map.pxd b/contrib/tools/cython/Cython/Includes/libcpp/unordered_map.pxd index 7db3ba0544..a00fbbed28 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/unordered_map.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/unordered_map.pxd @@ -2,9 +2,9 @@ from .utility cimport pair cdef extern from "<unordered_map>" namespace "std" nogil: cdef cppclass unordered_map[T, U, HASH=*, PRED=*, ALLOCATOR=*]: - ctypedef T key_type - ctypedef U mapped_type - ctypedef pair[const T, U] value_type + ctypedef T key_type + ctypedef U mapped_type + ctypedef pair[const T, U] value_type cppclass iterator: pair[T, U]& operator*() iterator operator++() @@ -17,10 +17,10 @@ cdef extern from "<unordered_map>" namespace "std" nogil: iterator operator--() bint operator==(reverse_iterator) bint operator!=(reverse_iterator) - cppclass const_iterator(iterator): - pass - cppclass const_reverse_iterator(reverse_iterator): - pass + cppclass const_iterator(iterator): + pass + cppclass const_reverse_iterator(reverse_iterator): + pass unordered_map() except + unordered_map(unordered_map&) except + #unordered_map(key_compare&) @@ -32,43 +32,43 @@ cdef extern from "<unordered_map>" namespace "std" nogil: bint operator>(unordered_map&, unordered_map&) bint operator<=(unordered_map&, unordered_map&) bint operator>=(unordered_map&, unordered_map&) - U& at(const T&) - const U& const_at "at"(const T&) + U& at(const T&) + const U& const_at "at"(const T&) iterator begin() - const_iterator const_begin "begin"() + const_iterator const_begin "begin"() void clear() size_t count(T&) bint empty() iterator end() - const_iterator const_end "end"() + const_iterator const_end "end"() pair[iterator, iterator] equal_range(T&) - pair[const_iterator, const_iterator] const_equal_range "equal_range"(const T&) + 
pair[const_iterator, const_iterator] const_equal_range "equal_range"(const T&) iterator erase(iterator) iterator erase(iterator, iterator) size_t erase(T&) iterator find(T&) - const_iterator const_find "find"(T&) + const_iterator const_find "find"(T&) pair[iterator, bint] insert(pair[T, U]) # XXX pair[T,U]& iterator insert(iterator, pair[T, U]) # XXX pair[T,U]& - iterator insert(iterator, iterator) + iterator insert(iterator, iterator) #key_compare key_comp() iterator lower_bound(T&) - const_iterator const_lower_bound "lower_bound"(T&) + const_iterator const_lower_bound "lower_bound"(T&) size_t max_size() reverse_iterator rbegin() - const_reverse_iterator const_rbegin "rbegin"() + const_reverse_iterator const_rbegin "rbegin"() reverse_iterator rend() - const_reverse_iterator const_rend "rend"() + const_reverse_iterator const_rend "rend"() size_t size() void swap(unordered_map&) iterator upper_bound(T&) - const_iterator const_upper_bound "upper_bound"(T&) + const_iterator const_upper_bound "upper_bound"(T&) #value_compare value_comp() - void max_load_factor(float) - float max_load_factor() - void rehash(size_t) - void reserve(size_t) - size_t bucket_count() - size_t max_bucket_count() - size_t bucket_size(size_t) - size_t bucket(const T&) + void max_load_factor(float) + float max_load_factor() + void rehash(size_t) + void reserve(size_t) + size_t bucket_count() + size_t max_bucket_count() + size_t bucket_size(size_t) + size_t bucket(const T&) diff --git a/contrib/tools/cython/Cython/Includes/libcpp/unordered_set.pxd b/contrib/tools/cython/Cython/Includes/libcpp/unordered_set.pxd index 47101fb29f..5aa2417528 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/unordered_set.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/unordered_set.pxd @@ -1,8 +1,8 @@ from .utility cimport pair cdef extern from "<unordered_set>" namespace "std" nogil: - cdef cppclass unordered_set[T,HASH=*,PRED=*,ALLOCATOR=*]: - ctypedef T value_type + cdef cppclass unordered_set[T,HASH=*,PRED=*,ALLOCATOR=*]: + ctypedef T value_type cppclass iterator: T& operator*() iterator operator++() @@ -15,10 +15,10 @@ cdef extern from "<unordered_set>" namespace "std" nogil: iterator operator--() bint operator==(reverse_iterator) bint operator!=(reverse_iterator) - cppclass const_iterator(iterator): - pass - cppclass const_reverse_iterator(reverse_iterator): - pass + cppclass const_iterator(iterator): + pass + cppclass const_reverse_iterator(reverse_iterator): + pass unordered_set() except + unordered_set(unordered_set&) except + #unordered_set(key_compare&) @@ -30,40 +30,40 @@ cdef extern from "<unordered_set>" namespace "std" nogil: bint operator<=(unordered_set&, unordered_set&) bint operator>=(unordered_set&, unordered_set&) iterator begin() - const_iterator const_begin "begin"() + const_iterator const_begin "begin"() void clear() size_t count(T&) bint empty() iterator end() - const_iterator const_end "end"() + const_iterator const_end "end"() pair[iterator, iterator] equal_range(T&) - pair[const_iterator, const_iterator] const_equal_range "equal_range"(T&) - iterator erase(iterator) - iterator erase(iterator, iterator) + pair[const_iterator, const_iterator] const_equal_range "equal_range"(T&) + iterator erase(iterator) + iterator erase(iterator, iterator) size_t erase(T&) iterator find(T&) - const_iterator const_find "find"(T&) + const_iterator const_find "find"(T&) pair[iterator, bint] insert(T&) iterator insert(iterator, T&) #key_compare key_comp() - iterator insert(iterator, iterator) + iterator insert(iterator, 
iterator) iterator lower_bound(T&) - const_iterator const_lower_bound "lower_bound"(T&) + const_iterator const_lower_bound "lower_bound"(T&) size_t max_size() reverse_iterator rbegin() - const_reverse_iterator const_rbegin "rbegin"() + const_reverse_iterator const_rbegin "rbegin"() reverse_iterator rend() - const_reverse_iterator const_rend "rend"() + const_reverse_iterator const_rend "rend"() size_t size() void swap(unordered_set&) iterator upper_bound(T&) - const_iterator const_upper_bound "upper_bound"(T&) + const_iterator const_upper_bound "upper_bound"(T&) #value_compare value_comp() - void max_load_factor(float) - float max_load_factor() - void rehash(size_t) - void reserve(size_t) - size_t bucket_count() - size_t max_bucket_count() - size_t bucket_size(size_t) - size_t bucket(const T&) + void max_load_factor(float) + float max_load_factor() + void rehash(size_t) + void reserve(size_t) + size_t bucket_count() + size_t max_bucket_count() + size_t bucket_size(size_t) + size_t bucket(const T&) diff --git a/contrib/tools/cython/Cython/Includes/libcpp/utility.pxd b/contrib/tools/cython/Cython/Includes/libcpp/utility.pxd index fd306e0eeb..e0df69b166 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/utility.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/utility.pxd @@ -1,7 +1,7 @@ cdef extern from "<utility>" namespace "std" nogil: cdef cppclass pair[T, U]: - ctypedef T first_type - ctypedef U second_type + ctypedef T first_type + ctypedef U second_type T first U second pair() except + diff --git a/contrib/tools/cython/Cython/Includes/libcpp/vector.pxd b/contrib/tools/cython/Cython/Includes/libcpp/vector.pxd index 02f772326c..9b007dd0c7 100644 --- a/contrib/tools/cython/Cython/Includes/libcpp/vector.pxd +++ b/contrib/tools/cython/Cython/Includes/libcpp/vector.pxd @@ -1,11 +1,11 @@ cdef extern from "<vector>" namespace "std" nogil: - cdef cppclass vector[T,ALLOCATOR=*]: - ctypedef T value_type - ctypedef ALLOCATOR allocator_type + cdef cppclass vector[T,ALLOCATOR=*]: + ctypedef T value_type + ctypedef ALLOCATOR allocator_type # these should really be allocator_type.size_type and # allocator_type.difference_type to be true to the C++ definition - # but cython doesn't support deferred access on template arguments + # but cython doesn't support deferred access on template arguments ctypedef size_t size_type ctypedef ptrdiff_t difference_type @@ -35,10 +35,10 @@ cdef extern from "<vector>" namespace "std" nogil: bint operator>(reverse_iterator) bint operator<=(reverse_iterator) bint operator>=(reverse_iterator) - cppclass const_iterator(iterator): - pass - cppclass const_reverse_iterator(reverse_iterator): - pass + cppclass const_iterator(iterator): + pass + cppclass const_reverse_iterator(reverse_iterator): + pass vector() except + vector(vector&) except + vector(size_type) except + @@ -53,25 +53,25 @@ cdef extern from "<vector>" namespace "std" nogil: bint operator<=(vector&, vector&) bint operator>=(vector&, vector&) void assign(size_type, const T&) - void assign[input_iterator](input_iterator, input_iterator) except + + void assign[input_iterator](input_iterator, input_iterator) except + T& at(size_type) except + T& back() iterator begin() - const_iterator const_begin "begin"() + const_iterator const_begin "begin"() size_type capacity() void clear() bint empty() iterator end() - const_iterator const_end "end"() + const_iterator const_end "end"() iterator erase(iterator) iterator erase(iterator, iterator) T& front() - iterator insert(iterator, const T&) except + + iterator 
insert(iterator, const T&) except + iterator insert(iterator, size_type, const T&) except + iterator insert[Iter](iterator, Iter, Iter) except + size_type max_size() void pop_back() - void push_back(T&) except + + void push_back(T&) except + reverse_iterator rbegin() const_reverse_iterator const_rbegin "crbegin"() reverse_iterator rend() @@ -81,8 +81,8 @@ cdef extern from "<vector>" namespace "std" nogil: void resize(size_type, T&) except + size_type size() void swap(vector&) - - # C++11 methods + + # C++11 methods T* data() - const T* const_data "data"() + const T* const_data "data"() void shrink_to_fit() diff --git a/contrib/tools/cython/Cython/Includes/numpy.pxd b/contrib/tools/cython/Cython/Includes/numpy.pxd index 603c08b4f2..789669dac1 100644 --- a/contrib/tools/cython/Cython/Includes/numpy.pxd +++ b/contrib/tools/cython/Cython/Includes/numpy.pxd @@ -3,7 +3,7 @@ # If any of the PyArray_* functions are called, import_array must be # called first. # -# This also defines backwards-compatibility buffer acquisition +# This also defines backwards-compatibility buffer acquisition # code for use in Python 2.x (or Python <= 2.5 when NumPy starts # implementing PEP-3118 directly). # @@ -17,9 +17,9 @@ DEF _buffer_format_string_len = 255 cimport cpython.buffer as pybuf -from cpython.ref cimport Py_INCREF -from cpython.mem cimport PyObject_Malloc, PyObject_Free -from cpython.object cimport PyObject, PyTypeObject +from cpython.ref cimport Py_INCREF +from cpython.mem cimport PyObject_Malloc, PyObject_Free +from cpython.object cimport PyObject, PyTypeObject from cpython.type cimport type cimport libc.stdio as stdio @@ -52,8 +52,8 @@ cdef extern from "numpy/arrayobject.h": NPY_STRING NPY_UNICODE NPY_VOID - NPY_DATETIME - NPY_TIMEDELTA + NPY_DATETIME + NPY_TIMEDELTA NPY_NTYPES NPY_NOTYPE @@ -90,15 +90,15 @@ cdef extern from "numpy/arrayobject.h": NPY_ANYORDER NPY_CORDER NPY_FORTRANORDER - NPY_KEEPORDER - - ctypedef enum NPY_CASTING: - NPY_NO_CASTING - NPY_EQUIV_CASTING - NPY_SAFE_CASTING - NPY_SAME_KIND_CASTING - NPY_UNSAFE_CASTING - + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + ctypedef enum NPY_CLIPMODE: NPY_CLIP NPY_WRAP @@ -123,7 +123,7 @@ cdef extern from "numpy/arrayobject.h": NPY_SEARCHRIGHT enum: - # DEPRECATED since NumPy 1.7 ! Do not use in new code! + # DEPRECATED since NumPy 1.7 ! Do not use in new code! NPY_C_CONTIGUOUS NPY_F_CONTIGUOUS NPY_CONTIGUOUS @@ -156,37 +156,37 @@ cdef extern from "numpy/arrayobject.h": NPY_UPDATE_ALL - enum: - # Added in NumPy 1.7 to replace the deprecated enums above. - NPY_ARRAY_C_CONTIGUOUS - NPY_ARRAY_F_CONTIGUOUS - NPY_ARRAY_OWNDATA - NPY_ARRAY_FORCECAST - NPY_ARRAY_ENSURECOPY - NPY_ARRAY_ENSUREARRAY - NPY_ARRAY_ELEMENTSTRIDES - NPY_ARRAY_ALIGNED - NPY_ARRAY_NOTSWAPPED - NPY_ARRAY_WRITEABLE - NPY_ARRAY_UPDATEIFCOPY - - NPY_ARRAY_BEHAVED - NPY_ARRAY_BEHAVED_NS - NPY_ARRAY_CARRAY - NPY_ARRAY_CARRAY_RO - NPY_ARRAY_FARRAY - NPY_ARRAY_FARRAY_RO - NPY_ARRAY_DEFAULT - - NPY_ARRAY_IN_ARRAY - NPY_ARRAY_OUT_ARRAY - NPY_ARRAY_INOUT_ARRAY - NPY_ARRAY_IN_FARRAY - NPY_ARRAY_OUT_FARRAY - NPY_ARRAY_INOUT_FARRAY - - NPY_ARRAY_UPDATE_ALL - + enum: + # Added in NumPy 1.7 to replace the deprecated enums above. 
+ NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_UPDATEIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + cdef enum: NPY_MAXDIMS @@ -194,37 +194,37 @@ cdef extern from "numpy/arrayobject.h": ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) - ctypedef struct PyArray_ArrayDescr: - # shape is a tuple, but Cython doesn't support "tuple shape" - # inside a non-PyObject declaration, so we have to declare it - # as just a PyObject*. - PyObject* shape - - ctypedef struct PyArray_Descr: - pass - - ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: # Use PyDataType_* macros when possible, however there are no macros - # for accessing some of the fields, so some are defined. - cdef PyTypeObject* typeobj - cdef char kind - cdef char type - # Numpy sometimes mutates this without warning (e.g. it'll - # sometimes change "|" to "<" in shared dtype objects on - # little-endian machines). If this matters to you, use - # PyArray_IsNativeByteOrder(dtype.byteorder) instead of - # directly accessing this field. - cdef char byteorder - cdef char flags + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + cdef char flags cdef int type_num cdef int itemsize "elsize" - cdef int alignment - cdef dict fields + cdef int alignment + cdef dict fields cdef tuple names - # Use PyDataType_HASSUBARRAY to test whether this field is - # valid (the pointer can be NULL). Most users should access - # this field via the inline helper method PyDataType_SHAPE. - cdef PyArray_ArrayDescr* subarray + # Use PyDataType_HASSUBARRAY to test whether this field is + # valid (the pointer can be NULL). Most users should access + # this field via the inline helper method PyDataType_SHAPE. + cdef PyArray_ArrayDescr* subarray ctypedef extern class numpy.flatiter [object PyArrayIterObject]: # Use through macros @@ -239,7 +239,7 @@ cdef extern from "numpy/arrayobject.h": # like PyArrayObject**. pass - ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: cdef __cythonbufferdefaults__ = {"mode": "strided"} cdef: @@ -249,7 +249,7 @@ cdef extern from "numpy/arrayobject.h": int ndim "nd" npy_intp *shape "dimensions" npy_intp *strides - dtype descr # deprecated since NumPy 1.7 ! + dtype descr # deprecated since NumPy 1.7 ! 
PyObject* base # Note: This syntax (function definition in pxd files) is an @@ -257,30 +257,30 @@ cdef extern from "numpy/arrayobject.h": # -- the details of this may change. def __getbuffer__(ndarray self, Py_buffer* info, int flags): # This implementation of getbuffer is geared towards Cython - # requirements, and does not yet fulfill the PEP. + # requirements, and does not yet fulfill the PEP. # In particular strided access is always provided regardless # of flags - cdef int i, ndim + cdef int i, ndim cdef int endian_detector = 1 cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) ndim = PyArray_NDIM(self) if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): + and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): raise ValueError(u"ndarray is not C contiguous") if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): + and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): raise ValueError(u"ndarray is not Fortran contiguous") info.buf = PyArray_DATA(self) info.ndim = ndim - if sizeof(npy_intp) != sizeof(Py_ssize_t): + if sizeof(npy_intp) != sizeof(Py_ssize_t): # Allocate new buffer for strides and shape info. # This is allocated as one block, strides first. - info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) + info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) info.shape = info.strides + ndim for i in range(ndim): info.strides[i] = PyArray_STRIDES(self)[i] @@ -294,12 +294,12 @@ cdef extern from "numpy/arrayobject.h": cdef int t cdef char* f = NULL - cdef dtype descr = <dtype>PyArray_DESCR(self) + cdef dtype descr = <dtype>PyArray_DESCR(self) cdef int offset - info.obj = self + info.obj = self - if not PyDataType_HASFIELDS(descr): + if not PyDataType_HASFIELDS(descr): t = descr.type_num if ((descr.byteorder == c'>' and little_endian) or (descr.byteorder == c'<' and not little_endian)): @@ -326,7 +326,7 @@ cdef extern from "numpy/arrayobject.h": info.format = f return else: - info.format = <char*>PyObject_Malloc(_buffer_format_string_len) + info.format = <char*>PyObject_Malloc(_buffer_format_string_len) info.format[0] = c'^' # Native data types, manual alignment offset = 0 f = _util_dtypestring(descr, info.format + 1, @@ -336,13 +336,13 @@ cdef extern from "numpy/arrayobject.h": def __releasebuffer__(ndarray self, Py_buffer* info): if PyArray_HASFIELDS(self): - PyObject_Free(info.format) + PyObject_Free(info.format) if sizeof(npy_intp) != sizeof(Py_ssize_t): - PyObject_Free(info.strides) + PyObject_Free(info.strides) # info.shape was stored after info.strides in the same block - ctypedef unsigned char npy_bool + ctypedef unsigned char npy_bool ctypedef signed char npy_byte ctypedef signed short npy_short @@ -389,41 +389,41 @@ cdef extern from "numpy/arrayobject.h": double imag ctypedef struct npy_clongdouble: - long double real - long double imag + long double real + long double imag ctypedef struct npy_complex64: - float real - float imag + float real + float imag ctypedef struct npy_complex128: double real double imag ctypedef struct npy_complex160: - long double real - long double imag + long double real + long double imag ctypedef struct npy_complex192: - long double real - long double imag + long double real + long double imag ctypedef struct npy_complex256: - long double real - long double imag + long double real + long double imag ctypedef struct PyArray_Dims: 
npy_intp *ptr int len - int _import_array() except -1 + int _import_array() except -1 # # Macros from ndarrayobject.h # bint PyArray_CHKFLAGS(ndarray m, int flags) - bint PyArray_IS_C_CONTIGUOUS(ndarray arr) - bint PyArray_IS_F_CONTIGUOUS(ndarray arr) + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) bint PyArray_ISCONTIGUOUS(ndarray m) bint PyArray_ISWRITEABLE(ndarray m) bint PyArray_ISALIGNED(ndarray m) @@ -440,8 +440,8 @@ cdef extern from "numpy/arrayobject.h": npy_intp PyArray_DIM(ndarray, size_t) npy_intp PyArray_STRIDE(ndarray, size_t) - PyObject *PyArray_BASE(ndarray) # returns borrowed reference! - PyArray_Descr *PyArray_DESCR(ndarray) # returns borrowed reference to dtype! + PyObject *PyArray_BASE(ndarray) # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) # returns borrowed reference to dtype! int PyArray_FLAGS(ndarray) npy_intp PyArray_ITEMSIZE(ndarray) int PyArray_TYPE(ndarray arr) @@ -477,7 +477,7 @@ cdef extern from "numpy/arrayobject.h": bint PyDataType_ISEXTENDED(dtype) bint PyDataType_ISOBJECT(dtype) bint PyDataType_HASFIELDS(dtype) - bint PyDataType_HASSUBARRAY(dtype) + bint PyDataType_HASSUBARRAY(dtype) bint PyArray_ISBOOL(ndarray) bint PyArray_ISUNSIGNED(ndarray) @@ -497,8 +497,8 @@ cdef extern from "numpy/arrayobject.h": bint PyArray_ISVARIABLE(ndarray) bint PyArray_SAFEALIGNEDCOPY(ndarray) - bint PyArray_ISNBO(char) # works on ndarray.byteorder - bint PyArray_IsNativeByteOrder(char) # works on ndarray.byteorder + bint PyArray_ISNBO(char) # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) # works on ndarray.byteorder bint PyArray_ISNOTSWAPPED(ndarray) bint PyArray_ISBYTESWAPPED(ndarray) @@ -540,8 +540,8 @@ cdef extern from "numpy/arrayobject.h": object PyArray_FROM_O(object) object PyArray_FROM_OF(object m, int flags) - object PyArray_FROM_OT(object m, int type) - object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) object PyArray_FROMANY(object m, int type, int min, int max, int flags) object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) @@ -764,7 +764,7 @@ cdef extern from "numpy/arrayobject.h": object PyArray_CheckAxis (ndarray, int *, int) npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) int PyArray_CompareString (char *, char *, size_t) - int PyArray_SetBaseObject(ndarray, base) # NOTE: steals a reference to base! Use "set_array_base()" instead. + int PyArray_SetBaseObject(ndarray, base) # NOTE: steals a reference to base! Use "set_array_base()" instead. # Typedefs that matches the runtime dtype objects in @@ -833,12 +833,12 @@ cdef inline object PyArray_MultiIterNew4(a, b, c, d): cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) -cdef inline tuple PyDataType_SHAPE(dtype d): - if PyDataType_HASSUBARRAY(d): - return <tuple>d.subarray.shape - else: - return () - +cdef inline tuple PyDataType_SHAPE(dtype d): + if PyDataType_HASSUBARRAY(d): + return <tuple>d.subarray.shape + else: + return () + cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # Recursive utility function used in __getbuffer__ to get format # string. The new location in the format string is returned. 
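
A minimal sketch of the PyDataType_SHAPE helper defined in the hunk above (not part of the patch; assumes numpy is installed and `cimport numpy` resolves to this pxd):

    cimport numpy as cnp
    import numpy as np

    cnp.import_array()                    # required before PyArray_* calls

    def subarray_shape():
        # A dtype whose single element is a (2, 3) block of float64:
        cdef cnp.dtype d = np.dtype(('f8', (2, 3)))
        # Returns d.subarray.shape when PyDataType_HASSUBARRAY(d), else ().
        return cnp.PyDataType_SHAPE(d)    # -> (2, 3)
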
@@ -852,7 +852,7 @@ cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset fields = descr.fields[childname] child, new_offset = fields - if (end - f) - <int>(new_offset - offset[0]) < 15: + if (end - f) - <int>(new_offset - offset[0]) < 15: raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") if ((child.byteorder == c'>' and little_endian) or @@ -1017,34 +1017,34 @@ cdef extern from "numpy/ufuncobject.h": (PyUFuncGenericFunction *, void **, char *, int, int, int, int, char *, char *, int, char *) - int _import_umath() except -1 + int _import_umath() except -1 cdef inline void set_array_base(ndarray arr, object base): - Py_INCREF(base) # important to do this before stealing the reference below! - PyArray_SetBaseObject(arr, base) + Py_INCREF(base) # important to do this before stealing the reference below! + PyArray_SetBaseObject(arr, base) cdef inline object get_array_base(ndarray arr): - base = PyArray_BASE(arr) - if base is NULL: + base = PyArray_BASE(arr) + if base is NULL: return None - return <object>base - -# Versions of the import_* functions which are more suitable for -# Cython code. -cdef inline int import_array() except -1: - try: - _import_array() - except Exception: - raise ImportError("numpy.core.multiarray failed to import") - -cdef inline int import_umath() except -1: - try: - _import_umath() - except Exception: - raise ImportError("numpy.core.umath failed to import") - -cdef inline int import_ufunc() except -1: - try: - _import_umath() - except Exception: - raise ImportError("numpy.core.umath failed to import") + return <object>base + +# Versions of the import_* functions which are more suitable for +# Cython code. +cdef inline int import_array() except -1: + try: + _import_array() + except Exception: + raise ImportError("numpy.core.multiarray failed to import") + +cdef inline int import_umath() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy.core.umath failed to import") + +cdef inline int import_ufunc() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy.core.umath failed to import") diff --git a/contrib/tools/cython/Cython/Includes/numpy/__init__.pxd b/contrib/tools/cython/Cython/Includes/numpy/__init__.pxd index fa8c763de2..15700c05ef 100644 --- a/contrib/tools/cython/Cython/Includes/numpy/__init__.pxd +++ b/contrib/tools/cython/Cython/Includes/numpy/__init__.pxd @@ -3,7 +3,7 @@ # If any of the PyArray_* functions are called, import_array must be # called first. # -# This also defines backwards-compatibility buffer acquisition +# This also defines backwards-compatibility buffer acquisition # code for use in Python 2.x (or Python <= 2.5 when NumPy starts # implementing PEP-3118 directly). 
# @@ -17,9 +17,9 @@ DEF _buffer_format_string_len = 255 cimport cpython.buffer as pybuf -from cpython.ref cimport Py_INCREF +from cpython.ref cimport Py_INCREF from cpython.mem cimport PyObject_Malloc, PyObject_Free -from cpython.object cimport PyObject, PyTypeObject +from cpython.object cimport PyObject, PyTypeObject from cpython.type cimport type cimport libc.stdio as stdio @@ -92,13 +92,13 @@ cdef extern from "numpy/arrayobject.h": NPY_FORTRANORDER NPY_KEEPORDER - ctypedef enum NPY_CASTING: - NPY_NO_CASTING - NPY_EQUIV_CASTING - NPY_SAFE_CASTING - NPY_SAME_KIND_CASTING - NPY_UNSAFE_CASTING - + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + ctypedef enum NPY_CLIPMODE: NPY_CLIP NPY_WRAP @@ -123,7 +123,7 @@ cdef extern from "numpy/arrayobject.h": NPY_SEARCHRIGHT enum: - # DEPRECATED since NumPy 1.7 ! Do not use in new code! + # DEPRECATED since NumPy 1.7 ! Do not use in new code! NPY_C_CONTIGUOUS NPY_F_CONTIGUOUS NPY_CONTIGUOUS @@ -156,37 +156,37 @@ cdef extern from "numpy/arrayobject.h": NPY_UPDATE_ALL - enum: - # Added in NumPy 1.7 to replace the deprecated enums above. - NPY_ARRAY_C_CONTIGUOUS - NPY_ARRAY_F_CONTIGUOUS - NPY_ARRAY_OWNDATA - NPY_ARRAY_FORCECAST - NPY_ARRAY_ENSURECOPY - NPY_ARRAY_ENSUREARRAY - NPY_ARRAY_ELEMENTSTRIDES - NPY_ARRAY_ALIGNED - NPY_ARRAY_NOTSWAPPED - NPY_ARRAY_WRITEABLE - NPY_ARRAY_UPDATEIFCOPY - - NPY_ARRAY_BEHAVED - NPY_ARRAY_BEHAVED_NS - NPY_ARRAY_CARRAY - NPY_ARRAY_CARRAY_RO - NPY_ARRAY_FARRAY - NPY_ARRAY_FARRAY_RO - NPY_ARRAY_DEFAULT - - NPY_ARRAY_IN_ARRAY - NPY_ARRAY_OUT_ARRAY - NPY_ARRAY_INOUT_ARRAY - NPY_ARRAY_IN_FARRAY - NPY_ARRAY_OUT_FARRAY - NPY_ARRAY_INOUT_FARRAY - - NPY_ARRAY_UPDATE_ALL - + enum: + # Added in NumPy 1.7 to replace the deprecated enums above. + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_UPDATEIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + cdef enum: NPY_MAXDIMS @@ -200,26 +200,26 @@ cdef extern from "numpy/arrayobject.h": # as just a PyObject*. PyObject* shape - ctypedef struct PyArray_Descr: - pass - - ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: # Use PyDataType_* macros when possible, however there are no macros - # for accessing some of the fields, so some are defined. - cdef PyTypeObject* typeobj - cdef char kind - cdef char type + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type # Numpy sometimes mutates this without warning (e.g. it'll # sometimes change "|" to "<" in shared dtype objects on # little-endian machines). If this matters to you, use # PyArray_IsNativeByteOrder(dtype.byteorder) instead of # directly accessing this field. 
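
To illustrate the advice in the comment above, a hypothetical sketch (not part of the patch) that checks byte order through the macro instead of reading the mutable field directly:

    cimport numpy as cnp
    import numpy as np

    cnp.import_array()

    def has_native_order(cnp.ndarray a):
        # Go through PyArray_IsNativeByteOrder rather than comparing the
        # raw byteorder character to '<' or '>' yourself.
        cdef cnp.dtype descr = <cnp.dtype>cnp.PyArray_DESCR(a)
        return bool(cnp.PyArray_IsNativeByteOrder(descr.byteorder))
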
- cdef char byteorder - cdef char flags + cdef char byteorder + cdef char flags cdef int type_num cdef int itemsize "elsize" - cdef int alignment - cdef dict fields + cdef int alignment + cdef dict fields cdef tuple names # Use PyDataType_HASSUBARRAY to test whether this field is # valid (the pointer can be NULL). Most users should access @@ -239,7 +239,7 @@ cdef extern from "numpy/arrayobject.h": # like PyArrayObject**. pass - ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: cdef __cythonbufferdefaults__ = {"mode": "strided"} cdef: @@ -249,7 +249,7 @@ cdef extern from "numpy/arrayobject.h": int ndim "nd" npy_intp *shape "dimensions" npy_intp *strides - dtype descr # deprecated since NumPy 1.7 ! + dtype descr # deprecated since NumPy 1.7 ! PyObject* base # Note: This syntax (function definition in pxd files) is an @@ -268,11 +268,11 @@ cdef extern from "numpy/arrayobject.h": ndim = PyArray_NDIM(self) if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): + and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): raise ValueError(u"ndarray is not C contiguous") if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): + and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): raise ValueError(u"ndarray is not Fortran contiguous") info.buf = PyArray_DATA(self) @@ -294,7 +294,7 @@ cdef extern from "numpy/arrayobject.h": cdef int t cdef char* f = NULL - cdef dtype descr = <dtype>PyArray_DESCR(self) + cdef dtype descr = <dtype>PyArray_DESCR(self) cdef int offset info.obj = self @@ -415,14 +415,14 @@ cdef extern from "numpy/arrayobject.h": npy_intp *ptr int len - int _import_array() except -1 + int _import_array() except -1 # # Macros from ndarrayobject.h # bint PyArray_CHKFLAGS(ndarray m, int flags) - bint PyArray_IS_C_CONTIGUOUS(ndarray arr) - bint PyArray_IS_F_CONTIGUOUS(ndarray arr) + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) bint PyArray_ISCONTIGUOUS(ndarray m) bint PyArray_ISWRITEABLE(ndarray m) bint PyArray_ISALIGNED(ndarray m) @@ -439,8 +439,8 @@ cdef extern from "numpy/arrayobject.h": npy_intp PyArray_DIM(ndarray, size_t) npy_intp PyArray_STRIDE(ndarray, size_t) - PyObject *PyArray_BASE(ndarray) # returns borrowed reference! - PyArray_Descr *PyArray_DESCR(ndarray) # returns borrowed reference to dtype! + PyObject *PyArray_BASE(ndarray) # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) # returns borrowed reference to dtype! int PyArray_FLAGS(ndarray) npy_intp PyArray_ITEMSIZE(ndarray) int PyArray_TYPE(ndarray arr) @@ -763,7 +763,7 @@ cdef extern from "numpy/arrayobject.h": object PyArray_CheckAxis (ndarray, int *, int) npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) int PyArray_CompareString (char *, char *, size_t) - int PyArray_SetBaseObject(ndarray, base) # NOTE: steals a reference to base! Use "set_array_base()" instead. + int PyArray_SetBaseObject(ndarray, base) # NOTE: steals a reference to base! Use "set_array_base()" instead. 
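
A small sketch of the set_array_base/get_array_base round trip built on PyArray_SetBaseObject (not part of the patch; `owner` is an arbitrary stand-in object):

    cimport numpy as cnp
    import numpy as np

    cnp.import_array()

    def demo_base():
        cdef cnp.ndarray a = np.empty(3)
        owner = object()          # stand-in for whatever owns the memory
        assert cnp.get_array_base(a) is None
        # set_array_base Py_INCREFs owner first, because
        # PyArray_SetBaseObject steals a reference (see the NOTE above).
        cnp.set_array_base(a, owner)
        return cnp.get_array_base(a) is owner    # -> True
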
# Typedefs that matches the runtime dtype objects in @@ -1016,34 +1016,34 @@ cdef extern from "numpy/ufuncobject.h": (PyUFuncGenericFunction *, void **, char *, int, int, int, int, char *, char *, int, char *) - int _import_umath() except -1 + int _import_umath() except -1 cdef inline void set_array_base(ndarray arr, object base): - Py_INCREF(base) # important to do this before stealing the reference below! - PyArray_SetBaseObject(arr, base) + Py_INCREF(base) # important to do this before stealing the reference below! + PyArray_SetBaseObject(arr, base) cdef inline object get_array_base(ndarray arr): - base = PyArray_BASE(arr) - if base is NULL: + base = PyArray_BASE(arr) + if base is NULL: return None - return <object>base - -# Versions of the import_* functions which are more suitable for -# Cython code. -cdef inline int import_array() except -1: - try: - _import_array() - except Exception: - raise ImportError("numpy.core.multiarray failed to import") - -cdef inline int import_umath() except -1: - try: - _import_umath() - except Exception: - raise ImportError("numpy.core.umath failed to import") - -cdef inline int import_ufunc() except -1: - try: - _import_umath() - except Exception: - raise ImportError("numpy.core.umath failed to import") + return <object>base + +# Versions of the import_* functions which are more suitable for +# Cython code. +cdef inline int import_array() except -1: + try: + _import_array() + except Exception: + raise ImportError("numpy.core.multiarray failed to import") + +cdef inline int import_umath() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy.core.umath failed to import") + +cdef inline int import_ufunc() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy.core.umath failed to import") diff --git a/contrib/tools/cython/Cython/Includes/numpy/math.pxd b/contrib/tools/cython/Cython/Includes/numpy/math.pxd index 20cbe9f5f9..c16df1c51a 100644 --- a/contrib/tools/cython/Cython/Includes/numpy/math.pxd +++ b/contrib/tools/cython/Cython/Includes/numpy/math.pxd @@ -25,7 +25,7 @@ cdef extern from "numpy/npy_math.h" nogil: long double NZERO "NPY_NZERO" # negative zero # These four are actually macros and work on any floating-point type. 
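
A minimal sketch using the npy_math wrappers declared in this math.pxd (not part of the patch; assumes the NumPy headers are on the include path):

    from numpy.math cimport isnan, isinf, logaddexp

    def classify(double x):
        if isnan(x):
            return "nan"
        cdef int s = isinf(x)     # -1 / 0 / +1, per the declaration above
        if s != 0:
            return "-inf" if s < 0 else "+inf"
        return "finite"

    def log_sum_exp(double a, double b):
        # log(exp(a) + exp(b)) without overflowing the intermediate exp().
        return logaddexp(a, b)
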
- int isinf "npy_isinf"(long double) # -1 / 0 / 1 + int isinf "npy_isinf"(long double) # -1 / 0 / 1 bint isfinite "npy_isfinite"(long double) bint isnan "npy_isnan"(long double) bint signbit "npy_signbit"(long double) @@ -79,11 +79,11 @@ cdef extern from "numpy/npy_math.h" nogil: float log1pf "npy_log1pf"(float x) float exp2f "npy_exp2f"(float x) float log2f "npy_log2f"(float x) - float atan2f "npy_atan2f"(float x, float y) - float hypotf "npy_hypotf"(float x, float y) - float powf "npy_powf"(float x, float y) - float fmodf "npy_fmodf"(float x, float y) - float modff "npy_modff"(float x, float* y) + float atan2f "npy_atan2f"(float x, float y) + float hypotf "npy_hypotf"(float x, float y) + float powf "npy_powf"(float x, float y) + float fmodf "npy_fmodf"(float x, float y) + float modff "npy_modff"(float x, float* y) # Long double C99 functions long double sinl "npy_sinl"(long double x) @@ -110,24 +110,24 @@ cdef extern from "numpy/npy_math.h" nogil: long double log1pl "npy_log1pl"(long double x) long double exp2l "npy_exp2l"(long double x) long double log2l "npy_log2l"(long double x) - long double atan2l "npy_atan2l"(long double x, long double y) - long double hypotl "npy_hypotl"(long double x, long double y) - long double powl "npy_powl"(long double x, long double y) - long double fmodl "npy_fmodl"(long double x, long double y) - long double modfl "npy_modfl"(long double x, long double* y) + long double atan2l "npy_atan2l"(long double x, long double y) + long double hypotl "npy_hypotl"(long double x, long double y) + long double powl "npy_powl"(long double x, long double y) + long double fmodl "npy_fmodl"(long double x, long double y) + long double modfl "npy_modfl"(long double x, long double* y) # NumPy extensions float deg2radf "npy_deg2radf"(float x) float rad2degf "npy_rad2degf"(float x) - float logaddexpf "npy_logaddexpf"(float x, float y) - float logaddexp2f "npy_logaddexp2f"(float x, float y) + float logaddexpf "npy_logaddexpf"(float x, float y) + float logaddexp2f "npy_logaddexp2f"(float x, float y) double deg2rad "npy_deg2rad"(double x) double rad2deg "npy_rad2deg"(double x) - double logaddexp "npy_logaddexp"(double x, double y) - double logaddexp2 "npy_logaddexp2"(double x, double y) + double logaddexp "npy_logaddexp"(double x, double y) + double logaddexp2 "npy_logaddexp2"(double x, double y) long double deg2radl "npy_deg2radl"(long double x) long double rad2degl "npy_rad2degl"(long double x) - long double logaddexpl "npy_logaddexpl"(long double x, long double y) - long double logaddexp2l "npy_logaddexp2l"(long double x, long double y) + long double logaddexpl "npy_logaddexpl"(long double x, long double y) + long double logaddexp2l "npy_logaddexp2l"(long double x, long double y) diff --git a/contrib/tools/cython/Cython/Includes/openmp.pxd b/contrib/tools/cython/Cython/Includes/openmp.pxd index 767dcf6efd..30873a588b 100644 --- a/contrib/tools/cython/Cython/Includes/openmp.pxd +++ b/contrib/tools/cython/Cython/Includes/openmp.pxd @@ -1,4 +1,4 @@ -cdef extern from "<omp.h>": +cdef extern from "<omp.h>": ctypedef struct omp_lock_t: pass ctypedef struct omp_nest_lock_t: diff --git a/contrib/tools/cython/Cython/Includes/posix/dlfcn.pxd b/contrib/tools/cython/Cython/Includes/posix/dlfcn.pxd index cc0b6ab93a..cff5bea15a 100644 --- a/contrib/tools/cython/Cython/Includes/posix/dlfcn.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/dlfcn.pxd @@ -1,14 +1,14 @@ -# POSIX dynamic linking/loading interface. 
-# http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/dlfcn.h.html - -cdef extern from "<dlfcn.h>" nogil: - void *dlopen(const char *, int) - char *dlerror() - void *dlsym(void *, const char *) - int dlclose(void *) - - enum: - RTLD_LAZY - RTLD_NOW - RTLD_GLOBAL - RTLD_LOCAL +# POSIX dynamic linking/loading interface. +# http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/dlfcn.h.html + +cdef extern from "<dlfcn.h>" nogil: + void *dlopen(const char *, int) + char *dlerror() + void *dlsym(void *, const char *) + int dlclose(void *) + + enum: + RTLD_LAZY + RTLD_NOW + RTLD_GLOBAL + RTLD_LOCAL diff --git a/contrib/tools/cython/Cython/Includes/posix/fcntl.pxd b/contrib/tools/cython/Cython/Includes/posix/fcntl.pxd index 889b2d9b96..9afc33a368 100644 --- a/contrib/tools/cython/Cython/Includes/posix/fcntl.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/fcntl.pxd @@ -1,6 +1,6 @@ # http://www.opengroup.org/onlinepubs/009695399/basedefs/fcntl.h.html -cdef extern from "<fcntl.h>" nogil: +cdef extern from "<fcntl.h>" nogil: enum: F_DUPFD enum: F_GETFD diff --git a/contrib/tools/cython/Cython/Includes/posix/ioctl.pxd b/contrib/tools/cython/Cython/Includes/posix/ioctl.pxd index b9e8814b8f..dacbc307f3 100644 --- a/contrib/tools/cython/Cython/Includes/posix/ioctl.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/ioctl.pxd @@ -1,4 +1,4 @@ -cdef extern from "<sys/ioctl.h>" nogil: +cdef extern from "<sys/ioctl.h>" nogil: enum: FIONBIO int ioctl(int fd, int request, ...) diff --git a/contrib/tools/cython/Cython/Includes/posix/mman.pxd b/contrib/tools/cython/Cython/Includes/posix/mman.pxd index 483547621a..c810f431b3 100644 --- a/contrib/tools/cython/Cython/Includes/posix/mman.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/mman.pxd @@ -1,99 +1,99 @@ -# http://pubs.opengroup.org/onlinepubs/009695399/basedefs/sys/mman.h.html - -from posix.types cimport off_t, mode_t - -cdef extern from "<sys/mman.h>" nogil: - enum: PROT_EXEC # protection bits for mmap/mprotect - enum: PROT_READ - enum: PROT_WRITE - enum: PROT_NONE - - enum: MAP_PRIVATE # flag bits for mmap - enum: MAP_SHARED - enum: MAP_FIXED - enum: MAP_ANON # These three are not in POSIX, but are - enum: MAP_ANONYMOUS # fairly common in spelling/semantics - enum: MAP_STACK - - enum: MAP_LOCKED # Typically available only on Linux - enum: MAP_HUGETLB - enum: MAP_POPULATE - enum: MAP_NORESERVE - enum: MAP_GROWSDOWN - - enum: MAP_NOCORE # Typically available only on BSD - enum: MAP_NOSYNC - - void *MAP_FAILED - - void *mmap(void *addr, size_t Len, int prot, int flags, int fd, off_t off) - int munmap(void *addr, size_t Len) - int mprotect(void *addr, size_t Len, int prot) - - enum: MS_ASYNC - enum: MS_SYNC - enum: MS_INVALIDATE - int msync(void *addr, size_t Len, int flags) - - enum: POSIX_MADV_NORMAL # POSIX advice flags - enum: POSIX_MADV_SEQUENTIAL - enum: POSIX_MADV_RANDOM - enum: POSIX_MADV_WILLNEED - enum: POSIX_MADV_DONTNEED - int posix_madvise(void *addr, size_t Len, int advice) - - enum: MCL_CURRENT - enum: MCL_FUTURE - int mlock(const void *addr, size_t Len) - int munlock(const void *addr, size_t Len) - int mlockall(int flags) - int munlockall() - # Linux-specific - enum: MLOCK_ONFAULT - enum: MCL_ONFAULT - int mlock2(const void *addr, size_t len, int flags) - - int shm_open(const char *name, int oflag, mode_t mode) - int shm_unlink(const char *name) - - # often available - enum: MADV_NORMAL # pre-POSIX advice flags; should translate 1-1 to POSIX_* - enum: MADV_RANDOM # but in practice it is not always the same. 
- enum: MADV_SEQUENTIAL - enum: MADV_WILLNEED - enum: MADV_DONTNEED - enum: MADV_REMOVE # other pre-POSIX advice flags; often available - enum: MADV_DONTFORK - enum: MADV_DOFORK - enum: MADV_HWPOISON - enum: MADV_MERGEABLE, - enum: MADV_UNMERGEABLE - enum: MADV_SOFT_OFFLINE - enum: MADV_HUGEPAGE - enum: MADV_NOHUGEPAGE - enum: MADV_DONTDUMP - enum: MADV_DODUMP - enum: MADV_FREE - enum: MADV_WIPEONFORK - enum: MADV_KEEPONFORK - int madvise(void *addr, size_t Len, int advice) - - # sometimes available - int mincore(void *addr, size_t Len, unsigned char *vec) - - # These two are Linux specific but sometimes very efficient - void *mremap(void *old_addr, size_t old_len, size_t new_len, int flags, ...) - int remap_file_pages(void *addr, size_t Len, int prot, - size_t pgoff, int flags) - - # The rare but standardized typed memory option - enum: POSIX_TYPED_MEM_ALLOCATE - enum: POSIX_TYPED_MEM_ALLOCATE_CONTIG - enum: POSIX_TYPED_MEM_MAP_ALLOCATABLE - int posix_typed_mem_open(const char *name, int oflag, int tflag) - int posix_mem_offset(const void *addr, size_t Len, off_t *off, - size_t *contig_len, int *fildes) - - cdef struct posix_typed_mem_info: - size_t posix_tmi_length - int posix_typed_mem_get_info(int fildes, posix_typed_mem_info *info) +# http://pubs.opengroup.org/onlinepubs/009695399/basedefs/sys/mman.h.html + +from posix.types cimport off_t, mode_t + +cdef extern from "<sys/mman.h>" nogil: + enum: PROT_EXEC # protection bits for mmap/mprotect + enum: PROT_READ + enum: PROT_WRITE + enum: PROT_NONE + + enum: MAP_PRIVATE # flag bits for mmap + enum: MAP_SHARED + enum: MAP_FIXED + enum: MAP_ANON # These three are not in POSIX, but are + enum: MAP_ANONYMOUS # fairly common in spelling/semantics + enum: MAP_STACK + + enum: MAP_LOCKED # Typically available only on Linux + enum: MAP_HUGETLB + enum: MAP_POPULATE + enum: MAP_NORESERVE + enum: MAP_GROWSDOWN + + enum: MAP_NOCORE # Typically available only on BSD + enum: MAP_NOSYNC + + void *MAP_FAILED + + void *mmap(void *addr, size_t Len, int prot, int flags, int fd, off_t off) + int munmap(void *addr, size_t Len) + int mprotect(void *addr, size_t Len, int prot) + + enum: MS_ASYNC + enum: MS_SYNC + enum: MS_INVALIDATE + int msync(void *addr, size_t Len, int flags) + + enum: POSIX_MADV_NORMAL # POSIX advice flags + enum: POSIX_MADV_SEQUENTIAL + enum: POSIX_MADV_RANDOM + enum: POSIX_MADV_WILLNEED + enum: POSIX_MADV_DONTNEED + int posix_madvise(void *addr, size_t Len, int advice) + + enum: MCL_CURRENT + enum: MCL_FUTURE + int mlock(const void *addr, size_t Len) + int munlock(const void *addr, size_t Len) + int mlockall(int flags) + int munlockall() + # Linux-specific + enum: MLOCK_ONFAULT + enum: MCL_ONFAULT + int mlock2(const void *addr, size_t len, int flags) + + int shm_open(const char *name, int oflag, mode_t mode) + int shm_unlink(const char *name) + + # often available + enum: MADV_NORMAL # pre-POSIX advice flags; should translate 1-1 to POSIX_* + enum: MADV_RANDOM # but in practice it is not always the same. 
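Many of the mman flags listed here are marked "typically available" rather than guaranteed, so portable code should lean on the POSIX core. A minimal anonymous-mapping sketch, assuming MAP_ANONYMOUS exists on the target platform:

from posix.mman cimport (mmap, munmap, MAP_FAILED, MAP_PRIVATE,
                         MAP_ANONYMOUS, PROT_READ, PROT_WRITE)

def touch_anonymous_page(size_t n):
    # Map n bytes of private, zero-filled memory, write one byte, unmap.
    cdef void *p = mmap(NULL, n, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
    if p == MAP_FAILED:
        raise MemoryError("mmap failed")
    (<char *> p)[0] = 1
    if munmap(p, n) != 0:
        raise OSError("munmap failed")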
+ enum: MADV_SEQUENTIAL + enum: MADV_WILLNEED + enum: MADV_DONTNEED + enum: MADV_REMOVE # other pre-POSIX advice flags; often available + enum: MADV_DONTFORK + enum: MADV_DOFORK + enum: MADV_HWPOISON + enum: MADV_MERGEABLE, + enum: MADV_UNMERGEABLE + enum: MADV_SOFT_OFFLINE + enum: MADV_HUGEPAGE + enum: MADV_NOHUGEPAGE + enum: MADV_DONTDUMP + enum: MADV_DODUMP + enum: MADV_FREE + enum: MADV_WIPEONFORK + enum: MADV_KEEPONFORK + int madvise(void *addr, size_t Len, int advice) + + # sometimes available + int mincore(void *addr, size_t Len, unsigned char *vec) + + # These two are Linux specific but sometimes very efficient + void *mremap(void *old_addr, size_t old_len, size_t new_len, int flags, ...) + int remap_file_pages(void *addr, size_t Len, int prot, + size_t pgoff, int flags) + + # The rare but standardized typed memory option + enum: POSIX_TYPED_MEM_ALLOCATE + enum: POSIX_TYPED_MEM_ALLOCATE_CONTIG + enum: POSIX_TYPED_MEM_MAP_ALLOCATABLE + int posix_typed_mem_open(const char *name, int oflag, int tflag) + int posix_mem_offset(const void *addr, size_t Len, off_t *off, + size_t *contig_len, int *fildes) + + cdef struct posix_typed_mem_info: + size_t posix_tmi_length + int posix_typed_mem_get_info(int fildes, posix_typed_mem_info *info) diff --git a/contrib/tools/cython/Cython/Includes/posix/resource.pxd b/contrib/tools/cython/Cython/Includes/posix/resource.pxd index 1227724323..9f55c6ab4e 100644 --- a/contrib/tools/cython/Cython/Includes/posix/resource.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/resource.pxd @@ -1,9 +1,9 @@ # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/sys/resource.h.html -from posix.time cimport timeval +from posix.time cimport timeval from posix.types cimport id_t -cdef extern from "<sys/resource.h>" nogil: +cdef extern from "<sys/resource.h>" nogil: enum: PRIO_PROCESS enum: PRIO_PGRP diff --git a/contrib/tools/cython/Cython/Includes/posix/select.pxd b/contrib/tools/cython/Cython/Includes/posix/select.pxd index af9509ea43..46703df104 100644 --- a/contrib/tools/cython/Cython/Includes/posix/select.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/select.pxd @@ -1,19 +1,19 @@ -from .types cimport sigset_t -from .time cimport timeval, timespec - -cdef extern from "<sys/select.h>" nogil: - ctypedef struct fd_set: - pass - - int FD_SETSIZE - void FD_SET(int, fd_set*) - void FD_CLR(int, fd_set*) - bint FD_ISSET(int, fd_set*) - void FD_ZERO(fd_set*) - - int select(int nfds, fd_set *readfds, fd_set *writefds, - fd_set *exceptfds, const timeval *timeout) - - int pselect(int nfds, fd_set *readfds, fd_set *writefds, - fd_set *exceptfds, const timespec *timeout, - const sigset_t *sigmask) +from .types cimport sigset_t +from .time cimport timeval, timespec + +cdef extern from "<sys/select.h>" nogil: + ctypedef struct fd_set: + pass + + int FD_SETSIZE + void FD_SET(int, fd_set*) + void FD_CLR(int, fd_set*) + bint FD_ISSET(int, fd_set*) + void FD_ZERO(fd_set*) + + int select(int nfds, fd_set *readfds, fd_set *writefds, + fd_set *exceptfds, const timeval *timeout) + + int pselect(int nfds, fd_set *readfds, fd_set *writefds, + fd_set *exceptfds, const timespec *timeout, + const sigset_t *sigmask) diff --git a/contrib/tools/cython/Cython/Includes/posix/signal.pxd b/contrib/tools/cython/Cython/Includes/posix/signal.pxd index eafb164313..9fe7d9c36c 100644 --- a/contrib/tools/cython/Cython/Includes/posix/signal.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/signal.pxd @@ -2,7 +2,7 @@ from posix.types cimport pid_t, sigset_t, uid_t -cdef extern from "<signal.h>" 
nogil: +cdef extern from "<signal.h>" nogil: cdef union sigval: int sival_int @@ -12,7 +12,7 @@ cdef extern from "<signal.h>" nogil: int sigev_notify int sigev_signo sigval sigev_value - void sigev_notify_function(sigval) + void sigev_notify_function(sigval) ctypedef struct siginfo_t: int si_signo @@ -26,8 +26,8 @@ cdef extern from "<signal.h>" nogil: sigval si_value cdef struct sigaction_t "sigaction": - void sa_handler(int) - void sa_sigaction(int, siginfo_t *, void *) + void sa_handler(int) + void sa_sigaction(int, siginfo_t *, void *) sigset_t sa_mask int sa_flags @@ -68,6 +68,6 @@ cdef extern from "<signal.h>" nogil: int sigdelset (sigset_t *, int) int sigemptyset (sigset_t *) int sigfillset (sigset_t *) - int sigismember (const sigset_t *, int) + int sigismember (const sigset_t *, int) int sigaltstack(const stack_t *, stack_t *) diff --git a/contrib/tools/cython/Cython/Includes/posix/stat.pxd b/contrib/tools/cython/Cython/Includes/posix/stat.pxd index 30f1791ba5..69c2eca166 100644 --- a/contrib/tools/cython/Cython/Includes/posix/stat.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/stat.pxd @@ -2,7 +2,7 @@ from posix.types cimport (blkcnt_t, blksize_t, dev_t, gid_t, ino_t, mode_t, nlink_t, off_t, time_t, uid_t) -cdef extern from "<sys/stat.h>" nogil: +cdef extern from "<sys/stat.h>" nogil: cdef struct struct_stat "stat": dev_t st_dev ino_t st_ino @@ -24,7 +24,7 @@ cdef extern from "<sys/stat.h>" nogil: time_t st_birthtime # POSIX prescribes including both <sys/stat.h> and <unistd.h> for these -cdef extern from "<unistd.h>" nogil: +cdef extern from "<unistd.h>" nogil: int fchmod(int, mode_t) int chmod(const char *, mode_t) diff --git a/contrib/tools/cython/Cython/Includes/posix/stdio.pxd b/contrib/tools/cython/Cython/Includes/posix/stdio.pxd index 327fbb3cb6..53913fdf45 100644 --- a/contrib/tools/cython/Cython/Includes/posix/stdio.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/stdio.pxd @@ -1,37 +1,37 @@ -# POSIX additions to <stdio.h>. -# http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/stdio.h.html - -from libc.stdio cimport FILE -from libc.stddef cimport wchar_t -from posix.types cimport off_t - -cdef extern from "<stdio.h>" nogil: - # File descriptors - FILE *fdopen(int, const char *) - int fileno(FILE *) - - # Pipes - FILE *popen(const char *, const char *) - int pclose(FILE *) - - # Memory streams (POSIX.2008) - FILE *fmemopen(void *, size_t, const char *) - FILE *open_memstream(char **, size_t *) - FILE *open_wmemstream(wchar_t **, size_t *) - - # Seek and tell with off_t - int fseeko(FILE *, off_t, int) - off_t ftello(FILE *) - - # Locking (for multithreading) - void flockfile(FILE *) - int ftrylockfile(FILE *) - void funlockfile(FILE *) - int getc_unlocked(FILE *) - int getchar_unlocked() - int putc_unlocked(int, FILE *) - int putchar_unlocked(int) - - # Reading lines and records (POSIX.2008) - ssize_t getline(char **, size_t *, FILE *) - ssize_t getdelim(char **, size_t *, int, FILE *) +# POSIX additions to <stdio.h>. 
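The POSIX stdio additions above include the POSIX.2008 line readers. A minimal getline() sketch; note the buffer is allocated by libc and must be released with free() (path handling is simplified to a bytes argument):

from libc.stdio cimport FILE, fopen, fclose
from libc.stdlib cimport free
from posix.stdio cimport getline

def count_lines(bytes path):
    # Count newline-delimited records using the libc reader.
    cdef FILE *f = fopen(path, "rb")
    if f == NULL:
        raise OSError("cannot open file")
    cdef char *line = NULL
    cdef size_t cap = 0
    cdef size_t count = 0
    try:
        while getline(&line, &cap, f) >= 0:
            count += 1
    finally:
        free(line)   # getline()'s buffer is libc-malloc'd
        fclose(f)
    return count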
+# http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/stdio.h.html + +from libc.stdio cimport FILE +from libc.stddef cimport wchar_t +from posix.types cimport off_t + +cdef extern from "<stdio.h>" nogil: + # File descriptors + FILE *fdopen(int, const char *) + int fileno(FILE *) + + # Pipes + FILE *popen(const char *, const char *) + int pclose(FILE *) + + # Memory streams (POSIX.2008) + FILE *fmemopen(void *, size_t, const char *) + FILE *open_memstream(char **, size_t *) + FILE *open_wmemstream(wchar_t **, size_t *) + + # Seek and tell with off_t + int fseeko(FILE *, off_t, int) + off_t ftello(FILE *) + + # Locking (for multithreading) + void flockfile(FILE *) + int ftrylockfile(FILE *) + void funlockfile(FILE *) + int getc_unlocked(FILE *) + int getchar_unlocked() + int putc_unlocked(int, FILE *) + int putchar_unlocked(int) + + # Reading lines and records (POSIX.2008) + ssize_t getline(char **, size_t *, FILE *) + ssize_t getdelim(char **, size_t *, int, FILE *) diff --git a/contrib/tools/cython/Cython/Includes/posix/stdlib.pxd b/contrib/tools/cython/Cython/Includes/posix/stdlib.pxd index e276ddac92..513de938a8 100644 --- a/contrib/tools/cython/Cython/Includes/posix/stdlib.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/stdlib.pxd @@ -1,7 +1,7 @@ # POSIX additions to <stdlib.h> # http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/stdlib.h.html -cdef extern from "<stdlib.h>" nogil: +cdef extern from "<stdlib.h>" nogil: void _Exit(int) double drand48() double erand48(unsigned short *) diff --git a/contrib/tools/cython/Cython/Includes/posix/strings.pxd b/contrib/tools/cython/Cython/Includes/posix/strings.pxd index 40d33e11eb..6ee48491eb 100644 --- a/contrib/tools/cython/Cython/Includes/posix/strings.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/strings.pxd @@ -1,9 +1,9 @@ -cdef extern from "<strings.h>" nogil: - int bcmp(const void *, const void *, size_t) - void bcopy(const void *, void *, size_t) - void bzero(void *, size_t) - int ffs(int) - char *index(const char *, int) - char *rindex(const char *, int) - int strcasecmp(const char *, const char *) - int strncasecmp(const char *, const char *, size_t) +cdef extern from "<strings.h>" nogil: + int bcmp(const void *, const void *, size_t) + void bcopy(const void *, void *, size_t) + void bzero(void *, size_t) + int ffs(int) + char *index(const char *, int) + char *rindex(const char *, int) + int strcasecmp(const char *, const char *) + int strncasecmp(const char *, const char *, size_t) diff --git a/contrib/tools/cython/Cython/Includes/posix/time.pxd b/contrib/tools/cython/Cython/Includes/posix/time.pxd index 0c67ddc3a4..6bc81bfea0 100644 --- a/contrib/tools/cython/Cython/Includes/posix/time.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/time.pxd @@ -1,9 +1,9 @@ # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/sys/time.h.html -from posix.types cimport suseconds_t, time_t, clockid_t, timer_t +from posix.types cimport suseconds_t, time_t, clockid_t, timer_t from posix.signal cimport sigevent -cdef extern from "<sys/time.h>" nogil: +cdef extern from "<sys/time.h>" nogil: enum: CLOCK_REALTIME enum: TIMER_ABSTIME enum: CLOCK_MONOTONIC @@ -32,18 +32,18 @@ cdef extern from "<sys/time.h>" nogil: enum: ITIMER_VIRTUAL enum: ITIMER_PROF - cdef struct timezone: - int tz_minuteswest - int dsttime - + cdef struct timezone: + int tz_minuteswest + int dsttime + cdef struct timeval: time_t tv_sec suseconds_t tv_usec - cdef struct timespec: - time_t tv_sec - long tv_nsec - + cdef struct timespec: + time_t tv_sec + long 
tv_nsec + cdef struct itimerval: timeval it_interval timeval it_value @@ -52,20 +52,20 @@ cdef extern from "<sys/time.h>" nogil: timespec it_interval timespec it_value - int nanosleep(const timespec *, timespec *) + int nanosleep(const timespec *, timespec *) + + int getitimer(int, itimerval *) + int gettimeofday(timeval *tp, timezone *tzp) + int setitimer(int, const itimerval *, itimerval *) - int getitimer(int, itimerval *) - int gettimeofday(timeval *tp, timezone *tzp) - int setitimer(int, const itimerval *, itimerval *) + int clock_getcpuclockid(pid_t, clockid_t *) + int clock_getres(clockid_t, timespec *) + int clock_gettime(clockid_t, timespec *) + int clock_nanosleep(clockid_t, int, const timespec *, timespec *) + int clock_settime(clockid_t, const timespec *) - int clock_getcpuclockid(pid_t, clockid_t *) - int clock_getres(clockid_t, timespec *) - int clock_gettime(clockid_t, timespec *) - int clock_nanosleep(clockid_t, int, const timespec *, timespec *) - int clock_settime(clockid_t, const timespec *) - - int timer_create(clockid_t, sigevent *, timer_t *) - int timer_delete(timer_t) - int timer_gettime(timer_t, itimerspec *) - int timer_getoverrun(timer_t) - int timer_settime(timer_t, int, const itimerspec *, itimerspec *) + int timer_create(clockid_t, sigevent *, timer_t *) + int timer_delete(timer_t) + int timer_gettime(timer_t, itimerspec *) + int timer_getoverrun(timer_t) + int timer_settime(timer_t, int, const itimerspec *, itimerspec *) diff --git a/contrib/tools/cython/Cython/Includes/posix/types.pxd b/contrib/tools/cython/Cython/Includes/posix/types.pxd index b74bba3a8f..308f2954ee 100644 --- a/contrib/tools/cython/Cython/Includes/posix/types.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/types.pxd @@ -1,23 +1,23 @@ -# Note that the actual size of these types is system-dependent, and -# can't be detected at C compile time. However, the generated C code -# will correctly use the actual size of these types *except* for -# determining promotion in binary arithmetic expressions involving -# mixed types. In this case, operands are promoted to the declared -# larger type, with a bias towards typedef types. Thus, with the -# declarations below, long + time_t will result in a time_t whereas -# long long + time_t will result in a long long which should be -# acceptable for either 32-bit or 64-bit signed time_t (though admittedly -# the POSIX standard doesn't even specify that time_t must be an integral -# type). - -cdef extern from "<sys/types.h>": +# Note that the actual size of these types is system-dependent, and +# can't be detected at C compile time. However, the generated C code +# will correctly use the actual size of these types *except* for +# determining promotion in binary arithmetic expressions involving +# mixed types. In this case, operands are promoted to the declared +# larger type, with a bias towards typedef types. Thus, with the +# declarations below, long + time_t will result in a time_t whereas +# long long + time_t will result in a long long which should be +# acceptable for either 32-bit or 64-bit signed time_t (though admittedly +# the POSIX standard doesn't even specify that time_t must be an integral +# type). 
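The clock and timer declarations above track the C API closely, so the familiar calling pattern carries over unchanged. A minimal monotonic-clock sketch:

from posix.time cimport clock_gettime, timespec, CLOCK_MONOTONIC

def monotonic_ns():
    # Nanoseconds from the OS monotonic clock.
    cdef timespec ts
    if clock_gettime(CLOCK_MONOTONIC, &ts) != 0:
        raise OSError("clock_gettime failed")
    return <long long> ts.tv_sec * 1000000000 + ts.tv_nsec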
+ +cdef extern from "<sys/types.h>": ctypedef long blkcnt_t ctypedef long blksize_t ctypedef long clockid_t ctypedef long dev_t ctypedef long gid_t ctypedef long id_t - ctypedef unsigned long ino_t + ctypedef unsigned long ino_t ctypedef long mode_t ctypedef long nlink_t ctypedef long off_t diff --git a/contrib/tools/cython/Cython/Includes/posix/unistd.pxd b/contrib/tools/cython/Cython/Includes/posix/unistd.pxd index bd7c3daa49..1afeca3854 100644 --- a/contrib/tools/cython/Cython/Includes/posix/unistd.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/unistd.pxd @@ -2,7 +2,7 @@ from posix.types cimport gid_t, pid_t, off_t, uid_t -cdef extern from "<unistd.h>" nogil: +cdef extern from "<unistd.h>" nogil: #:NULL diff --git a/contrib/tools/cython/Cython/Includes/posix/wait.pxd b/contrib/tools/cython/Cython/Includes/posix/wait.pxd index 6041d02eb1..d18cff9cf8 100644 --- a/contrib/tools/cython/Cython/Includes/posix/wait.pxd +++ b/contrib/tools/cython/Cython/Includes/posix/wait.pxd @@ -1,38 +1,38 @@ -# http://pubs.opengroup.org/onlinepubs/009695399/basedefs/sys/wait.h.html - -from posix.types cimport pid_t, id_t -from posix.signal cimport siginfo_t -from posix.resource cimport rusage - -cdef extern from "<sys/wait.h>" nogil: - enum: WNOHANG - enum: WUNTRACED - enum: WCONTINUED - enum: WEXITED - enum: WSTOPPED - enum: WNOWAIT - - int WEXITSTATUS(int status) - int WIFCONTINUED(int status) - int WIFEXITED(int status) - int WIFSIGNALED(int status) - int WIFSTOPPED(int status) - int WSTOPSIG(int status) - int WTERMSIG(int status) - - ctypedef int idtype_t - enum: P_ALL # idtype_t values - enum: P_PID - enum: P_PGID - - pid_t wait(int *stat_loc) - pid_t waitpid(pid_t pid, int *status, int options) - int waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options) - -# wait3 was in POSIX until 2008 while wait4 was never standardized. -# Even so, these calls are in almost every Unix, always in sys/wait.h. -# Hence, posix.wait is the least surprising place to declare them for Cython. -# libc may require _XXX_SOURCE to be defined at C-compile time to provide them. - - pid_t wait3(int *status, int options, rusage *rusage) - pid_t wait4(pid_t pid, int *status, int options, rusage *rusage) +# http://pubs.opengroup.org/onlinepubs/009695399/basedefs/sys/wait.h.html + +from posix.types cimport pid_t, id_t +from posix.signal cimport siginfo_t +from posix.resource cimport rusage + +cdef extern from "<sys/wait.h>" nogil: + enum: WNOHANG + enum: WUNTRACED + enum: WCONTINUED + enum: WEXITED + enum: WSTOPPED + enum: WNOWAIT + + int WEXITSTATUS(int status) + int WIFCONTINUED(int status) + int WIFEXITED(int status) + int WIFSIGNALED(int status) + int WIFSTOPPED(int status) + int WSTOPSIG(int status) + int WTERMSIG(int status) + + ctypedef int idtype_t + enum: P_ALL # idtype_t values + enum: P_PID + enum: P_PGID + + pid_t wait(int *stat_loc) + pid_t waitpid(pid_t pid, int *status, int options) + int waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options) + +# wait3 was in POSIX until 2008 while wait4 was never standardized. +# Even so, these calls are in almost every Unix, always in sys/wait.h. +# Hence, posix.wait is the least surprising place to declare them for Cython. +# libc may require _XXX_SOURCE to be defined at C-compile time to provide them. 
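The posix.wait declarations combine the wait calls with the status-decoding macros. A minimal sketch that reaps one child and reports how it ended (the pid is assumed to come from an earlier fork()):

from posix.types cimport pid_t
from posix.wait cimport (waitpid, WIFEXITED, WEXITSTATUS,
                         WIFSIGNALED, WTERMSIG)

def reap(pid):
    # Block until `pid` changes state, then decode the status word.
    cdef int status = 0
    if waitpid(pid, &status, 0) < 0:
        raise OSError("waitpid failed")
    if WIFEXITED(status):
        return ('exited', WEXITSTATUS(status))
    if WIFSIGNALED(status):
        return ('signaled', WTERMSIG(status))
    return ('other', status)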
+ + pid_t wait3(int *status, int options, rusage *rusage) + pid_t wait4(pid_t pid, int *status, int options, rusage *rusage) diff --git a/contrib/tools/cython/Cython/Parser/ConcreteSyntaxTree.pyx b/contrib/tools/cython/Cython/Parser/ConcreteSyntaxTree.pyx index c6196d8892..f9888c561c 100644 --- a/contrib/tools/cython/Cython/Parser/ConcreteSyntaxTree.pyx +++ b/contrib/tools/cython/Cython/Parser/ConcreteSyntaxTree.pyx @@ -1,88 +1,88 @@ -cdef extern from "graminit.c": - ctypedef struct grammar: - pass - cdef grammar _PyParser_Grammar - cdef int Py_file_input - -cdef extern from "node.h": - ctypedef struct node - void PyNode_Free(node* n) - int NCH(node* n) - node* CHILD(node* n, int ix) - node* RCHILD(node* n, int ix) - short TYPE(node* n) - char* STR(node* n) - -cdef extern from "parsetok.h": - ctypedef struct perrdetail: - pass - cdef void PyParser_SetError(perrdetail *err) except * - cdef node * PyParser_ParseStringFlagsFilenameEx( - const char * s, - const char * filename, - grammar * g, - int start, - perrdetail * err_ret, - int * flags) - -import distutils.sysconfig -import os -import re - -def extract_names(path): - # All parse tree types are #defined in these files as ints. - type_names = {} - for line in open(path): - if line.startswith('#define'): - try: - _, name, value = line.strip().split() - type_names[int(value)] = name - except: - pass - return type_names - -cdef dict type_names = {} - -cdef print_tree(node* n, indent=""): - if not type_names: - type_names.update(extract_names( - os.path.join(distutils.sysconfig.get_python_inc(), 'token.h'))) - type_names.update(extract_names( - os.path.join(os.path.dirname(__file__), 'graminit.h'))) - - print indent, type_names.get(TYPE(n), 'unknown'), <object>STR(n) if NCH(n) == 0 else NCH(n) - indent += " " - for i in range(NCH(n)): - print_tree(CHILD(n, i), indent) - -def handle_includes(source, path): - # TODO: Use include directory. - def include_here(include_line): - included = os.path.join(os.path.dirname(path), include_line.group(1)[1:-1]) - if not os.path.exists(included): - return include_line.group(0) + ' # no such path: ' + included - return handle_includes(open(included).read(), path) - # TODO: Proper string tokenizing. - return re.sub(r'^include\s+([^\n]+[\'"])\s*(#.*)?$', include_here, source, flags=re.M) - -def p_module(path): - cdef perrdetail err - cdef int flags - cdef node* n - source = open(path).read() - if '\ninclude ' in source: - # TODO: Tokanizer needs to understand includes. - source = handle_includes(source, path) - path = "preparse(%s)" % path - n = PyParser_ParseStringFlagsFilenameEx( - source, - path, - &_PyParser_Grammar, - Py_file_input, - &err, - &flags) - if n: -# print_tree(n) - PyNode_Free(n) - else: - PyParser_SetError(&err) +cdef extern from "graminit.c": + ctypedef struct grammar: + pass + cdef grammar _PyParser_Grammar + cdef int Py_file_input + +cdef extern from "node.h": + ctypedef struct node + void PyNode_Free(node* n) + int NCH(node* n) + node* CHILD(node* n, int ix) + node* RCHILD(node* n, int ix) + short TYPE(node* n) + char* STR(node* n) + +cdef extern from "parsetok.h": + ctypedef struct perrdetail: + pass + cdef void PyParser_SetError(perrdetail *err) except * + cdef node * PyParser_ParseStringFlagsFilenameEx( + const char * s, + const char * filename, + grammar * g, + int start, + perrdetail * err_ret, + int * flags) + +import distutils.sysconfig +import os +import re + +def extract_names(path): + # All parse tree types are #defined in these files as ints. 
+ type_names = {} + for line in open(path): + if line.startswith('#define'): + try: + _, name, value = line.strip().split() + type_names[int(value)] = name + except: + pass + return type_names + +cdef dict type_names = {} + +cdef print_tree(node* n, indent=""): + if not type_names: + type_names.update(extract_names( + os.path.join(distutils.sysconfig.get_python_inc(), 'token.h'))) + type_names.update(extract_names( + os.path.join(os.path.dirname(__file__), 'graminit.h'))) + + print indent, type_names.get(TYPE(n), 'unknown'), <object>STR(n) if NCH(n) == 0 else NCH(n) + indent += " " + for i in range(NCH(n)): + print_tree(CHILD(n, i), indent) + +def handle_includes(source, path): + # TODO: Use include directory. + def include_here(include_line): + included = os.path.join(os.path.dirname(path), include_line.group(1)[1:-1]) + if not os.path.exists(included): + return include_line.group(0) + ' # no such path: ' + included + return handle_includes(open(included).read(), path) + # TODO: Proper string tokenizing. + return re.sub(r'^include\s+([^\n]+[\'"])\s*(#.*)?$', include_here, source, flags=re.M) + +def p_module(path): + cdef perrdetail err + cdef int flags + cdef node* n + source = open(path).read() + if '\ninclude ' in source: + # TODO: Tokanizer needs to understand includes. + source = handle_includes(source, path) + path = "preparse(%s)" % path + n = PyParser_ParseStringFlagsFilenameEx( + source, + path, + &_PyParser_Grammar, + Py_file_input, + &err, + &flags) + if n: +# print_tree(n) + PyNode_Free(n) + else: + PyParser_SetError(&err) diff --git a/contrib/tools/cython/Cython/Parser/Grammar b/contrib/tools/cython/Cython/Parser/Grammar index 97e16386e4..214e36d5a3 100644 --- a/contrib/tools/cython/Cython/Parser/Grammar +++ b/contrib/tools/cython/Cython/Parser/Grammar @@ -1,214 +1,214 @@ -# Grammar for Cython, based on the Grammar for Python 3 - -# Note: This grammar is not yet used by the Cython parser and is subject to change. - -# Start symbols for the grammar: -# single_input is a single interactive statement; -# file_input is a module or sequence of commands read from an input file; -# eval_input is the input for the eval() functions. -# NB: compound_stmt in single_input is followed by extra NEWLINE! 
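One subtlety in the ConcreteSyntaxTree.pyx code above: handle_includes() recognises include statements with a line-anchored regex and splices the named files in recursively, annotating any path it cannot find. A small pure-Python illustration of the pattern it applies (the file name here is made up):

import re

source = 'include "defs.pxi"  # shared declarations\nx = 1\n'
# The same pattern handle_includes() uses with re.M:
pattern = r'^include\s+([^\n]+[\'"])\s*(#.*)?$'
print(re.findall(pattern, source, flags=re.M))
# -> [('"defs.pxi"', '# shared declarations')]
# group(1)[1:-1] then strips the quotes to recover the path.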
-single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE -file_input: (NEWLINE | stmt)* ENDMARKER -eval_input: testlist NEWLINE* ENDMARKER - -decorator: '@' dotted_PY_NAME [ '(' [arglist] ')' ] NEWLINE -decorators: decorator+ -decorated: decorators (classdef | funcdef | async_funcdef | cdef_stmt) -async_funcdef: 'async' funcdef -funcdef: 'def' PY_NAME parameters ['->' test] ':' suite -parameters: '(' [typedargslist] ')' -typedargslist: (tfpdef ['=' (test | '*')] (',' tfpdef ['=' (test | '*')])* [',' - ['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]] - | '*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef) [',' ellipsis] -tfpdef: maybe_typed_name [('not' | 'or') 'None'] [':' test] -varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' - ['*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef]] - | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef) -vfpdef: maybe_typed_name ['not' 'None'] - -stmt: simple_stmt | compound_stmt | cdef_stmt | ctypedef_stmt | DEF_stmt | IF_stmt -simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE -small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt | - import_stmt | global_stmt | nonlocal_stmt | assert_stmt | print_stmt) -expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) | - ('=' (yield_expr|testlist_star_expr))*) -testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] -augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | - '<<=' | '>>=' | '**=' | '//=') -print_stmt: 'print' ( [ test (',' test)* [','] ] | - '>>' test [ (',' test)+ [','] ] ) -# For normal assignments, additional restrictions enforced by the interpreter -del_stmt: 'del' exprlist -pass_stmt: 'pass' -flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt -break_stmt: 'break' -continue_stmt: 'continue' -return_stmt: 'return' [testlist] -yield_stmt: yield_expr -raise_stmt: 'raise' [test ['from' test]] -# raise_stmt: 'raise' [test [',' test [',' test]]] -import_stmt: import_PY_NAME | import_from -import_PY_NAME: ('import' | 'cimport') dotted_as_PY_NAMEs -# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS -import_from: ('from' (('.' | '...')* dotted_PY_NAME | ('.' | '...')+) - ('import' | 'cimport') ('*' | '(' import_as_PY_NAMEs ')' | import_as_PY_NAMEs)) -import_as_PY_NAME: PY_NAME ['as' PY_NAME] -dotted_as_PY_NAME: dotted_PY_NAME ['as' PY_NAME] -import_as_PY_NAMEs: import_as_PY_NAME (',' import_as_PY_NAME)* [','] -dotted_as_PY_NAMEs: dotted_as_PY_NAME (',' dotted_as_PY_NAME)* -dotted_PY_NAME: PY_NAME ('.' 
PY_NAME)* -global_stmt: 'global' PY_NAME (',' PY_NAME)* -nonlocal_stmt: 'nonlocal' PY_NAME (',' PY_NAME)* -exec_stmt: 'exec' expr ['in' test [',' test]] -assert_stmt: 'assert' test [',' test] - -compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated -if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] -while_stmt: 'while' test ':' suite ['else' ':' suite] -for_stmt: 'for' exprlist ('in' testlist | for_from_clause)':' suite ['else' ':' suite] -for_from_clause: 'from' expr comp_op PY_NAME comp_op expr ['by' expr] -try_stmt: ('try' ':' suite - ((except_clause ':' suite)+ - ['else' ':' suite] - ['finally' ':' suite] | - 'finally' ':' suite)) -with_stmt: 'with' with_item (',' with_item)* ':' suite -with_item: test ['as' expr] -# NB compile.c makes sure that the default except clause is last -except_clause: 'except' [test [('as' | ',') test]] -suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT - -test: or_test ['if' or_test 'else' test] | lambdef -test_nocond: or_test | lambdef_nocond -lambdef: 'lambda' [varargslist] ':' test -lambdef_nocond: 'lambda' [varargslist] ':' test_nocond -or_test: and_test ('or' and_test)* -and_test: not_test ('and' not_test)* -not_test: 'not' not_test | comparison -comparison: expr (comp_op expr)* -# <> isn't actually a valid comparison operator in Python. It's here for the -# sake of a __future__ import described in PEP 401 -comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' -star_expr: '*' expr -expr: xor_expr ('|' xor_expr)* -xor_expr: and_expr ('^' and_expr)* -and_expr: shift_expr ('&' shift_expr)* -shift_expr: arith_expr (('<<'|'>>') arith_expr)* -arith_expr: term (('+'|'-') term)* -term: factor (('*'|'/'|'%'|'//') factor)* -factor: ('+'|'-'|'~') factor | power | address | size_of | cast -power: atom_expr ['**' factor] -atom_expr: ['await'] atom trailer* -atom: ('(' [yield_expr|testlist_comp] ')' | - '[' [testlist_comp] ']' | - '{' [dictorsetmaker] '}' | - new_expr | - PY_NAME | NUMBER | STRING+ | ellipsis | 'None' | 'True' | 'False') -testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) -trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' (PY_NAME | 'sizeof') -subscriptlist: subscript (',' subscript)* [','] -subscript: test | [test] ':' [test] [sliceop] -sliceop: ':' [test] -exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] -testlist: test (',' test)* [','] -dictorsetmaker: ( ((test ':' test | '**' expr) - (comp_for | (',' (test ':' test | '**' expr))* [','])) | - ((test | star_expr) - (comp_for | (',' (test | star_expr))* [','])) ) - -classdef: 'class' PY_NAME ['(' [arglist] ')'] ':' suite - -arglist: argument (',' argument)* [','] - -# The reason that keywords are test nodes instead of NAME is that using NAME -# results in an ambiguity. ast.c makes sure it's a NAME. -# "test '=' test" is really "keyword '=' test", but we have no such token. -# These need to be in a single rule to avoid grammar that is ambiguous -# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, -# we explicitly match '*' here, too, to give it proper precedence. -# Illegal combinations and orderings are blocked in ast.c: +# Grammar for Cython, based on the Grammar for Python 3 + +# Note: This grammar is not yet used by the Cython parser and is subject to change. 
+ +# Start symbols for the grammar: +# single_input is a single interactive statement; +# file_input is a module or sequence of commands read from an input file; +# eval_input is the input for the eval() functions. +# NB: compound_stmt in single_input is followed by extra NEWLINE! +single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: (NEWLINE | stmt)* ENDMARKER +eval_input: testlist NEWLINE* ENDMARKER + +decorator: '@' dotted_PY_NAME [ '(' [arglist] ')' ] NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef | async_funcdef | cdef_stmt) +async_funcdef: 'async' funcdef +funcdef: 'def' PY_NAME parameters ['->' test] ':' suite +parameters: '(' [typedargslist] ')' +typedargslist: (tfpdef ['=' (test | '*')] (',' tfpdef ['=' (test | '*')])* [',' + ['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]] + | '*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef) [',' ellipsis] +tfpdef: maybe_typed_name [('not' | 'or') 'None'] [':' test] +varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' + ['*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef) +vfpdef: maybe_typed_name ['not' 'None'] + +stmt: simple_stmt | compound_stmt | cdef_stmt | ctypedef_stmt | DEF_stmt | IF_stmt +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | nonlocal_stmt | assert_stmt | print_stmt) +expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) +testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] +augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') +print_stmt: 'print' ( [ test (',' test)* [','] ] | + '>>' test [ (',' test)+ [','] ] ) +# For normal assignments, additional restrictions enforced by the interpreter +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test]] +# raise_stmt: 'raise' [test [',' test [',' test]]] +import_stmt: import_PY_NAME | import_from +import_PY_NAME: ('import' | 'cimport') dotted_as_PY_NAMEs +# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS +import_from: ('from' (('.' | '...')* dotted_PY_NAME | ('.' | '...')+) + ('import' | 'cimport') ('*' | '(' import_as_PY_NAMEs ')' | import_as_PY_NAMEs)) +import_as_PY_NAME: PY_NAME ['as' PY_NAME] +dotted_as_PY_NAME: dotted_PY_NAME ['as' PY_NAME] +import_as_PY_NAMEs: import_as_PY_NAME (',' import_as_PY_NAME)* [','] +dotted_as_PY_NAMEs: dotted_as_PY_NAME (',' dotted_as_PY_NAME)* +dotted_PY_NAME: PY_NAME ('.' 
PY_NAME)* +global_stmt: 'global' PY_NAME (',' PY_NAME)* +nonlocal_stmt: 'nonlocal' PY_NAME (',' PY_NAME)* +exec_stmt: 'exec' expr ['in' test [',' test]] +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated +if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] +while_stmt: 'while' test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist ('in' testlist | for_from_clause)':' suite ['else' ':' suite] +for_from_clause: 'from' expr comp_op PY_NAME comp_op expr ['by' expr] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' with_item (',' with_item)* ':' suite +with_item: test ['as' expr] +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test [('as' | ',') test]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +test: or_test ['if' or_test 'else' test] | lambdef +test_nocond: or_test | lambdef_nocond +lambdef: 'lambda' [varargslist] ':' test +lambdef_nocond: 'lambda' [varargslist] ':' test_nocond +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +# <> isn't actually a valid comparison operator in Python. It's here for the +# sake of a __future__ import described in PEP 401 +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +star_expr: '*' expr +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power | address | size_of | cast +power: atom_expr ['**' factor] +atom_expr: ['await'] atom trailer* +atom: ('(' [yield_expr|testlist_comp] ')' | + '[' [testlist_comp] ']' | + '{' [dictorsetmaker] '}' | + new_expr | + PY_NAME | NUMBER | STRING+ | ellipsis | 'None' | 'True' | 'False') +testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' (PY_NAME | 'sizeof') +subscriptlist: subscript (',' subscript)* [','] +subscript: test | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +testlist: test (',' test)* [','] +dictorsetmaker: ( ((test ':' test | '**' expr) + (comp_for | (',' (test ':' test | '**' expr))* [','])) | + ((test | star_expr) + (comp_for | (',' (test | star_expr))* [','])) ) + +classdef: 'class' PY_NAME ['(' [arglist] ')'] ':' suite + +arglist: argument (',' argument)* [','] + +# The reason that keywords are test nodes instead of NAME is that using NAME +# results in an ambiguity. ast.c makes sure it's a NAME. +# "test '=' test" is really "keyword '=' test", but we have no such token. +# These need to be in a single rule to avoid grammar that is ambiguous +# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, +# we explicitly match '*' here, too, to give it proper precedence. +# Illegal combinations and orderings are blocked in ast.c: # multiple (test comp_for) arguments are blocked; keyword unpackings -# that precede iterable unpackings are blocked; etc. 
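Several productions here exist only for Cython syntax: for_from_clause, for instance, describes the legacy bounded loop, while cast and size_of cover the angle-bracket cast and sizeof(). A few statements these rules are written to accept (illustrative only, since the grammar is noted above as not yet driving the parser):

# for_from_clause: 'from' expr comp_op PY_NAME comp_op expr ['by' expr]
cdef int i, total = 0
for i from 0 <= i < 10 by 2:
    total += i

# cast: '<' type ['?'] '>' factor;  size_of: 'sizeof' '(' type ')'
cdef double mean = <double> total / 5
cdef size_t width = sizeof(long)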
-argument: ( test [comp_for] | - test '=' test | - '**' expr | - star_expr ) - -comp_iter: comp_for | comp_if -comp_for: 'for' exprlist ('in' or_test | for_from_clause) [comp_iter] -comp_if: 'if' test_nocond [comp_iter] - -# not used in grammar, but may appear in "node" passed from Parser to Compiler -encoding_decl: NAME - -yield_expr: 'yield' [yield_arg] -yield_arg: 'from' test | testlist - - -# Cython extensions - -# Accommodate to Py2 tokenizer. -ellipsis: '...' | '.' '.' '.' - -signedness: 'unsigned' | 'signed' -longness: 'char' | 'short' | 'long' | 'long' 'long' -# TODO: [unsigned] double doesn't make sens, but we need long double -int_type: signedness [longness] | longness | [signedness] [longness] ('int' | 'double') | 'complex' - -type: ['const'] (NAME ('.' PY_NAME)* | int_type | '(' type ')') ['complex'] [type_qualifiers] -maybe_typed_name: ['const'] (NAME [('.' PY_NAME)* ['complex'] [type_qualifiers] NAME] | (int_type | '(' type ')') ['complex'] [type_qualifiers] NAME) -teplate_params: '[' NAME (',' NAME)* ']' -type_qualifiers: type_qualifier+ -type_qualifier: '*' | '**' | '&' | type_index ('.' NAME [type_index])* -# TODO: old buffer syntax -type_index: '[' [(NUMBER | type (',' type)* | (memory_view_index (',' memory_view_index)*))] ']' -memory_view_index: ':' [':'] [NUMBER] - -address: '&' factor -cast: '<' type ['?'] '>' factor -size_of: 'sizeof' '(' (type) ')' -type_id: 'typeid' '(' (type) ')' -new_expr: 'new' type '(' [arglist] ')' - -# TODO: Restrict cdef_stmt to "top-level" statements. -cdef_stmt: ('cdef' | 'cpdef') (cvar_def | cdef_type_decl | extern_block) -cdef_type_decl: ctype_decl | fused | cclass -ctype_decl: struct | enum | cppclass -# TODO: Does the cdef/ctypedef distinction even make sense for fused? -ctypedef_stmt: 'ctypedef' (cvar_decl | struct | enum | fused) - -# Note: these two are similar but can't be used in an or clause -# as it would cause ambiguity in the LL(1) parser. -# Requires a type -cvar_decl: [visibility] type cname (NEWLINE | cfunc) -# Allows an assignment -cvar_def: [visibility] maybe_typed_name (['=' test] (',' PY_NAME ['=' test])* NEWLINE | cfunc) - -visibility: 'public' | 'api' | 'readonly' -# TODO: Standardize gil_spec first or last. 
-cfunc: [teplate_params] parameters [gil_spec] [exception_value] [gil_spec] (':' suite | NEWLINE) -exception_value: 'except' (['?'] expr | '*' | '+' [PY_NAME]) -gil_spec: 'with' ('gil' | 'nogil') | 'nogil' - -cname: NAME [STRING] -cclass: classdef -fused: 'fused' PY_NAME ':' NEWLINE INDENT ( type NEWLINE)+ DEDENT -enum: 'enum' [cname] (NEWLINE | ':' enum_suite) -enum_suite: NEWLINE INDENT (cname ['=' NUMBER] NEWLINE | pass_stmt NEWLINE)+ DEDENT -struct: ('struct' | 'union') cname (NEWLINE | (':' struct_suite)) -struct_suite: NEWLINE INDENT (cvar_decl | pass_stmt NEWLINE)+ DEDENT -cppclass: 'cppclass' cname [teplate_params] [cppclass_bases] (NEWLINE | ':' cppclass_suite) -cppclass_bases: '(' dotted_PY_NAME (',' dotted_PY_NAME [teplate_params])*')' -cppclass_suite: NEWLINE INDENT (cvar_decl | ctype_decl | pass_stmt NEWLINE)+ DEDENT -# TODO: C++ constructors, operators - -extern_block: 'extern' (cvar_decl | 'from' ('*' | STRING) ['namespace' STRING] [gil_spec] ':' (pass_stmt | extern_suite)) -extern_suite: NEWLINE INDENT (['cdef' | 'cpdef'] (cvar_decl | cdef_type_decl) | ctypedef_stmt)+ DEDENT - -cy_type_kwd: 'struct' | 'union' | 'fused' | 'cppclass' | 'int' | 'double' | 'complex' -cy_kwd: cy_type_kwd | signedness | longness | visibility | 'gil' | 'nogil' | 'namespace' | 'const' | 'by' | 'extern' -PY_NAME: NAME | cy_kwd - -# TODO: Do we really want these? Don't play well with include... -DEF_stmt: 'DEF' NAME '=' testlist -IF_stmt: 'IF' test ':' suite ('ELIF' test ':' suite)* ['ELSE' ':' suite] +# that precede iterable unpackings are blocked; etc. +argument: ( test [comp_for] | + test '=' test | + '**' expr | + star_expr ) + +comp_iter: comp_for | comp_if +comp_for: 'for' exprlist ('in' or_test | for_from_clause) [comp_iter] +comp_if: 'if' test_nocond [comp_iter] + +# not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: NAME + +yield_expr: 'yield' [yield_arg] +yield_arg: 'from' test | testlist + + +# Cython extensions + +# Accommodate to Py2 tokenizer. +ellipsis: '...' | '.' '.' '.' + +signedness: 'unsigned' | 'signed' +longness: 'char' | 'short' | 'long' | 'long' 'long' +# TODO: [unsigned] double doesn't make sens, but we need long double +int_type: signedness [longness] | longness | [signedness] [longness] ('int' | 'double') | 'complex' + +type: ['const'] (NAME ('.' PY_NAME)* | int_type | '(' type ')') ['complex'] [type_qualifiers] +maybe_typed_name: ['const'] (NAME [('.' PY_NAME)* ['complex'] [type_qualifiers] NAME] | (int_type | '(' type ')') ['complex'] [type_qualifiers] NAME) +teplate_params: '[' NAME (',' NAME)* ']' +type_qualifiers: type_qualifier+ +type_qualifier: '*' | '**' | '&' | type_index ('.' NAME [type_index])* +# TODO: old buffer syntax +type_index: '[' [(NUMBER | type (',' type)* | (memory_view_index (',' memory_view_index)*))] ']' +memory_view_index: ':' [':'] [NUMBER] + +address: '&' factor +cast: '<' type ['?'] '>' factor +size_of: 'sizeof' '(' (type) ')' +type_id: 'typeid' '(' (type) ')' +new_expr: 'new' type '(' [arglist] ')' + +# TODO: Restrict cdef_stmt to "top-level" statements. +cdef_stmt: ('cdef' | 'cpdef') (cvar_def | cdef_type_decl | extern_block) +cdef_type_decl: ctype_decl | fused | cclass +ctype_decl: struct | enum | cppclass +# TODO: Does the cdef/ctypedef distinction even make sense for fused? +ctypedef_stmt: 'ctypedef' (cvar_decl | struct | enum | fused) + +# Note: these two are similar but can't be used in an or clause +# as it would cause ambiguity in the LL(1) parser. 
+# Requires a type +cvar_decl: [visibility] type cname (NEWLINE | cfunc) +# Allows an assignment +cvar_def: [visibility] maybe_typed_name (['=' test] (',' PY_NAME ['=' test])* NEWLINE | cfunc) + +visibility: 'public' | 'api' | 'readonly' +# TODO: Standardize gil_spec first or last. +cfunc: [teplate_params] parameters [gil_spec] [exception_value] [gil_spec] (':' suite | NEWLINE) +exception_value: 'except' (['?'] expr | '*' | '+' [PY_NAME]) +gil_spec: 'with' ('gil' | 'nogil') | 'nogil' + +cname: NAME [STRING] +cclass: classdef +fused: 'fused' PY_NAME ':' NEWLINE INDENT ( type NEWLINE)+ DEDENT +enum: 'enum' [cname] (NEWLINE | ':' enum_suite) +enum_suite: NEWLINE INDENT (cname ['=' NUMBER] NEWLINE | pass_stmt NEWLINE)+ DEDENT +struct: ('struct' | 'union') cname (NEWLINE | (':' struct_suite)) +struct_suite: NEWLINE INDENT (cvar_decl | pass_stmt NEWLINE)+ DEDENT +cppclass: 'cppclass' cname [teplate_params] [cppclass_bases] (NEWLINE | ':' cppclass_suite) +cppclass_bases: '(' dotted_PY_NAME (',' dotted_PY_NAME [teplate_params])*')' +cppclass_suite: NEWLINE INDENT (cvar_decl | ctype_decl | pass_stmt NEWLINE)+ DEDENT +# TODO: C++ constructors, operators + +extern_block: 'extern' (cvar_decl | 'from' ('*' | STRING) ['namespace' STRING] [gil_spec] ':' (pass_stmt | extern_suite)) +extern_suite: NEWLINE INDENT (['cdef' | 'cpdef'] (cvar_decl | cdef_type_decl) | ctypedef_stmt)+ DEDENT + +cy_type_kwd: 'struct' | 'union' | 'fused' | 'cppclass' | 'int' | 'double' | 'complex' +cy_kwd: cy_type_kwd | signedness | longness | visibility | 'gil' | 'nogil' | 'namespace' | 'const' | 'by' | 'extern' +PY_NAME: NAME | cy_kwd + +# TODO: Do we really want these? Don't play well with include... +DEF_stmt: 'DEF' NAME '=' testlist +IF_stmt: 'IF' test ':' suite ('ELIF' test ':' suite)* ['ELSE' ':' suite] diff --git a/contrib/tools/cython/Cython/Plex/Actions.py b/contrib/tools/cython/Cython/Plex/Actions.py index 5b978623f9..c88176e716 100644 --- a/contrib/tools/cython/Cython/Plex/Actions.py +++ b/contrib/tools/cython/Cython/Plex/Actions.py @@ -1,4 +1,4 @@ -# cython: auto_pickle=False +# cython: auto_pickle=False #======================================================================= # # Python Lexical Analyser @@ -8,102 +8,102 @@ #======================================================================= class Action(object): - def perform(self, token_stream, text): - pass # abstract + def perform(self, token_stream, text): + pass # abstract - def same_as(self, other): - return self is other + def same_as(self, other): + return self is other class Return(Action): - """ - Internal Plex action which causes |value| to - be returned as the value of the associated token - """ + """ + Internal Plex action which causes |value| to + be returned as the value of the associated token + """ - def __init__(self, value): - self.value = value + def __init__(self, value): + self.value = value - def perform(self, token_stream, text): - return self.value + def perform(self, token_stream, text): + return self.value - def same_as(self, other): - return isinstance(other, Return) and self.value == other.value + def same_as(self, other): + return isinstance(other, Return) and self.value == other.value - def __repr__(self): - return "Return(%s)" % repr(self.value) + def __repr__(self): + return "Return(%s)" % repr(self.value) class Call(Action): - """ - Internal Plex action which causes a function to be called. - """ + """ + Internal Plex action which causes a function to be called. 
+ """ - def __init__(self, function): - self.function = function + def __init__(self, function): + self.function = function - def perform(self, token_stream, text): - return self.function(token_stream, text) + def perform(self, token_stream, text): + return self.function(token_stream, text) - def __repr__(self): - return "Call(%s)" % self.function.__name__ + def __repr__(self): + return "Call(%s)" % self.function.__name__ - def same_as(self, other): - return isinstance(other, Call) and self.function is other.function + def same_as(self, other): + return isinstance(other, Call) and self.function is other.function class Begin(Action): - """ - Begin(state_name) is a Plex action which causes the Scanner to - enter the state |state_name|. See the docstring of Plex.Lexicon - for more information. - """ + """ + Begin(state_name) is a Plex action which causes the Scanner to + enter the state |state_name|. See the docstring of Plex.Lexicon + for more information. + """ - def __init__(self, state_name): - self.state_name = state_name + def __init__(self, state_name): + self.state_name = state_name - def perform(self, token_stream, text): - token_stream.begin(self.state_name) + def perform(self, token_stream, text): + token_stream.begin(self.state_name) - def __repr__(self): - return "Begin(%s)" % self.state_name + def __repr__(self): + return "Begin(%s)" % self.state_name - def same_as(self, other): - return isinstance(other, Begin) and self.state_name == other.state_name + def same_as(self, other): + return isinstance(other, Begin) and self.state_name == other.state_name class Ignore(Action): - """ - IGNORE is a Plex action which causes its associated token - to be ignored. See the docstring of Plex.Lexicon for more - information. - """ - - def perform(self, token_stream, text): - return None - - def __repr__(self): - return "IGNORE" - - + """ + IGNORE is a Plex action which causes its associated token + to be ignored. See the docstring of Plex.Lexicon for more + information. + """ + + def perform(self, token_stream, text): + return None + + def __repr__(self): + return "IGNORE" + + IGNORE = Ignore() #IGNORE.__doc__ = Ignore.__doc__ - + class Text(Action): - """ - TEXT is a Plex action which causes the text of a token to - be returned as the value of the token. See the docstring of - Plex.Lexicon for more information. - """ + """ + TEXT is a Plex action which causes the text of a token to + be returned as the value of the token. See the docstring of + Plex.Lexicon for more information. + """ + + def perform(self, token_stream, text): + return text - def perform(self, token_stream, text): - return text + def __repr__(self): + return "TEXT" - def __repr__(self): - return "TEXT" - TEXT = Text() #TEXT.__doc__ = Text.__doc__ diff --git a/contrib/tools/cython/Cython/Plex/DFA.py b/contrib/tools/cython/Cython/Plex/DFA.py index 039f0d8786..76324621fc 100644 --- a/contrib/tools/cython/Cython/Plex/DFA.py +++ b/contrib/tools/cython/Cython/Plex/DFA.py @@ -13,152 +13,152 @@ from .Machines import LOWEST_PRIORITY from .Transitions import TransitionMap -def nfa_to_dfa(old_machine, debug=None): - """ - Given a nondeterministic Machine, return a new equivalent - Machine which is deterministic. - """ - # We build a new machine whose states correspond to sets of states - # in the old machine. Initially we add a new state corresponding to - # the epsilon-closure of each initial old state. Then we give transitions - # to each new state which are the union of all transitions out of any - # of the corresponding old states. 
The new state reached on a given - # character is the one corresponding to the set of states reachable - # on that character from any of the old states. As new combinations of - # old states are created, new states are added as needed until closure - # is reached. - new_machine = Machines.FastMachine() - state_map = StateMap(new_machine) - # Seed the process using the initial states of the old machine. - # Make the corresponding new states into initial states of the new - # machine with the same names. - for (key, old_state) in old_machine.initial_states.items(): - new_state = state_map.old_to_new(epsilon_closure(old_state)) - new_machine.make_initial_state(key, new_state) - # Tricky bit here: we add things to the end of this list while we're - # iterating over it. The iteration stops when closure is achieved. - for new_state in new_machine.states: - transitions = TransitionMap() - for old_state in state_map.new_to_old(new_state): - for event, old_target_states in old_state.transitions.items(): - if event and old_target_states: - transitions.add_set(event, set_epsilon_closure(old_target_states)) - for event, old_states in transitions.items(): - new_machine.add_transitions(new_state, event, state_map.old_to_new(old_states)) - if debug: - debug.write("\n===== State Mapping =====\n") - state_map.dump(debug) - return new_machine - - +def nfa_to_dfa(old_machine, debug=None): + """ + Given a nondeterministic Machine, return a new equivalent + Machine which is deterministic. + """ + # We build a new machine whose states correspond to sets of states + # in the old machine. Initially we add a new state corresponding to + # the epsilon-closure of each initial old state. Then we give transitions + # to each new state which are the union of all transitions out of any + # of the corresponding old states. The new state reached on a given + # character is the one corresponding to the set of states reachable + # on that character from any of the old states. As new combinations of + # old states are created, new states are added as needed until closure + # is reached. + new_machine = Machines.FastMachine() + state_map = StateMap(new_machine) + # Seed the process using the initial states of the old machine. + # Make the corresponding new states into initial states of the new + # machine with the same names. + for (key, old_state) in old_machine.initial_states.items(): + new_state = state_map.old_to_new(epsilon_closure(old_state)) + new_machine.make_initial_state(key, new_state) + # Tricky bit here: we add things to the end of this list while we're + # iterating over it. The iteration stops when closure is achieved. + for new_state in new_machine.states: + transitions = TransitionMap() + for old_state in state_map.new_to_old(new_state): + for event, old_target_states in old_state.transitions.items(): + if event and old_target_states: + transitions.add_set(event, set_epsilon_closure(old_target_states)) + for event, old_states in transitions.items(): + new_machine.add_transitions(new_state, event, state_map.old_to_new(old_states)) + if debug: + debug.write("\n===== State Mapping =====\n") + state_map.dump(debug) + return new_machine + + def set_epsilon_closure(state_set): - """ - Given a set of states, return the union of the epsilon - closures of its member states. - """ - result = {} - for state1 in state_set: - for state2 in epsilon_closure(state1): - result[state2] = 1 - return result - - + """ + Given a set of states, return the union of the epsilon + closures of its member states. 
+ """ + result = {} + for state1 in state_set: + for state2 in epsilon_closure(state1): + result[state2] = 1 + return result + + def epsilon_closure(state): - """ - Return the set of states reachable from the given state - by epsilon moves. - """ - # Cache the result - result = state.epsilon_closure - if result is None: - result = {} - state.epsilon_closure = result - add_to_epsilon_closure(result, state) - return result - - + """ + Return the set of states reachable from the given state + by epsilon moves. + """ + # Cache the result + result = state.epsilon_closure + if result is None: + result = {} + state.epsilon_closure = result + add_to_epsilon_closure(result, state) + return result + + def add_to_epsilon_closure(state_set, state): - """ - Recursively add to |state_set| states reachable from the given state - by epsilon moves. - """ - if not state_set.get(state, 0): - state_set[state] = 1 - state_set_2 = state.transitions.get_epsilon() - if state_set_2: - for state2 in state_set_2: - add_to_epsilon_closure(state_set, state2) - - + """ + Recursively add to |state_set| states reachable from the given state + by epsilon moves. + """ + if not state_set.get(state, 0): + state_set[state] = 1 + state_set_2 = state.transitions.get_epsilon() + if state_set_2: + for state2 in state_set_2: + add_to_epsilon_closure(state_set, state2) + + class StateMap(object): """ - Helper class used by nfa_to_dfa() to map back and forth between - sets of states from the old machine and states of the new machine. + Helper class used by nfa_to_dfa() to map back and forth between + sets of states from the old machine and states of the new machine. """ - new_machine = None # Machine - old_to_new_dict = None # {(old_state,...) : new_state} - new_to_old_dict = None # {id(new_state) : old_state_set} - - def __init__(self, new_machine): - self.new_machine = new_machine - self.old_to_new_dict = {} - self.new_to_old_dict = {} - - def old_to_new(self, old_state_set): - """ - Return the state of the new machine corresponding to the - set of old machine states represented by |state_set|. A new - state will be created if necessary. If any of the old states - are accepting states, the new state will be an accepting state - with the highest priority action from the old states. - """ - key = self.make_key(old_state_set) - new_state = self.old_to_new_dict.get(key, None) - if not new_state: - action = self.highest_priority_action(old_state_set) - new_state = self.new_machine.new_state(action) - self.old_to_new_dict[key] = new_state - self.new_to_old_dict[id(new_state)] = old_state_set - #for old_state in old_state_set.keys(): - #new_state.merge_actions(old_state) - return new_state - - def highest_priority_action(self, state_set): - best_action = None - best_priority = LOWEST_PRIORITY - for state in state_set: - priority = state.action_priority - if priority > best_priority: - best_action = state.action - best_priority = priority - return best_action - - # def old_to_new_set(self, old_state_set): - # """ - # Return the new state corresponding to a set of old states as - # a singleton set. - # """ - # return {self.old_to_new(old_state_set):1} - - def new_to_old(self, new_state): - """Given a new state, return a set of corresponding old states.""" - return self.new_to_old_dict[id(new_state)] - - def make_key(self, state_set): - """ - Convert a set of states into a uniquified - sorted tuple suitable for use as a dictionary key. 
- """ - lst = list(state_set) - lst.sort() - return tuple(lst) - - def dump(self, file): - from .Transitions import state_set_str - - for new_state in self.new_machine.states: - old_state_set = self.new_to_old_dict[id(new_state)] - file.write(" State %s <-- %s\n" % ( - new_state['number'], state_set_str(old_state_set))) - - + new_machine = None # Machine + old_to_new_dict = None # {(old_state,...) : new_state} + new_to_old_dict = None # {id(new_state) : old_state_set} + + def __init__(self, new_machine): + self.new_machine = new_machine + self.old_to_new_dict = {} + self.new_to_old_dict = {} + + def old_to_new(self, old_state_set): + """ + Return the state of the new machine corresponding to the + set of old machine states represented by |state_set|. A new + state will be created if necessary. If any of the old states + are accepting states, the new state will be an accepting state + with the highest priority action from the old states. + """ + key = self.make_key(old_state_set) + new_state = self.old_to_new_dict.get(key, None) + if not new_state: + action = self.highest_priority_action(old_state_set) + new_state = self.new_machine.new_state(action) + self.old_to_new_dict[key] = new_state + self.new_to_old_dict[id(new_state)] = old_state_set + #for old_state in old_state_set.keys(): + #new_state.merge_actions(old_state) + return new_state + + def highest_priority_action(self, state_set): + best_action = None + best_priority = LOWEST_PRIORITY + for state in state_set: + priority = state.action_priority + if priority > best_priority: + best_action = state.action + best_priority = priority + return best_action + + # def old_to_new_set(self, old_state_set): + # """ + # Return the new state corresponding to a set of old states as + # a singleton set. + # """ + # return {self.old_to_new(old_state_set):1} + + def new_to_old(self, new_state): + """Given a new state, return a set of corresponding old states.""" + return self.new_to_old_dict[id(new_state)] + + def make_key(self, state_set): + """ + Convert a set of states into a uniquified + sorted tuple suitable for use as a dictionary key. 
+ """ + lst = list(state_set) + lst.sort() + return tuple(lst) + + def dump(self, file): + from .Transitions import state_set_str + + for new_state in self.new_machine.states: + old_state_set = self.new_to_old_dict[id(new_state)] + file.write(" State %s <-- %s\n" % ( + new_state['number'], state_set_str(old_state_set))) + + diff --git a/contrib/tools/cython/Cython/Plex/Errors.py b/contrib/tools/cython/Cython/Plex/Errors.py index 21e2d2ac3e..f460100d77 100644 --- a/contrib/tools/cython/Cython/Plex/Errors.py +++ b/contrib/tools/cython/Cython/Plex/Errors.py @@ -6,49 +6,49 @@ # #======================================================================= - + class PlexError(Exception): - message = "" + message = "" + - class PlexTypeError(PlexError, TypeError): - pass + pass + - class PlexValueError(PlexError, ValueError): - pass + pass + - class InvalidRegex(PlexError): - pass + pass + - class InvalidToken(PlexError): - def __init__(self, token_number, message): - PlexError.__init__(self, "Token number %d: %s" % (token_number, message)) + def __init__(self, token_number, message): + PlexError.__init__(self, "Token number %d: %s" % (token_number, message)) class InvalidScanner(PlexError): - pass + pass + - class AmbiguousAction(PlexError): - message = "Two tokens with different actions can match the same string" + message = "Two tokens with different actions can match the same string" + + def __init__(self): + pass - def __init__(self): - pass - class UnrecognizedInput(PlexError): - scanner = None - position = None - state_name = None - - def __init__(self, scanner, state_name): - self.scanner = scanner - self.position = scanner.get_position() - self.state_name = state_name - - def __str__(self): - return ("'%s', line %d, char %d: Token not recognised in state %r" % ( - self.position + (self.state_name,))) + scanner = None + position = None + state_name = None + + def __init__(self, scanner, state_name): + self.scanner = scanner + self.position = scanner.get_position() + self.state_name = state_name + + def __str__(self): + return ("'%s', line %d, char %d: Token not recognised in state %r" % ( + self.position + (self.state_name,))) diff --git a/contrib/tools/cython/Cython/Plex/Lexicons.py b/contrib/tools/cython/Cython/Plex/Lexicons.py index eaacbc9c54..787f5854b8 100644 --- a/contrib/tools/cython/Cython/Plex/Lexicons.py +++ b/contrib/tools/cython/Cython/Plex/Lexicons.py @@ -22,179 +22,179 @@ DUMP_DFA = 2 class State(object): - """ - This class is used as part of a Plex.Lexicon specification to - introduce a user-defined state. + """ + This class is used as part of a Plex.Lexicon specification to + introduce a user-defined state. - Constructor: + Constructor: - State(name, token_specifications) - """ + State(name, token_specifications) + """ - name = None - tokens = None + name = None + tokens = None + + def __init__(self, name, tokens): + self.name = name + self.tokens = tokens - def __init__(self, name, tokens): - self.name = name - self.tokens = tokens - class Lexicon(object): - """ - Lexicon(specification) builds a lexical analyser from the given - |specification|. The specification consists of a list of - specification items. Each specification item may be either: + """ + Lexicon(specification) builds a lexical analyser from the given + |specification|. The specification consists of a list of + specification items. 
Each specification item may be either: - 1) A token definition, which is a tuple: - (pattern, action) - - The |pattern| is a regular expression built using the - constructors defined in the Plex module. - - The |action| is the action to be performed when this pattern - is recognised (see below). - - 2) A state definition: - - State(name, tokens) - - where |name| is a character string naming the state, - and |tokens| is a list of token definitions as - above. The meaning and usage of states is described - below. - - Actions - ------- - - The |action| in a token specification may be one of three things: - - 1) A function, which is called as follows: - - function(scanner, text) - - where |scanner| is the relevant Scanner instance, and |text| - is the matched text. If the function returns anything - other than None, that value is returned as the value of the - token. If it returns None, scanning continues as if the IGNORE - action were specified (see below). - - 2) One of the following special actions: - - IGNORE means that the recognised characters will be treated as - white space and ignored. Scanning will continue until - the next non-ignored token is recognised before returning. - - TEXT causes the scanned text itself to be returned as the - value of the token. - - 3) Any other value, which is returned as the value of the token. - - States - ------ - - At any given time, the scanner is in one of a number of states. - Associated with each state is a set of possible tokens. When scanning, - only tokens associated with the current state are recognised. - - There is a default state, whose name is the empty string. Token - definitions which are not inside any State definition belong to - the default state. - - The initial state of the scanner is the default state. The state can - be changed in one of two ways: - - 1) Using Begin(state_name) as the action of a token. - - 2) Calling the begin(state_name) method of the Scanner. - - To change back to the default state, use '' as the state name. 
- """ - - machine = None # Machine - tables = None # StateTableMachine - - def __init__(self, specifications, debug=None, debug_flags=7, timings=None): - if not isinstance(specifications, list): - raise Errors.InvalidScanner("Scanner definition is not a list") - if timings: - from .Timing import time - - total_time = 0.0 - time1 = time() - nfa = Machines.Machine() - default_initial_state = nfa.new_initial_state('') - token_number = 1 - for spec in specifications: - if isinstance(spec, State): - user_initial_state = nfa.new_initial_state(spec.name) - for token in spec.tokens: - self.add_token_to_machine( - nfa, user_initial_state, token, token_number) - token_number += 1 - elif isinstance(spec, tuple): - self.add_token_to_machine( - nfa, default_initial_state, spec, token_number) - token_number += 1 - else: - raise Errors.InvalidToken( - token_number, - "Expected a token definition (tuple) or State instance") - if timings: - time2 = time() - total_time = total_time + (time2 - time1) - time3 = time() - if debug and (debug_flags & 1): - debug.write("\n============= NFA ===========\n") - nfa.dump(debug) - dfa = DFA.nfa_to_dfa(nfa, debug=(debug_flags & 3) == 3 and debug) - if timings: - time4 = time() - total_time = total_time + (time4 - time3) - if debug and (debug_flags & 2): - debug.write("\n============= DFA ===========\n") - dfa.dump(debug) - if timings: - timings.write("Constructing NFA : %5.2f\n" % (time2 - time1)) - timings.write("Converting to DFA: %5.2f\n" % (time4 - time3)) - timings.write("TOTAL : %5.2f\n" % total_time) - self.machine = dfa - - def add_token_to_machine(self, machine, initial_state, token_spec, token_number): + (pattern, action) + + The |pattern| is a regular axpression built using the + constructors defined in the Plex module. + + The |action| is the action to be performed when this pattern + is recognised (see below). + + 2) A state definition: + + State(name, tokens) + + where |name| is a character string naming the state, + and |tokens| is a list of token definitions as + above. The meaning and usage of states is described + below. + + Actions + ------- + + The |action| in a token specication may be one of three things: + + 1) A function, which is called as follows: + + function(scanner, text) + + where |scanner| is the relevant Scanner instance, and |text| + is the matched text. If the function returns anything + other than None, that value is returned as the value of the + token. If it returns None, scanning continues as if the IGNORE + action were specified (see below). + + 2) One of the following special actions: + + IGNORE means that the recognised characters will be treated as + white space and ignored. Scanning will continue until + the next non-ignored token is recognised before returning. + + TEXT causes the scanned text itself to be returned as the + value of the token. + + 3) Any other value, which is returned as the value of the token. + + States + ------ + + At any given time, the scanner is in one of a number of states. + Associated with each state is a set of possible tokens. When scanning, + only tokens associated with the current state are recognised. + + There is a default state, whose name is the empty string. Token + definitions which are not inside any State definition belong to + the default state. + + The initial state of the scanner is the default state. The state can + be changed in one of two ways: + + 1) Using Begin(state_name) as the action of a token. + + 2) Calling the begin(state_name) method of the Scanner. 
+ + To change back to the default state, use '' as the state name. + """ + + machine = None # Machine + tables = None # StateTableMachine + + def __init__(self, specifications, debug=None, debug_flags=7, timings=None): + if not isinstance(specifications, list): + raise Errors.InvalidScanner("Scanner definition is not a list") + if timings: + from .Timing import time + + total_time = 0.0 + time1 = time() + nfa = Machines.Machine() + default_initial_state = nfa.new_initial_state('') + token_number = 1 + for spec in specifications: + if isinstance(spec, State): + user_initial_state = nfa.new_initial_state(spec.name) + for token in spec.tokens: + self.add_token_to_machine( + nfa, user_initial_state, token, token_number) + token_number += 1 + elif isinstance(spec, tuple): + self.add_token_to_machine( + nfa, default_initial_state, spec, token_number) + token_number += 1 + else: + raise Errors.InvalidToken( + token_number, + "Expected a token definition (tuple) or State instance") + if timings: + time2 = time() + total_time = total_time + (time2 - time1) + time3 = time() + if debug and (debug_flags & 1): + debug.write("\n============= NFA ===========\n") + nfa.dump(debug) + dfa = DFA.nfa_to_dfa(nfa, debug=(debug_flags & 3) == 3 and debug) + if timings: + time4 = time() + total_time = total_time + (time4 - time3) + if debug and (debug_flags & 2): + debug.write("\n============= DFA ===========\n") + dfa.dump(debug) + if timings: + timings.write("Constructing NFA : %5.2f\n" % (time2 - time1)) + timings.write("Converting to DFA: %5.2f\n" % (time4 - time3)) + timings.write("TOTAL : %5.2f\n" % total_time) + self.machine = dfa + + def add_token_to_machine(self, machine, initial_state, token_spec, token_number): try: - (re, action_spec) = self.parse_token_definition(token_spec) - # Disabled this -- matching empty strings can be useful - #if re.nullable: - # raise Errors.InvalidToken( - # token_number, "Pattern can match 0 input symbols") - if isinstance(action_spec, Actions.Action): - action = action_spec - else: - try: - action_spec.__call__ - except AttributeError: - action = Actions.Return(action_spec) - else: - action = Actions.Call(action_spec) - final_state = machine.new_state() - re.build_machine(machine, initial_state, final_state, - match_bol=1, nocase=0) - final_state.set_action(action, priority=-token_number) - except Errors.PlexError as e: - raise e.__class__("Token number %d: %s" % (token_number, e)) - - def parse_token_definition(self, token_spec): - if not isinstance(token_spec, tuple): - raise Errors.InvalidToken("Token definition is not a tuple") - if len(token_spec) != 2: - raise Errors.InvalidToken("Wrong number of items in token definition") - pattern, action = token_spec - if not isinstance(pattern, Regexps.RE): - raise Errors.InvalidToken("Pattern is not an RE instance") - return (pattern, action) - - def get_initial_state(self, name): - return self.machine.get_initial_state(name) + (re, action_spec) = self.parse_token_definition(token_spec) + # Disabled this -- matching empty strings can be useful + #if re.nullable: + # raise Errors.InvalidToken( + # token_number, "Pattern can match 0 input symbols") + if isinstance(action_spec, Actions.Action): + action = action_spec + else: + try: + action_spec.__call__ + except AttributeError: + action = Actions.Return(action_spec) + else: + action = Actions.Call(action_spec) + final_state = machine.new_state() + re.build_machine(machine, initial_state, final_state, + match_bol=1, nocase=0) + final_state.set_action(action, 
priority=-token_number) + except Errors.PlexError as e: + raise e.__class__("Token number %d: %s" % (token_number, e)) + + def parse_token_definition(self, token_spec): + if not isinstance(token_spec, tuple): + raise Errors.InvalidToken("Token definition is not a tuple") + if len(token_spec) != 2: + raise Errors.InvalidToken("Wrong number of items in token definition") + pattern, action = token_spec + if not isinstance(pattern, Regexps.RE): + raise Errors.InvalidToken("Pattern is not an RE instance") + return (pattern, action) + + def get_initial_state(self, name): + return self.machine.get_initial_state(name) diff --git a/contrib/tools/cython/Cython/Plex/Machines.py b/contrib/tools/cython/Cython/Plex/Machines.py index 062e8087e6..398850976b 100644 --- a/contrib/tools/cython/Cython/Plex/Machines.py +++ b/contrib/tools/cython/Cython/Plex/Machines.py @@ -12,244 +12,244 @@ import sys from .Transitions import TransitionMap -try: - from sys import maxsize as maxint -except ImportError: - from sys import maxint - -try: - unichr -except NameError: - unichr = chr - -LOWEST_PRIORITY = -maxint - - +try: + from sys import maxsize as maxint +except ImportError: + from sys import maxint + +try: + unichr +except NameError: + unichr = chr + +LOWEST_PRIORITY = -maxint + + class Machine(object): - """A collection of Nodes representing an NFA or DFA.""" - states = None # [Node] - next_state_number = 1 - initial_states = None # {(name, bol): Node} - - def __init__(self): - self.states = [] - self.initial_states = {} - - def __del__(self): - #print "Destroying", self ### - for state in self.states: - state.destroy() - - def new_state(self): - """Add a new state to the machine and return it.""" - s = Node() - n = self.next_state_number - self.next_state_number = n + 1 - s.number = n - self.states.append(s) - return s - - def new_initial_state(self, name): - state = self.new_state() - self.make_initial_state(name, state) - return state - - def make_initial_state(self, name, state): - self.initial_states[name] = state - - def get_initial_state(self, name): - return self.initial_states[name] - - def dump(self, file): - file.write("Plex.Machine:\n") - if self.initial_states is not None: - file.write(" Initial states:\n") - for (name, state) in sorted(self.initial_states.items()): - file.write(" '%s': %d\n" % (name, state.number)) - for s in self.states: - s.dump(file) - - + """A collection of Nodes representing an NFA or DFA.""" + states = None # [Node] + next_state_number = 1 + initial_states = None # {(name, bol): Node} + + def __init__(self): + self.states = [] + self.initial_states = {} + + def __del__(self): + #print "Destroying", self ### + for state in self.states: + state.destroy() + + def new_state(self): + """Add a new state to the machine and return it.""" + s = Node() + n = self.next_state_number + self.next_state_number = n + 1 + s.number = n + self.states.append(s) + return s + + def new_initial_state(self, name): + state = self.new_state() + self.make_initial_state(name, state) + return state + + def make_initial_state(self, name, state): + self.initial_states[name] = state + + def get_initial_state(self, name): + return self.initial_states[name] + + def dump(self, file): + file.write("Plex.Machine:\n") + if self.initial_states is not None: + file.write(" Initial states:\n") + for (name, state) in sorted(self.initial_states.items()): + file.write(" '%s': %d\n" % (name, state.number)) + for s in self.states: + s.dump(file) + + class Node(object): - """A state of an NFA or DFA.""" - transitions = None 
# TransitionMap - action = None # Action - action_priority = None # integer - number = 0 # for debug output - epsilon_closure = None # used by nfa_to_dfa() - - def __init__(self): - # Preinitialise the list of empty transitions, because - # the nfa-to-dfa algorithm needs it - #self.transitions = {'':[]} - self.transitions = TransitionMap() - self.action_priority = LOWEST_PRIORITY - - def destroy(self): - #print "Destroying", self ### - self.transitions = None - self.action = None - self.epsilon_closure = None - - def add_transition(self, event, new_state): - self.transitions.add(event, new_state) - - def link_to(self, state): - """Add an epsilon-move from this state to another state.""" - self.add_transition('', state) - - def set_action(self, action, priority): - """Make this an accepting state with the given action. If - there is already an action, choose the action with highest - priority.""" - if priority > self.action_priority: - self.action = action - self.action_priority = priority - - def get_action(self): - return self.action - - def get_action_priority(self): - return self.action_priority - - def is_accepting(self): - return self.action is not None - - def __str__(self): - return "State %d" % self.number - - def dump(self, file): - # Header - file.write(" State %d:\n" % self.number) - # Transitions - # self.dump_transitions(file) - self.transitions.dump(file) - # Action - action = self.action - priority = self.action_priority - if action is not None: - file.write(" %s [priority %d]\n" % (action, priority)) - - def __lt__(self, other): - return self.number < other.number - - + """A state of an NFA or DFA.""" + transitions = None # TransitionMap + action = None # Action + action_priority = None # integer + number = 0 # for debug output + epsilon_closure = None # used by nfa_to_dfa() + + def __init__(self): + # Preinitialise the list of empty transitions, because + # the nfa-to-dfa algorithm needs it + #self.transitions = {'':[]} + self.transitions = TransitionMap() + self.action_priority = LOWEST_PRIORITY + + def destroy(self): + #print "Destroying", self ### + self.transitions = None + self.action = None + self.epsilon_closure = None + + def add_transition(self, event, new_state): + self.transitions.add(event, new_state) + + def link_to(self, state): + """Add an epsilon-move from this state to another state.""" + self.add_transition('', state) + + def set_action(self, action, priority): + """Make this an accepting state with the given action. If + there is already an action, choose the action with highest + priority.""" + if priority > self.action_priority: + self.action = action + self.action_priority = priority + + def get_action(self): + return self.action + + def get_action_priority(self): + return self.action_priority + + def is_accepting(self): + return self.action is not None + + def __str__(self): + return "State %d" % self.number + + def dump(self, file): + # Header + file.write(" State %d:\n" % self.number) + # Transitions + # self.dump_transitions(file) + self.transitions.dump(file) + # Action + action = self.action + priority = self.action_priority + if action is not None: + file.write(" %s [priority %d]\n" % (action, priority)) + + def __lt__(self, other): + return self.number < other.number + + class FastMachine(object): - """ - FastMachine is a deterministic machine represented in a way that - allows fast scanning. 
- """ - initial_states = None # {state_name:state} - states = None # [state] where state = {event:state, 'else':state, 'action':Action} - next_number = 1 # for debugging - - new_state_template = { - '': None, 'bol': None, 'eol': None, 'eof': None, 'else': None - } - - def __init__(self): - self.initial_states = {} - self.states = [] - - def __del__(self): - for state in self.states: - state.clear() - - def new_state(self, action=None): - number = self.next_number - self.next_number = number + 1 - result = self.new_state_template.copy() - result['number'] = number - result['action'] = action - self.states.append(result) - return result - - def make_initial_state(self, name, state): - self.initial_states[name] = state - - def add_transitions(self, state, event, new_state, maxint=maxint): - if type(event) is tuple: - code0, code1 = event - if code0 == -maxint: - state['else'] = new_state - elif code1 != maxint: - while code0 < code1: - state[unichr(code0)] = new_state - code0 += 1 - else: - state[event] = new_state - - def get_initial_state(self, name): - return self.initial_states[name] - - def dump(self, file): - file.write("Plex.FastMachine:\n") - file.write(" Initial states:\n") - for name, state in sorted(self.initial_states.items()): - file.write(" %s: %s\n" % (repr(name), state['number'])) - for state in self.states: - self.dump_state(state, file) - - def dump_state(self, state, file): - # Header - file.write(" State %d:\n" % state['number']) - # Transitions - self.dump_transitions(state, file) - # Action - action = state['action'] - if action is not None: - file.write(" %s\n" % action) - - def dump_transitions(self, state, file): - chars_leading_to_state = {} - special_to_state = {} - for (c, s) in state.items(): - if len(c) == 1: - chars = chars_leading_to_state.get(id(s), None) - if chars is None: - chars = [] - chars_leading_to_state[id(s)] = chars - chars.append(c) - elif len(c) <= 4: - special_to_state[c] = s - ranges_to_state = {} - for state in self.states: - char_list = chars_leading_to_state.get(id(state), None) - if char_list: - ranges = self.chars_to_ranges(char_list) - ranges_to_state[ranges] = state - ranges_list = ranges_to_state.keys() - ranges_list.sort() - for ranges in ranges_list: - key = self.ranges_to_string(ranges) - state = ranges_to_state[ranges] - file.write(" %s --> State %d\n" % (key, state['number'])) - for key in ('bol', 'eol', 'eof', 'else'): - state = special_to_state.get(key, None) - if state: - file.write(" %s --> State %d\n" % (key, state['number'])) - - def chars_to_ranges(self, char_list): - char_list.sort() - i = 0 - n = len(char_list) - result = [] - while i < n: - c1 = ord(char_list[i]) - c2 = c1 - i += 1 - while i < n and ord(char_list[i]) == c2 + 1: - i += 1 - c2 += 1 - result.append((chr(c1), chr(c2))) - return tuple(result) - - def ranges_to_string(self, range_list): - return ','.join(map(self.range_to_string, range_list)) - - def range_to_string(self, range_tuple): - (c1, c2) = range_tuple - if c1 == c2: - return repr(c1) - else: - return "%s..%s" % (repr(c1), repr(c2)) + """ + FastMachine is a deterministic machine represented in a way that + allows fast scanning. 
+ """ + initial_states = None # {state_name:state} + states = None # [state] where state = {event:state, 'else':state, 'action':Action} + next_number = 1 # for debugging + + new_state_template = { + '': None, 'bol': None, 'eol': None, 'eof': None, 'else': None + } + + def __init__(self): + self.initial_states = {} + self.states = [] + + def __del__(self): + for state in self.states: + state.clear() + + def new_state(self, action=None): + number = self.next_number + self.next_number = number + 1 + result = self.new_state_template.copy() + result['number'] = number + result['action'] = action + self.states.append(result) + return result + + def make_initial_state(self, name, state): + self.initial_states[name] = state + + def add_transitions(self, state, event, new_state, maxint=maxint): + if type(event) is tuple: + code0, code1 = event + if code0 == -maxint: + state['else'] = new_state + elif code1 != maxint: + while code0 < code1: + state[unichr(code0)] = new_state + code0 += 1 + else: + state[event] = new_state + + def get_initial_state(self, name): + return self.initial_states[name] + + def dump(self, file): + file.write("Plex.FastMachine:\n") + file.write(" Initial states:\n") + for name, state in sorted(self.initial_states.items()): + file.write(" %s: %s\n" % (repr(name), state['number'])) + for state in self.states: + self.dump_state(state, file) + + def dump_state(self, state, file): + # Header + file.write(" State %d:\n" % state['number']) + # Transitions + self.dump_transitions(state, file) + # Action + action = state['action'] + if action is not None: + file.write(" %s\n" % action) + + def dump_transitions(self, state, file): + chars_leading_to_state = {} + special_to_state = {} + for (c, s) in state.items(): + if len(c) == 1: + chars = chars_leading_to_state.get(id(s), None) + if chars is None: + chars = [] + chars_leading_to_state[id(s)] = chars + chars.append(c) + elif len(c) <= 4: + special_to_state[c] = s + ranges_to_state = {} + for state in self.states: + char_list = chars_leading_to_state.get(id(state), None) + if char_list: + ranges = self.chars_to_ranges(char_list) + ranges_to_state[ranges] = state + ranges_list = ranges_to_state.keys() + ranges_list.sort() + for ranges in ranges_list: + key = self.ranges_to_string(ranges) + state = ranges_to_state[ranges] + file.write(" %s --> State %d\n" % (key, state['number'])) + for key in ('bol', 'eol', 'eof', 'else'): + state = special_to_state.get(key, None) + if state: + file.write(" %s --> State %d\n" % (key, state['number'])) + + def chars_to_ranges(self, char_list): + char_list.sort() + i = 0 + n = len(char_list) + result = [] + while i < n: + c1 = ord(char_list[i]) + c2 = c1 + i += 1 + while i < n and ord(char_list[i]) == c2 + 1: + i += 1 + c2 += 1 + result.append((chr(c1), chr(c2))) + return tuple(result) + + def ranges_to_string(self, range_list): + return ','.join(map(self.range_to_string, range_list)) + + def range_to_string(self, range_tuple): + (c1, c2) = range_tuple + if c1 == c2: + return repr(c1) + else: + return "%s..%s" % (repr(c1), repr(c2)) diff --git a/contrib/tools/cython/Cython/Plex/Regexps.py b/contrib/tools/cython/Cython/Plex/Regexps.py index ffa3ded1d7..41816c939a 100644 --- a/contrib/tools/cython/Cython/Plex/Regexps.py +++ b/contrib/tools/cython/Cython/Plex/Regexps.py @@ -9,10 +9,10 @@ from __future__ import absolute_import import types -try: - from sys import maxsize as maxint -except ImportError: - from sys import maxint +try: + from sys import maxsize as maxint +except ImportError: + from sys import 
maxint from . import Errors @@ -45,15 +45,15 @@ def chars_to_ranges(s): while i < n: code1 = ord(char_list[i]) code2 = code1 + 1 - i += 1 + i += 1 while i < n and code2 >= ord(char_list[i]): - code2 += 1 - i += 1 + code2 += 1 + i += 1 result.append(code1) result.append(code2) return result - + def uppercase_range(code1, code2): """ If the range of characters from code1 to code2-1 includes any @@ -67,7 +67,7 @@ def uppercase_range(code1, code2): else: return None - + def lowercase_range(code1, code2): """ If the range of characters from code1 to code2-1 includes any @@ -81,16 +81,16 @@ def lowercase_range(code1, code2): else: return None - + def CodeRanges(code_list): """ Given a list of codes as returned by chars_to_ranges, return an RE which will match a character in any of the ranges. """ - re_list = [CodeRange(code_list[i], code_list[i + 1]) for i in range(0, len(code_list), 2)] + re_list = [CodeRange(code_list[i], code_list[i + 1]) for i in range(0, len(code_list), 2)] return Alt(*re_list) - + def CodeRange(code1, code2): """ CodeRange(code1, code2) is an RE which matches any character @@ -98,12 +98,12 @@ def CodeRange(code1, code2): """ if code1 <= nl_code < code2: return Alt(RawCodeRange(code1, nl_code), - RawNewline, - RawCodeRange(nl_code + 1, code2)) + RawNewline, + RawCodeRange(nl_code + 1, code2)) else: return RawCodeRange(code1, code2) - + # # Abstract classes # @@ -116,12 +116,12 @@ class RE(object): re1 | re2 is an RE which matches either |re1| or |re2| """ - nullable = 1 # True if this RE can match 0 input symbols - match_nl = 1 # True if this RE can match a string ending with '\n' - str = None # Set to a string to override the class's __str__ result + nullable = 1 # True if this RE can match 0 input symbols + match_nl = 1 # True if this RE can match a string ending with '\n' + str = None # Set to a string to override the class's __str__ result def build_machine(self, machine, initial_state, final_state, - match_bol, nocase): + match_bol, nocase): """ This method should add states to |machine| to implement this RE, starting at |initial_state| and ending at |final_state|. @@ -130,7 +130,7 @@ class RE(object): letters should be treated as equivalent. """ raise NotImplementedError("%s.build_machine not implemented" % - self.__class__.__name__) + self.__class__.__name__) def build_opt(self, m, initial_state, c): """ @@ -166,18 +166,18 @@ class RE(object): self.check_string(num, value) if len(value) != 1: raise Errors.PlexValueError("Invalid value for argument %d of Plex.%s." - "Expected a string of length 1, got: %s" % ( - num, self.__class__.__name__, repr(value))) + "Expected a string of length 1, got: %s" % ( + num, self.__class__.__name__, repr(value))) def wrong_type(self, num, value, expected): if type(value) == types.InstanceType: - got = "%s.%s instance" % ( - value.__class__.__module__, value.__class__.__name__) + got = "%s.%s instance" % ( + value.__class__.__module__, value.__class__.__name__) else: got = type(value).__name__ raise Errors.PlexTypeError("Invalid type for argument %d of Plex.%s " - "(expected %s, got %s" % ( - num, self.__class__.__name__, expected, got)) + "(expected %s, got %s" % ( + num, self.__class__.__name__, expected, got)) # # Primitive RE constructors @@ -217,7 +217,7 @@ class RE(object): ## def calc_str(self): ## return "Char(%s)" % repr(self.char) - + def Char(c): """ Char(c) is an RE which matches the character |c|. 
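An aside on the chars_to_ranges hunk above: it collapses a string of characters into maximal runs of consecutive character codes, which is what lets CodeRanges() emit one CodeRange per run instead of one per character. A minimal self-contained sketch of that merging step (illustrative only; the patched helper returns a flat [code1, code2, code1, code2, ...] list, not the tuples used here):

    def merge_codes_to_ranges(s):
        # Sort the distinct character codes, then greedily extend the
        # current half-open [start, end) run while codes stay consecutive.
        codes = sorted(ord(c) for c in set(s))
        runs = []
        for code in codes:
            if runs and code == runs[-1][1]:
                runs[-1] = (runs[-1][0], code + 1)  # extend the current run
            else:
                runs.append((code, code + 1))       # start a new run
        return runs

    print(merge_codes_to_ranges("abcxz"))  # [(97, 100), (120, 121), (122, 123)]

The half-open convention matches the surrounding hunks: CodeRange(code1, code2) matches codes with code1 <= c < code2, and it splits any range that straddles the newline code so that end-of-line handling stays a separate event.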
@@ -229,7 +229,7 @@ def Char(c): result.str = "Char(%s)" % repr(c) return result - + class RawCodeRange(RE): """ RawCodeRange(code1, code2) is a low-level RE which matches any character @@ -238,9 +238,9 @@ class RawCodeRange(RE): """ nullable = 0 match_nl = 0 - range = None # (code, code) - uppercase_range = None # (code, code) or None - lowercase_range = None # (code, code) or None + range = None # (code, code) + uppercase_range = None # (code, code) or None + lowercase_range = None # (code, code) or None def __init__(self, code1, code2): self.range = (code1, code2) @@ -260,7 +260,7 @@ class RawCodeRange(RE): def calc_str(self): return "CodeRange(%d,%d)" % (self.code1, self.code2) - + class _RawNewline(RE): """ RawNewline is a low-level RE which matches a newline character. @@ -275,7 +275,7 @@ class _RawNewline(RE): s = self.build_opt(m, initial_state, EOL) s.add_transition((nl_code, nl_code + 1), final_state) - + RawNewline = _RawNewline() @@ -305,7 +305,7 @@ class Seq(RE): def __init__(self, *re_list): nullable = 1 - for i, re in enumerate(re_list): + for i, re in enumerate(re_list): self.check_re(i, re) nullable = nullable and re.nullable self.re_list = re_list @@ -313,7 +313,7 @@ class Seq(RE): i = len(re_list) match_nl = 0 while i: - i -= 1 + i -= 1 re = re_list[i] if re.match_nl: match_nl = 1 @@ -329,7 +329,7 @@ class Seq(RE): else: s1 = initial_state n = len(re_list) - for i, re in enumerate(re_list): + for i, re in enumerate(re_list): if i < n - 1: s2 = m.new_state() else: @@ -362,7 +362,7 @@ class Alt(RE): non_nullable_res.append(re) if re.match_nl: match_nl = 1 - i += 1 + i += 1 self.nullable_res = nullable_res self.non_nullable_res = non_nullable_res self.nullable = nullable @@ -419,7 +419,7 @@ class SwitchCase(RE): def build_machine(self, m, initial_state, final_state, match_bol, nocase): self.re.build_machine(m, initial_state, final_state, match_bol, - self.nocase) + self.nocase) def calc_str(self): if self.nocase: @@ -442,7 +442,7 @@ Empty.__doc__ = \ """ Empty.str = "Empty" - + def Str1(s): """ Str1(s) is an RE which matches the literal string |s|. @@ -451,7 +451,7 @@ def Str1(s): result.str = "Str(%s)" % repr(s) return result - + def Str(*strs): """ Str(s) is an RE which matches the literal string |s|. @@ -464,7 +464,7 @@ def Str(*strs): result.str = "Str(%s)" % ','.join(map(repr, strs)) return result - + def Any(s): """ Any(s) is an RE which matches any character in the string |s|. @@ -474,7 +474,7 @@ def Any(s): result.str = "Any(%s)" % repr(s) return result - + def AnyBut(s): """ AnyBut(s) is an RE which matches any character (including @@ -487,7 +487,7 @@ def AnyBut(s): result.str = "AnyBut(%s)" % repr(s) return result - + AnyChar = AnyBut("") AnyChar.__doc__ = \ """ @@ -495,8 +495,8 @@ AnyChar.__doc__ = \ """ AnyChar.str = "AnyChar" - -def Range(s1, s2=None): + +def Range(s1, s2=None): """ Range(c1, c2) is an RE which matches any single character in the range |c1| to |c2| inclusive. @@ -509,12 +509,12 @@ def Range(s1, s2=None): else: ranges = [] for i in range(0, len(s1), 2): - ranges.append(CodeRange(ord(s1[i]), ord(s1[i + 1]) + 1)) + ranges.append(CodeRange(ord(s1[i]), ord(s1[i + 1]) + 1)) result = Alt(*ranges) result.str = "Range(%s)" % repr(s1) return result - + def Opt(re): """ Opt(re) is an RE which matches either |re| or the empty string. @@ -523,7 +523,7 @@ def Opt(re): result.str = "Opt(%s)" % re return result - + def Rep(re): """ Rep(re) is an RE which matches zero or more repetitions of |re|. 
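The hunks above touch most of the public combinators (Seq, Alt, Str, Any, AnyBut, Range, Opt, Rep). For context, a short sketch of how they compose in practice; this is hypothetical usage assuming the standard Plex exports, not code introduced by the patch:

    from Cython.Plex.Regexps import Str, Alt, Seq, Opt, Rep, Rep1, Range

    letter     = Range("azAZ")   # pairs packed into one string: a-z and A-Z
    digit      = Range("09")
    identifier = Seq(letter, Rep(Alt(letter, digit, Str("_"))))
    number     = Seq(Rep1(digit), Opt(Seq(Str("."), Rep(digit))))

Each factory just records structure (helpers like Str, Any and Range also set a readable .str for debugging); the actual NFA construction happens later, when Lexicon calls build_machine() on the resulting REs, as the Lexicons.py hunk earlier in this patch shows.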
@@ -532,22 +532,22 @@ def Rep(re): result.str = "Rep(%s)" % re return result - + def NoCase(re): """ NoCase(re) is an RE which matches the same strings as RE, but treating upper and lower case letters as equivalent. """ - return SwitchCase(re, nocase=1) + return SwitchCase(re, nocase=1) + - def Case(re): """ Case(re) is an RE which matches the same strings as RE, but treating upper and lower case letters as distinct, i.e. it cancels the effect of any enclosing NoCase(). """ - return SwitchCase(re, nocase=0) + return SwitchCase(re, nocase=0) # # RE Constants diff --git a/contrib/tools/cython/Cython/Plex/Scanners.pxd b/contrib/tools/cython/Cython/Plex/Scanners.pxd index 6c22c0b208..6e75f55e61 100644 --- a/contrib/tools/cython/Cython/Plex/Scanners.pxd +++ b/contrib/tools/cython/Cython/Plex/Scanners.pxd @@ -28,23 +28,23 @@ cdef class Scanner: cdef public level - @cython.final + @cython.final @cython.locals(input_state=long) cdef next_char(self) @cython.locals(action=Action) - cpdef tuple read(self) - @cython.final + cpdef tuple read(self) + @cython.final cdef tuple scan_a_token(self) - ##cdef tuple position(self) # used frequently by Parsing.py + ##cdef tuple position(self) # used frequently by Parsing.py - @cython.final + @cython.final @cython.locals(cur_pos=Py_ssize_t, cur_line=Py_ssize_t, cur_line_start=Py_ssize_t, input_state=long, next_pos=Py_ssize_t, state=dict, buf_start_pos=Py_ssize_t, buf_len=Py_ssize_t, buf_index=Py_ssize_t, trace=bint, discard=Py_ssize_t, data=unicode, buffer=unicode) cdef run_machine_inlined(self) - @cython.final + @cython.final cdef begin(self, state) - @cython.final + @cython.final cdef produce(self, value, text = *) diff --git a/contrib/tools/cython/Cython/Plex/Scanners.py b/contrib/tools/cython/Cython/Plex/Scanners.py index 668bb488a8..88f7e2da3b 100644 --- a/contrib/tools/cython/Cython/Plex/Scanners.py +++ b/contrib/tools/cython/Cython/Plex/Scanners.py @@ -1,4 +1,4 @@ -# cython: auto_pickle=False +# cython: auto_pickle=False #======================================================================= # # Python Lexical Analyser @@ -11,7 +11,7 @@ from __future__ import absolute_import import cython - + cython.declare(BOL=object, EOL=object, EOF=object, NOT_FOUND=object) from . import Errors @@ -21,318 +21,318 @@ NOT_FOUND = object() class Scanner(object): - """ - A Scanner is used to read tokens from a stream of characters - using the token set specified by a Plex.Lexicon. - - Constructor: - - Scanner(lexicon, stream, name = '') - - See the docstring of the __init__ method for details. - - Methods: - - See the docstrings of the individual methods for more - information. - - read() --> (value, text) - Reads the next lexical token from the stream. - - position() --> (name, line, col) - Returns the position of the last token read using the - read() method. - - begin(state_name) - Causes scanner to change state. - - produce(value [, text]) - Causes return of a token value to the caller of the - Scanner. 
- - """ - - # lexicon = None # Lexicon - # stream = None # file-like object - # name = '' - # buffer = '' - # buf_start_pos = 0 # position in input of start of buffer - # next_pos = 0 # position in input of next char to read - # cur_pos = 0 # position in input of current char - # cur_line = 1 # line number of current char - # cur_line_start = 0 # position in input of start of current line - # start_pos = 0 # position in input of start of token - # start_line = 0 # line number of start of token - # start_col = 0 # position in line of start of token - # text = None # text of last token read - # initial_state = None # Node - # state_name = '' # Name of initial state - # queue = None # list of tokens to be returned - # trace = 0 - - def __init__(self, lexicon, stream, name='', initial_pos=None): - """ - Scanner(lexicon, stream, name = '') - - |lexicon| is a Plex.Lexicon instance specifying the lexical tokens - to be recognised. - - |stream| can be a file object or anything which implements a - compatible read() method. - - |name| is optional, and may be the name of the file being - scanned or any other identifying string. - """ - self.trace = 0 - - self.buffer = u'' - self.buf_start_pos = 0 - self.next_pos = 0 - self.cur_pos = 0 - self.cur_line = 1 - self.start_pos = 0 - self.start_line = 0 - self.start_col = 0 - self.text = None - self.state_name = None - - self.lexicon = lexicon - self.stream = stream - self.name = name - self.queue = [] - self.initial_state = None - self.begin('') - self.next_pos = 0 - self.cur_pos = 0 - self.cur_line_start = 0 - self.cur_char = BOL - self.input_state = 1 - if initial_pos is not None: - self.cur_line, self.cur_line_start = initial_pos[1], -initial_pos[2] - - def read(self): - """ - Read the next lexical token from the stream and return a - tuple (value, text), where |value| is the value associated with - the token as specified by the Lexicon, and |text| is the actual - string read from the stream. Returns (None, '') on end of file. - """ - queue = self.queue - while not queue: - self.text, action = self.scan_a_token() - if action is None: - self.produce(None) - self.eof() - else: - value = action.perform(self, self.text) - if value is not None: - self.produce(value) - result = queue[0] - del queue[0] - return result - - def scan_a_token(self): - """ - Read the next input sequence recognised by the machine - and return (text, action). Returns ('', None) on end of - file. - """ - self.start_pos = self.cur_pos - self.start_line = self.cur_line - self.start_col = self.cur_pos - self.cur_line_start - action = self.run_machine_inlined() - if action is not None: - if self.trace: - print("Scanner: read: Performing %s %d:%d" % ( - action, self.start_pos, self.cur_pos)) - text = self.buffer[ - self.start_pos - self.buf_start_pos: - self.cur_pos - self.buf_start_pos] - return (text, action) - else: - if self.cur_pos == self.start_pos: - if self.cur_char is EOL: - self.next_char() - if self.cur_char is None or self.cur_char is EOF: - return (u'', None) - raise Errors.UnrecognizedInput(self, self.state_name) - - def run_machine_inlined(self): - """ - Inlined version of run_machine for speed. 
- """ - state = self.initial_state - cur_pos = self.cur_pos - cur_line = self.cur_line - cur_line_start = self.cur_line_start - cur_char = self.cur_char - input_state = self.input_state - next_pos = self.next_pos - buffer = self.buffer - buf_start_pos = self.buf_start_pos - buf_len = len(buffer) + """ + A Scanner is used to read tokens from a stream of characters + using the token set specified by a Plex.Lexicon. + + Constructor: + + Scanner(lexicon, stream, name = '') + + See the docstring of the __init__ method for details. + + Methods: + + See the docstrings of the individual methods for more + information. + + read() --> (value, text) + Reads the next lexical token from the stream. + + position() --> (name, line, col) + Returns the position of the last token read using the + read() method. + + begin(state_name) + Causes scanner to change state. + + produce(value [, text]) + Causes return of a token value to the caller of the + Scanner. + + """ + + # lexicon = None # Lexicon + # stream = None # file-like object + # name = '' + # buffer = '' + # buf_start_pos = 0 # position in input of start of buffer + # next_pos = 0 # position in input of next char to read + # cur_pos = 0 # position in input of current char + # cur_line = 1 # line number of current char + # cur_line_start = 0 # position in input of start of current line + # start_pos = 0 # position in input of start of token + # start_line = 0 # line number of start of token + # start_col = 0 # position in line of start of token + # text = None # text of last token read + # initial_state = None # Node + # state_name = '' # Name of initial state + # queue = None # list of tokens to be returned + # trace = 0 + + def __init__(self, lexicon, stream, name='', initial_pos=None): + """ + Scanner(lexicon, stream, name = '') + + |lexicon| is a Plex.Lexicon instance specifying the lexical tokens + to be recognised. + + |stream| can be a file object or anything which implements a + compatible read() method. + + |name| is optional, and may be the name of the file being + scanned or any other identifying string. + """ + self.trace = 0 + + self.buffer = u'' + self.buf_start_pos = 0 + self.next_pos = 0 + self.cur_pos = 0 + self.cur_line = 1 + self.start_pos = 0 + self.start_line = 0 + self.start_col = 0 + self.text = None + self.state_name = None + + self.lexicon = lexicon + self.stream = stream + self.name = name + self.queue = [] + self.initial_state = None + self.begin('') + self.next_pos = 0 + self.cur_pos = 0 + self.cur_line_start = 0 + self.cur_char = BOL + self.input_state = 1 + if initial_pos is not None: + self.cur_line, self.cur_line_start = initial_pos[1], -initial_pos[2] + + def read(self): + """ + Read the next lexical token from the stream and return a + tuple (value, text), where |value| is the value associated with + the token as specified by the Lexicon, and |text| is the actual + string read from the stream. Returns (None, '') on end of file. + """ + queue = self.queue + while not queue: + self.text, action = self.scan_a_token() + if action is None: + self.produce(None) + self.eof() + else: + value = action.perform(self, self.text) + if value is not None: + self.produce(value) + result = queue[0] + del queue[0] + return result + + def scan_a_token(self): + """ + Read the next input sequence recognised by the machine + and return (text, action). Returns ('', None) on end of + file. 
+ """ + self.start_pos = self.cur_pos + self.start_line = self.cur_line + self.start_col = self.cur_pos - self.cur_line_start + action = self.run_machine_inlined() + if action is not None: + if self.trace: + print("Scanner: read: Performing %s %d:%d" % ( + action, self.start_pos, self.cur_pos)) + text = self.buffer[ + self.start_pos - self.buf_start_pos: + self.cur_pos - self.buf_start_pos] + return (text, action) + else: + if self.cur_pos == self.start_pos: + if self.cur_char is EOL: + self.next_char() + if self.cur_char is None or self.cur_char is EOF: + return (u'', None) + raise Errors.UnrecognizedInput(self, self.state_name) + + def run_machine_inlined(self): + """ + Inlined version of run_machine for speed. + """ + state = self.initial_state + cur_pos = self.cur_pos + cur_line = self.cur_line + cur_line_start = self.cur_line_start + cur_char = self.cur_char + input_state = self.input_state + next_pos = self.next_pos + buffer = self.buffer + buf_start_pos = self.buf_start_pos + buf_len = len(buffer) b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \ - None, 0, 0, 0, u'', 0, 0 - trace = self.trace - while 1: - if trace: #TRACE# - print("State %d, %d/%d:%s -->" % ( #TRACE# - state['number'], input_state, cur_pos, repr(cur_char))) #TRACE# - # Begin inlined self.save_for_backup() - #action = state.action #@slow - action = state['action'] #@fast - if action is not None: - b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \ - action, cur_pos, cur_line, cur_line_start, cur_char, input_state, next_pos - # End inlined self.save_for_backup() - c = cur_char - #new_state = state.new_state(c) #@slow - new_state = state.get(c, NOT_FOUND) #@fast - if new_state is NOT_FOUND: #@fast - new_state = c and state.get('else') #@fast - if new_state: - if trace: #TRACE# - print("State %d" % new_state['number']) #TRACE# - state = new_state - # Begin inlined: self.next_char() - if input_state == 1: - cur_pos = next_pos - # Begin inlined: c = self.read_char() - buf_index = next_pos - buf_start_pos - if buf_index < buf_len: - c = buffer[buf_index] - next_pos += 1 - else: - discard = self.start_pos - buf_start_pos - data = self.stream.read(0x1000) - buffer = self.buffer[discard:] + data - self.buffer = buffer - buf_start_pos += discard - self.buf_start_pos = buf_start_pos - buf_len = len(buffer) - buf_index -= discard - if data: - c = buffer[buf_index] - next_pos += 1 - else: - c = u'' - # End inlined: c = self.read_char() - if c == u'\n': - cur_char = EOL - input_state = 2 - elif not c: - cur_char = EOL - input_state = 4 - else: - cur_char = c - elif input_state == 2: - cur_char = u'\n' - input_state = 3 - elif input_state == 3: - cur_line += 1 - cur_line_start = cur_pos = next_pos - cur_char = BOL - input_state = 1 - elif input_state == 4: - cur_char = EOF - input_state = 5 - else: # input_state = 5 - cur_char = u'' - # End inlined self.next_char() - else: # not new_state - if trace: #TRACE# - print("blocked") #TRACE# - # Begin inlined: action = self.back_up() - if b_action is not None: - (action, cur_pos, cur_line, cur_line_start, - cur_char, input_state, next_pos) = \ - (b_action, b_cur_pos, b_cur_line, b_cur_line_start, - b_cur_char, b_input_state, b_next_pos) - else: - action = None - break # while 1 - # End inlined: action = self.back_up() - self.cur_pos = cur_pos - self.cur_line = cur_line - self.cur_line_start = cur_line_start - self.cur_char = cur_char - self.input_state = input_state - self.next_pos = next_pos - if trace: 
#TRACE# - if action is not None: #TRACE# - print("Doing %s" % action) #TRACE# - return action - - def next_char(self): - input_state = self.input_state - if self.trace: - print("Scanner: next: %s [%d] %d" % (" " * 20, input_state, self.cur_pos)) + None, 0, 0, 0, u'', 0, 0 + trace = self.trace + while 1: + if trace: #TRACE# + print("State %d, %d/%d:%s -->" % ( #TRACE# + state['number'], input_state, cur_pos, repr(cur_char))) #TRACE# + # Begin inlined self.save_for_backup() + #action = state.action #@slow + action = state['action'] #@fast + if action is not None: + b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \ + action, cur_pos, cur_line, cur_line_start, cur_char, input_state, next_pos + # End inlined self.save_for_backup() + c = cur_char + #new_state = state.new_state(c) #@slow + new_state = state.get(c, NOT_FOUND) #@fast + if new_state is NOT_FOUND: #@fast + new_state = c and state.get('else') #@fast + if new_state: + if trace: #TRACE# + print("State %d" % new_state['number']) #TRACE# + state = new_state + # Begin inlined: self.next_char() + if input_state == 1: + cur_pos = next_pos + # Begin inlined: c = self.read_char() + buf_index = next_pos - buf_start_pos + if buf_index < buf_len: + c = buffer[buf_index] + next_pos += 1 + else: + discard = self.start_pos - buf_start_pos + data = self.stream.read(0x1000) + buffer = self.buffer[discard:] + data + self.buffer = buffer + buf_start_pos += discard + self.buf_start_pos = buf_start_pos + buf_len = len(buffer) + buf_index -= discard + if data: + c = buffer[buf_index] + next_pos += 1 + else: + c = u'' + # End inlined: c = self.read_char() + if c == u'\n': + cur_char = EOL + input_state = 2 + elif not c: + cur_char = EOL + input_state = 4 + else: + cur_char = c + elif input_state == 2: + cur_char = u'\n' + input_state = 3 + elif input_state == 3: + cur_line += 1 + cur_line_start = cur_pos = next_pos + cur_char = BOL + input_state = 1 + elif input_state == 4: + cur_char = EOF + input_state = 5 + else: # input_state = 5 + cur_char = u'' + # End inlined self.next_char() + else: # not new_state + if trace: #TRACE# + print("blocked") #TRACE# + # Begin inlined: action = self.back_up() + if b_action is not None: + (action, cur_pos, cur_line, cur_line_start, + cur_char, input_state, next_pos) = \ + (b_action, b_cur_pos, b_cur_line, b_cur_line_start, + b_cur_char, b_input_state, b_next_pos) + else: + action = None + break # while 1 + # End inlined: action = self.back_up() + self.cur_pos = cur_pos + self.cur_line = cur_line + self.cur_line_start = cur_line_start + self.cur_char = cur_char + self.input_state = input_state + self.next_pos = next_pos + if trace: #TRACE# + if action is not None: #TRACE# + print("Doing %s" % action) #TRACE# + return action + + def next_char(self): + input_state = self.input_state + if self.trace: + print("Scanner: next: %s [%d] %d" % (" " * 20, input_state, self.cur_pos)) if input_state == 1: - self.cur_pos = self.next_pos - c = self.read_char() - if c == u'\n': - self.cur_char = EOL - self.input_state = 2 - elif not c: - self.cur_char = EOL - self.input_state = 4 + self.cur_pos = self.next_pos + c = self.read_char() + if c == u'\n': + self.cur_char = EOL + self.input_state = 2 + elif not c: + self.cur_char = EOL + self.input_state = 4 else: - self.cur_char = c + self.cur_char = c elif input_state == 2: - self.cur_char = u'\n' - self.input_state = 3 + self.cur_char = u'\n' + self.input_state = 3 elif input_state == 3: - self.cur_line += 1 - self.cur_line_start = self.cur_pos = 
self.next_pos - self.cur_char = BOL - self.input_state = 1 + self.cur_line += 1 + self.cur_line_start = self.cur_pos = self.next_pos + self.cur_char = BOL + self.input_state = 1 elif input_state == 4: - self.cur_char = EOF - self.input_state = 5 - else: # input_state = 5 - self.cur_char = u'' - if self.trace: - print("--> [%d] %d %r" % (input_state, self.cur_pos, self.cur_char)) - - def position(self): - """ - Return a tuple (name, line, col) representing the location of - the last token read using the read() method. |name| is the - name that was provided to the Scanner constructor; |line| - is the line number in the stream (1-based); |col| is the - position within the line of the first character of the token - (0-based). - """ - return (self.name, self.start_line, self.start_col) - - def get_position(self): - """Python accessible wrapper around position(), only for error reporting. - """ - return self.position() - - def begin(self, state_name): - """Set the current state of the scanner to the named state.""" - self.initial_state = ( - self.lexicon.get_initial_state(state_name)) - self.state_name = state_name - - def produce(self, value, text=None): - """ - Called from an action procedure, causes |value| to be returned - as the token value from read(). If |text| is supplied, it is - returned in place of the scanned text. - - produce() can be called more than once during a single call to an action - procedure, in which case the tokens are queued up and returned one - at a time by subsequent calls to read(), until the queue is empty, - whereupon scanning resumes. - """ - if text is None: - text = self.text - self.queue.append((value, text)) - - def eof(self): - """ - Override this method if you want something to be done at - end of file. - """ + self.cur_char = EOF + self.input_state = 5 + else: # input_state = 5 + self.cur_char = u'' + if self.trace: + print("--> [%d] %d %r" % (input_state, self.cur_pos, self.cur_char)) + + def position(self): + """ + Return a tuple (name, line, col) representing the location of + the last token read using the read() method. |name| is the + name that was provided to the Scanner constructor; |line| + is the line number in the stream (1-based); |col| is the + position within the line of the first character of the token + (0-based). + """ + return (self.name, self.start_line, self.start_col) + + def get_position(self): + """Python accessible wrapper around position(), only for error reporting. + """ + return self.position() + + def begin(self, state_name): + """Set the current state of the scanner to the named state.""" + self.initial_state = ( + self.lexicon.get_initial_state(state_name)) + self.state_name = state_name + + def produce(self, value, text=None): + """ + Called from an action procedure, causes |value| to be returned + as the token value from read(). If |text| is supplied, it is + returned in place of the scanned text. + + produce() can be called more than once during a single call to an action + procedure, in which case the tokens are queued up and returned one + at a time by subsequent calls to read(), until the queue is empty, + whereupon scanning resumes. + """ + if text is None: + text = self.text + self.queue.append((value, text)) + + def eof(self): + """ + Override this method if you want something to be done at + end of file. 
+ """ diff --git a/contrib/tools/cython/Cython/Plex/Traditional.py b/contrib/tools/cython/Cython/Plex/Traditional.py index f9198a8b18..ec7252daed 100644 --- a/contrib/tools/cython/Cython/Plex/Traditional.py +++ b/contrib/tools/cython/Cython/Plex/Traditional.py @@ -13,146 +13,146 @@ from .Errors import PlexError class RegexpSyntaxError(PlexError): - pass + pass def re(s): - """ - Convert traditional string representation of regular expression |s| - into Plex representation. - """ - return REParser(s).parse_re() + """ + Convert traditional string representation of regular expression |s| + into Plex representation. + """ + return REParser(s).parse_re() class REParser(object): - def __init__(self, s): - self.s = s - self.i = -1 - self.end = 0 - self.next() - - def parse_re(self): - re = self.parse_alt() - if not self.end: - self.error("Unexpected %s" % repr(self.c)) - return re - - def parse_alt(self): - """Parse a set of alternative regexps.""" - re = self.parse_seq() - if self.c == '|': - re_list = [re] - while self.c == '|': - self.next() - re_list.append(self.parse_seq()) - re = Alt(*re_list) - return re - - def parse_seq(self): - """Parse a sequence of regexps.""" - re_list = [] - while not self.end and not self.c in "|)": - re_list.append(self.parse_mod()) - return Seq(*re_list) - - def parse_mod(self): - """Parse a primitive regexp followed by *, +, ? modifiers.""" - re = self.parse_prim() - while not self.end and self.c in "*+?": - if self.c == '*': - re = Rep(re) - elif self.c == '+': - re = Rep1(re) - else: # self.c == '?' - re = Opt(re) - self.next() - return re - - def parse_prim(self): - """Parse a primitive regexp.""" + def __init__(self, s): + self.s = s + self.i = -1 + self.end = 0 + self.next() + + def parse_re(self): + re = self.parse_alt() + if not self.end: + self.error("Unexpected %s" % repr(self.c)) + return re + + def parse_alt(self): + """Parse a set of alternative regexps.""" + re = self.parse_seq() + if self.c == '|': + re_list = [re] + while self.c == '|': + self.next() + re_list.append(self.parse_seq()) + re = Alt(*re_list) + return re + + def parse_seq(self): + """Parse a sequence of regexps.""" + re_list = [] + while not self.end and not self.c in "|)": + re_list.append(self.parse_mod()) + return Seq(*re_list) + + def parse_mod(self): + """Parse a primitive regexp followed by *, +, ? modifiers.""" + re = self.parse_prim() + while not self.end and self.c in "*+?": + if self.c == '*': + re = Rep(re) + elif self.c == '+': + re = Rep1(re) + else: # self.c == '?' + re = Opt(re) + self.next() + return re + + def parse_prim(self): + """Parse a primitive regexp.""" c = self.get() - if c == '.': - re = AnyBut("\n") - elif c == '^': - re = Bol - elif c == '$': - re = Eol - elif c == '(': - re = self.parse_alt() - self.expect(')') - elif c == '[': - re = self.parse_charset() - self.expect(']') - else: - if c == '\\': - c = self.get() - re = Char(c) - return re - - def parse_charset(self): - """Parse a charset. 
Does not include the surrounding [].""" - char_list = [] - invert = 0 - if self.c == '^': - invert = 1 - self.next() - if self.c == ']': - char_list.append(']') - self.next() - while not self.end and self.c != ']': - c1 = self.get() - if self.c == '-' and self.lookahead(1) != ']': - self.next() - c2 = self.get() - for a in range(ord(c1), ord(c2) + 1): - char_list.append(chr(a)) - else: - char_list.append(c1) - chars = ''.join(char_list) - if invert: - return AnyBut(chars) - else: - return Any(chars) - - def next(self): - """Advance to the next char.""" - s = self.s - i = self.i = self.i + 1 - if i < len(s): - self.c = s[i] - else: - self.c = '' - self.end = 1 - - def get(self): - if self.end: - self.error("Premature end of string") - c = self.c - self.next() - return c - - def lookahead(self, n): - """Look ahead n chars.""" - j = self.i + n - if j < len(self.s): - return self.s[j] - else: - return '' - - def expect(self, c): - """ - Expect to find character |c| at current position. - Raises an exception otherwise. - """ - if self.c == c: - self.next() - else: - self.error("Missing %s" % repr(c)) - - def error(self, mess): - """Raise exception to signal syntax error in regexp.""" - raise RegexpSyntaxError("Syntax error in regexp %s at position %d: %s" % ( - repr(self.s), self.i, mess)) + if c == '.': + re = AnyBut("\n") + elif c == '^': + re = Bol + elif c == '$': + re = Eol + elif c == '(': + re = self.parse_alt() + self.expect(')') + elif c == '[': + re = self.parse_charset() + self.expect(']') + else: + if c == '\\': + c = self.get() + re = Char(c) + return re + + def parse_charset(self): + """Parse a charset. Does not include the surrounding [].""" + char_list = [] + invert = 0 + if self.c == '^': + invert = 1 + self.next() + if self.c == ']': + char_list.append(']') + self.next() + while not self.end and self.c != ']': + c1 = self.get() + if self.c == '-' and self.lookahead(1) != ']': + self.next() + c2 = self.get() + for a in range(ord(c1), ord(c2) + 1): + char_list.append(chr(a)) + else: + char_list.append(c1) + chars = ''.join(char_list) + if invert: + return AnyBut(chars) + else: + return Any(chars) + + def next(self): + """Advance to the next char.""" + s = self.s + i = self.i = self.i + 1 + if i < len(s): + self.c = s[i] + else: + self.c = '' + self.end = 1 + + def get(self): + if self.end: + self.error("Premature end of string") + c = self.c + self.next() + return c + + def lookahead(self, n): + """Look ahead n chars.""" + j = self.i + n + if j < len(self.s): + return self.s[j] + else: + return '' + + def expect(self, c): + """ + Expect to find character |c| at current position. + Raises an exception otherwise. + """ + if self.c == c: + self.next() + else: + self.error("Missing %s" % repr(c)) + + def error(self, mess): + """Raise exception to signal syntax error in regexp.""" + raise RegexpSyntaxError("Syntax error in regexp %s at position %d: %s" % ( + repr(self.s), self.i, mess)) diff --git a/contrib/tools/cython/Cython/Plex/Transitions.py b/contrib/tools/cython/Cython/Plex/Transitions.py index 92c8a34df5..3833817946 100644 --- a/contrib/tools/cython/Cython/Plex/Transitions.py +++ b/contrib/tools/cython/Cython/Plex/Transitions.py @@ -1,242 +1,242 @@ # -# Plex - Transition Maps +# Plex - Transition Maps # -# This version represents state sets directly as dicts for speed. +# This version represents state sets directly as dicts for speed. 
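For orientation, the Traditional.py hunk above defines a small recursive-descent parser that turns a conventional regexp string into Plex combinators (Seq, Alt, Rep, Char, ...). A minimal usage sketch, assuming the vendored package is importable as Cython.Plex.Traditional exactly as named in this diff; the commented-in results follow from the parser code shown above:

    from Cython.Plex.Traditional import re as plex_re, RegexpSyntaxError

    # "a(b|c)*" parses to Seq(Char('a'), Rep(Alt(Char('b'), Char('c'))))
    rexp = plex_re("a(b|c)*")

    # An unbalanced parenthesis reaches REParser.error(), which raises
    # RegexpSyntaxError carrying the offending position:
    try:
        plex_re("a(b")
    except RegexpSyntaxError as exc:
        print(exc)  # Syntax error in regexp 'a(b' at position 3: Missing ')'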
# from __future__ import absolute_import -try: - from sys import maxsize as maxint -except ImportError: - from sys import maxint +try: + from sys import maxsize as maxint +except ImportError: + from sys import maxint class TransitionMap(object): - """ - A TransitionMap maps an input event to a set of states. - An input event is one of: a range of character codes, - the empty string (representing an epsilon move), or one - of the special symbols BOL, EOL, EOF. - - For characters, this implementation compactly represents - the map by means of a list: - - [code_0, states_0, code_1, states_1, code_2, states_2, - ..., code_n-1, states_n-1, code_n] - - where |code_i| is a character code, and |states_i| is a - set of states corresponding to characters with codes |c| - in the range |code_i| <= |c| <= |code_i+1|. - - The following invariants hold: - n >= 1 - code_0 == -maxint - code_n == maxint - code_i < code_i+1 for i in 0..n-1 - states_0 == states_n-1 - - Mappings for the special events '', BOL, EOL, EOF are - kept separately in a dictionary. - """ - - map = None # The list of codes and states - special = None # Mapping for special events - - def __init__(self, map=None, special=None): - if not map: - map = [-maxint, {}, maxint] - if not special: - special = {} - self.map = map - self.special = special - #self.check() ### - - def add(self, event, new_state, - TupleType=tuple): - """ - Add transition to |new_state| on |event|. - """ - if type(event) is TupleType: - code0, code1 = event - i = self.split(code0) - j = self.split(code1) - map = self.map - while i < j: - map[i + 1][new_state] = 1 - i += 2 - else: - self.get_special(event)[new_state] = 1 - - def add_set(self, event, new_set, - TupleType=tuple): - """ - Add transitions to the states in |new_set| on |event|. - """ - if type(event) is TupleType: - code0, code1 = event - i = self.split(code0) - j = self.split(code1) - map = self.map - while i < j: - map[i + 1].update(new_set) - i += 2 - else: - self.get_special(event).update(new_set) - - def get_epsilon(self, - none=None): - """ - Return the mapping for epsilon, or None. - """ - return self.special.get('', none) - - def iteritems(self, - len=len): - """ - Return the mapping as an iterable of ((code1, code2), state_set) and - (special_event, state_set) pairs. - """ - result = [] - map = self.map - else_set = map[1] - i = 0 - n = len(map) - 1 - code0 = map[0] - while i < n: - set = map[i + 1] - code1 = map[i + 2] - if set or else_set: - result.append(((code0, code1), set)) - code0 = code1 - i += 2 - for event, set in self.special.items(): - if set: - result.append((event, set)) - return iter(result) - - items = iteritems - - # ------------------- Private methods -------------------- - - def split(self, code, - len=len, maxint=maxint): - """ - Search the list for the position of the split point for |code|, - inserting a new split point if necessary. Returns index |i| such - that |code| == |map[i]|. - """ - # We use a funky variation on binary search. 
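The TransitionMap docstring above pins down the alternating code/state-set list layout and its invariants; the sketch below (assuming the vendored Cython.Plex.Transitions path) shows the list that add() produces for one character range plus an epsilon move, per the split() and get_special() logic in this hunk:

    from Cython.Plex.Transitions import TransitionMap

    tm = TransitionMap()
    tm.add((ord('a'), ord('z') + 1), new_state=1)  # range event: codes 'a'..'z'
    tm.add('', new_state=2)                        # epsilon move (special event)

    # Alternating boundary codes and state dicts, bracketed by +/- maxint:
    # [-maxint, {}, 97, {1: 1}, 123, {}, maxint]
    print(tm.map)
    print(tm.special)  # {'': {2: 1}}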
- map = self.map - hi = len(map) - 1 - # Special case: code == map[-1] - if code == maxint: - return hi - # General case - lo = 0 - # loop invariant: map[lo] <= code < map[hi] and hi - lo >= 2 - while hi - lo >= 4: - # Find midpoint truncated to even index - mid = ((lo + hi) // 2) & ~1 - if code < map[mid]: - hi = mid - else: - lo = mid - # map[lo] <= code < map[hi] and hi - lo == 2 - if map[lo] == code: - return lo - else: - map[hi:hi] = [code, map[hi - 1].copy()] - #self.check() ### - return hi - - def get_special(self, event): - """ - Get state set for special event, adding a new entry if necessary. - """ - special = self.special - set = special.get(event, None) - if not set: - set = {} - special[event] = set - return set - - # --------------------- Conversion methods ----------------------- - - def __str__(self): - map_strs = [] - map = self.map - n = len(map) - i = 0 - while i < n: - code = map[i] - if code == -maxint: - code_str = "-inf" - elif code == maxint: - code_str = "inf" - else: - code_str = str(code) - map_strs.append(code_str) - i += 1 - if i < n: - map_strs.append(state_set_str(map[i])) - i += 1 - special_strs = {} - for event, set in self.special.items(): - special_strs[event] = state_set_str(set) - return "[%s]+%s" % ( - ','.join(map_strs), - special_strs - ) - - # --------------------- Debugging methods ----------------------- - - def check(self): - """Check data structure integrity.""" - if not self.map[-3] < self.map[-1]: - print(self) - assert 0 - - def dump(self, file): - map = self.map - i = 0 - n = len(map) - 1 - while i < n: - self.dump_range(map[i], map[i + 2], map[i + 1], file) - i += 2 - for event, set in self.special.items(): - if set: - if not event: - event = 'empty' - self.dump_trans(event, set, file) - - def dump_range(self, code0, code1, set, file): - if set: - if code0 == -maxint: - if code1 == maxint: - k = "any" - else: - k = "< %s" % self.dump_char(code1) - elif code1 == maxint: - k = "> %s" % self.dump_char(code0 - 1) - elif code0 == code1 - 1: - k = self.dump_char(code0) - else: - k = "%s..%s" % (self.dump_char(code0), - self.dump_char(code1 - 1)) - self.dump_trans(k, set, file) - - def dump_char(self, code): - if 0 <= code <= 255: - return repr(chr(code)) + """ + A TransitionMap maps an input event to a set of states. + An input event is one of: a range of character codes, + the empty string (representing an epsilon move), or one + of the special symbols BOL, EOL, EOF. + + For characters, this implementation compactly represents + the map by means of a list: + + [code_0, states_0, code_1, states_1, code_2, states_2, + ..., code_n-1, states_n-1, code_n] + + where |code_i| is a character code, and |states_i| is a + set of states corresponding to characters with codes |c| + in the range |code_i| <= |c| <= |code_i+1|. + + The following invariants hold: + n >= 1 + code_0 == -maxint + code_n == maxint + code_i < code_i+1 for i in 0..n-1 + states_0 == states_n-1 + + Mappings for the special events '', BOL, EOL, EOF are + kept separately in a dictionary. + """ + + map = None # The list of codes and states + special = None # Mapping for special events + + def __init__(self, map=None, special=None): + if not map: + map = [-maxint, {}, maxint] + if not special: + special = {} + self.map = map + self.special = special + #self.check() ### + + def add(self, event, new_state, + TupleType=tuple): + """ + Add transition to |new_state| on |event|. 
+ """ + if type(event) is TupleType: + code0, code1 = event + i = self.split(code0) + j = self.split(code1) + map = self.map + while i < j: + map[i + 1][new_state] = 1 + i += 2 else: - return "chr(%d)" % code + self.get_special(event)[new_state] = 1 + + def add_set(self, event, new_set, + TupleType=tuple): + """ + Add transitions to the states in |new_set| on |event|. + """ + if type(event) is TupleType: + code0, code1 = event + i = self.split(code0) + j = self.split(code1) + map = self.map + while i < j: + map[i + 1].update(new_set) + i += 2 + else: + self.get_special(event).update(new_set) + + def get_epsilon(self, + none=None): + """ + Return the mapping for epsilon, or None. + """ + return self.special.get('', none) + + def iteritems(self, + len=len): + """ + Return the mapping as an iterable of ((code1, code2), state_set) and + (special_event, state_set) pairs. + """ + result = [] + map = self.map + else_set = map[1] + i = 0 + n = len(map) - 1 + code0 = map[0] + while i < n: + set = map[i + 1] + code1 = map[i + 2] + if set or else_set: + result.append(((code0, code1), set)) + code0 = code1 + i += 2 + for event, set in self.special.items(): + if set: + result.append((event, set)) + return iter(result) + + items = iteritems + + # ------------------- Private methods -------------------- + + def split(self, code, + len=len, maxint=maxint): + """ + Search the list for the position of the split point for |code|, + inserting a new split point if necessary. Returns index |i| such + that |code| == |map[i]|. + """ + # We use a funky variation on binary search. + map = self.map + hi = len(map) - 1 + # Special case: code == map[-1] + if code == maxint: + return hi + # General case + lo = 0 + # loop invariant: map[lo] <= code < map[hi] and hi - lo >= 2 + while hi - lo >= 4: + # Find midpoint truncated to even index + mid = ((lo + hi) // 2) & ~1 + if code < map[mid]: + hi = mid + else: + lo = mid + # map[lo] <= code < map[hi] and hi - lo == 2 + if map[lo] == code: + return lo + else: + map[hi:hi] = [code, map[hi - 1].copy()] + #self.check() ### + return hi + + def get_special(self, event): + """ + Get state set for special event, adding a new entry if necessary. 
+ """ + special = self.special + set = special.get(event, None) + if not set: + set = {} + special[event] = set + return set + + # --------------------- Conversion methods ----------------------- + + def __str__(self): + map_strs = [] + map = self.map + n = len(map) + i = 0 + while i < n: + code = map[i] + if code == -maxint: + code_str = "-inf" + elif code == maxint: + code_str = "inf" + else: + code_str = str(code) + map_strs.append(code_str) + i += 1 + if i < n: + map_strs.append(state_set_str(map[i])) + i += 1 + special_strs = {} + for event, set in self.special.items(): + special_strs[event] = state_set_str(set) + return "[%s]+%s" % ( + ','.join(map_strs), + special_strs + ) + + # --------------------- Debugging methods ----------------------- + + def check(self): + """Check data structure integrity.""" + if not self.map[-3] < self.map[-1]: + print(self) + assert 0 + + def dump(self, file): + map = self.map + i = 0 + n = len(map) - 1 + while i < n: + self.dump_range(map[i], map[i + 2], map[i + 1], file) + i += 2 + for event, set in self.special.items(): + if set: + if not event: + event = 'empty' + self.dump_trans(event, set, file) + + def dump_range(self, code0, code1, set, file): + if set: + if code0 == -maxint: + if code1 == maxint: + k = "any" + else: + k = "< %s" % self.dump_char(code1) + elif code1 == maxint: + k = "> %s" % self.dump_char(code0 - 1) + elif code0 == code1 - 1: + k = self.dump_char(code0) + else: + k = "%s..%s" % (self.dump_char(code0), + self.dump_char(code1 - 1)) + self.dump_trans(k, set, file) + + def dump_char(self, code): + if 0 <= code <= 255: + return repr(chr(code)) + else: + return "chr(%d)" % code - def dump_trans(self, key, set, file): - file.write(" %s --> %s\n" % (key, self.dump_set(set))) + def dump_trans(self, key, set, file): + file.write(" %s --> %s\n" % (key, self.dump_set(set))) - def dump_set(self, set): - return state_set_str(set) + def dump_set(self, set): + return state_set_str(set) # @@ -248,4 +248,4 @@ class TransitionMap(object): # set1[state] = 1 def state_set_str(set): - return "[%s]" % ','.join(["S%d" % state.number for state in set]) + return "[%s]" % ','.join(["S%d" % state.number for state in set]) diff --git a/contrib/tools/cython/Cython/Runtime/refnanny.pyx b/contrib/tools/cython/Cython/Runtime/refnanny.pyx index bafda31cfa..d4b873fe97 100644 --- a/contrib/tools/cython/Cython/Runtime/refnanny.pyx +++ b/contrib/tools/cython/Cython/Runtime/refnanny.pyx @@ -1,5 +1,5 @@ # cython: language_level=3, auto_pickle=False - + from cpython.ref cimport PyObject, Py_INCREF, Py_DECREF, Py_XDECREF, Py_XINCREF from cpython.exc cimport PyErr_Fetch, PyErr_Restore from cpython.pystate cimport PyThreadState_Get @@ -32,7 +32,7 @@ cdef class Context(object): cdef regref(self, obj, lineno, bint is_null): log(LOG_ALL, u'regref', u"<NULL>" if is_null else obj, lineno) if is_null: - self.errors.append(f"NULL argument on line {lineno}") + self.errors.append(f"NULL argument on line {lineno}") return id_ = id(obj) count, linenumbers = self.refs.get(id_, (0, [])) @@ -43,12 +43,12 @@ cdef class Context(object): # returns whether it is ok to do the decref operation log(LOG_ALL, u'delref', u"<NULL>" if is_null else obj, lineno) if is_null: - self.errors.append(f"NULL argument on line {lineno}") + self.errors.append(f"NULL argument on line {lineno}") return False id_ = id(obj) count, linenumbers = self.refs.get(id_, (0, [])) if count == 0: - self.errors.append(f"Too many decrefs on line {lineno}, reference acquired on lines {linenumbers!r}") + 
self.errors.append(f"Too many decrefs on line {lineno}, reference acquired on lines {linenumbers!r}") return False elif count == 1: del self.refs[id_] @@ -61,7 +61,7 @@ cdef class Context(object): if self.refs: msg = u"References leaked:" for count, linenos in self.refs.itervalues(): - msg += f"\n ({count}) acquired on lines: {u', '.join([f'{x}' for x in linenos])}" + msg += f"\n ({count}) acquired on lines: {u', '.join([f'{x}' for x in linenos])}" self.errors.append(msg) if self.errors: return u"\n".join([u'REFNANNY: '+error for error in self.errors]) @@ -73,7 +73,7 @@ cdef void report_unraisable(object e=None): if e is None: import sys e = sys.exc_info()[1] - print(f"refnanny raised an exception: {e}") + print(f"refnanny raised an exception: {e}") except: pass # We absolutely cannot exit with an exception @@ -160,8 +160,8 @@ cdef void FinishContext(PyObject** ctx): context = <Context>ctx[0] errors = context.end() if errors: - print(f"{context.filename.decode('latin1')}: {context.name.decode('latin1')}()") - print(errors) + print(f"{context.filename.decode('latin1')}: {context.name.decode('latin1')}()") + print(errors) context = None except: report_unraisable() diff --git a/contrib/tools/cython/Cython/Shadow.py b/contrib/tools/cython/Cython/Shadow.py index 751808d1c6..e7b9e4f612 100644 --- a/contrib/tools/cython/Cython/Shadow.py +++ b/contrib/tools/cython/Cython/Shadow.py @@ -1,14 +1,14 @@ # cython.* namespace for pure mode. -from __future__ import absolute_import +from __future__ import absolute_import __version__ = "0.29.27" -try: - from __builtin__ import basestring -except ImportError: - basestring = str - - +try: + from __builtin__ import basestring +except ImportError: + basestring = str + + # BEGIN shameless copy from Cython/minivect/minitypes.py class _ArrayType(object): @@ -65,13 +65,13 @@ def index_type(base_type, item): return _ArrayType(base_type, len(item), is_c_contig=step_idx == len(item) - 1, is_f_contig=step_idx == 0) - elif isinstance(item, slice): + elif isinstance(item, slice): verify_slice(item) return _ArrayType(base_type, 1, is_c_contig=bool(item.step)) - else: - # int[8] etc. - assert int(item) == item # array size must be a plain integer - array(base_type, item) + else: + # int[8] etc. 
+ assert int(item) == item # array size must be a plain integer + array(base_type, item) # END shameless copy @@ -102,47 +102,47 @@ class _EmptyDecoratorAndManager(object): def __exit__(self, exc_type, exc_value, traceback): pass -class _Optimization(object): - pass - +class _Optimization(object): + pass + cclass = ccall = cfunc = _EmptyDecoratorAndManager() -returns = wraparound = boundscheck = initializedcheck = nonecheck = \ - embedsignature = cdivision = cdivision_warnings = \ +returns = wraparound = boundscheck = initializedcheck = nonecheck = \ + embedsignature = cdivision = cdivision_warnings = \ always_allows_keywords = profile = linetrace = infer_types = \ - unraisable_tracebacks = freelist = \ + unraisable_tracebacks = freelist = \ lambda _: _EmptyDecoratorAndManager() exceptval = lambda _=None, check=True: _EmptyDecoratorAndManager() -overflowcheck = lambda _: _EmptyDecoratorAndManager() -optimization = _Optimization() +overflowcheck = lambda _: _EmptyDecoratorAndManager() +optimization = _Optimization() + +overflowcheck.fold = optimization.use_switch = \ + optimization.unpack_method_calls = lambda arg: _EmptyDecoratorAndManager() + +final = internal = type_version_tag = no_gc_clear = no_gc = _empty_decorator -overflowcheck.fold = optimization.use_switch = \ - optimization.unpack_method_calls = lambda arg: _EmptyDecoratorAndManager() - -final = internal = type_version_tag = no_gc_clear = no_gc = _empty_decorator - binding = lambda _: _empty_decorator - -_cython_inline = None + +_cython_inline = None def inline(f, *args, **kwds): - if isinstance(f, basestring): - global _cython_inline - if _cython_inline is None: - from Cython.Build.Inline import cython_inline as _cython_inline - return _cython_inline(f, *args, **kwds) - else: - assert len(args) == len(kwds) == 0 - return f - - + if isinstance(f, basestring): + global _cython_inline + if _cython_inline is None: + from Cython.Build.Inline import cython_inline as _cython_inline + return _cython_inline(f, *args, **kwds) + else: + assert len(args) == len(kwds) == 0 + return f + + def compile(f): from Cython.Build.Inline import RuntimeCompiledFunction return RuntimeCompiledFunction(f) - + # Special functions def cdiv(a, b): @@ -160,9 +160,9 @@ def cmod(a, b): # Emulated language constructs -def cast(type, *args, **kwargs): - kwargs.pop('typecheck', None) - assert not kwargs +def cast(type, *args, **kwargs): + kwargs.pop('typecheck', None) + assert not kwargs if hasattr(type, '__call__'): return type(*args) else: @@ -188,15 +188,15 @@ def declare(type=None, value=_Unspecified, **kwds): return value class _nogil(object): - """Support for 'with nogil' statement and @nogil decorator. + """Support for 'with nogil' statement and @nogil decorator. """ - def __call__(self, x): - if callable(x): - # Used as function decorator => return the function unchanged. - return x - # Used as conditional context manager or to create an "@nogil(True/False)" decorator => keep going. - return self - + def __call__(self, x): + if callable(x): + # Used as function decorator => return the function unchanged. + return x + # Used as conditional context manager or to create an "@nogil(True/False)" decorator => keep going. 
+ return self + def __enter__(self): pass def __exit__(self, exc_class, exc, tb): @@ -206,7 +206,7 @@ nogil = _nogil() gil = _nogil() del _nogil - + # Emulated types class CythonMetaType(type): @@ -274,7 +274,7 @@ class StructType(CythonType): for key, value in cast_from.__dict__.items(): setattr(self, key, value) else: - for key, value in data.items(): + for key, value in data.items(): setattr(self, key, value) def __setattr__(self, key, value): @@ -301,7 +301,7 @@ class UnionType(CythonType): datadict = data if len(datadict) > 1: raise AttributeError("Union can only store one field at a time.") - for key, value in datadict.items(): + for key, value in datadict.items(): setattr(self, key, value) def __setattr__(self, key, value): @@ -385,7 +385,7 @@ def _specialized_from_args(signatures, args, kwargs): py_int = typedef(int, "int") try: py_long = typedef(long, "long") -except NameError: # Py3 +except NameError: # Py3 py_long = typedef(int, "long") py_float = typedef(float, "float") py_complex = typedef(complex, "double complex") @@ -408,15 +408,15 @@ to_repr = { gs = globals() -# note: cannot simply name the unicode type here as 2to3 gets in the way and replaces it by str -try: - import __builtin__ as builtins -except ImportError: # Py3 - import builtins - -gs['unicode'] = typedef(getattr(builtins, 'unicode', str), 'unicode') -del builtins - +# note: cannot simply name the unicode type here as 2to3 gets in the way and replaces it by str +try: + import __builtin__ as builtins +except ImportError: # Py3 + import builtins + +gs['unicode'] = typedef(getattr(builtins, 'unicode', str), 'unicode') +del builtins + for name in int_types: reprname = to_repr(name, name) gs[name] = typedef(py_int, reprname) @@ -457,7 +457,7 @@ class CythonDotParallel(object): def parallel(self, num_threads=None): return nogil - def prange(self, start=0, stop=None, step=1, nogil=False, schedule=None, chunksize=None, num_threads=None): + def prange(self, start=0, stop=None, step=1, nogil=False, schedule=None, chunksize=None, num_threads=None): if stop is None: stop = start start = 0 diff --git a/contrib/tools/cython/Cython/StringIOTree.py b/contrib/tools/cython/Cython/StringIOTree.py index f9a8e4bd47..d8239efeda 100644 --- a/contrib/tools/cython/Cython/StringIOTree.py +++ b/contrib/tools/cython/Cython/StringIOTree.py @@ -1,5 +1,5 @@ -# cython: auto_pickle=False - +# cython: auto_pickle=False + r""" Implements a buffer with insertion points. When you know you need to "get back" to a place and write more later, simply call insertion_point() @@ -34,13 +34,13 @@ EXAMPLE: from __future__ import absolute_import #, unicode_literals -try: +try: # Prefer cStringIO since io.StringIO() does not support writing 'str' in Py2. - from cStringIO import StringIO -except ImportError: + from cStringIO import StringIO +except ImportError: from io import StringIO - + class StringIOTree(object): """ See module docs. diff --git a/contrib/tools/cython/Cython/Tempita/__init__.py b/contrib/tools/cython/Cython/Tempita/__init__.py index b5014481d4..41a0ce3d0e 100644 --- a/contrib/tools/cython/Cython/Tempita/__init__.py +++ b/contrib/tools/cython/Cython/Tempita/__init__.py @@ -1,4 +1,4 @@ # The original Tempita implements all of its templating code here. # Moved it to _tempita.py to make the compilation portable. 
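The StringIOTree docstring above promises a buffer you can "get back" into; a small sketch of that insertion-point mechanism (module path as vendored in this tree, output noted from the documented behaviour):

    from Cython.StringIOTree import StringIOTree

    buf = StringIOTree()
    buf.write('first\n')
    point = buf.insertion_point()  # remember this spot for later
    buf.write('third\n')
    point.write('second\n')        # written back into the remembered spot
    print(buf.getvalue())          # first, second, third -- in order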
-from ._tempita import * +from ._tempita import * diff --git a/contrib/tools/cython/Cython/Tempita/_tempita.py b/contrib/tools/cython/Cython/Tempita/_tempita.py index f836a63345..587f6e4841 100644 --- a/contrib/tools/cython/Cython/Tempita/_tempita.py +++ b/contrib/tools/cython/Cython/Tempita/_tempita.py @@ -29,8 +29,8 @@ can use ``__name='tmpl.html'`` to set the name of the template. If there are syntax errors ``TemplateError`` will be raised. """ -from __future__ import absolute_import - +from __future__ import absolute_import + import re import sys try: @@ -46,11 +46,11 @@ except ImportError: # Py3 pass import os import tokenize -from io import StringIO +from io import StringIO + +from ._looper import looper +from .compat3 import bytes, unicode_, basestring_, next, is_unicode, coerce_text -from ._looper import looper -from .compat3 import bytes, unicode_, basestring_, next, is_unicode, coerce_text - __all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate', 'sub_html', 'html', 'bunch'] @@ -208,7 +208,7 @@ class Template(object): position=None, name=self.name) templ = self.get_template(inherit_template, self) self_ = TemplateObject(self.name) - for name, value in defs.items(): + for name, value in defs.items(): setattr(self_, name, value) self_.body = body ns = ns.copy() @@ -304,28 +304,28 @@ class Template(object): try: try: value = eval(code, self.default_namespace, ns) - except SyntaxError as e: + except SyntaxError as e: raise SyntaxError( 'invalid syntax in expression: %s' % code) return value - except Exception as e: + except Exception as e: if getattr(e, 'args', None): arg0 = e.args[0] else: arg0 = coerce_text(e) e.args = (self._add_line_info(arg0, pos),) - raise + raise def _exec(self, code, ns, pos): __traceback_hide__ = True try: - exec(code, self.default_namespace, ns) - except Exception as e: + exec(code, self.default_namespace, ns) + except Exception as e: if e.args: e.args = (self._add_line_info(e.args[0], pos),) else: e.args = (self._add_line_info(None, pos),) - raise + raise def _repr(self, value, pos): __traceback_hide__ = True @@ -334,7 +334,7 @@ class Template(object): return '' if self._unicode: try: - value = unicode_(value) + value = unicode_(value) except UnicodeDecodeError: value = bytes(value) else: @@ -343,9 +343,9 @@ class Template(object): if (is_unicode(value) and self.default_encoding): value = value.encode(self.default_encoding) - except Exception as e: + except Exception as e: e.args = (self._add_line_info(e.args[0], pos),) - raise + raise else: if self._unicode and isinstance(value, bytes): if not self.default_encoding: @@ -354,7 +354,7 @@ class Template(object): '(no default_encoding provided)' % value) try: value = value.decode(self.default_encoding) - except UnicodeDecodeError as e: + except UnicodeDecodeError as e: raise UnicodeDecodeError( e.encoding, e.object, @@ -391,7 +391,7 @@ def paste_script_template_renderer(content, vars, filename=None): class bunch(dict): def __init__(self, **kw): - for name, value in kw.items(): + for name, value in kw.items(): setattr(self, name, value) def __setattr__(self, name, value): @@ -415,7 +415,7 @@ class bunch(dict): def __repr__(self): return '<%s %s>' % ( self.__class__.__name__, - ' '.join(['%s=%r' % (k, v) for k, v in sorted(self.items())])) + ' '.join(['%s=%r' % (k, v) for k, v in sorted(self.items())])) ############################################################ ## HTML Templating @@ -465,7 +465,7 @@ def url(v): def attr(**kw): parts = [] - for name, value in sorted(kw.items()): + for name, value in 
sorted(kw.items()): if value is None: continue if name.endswith('_'): @@ -544,7 +544,7 @@ class TemplateDef(object): values = {} sig_args, var_args, var_kw, defaults = self._func_signature extra_kw = {} - for name, value in kw.items(): + for name, value in kw.items(): if not var_kw and name not in sig_args: raise TypeError( 'Unexpected argument %s' % name) @@ -567,7 +567,7 @@ class TemplateDef(object): raise TypeError( 'Extra position arguments: %s' % ', '.join([repr(v) for v in args])) - for name, value_expr in defaults.items(): + for name, value_expr in defaults.items(): if name not in values: values[name] = self._template._eval( value_expr, self._ns, self._pos) diff --git a/contrib/tools/cython/Cython/Tempita/compat3.py b/contrib/tools/cython/Cython/Tempita/compat3.py index 20aefddb20..9905530757 100644 --- a/contrib/tools/cython/Cython/Tempita/compat3.py +++ b/contrib/tools/cython/Cython/Tempita/compat3.py @@ -1,11 +1,11 @@ import sys -__all__ = ['b', 'basestring_', 'bytes', 'unicode_', 'next', 'is_unicode'] +__all__ = ['b', 'basestring_', 'bytes', 'unicode_', 'next', 'is_unicode'] if sys.version < "3": b = bytes = str basestring_ = basestring - unicode_ = unicode + unicode_ = unicode else: def b(s): @@ -14,7 +14,7 @@ else: return bytes(s) basestring_ = (bytes, str) bytes = bytes - unicode_ = str + unicode_ = str text = str if sys.version < "3": diff --git a/contrib/tools/cython/Cython/TestUtils.py b/contrib/tools/cython/Cython/TestUtils.py index 847647b157..9d6eb67fc3 100644 --- a/contrib/tools/cython/Cython/TestUtils.py +++ b/contrib/tools/cython/Cython/TestUtils.py @@ -1,16 +1,16 @@ -from __future__ import absolute_import +from __future__ import absolute_import -import os +import os import unittest import tempfile -from .Compiler import Errors -from .CodeWriter import CodeWriter -from .Compiler.TreeFragment import TreeFragment, strip_common_indent -from .Compiler.Visitor import TreeVisitor, VisitorTransform -from .Compiler import TreePath +from .Compiler import Errors +from .CodeWriter import CodeWriter +from .Compiler.TreeFragment import TreeFragment, strip_common_indent +from .Compiler.Visitor import TreeVisitor, VisitorTransform +from .Compiler import TreePath + - class NodeTypeWriter(TreeVisitor): def __init__(self): super(NodeTypeWriter, self).__init__() @@ -57,15 +57,15 @@ class CythonTest(unittest.TestCase): def assertLines(self, expected, result): "Checks that the given strings or lists of strings are equal line by line" - if not isinstance(expected, list): - expected = expected.split(u"\n") - if not isinstance(result, list): - result = result.split(u"\n") + if not isinstance(expected, list): + expected = expected.split(u"\n") + if not isinstance(result, list): + result = result.split(u"\n") for idx, (expected_line, result_line) in enumerate(zip(expected, result)): - self.assertEqual(expected_line, result_line, - "Line %d:\nExp: %s\nGot: %s" % (idx, expected_line, result_line)) + self.assertEqual(expected_line, result_line, + "Line %d:\nExp: %s\nGot: %s" % (idx, expected_line, result_line)) self.assertEqual(len(expected), len(result), - "Unmatched lines. Got:\n%s\nExpected:\n%s" % ("\n".join(expected), u"\n".join(result))) + "Unmatched lines. 
Got:\n%s\nExpected:\n%s" % ("\n".join(expected), u"\n".join(result))) def codeToLines(self, tree): writer = CodeWriter() @@ -81,24 +81,24 @@ class CythonTest(unittest.TestCase): expected_lines = strip_common_indent(expected.split("\n")) for idx, (line, expected_line) in enumerate(zip(result_lines, expected_lines)): - self.assertEqual(expected_line, line, - "Line %d:\nGot: %s\nExp: %s" % (idx, line, expected_line)) + self.assertEqual(expected_line, line, + "Line %d:\nGot: %s\nExp: %s" % (idx, line, expected_line)) self.assertEqual(len(result_lines), len(expected_lines), - "Unmatched lines. Got:\n%s\nExpected:\n%s" % ("\n".join(result_lines), expected)) + "Unmatched lines. Got:\n%s\nExpected:\n%s" % ("\n".join(result_lines), expected)) def assertNodeExists(self, path, result_tree): self.assertNotEqual(TreePath.find_first(result_tree, path), None, "Path '%s' not found in result tree" % path) - def fragment(self, code, pxds=None, pipeline=None): + def fragment(self, code, pxds=None, pipeline=None): "Simply create a tree fragment using the name of the test-case in parse errors." - if pxds is None: - pxds = {} - if pipeline is None: - pipeline = [] + if pxds is None: + pxds = {} + if pipeline is None: + pipeline = [] name = self.id() - if name.startswith("__main__."): - name = name[len("__main__."):] + if name.startswith("__main__."): + name = name[len("__main__."):] name = name.replace(".", "_") return TreeFragment(code, name, pxds, pipeline=pipeline) @@ -112,8 +112,8 @@ class CythonTest(unittest.TestCase): try: func() self.fail("Expected an exception of type %r" % exc_type) - except exc_type as e: - self.assertTrue(isinstance(e, exc_type)) + except exc_type as e: + self.assertTrue(isinstance(e, exc_type)) return e def should_not_fail(self, func): @@ -122,8 +122,8 @@ class CythonTest(unittest.TestCase): the return value of func.""" try: return func() - except Exception as exc: - self.fail(str(exc)) + except Exception as exc: + self.fail(str(exc)) class TransformTest(CythonTest): @@ -150,9 +150,9 @@ class TransformTest(CythonTest): Plans: One could have a pxd dictionary parameter to run_pipeline. 
""" - def run_pipeline(self, pipeline, pyx, pxds=None): - if pxds is None: - pxds = {} + def run_pipeline(self, pipeline, pyx, pxds=None): + if pxds is None: + pxds = {} tree = self.fragment(pyx, pxds).root # Run pipeline for T in pipeline: diff --git a/contrib/tools/cython/Cython/Tests/TestCythonUtils.py b/contrib/tools/cython/Cython/Tests/TestCythonUtils.py index ed0ea9593b..2641900c01 100644 --- a/contrib/tools/cython/Cython/Tests/TestCythonUtils.py +++ b/contrib/tools/cython/Cython/Tests/TestCythonUtils.py @@ -1,11 +1,11 @@ -import unittest - -from ..Utils import build_hex_version - -class TestCythonUtils(unittest.TestCase): - def test_build_hex_version(self): - self.assertEqual('0x001D00A1', build_hex_version('0.29a1')) - self.assertEqual('0x001D00A1', build_hex_version('0.29a1')) - self.assertEqual('0x001D03C4', build_hex_version('0.29.3rc4')) - self.assertEqual('0x001D00F0', build_hex_version('0.29')) - self.assertEqual('0x040000F0', build_hex_version('4.0')) +import unittest + +from ..Utils import build_hex_version + +class TestCythonUtils(unittest.TestCase): + def test_build_hex_version(self): + self.assertEqual('0x001D00A1', build_hex_version('0.29a1')) + self.assertEqual('0x001D00A1', build_hex_version('0.29a1')) + self.assertEqual('0x001D03C4', build_hex_version('0.29.3rc4')) + self.assertEqual('0x001D00F0', build_hex_version('0.29')) + self.assertEqual('0x040000F0', build_hex_version('4.0')) diff --git a/contrib/tools/cython/Cython/Tests/TestJediTyper.py b/contrib/tools/cython/Cython/Tests/TestJediTyper.py index 2409f0b9cc..253adef171 100644 --- a/contrib/tools/cython/Cython/Tests/TestJediTyper.py +++ b/contrib/tools/cython/Cython/Tests/TestJediTyper.py @@ -20,7 +20,7 @@ TOOLS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', @contextmanager def _tempfile(code): code = dedent(code) - if not isinstance(code, bytes): + if not isinstance(code, bytes): code = code.encode('utf8') with NamedTemporaryFile(suffix='.py') as f: @@ -32,14 +32,14 @@ def _tempfile(code): def _test_typing(code, inject=False): sys.path.insert(0, TOOLS_DIR) try: - import jedityper + import jedityper finally: sys.path.remove(TOOLS_DIR) lines = [] with _tempfile(code) as f: - types = jedityper.analyse(f.name) + types = jedityper.analyse(f.name) if inject: - lines = jedityper.inject_types(f.name, types) + lines = jedityper.inject_types(f.name, types) return types, lines @@ -84,7 +84,7 @@ class TestJediTyper(TransformTest): self.assertFalse(types) self.assertEqual({'a': set(['int']), 'i': set(['int'])}, variables) - def test_conflicting_types_in_function(self): + def test_conflicting_types_in_function(self): code = '''\ def func(a, b): print(a) @@ -99,7 +99,7 @@ class TestJediTyper(TransformTest): self.assertIn(('func', (1, 0)), types) variables = types.pop(('func', (1, 0))) self.assertFalse(types) - self.assertEqual({'a': set(['float', 'int', 'str']), 'b': set(['int'])}, variables) + self.assertEqual({'a': set(['float', 'int', 'str']), 'b': set(['int'])}, variables) def _test_typing_function_char_loop(self): code = '''\ @@ -117,93 +117,93 @@ class TestJediTyper(TransformTest): self.assertFalse(types) self.assertEqual({'a': set(['int']), 'i': set(['int'])}, variables) - def test_typing_global_list(self): - code = '''\ - a = [x for x in range(10)] - b = list(range(10)) - c = a + b - d = [0]*10 - ''' - types = self._test(code) - self.assertIn((None, (1, 0)), types) - variables = types.pop((None, (1, 0))) - self.assertFalse(types) - self.assertEqual({'a': set(['list']), 'b': set(['list']), 
'c': set(['list']), 'd': set(['list'])}, variables) - - def test_typing_function_list(self): - code = '''\ - def func(x): - a = [[], []] - b = [0]* 10 + a - c = a[0] - - print(func([0]*100)) - ''' - types = self._test(code) - self.assertIn(('func', (1, 0)), types) - variables = types.pop(('func', (1, 0))) - self.assertFalse(types) - self.assertEqual({'a': set(['list']), 'b': set(['list']), 'c': set(['list']), 'x': set(['list'])}, variables) - - def test_typing_global_dict(self): - code = '''\ - a = dict() - b = {i: i**2 for i in range(10)} + def test_typing_global_list(self): + code = '''\ + a = [x for x in range(10)] + b = list(range(10)) + c = a + b + d = [0]*10 + ''' + types = self._test(code) + self.assertIn((None, (1, 0)), types) + variables = types.pop((None, (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['list']), 'b': set(['list']), 'c': set(['list']), 'd': set(['list'])}, variables) + + def test_typing_function_list(self): + code = '''\ + def func(x): + a = [[], []] + b = [0]* 10 + a + c = a[0] + + print(func([0]*100)) + ''' + types = self._test(code) + self.assertIn(('func', (1, 0)), types) + variables = types.pop(('func', (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['list']), 'b': set(['list']), 'c': set(['list']), 'x': set(['list'])}, variables) + + def test_typing_global_dict(self): + code = '''\ + a = dict() + b = {i: i**2 for i in range(10)} c = a - ''' - types = self._test(code) - self.assertIn((None, (1, 0)), types) - variables = types.pop((None, (1, 0))) - self.assertFalse(types) - self.assertEqual({'a': set(['dict']), 'b': set(['dict']), 'c': set(['dict'])}, variables) - - def test_typing_function_dict(self): - code = '''\ - def func(x): - a = dict() - b = {i: i**2 for i in range(10)} - c = x - - print(func({1:2, 'x':7})) - ''' - types = self._test(code) - self.assertIn(('func', (1, 0)), types) - variables = types.pop(('func', (1, 0))) - self.assertFalse(types) - self.assertEqual({'a': set(['dict']), 'b': set(['dict']), 'c': set(['dict']), 'x': set(['dict'])}, variables) - - - def test_typing_global_set(self): - code = '''\ - a = set() - # b = {i for i in range(10)} # jedi does not support set comprehension yet - c = a - d = {1,2,3} - e = a | b - ''' - types = self._test(code) - self.assertIn((None, (1, 0)), types) - variables = types.pop((None, (1, 0))) - self.assertFalse(types) - self.assertEqual({'a': set(['set']), 'c': set(['set']), 'd': set(['set']), 'e': set(['set'])}, variables) - - def test_typing_function_set(self): - code = '''\ - def func(x): - a = set() - # b = {i for i in range(10)} # jedi does not support set comprehension yet - c = a - d = a | b - - print(func({1,2,3})) - ''' - types = self._test(code) - self.assertIn(('func', (1, 0)), types) - variables = types.pop(('func', (1, 0))) - self.assertFalse(types) - self.assertEqual({'a': set(['set']), 'c': set(['set']), 'd': set(['set']), 'x': set(['set'])}, variables) - - + ''' + types = self._test(code) + self.assertIn((None, (1, 0)), types) + variables = types.pop((None, (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['dict']), 'b': set(['dict']), 'c': set(['dict'])}, variables) + + def test_typing_function_dict(self): + code = '''\ + def func(x): + a = dict() + b = {i: i**2 for i in range(10)} + c = x + + print(func({1:2, 'x':7})) + ''' + types = self._test(code) + self.assertIn(('func', (1, 0)), types) + variables = types.pop(('func', (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['dict']), 'b': set(['dict']), 'c': set(['dict']), 
'x': set(['dict'])}, variables) + + + def test_typing_global_set(self): + code = '''\ + a = set() + # b = {i for i in range(10)} # jedi does not support set comprehension yet + c = a + d = {1,2,3} + e = a | b + ''' + types = self._test(code) + self.assertIn((None, (1, 0)), types) + variables = types.pop((None, (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['set']), 'c': set(['set']), 'd': set(['set']), 'e': set(['set'])}, variables) + + def test_typing_function_set(self): + code = '''\ + def func(x): + a = set() + # b = {i for i in range(10)} # jedi does not support set comprehension yet + c = a + d = a | b + + print(func({1,2,3})) + ''' + types = self._test(code) + self.assertIn(('func', (1, 0)), types) + variables = types.pop(('func', (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['set']), 'c': set(['set']), 'd': set(['set']), 'x': set(['set'])}, variables) + + class TestTypeInjection(TestJediTyper): """ Subtype of TestJediTyper that additionally tests type injection and compilation. diff --git a/contrib/tools/cython/Cython/Tests/TestStringIOTree.py b/contrib/tools/cython/Cython/Tests/TestStringIOTree.py index e1b3b63386..a15f2cd88d 100644 --- a/contrib/tools/cython/Cython/Tests/TestStringIOTree.py +++ b/contrib/tools/cython/Cython/Tests/TestStringIOTree.py @@ -52,7 +52,7 @@ class TestStringIOTree(unittest.TestCase): self.write_line(10, tree=line_10_insertion_point) self.write_line(12, tree=line_9_to_12_insertion_point) - self.assertEqual(self.tree.allmarkers(), list(range(1, 17))) + self.assertEqual(self.tree.allmarkers(), list(range(1, 17))) self.assertEqual(code.strip(), self.tree.getvalue().strip()) diff --git a/contrib/tools/cython/Cython/Tests/xmlrunner.py b/contrib/tools/cython/Cython/Tests/xmlrunner.py index 0ade225fd2..d6838aa22e 100644 --- a/contrib/tools/cython/Cython/Tests/xmlrunner.py +++ b/contrib/tools/cython/Cython/Tests/xmlrunner.py @@ -38,17 +38,17 @@ if __name__ == '__main__': unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports')) """ -from __future__ import absolute_import - +from __future__ import absolute_import + import os import sys import time from unittest import TestResult, TextTestResult, TextTestRunner import xml.dom.minidom -try: - from StringIO import StringIO -except ImportError: - from io import StringIO # doesn't accept 'str' in Py2 +try: + from StringIO import StringIO +except ImportError: + from io import StringIO # doesn't accept 'str' in Py2 class XMLDocument(xml.dom.minidom.Document): @@ -186,27 +186,27 @@ class _XMLTestResult(TextTestResult): for test_info in errors: if isinstance(test_info, tuple): test_info, exc_info = test_info - - try: - t = test_info.get_elapsed_time() - except AttributeError: - t = 0 - try: - descr = test_info.get_description() - except AttributeError: - try: - descr = test_info.getDescription() - except AttributeError: - descr = str(test_info) - try: - err_info = test_info.get_error_info() - except AttributeError: - err_info = str(test_info) - + + try: + t = test_info.get_elapsed_time() + except AttributeError: + t = 0 + try: + descr = test_info.get_description() + except AttributeError: + try: + descr = test_info.getDescription() + except AttributeError: + descr = str(test_info) + try: + err_info = test_info.get_error_info() + except AttributeError: + err_info = str(test_info) + self.stream.writeln(self.separator1) - self.stream.writeln('%s [%.3fs]: %s' % (flavour, t, descr)) + self.stream.writeln('%s [%.3fs]: %s' % (flavour, t, descr)) 
self.stream.writeln(self.separator2) - self.stream.writeln('%s' % err_info) + self.stream.writeln('%s' % err_info) def _get_info_by_testcase(self): """This method organizes test results by TestCase module. This @@ -217,9 +217,9 @@ class _XMLTestResult(TextTestResult): for tests in (self.successes, self.failures, self.errors): for test_info in tests: - if not isinstance(test_info, _TestInfo): - print("Unexpected test result type: %r" % (test_info,)) - continue + if not isinstance(test_info, _TestInfo): + print("Unexpected test result type: %r" % (test_info,)) + continue testcase = type(test_info.test_method) # Ignore module name if it is '__main__' @@ -335,10 +335,10 @@ class _XMLTestResult(TextTestResult): class XMLTestRunner(TextTestRunner): """A test runner class that outputs the results in JUnit like XML files. """ - def __init__(self, output='.', stream=None, descriptions=True, verbose=False, elapsed_times=True): + def __init__(self, output='.', stream=None, descriptions=True, verbose=False, elapsed_times=True): "Create a new instance of XMLTestRunner." - if stream is None: - stream = sys.stderr + if stream is None: + stream = sys.stderr verbosity = (1, 2)[verbose] TextTestRunner.__init__(self, stream, descriptions, verbosity) self.output = output @@ -367,11 +367,11 @@ class XMLTestRunner(TextTestRunner): stop_time = time.time() time_taken = stop_time - start_time - # Generate reports - self.stream.writeln() - self.stream.writeln('Generating XML reports...') - result.generate_reports(self) - + # Generate reports + self.stream.writeln() + self.stream.writeln('Generating XML reports...') + result.generate_reports(self) + # Print results result.printErrors() self.stream.writeln(result.separator2) diff --git a/contrib/tools/cython/Cython/Utility/AsyncGen.c b/contrib/tools/cython/Cython/Utility/AsyncGen.c index a6ba7143c2..9a11d6a129 100644 --- a/contrib/tools/cython/Cython/Utility/AsyncGen.c +++ b/contrib/tools/cython/Cython/Utility/AsyncGen.c @@ -103,7 +103,7 @@ static int __Pyx_async_gen_init_hooks(__pyx_PyAsyncGenObject *o) { //@requires: AsyncGeneratorInitFinalizer //@requires: Coroutine.c::Coroutine //@requires: Coroutine.c::ReturnWithStopIteration -//@requires: ObjectHandling.c::PyObjectCall2Args +//@requires: ObjectHandling.c::PyObjectCall2Args //@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict PyDoc_STRVAR(__Pyx_async_gen_send_doc, @@ -223,20 +223,20 @@ __Pyx_async_gen_init_hooks(__pyx_PyAsyncGenObject *o) firstiter = tstate->async_gen_firstiter; if (firstiter) { PyObject *res; -#if CYTHON_UNPACK_METHODS - PyObject *self; -#endif +#if CYTHON_UNPACK_METHODS + PyObject *self; +#endif Py_INCREF(firstiter); // at least asyncio stores methods here => optimise the call -#if CYTHON_UNPACK_METHODS - if (likely(PyMethod_Check(firstiter)) && likely((self = PyMethod_GET_SELF(firstiter)) != NULL)) { - PyObject *function = PyMethod_GET_FUNCTION(firstiter); - res = __Pyx_PyObject_Call2Args(function, self, (PyObject*)o); - } else -#endif - res = __Pyx_PyObject_CallOneArg(firstiter, (PyObject*)o); - +#if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(firstiter)) && likely((self = PyMethod_GET_SELF(firstiter)) != NULL)) { + PyObject *function = PyMethod_GET_FUNCTION(firstiter); + res = __Pyx_PyObject_Call2Args(function, self, (PyObject*)o); + } else +#endif + res = __Pyx_PyObject_CallOneArg(firstiter, (PyObject*)o); + Py_DECREF(firstiter); if (unlikely(res == NULL)) { return 1; @@ -259,12 +259,12 @@ __Pyx_async_gen_anext(PyObject *g) return __Pyx_async_gen_asend_new(o, NULL); } -static 
PyObject * -__Pyx_async_gen_anext_method(PyObject *g, CYTHON_UNUSED PyObject *arg) { - return __Pyx_async_gen_anext(g); -} +static PyObject * +__Pyx_async_gen_anext_method(PyObject *g, CYTHON_UNUSED PyObject *arg) { + return __Pyx_async_gen_anext(g); +} + - static PyObject * __Pyx_async_gen_asend(__pyx_PyAsyncGenObject *o, PyObject *arg) { @@ -284,7 +284,7 @@ __Pyx_async_gen_aclose(__pyx_PyAsyncGenObject *o, CYTHON_UNUSED PyObject *arg) return __Pyx_async_gen_athrow_new(o, NULL); } - + static PyObject * __Pyx_async_gen_athrow(__pyx_PyAsyncGenObject *o, PyObject *args) { @@ -295,12 +295,12 @@ __Pyx_async_gen_athrow(__pyx_PyAsyncGenObject *o, PyObject *args) } -static PyObject * -__Pyx_async_gen_self_method(PyObject *g, CYTHON_UNUSED PyObject *arg) { - return __Pyx_NewRef(g); -} - - +static PyObject * +__Pyx_async_gen_self_method(PyObject *g, CYTHON_UNUSED PyObject *arg) { + return __Pyx_NewRef(g); +} + + static PyGetSetDef __Pyx_async_gen_getsetlist[] = { {(char*) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, (char*) PyDoc_STR("name of the async generator"), 0}, @@ -340,8 +340,8 @@ static PyMethodDef __Pyx_async_gen_methods[] = { {"asend", (PyCFunction)__Pyx_async_gen_asend, METH_O, __Pyx_async_asend_doc}, {"athrow",(PyCFunction)__Pyx_async_gen_athrow, METH_VARARGS, __Pyx_async_athrow_doc}, {"aclose", (PyCFunction)__Pyx_async_gen_aclose, METH_NOARGS, __Pyx_async_aclose_doc}, - {"__aiter__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_aiter_doc}, - {"__anext__", (PyCFunction)__Pyx_async_gen_anext_method, METH_NOARGS, __Pyx_async_anext_doc}, + {"__aiter__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_aiter_doc}, + {"__anext__", (PyCFunction)__Pyx_async_gen_anext_method, METH_NOARGS, __Pyx_async_anext_doc}, {0, 0, 0, 0} /* Sentinel */ }; @@ -425,11 +425,11 @@ static PyTypeObject __pyx_AsyncGenType_type = { 0, /* tp_finalize */ #endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif + 0, /*tp_vectorcall*/ +#endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ -#endif +#endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif @@ -587,7 +587,7 @@ static PyMethodDef __Pyx_async_gen_asend_methods[] = { {"send", (PyCFunction)__Pyx_async_gen_asend_send, METH_O, __Pyx_async_gen_send_doc}, {"throw", (PyCFunction)__Pyx_async_gen_asend_throw, METH_VARARGS, __Pyx_async_gen_throw_doc}, {"close", (PyCFunction)__Pyx_async_gen_asend_close, METH_NOARGS, __Pyx_async_gen_close_doc}, - {"__await__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_gen_await_doc}, + {"__await__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_gen_await_doc}, {0, 0, 0, 0} /* Sentinel */ }; @@ -666,11 +666,11 @@ static PyTypeObject __pyx__PyAsyncGenASendType_type = { 0, /* tp_finalize */ #endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif + 0, /*tp_vectorcall*/ +#endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ -#endif +#endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif @@ -784,11 +784,11 @@ static PyTypeObject __pyx__PyAsyncGenWrappedValueType_type = { 0, /* tp_finalize */ #endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ 
-#endif + 0, /*tp_vectorcall*/ +#endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ -#endif +#endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif @@ -997,7 +997,7 @@ static PyMethodDef __Pyx_async_gen_athrow_methods[] = { {"send", (PyCFunction)__Pyx_async_gen_athrow_send, METH_O, __Pyx_async_gen_send_doc}, {"throw", (PyCFunction)__Pyx_async_gen_athrow_throw, METH_VARARGS, __Pyx_async_gen_throw_doc}, {"close", (PyCFunction)__Pyx_async_gen_athrow_close, METH_NOARGS, __Pyx_async_gen_close_doc}, - {"__await__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_gen_await_doc}, + {"__await__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_gen_await_doc}, {0, 0, 0, 0} /* Sentinel */ }; @@ -1075,11 +1075,11 @@ static PyTypeObject __pyx__PyAsyncGenAThrowType_type = { 0, /* tp_finalize */ #endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif + 0, /*tp_vectorcall*/ +#endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ -#endif +#endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif diff --git a/contrib/tools/cython/Cython/Utility/Buffer.c b/contrib/tools/cython/Cython/Utility/Buffer.c index 8c2c5de779..3c7105fa35 100644 --- a/contrib/tools/cython/Cython/Utility/Buffer.c +++ b/contrib/tools/cython/Cython/Utility/Buffer.c @@ -200,7 +200,7 @@ static int __Pyx__GetBufferAndValidate( __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } - if (unlikely((size_t)buf->itemsize != dtype->size)) { + if (unlikely((size_t)buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", @@ -229,9 +229,9 @@ fail:; // // The alignment code is copied from _struct.c in Python. -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /*proto*/ /////////////// BufferFormatCheck /////////////// @@ -273,7 +273,7 @@ static int __Pyx_BufFmt_ParseNumber(const char** ts) { return -1; } else { count = *t++ - '0'; - while (*t >= '0' && *t <= '9') { + while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } @@ -886,13 +886,13 @@ static struct __pyx_typeinfo_string __Pyx_TypeInfoToFormat(__Pyx_TypeInfo *type) case 'I': case 'U': if (size == 1) - *buf = (type->is_unsigned) ? 'B' : 'b'; + *buf = (type->is_unsigned) ? 'B' : 'b'; else if (size == 2) - *buf = (type->is_unsigned) ? 'H' : 'h'; + *buf = (type->is_unsigned) ? 'H' : 'h'; else if (size == 4) - *buf = (type->is_unsigned) ? 'I' : 'i'; + *buf = (type->is_unsigned) ? 'I' : 'i'; else if (size == 8) - *buf = (type->is_unsigned) ? 'Q' : 'q'; + *buf = (type->is_unsigned) ? 
'Q' : 'q'; break; case 'P': *buf = 'P'; diff --git a/contrib/tools/cython/Cython/Utility/Builtins.c b/contrib/tools/cython/Cython/Utility/Builtins.c index 81cc1ce82f..1ffb3bcebd 100644 --- a/contrib/tools/cython/Cython/Utility/Builtins.c +++ b/contrib/tools/cython/Cython/Utility/Builtins.c @@ -29,7 +29,7 @@ static PyObject* __Pyx_Globals(void) { goto bad; for (i = PyList_GET_SIZE(names)-1; i >= 0; i--) { #if CYTHON_COMPILING_IN_PYPY - PyObject* name = PySequence_ITEM(names, i); + PyObject* name = PySequence_ITEM(names, i); if (!name) goto bad; #else @@ -115,12 +115,12 @@ static PyObject* __Pyx_PyExec3(PyObject* o, PyObject* globals, PyObject* locals) } if (PyCode_Check(o)) { - if (__Pyx_PyCode_HasFreeVars((PyCodeObject *)o)) { + if (__Pyx_PyCode_HasFreeVars((PyCodeObject *)o)) { PyErr_SetString(PyExc_TypeError, "code object passed to exec() may not contain free variables"); goto bad; } - #if CYTHON_COMPILING_IN_PYPY || PY_VERSION_HEX < 0x030200B1 + #if CYTHON_COMPILING_IN_PYPY || PY_VERSION_HEX < 0x030200B1 result = PyEval_EvalCode((PyCodeObject *)o, globals, locals); #else result = PyEval_EvalCode(o, globals, locals); @@ -237,111 +237,111 @@ static PyObject* __Pyx_Intern(PyObject* s) { //////////////////// abs_longlong.proto //////////////////// static CYTHON_INLINE PY_LONG_LONG __Pyx_abs_longlong(PY_LONG_LONG x) { -#if defined (__cplusplus) && __cplusplus >= 201103L +#if defined (__cplusplus) && __cplusplus >= 201103L return std::abs(x); -#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L return llabs(x); #elif defined (_MSC_VER) - // abs() is defined for long, but 64-bits type on MSVC is long long. + // abs() is defined for long, but 64-bits type on MSVC is long long. // Use MS-specific _abs64() instead, which returns the original (negative) value for abs(-MAX-1) return _abs64(x); -#elif defined (__GNUC__) - // gcc or clang on 64 bit windows. +#elif defined (__GNUC__) + // gcc or clang on 64 bit windows. return __builtin_llabs(x); #else - if (sizeof(PY_LONG_LONG) <= sizeof(Py_ssize_t)) - return __Pyx_sst_abs(x); + if (sizeof(PY_LONG_LONG) <= sizeof(Py_ssize_t)) + return __Pyx_sst_abs(x); return (x<0) ? -x : x; #endif } - -//////////////////// py_abs.proto //////////////////// - -#if CYTHON_USE_PYLONG_INTERNALS -static PyObject *__Pyx_PyLong_AbsNeg(PyObject *num);/*proto*/ - -#define __Pyx_PyNumber_Absolute(x) \ - ((likely(PyLong_CheckExact(x))) ? \ - (likely(Py_SIZE(x) >= 0) ? (Py_INCREF(x), (x)) : __Pyx_PyLong_AbsNeg(x)) : \ - PyNumber_Absolute(x)) - -#else -#define __Pyx_PyNumber_Absolute(x) PyNumber_Absolute(x) -#endif - -//////////////////// py_abs //////////////////// - -#if CYTHON_USE_PYLONG_INTERNALS -static PyObject *__Pyx_PyLong_AbsNeg(PyObject *n) { - if (likely(Py_SIZE(n) == -1)) { - // digits are unsigned - return PyLong_FromLong(((PyLongObject*)n)->ob_digit[0]); - } -#if CYTHON_COMPILING_IN_CPYTHON - { - PyObject *copy = _PyLong_Copy((PyLongObject*)n); - if (likely(copy)) { + +//////////////////// py_abs.proto //////////////////// + +#if CYTHON_USE_PYLONG_INTERNALS +static PyObject *__Pyx_PyLong_AbsNeg(PyObject *num);/*proto*/ + +#define __Pyx_PyNumber_Absolute(x) \ + ((likely(PyLong_CheckExact(x))) ? \ + (likely(Py_SIZE(x) >= 0) ? 
(Py_INCREF(x), (x)) : __Pyx_PyLong_AbsNeg(x)) : \ + PyNumber_Absolute(x)) + +#else +#define __Pyx_PyNumber_Absolute(x) PyNumber_Absolute(x) +#endif + +//////////////////// py_abs //////////////////// + +#if CYTHON_USE_PYLONG_INTERNALS +static PyObject *__Pyx_PyLong_AbsNeg(PyObject *n) { + if (likely(Py_SIZE(n) == -1)) { + // digits are unsigned + return PyLong_FromLong(((PyLongObject*)n)->ob_digit[0]); + } +#if CYTHON_COMPILING_IN_CPYTHON + { + PyObject *copy = _PyLong_Copy((PyLongObject*)n); + if (likely(copy)) { // negate the size to swap the sign __Pyx_SET_SIZE(copy, -Py_SIZE(copy)); - } - return copy; - } -#else - return PyNumber_Negative(n); -#endif -} -#endif - - + } + return copy; + } +#else + return PyNumber_Negative(n); +#endif +} +#endif + + //////////////////// pow2.proto //////////////////// #define __Pyx_PyNumber_Power2(a, b) PyNumber_Power(a, b, Py_None) - -//////////////////// object_ord.proto //////////////////// -//@requires: TypeConversion.c::UnicodeAsUCS4 - -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyObject_Ord(c) \ - (likely(PyUnicode_Check(c)) ? (long)__Pyx_PyUnicode_AsPy_UCS4(c) : __Pyx__PyObject_Ord(c)) -#else -#define __Pyx_PyObject_Ord(c) __Pyx__PyObject_Ord(c) -#endif -static long __Pyx__PyObject_Ord(PyObject* c); /*proto*/ - -//////////////////// object_ord //////////////////// - -static long __Pyx__PyObject_Ord(PyObject* c) { - Py_ssize_t size; - if (PyBytes_Check(c)) { - size = PyBytes_GET_SIZE(c); - if (likely(size == 1)) { - return (unsigned char) PyBytes_AS_STRING(c)[0]; - } -#if PY_MAJOR_VERSION < 3 - } else if (PyUnicode_Check(c)) { - return (long)__Pyx_PyUnicode_AsPy_UCS4(c); -#endif -#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) - } else if (PyByteArray_Check(c)) { - size = PyByteArray_GET_SIZE(c); - if (likely(size == 1)) { - return (unsigned char) PyByteArray_AS_STRING(c)[0]; - } -#endif - } else { - // FIXME: support character buffers - but CPython doesn't support them either - PyErr_Format(PyExc_TypeError, + +//////////////////// object_ord.proto //////////////////// +//@requires: TypeConversion.c::UnicodeAsUCS4 + +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyObject_Ord(c) \ + (likely(PyUnicode_Check(c)) ? 
(long)__Pyx_PyUnicode_AsPy_UCS4(c) : __Pyx__PyObject_Ord(c)) +#else +#define __Pyx_PyObject_Ord(c) __Pyx__PyObject_Ord(c) +#endif +static long __Pyx__PyObject_Ord(PyObject* c); /*proto*/ + +//////////////////// object_ord //////////////////// + +static long __Pyx__PyObject_Ord(PyObject* c) { + Py_ssize_t size; + if (PyBytes_Check(c)) { + size = PyBytes_GET_SIZE(c); + if (likely(size == 1)) { + return (unsigned char) PyBytes_AS_STRING(c)[0]; + } +#if PY_MAJOR_VERSION < 3 + } else if (PyUnicode_Check(c)) { + return (long)__Pyx_PyUnicode_AsPy_UCS4(c); +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + } else if (PyByteArray_Check(c)) { + size = PyByteArray_GET_SIZE(c); + if (likely(size == 1)) { + return (unsigned char) PyByteArray_AS_STRING(c)[0]; + } +#endif + } else { + // FIXME: support character buffers - but CPython doesn't support them either + PyErr_Format(PyExc_TypeError, "ord() expected string of length 1, but %.200s found", Py_TYPE(c)->tp_name); - return (long)(Py_UCS4)-1; - } - PyErr_Format(PyExc_TypeError, - "ord() expected a character, but string of length %zd found", size); - return (long)(Py_UCS4)-1; -} - - + return (long)(Py_UCS4)-1; + } + PyErr_Format(PyExc_TypeError, + "ord() expected a character, but string of length %zd found", size); + return (long)(Py_UCS4)-1; +} + + //////////////////// py_dict_keys.proto //////////////////// static CYTHON_INLINE PyObject* __Pyx_PyDict_Keys(PyObject* d); /*proto*/ @@ -350,7 +350,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyDict_Keys(PyObject* d); /*proto*/ static CYTHON_INLINE PyObject* __Pyx_PyDict_Keys(PyObject* d) { if (PY_MAJOR_VERSION >= 3) - return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d); + return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d); else return PyDict_Keys(d); } @@ -363,7 +363,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d); /*proto*/ static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d) { if (PY_MAJOR_VERSION >= 3) - return CALL_UNBOUND_METHOD(PyDict_Type, "values", d); + return CALL_UNBOUND_METHOD(PyDict_Type, "values", d); else return PyDict_Values(d); } @@ -376,7 +376,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyDict_Items(PyObject* d); /*proto*/ static CYTHON_INLINE PyObject* __Pyx_PyDict_Items(PyObject* d) { if (PY_MAJOR_VERSION >= 3) - return CALL_UNBOUND_METHOD(PyDict_Type, "items", d); + return CALL_UNBOUND_METHOD(PyDict_Type, "items", d); else return PyDict_Items(d); } @@ -388,10 +388,10 @@ static CYTHON_INLINE PyObject* __Pyx_PyDict_IterKeys(PyObject* d); /*proto*/ //////////////////// py_dict_iterkeys //////////////////// static CYTHON_INLINE PyObject* __Pyx_PyDict_IterKeys(PyObject* d) { - if (PY_MAJOR_VERSION >= 3) - return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d); - else - return CALL_UNBOUND_METHOD(PyDict_Type, "iterkeys", d); + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "iterkeys", d); } //////////////////// py_dict_itervalues.proto //////////////////// @@ -401,10 +401,10 @@ static CYTHON_INLINE PyObject* __Pyx_PyDict_IterValues(PyObject* d); /*proto*/ //////////////////// py_dict_itervalues //////////////////// static CYTHON_INLINE PyObject* __Pyx_PyDict_IterValues(PyObject* d) { - if (PY_MAJOR_VERSION >= 3) - return CALL_UNBOUND_METHOD(PyDict_Type, "values", d); - else - return CALL_UNBOUND_METHOD(PyDict_Type, "itervalues", d); + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "values", d); + else + 
return CALL_UNBOUND_METHOD(PyDict_Type, "itervalues", d); } //////////////////// py_dict_iteritems.proto //////////////////// @@ -414,10 +414,10 @@ static CYTHON_INLINE PyObject* __Pyx_PyDict_IterItems(PyObject* d); /*proto*/ //////////////////// py_dict_iteritems //////////////////// static CYTHON_INLINE PyObject* __Pyx_PyDict_IterItems(PyObject* d) { - if (PY_MAJOR_VERSION >= 3) - return CALL_UNBOUND_METHOD(PyDict_Type, "items", d); - else - return CALL_UNBOUND_METHOD(PyDict_Type, "iteritems", d); + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "items", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "iteritems", d); } //////////////////// py_dict_viewkeys.proto //////////////////// @@ -430,10 +430,10 @@ static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewKeys(PyObject* d); /*proto*/ //////////////////// py_dict_viewkeys //////////////////// static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewKeys(PyObject* d) { - if (PY_MAJOR_VERSION >= 3) - return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d); - else - return CALL_UNBOUND_METHOD(PyDict_Type, "viewkeys", d); + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "viewkeys", d); } //////////////////// py_dict_viewvalues.proto //////////////////// @@ -446,10 +446,10 @@ static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewValues(PyObject* d); /*proto*/ //////////////////// py_dict_viewvalues //////////////////// static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewValues(PyObject* d) { - if (PY_MAJOR_VERSION >= 3) - return CALL_UNBOUND_METHOD(PyDict_Type, "values", d); - else - return CALL_UNBOUND_METHOD(PyDict_Type, "viewvalues", d); + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "values", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "viewvalues", d); } //////////////////// py_dict_viewitems.proto //////////////////// @@ -462,10 +462,10 @@ static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewItems(PyObject* d); /*proto*/ //////////////////// py_dict_viewitems //////////////////// static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewItems(PyObject* d) { - if (PY_MAJOR_VERSION >= 3) - return CALL_UNBOUND_METHOD(PyDict_Type, "items", d); - else - return CALL_UNBOUND_METHOD(PyDict_Type, "viewitems", d); + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "items", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "viewitems", d); } @@ -479,16 +479,16 @@ static CYTHON_INLINE PyObject* __Pyx_PyFrozenSet_New(PyObject* it); static CYTHON_INLINE PyObject* __Pyx_PyFrozenSet_New(PyObject* it) { if (it) { PyObject* result; -#if CYTHON_COMPILING_IN_PYPY - // PyPy currently lacks PyFrozenSet_CheckExact() and PyFrozenSet_New() - PyObject* args; - args = PyTuple_Pack(1, it); - if (unlikely(!args)) - return NULL; - result = PyObject_Call((PyObject*)&PyFrozenSet_Type, args, NULL); - Py_DECREF(args); - return result; -#else +#if CYTHON_COMPILING_IN_PYPY + // PyPy currently lacks PyFrozenSet_CheckExact() and PyFrozenSet_New() + PyObject* args; + args = PyTuple_Pack(1, it); + if (unlikely(!args)) + return NULL; + result = PyObject_Call((PyObject*)&PyFrozenSet_Type, args, NULL); + Py_DECREF(args); + return result; +#else if (PyFrozenSet_CheckExact(it)) { Py_INCREF(it); return it; @@ -501,42 +501,42 @@ static CYTHON_INLINE PyObject* __Pyx_PyFrozenSet_New(PyObject* it) { // empty frozenset is a singleton (on Python <3.10) // seems wasteful, but CPython does the same Py_DECREF(result); -#endif +#endif } -#if CYTHON_USE_TYPE_SLOTS +#if 
CYTHON_USE_TYPE_SLOTS return PyFrozenSet_Type.tp_new(&PyFrozenSet_Type, $empty_tuple, NULL); -#else +#else return PyObject_Call((PyObject*)&PyFrozenSet_Type, $empty_tuple, NULL); -#endif -} - - -//////////////////// PySet_Update.proto //////////////////// - -static CYTHON_INLINE int __Pyx_PySet_Update(PyObject* set, PyObject* it); /*proto*/ - -//////////////////// PySet_Update //////////////////// - -static CYTHON_INLINE int __Pyx_PySet_Update(PyObject* set, PyObject* it) { - PyObject *retval; - #if CYTHON_USE_TYPE_SLOTS && !CYTHON_COMPILING_IN_PYPY - if (PyAnySet_Check(it)) { - if (PySet_GET_SIZE(it) == 0) - return 0; - // fast and safe case: CPython will update our result set and return it - retval = PySet_Type.tp_as_number->nb_inplace_or(set, it); - if (likely(retval == set)) { - Py_DECREF(retval); - return 0; - } - if (unlikely(!retval)) - return -1; - // unusual result, fall through to set.update() call below - Py_DECREF(retval); - } +#endif +} + + +//////////////////// PySet_Update.proto //////////////////// + +static CYTHON_INLINE int __Pyx_PySet_Update(PyObject* set, PyObject* it); /*proto*/ + +//////////////////// PySet_Update //////////////////// + +static CYTHON_INLINE int __Pyx_PySet_Update(PyObject* set, PyObject* it) { + PyObject *retval; + #if CYTHON_USE_TYPE_SLOTS && !CYTHON_COMPILING_IN_PYPY + if (PyAnySet_Check(it)) { + if (PySet_GET_SIZE(it) == 0) + return 0; + // fast and safe case: CPython will update our result set and return it + retval = PySet_Type.tp_as_number->nb_inplace_or(set, it); + if (likely(retval == set)) { + Py_DECREF(retval); + return 0; + } + if (unlikely(!retval)) + return -1; + // unusual result, fall through to set.update() call below + Py_DECREF(retval); + } #endif - retval = CALL_UNBOUND_METHOD(PySet_Type, "update", set, it); - if (unlikely(!retval)) return -1; - Py_DECREF(retval); - return 0; + retval = CALL_UNBOUND_METHOD(PySet_Type, "update", set, it); + if (unlikely(!retval)) return -1; + Py_DECREF(retval); + return 0; } diff --git a/contrib/tools/cython/Cython/Utility/CConvert.pyx b/contrib/tools/cython/Cython/Utility/CConvert.pyx index 22ba939362..5969f6a582 100644 --- a/contrib/tools/cython/Cython/Utility/CConvert.pyx +++ b/contrib/tools/cython/Cython/Utility/CConvert.pyx @@ -1,132 +1,132 @@ -#################### FromPyStructUtility #################### - -cdef extern from *: - ctypedef struct PyTypeObject: - char* tp_name - PyTypeObject *Py_TYPE(obj) - bint PyMapping_Check(obj) - object PyErr_Format(exc, const char *format, ...) - -@cname("{{funcname}}") +#################### FromPyStructUtility #################### + +cdef extern from *: + ctypedef struct PyTypeObject: + char* tp_name + PyTypeObject *Py_TYPE(obj) + bint PyMapping_Check(obj) + object PyErr_Format(exc, const char *format, ...) + +@cname("{{funcname}}") cdef {{struct_type}} {{funcname}}(obj) except *: cdef {{struct_type}} result - if not PyMapping_Check(obj): - PyErr_Format(TypeError, b"Expected %.16s, got %.200s", b"a mapping", Py_TYPE(obj).tp_name) - - {{for member in var_entries:}} - try: - value = obj['{{member.name}}'] - except KeyError: - raise ValueError("No value specified for struct attribute '{{member.name}}'") - result.{{member.cname}} = value - {{endfor}} - return result - - -#################### FromPyUnionUtility #################### - -cdef extern from *: - ctypedef struct PyTypeObject: - char* tp_name - PyTypeObject *Py_TYPE(obj) - bint PyMapping_Check(obj) - object PyErr_Format(exc, const char *format, ...) 
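
The FromPyStructUtility template above builds a C struct from a Python mapping: it type-checks with PyMapping_Check, then looks up each member by name and turns a missing key into a descriptive ValueError. A minimal standalone sketch of the same pattern for a hypothetical two-field struct, using only public CPython API (the names point_t and point_from_mapping are illustrative, not Cython's generated ones):

#include <Python.h>

typedef struct { long x; long y; } point_t;

static int point_from_mapping(PyObject *obj, point_t *out) {
    static const char *names[] = {"x", "y"};
    long *slots[2];
    int i;
    slots[0] = &out->x;
    slots[1] = &out->y;
    if (!PyMapping_Check(obj)) {
        PyErr_Format(PyExc_TypeError, "Expected a mapping, got %.200s",
                     Py_TYPE(obj)->tp_name);
        return -1;
    }
    for (i = 0; i < 2; i++) {
        PyObject *v = PyMapping_GetItemString(obj, names[i]);  /* new reference */
        if (!v) {
            /* mirror the template: a missing key becomes a ValueError */
            if (PyErr_ExceptionMatches(PyExc_KeyError))
                PyErr_Format(PyExc_ValueError,
                             "No value specified for struct attribute '%s'",
                             names[i]);
            return -1;
        }
        *slots[i] = PyLong_AsLong(v);
        Py_DECREF(v);
        if (PyErr_Occurred()) return -1;
    }
    return 0;
}
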
- -@cname("{{funcname}}") + if not PyMapping_Check(obj): + PyErr_Format(TypeError, b"Expected %.16s, got %.200s", b"a mapping", Py_TYPE(obj).tp_name) + + {{for member in var_entries:}} + try: + value = obj['{{member.name}}'] + except KeyError: + raise ValueError("No value specified for struct attribute '{{member.name}}'") + result.{{member.cname}} = value + {{endfor}} + return result + + +#################### FromPyUnionUtility #################### + +cdef extern from *: + ctypedef struct PyTypeObject: + char* tp_name + PyTypeObject *Py_TYPE(obj) + bint PyMapping_Check(obj) + object PyErr_Format(exc, const char *format, ...) + +@cname("{{funcname}}") cdef {{struct_type}} {{funcname}}(obj) except *: cdef {{struct_type}} result - cdef Py_ssize_t length - if not PyMapping_Check(obj): - PyErr_Format(TypeError, b"Expected %.16s, got %.200s", b"a mapping", Py_TYPE(obj).tp_name) - - last_found = None - length = len(obj) - if length: - {{for member in var_entries:}} - if '{{member.name}}' in obj: - if last_found is not None: - raise ValueError("More than one union attribute passed: '%s' and '%s'" % (last_found, '{{member.name}}')) - last_found = '{{member.name}}' - result.{{member.cname}} = obj['{{member.name}}'] - length -= 1 - if not length: - return result - {{endfor}} - if last_found is None: - raise ValueError("No value specified for any of the union attributes (%s)" % - '{{", ".join(member.name for member in var_entries)}}') - return result - - -#################### cfunc.to_py #################### - -@cname("{{cname}}") -cdef object {{cname}}({{return_type.ctype}} (*f)({{ ', '.join(arg.type_cname for arg in args) }}) {{except_clause}}): - def wrap({{ ', '.join('{arg.ctype} {arg.name}'.format(arg=arg) for arg in args) }}): - """wrap({{', '.join(('{arg.name}: {arg.type_displayname}'.format(arg=arg) if arg.type_displayname else arg.name) for arg in args)}}){{if return_type.type_displayname}} -> {{return_type.type_displayname}}{{endif}}""" - {{'' if return_type.type.is_void else 'return '}}f({{ ', '.join(arg.name for arg in args) }}) - return wrap - - -#################### carray.from_py #################### - -cdef extern from *: - object PyErr_Format(exc, const char *format, ...) 
- -@cname("{{cname}}") -cdef int {{cname}}(object o, {{base_type}} *v, Py_ssize_t length) except -1: - cdef Py_ssize_t i = length - try: - i = len(o) - except (TypeError, OverflowError): - pass - if i == length: - for i, item in enumerate(o): - if i >= length: - break - v[i] = item - else: - i += 1 # convert index to length - if i == length: - return 0 - - PyErr_Format( - IndexError, - ("too many values found during array assignment, expected %zd" - if i >= length else - "not enough values found during array assignment, expected %zd, got %zd"), - length, i) - - -#################### carray.to_py #################### - -cdef extern from *: - void Py_INCREF(object o) - tuple PyTuple_New(Py_ssize_t size) - list PyList_New(Py_ssize_t size) - void PyTuple_SET_ITEM(object p, Py_ssize_t pos, object o) - void PyList_SET_ITEM(object p, Py_ssize_t pos, object o) - - -@cname("{{cname}}") -cdef inline list {{cname}}({{base_type}} *v, Py_ssize_t length): - cdef size_t i - cdef object value - l = PyList_New(length) - for i in range(<size_t>length): - value = v[i] - Py_INCREF(value) - PyList_SET_ITEM(l, i, value) - return l - - -@cname("{{to_tuple_cname}}") -cdef inline tuple {{to_tuple_cname}}({{base_type}} *v, Py_ssize_t length): - cdef size_t i - cdef object value - t = PyTuple_New(length) - for i in range(<size_t>length): - value = v[i] - Py_INCREF(value) - PyTuple_SET_ITEM(t, i, value) - return t + cdef Py_ssize_t length + if not PyMapping_Check(obj): + PyErr_Format(TypeError, b"Expected %.16s, got %.200s", b"a mapping", Py_TYPE(obj).tp_name) + + last_found = None + length = len(obj) + if length: + {{for member in var_entries:}} + if '{{member.name}}' in obj: + if last_found is not None: + raise ValueError("More than one union attribute passed: '%s' and '%s'" % (last_found, '{{member.name}}')) + last_found = '{{member.name}}' + result.{{member.cname}} = obj['{{member.name}}'] + length -= 1 + if not length: + return result + {{endfor}} + if last_found is None: + raise ValueError("No value specified for any of the union attributes (%s)" % + '{{", ".join(member.name for member in var_entries)}}') + return result + + +#################### cfunc.to_py #################### + +@cname("{{cname}}") +cdef object {{cname}}({{return_type.ctype}} (*f)({{ ', '.join(arg.type_cname for arg in args) }}) {{except_clause}}): + def wrap({{ ', '.join('{arg.ctype} {arg.name}'.format(arg=arg) for arg in args) }}): + """wrap({{', '.join(('{arg.name}: {arg.type_displayname}'.format(arg=arg) if arg.type_displayname else arg.name) for arg in args)}}){{if return_type.type_displayname}} -> {{return_type.type_displayname}}{{endif}}""" + {{'' if return_type.type.is_void else 'return '}}f({{ ', '.join(arg.name for arg in args) }}) + return wrap + + +#################### carray.from_py #################### + +cdef extern from *: + object PyErr_Format(exc, const char *format, ...) 
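
The carray.from_py template that follows fills a fixed-length C array from any Python object that either reports the right len() or yields exactly `length` items, raising IndexError otherwise. A rough standalone equivalent for a long[] target, written against the public sequence API (the helper name is illustrative; the real template also handles objects without a usable len()):

#include <Python.h>

static int copy_seq_to_long_array(PyObject *o, long *v, Py_ssize_t length) {
    Py_ssize_t i, n;
    PyObject *seq = PySequence_Fast(o, "expected a sequence");  /* new reference */
    if (!seq) return -1;
    n = PySequence_Fast_GET_SIZE(seq);
    if (n != length) {
        Py_DECREF(seq);
        PyErr_Format(PyExc_IndexError,
                     "array assignment expected %zd values, got %zd", length, n);
        return -1;
    }
    for (i = 0; i < length; i++) {
        /* PySequence_Fast_GET_ITEM returns a borrowed reference */
        long item = PyLong_AsLong(PySequence_Fast_GET_ITEM(seq, i));
        if (item == -1 && PyErr_Occurred()) {
            Py_DECREF(seq);
            return -1;
        }
        v[i] = item;
    }
    Py_DECREF(seq);
    return 0;
}
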
+ +@cname("{{cname}}") +cdef int {{cname}}(object o, {{base_type}} *v, Py_ssize_t length) except -1: + cdef Py_ssize_t i = length + try: + i = len(o) + except (TypeError, OverflowError): + pass + if i == length: + for i, item in enumerate(o): + if i >= length: + break + v[i] = item + else: + i += 1 # convert index to length + if i == length: + return 0 + + PyErr_Format( + IndexError, + ("too many values found during array assignment, expected %zd" + if i >= length else + "not enough values found during array assignment, expected %zd, got %zd"), + length, i) + + +#################### carray.to_py #################### + +cdef extern from *: + void Py_INCREF(object o) + tuple PyTuple_New(Py_ssize_t size) + list PyList_New(Py_ssize_t size) + void PyTuple_SET_ITEM(object p, Py_ssize_t pos, object o) + void PyList_SET_ITEM(object p, Py_ssize_t pos, object o) + + +@cname("{{cname}}") +cdef inline list {{cname}}({{base_type}} *v, Py_ssize_t length): + cdef size_t i + cdef object value + l = PyList_New(length) + for i in range(<size_t>length): + value = v[i] + Py_INCREF(value) + PyList_SET_ITEM(l, i, value) + return l + + +@cname("{{to_tuple_cname}}") +cdef inline tuple {{to_tuple_cname}}({{base_type}} *v, Py_ssize_t length): + cdef size_t i + cdef object value + t = PyTuple_New(length) + for i in range(<size_t>length): + value = v[i] + Py_INCREF(value) + PyTuple_SET_ITEM(t, i, value) + return t diff --git a/contrib/tools/cython/Cython/Utility/CMath.c b/contrib/tools/cython/Cython/Utility/CMath.c index a8f53b2f51..2cd2223138 100644 --- a/contrib/tools/cython/Cython/Utility/CMath.c +++ b/contrib/tools/cython/Cython/Utility/CMath.c @@ -1,95 +1,95 @@ - -/////////////// CDivisionWarning.proto /////////////// - -static int __Pyx_cdivision_warning(const char *, int); /* proto */ - -/////////////// CDivisionWarning /////////////// - -static int __Pyx_cdivision_warning(const char *filename, int lineno) { -#if CYTHON_COMPILING_IN_PYPY - // avoid compiler warnings - filename++; lineno++; - return PyErr_Warn(PyExc_RuntimeWarning, - "division with oppositely signed operands, C and Python semantics differ"); -#else - return PyErr_WarnExplicit(PyExc_RuntimeWarning, - "division with oppositely signed operands, C and Python semantics differ", - filename, - lineno, - __Pyx_MODULE_NAME, - NULL); -#endif -} - - -/////////////// DivInt.proto /////////////// - -static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s, %(type)s); /* proto */ - -/////////////// DivInt /////////////// - -static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s a, %(type)s b) { - %(type)s q = a / b; - %(type)s r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - - -/////////////// ModInt.proto /////////////// - -static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */ - -/////////////// ModInt /////////////// - -static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) { - %(type)s r = a %% b; - r += ((r != 0) & ((r ^ b) < 0)) * b; - return r; -} - - -/////////////// ModFloat.proto /////////////// - -static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */ - -/////////////// ModFloat /////////////// - -static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) { - %(type)s r = fmod%(math_h_modifier)s(a, b); - r += ((r != 0) & ((r < 0) ^ (b < 0))) * b; - return r; -} - - -/////////////// IntPow.proto /////////////// - -static CYTHON_INLINE %(type)s %(func_name)s(%(type)s, %(type)s); /* proto */ - -/////////////// IntPow 
/////////////// - -static CYTHON_INLINE %(type)s %(func_name)s(%(type)s b, %(type)s e) { - %(type)s t = b; - switch (e) { - case 3: - t *= b; + +/////////////// CDivisionWarning.proto /////////////// + +static int __Pyx_cdivision_warning(const char *, int); /* proto */ + +/////////////// CDivisionWarning /////////////// + +static int __Pyx_cdivision_warning(const char *filename, int lineno) { +#if CYTHON_COMPILING_IN_PYPY + // avoid compiler warnings + filename++; lineno++; + return PyErr_Warn(PyExc_RuntimeWarning, + "division with oppositely signed operands, C and Python semantics differ"); +#else + return PyErr_WarnExplicit(PyExc_RuntimeWarning, + "division with oppositely signed operands, C and Python semantics differ", + filename, + lineno, + __Pyx_MODULE_NAME, + NULL); +#endif +} + + +/////////////// DivInt.proto /////////////// + +static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s, %(type)s); /* proto */ + +/////////////// DivInt /////////////// + +static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s a, %(type)s b) { + %(type)s q = a / b; + %(type)s r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + + +/////////////// ModInt.proto /////////////// + +static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */ + +/////////////// ModInt /////////////// + +static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) { + %(type)s r = a %% b; + r += ((r != 0) & ((r ^ b) < 0)) * b; + return r; +} + + +/////////////// ModFloat.proto /////////////// + +static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */ + +/////////////// ModFloat /////////////// + +static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) { + %(type)s r = fmod%(math_h_modifier)s(a, b); + r += ((r != 0) & ((r < 0) ^ (b < 0))) * b; + return r; +} + + +/////////////// IntPow.proto /////////////// + +static CYTHON_INLINE %(type)s %(func_name)s(%(type)s, %(type)s); /* proto */ + +/////////////// IntPow /////////////// + +static CYTHON_INLINE %(type)s %(func_name)s(%(type)s b, %(type)s e) { + %(type)s t = b; + switch (e) { + case 3: + t *= b; CYTHON_FALLTHROUGH; - case 2: - t *= b; + case 2: + t *= b; CYTHON_FALLTHROUGH; - case 1: - return t; - case 0: - return 1; - } - #if %(signed)s - if (unlikely(e<0)) return 0; - #endif - t = 1; - while (likely(e)) { - t *= (b * (e&1)) | ((~e)&1); /* 1 or b */ - b *= b; - e >>= 1; - } - return t; -} + case 1: + return t; + case 0: + return 1; + } + #if %(signed)s + if (unlikely(e<0)) return 0; + #endif + t = 1; + while (likely(e)) { + t *= (b * (e&1)) | ((~e)&1); /* 1 or b */ + b *= b; + e >>= 1; + } + return t; +} diff --git a/contrib/tools/cython/Cython/Utility/Complex.c b/contrib/tools/cython/Cython/Utility/Complex.c index 1dd56387dd..28062a0611 100644 --- a/contrib/tools/cython/Cython/Utility/Complex.c +++ b/contrib/tools/cython/Cython/Utility/Complex.c @@ -1,291 +1,291 @@ /////////////// Header.proto /////////////// //@proto_block: h_code - -#if !defined(CYTHON_CCOMPLEX) - #if defined(__cplusplus) - #define CYTHON_CCOMPLEX 1 - #elif defined(_Complex_I) - #define CYTHON_CCOMPLEX 1 - #else - #define CYTHON_CCOMPLEX 0 - #endif -#endif - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #include <complex> - #else - #include <complex.h> - #endif -#endif - -#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) - #undef _Complex_I - #define _Complex_I 1.0fj -#endif - - -/////////////// RealImag.proto /////////////// - -#if 
CYTHON_CCOMPLEX - #ifdef __cplusplus - #define __Pyx_CREAL(z) ((z).real()) - #define __Pyx_CIMAG(z) ((z).imag()) - #else - #define __Pyx_CREAL(z) (__real__(z)) - #define __Pyx_CIMAG(z) (__imag__(z)) - #endif -#else - #define __Pyx_CREAL(z) ((z).real) - #define __Pyx_CIMAG(z) ((z).imag) -#endif - -#if defined(__cplusplus) && CYTHON_CCOMPLEX \ - && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) - #define __Pyx_SET_CREAL(z,x) ((z).real(x)) - #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) -#else - #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) - #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) -#endif - - + +#if !defined(CYTHON_CCOMPLEX) + #if defined(__cplusplus) + #define CYTHON_CCOMPLEX 1 + #elif defined(_Complex_I) + #define CYTHON_CCOMPLEX 1 + #else + #define CYTHON_CCOMPLEX 0 + #endif +#endif + +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #include <complex> + #else + #include <complex.h> + #endif +#endif + +#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) + #undef _Complex_I + #define _Complex_I 1.0fj +#endif + + +/////////////// RealImag.proto /////////////// + +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #define __Pyx_CREAL(z) ((z).real()) + #define __Pyx_CIMAG(z) ((z).imag()) + #else + #define __Pyx_CREAL(z) (__real__(z)) + #define __Pyx_CIMAG(z) (__imag__(z)) + #endif +#else + #define __Pyx_CREAL(z) ((z).real) + #define __Pyx_CIMAG(z) ((z).imag) +#endif + +#if defined(__cplusplus) && CYTHON_CCOMPLEX \ + && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) + #define __Pyx_SET_CREAL(z,x) ((z).real(x)) + #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) +#else + #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) + #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) +#endif + + /////////////// Declarations.proto /////////////// //@proto_block: complex_type_declarations - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< {{real_type}} > {{type_name}}; - #else - typedef {{real_type}} _Complex {{type_name}}; - #endif -#else - typedef struct { {{real_type}} real, imag; } {{type_name}}; -#endif - -static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}}, {{real_type}}); - -/////////////// Declarations /////////////// - -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}} x, {{real_type}} y) { - return ::std::complex< {{real_type}} >(x, y); - } - #else - static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}} x, {{real_type}} y) { - return x + y*({{type}})_Complex_I; - } - #endif -#else - static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}} x, {{real_type}} y) { - {{type}} z; - z.real = x; - z.imag = y; - return z; - } -#endif - - -/////////////// ToPy.proto /////////////// - -#define __pyx_PyComplex_FromComplex(z) \ - PyComplex_FromDoubles((double)__Pyx_CREAL(z), \ - (double)__Pyx_CIMAG(z)) - - -/////////////// FromPy.proto /////////////// - -static {{type}} __Pyx_PyComplex_As_{{type_name}}(PyObject*); - -/////////////// FromPy /////////////// - -static {{type}} __Pyx_PyComplex_As_{{type_name}}(PyObject* o) { - Py_complex cval; -#if !CYTHON_COMPILING_IN_PYPY - if (PyComplex_CheckExact(o)) - cval = ((PyComplexObject *)o)->cval; - else -#endif - cval = PyComplex_AsCComplex(o); - return {{type_name}}_from_parts( - ({{real_type}})cval.real, - 
({{real_type}})cval.imag); -} - - -/////////////// Arithmetic.proto /////////////// - -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq{{func_suffix}}(a, b) ((a)==(b)) - #define __Pyx_c_sum{{func_suffix}}(a, b) ((a)+(b)) - #define __Pyx_c_diff{{func_suffix}}(a, b) ((a)-(b)) - #define __Pyx_c_prod{{func_suffix}}(a, b) ((a)*(b)) - #define __Pyx_c_quot{{func_suffix}}(a, b) ((a)/(b)) - #define __Pyx_c_neg{{func_suffix}}(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero{{func_suffix}}(z) ((z)==({{real_type}})0) - #define __Pyx_c_conj{{func_suffix}}(z) (::std::conj(z)) - #if {{is_float}} - #define __Pyx_c_abs{{func_suffix}}(z) (::std::abs(z)) - #define __Pyx_c_pow{{func_suffix}}(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero{{func_suffix}}(z) ((z)==0) - #define __Pyx_c_conj{{func_suffix}}(z) (conj{{m}}(z)) - #if {{is_float}} - #define __Pyx_c_abs{{func_suffix}}(z) (cabs{{m}}(z)) - #define __Pyx_c_pow{{func_suffix}}(a, b) (cpow{{m}}(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq{{func_suffix}}({{type}}, {{type}}); - static CYTHON_INLINE {{type}} __Pyx_c_sum{{func_suffix}}({{type}}, {{type}}); - static CYTHON_INLINE {{type}} __Pyx_c_diff{{func_suffix}}({{type}}, {{type}}); - static CYTHON_INLINE {{type}} __Pyx_c_prod{{func_suffix}}({{type}}, {{type}}); - static CYTHON_INLINE {{type}} __Pyx_c_quot{{func_suffix}}({{type}}, {{type}}); - static CYTHON_INLINE {{type}} __Pyx_c_neg{{func_suffix}}({{type}}); - static CYTHON_INLINE int __Pyx_c_is_zero{{func_suffix}}({{type}}); - static CYTHON_INLINE {{type}} __Pyx_c_conj{{func_suffix}}({{type}}); - #if {{is_float}} - static CYTHON_INLINE {{real_type}} __Pyx_c_abs{{func_suffix}}({{type}}); - static CYTHON_INLINE {{type}} __Pyx_c_pow{{func_suffix}}({{type}}, {{type}}); - #endif -#endif - -/////////////// Arithmetic /////////////// - -#if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq{{func_suffix}}({{type}} a, {{type}} b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE {{type}} __Pyx_c_sum{{func_suffix}}({{type}} a, {{type}} b) { - {{type}} z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE {{type}} __Pyx_c_diff{{func_suffix}}({{type}} a, {{type}} b) { - {{type}} z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE {{type}} __Pyx_c_prod{{func_suffix}}({{type}} a, {{type}} b) { - {{type}} z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - - #if {{is_float}} - static CYTHON_INLINE {{type}} __Pyx_c_quot{{func_suffix}}({{type}} a, {{type}} b) { - if (b.imag == 0) { - return {{type_name}}_from_parts(a.real / b.real, a.imag / b.real); - } else if (fabs{{m}}(b.real) >= fabs{{m}}(b.imag)) { - if (b.real == 0 && b.imag == 0) { - return {{type_name}}_from_parts(a.real / b.real, a.imag / b.imag); - } else { - {{real_type}} r = b.imag / b.real; - {{real_type}} s = ({{real_type}})(1.0) / (b.real + b.imag * r); - return {{type_name}}_from_parts( - (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); - } - } else { - {{real_type}} r = b.real / b.imag; - {{real_type}} s = ({{real_type}})(1.0) / (b.imag + b.real * r); - return {{type_name}}_from_parts( - (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); - } - } - #else - static CYTHON_INLINE {{type}} __Pyx_c_quot{{func_suffix}}({{type}} a, {{type}} b) { - if (b.imag == 0) { - return {{type_name}}_from_parts(a.real / b.real, a.imag / b.real); - } else { - {{real_type}} denom = 
b.real * b.real + b.imag * b.imag; - return {{type_name}}_from_parts( - (a.real * b.real + a.imag * b.imag) / denom, - (a.imag * b.real - a.real * b.imag) / denom); - } - } - #endif - - static CYTHON_INLINE {{type}} __Pyx_c_neg{{func_suffix}}({{type}} a) { - {{type}} z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zero{{func_suffix}}({{type}} a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE {{type}} __Pyx_c_conj{{func_suffix}}({{type}} a) { - {{type}} z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if {{is_float}} - static CYTHON_INLINE {{real_type}} __Pyx_c_abs{{func_suffix}}({{type}} z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrt{{m}}(z.real*z.real + z.imag*z.imag); - #else - return hypot{{m}}(z.real, z.imag); - #endif - } - static CYTHON_INLINE {{type}} __Pyx_c_pow{{func_suffix}}({{type}} a, {{type}} b) { - {{type}} z; - {{real_type}} r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - {{real_type}} denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - return __Pyx_c_prod{{func_suffix}}(a, a); - case 3: - z = __Pyx_c_prod{{func_suffix}}(a, a); - return __Pyx_c_prod{{func_suffix}}(z, a); - case 4: - z = __Pyx_c_prod{{func_suffix}}(a, a); - return __Pyx_c_prod{{func_suffix}}(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } else if (b.imag == 0) { - z.real = pow{{m}}(a.real, b.real); - z.imag = 0; - return z; - } else if (a.real > 0) { - r = a.real; - theta = 0; - } else { - r = -a.real; - theta = atan2{{m}}(0.0, -1.0); - } - } else { - r = __Pyx_c_abs{{func_suffix}}(a); - theta = atan2{{m}}(a.imag, a.real); - } - lnr = log{{m}}(r); - z_r = exp{{m}}(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cos{{m}}(z_theta); - z.imag = z_r * sin{{m}}(z_theta); - return z; - } - #endif -#endif + +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< {{real_type}} > {{type_name}}; + #else + typedef {{real_type}} _Complex {{type_name}}; + #endif +#else + typedef struct { {{real_type}} real, imag; } {{type_name}}; +#endif + +static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}}, {{real_type}}); + +/////////////// Declarations /////////////// + +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}} x, {{real_type}} y) { + return ::std::complex< {{real_type}} >(x, y); + } + #else + static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}} x, {{real_type}} y) { + return x + y*({{type}})_Complex_I; + } + #endif +#else + static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}} x, {{real_type}} y) { + {{type}} z; + z.real = x; + z.imag = y; + return z; + } +#endif + + +/////////////// ToPy.proto /////////////// + +#define __pyx_PyComplex_FromComplex(z) \ + PyComplex_FromDoubles((double)__Pyx_CREAL(z), \ + (double)__Pyx_CIMAG(z)) + + +/////////////// FromPy.proto /////////////// + +static {{type}} __Pyx_PyComplex_As_{{type_name}}(PyObject*); + +/////////////// FromPy /////////////// + +static {{type}} __Pyx_PyComplex_As_{{type_name}}(PyObject* o) { + Py_complex cval; +#if !CYTHON_COMPILING_IN_PYPY + if (PyComplex_CheckExact(o)) + cval = ((PyComplexObject *)o)->cval; + else +#endif + cval = PyComplex_AsCComplex(o); 
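
/* The CPython fast path above pulls the Py_complex payload straight out of
 * the PyComplexObject; other implementations go through
 * PyComplex_AsCComplex(), which also accepts objects implementing
 * __complex__() and reports failure by returning -1.0 in .real with an
 * error set. A standalone sketch of that generic path for a float complex
 * target (the helper name is illustrative): */
#include <Python.h>
#include <complex.h>

static float _Complex pycomplex_as_float_complex(PyObject *o) {
    Py_complex cval = PyComplex_AsCComplex(o);
    /* caller must check PyErr_Occurred() when cval.real == -1.0 */
    return (float)cval.real + (float)cval.imag * _Complex_I;
}
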
+ return {{type_name}}_from_parts( + ({{real_type}})cval.real, + ({{real_type}})cval.imag); +} + + +/////////////// Arithmetic.proto /////////////// + +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq{{func_suffix}}(a, b) ((a)==(b)) + #define __Pyx_c_sum{{func_suffix}}(a, b) ((a)+(b)) + #define __Pyx_c_diff{{func_suffix}}(a, b) ((a)-(b)) + #define __Pyx_c_prod{{func_suffix}}(a, b) ((a)*(b)) + #define __Pyx_c_quot{{func_suffix}}(a, b) ((a)/(b)) + #define __Pyx_c_neg{{func_suffix}}(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero{{func_suffix}}(z) ((z)==({{real_type}})0) + #define __Pyx_c_conj{{func_suffix}}(z) (::std::conj(z)) + #if {{is_float}} + #define __Pyx_c_abs{{func_suffix}}(z) (::std::abs(z)) + #define __Pyx_c_pow{{func_suffix}}(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero{{func_suffix}}(z) ((z)==0) + #define __Pyx_c_conj{{func_suffix}}(z) (conj{{m}}(z)) + #if {{is_float}} + #define __Pyx_c_abs{{func_suffix}}(z) (cabs{{m}}(z)) + #define __Pyx_c_pow{{func_suffix}}(a, b) (cpow{{m}}(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq{{func_suffix}}({{type}}, {{type}}); + static CYTHON_INLINE {{type}} __Pyx_c_sum{{func_suffix}}({{type}}, {{type}}); + static CYTHON_INLINE {{type}} __Pyx_c_diff{{func_suffix}}({{type}}, {{type}}); + static CYTHON_INLINE {{type}} __Pyx_c_prod{{func_suffix}}({{type}}, {{type}}); + static CYTHON_INLINE {{type}} __Pyx_c_quot{{func_suffix}}({{type}}, {{type}}); + static CYTHON_INLINE {{type}} __Pyx_c_neg{{func_suffix}}({{type}}); + static CYTHON_INLINE int __Pyx_c_is_zero{{func_suffix}}({{type}}); + static CYTHON_INLINE {{type}} __Pyx_c_conj{{func_suffix}}({{type}}); + #if {{is_float}} + static CYTHON_INLINE {{real_type}} __Pyx_c_abs{{func_suffix}}({{type}}); + static CYTHON_INLINE {{type}} __Pyx_c_pow{{func_suffix}}({{type}}, {{type}}); + #endif +#endif + +/////////////// Arithmetic /////////////// + +#if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq{{func_suffix}}({{type}} a, {{type}} b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE {{type}} __Pyx_c_sum{{func_suffix}}({{type}} a, {{type}} b) { + {{type}} z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE {{type}} __Pyx_c_diff{{func_suffix}}({{type}} a, {{type}} b) { + {{type}} z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE {{type}} __Pyx_c_prod{{func_suffix}}({{type}} a, {{type}} b) { + {{type}} z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + + #if {{is_float}} + static CYTHON_INLINE {{type}} __Pyx_c_quot{{func_suffix}}({{type}} a, {{type}} b) { + if (b.imag == 0) { + return {{type_name}}_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabs{{m}}(b.real) >= fabs{{m}}(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return {{type_name}}_from_parts(a.real / b.real, a.imag / b.imag); + } else { + {{real_type}} r = b.imag / b.real; + {{real_type}} s = ({{real_type}})(1.0) / (b.real + b.imag * r); + return {{type_name}}_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + {{real_type}} r = b.real / b.imag; + {{real_type}} s = ({{real_type}})(1.0) / (b.imag + b.real * r); + return {{type_name}}_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE {{type}} __Pyx_c_quot{{func_suffix}}({{type}} a, {{type}} b) { + if (b.imag == 0) { + return {{type_name}}_from_parts(a.real / 
b.real, a.imag / b.real); + } else { + {{real_type}} denom = b.real * b.real + b.imag * b.imag; + return {{type_name}}_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + + static CYTHON_INLINE {{type}} __Pyx_c_neg{{func_suffix}}({{type}} a) { + {{type}} z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero{{func_suffix}}({{type}} a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE {{type}} __Pyx_c_conj{{func_suffix}}({{type}} a) { + {{type}} z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if {{is_float}} + static CYTHON_INLINE {{real_type}} __Pyx_c_abs{{func_suffix}}({{type}} z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrt{{m}}(z.real*z.real + z.imag*z.imag); + #else + return hypot{{m}}(z.real, z.imag); + #endif + } + static CYTHON_INLINE {{type}} __Pyx_c_pow{{func_suffix}}({{type}} a, {{type}} b) { + {{type}} z; + {{real_type}} r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + {{real_type}} denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + return __Pyx_c_prod{{func_suffix}}(a, a); + case 3: + z = __Pyx_c_prod{{func_suffix}}(a, a); + return __Pyx_c_prod{{func_suffix}}(z, a); + case 4: + z = __Pyx_c_prod{{func_suffix}}(a, a); + return __Pyx_c_prod{{func_suffix}}(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = pow{{m}}(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2{{m}}(0.0, -1.0); + } + } else { + r = __Pyx_c_abs{{func_suffix}}(a); + theta = atan2{{m}}(a.imag, a.real); + } + lnr = log{{m}}(r); + z_r = exp{{m}}(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cos{{m}}(z_theta); + z.imag = z_r * sin{{m}}(z_theta); + return z; + } + #endif +#endif diff --git a/contrib/tools/cython/Cython/Utility/Coroutine.c b/contrib/tools/cython/Cython/Utility/Coroutine.c index ee6e254e2d..d26314083b 100644 --- a/contrib/tools/cython/Cython/Utility/Coroutine.c +++ b/contrib/tools/cython/Cython/Utility/Coroutine.c @@ -1,10 +1,10 @@ -//////////////////// GeneratorYieldFrom.proto //////////////////// - -static CYTHON_INLINE PyObject* __Pyx_Generator_Yield_From(__pyx_CoroutineObject *gen, PyObject *source); - -//////////////////// GeneratorYieldFrom //////////////////// -//@requires: Generator - +//////////////////// GeneratorYieldFrom.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_Generator_Yield_From(__pyx_CoroutineObject *gen, PyObject *source); + +//////////////////// GeneratorYieldFrom //////////////////// +//@requires: Generator + static void __PyxPyIter_CheckErrorAndDecref(PyObject *source) { PyErr_Format(PyExc_TypeError, "iter() returned non-iterator of type '%.100s'", @@ -12,58 +12,58 @@ static void __PyxPyIter_CheckErrorAndDecref(PyObject *source) { Py_DECREF(source); } -static CYTHON_INLINE PyObject* __Pyx_Generator_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) { - PyObject *source_gen, *retval; -#ifdef __Pyx_Coroutine_USED +static CYTHON_INLINE PyObject* __Pyx_Generator_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) { + PyObject *source_gen, *retval; +#ifdef 
__Pyx_Coroutine_USED if (__Pyx_Coroutine_Check(source)) { - // TODO: this should only happen for types.coroutine()ed generators, but we can't determine that here - Py_INCREF(source); - source_gen = source; - retval = __Pyx_Generator_Next(source); - } else -#endif - { -#if CYTHON_USE_TYPE_SLOTS - if (likely(Py_TYPE(source)->tp_iter)) { - source_gen = Py_TYPE(source)->tp_iter(source); - if (unlikely(!source_gen)) - return NULL; - if (unlikely(!PyIter_Check(source_gen))) { + // TODO: this should only happen for types.coroutine()ed generators, but we can't determine that here + Py_INCREF(source); + source_gen = source; + retval = __Pyx_Generator_Next(source); + } else +#endif + { +#if CYTHON_USE_TYPE_SLOTS + if (likely(Py_TYPE(source)->tp_iter)) { + source_gen = Py_TYPE(source)->tp_iter(source); + if (unlikely(!source_gen)) + return NULL; + if (unlikely(!PyIter_Check(source_gen))) { __PyxPyIter_CheckErrorAndDecref(source_gen); - return NULL; - } - } else + return NULL; + } + } else // CPython also allows non-iterable sequences to be iterated over -#endif +#endif { - source_gen = PyObject_GetIter(source); + source_gen = PyObject_GetIter(source); if (unlikely(!source_gen)) return NULL; } - // source_gen is now the iterator, make the first next() call + // source_gen is now the iterator, make the first next() call #if CYTHON_USE_TYPE_SLOTS - retval = Py_TYPE(source_gen)->tp_iternext(source_gen); + retval = Py_TYPE(source_gen)->tp_iternext(source_gen); #else retval = PyIter_Next(source_gen); #endif - } - if (likely(retval)) { - gen->yieldfrom = source_gen; - return retval; - } - Py_DECREF(source_gen); - return NULL; -} - - -//////////////////// CoroutineYieldFrom.proto //////////////////// - + } + if (likely(retval)) { + gen->yieldfrom = source_gen; + return retval; + } + Py_DECREF(source_gen); + return NULL; +} + + +//////////////////// CoroutineYieldFrom.proto //////////////////// + static CYTHON_INLINE PyObject* __Pyx_Coroutine_Yield_From(__pyx_CoroutineObject *gen, PyObject *source); - -//////////////////// CoroutineYieldFrom //////////////////// -//@requires: Coroutine -//@requires: GetAwaitIter - + +//////////////////// CoroutineYieldFrom //////////////////// +//@requires: Coroutine +//@requires: GetAwaitIter + static PyObject* __Pyx__Coroutine_Yield_From_Generic(__pyx_CoroutineObject *gen, PyObject *source) { PyObject *retval; PyObject *source_gen = __Pyx__Coroutine_GetAwaitableIter(source); @@ -76,9 +76,9 @@ static PyObject* __Pyx__Coroutine_Yield_From_Generic(__pyx_CoroutineObject *gen, } else { #if CYTHON_USE_TYPE_SLOTS retval = Py_TYPE(source_gen)->tp_iternext(source_gen); -#else +#else retval = PyIter_Next(source_gen); -#endif +#endif } if (retval) { gen->yieldfrom = source_gen; @@ -86,53 +86,53 @@ static PyObject* __Pyx__Coroutine_Yield_From_Generic(__pyx_CoroutineObject *gen, } Py_DECREF(source_gen); return NULL; -} - +} + static CYTHON_INLINE PyObject* __Pyx_Coroutine_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) { - PyObject *retval; + PyObject *retval; if (__Pyx_Coroutine_Check(source)) { if (unlikely(((__pyx_CoroutineObject*)source)->yieldfrom)) { PyErr_SetString( PyExc_RuntimeError, "coroutine is being awaited already"); - return NULL; - } - retval = __Pyx_Generator_Next(source); + return NULL; + } + retval = __Pyx_Generator_Next(source); #ifdef __Pyx_AsyncGen_USED // inlined "__pyx_PyAsyncGenASend" handling to avoid the series of generic calls } else if (__pyx_PyAsyncGenASend_CheckExact(source)) { retval = __Pyx_async_gen_asend_iternext(source); #endif - } else { + } 
else { return __Pyx__Coroutine_Yield_From_Generic(gen, source); - } + } if (retval) { Py_INCREF(source); gen->yieldfrom = source; - } + } return retval; -} - - -//////////////////// GetAwaitIter.proto //////////////////// - -static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAwaitableIter(PyObject *o); /*proto*/ -static PyObject *__Pyx__Coroutine_GetAwaitableIter(PyObject *o); /*proto*/ - -//////////////////// GetAwaitIter //////////////////// -//@requires: ObjectHandling.c::PyObjectGetMethod -//@requires: ObjectHandling.c::PyObjectCallNoArg -//@requires: ObjectHandling.c::PyObjectCallOneArg - -static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAwaitableIter(PyObject *o) { -#ifdef __Pyx_Coroutine_USED +} + + +//////////////////// GetAwaitIter.proto //////////////////// + +static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAwaitableIter(PyObject *o); /*proto*/ +static PyObject *__Pyx__Coroutine_GetAwaitableIter(PyObject *o); /*proto*/ + +//////////////////// GetAwaitIter //////////////////// +//@requires: ObjectHandling.c::PyObjectGetMethod +//@requires: ObjectHandling.c::PyObjectCallNoArg +//@requires: ObjectHandling.c::PyObjectCallOneArg + +static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAwaitableIter(PyObject *o) { +#ifdef __Pyx_Coroutine_USED if (__Pyx_Coroutine_Check(o)) { return __Pyx_NewRef(o); - } -#endif - return __Pyx__Coroutine_GetAwaitableIter(o); -} - + } +#endif + return __Pyx__Coroutine_GetAwaitableIter(o); +} + static void __Pyx_Coroutine_AwaitableIterError(PyObject *source) { #if PY_VERSION_HEX >= 0x030600B3 || defined(_PyErr_FormatFromCause) @@ -170,102 +170,102 @@ static void __Pyx_Coroutine_AwaitableIterError(PyObject *source) { #endif } -// adapted from genobject.c in Py3.5 -static PyObject *__Pyx__Coroutine_GetAwaitableIter(PyObject *obj) { - PyObject *res; -#if CYTHON_USE_ASYNC_SLOTS - __Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj); - if (likely(am && am->am_await)) { - res = (*am->am_await)(obj); - } else -#endif -#if PY_VERSION_HEX >= 0x030500B2 || defined(PyCoro_CheckExact) - if (PyCoro_CheckExact(obj)) { +// adapted from genobject.c in Py3.5 +static PyObject *__Pyx__Coroutine_GetAwaitableIter(PyObject *obj) { + PyObject *res; +#if CYTHON_USE_ASYNC_SLOTS + __Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj); + if (likely(am && am->am_await)) { + res = (*am->am_await)(obj); + } else +#endif +#if PY_VERSION_HEX >= 0x030500B2 || defined(PyCoro_CheckExact) + if (PyCoro_CheckExact(obj)) { return __Pyx_NewRef(obj); - } else -#endif -#if CYTHON_COMPILING_IN_CPYTHON && defined(CO_ITERABLE_COROUTINE) - if (PyGen_CheckExact(obj) && ((PyGenObject*)obj)->gi_code && ((PyCodeObject *)((PyGenObject*)obj)->gi_code)->co_flags & CO_ITERABLE_COROUTINE) { - // Python generator marked with "@types.coroutine" decorator + } else +#endif +#if CYTHON_COMPILING_IN_CPYTHON && defined(CO_ITERABLE_COROUTINE) + if (PyGen_CheckExact(obj) && ((PyGenObject*)obj)->gi_code && ((PyCodeObject *)((PyGenObject*)obj)->gi_code)->co_flags & CO_ITERABLE_COROUTINE) { + // Python generator marked with "@types.coroutine" decorator return __Pyx_NewRef(obj); - } else -#endif - { - PyObject *method = NULL; - int is_method = __Pyx_PyObject_GetMethod(obj, PYIDENT("__await__"), &method); - if (likely(is_method)) { - res = __Pyx_PyObject_CallOneArg(method, obj); - } else if (likely(method)) { - res = __Pyx_PyObject_CallNoArg(method); - } else - goto slot_error; - Py_DECREF(method); - } + } else +#endif + { + PyObject *method = NULL; + int is_method = __Pyx_PyObject_GetMethod(obj, 
PYIDENT("__await__"), &method); + if (likely(is_method)) { + res = __Pyx_PyObject_CallOneArg(method, obj); + } else if (likely(method)) { + res = __Pyx_PyObject_CallNoArg(method); + } else + goto slot_error; + Py_DECREF(method); + } if (unlikely(!res)) { // surprisingly, CPython replaces the exception here... __Pyx_Coroutine_AwaitableIterError(obj); goto bad; } if (unlikely(!PyIter_Check(res))) { - PyErr_Format(PyExc_TypeError, - "__await__() returned non-iterator of type '%.100s'", - Py_TYPE(res)->tp_name); - Py_CLEAR(res); - } else { - int is_coroutine = 0; - #ifdef __Pyx_Coroutine_USED + PyErr_Format(PyExc_TypeError, + "__await__() returned non-iterator of type '%.100s'", + Py_TYPE(res)->tp_name); + Py_CLEAR(res); + } else { + int is_coroutine = 0; + #ifdef __Pyx_Coroutine_USED is_coroutine |= __Pyx_Coroutine_Check(res); - #endif - #if PY_VERSION_HEX >= 0x030500B2 || defined(PyCoro_CheckExact) - is_coroutine |= PyCoro_CheckExact(res); - #endif - if (unlikely(is_coroutine)) { - /* __await__ must return an *iterator*, not - a coroutine or another awaitable (see PEP 492) */ - PyErr_SetString(PyExc_TypeError, - "__await__() returned a coroutine"); - Py_CLEAR(res); - } - } - return res; -slot_error: - PyErr_Format(PyExc_TypeError, - "object %.100s can't be used in 'await' expression", - Py_TYPE(obj)->tp_name); -bad: - return NULL; -} - - -//////////////////// AsyncIter.proto //////////////////// - -static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAsyncIter(PyObject *o); /*proto*/ -static CYTHON_INLINE PyObject *__Pyx_Coroutine_AsyncIterNext(PyObject *o); /*proto*/ - -//////////////////// AsyncIter //////////////////// -//@requires: GetAwaitIter -//@requires: ObjectHandling.c::PyObjectCallMethod0 - + #endif + #if PY_VERSION_HEX >= 0x030500B2 || defined(PyCoro_CheckExact) + is_coroutine |= PyCoro_CheckExact(res); + #endif + if (unlikely(is_coroutine)) { + /* __await__ must return an *iterator*, not + a coroutine or another awaitable (see PEP 492) */ + PyErr_SetString(PyExc_TypeError, + "__await__() returned a coroutine"); + Py_CLEAR(res); + } + } + return res; +slot_error: + PyErr_Format(PyExc_TypeError, + "object %.100s can't be used in 'await' expression", + Py_TYPE(obj)->tp_name); +bad: + return NULL; +} + + +//////////////////// AsyncIter.proto //////////////////// + +static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAsyncIter(PyObject *o); /*proto*/ +static CYTHON_INLINE PyObject *__Pyx_Coroutine_AsyncIterNext(PyObject *o); /*proto*/ + +//////////////////// AsyncIter //////////////////// +//@requires: GetAwaitIter +//@requires: ObjectHandling.c::PyObjectCallMethod0 + static PyObject *__Pyx_Coroutine_GetAsyncIter_Generic(PyObject *obj) { -#if PY_VERSION_HEX < 0x030500B1 - { - PyObject *iter = __Pyx_PyObject_CallMethod0(obj, PYIDENT("__aiter__")); - if (likely(iter)) - return iter; - // FIXME: for the sake of a nicely conforming exception message, assume any AttributeError meant '__aiter__' - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) - return NULL; - } -#else - // avoid C warning about 'unused function' +#if PY_VERSION_HEX < 0x030500B1 + { + PyObject *iter = __Pyx_PyObject_CallMethod0(obj, PYIDENT("__aiter__")); + if (likely(iter)) + return iter; + // FIXME: for the sake of a nicely conforming exception message, assume any AttributeError meant '__aiter__' + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) + return NULL; + } +#else + // avoid C warning about 'unused function' if ((0)) (void) __Pyx_PyObject_CallMethod0(obj, PYIDENT("__aiter__")); -#endif - - 
PyErr_Format(PyExc_TypeError, "'async for' requires an object with __aiter__ method, got %.100s", - Py_TYPE(obj)->tp_name); - return NULL; -} - +#endif + + PyErr_Format(PyExc_TypeError, "'async for' requires an object with __aiter__ method, got %.100s", + Py_TYPE(obj)->tp_name); + return NULL; +} + static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAsyncIter(PyObject *obj) { #ifdef __Pyx_AsyncGen_USED @@ -273,34 +273,34 @@ static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAsyncIter(PyObject *obj) { return __Pyx_NewRef(obj); } #endif -#if CYTHON_USE_ASYNC_SLOTS +#if CYTHON_USE_ASYNC_SLOTS { __Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj); if (likely(am && am->am_aiter)) { return (*am->am_aiter)(obj); } - } -#endif + } +#endif return __Pyx_Coroutine_GetAsyncIter_Generic(obj); } static PyObject *__Pyx__Coroutine_AsyncIterNext(PyObject *obj) { -#if PY_VERSION_HEX < 0x030500B1 - { - PyObject *value = __Pyx_PyObject_CallMethod0(obj, PYIDENT("__anext__")); - if (likely(value)) - return value; - } - // FIXME: for the sake of a nicely conforming exception message, assume any AttributeError meant '__anext__' - if (PyErr_ExceptionMatches(PyExc_AttributeError)) -#endif - PyErr_Format(PyExc_TypeError, "'async for' requires an object with __anext__ method, got %.100s", - Py_TYPE(obj)->tp_name); - return NULL; -} - - +#if PY_VERSION_HEX < 0x030500B1 + { + PyObject *value = __Pyx_PyObject_CallMethod0(obj, PYIDENT("__anext__")); + if (likely(value)) + return value; + } + // FIXME: for the sake of a nicely conforming exception message, assume any AttributeError meant '__anext__' + if (PyErr_ExceptionMatches(PyExc_AttributeError)) +#endif + PyErr_Format(PyExc_TypeError, "'async for' requires an object with __anext__ method, got %.100s", + Py_TYPE(obj)->tp_name); + return NULL; +} + + static CYTHON_INLINE PyObject *__Pyx_Coroutine_AsyncIterNext(PyObject *obj) { #ifdef __Pyx_AsyncGen_USED if (__Pyx_AsyncGen_CheckExact(obj)) { @@ -319,16 +319,16 @@ static CYTHON_INLINE PyObject *__Pyx_Coroutine_AsyncIterNext(PyObject *obj) { } -//////////////////// pep479.proto //////////////////// - +//////////////////// pep479.proto //////////////////// + static void __Pyx_Generator_Replace_StopIteration(int in_async_gen); /*proto*/ - -//////////////////// pep479 //////////////////// -//@requires: Exceptions.c::GetException - + +//////////////////// pep479 //////////////////// +//@requires: Exceptions.c::GetException + static void __Pyx_Generator_Replace_StopIteration(CYTHON_UNUSED int in_async_gen) { PyObject *exc, *val, *tb, *cur_exc; - __Pyx_PyThreadState_declare + __Pyx_PyThreadState_declare #ifdef __Pyx_StopAsyncIteration_USED int is_async_stopiteration = 0; #endif @@ -343,272 +343,272 @@ static void __Pyx_Generator_Replace_StopIteration(CYTHON_UNUSED int in_async_gen return; } - __Pyx_PyThreadState_assign + __Pyx_PyThreadState_assign // Chain exceptions by moving Stop(Async)Iteration to exc_info before creating the RuntimeError. // In Py2.x, no chaining happens, but the exception still stays visible in exc_info. - __Pyx_GetException(&exc, &val, &tb); - Py_XDECREF(exc); - Py_XDECREF(val); - Py_XDECREF(tb); + __Pyx_GetException(&exc, &val, &tb); + Py_XDECREF(exc); + Py_XDECREF(val); + Py_XDECREF(tb); PyErr_SetString(PyExc_RuntimeError, #ifdef __Pyx_StopAsyncIteration_USED is_async_stopiteration ? "async generator raised StopAsyncIteration" : in_async_gen ? 
"async generator raised StopIteration" : #endif "generator raised StopIteration"); -} - - -//////////////////// CoroutineBase.proto //////////////////// +} + + +//////////////////// CoroutineBase.proto //////////////////// //@substitute: naming - + typedef PyObject *(*__pyx_coroutine_body_t)(PyObject *, PyThreadState *, PyObject *); - -#if CYTHON_USE_EXC_INFO_STACK -// See https://bugs.python.org/issue25612 -#define __Pyx_ExcInfoStruct _PyErr_StackItem -#else -// Minimal replacement struct for Py<3.7, without the Py3.7 exception state stack. -typedef struct { - PyObject *exc_type; - PyObject *exc_value; - PyObject *exc_traceback; -} __Pyx_ExcInfoStruct; -#endif - -typedef struct { - PyObject_HEAD - __pyx_coroutine_body_t body; - PyObject *closure; - __Pyx_ExcInfoStruct gi_exc_state; - PyObject *gi_weakreflist; - PyObject *classobj; - PyObject *yieldfrom; - PyObject *gi_name; - PyObject *gi_qualname; - PyObject *gi_modulename; + +#if CYTHON_USE_EXC_INFO_STACK +// See https://bugs.python.org/issue25612 +#define __Pyx_ExcInfoStruct _PyErr_StackItem +#else +// Minimal replacement struct for Py<3.7, without the Py3.7 exception state stack. +typedef struct { + PyObject *exc_type; + PyObject *exc_value; + PyObject *exc_traceback; +} __Pyx_ExcInfoStruct; +#endif + +typedef struct { + PyObject_HEAD + __pyx_coroutine_body_t body; + PyObject *closure; + __Pyx_ExcInfoStruct gi_exc_state; + PyObject *gi_weakreflist; + PyObject *classobj; + PyObject *yieldfrom; + PyObject *gi_name; + PyObject *gi_qualname; + PyObject *gi_modulename; PyObject *gi_code; PyObject *gi_frame; - int resume_label; - // using T_BOOL for property below requires char value - char is_running; -} __pyx_CoroutineObject; - -static __pyx_CoroutineObject *__Pyx__Coroutine_New( + int resume_label; + // using T_BOOL for property below requires char value + char is_running; +} __pyx_CoroutineObject; + +static __pyx_CoroutineObject *__Pyx__Coroutine_New( PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, - PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/ + PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/ static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit( __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/ -static CYTHON_INLINE void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *self); -static int __Pyx_Coroutine_clear(PyObject *self); /*proto*/ +static CYTHON_INLINE void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *self); +static int __Pyx_Coroutine_clear(PyObject *self); /*proto*/ static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value); /*proto*/ static PyObject *__Pyx_Coroutine_Close(PyObject *self); /*proto*/ static PyObject *__Pyx_Coroutine_Throw(PyObject *gen, PyObject *args); /*proto*/ - + // macros for exception state swapping instead of inline functions to make use of the local thread state context -#if CYTHON_USE_EXC_INFO_STACK -#define __Pyx_Coroutine_SwapException(self) -#define __Pyx_Coroutine_ResetAndClearException(self) __Pyx_Coroutine_ExceptionClear(&(self)->gi_exc_state) -#else +#if CYTHON_USE_EXC_INFO_STACK +#define __Pyx_Coroutine_SwapException(self) +#define __Pyx_Coroutine_ResetAndClearException(self) __Pyx_Coroutine_ExceptionClear(&(self)->gi_exc_state) +#else #define __Pyx_Coroutine_SwapException(self) { \ - __Pyx_ExceptionSwap(&(self)->gi_exc_state.exc_type, &(self)->gi_exc_state.exc_value, 
&(self)->gi_exc_state.exc_traceback); \ - __Pyx_Coroutine_ResetFrameBackpointer(&(self)->gi_exc_state); \ + __Pyx_ExceptionSwap(&(self)->gi_exc_state.exc_type, &(self)->gi_exc_state.exc_value, &(self)->gi_exc_state.exc_traceback); \ + __Pyx_Coroutine_ResetFrameBackpointer(&(self)->gi_exc_state); \ } #define __Pyx_Coroutine_ResetAndClearException(self) { \ - __Pyx_ExceptionReset((self)->gi_exc_state.exc_type, (self)->gi_exc_state.exc_value, (self)->gi_exc_state.exc_traceback); \ - (self)->gi_exc_state.exc_type = (self)->gi_exc_state.exc_value = (self)->gi_exc_state.exc_traceback = NULL; \ + __Pyx_ExceptionReset((self)->gi_exc_state.exc_type, (self)->gi_exc_state.exc_value, (self)->gi_exc_state.exc_traceback); \ + (self)->gi_exc_state.exc_type = (self)->gi_exc_state.exc_value = (self)->gi_exc_state.exc_traceback = NULL; \ } -#endif +#endif #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyGen_FetchStopIterationValue(pvalue) \ __Pyx_PyGen__FetchStopIterationValue($local_tstate_cname, pvalue) -#else +#else #define __Pyx_PyGen_FetchStopIterationValue(pvalue) \ __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, pvalue) -#endif +#endif static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *tstate, PyObject **pvalue); /*proto*/ -static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state); /*proto*/ - - -//////////////////// Coroutine.proto //////////////////// - -#define __Pyx_Coroutine_USED -static PyTypeObject *__pyx_CoroutineType = 0; -static PyTypeObject *__pyx_CoroutineAwaitType = 0; -#define __Pyx_Coroutine_CheckExact(obj) (Py_TYPE(obj) == __pyx_CoroutineType) +static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state); /*proto*/ + + +//////////////////// Coroutine.proto //////////////////// + +#define __Pyx_Coroutine_USED +static PyTypeObject *__pyx_CoroutineType = 0; +static PyTypeObject *__pyx_CoroutineAwaitType = 0; +#define __Pyx_Coroutine_CheckExact(obj) (Py_TYPE(obj) == __pyx_CoroutineType) // __Pyx_Coroutine_Check(obj): see override for IterableCoroutine below #define __Pyx_Coroutine_Check(obj) __Pyx_Coroutine_CheckExact(obj) #define __Pyx_CoroutineAwait_CheckExact(obj) (Py_TYPE(obj) == __pyx_CoroutineAwaitType) - + #define __Pyx_Coroutine_New(body, code, closure, name, qualname, module_name) \ __Pyx__Coroutine_New(__pyx_CoroutineType, body, code, closure, name, qualname, module_name) - -static int __pyx_Coroutine_init(void); /*proto*/ -static PyObject *__Pyx__Coroutine_await(PyObject *coroutine); /*proto*/ - + +static int __pyx_Coroutine_init(void); /*proto*/ +static PyObject *__Pyx__Coroutine_await(PyObject *coroutine); /*proto*/ + typedef struct { PyObject_HEAD PyObject *coroutine; } __pyx_CoroutineAwaitObject; - -static PyObject *__Pyx_CoroutineAwait_Close(__pyx_CoroutineAwaitObject *self, PyObject *arg); /*proto*/ + +static PyObject *__Pyx_CoroutineAwait_Close(__pyx_CoroutineAwaitObject *self, PyObject *arg); /*proto*/ static PyObject *__Pyx_CoroutineAwait_Throw(__pyx_CoroutineAwaitObject *self, PyObject *args); /*proto*/ -//////////////////// Generator.proto //////////////////// - -#define __Pyx_Generator_USED -static PyTypeObject *__pyx_GeneratorType = 0; -#define __Pyx_Generator_CheckExact(obj) (Py_TYPE(obj) == __pyx_GeneratorType) - +//////////////////// Generator.proto //////////////////// + +#define __Pyx_Generator_USED +static PyTypeObject *__pyx_GeneratorType = 0; +#define __Pyx_Generator_CheckExact(obj) (Py_TYPE(obj) == __pyx_GeneratorType) + #define __Pyx_Generator_New(body, 
code, closure, name, qualname, module_name) \ __Pyx__Coroutine_New(__pyx_GeneratorType, body, code, closure, name, qualname, module_name) - -static PyObject *__Pyx_Generator_Next(PyObject *self); -static int __pyx_Generator_init(void); /*proto*/ - - + +static PyObject *__Pyx_Generator_Next(PyObject *self); +static int __pyx_Generator_init(void); /*proto*/ + + //////////////////// AsyncGen //////////////////// //@requires: AsyncGen.c::AsyncGenerator // -> empty, only delegates to separate file -//////////////////// CoroutineBase //////////////////// -//@substitute: naming -//@requires: Exceptions.c::PyErrFetchRestore -//@requires: Exceptions.c::PyThreadStateGet -//@requires: Exceptions.c::SwapException -//@requires: Exceptions.c::RaiseException +//////////////////// CoroutineBase //////////////////// +//@substitute: naming +//@requires: Exceptions.c::PyErrFetchRestore +//@requires: Exceptions.c::PyThreadStateGet +//@requires: Exceptions.c::SwapException +//@requires: Exceptions.c::RaiseException //@requires: Exceptions.c::SaveResetException -//@requires: ObjectHandling.c::PyObjectCallMethod1 -//@requires: ObjectHandling.c::PyObjectGetAttrStr +//@requires: ObjectHandling.c::PyObjectCallMethod1 +//@requires: ObjectHandling.c::PyObjectGetAttrStr //@requires: CommonStructures.c::FetchCommonType - -#include <structmember.h> -#include <frameobject.h> - -#define __Pyx_Coroutine_Undelegate(gen) Py_CLEAR((gen)->yieldfrom) - -// If StopIteration exception is set, fetches its 'value' -// attribute if any, otherwise sets pvalue to None. -// -// Returns 0 if no exception or StopIteration is set. -// If any other exception is set, returns -1 and leaves -// pvalue unchanged. + +#include <structmember.h> +#include <frameobject.h> + +#define __Pyx_Coroutine_Undelegate(gen) Py_CLEAR((gen)->yieldfrom) + +// If StopIteration exception is set, fetches its 'value' +// attribute if any, otherwise sets pvalue to None. +// +// Returns 0 if no exception or StopIteration is set. +// If any other exception is set, returns -1 and leaves +// pvalue unchanged. static int __Pyx_PyGen__FetchStopIterationValue(CYTHON_UNUSED PyThreadState *$local_tstate_cname, PyObject **pvalue) { - PyObject *et, *ev, *tb; - PyObject *value = NULL; - - __Pyx_ErrFetch(&et, &ev, &tb); - - if (!et) { - Py_XDECREF(tb); - Py_XDECREF(ev); - Py_INCREF(Py_None); - *pvalue = Py_None; - return 0; - } - - // most common case: plain StopIteration without or with separate argument - if (likely(et == PyExc_StopIteration)) { - if (!ev) { - Py_INCREF(Py_None); - value = Py_None; - } -#if PY_VERSION_HEX >= 0x030300A0 - else if (Py_TYPE(ev) == (PyTypeObject*)PyExc_StopIteration) { - value = ((PyStopIterationObject *)ev)->value; - Py_INCREF(value); - Py_DECREF(ev); - } -#endif - // PyErr_SetObject() and friends put the value directly into ev - else if (unlikely(PyTuple_Check(ev))) { - // if it's a tuple, it is interpreted as separate constructor arguments (surprise!) 
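// [Editor's note] Concretely: the C equivalent of `raise StopIteration(1, 2)` -- or a direct
// PyErr_SetObject(PyExc_StopIteration, some_tuple) -- stores the bare argument tuple as the
// exception value without ever calling the constructor. The conceptual return value is then
// ev[0], not the tuple itself; an empty tuple means the value is None, and any non-tuple,
// non-StopIteration ev is taken to be the value directly.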
- if (PyTuple_GET_SIZE(ev) >= 1) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - value = PyTuple_GET_ITEM(ev, 0); - Py_INCREF(value); -#else - value = PySequence_ITEM(ev, 0); -#endif - } else { - Py_INCREF(Py_None); - value = Py_None; - } - Py_DECREF(ev); - } + PyObject *et, *ev, *tb; + PyObject *value = NULL; + + __Pyx_ErrFetch(&et, &ev, &tb); + + if (!et) { + Py_XDECREF(tb); + Py_XDECREF(ev); + Py_INCREF(Py_None); + *pvalue = Py_None; + return 0; + } + + // most common case: plain StopIteration without or with separate argument + if (likely(et == PyExc_StopIteration)) { + if (!ev) { + Py_INCREF(Py_None); + value = Py_None; + } +#if PY_VERSION_HEX >= 0x030300A0 + else if (Py_TYPE(ev) == (PyTypeObject*)PyExc_StopIteration) { + value = ((PyStopIterationObject *)ev)->value; + Py_INCREF(value); + Py_DECREF(ev); + } +#endif + // PyErr_SetObject() and friends put the value directly into ev + else if (unlikely(PyTuple_Check(ev))) { + // if it's a tuple, it is interpreted as separate constructor arguments (surprise!) + if (PyTuple_GET_SIZE(ev) >= 1) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + value = PyTuple_GET_ITEM(ev, 0); + Py_INCREF(value); +#else + value = PySequence_ITEM(ev, 0); +#endif + } else { + Py_INCREF(Py_None); + value = Py_None; + } + Py_DECREF(ev); + } else if (!__Pyx_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration)) { - // 'steal' reference to ev - value = ev; - } - if (likely(value)) { - Py_XDECREF(tb); - Py_DECREF(et); - *pvalue = value; - return 0; - } + // 'steal' reference to ev + value = ev; + } + if (likely(value)) { + Py_XDECREF(tb); + Py_DECREF(et); + *pvalue = value; + return 0; + } } else if (!__Pyx_PyErr_GivenExceptionMatches(et, PyExc_StopIteration)) { - __Pyx_ErrRestore(et, ev, tb); - return -1; - } - - // otherwise: normalise and check what that gives us - PyErr_NormalizeException(&et, &ev, &tb); - if (unlikely(!PyObject_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration))) { - // looks like normalisation failed - raise the new exception - __Pyx_ErrRestore(et, ev, tb); - return -1; - } - Py_XDECREF(tb); - Py_DECREF(et); -#if PY_VERSION_HEX >= 0x030300A0 - value = ((PyStopIterationObject *)ev)->value; - Py_INCREF(value); - Py_DECREF(ev); -#else - { - PyObject* args = __Pyx_PyObject_GetAttrStr(ev, PYIDENT("args")); - Py_DECREF(ev); - if (likely(args)) { - value = PySequence_GetItem(args, 0); - Py_DECREF(args); - } - if (unlikely(!value)) { - __Pyx_ErrRestore(NULL, NULL, NULL); - Py_INCREF(Py_None); - value = Py_None; - } - } -#endif - *pvalue = value; - return 0; -} - -static CYTHON_INLINE -void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *exc_state) { - PyObject *t, *v, *tb; - t = exc_state->exc_type; - v = exc_state->exc_value; - tb = exc_state->exc_traceback; - - exc_state->exc_type = NULL; - exc_state->exc_value = NULL; - exc_state->exc_traceback = NULL; - - Py_XDECREF(t); - Py_XDECREF(v); - Py_XDECREF(tb); -} - + __Pyx_ErrRestore(et, ev, tb); + return -1; + } + + // otherwise: normalise and check what that gives us + PyErr_NormalizeException(&et, &ev, &tb); + if (unlikely(!PyObject_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration))) { + // looks like normalisation failed - raise the new exception + __Pyx_ErrRestore(et, ev, tb); + return -1; + } + Py_XDECREF(tb); + Py_DECREF(et); +#if PY_VERSION_HEX >= 0x030300A0 + value = ((PyStopIterationObject *)ev)->value; + Py_INCREF(value); + Py_DECREF(ev); +#else + { + PyObject* args = __Pyx_PyObject_GetAttrStr(ev, PYIDENT("args")); + Py_DECREF(ev); + if (likely(args)) { + 
value = PySequence_GetItem(args, 0); + Py_DECREF(args); + } + if (unlikely(!value)) { + __Pyx_ErrRestore(NULL, NULL, NULL); + Py_INCREF(Py_None); + value = Py_None; + } + } +#endif + *pvalue = value; + return 0; +} + +static CYTHON_INLINE +void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *exc_state) { + PyObject *t, *v, *tb; + t = exc_state->exc_type; + v = exc_state->exc_value; + tb = exc_state->exc_traceback; + + exc_state->exc_type = NULL; + exc_state->exc_value = NULL; + exc_state->exc_traceback = NULL; + + Py_XDECREF(t); + Py_XDECREF(v); + Py_XDECREF(tb); +} + #define __Pyx_Coroutine_AlreadyRunningError(gen) (__Pyx__Coroutine_AlreadyRunningError(gen), (PyObject*)NULL) static void __Pyx__Coroutine_AlreadyRunningError(CYTHON_UNUSED __pyx_CoroutineObject *gen) { const char *msg; - if ((0)) { + if ((0)) { #ifdef __Pyx_Coroutine_USED } else if (__Pyx_Coroutine_Check((PyObject*)gen)) { msg = "coroutine already executing"; @@ -619,14 +619,14 @@ static void __Pyx__Coroutine_AlreadyRunningError(CYTHON_UNUSED __pyx_CoroutineOb #endif } else { msg = "generator already executing"; - } + } PyErr_SetString(PyExc_ValueError, msg); -} - +} + #define __Pyx_Coroutine_NotStartedError(gen) (__Pyx__Coroutine_NotStartedError(gen), (PyObject*)NULL) static void __Pyx__Coroutine_NotStartedError(CYTHON_UNUSED PyObject *gen) { const char *msg; - if ((0)) { + if ((0)) { #ifdef __Pyx_Coroutine_USED } else if (__Pyx_Coroutine_Check(gen)) { msg = "can't send non-None value to a just-started coroutine"; @@ -667,53 +667,53 @@ static PyObject *__Pyx_Coroutine_SendEx(__pyx_CoroutineObject *self, PyObject *value, int closing) { __Pyx_PyThreadState_declare PyThreadState *tstate; - __Pyx_ExcInfoStruct *exc_state; - PyObject *retval; - - assert(!self->is_running); - - if (unlikely(self->resume_label == 0)) { - if (unlikely(value && value != Py_None)) { + __Pyx_ExcInfoStruct *exc_state; + PyObject *retval; + + assert(!self->is_running); + + if (unlikely(self->resume_label == 0)) { + if (unlikely(value && value != Py_None)) { return __Pyx_Coroutine_NotStartedError((PyObject*)self); - } - } - - if (unlikely(self->resume_label == -1)) { + } + } + + if (unlikely(self->resume_label == -1)) { return __Pyx_Coroutine_AlreadyTerminatedError((PyObject*)self, value, closing); - } - + } + #if CYTHON_FAST_THREAD_STATE - __Pyx_PyThreadState_assign + __Pyx_PyThreadState_assign tstate = $local_tstate_cname; #else tstate = __Pyx_PyThreadState_Current; #endif - // Traceback/Frame rules pre-Py3.7: - // - on entry, save external exception state in self->gi_exc_state, restore it on exit - // - on exit, keep internally generated exceptions in self->gi_exc_state, clear everything else + // Traceback/Frame rules pre-Py3.7: + // - on entry, save external exception state in self->gi_exc_state, restore it on exit + // - on exit, keep internally generated exceptions in self->gi_exc_state, clear everything else // - on entry, set "f_back" pointer of internal exception traceback to (current) outer call frame // - on exit, clear "f_back" of internal exception traceback // - do not touch external frames and tracebacks - // Traceback/Frame rules for Py3.7+ (CYTHON_USE_EXC_INFO_STACK): - // - on entry, push internal exception state in self->gi_exc_state on the exception stack - // - on exit, keep internally generated exceptions in self->gi_exc_state, clear everything else - // - on entry, set "f_back" pointer of internal exception traceback to (current) outer call frame - // - on exit, clear "f_back" of internal exception traceback - // - do not 
touch external frames and tracebacks - - exc_state = &self->gi_exc_state; - if (exc_state->exc_type) { - #if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON - // FIXME: what to do in PyPy? - #else - // Generators always return to their most recent caller, not - // necessarily their creator. - if (exc_state->exc_traceback) { - PyTracebackObject *tb = (PyTracebackObject *) exc_state->exc_traceback; - PyFrameObject *f = tb->tb_frame; - + // Traceback/Frame rules for Py3.7+ (CYTHON_USE_EXC_INFO_STACK): + // - on entry, push internal exception state in self->gi_exc_state on the exception stack + // - on exit, keep internally generated exceptions in self->gi_exc_state, clear everything else + // - on entry, set "f_back" pointer of internal exception traceback to (current) outer call frame + // - on exit, clear "f_back" of internal exception traceback + // - do not touch external frames and tracebacks + + exc_state = &self->gi_exc_state; + if (exc_state->exc_type) { + #if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON + // FIXME: what to do in PyPy? + #else + // Generators always return to their most recent caller, not + // necessarily their creator. + if (exc_state->exc_traceback) { + PyTracebackObject *tb = (PyTracebackObject *) exc_state->exc_traceback; + PyFrameObject *f = tb->tb_frame; + assert(f->f_back == NULL); #if PY_VERSION_HEX >= 0x030B00A1 // PyThreadState_GetFrame returns NULL if there isn't a current frame @@ -723,62 +723,62 @@ PyObject *__Pyx_Coroutine_SendEx(__pyx_CoroutineObject *self, PyObject *value, i Py_XINCREF(tstate->frame); f->f_back = tstate->frame; #endif - } - #endif - } - -#if CYTHON_USE_EXC_INFO_STACK - // See https://bugs.python.org/issue25612 - exc_state->previous_item = tstate->exc_info; - tstate->exc_info = exc_state; -#else - if (exc_state->exc_type) { + } + #endif + } + +#if CYTHON_USE_EXC_INFO_STACK + // See https://bugs.python.org/issue25612 + exc_state->previous_item = tstate->exc_info; + tstate->exc_info = exc_state; +#else + if (exc_state->exc_type) { // We were in an except handler when we left, // restore the exception state which was put aside. - __Pyx_ExceptionSwap(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); + __Pyx_ExceptionSwap(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); // self->exc_* now holds the exception state of the caller - } else { + } else { // save away the exception state of the caller - __Pyx_Coroutine_ExceptionClear(exc_state); - __Pyx_ExceptionSave(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); - } -#endif - - self->is_running = 1; + __Pyx_Coroutine_ExceptionClear(exc_state); + __Pyx_ExceptionSave(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); + } +#endif + + self->is_running = 1; PyObject* s = (PyObject *)(self); retval = self->body(s, tstate, value); - self->is_running = 0; - -#if CYTHON_USE_EXC_INFO_STACK - // See https://bugs.python.org/issue25612 - exc_state = &self->gi_exc_state; - tstate->exc_info = exc_state->previous_item; - exc_state->previous_item = NULL; - // Cut off the exception frame chain so that we can reconnect it on re-entry above. - __Pyx_Coroutine_ResetFrameBackpointer(exc_state); -#endif - + self->is_running = 0; + +#if CYTHON_USE_EXC_INFO_STACK + // See https://bugs.python.org/issue25612 + exc_state = &self->gi_exc_state; + tstate->exc_info = exc_state->previous_item; + exc_state->previous_item = NULL; + // Cut off the exception frame chain so that we can reconnect it on re-entry above. 
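// [Editor's note] If tb_frame->f_back stayed set while the generator is suspended, the
// stored traceback would pin the previous caller's frame (and all of its locals) alive,
// and since a generator may be resumed from a *different* caller each time, the pointer
// would also go stale. Relinking on entry and clearing on exit keeps f_back valid exactly
// while the generator is running, and unset while it is suspended.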
+ __Pyx_Coroutine_ResetFrameBackpointer(exc_state); +#endif + return retval; } -static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state) { +static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state) { // Don't keep the reference to f_back any longer than necessary. It // may keep a chain of frames alive or it could create a reference // cycle. - PyObject *exc_tb = exc_state->exc_traceback; - - if (likely(exc_tb)) { -#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON + PyObject *exc_tb = exc_state->exc_traceback; + + if (likely(exc_tb)) { +#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON // FIXME: what to do in PyPy? -#else - PyTracebackObject *tb = (PyTracebackObject *) exc_tb; +#else + PyTracebackObject *tb = (PyTracebackObject *) exc_tb; PyFrameObject *f = tb->tb_frame; Py_CLEAR(f->f_back); -#endif - } -} - -static CYTHON_INLINE +#endif + } +} + +static CYTHON_INLINE PyObject *__Pyx_Coroutine_MethodReturn(CYTHON_UNUSED PyObject* gen, PyObject *retval) { if (unlikely(!retval)) { __Pyx_PyThreadState_declare @@ -792,12 +792,12 @@ PyObject *__Pyx_Coroutine_MethodReturn(CYTHON_UNUSED PyObject* gen, PyObject *re #endif __Pyx_PyErr_SetNone(exc); } - } - return retval; -} - + } + return retval; +} + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) -static CYTHON_INLINE +static CYTHON_INLINE PyObject *__Pyx_PyGen_Send(PyGenObject *gen, PyObject *arg) { #if PY_VERSION_HEX <= 0x030A00A1 return _PyGen_Send(gen, arg); @@ -823,38 +823,38 @@ PyObject *__Pyx_PyGen_Send(PyGenObject *gen, PyObject *arg) { #endif static CYTHON_INLINE -PyObject *__Pyx_Coroutine_FinishDelegation(__pyx_CoroutineObject *gen) { - PyObject *ret; - PyObject *val = NULL; - __Pyx_Coroutine_Undelegate(gen); +PyObject *__Pyx_Coroutine_FinishDelegation(__pyx_CoroutineObject *gen) { + PyObject *ret; + PyObject *val = NULL; + __Pyx_Coroutine_Undelegate(gen); __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, &val); - // val == NULL on failure => pass on exception + // val == NULL on failure => pass on exception ret = __Pyx_Coroutine_SendEx(gen, val, 0); - Py_XDECREF(val); - return ret; -} - -static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value) { - PyObject *retval; - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; - PyObject *yf = gen->yieldfrom; + Py_XDECREF(val); + return ret; +} + +static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value) { + PyObject *retval; + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; + PyObject *yf = gen->yieldfrom; if (unlikely(gen->is_running)) return __Pyx_Coroutine_AlreadyRunningError(gen); - if (yf) { - PyObject *ret; - // FIXME: does this really need an INCREF() ? - //Py_INCREF(yf); - gen->is_running = 1; - #ifdef __Pyx_Generator_USED - if (__Pyx_Generator_CheckExact(yf)) { - ret = __Pyx_Coroutine_Send(yf, value); - } else - #endif - #ifdef __Pyx_Coroutine_USED + if (yf) { + PyObject *ret; + // FIXME: does this really need an INCREF() ? 
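// [Editor's note] The branch ladder below is a fast-path dispatch for the sub-iterator of
// `yield from` / `await`: Cython's own generator, coroutine and async-gen-send types are
// driven by a direct recursive C call (no attribute lookup), CPython's native generators
// and coroutines go through __Pyx_PyGen_Send where the build allows it, and any other
// object falls back to tp_iternext for None or a looked-up "send" method for real values.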
+ //Py_INCREF(yf); + gen->is_running = 1; + #ifdef __Pyx_Generator_USED + if (__Pyx_Generator_CheckExact(yf)) { + ret = __Pyx_Coroutine_Send(yf, value); + } else + #endif + #ifdef __Pyx_Coroutine_USED if (__Pyx_Coroutine_Check(yf)) { - ret = __Pyx_Coroutine_Send(yf, value); - } else - #endif + ret = __Pyx_Coroutine_Send(yf, value); + } else + #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_PyAsyncGenASend_CheckExact(yf)) { ret = __Pyx_async_gen_asend_send(yf, value); @@ -872,49 +872,49 @@ static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value) { ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value); } else #endif - { - if (value == Py_None) - ret = Py_TYPE(yf)->tp_iternext(yf); - else - ret = __Pyx_PyObject_CallMethod1(yf, PYIDENT("send"), value); - } - gen->is_running = 0; - //Py_DECREF(yf); - if (likely(ret)) { - return ret; - } - retval = __Pyx_Coroutine_FinishDelegation(gen); - } else { + { + if (value == Py_None) + ret = Py_TYPE(yf)->tp_iternext(yf); + else + ret = __Pyx_PyObject_CallMethod1(yf, PYIDENT("send"), value); + } + gen->is_running = 0; + //Py_DECREF(yf); + if (likely(ret)) { + return ret; + } + retval = __Pyx_Coroutine_FinishDelegation(gen); + } else { retval = __Pyx_Coroutine_SendEx(gen, value, 0); - } + } return __Pyx_Coroutine_MethodReturn(self, retval); -} - -// This helper function is used by gen_close and gen_throw to -// close a subiterator being delegated to by yield-from. -static int __Pyx_Coroutine_CloseIter(__pyx_CoroutineObject *gen, PyObject *yf) { - PyObject *retval = NULL; - int err = 0; - - #ifdef __Pyx_Generator_USED - if (__Pyx_Generator_CheckExact(yf)) { - retval = __Pyx_Coroutine_Close(yf); - if (!retval) - return -1; - } else - #endif - #ifdef __Pyx_Coroutine_USED +} + +// This helper function is used by gen_close and gen_throw to +// close a subiterator being delegated to by yield-from. 
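/* [Editor's note -- illustrative sketch, not part of the committed diff] The helper that
 * follows implements the PEP 342/380 rule that closing a delegating generator must first
 * close the sub-iterator it is suspended in. Known Cython/CPython types get direct C calls;
 * for an arbitrary iterator the duck-typed fallback behaves roughly like this hypothetical
 * standalone helper (the name is invented for the sketch):
 */
static int __pyx_sketch_close_subiter(PyObject *yf) {
    PyObject *meth = PyObject_GetAttrString(yf, "close");
    if (!meth) {
        // A sub-iterator without close() is fine -- plain iterators are allowed here;
        // any *other* lookup failure is reported rather than silently swallowed.
        if (!PyErr_ExceptionMatches(PyExc_AttributeError))
            PyErr_WriteUnraisable(yf);
        PyErr_Clear();
        return 0;
    }
    PyObject *res = PyObject_CallObject(meth, NULL);   /* yf.close() */
    Py_DECREF(meth);
    if (!res)
        return -1;   /* close() itself raised: propagate as an error */
    Py_DECREF(res);
    return 0;
}
/* Design note: ignoring a missing close() attribute (while clearing the AttributeError)
 * is what lets `yield from` delegate to plain iterators that never implement close(). */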
+static int __Pyx_Coroutine_CloseIter(__pyx_CoroutineObject *gen, PyObject *yf) { + PyObject *retval = NULL; + int err = 0; + + #ifdef __Pyx_Generator_USED + if (__Pyx_Generator_CheckExact(yf)) { + retval = __Pyx_Coroutine_Close(yf); + if (!retval) + return -1; + } else + #endif + #ifdef __Pyx_Coroutine_USED if (__Pyx_Coroutine_Check(yf)) { - retval = __Pyx_Coroutine_Close(yf); - if (!retval) - return -1; - } else + retval = __Pyx_Coroutine_Close(yf); + if (!retval) + return -1; + } else if (__Pyx_CoroutineAwait_CheckExact(yf)) { - retval = __Pyx_CoroutineAwait_Close((__pyx_CoroutineAwaitObject*)yf, NULL); + retval = __Pyx_CoroutineAwait_Close((__pyx_CoroutineAwaitObject*)yf, NULL); if (!retval) return -1; } else - #endif + #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_PyAsyncGenASend_CheckExact(yf)) { retval = __Pyx_async_gen_asend_close(yf, NULL); @@ -925,43 +925,43 @@ static int __Pyx_Coroutine_CloseIter(__pyx_CoroutineObject *gen, PyObject *yf) { // cannot fail } else #endif - { - PyObject *meth; - gen->is_running = 1; - meth = __Pyx_PyObject_GetAttrStr(yf, PYIDENT("close")); - if (unlikely(!meth)) { - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_WriteUnraisable(yf); - } - PyErr_Clear(); - } else { - retval = PyObject_CallFunction(meth, NULL); - Py_DECREF(meth); - if (!retval) - err = -1; - } - gen->is_running = 0; - } - Py_XDECREF(retval); - return err; -} - -static PyObject *__Pyx_Generator_Next(PyObject *self) { - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; - PyObject *yf = gen->yieldfrom; + { + PyObject *meth; + gen->is_running = 1; + meth = __Pyx_PyObject_GetAttrStr(yf, PYIDENT("close")); + if (unlikely(!meth)) { + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_WriteUnraisable(yf); + } + PyErr_Clear(); + } else { + retval = PyObject_CallFunction(meth, NULL); + Py_DECREF(meth); + if (!retval) + err = -1; + } + gen->is_running = 0; + } + Py_XDECREF(retval); + return err; +} + +static PyObject *__Pyx_Generator_Next(PyObject *self) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; + PyObject *yf = gen->yieldfrom; if (unlikely(gen->is_running)) return __Pyx_Coroutine_AlreadyRunningError(gen); - if (yf) { - PyObject *ret; - // FIXME: does this really need an INCREF() ? - //Py_INCREF(yf); - // YieldFrom code ensures that yf is an iterator - gen->is_running = 1; - #ifdef __Pyx_Generator_USED - if (__Pyx_Generator_CheckExact(yf)) { - ret = __Pyx_Generator_Next(yf); - } else - #endif + if (yf) { + PyObject *ret; + // FIXME: does this really need an INCREF() ? 
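// [Editor's note] __next__ on a delegating generator is equivalent to send(None), so the
// ladder below mirrors __Pyx_Coroutine_Send specialised for value == Py_None: Cython types
// recurse directly, CPython generators use __Pyx_PyGen_Send where the build guard below
// allows it (_PyGen_Send is not exported before Py3.6), and any other iterator is simply
// advanced via tp_iternext.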
+ //Py_INCREF(yf); + // YieldFrom code ensures that yf is an iterator + gen->is_running = 1; + #ifdef __Pyx_Generator_USED + if (__Pyx_Generator_CheckExact(yf)) { + ret = __Pyx_Generator_Next(yf); + } else + #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) // _PyGen_Send() is not exported before Py3.6 if (PyGen_CheckExact(yf)) { @@ -973,42 +973,42 @@ static PyObject *__Pyx_Generator_Next(PyObject *self) { ret = __Pyx_Coroutine_Send(yf, Py_None); } else #endif - ret = Py_TYPE(yf)->tp_iternext(yf); - gen->is_running = 0; - //Py_DECREF(yf); - if (likely(ret)) { - return ret; - } - return __Pyx_Coroutine_FinishDelegation(gen); - } + ret = Py_TYPE(yf)->tp_iternext(yf); + gen->is_running = 0; + //Py_DECREF(yf); + if (likely(ret)) { + return ret; + } + return __Pyx_Coroutine_FinishDelegation(gen); + } return __Pyx_Coroutine_SendEx(gen, Py_None, 0); -} - -static PyObject *__Pyx_Coroutine_Close_Method(PyObject *self, CYTHON_UNUSED PyObject *arg) { - return __Pyx_Coroutine_Close(self); -} - -static PyObject *__Pyx_Coroutine_Close(PyObject *self) { - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - PyObject *retval, *raised_exception; - PyObject *yf = gen->yieldfrom; - int err = 0; - +} + +static PyObject *__Pyx_Coroutine_Close_Method(PyObject *self, CYTHON_UNUSED PyObject *arg) { + return __Pyx_Coroutine_Close(self); +} + +static PyObject *__Pyx_Coroutine_Close(PyObject *self) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + PyObject *retval, *raised_exception; + PyObject *yf = gen->yieldfrom; + int err = 0; + if (unlikely(gen->is_running)) return __Pyx_Coroutine_AlreadyRunningError(gen); - - if (yf) { - Py_INCREF(yf); - err = __Pyx_Coroutine_CloseIter(gen, yf); - __Pyx_Coroutine_Undelegate(gen); - Py_DECREF(yf); - } - if (err == 0) - PyErr_SetNone(PyExc_GeneratorExit); + + if (yf) { + Py_INCREF(yf); + err = __Pyx_Coroutine_CloseIter(gen, yf); + __Pyx_Coroutine_Undelegate(gen); + Py_DECREF(yf); + } + if (err == 0) + PyErr_SetNone(PyExc_GeneratorExit); retval = __Pyx_Coroutine_SendEx(gen, NULL, 1); if (unlikely(retval)) { const char *msg; - Py_DECREF(retval); + Py_DECREF(retval); if ((0)) { #ifdef __Pyx_Coroutine_USED } else if (__Pyx_Coroutine_Check(self)) { @@ -1026,48 +1026,48 @@ static PyObject *__Pyx_Coroutine_Close(PyObject *self) { msg = "generator ignored GeneratorExit"; } PyErr_SetString(PyExc_RuntimeError, msg); - return NULL; - } - raised_exception = PyErr_Occurred(); + return NULL; + } + raised_exception = PyErr_Occurred(); if (likely(!raised_exception || __Pyx_PyErr_GivenExceptionMatches2(raised_exception, PyExc_GeneratorExit, PyExc_StopIteration))) { - // ignore these errors - if (raised_exception) PyErr_Clear(); - Py_INCREF(Py_None); - return Py_None; - } - return NULL; -} - + // ignore these errors + if (raised_exception) PyErr_Clear(); + Py_INCREF(Py_None); + return Py_None; + } + return NULL; +} + static PyObject *__Pyx__Coroutine_Throw(PyObject *self, PyObject *typ, PyObject *val, PyObject *tb, PyObject *args, int close_on_genexit) { - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - PyObject *yf = gen->yieldfrom; - + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + PyObject *yf = gen->yieldfrom; + if (unlikely(gen->is_running)) return __Pyx_Coroutine_AlreadyRunningError(gen); - - if (yf) { - PyObject *ret; - Py_INCREF(yf); + + if (yf) { + PyObject *ret; + Py_INCREF(yf); if (__Pyx_PyErr_GivenExceptionMatches(typ, PyExc_GeneratorExit) && 
close_on_genexit) { // Asynchronous generators *should not* be closed right away. // We have to allow some awaits to work it through, hence the // `close_on_genexit` parameter here. - int err = __Pyx_Coroutine_CloseIter(gen, yf); - Py_DECREF(yf); - __Pyx_Coroutine_Undelegate(gen); - if (err < 0) + int err = __Pyx_Coroutine_CloseIter(gen, yf); + Py_DECREF(yf); + __Pyx_Coroutine_Undelegate(gen); + if (err < 0) return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0)); - goto throw_here; - } - gen->is_running = 1; + goto throw_here; + } + gen->is_running = 1; if (0 - #ifdef __Pyx_Generator_USED + #ifdef __Pyx_Generator_USED || __Pyx_Generator_CheckExact(yf) - #endif - #ifdef __Pyx_Coroutine_USED + #endif + #ifdef __Pyx_Coroutine_USED || __Pyx_Coroutine_Check(yf) - #endif + #endif ) { ret = __Pyx__Coroutine_Throw(yf, typ, val, tb, args, close_on_genexit); #ifdef __Pyx_Coroutine_USED @@ -1075,70 +1075,70 @@ static PyObject *__Pyx__Coroutine_Throw(PyObject *self, PyObject *typ, PyObject ret = __Pyx__Coroutine_Throw(((__pyx_CoroutineAwaitObject*)yf)->coroutine, typ, val, tb, args, close_on_genexit); #endif } else { - PyObject *meth = __Pyx_PyObject_GetAttrStr(yf, PYIDENT("throw")); - if (unlikely(!meth)) { - Py_DECREF(yf); - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) { - gen->is_running = 0; - return NULL; - } - PyErr_Clear(); - __Pyx_Coroutine_Undelegate(gen); - gen->is_running = 0; - goto throw_here; - } + PyObject *meth = __Pyx_PyObject_GetAttrStr(yf, PYIDENT("throw")); + if (unlikely(!meth)) { + Py_DECREF(yf); + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) { + gen->is_running = 0; + return NULL; + } + PyErr_Clear(); + __Pyx_Coroutine_Undelegate(gen); + gen->is_running = 0; + goto throw_here; + } if (likely(args)) { ret = PyObject_CallObject(meth, args); } else { // "tb" or even "val" might be NULL, but that also correctly terminates the argument list ret = PyObject_CallFunctionObjArgs(meth, typ, val, tb, NULL); } - Py_DECREF(meth); - } - gen->is_running = 0; - Py_DECREF(yf); - if (!ret) { - ret = __Pyx_Coroutine_FinishDelegation(gen); - } + Py_DECREF(meth); + } + gen->is_running = 0; + Py_DECREF(yf); + if (!ret) { + ret = __Pyx_Coroutine_FinishDelegation(gen); + } return __Pyx_Coroutine_MethodReturn(self, ret); - } -throw_here: - __Pyx_Raise(typ, val, tb, NULL); + } +throw_here: + __Pyx_Raise(typ, val, tb, NULL); return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0)); -} - +} + static PyObject *__Pyx_Coroutine_Throw(PyObject *self, PyObject *args) { PyObject *typ; PyObject *val = NULL; PyObject *tb = NULL; - + if (!PyArg_UnpackTuple(args, (char *)"throw", 1, 3, &typ, &val, &tb)) return NULL; return __Pyx__Coroutine_Throw(self, typ, val, tb, args, 1); } -static CYTHON_INLINE int __Pyx_Coroutine_traverse_excstate(__Pyx_ExcInfoStruct *exc_state, visitproc visit, void *arg) { - Py_VISIT(exc_state->exc_type); - Py_VISIT(exc_state->exc_value); - Py_VISIT(exc_state->exc_traceback); - return 0; -} - +static CYTHON_INLINE int __Pyx_Coroutine_traverse_excstate(__Pyx_ExcInfoStruct *exc_state, visitproc visit, void *arg) { + Py_VISIT(exc_state->exc_type); + Py_VISIT(exc_state->exc_value); + Py_VISIT(exc_state->exc_traceback); + return 0; +} + static int __Pyx_Coroutine_traverse(__pyx_CoroutineObject *gen, visitproc visit, void *arg) { - Py_VISIT(gen->closure); - Py_VISIT(gen->classobj); - Py_VISIT(gen->yieldfrom); - return __Pyx_Coroutine_traverse_excstate(&gen->gi_exc_state, visit, arg); -} - -static int __Pyx_Coroutine_clear(PyObject *self) { 
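// [Editor's note] tp_clear counterpart of the tp_traverse function above: it drops every
// reference the generator owns (including the saved exception state), so the cycle
// collector can break reference cycles running through a suspended generator; Py_CLEAR
// also NULLs each slot, which keeps a later dealloc of the same object safe.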
- __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - - Py_CLEAR(gen->closure); - Py_CLEAR(gen->classobj); - Py_CLEAR(gen->yieldfrom); - __Pyx_Coroutine_ExceptionClear(&gen->gi_exc_state); + Py_VISIT(gen->closure); + Py_VISIT(gen->classobj); + Py_VISIT(gen->yieldfrom); + return __Pyx_Coroutine_traverse_excstate(&gen->gi_exc_state, visit, arg); +} + +static int __Pyx_Coroutine_clear(PyObject *self) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + + Py_CLEAR(gen->closure); + Py_CLEAR(gen->classobj); + Py_CLEAR(gen->yieldfrom); + __Pyx_Coroutine_ExceptionClear(&gen->gi_exc_state); #ifdef __Pyx_AsyncGen_USED if (__Pyx_AsyncGen_CheckExact(self)) { Py_CLEAR(((__pyx_PyAsyncGenObject*)gen)->ag_finalizer); @@ -1146,35 +1146,35 @@ static int __Pyx_Coroutine_clear(PyObject *self) { #endif Py_CLEAR(gen->gi_code); Py_CLEAR(gen->gi_frame); - Py_CLEAR(gen->gi_name); - Py_CLEAR(gen->gi_qualname); + Py_CLEAR(gen->gi_name); + Py_CLEAR(gen->gi_qualname); Py_CLEAR(gen->gi_modulename); - return 0; -} - -static void __Pyx_Coroutine_dealloc(PyObject *self) { - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - - PyObject_GC_UnTrack(gen); - if (gen->gi_weakreflist != NULL) - PyObject_ClearWeakRefs(self); - + return 0; +} + +static void __Pyx_Coroutine_dealloc(PyObject *self) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + + PyObject_GC_UnTrack(gen); + if (gen->gi_weakreflist != NULL) + PyObject_ClearWeakRefs(self); + if (gen->resume_label >= 0) { // Generator is paused or unstarted, so we need to close - PyObject_GC_Track(self); + PyObject_GC_Track(self); #if PY_VERSION_HEX >= 0x030400a1 && CYTHON_USE_TP_FINALIZE - if (PyObject_CallFinalizerFromDealloc(self)) -#else - Py_TYPE(gen)->tp_del(self); + if (PyObject_CallFinalizerFromDealloc(self)) +#else + Py_TYPE(gen)->tp_del(self); if (Py_REFCNT(self) > 0) -#endif - { - // resurrected. :( - return; - } - PyObject_GC_UnTrack(self); - } - +#endif + { + // resurrected. :( + return; + } + PyObject_GC_UnTrack(self); + } + #ifdef __Pyx_AsyncGen_USED if (__Pyx_AsyncGen_CheckExact(self)) { /* We have to handle this case for asynchronous generators @@ -1183,31 +1183,31 @@ static void __Pyx_Coroutine_dealloc(PyObject *self) { Py_CLEAR(((__pyx_PyAsyncGenObject*)self)->ag_finalizer); } #endif - __Pyx_Coroutine_clear(self); - PyObject_GC_Del(gen); -} - -static void __Pyx_Coroutine_del(PyObject *self) { - PyObject *error_type, *error_value, *error_traceback; - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - __Pyx_PyThreadState_declare - + __Pyx_Coroutine_clear(self); + PyObject_GC_Del(gen); +} + +static void __Pyx_Coroutine_del(PyObject *self) { + PyObject *error_type, *error_value, *error_traceback; + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + __Pyx_PyThreadState_declare + if (gen->resume_label < 0) { // already terminated => nothing to clean up return; } - + #if !CYTHON_USE_TP_FINALIZE - // Temporarily resurrect the object. - assert(self->ob_refcnt == 0); + // Temporarily resurrect the object. + assert(self->ob_refcnt == 0); __Pyx_SET_REFCNT(self, 1); -#endif - +#endif + __Pyx_PyThreadState_assign - // Save the current exception, if any. - __Pyx_ErrFetch(&error_type, &error_value, &error_traceback); - + // Save the current exception, if any. 
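// [Editor's note] A finaliser can run at an arbitrary point -- typically inside some
// unrelated Py_DECREF -- so a perfectly valid exception may already be set in the thread
// state. It is fetched here and restored at the end, so that whatever the cleanup below
// does (including running user code via close()) cannot clobber the caller's exception.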
+ __Pyx_ErrFetch(&error_type, &error_value, &error_traceback); + #ifdef __Pyx_AsyncGen_USED if (__Pyx_AsyncGen_CheckExact(self)) { __pyx_PyAsyncGenObject *agen = (__pyx_PyAsyncGenObject*)self; @@ -1225,7 +1225,7 @@ static void __Pyx_Coroutine_del(PyObject *self) { } } #endif - + if (unlikely(gen->resume_label == 0 && !error_value)) { #ifdef __Pyx_Coroutine_USED #ifdef __Pyx_Generator_USED @@ -1250,7 +1250,7 @@ static void __Pyx_Coroutine_del(PyObject *self) { qualname = gen->gi_qualname; cname = PyString_AS_STRING(qualname); msg = PyString_FromFormat("coroutine '%.50s' was never awaited", cname); - + if (unlikely(!msg)) { PyErr_Clear(); cmsg = (char*) "coroutine was never awaited"; @@ -1275,109 +1275,109 @@ static void __Pyx_Coroutine_del(PyObject *self) { } } - // Restore the saved exception. - __Pyx_ErrRestore(error_type, error_value, error_traceback); - + // Restore the saved exception. + __Pyx_ErrRestore(error_type, error_value, error_traceback); + #if !CYTHON_USE_TP_FINALIZE - // Undo the temporary resurrection; can't use DECREF here, it would - // cause a recursive call. + // Undo the temporary resurrection; can't use DECREF here, it would + // cause a recursive call. assert(Py_REFCNT(self) > 0); - if (--self->ob_refcnt == 0) { - // this is the normal path out - return; - } - - // close() resurrected it! Make it look like the original Py_DECREF - // never happened. - { + if (--self->ob_refcnt == 0) { + // this is the normal path out + return; + } + + // close() resurrected it! Make it look like the original Py_DECREF + // never happened. + { Py_ssize_t refcnt = Py_REFCNT(self); - _Py_NewReference(self); + _Py_NewReference(self); __Pyx_SET_REFCNT(self, refcnt); - } -#if CYTHON_COMPILING_IN_CPYTHON + } +#if CYTHON_COMPILING_IN_CPYTHON assert(PyType_IS_GC(Py_TYPE(self)) && - _Py_AS_GC(self)->gc.gc_refs != _PyGC_REFS_UNTRACKED); - - // If Py_REF_DEBUG, _Py_NewReference bumped _Py_RefTotal, so - // we need to undo that. - _Py_DEC_REFTOTAL; -#endif - // If Py_TRACE_REFS, _Py_NewReference re-added self to the object - // chain, so no more to do there. - // If COUNT_ALLOCS, the original decref bumped tp_frees, and - // _Py_NewReference bumped tp_allocs: both of those need to be - // undone. 
-#ifdef COUNT_ALLOCS - --Py_TYPE(self)->tp_frees; - --Py_TYPE(self)->tp_allocs; -#endif -#endif -} - -static PyObject * -__Pyx_Coroutine_get_name(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context) -{ - PyObject *name = self->gi_name; - // avoid NULL pointer dereference during garbage collection - if (unlikely(!name)) name = Py_None; - Py_INCREF(name); - return name; -} - -static int -__Pyx_Coroutine_set_name(__pyx_CoroutineObject *self, PyObject *value, CYTHON_UNUSED void *context) -{ - PyObject *tmp; - -#if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) -#else - if (unlikely(value == NULL || !PyString_Check(value))) -#endif - { - PyErr_SetString(PyExc_TypeError, - "__name__ must be set to a string object"); - return -1; - } - tmp = self->gi_name; - Py_INCREF(value); - self->gi_name = value; - Py_XDECREF(tmp); - return 0; -} - -static PyObject * -__Pyx_Coroutine_get_qualname(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context) -{ - PyObject *name = self->gi_qualname; - // avoid NULL pointer dereference during garbage collection - if (unlikely(!name)) name = Py_None; - Py_INCREF(name); - return name; -} - -static int -__Pyx_Coroutine_set_qualname(__pyx_CoroutineObject *self, PyObject *value, CYTHON_UNUSED void *context) -{ - PyObject *tmp; - -#if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) -#else - if (unlikely(value == NULL || !PyString_Check(value))) -#endif - { - PyErr_SetString(PyExc_TypeError, - "__qualname__ must be set to a string object"); - return -1; - } - tmp = self->gi_qualname; - Py_INCREF(value); - self->gi_qualname = value; - Py_XDECREF(tmp); - return 0; -} - + _Py_AS_GC(self)->gc.gc_refs != _PyGC_REFS_UNTRACKED); + + // If Py_REF_DEBUG, _Py_NewReference bumped _Py_RefTotal, so + // we need to undo that. + _Py_DEC_REFTOTAL; +#endif + // If Py_TRACE_REFS, _Py_NewReference re-added self to the object + // chain, so no more to do there. + // If COUNT_ALLOCS, the original decref bumped tp_frees, and + // _Py_NewReference bumped tp_allocs: both of those need to be + // undone. 
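// [Editor's note] This bookkeeping exists because the pre-tp_finalize path fakes a
// resurrection: the refcount was forced from 0 back to 1 so close() could run, and
// _Py_NewReference() above re-registered the object as if freshly allocated. Every debug
// counter disturbed along the way (_Py_RefTotal under Py_REF_DEBUG; tp_frees/tp_allocs
// under COUNT_ALLOCS) therefore has to be adjusted back to keep the statistics balanced.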
+#ifdef COUNT_ALLOCS + --Py_TYPE(self)->tp_frees; + --Py_TYPE(self)->tp_allocs; +#endif +#endif +} + +static PyObject * +__Pyx_Coroutine_get_name(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context) +{ + PyObject *name = self->gi_name; + // avoid NULL pointer dereference during garbage collection + if (unlikely(!name)) name = Py_None; + Py_INCREF(name); + return name; +} + +static int +__Pyx_Coroutine_set_name(__pyx_CoroutineObject *self, PyObject *value, CYTHON_UNUSED void *context) +{ + PyObject *tmp; + +#if PY_MAJOR_VERSION >= 3 + if (unlikely(value == NULL || !PyUnicode_Check(value))) +#else + if (unlikely(value == NULL || !PyString_Check(value))) +#endif + { + PyErr_SetString(PyExc_TypeError, + "__name__ must be set to a string object"); + return -1; + } + tmp = self->gi_name; + Py_INCREF(value); + self->gi_name = value; + Py_XDECREF(tmp); + return 0; +} + +static PyObject * +__Pyx_Coroutine_get_qualname(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context) +{ + PyObject *name = self->gi_qualname; + // avoid NULL pointer dereference during garbage collection + if (unlikely(!name)) name = Py_None; + Py_INCREF(name); + return name; +} + +static int +__Pyx_Coroutine_set_qualname(__pyx_CoroutineObject *self, PyObject *value, CYTHON_UNUSED void *context) +{ + PyObject *tmp; + +#if PY_MAJOR_VERSION >= 3 + if (unlikely(value == NULL || !PyUnicode_Check(value))) +#else + if (unlikely(value == NULL || !PyString_Check(value))) +#endif + { + PyErr_SetString(PyExc_TypeError, + "__qualname__ must be set to a string object"); + return -1; + } + tmp = self->gi_qualname; + Py_INCREF(value); + self->gi_qualname = value; + Py_XDECREF(tmp); + return 0; +} + static PyObject * __Pyx_Coroutine_get_frame(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context) @@ -1403,353 +1403,353 @@ __Pyx_Coroutine_get_frame(__pyx_CoroutineObject *self, CYTHON_UNUSED void *conte return frame; } -static __pyx_CoroutineObject *__Pyx__Coroutine_New( +static __pyx_CoroutineObject *__Pyx__Coroutine_New( PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, - PyObject *name, PyObject *qualname, PyObject *module_name) { - __pyx_CoroutineObject *gen = PyObject_GC_New(__pyx_CoroutineObject, type); + PyObject *name, PyObject *qualname, PyObject *module_name) { + __pyx_CoroutineObject *gen = PyObject_GC_New(__pyx_CoroutineObject, type); if (unlikely(!gen)) - return NULL; + return NULL; return __Pyx__Coroutine_NewInit(gen, body, code, closure, name, qualname, module_name); } - + static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit( __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, PyObject *name, PyObject *qualname, PyObject *module_name) { - gen->body = body; - gen->closure = closure; - Py_XINCREF(closure); - gen->is_running = 0; - gen->resume_label = 0; - gen->classobj = NULL; - gen->yieldfrom = NULL; - gen->gi_exc_state.exc_type = NULL; - gen->gi_exc_state.exc_value = NULL; - gen->gi_exc_state.exc_traceback = NULL; -#if CYTHON_USE_EXC_INFO_STACK - gen->gi_exc_state.previous_item = NULL; -#endif - gen->gi_weakreflist = NULL; - Py_XINCREF(qualname); - gen->gi_qualname = qualname; - Py_XINCREF(name); - gen->gi_name = name; - Py_XINCREF(module_name); - gen->gi_modulename = module_name; + gen->body = body; + gen->closure = closure; + Py_XINCREF(closure); + gen->is_running = 0; + gen->resume_label = 0; + gen->classobj = NULL; + gen->yieldfrom = NULL; + gen->gi_exc_state.exc_type = NULL; + gen->gi_exc_state.exc_value = NULL; + 
gen->gi_exc_state.exc_traceback = NULL; +#if CYTHON_USE_EXC_INFO_STACK + gen->gi_exc_state.previous_item = NULL; +#endif + gen->gi_weakreflist = NULL; + Py_XINCREF(qualname); + gen->gi_qualname = qualname; + Py_XINCREF(name); + gen->gi_name = name; + Py_XINCREF(module_name); + gen->gi_modulename = module_name; Py_XINCREF(code); gen->gi_code = code; gen->gi_frame = NULL; - - PyObject_GC_Track(gen); - return gen; -} - - -//////////////////// Coroutine //////////////////// -//@requires: CoroutineBase -//@requires: PatchGeneratorABC + + PyObject_GC_Track(gen); + return gen; +} + + +//////////////////// Coroutine //////////////////// +//@requires: CoroutineBase +//@requires: PatchGeneratorABC //@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict - -static void __Pyx_CoroutineAwait_dealloc(PyObject *self) { - PyObject_GC_UnTrack(self); - Py_CLEAR(((__pyx_CoroutineAwaitObject*)self)->coroutine); - PyObject_GC_Del(self); -} - -static int __Pyx_CoroutineAwait_traverse(__pyx_CoroutineAwaitObject *self, visitproc visit, void *arg) { - Py_VISIT(self->coroutine); - return 0; -} - -static int __Pyx_CoroutineAwait_clear(__pyx_CoroutineAwaitObject *self) { - Py_CLEAR(self->coroutine); - return 0; -} - -static PyObject *__Pyx_CoroutineAwait_Next(__pyx_CoroutineAwaitObject *self) { - return __Pyx_Generator_Next(self->coroutine); -} - -static PyObject *__Pyx_CoroutineAwait_Send(__pyx_CoroutineAwaitObject *self, PyObject *value) { - return __Pyx_Coroutine_Send(self->coroutine, value); -} - -static PyObject *__Pyx_CoroutineAwait_Throw(__pyx_CoroutineAwaitObject *self, PyObject *args) { - return __Pyx_Coroutine_Throw(self->coroutine, args); -} - -static PyObject *__Pyx_CoroutineAwait_Close(__pyx_CoroutineAwaitObject *self, CYTHON_UNUSED PyObject *arg) { - return __Pyx_Coroutine_Close(self->coroutine); -} - -static PyObject *__Pyx_CoroutineAwait_self(PyObject *self) { - Py_INCREF(self); - return self; -} - -#if !CYTHON_COMPILING_IN_PYPY -static PyObject *__Pyx_CoroutineAwait_no_new(CYTHON_UNUSED PyTypeObject *type, CYTHON_UNUSED PyObject *args, CYTHON_UNUSED PyObject *kwargs) { - PyErr_SetString(PyExc_TypeError, "cannot instantiate type, use 'await coroutine' instead"); - return NULL; -} -#endif - -static PyMethodDef __pyx_CoroutineAwait_methods[] = { - {"send", (PyCFunction) __Pyx_CoroutineAwait_Send, METH_O, - (char*) PyDoc_STR("send(arg) -> send 'arg' into coroutine,\nreturn next yielded value or raise StopIteration.")}, - {"throw", (PyCFunction) __Pyx_CoroutineAwait_Throw, METH_VARARGS, - (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in coroutine,\nreturn next yielded value or raise StopIteration.")}, - {"close", (PyCFunction) __Pyx_CoroutineAwait_Close, METH_NOARGS, - (char*) PyDoc_STR("close() -> raise GeneratorExit inside coroutine.")}, - {0, 0, 0, 0} -}; - -static PyTypeObject __pyx_CoroutineAwaitType_type = { - PyVarObject_HEAD_INIT(0, 0) - "coroutine_wrapper", /*tp_name*/ - sizeof(__pyx_CoroutineAwaitObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - (destructor) __Pyx_CoroutineAwait_dealloc,/*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - 0, /*tp_as_async resp. 
tp_compare*/ - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - PyDoc_STR("A wrapper object implementing __await__ for coroutines."), /*tp_doc*/ - (traverseproc) __Pyx_CoroutineAwait_traverse, /*tp_traverse*/ - (inquiry) __Pyx_CoroutineAwait_clear, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - __Pyx_CoroutineAwait_self, /*tp_iter*/ - (iternextfunc) __Pyx_CoroutineAwait_Next, /*tp_iternext*/ - __pyx_CoroutineAwait_methods, /*tp_methods*/ - 0 , /*tp_members*/ - 0 , /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ -#if !CYTHON_COMPILING_IN_PYPY - __Pyx_CoroutineAwait_no_new, /*tp_new*/ -#else - 0, /*tp_new*/ -#endif - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ -#if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ -#endif + +static void __Pyx_CoroutineAwait_dealloc(PyObject *self) { + PyObject_GC_UnTrack(self); + Py_CLEAR(((__pyx_CoroutineAwaitObject*)self)->coroutine); + PyObject_GC_Del(self); +} + +static int __Pyx_CoroutineAwait_traverse(__pyx_CoroutineAwaitObject *self, visitproc visit, void *arg) { + Py_VISIT(self->coroutine); + return 0; +} + +static int __Pyx_CoroutineAwait_clear(__pyx_CoroutineAwaitObject *self) { + Py_CLEAR(self->coroutine); + return 0; +} + +static PyObject *__Pyx_CoroutineAwait_Next(__pyx_CoroutineAwaitObject *self) { + return __Pyx_Generator_Next(self->coroutine); +} + +static PyObject *__Pyx_CoroutineAwait_Send(__pyx_CoroutineAwaitObject *self, PyObject *value) { + return __Pyx_Coroutine_Send(self->coroutine, value); +} + +static PyObject *__Pyx_CoroutineAwait_Throw(__pyx_CoroutineAwaitObject *self, PyObject *args) { + return __Pyx_Coroutine_Throw(self->coroutine, args); +} + +static PyObject *__Pyx_CoroutineAwait_Close(__pyx_CoroutineAwaitObject *self, CYTHON_UNUSED PyObject *arg) { + return __Pyx_Coroutine_Close(self->coroutine); +} + +static PyObject *__Pyx_CoroutineAwait_self(PyObject *self) { + Py_INCREF(self); + return self; +} + +#if !CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_CoroutineAwait_no_new(CYTHON_UNUSED PyTypeObject *type, CYTHON_UNUSED PyObject *args, CYTHON_UNUSED PyObject *kwargs) { + PyErr_SetString(PyExc_TypeError, "cannot instantiate type, use 'await coroutine' instead"); + return NULL; +} +#endif + +static PyMethodDef __pyx_CoroutineAwait_methods[] = { + {"send", (PyCFunction) __Pyx_CoroutineAwait_Send, METH_O, + (char*) PyDoc_STR("send(arg) -> send 'arg' into coroutine,\nreturn next yielded value or raise StopIteration.")}, + {"throw", (PyCFunction) __Pyx_CoroutineAwait_Throw, METH_VARARGS, + (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in coroutine,\nreturn next yielded value or raise StopIteration.")}, + {"close", (PyCFunction) __Pyx_CoroutineAwait_Close, METH_NOARGS, + (char*) PyDoc_STR("close() -> raise GeneratorExit inside coroutine.")}, + {0, 0, 0, 0} +}; + +static PyTypeObject __pyx_CoroutineAwaitType_type = { + PyVarObject_HEAD_INIT(0, 0) + "coroutine_wrapper", /*tp_name*/ + sizeof(__pyx_CoroutineAwaitObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor) __Pyx_CoroutineAwait_dealloc,/*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 
0, /*tp_as_async resp. tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + PyDoc_STR("A wrapper object implementing __await__ for coroutines."), /*tp_doc*/ + (traverseproc) __Pyx_CoroutineAwait_traverse, /*tp_traverse*/ + (inquiry) __Pyx_CoroutineAwait_clear, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + __Pyx_CoroutineAwait_self, /*tp_iter*/ + (iternextfunc) __Pyx_CoroutineAwait_Next, /*tp_iternext*/ + __pyx_CoroutineAwait_methods, /*tp_methods*/ + 0 , /*tp_members*/ + 0 , /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ +#if !CYTHON_COMPILING_IN_PYPY + __Pyx_CoroutineAwait_no_new, /*tp_new*/ +#else + 0, /*tp_new*/ +#endif + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ +#if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ +#endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif + 0, /*tp_vectorcall*/ +#endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ -#endif +#endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif -}; - -#if PY_VERSION_HEX < 0x030500B1 || defined(__Pyx_IterableCoroutine_USED) || CYTHON_USE_ASYNC_SLOTS -static CYTHON_INLINE PyObject *__Pyx__Coroutine_await(PyObject *coroutine) { - __pyx_CoroutineAwaitObject *await = PyObject_GC_New(__pyx_CoroutineAwaitObject, __pyx_CoroutineAwaitType); - if (unlikely(!await)) return NULL; - Py_INCREF(coroutine); - await->coroutine = coroutine; - PyObject_GC_Track(await); - return (PyObject*)await; -} -#endif - -#if PY_VERSION_HEX < 0x030500B1 -static PyObject *__Pyx_Coroutine_await_method(PyObject *coroutine, CYTHON_UNUSED PyObject *arg) { - return __Pyx__Coroutine_await(coroutine); -} -#endif - -#if defined(__Pyx_IterableCoroutine_USED) || CYTHON_USE_ASYNC_SLOTS -static PyObject *__Pyx_Coroutine_await(PyObject *coroutine) { +}; + +#if PY_VERSION_HEX < 0x030500B1 || defined(__Pyx_IterableCoroutine_USED) || CYTHON_USE_ASYNC_SLOTS +static CYTHON_INLINE PyObject *__Pyx__Coroutine_await(PyObject *coroutine) { + __pyx_CoroutineAwaitObject *await = PyObject_GC_New(__pyx_CoroutineAwaitObject, __pyx_CoroutineAwaitType); + if (unlikely(!await)) return NULL; + Py_INCREF(coroutine); + await->coroutine = coroutine; + PyObject_GC_Track(await); + return (PyObject*)await; +} +#endif + +#if PY_VERSION_HEX < 0x030500B1 +static PyObject *__Pyx_Coroutine_await_method(PyObject *coroutine, CYTHON_UNUSED PyObject *arg) { + return __Pyx__Coroutine_await(coroutine); +} +#endif + +#if defined(__Pyx_IterableCoroutine_USED) || CYTHON_USE_ASYNC_SLOTS +static PyObject *__Pyx_Coroutine_await(PyObject *coroutine) { if (unlikely(!coroutine || !__Pyx_Coroutine_Check(coroutine))) { - PyErr_SetString(PyExc_TypeError, "invalid input, expected coroutine"); - return NULL; - } - return __Pyx__Coroutine_await(coroutine); -} -#endif - -#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 -static PyObject *__Pyx_Coroutine_compare(PyObject *obj, PyObject *other, int op) { - PyObject* result; - switch (op) { - case Py_EQ: result = (other 
== obj) ? Py_True : Py_False; break; - case Py_NE: result = (other != obj) ? Py_True : Py_False; break; - default: - result = Py_NotImplemented; - } - Py_INCREF(result); - return result; -} -#endif - -static PyMethodDef __pyx_Coroutine_methods[] = { - {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O, - (char*) PyDoc_STR("send(arg) -> send 'arg' into coroutine,\nreturn next iterated value or raise StopIteration.")}, - {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS, - (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in coroutine,\nreturn next iterated value or raise StopIteration.")}, - {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS, - (char*) PyDoc_STR("close() -> raise GeneratorExit inside coroutine.")}, -#if PY_VERSION_HEX < 0x030500B1 - {"__await__", (PyCFunction) __Pyx_Coroutine_await_method, METH_NOARGS, - (char*) PyDoc_STR("__await__() -> return an iterator to be used in await expression.")}, -#endif - {0, 0, 0, 0} -}; - -static PyMemberDef __pyx_Coroutine_memberlist[] = { - {(char *) "cr_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL}, - {(char*) "cr_await", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, - (char*) PyDoc_STR("object being awaited, or None")}, + PyErr_SetString(PyExc_TypeError, "invalid input, expected coroutine"); + return NULL; + } + return __Pyx__Coroutine_await(coroutine); +} +#endif + +#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 +static PyObject *__Pyx_Coroutine_compare(PyObject *obj, PyObject *other, int op) { + PyObject* result; + switch (op) { + case Py_EQ: result = (other == obj) ? Py_True : Py_False; break; + case Py_NE: result = (other != obj) ? Py_True : Py_False; break; + default: + result = Py_NotImplemented; + } + Py_INCREF(result); + return result; +} +#endif + +static PyMethodDef __pyx_Coroutine_methods[] = { + {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O, + (char*) PyDoc_STR("send(arg) -> send 'arg' into coroutine,\nreturn next iterated value or raise StopIteration.")}, + {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS, + (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in coroutine,\nreturn next iterated value or raise StopIteration.")}, + {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS, + (char*) PyDoc_STR("close() -> raise GeneratorExit inside coroutine.")}, +#if PY_VERSION_HEX < 0x030500B1 + {"__await__", (PyCFunction) __Pyx_Coroutine_await_method, METH_NOARGS, + (char*) PyDoc_STR("__await__() -> return an iterator to be used in await expression.")}, +#endif + {0, 0, 0, 0} +}; + +static PyMemberDef __pyx_Coroutine_memberlist[] = { + {(char *) "cr_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL}, + {(char*) "cr_await", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, + (char*) PyDoc_STR("object being awaited, or None")}, {(char*) "cr_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL}, - {(char *) "__module__", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_modulename), PY_WRITE_RESTRICTED, 0}, - {0, 0, 0, 0, 0} -}; - -static PyGetSetDef __pyx_Coroutine_getsets[] = { - {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, - (char*) PyDoc_STR("name of the coroutine"), 0}, - {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, - (char*) PyDoc_STR("qualified name of the coroutine"), 0}, + {(char *) "__module__", 
T_OBJECT, offsetof(__pyx_CoroutineObject, gi_modulename), PY_WRITE_RESTRICTED, 0}, + {0, 0, 0, 0, 0} +}; + +static PyGetSetDef __pyx_Coroutine_getsets[] = { + {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, + (char*) PyDoc_STR("name of the coroutine"), 0}, + {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, + (char*) PyDoc_STR("qualified name of the coroutine"), 0}, {(char *) "cr_frame", (getter)__Pyx_Coroutine_get_frame, NULL, (char*) PyDoc_STR("Frame of the coroutine"), 0}, - {0, 0, 0, 0, 0} -}; - -#if CYTHON_USE_ASYNC_SLOTS -static __Pyx_PyAsyncMethodsStruct __pyx_Coroutine_as_async = { - __Pyx_Coroutine_await, /*am_await*/ - 0, /*am_aiter*/ - 0, /*am_anext*/ + {0, 0, 0, 0, 0} +}; + +#if CYTHON_USE_ASYNC_SLOTS +static __Pyx_PyAsyncMethodsStruct __pyx_Coroutine_as_async = { + __Pyx_Coroutine_await, /*am_await*/ + 0, /*am_aiter*/ + 0, /*am_anext*/ #if PY_VERSION_HEX >= 0x030A00A3 0, /*am_send*/ #endif -}; -#endif - -static PyTypeObject __pyx_CoroutineType_type = { - PyVarObject_HEAD_INIT(0, 0) - "coroutine", /*tp_name*/ - sizeof(__pyx_CoroutineObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ +}; +#endif + +static PyTypeObject __pyx_CoroutineType_type = { + PyVarObject_HEAD_INIT(0, 0) + "coroutine", /*tp_name*/ + sizeof(__pyx_CoroutineObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ (destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ -#if CYTHON_USE_ASYNC_SLOTS - &__pyx_Coroutine_as_async, /*tp_as_async (tp_reserved) - Py3 only! */ -#else - 0, /*tp_reserved*/ -#endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ - 0, /*tp_doc*/ - (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/ - 0, /*tp_clear*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ +#if CYTHON_USE_ASYNC_SLOTS + &__pyx_Coroutine_as_async, /*tp_as_async (tp_reserved) - Py3 only! 
*/ +#else + 0, /*tp_reserved*/ +#endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + 0, /*tp_doc*/ + (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/ + 0, /*tp_clear*/ #if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 - // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare - __Pyx_Coroutine_compare, /*tp_richcompare*/ -#else - 0, /*tp_richcompare*/ -#endif - offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/ + // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare + __Pyx_Coroutine_compare, /*tp_richcompare*/ +#else + 0, /*tp_richcompare*/ +#endif + offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/ // no tp_iter() as iterator is only available through __await__() - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_Coroutine_methods, /*tp_methods*/ - __pyx_Coroutine_memberlist, /*tp_members*/ - __pyx_Coroutine_getsets, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - 0, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_Coroutine_methods, /*tp_methods*/ + __pyx_Coroutine_memberlist, /*tp_members*/ + __pyx_Coroutine_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ #if CYTHON_USE_TP_FINALIZE - 0, /*tp_del*/ -#else - __Pyx_Coroutine_del, /*tp_del*/ -#endif - 0, /*tp_version_tag*/ + 0, /*tp_del*/ +#else + __Pyx_Coroutine_del, /*tp_del*/ +#endif + 0, /*tp_version_tag*/ #if CYTHON_USE_TP_FINALIZE - __Pyx_Coroutine_del, /*tp_finalize*/ + __Pyx_Coroutine_del, /*tp_finalize*/ #elif PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ -#endif +#endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif + 0, /*tp_vectorcall*/ +#endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ -#endif +#endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif -}; - -static int __pyx_Coroutine_init(void) { - // on Windows, C-API functions can't be used in slots statically +}; + +static int __pyx_Coroutine_init(void) { + // on Windows, C-API functions can't be used in slots statically __pyx_CoroutineType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - __pyx_CoroutineType = __Pyx_FetchCommonType(&__pyx_CoroutineType_type); - if (unlikely(!__pyx_CoroutineType)) - return -1; - + __pyx_CoroutineType = __Pyx_FetchCommonType(&__pyx_CoroutineType_type); + if (unlikely(!__pyx_CoroutineType)) + return -1; + #ifdef __Pyx_IterableCoroutine_USED if (unlikely(__pyx_IterableCoroutine_init() == -1)) return -1; #endif - __pyx_CoroutineAwaitType = __Pyx_FetchCommonType(&__pyx_CoroutineAwaitType_type); - if (unlikely(!__pyx_CoroutineAwaitType)) - return -1; - return 0; -} - + __pyx_CoroutineAwaitType = 
__Pyx_FetchCommonType(&__pyx_CoroutineAwaitType_type); + if (unlikely(!__pyx_CoroutineAwaitType)) + return -1; + return 0; +} + //////////////////// IterableCoroutine.proto //////////////////// @@ -1836,11 +1836,11 @@ static PyTypeObject __pyx_IterableCoroutineType_type = { __Pyx_Coroutine_del, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif + 0, /*tp_vectorcall*/ +#endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ -#endif +#endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif @@ -1856,531 +1856,531 @@ static int __pyx_IterableCoroutine_init(void) { } -//////////////////// Generator //////////////////// -//@requires: CoroutineBase -//@requires: PatchGeneratorABC +//////////////////// Generator //////////////////// +//@requires: CoroutineBase +//@requires: PatchGeneratorABC //@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict - -static PyMethodDef __pyx_Generator_methods[] = { - {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O, - (char*) PyDoc_STR("send(arg) -> send 'arg' into generator,\nreturn next yielded value or raise StopIteration.")}, - {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS, - (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in generator,\nreturn next yielded value or raise StopIteration.")}, - {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS, - (char*) PyDoc_STR("close() -> raise GeneratorExit inside generator.")}, - {0, 0, 0, 0} -}; - -static PyMemberDef __pyx_Generator_memberlist[] = { - {(char *) "gi_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL}, - {(char*) "gi_yieldfrom", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, - (char*) PyDoc_STR("object being iterated by 'yield from', or None")}, + +static PyMethodDef __pyx_Generator_methods[] = { + {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O, + (char*) PyDoc_STR("send(arg) -> send 'arg' into generator,\nreturn next yielded value or raise StopIteration.")}, + {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS, + (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in generator,\nreturn next yielded value or raise StopIteration.")}, + {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS, + (char*) PyDoc_STR("close() -> raise GeneratorExit inside generator.")}, + {0, 0, 0, 0} +}; + +static PyMemberDef __pyx_Generator_memberlist[] = { + {(char *) "gi_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL}, + {(char*) "gi_yieldfrom", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, + (char*) PyDoc_STR("object being iterated by 'yield from', or None")}, {(char*) "gi_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL}, - {0, 0, 0, 0, 0} -}; - -static PyGetSetDef __pyx_Generator_getsets[] = { - {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, - (char*) PyDoc_STR("name of the generator"), 0}, - {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, - (char*) PyDoc_STR("qualified name of the generator"), 0}, + {0, 0, 0, 0, 0} +}; + +static PyGetSetDef __pyx_Generator_getsets[] = { + {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, + (char*) PyDoc_STR("name of the generator"), 0}, + {(char *) "__qualname__", 
(getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, + (char*) PyDoc_STR("qualified name of the generator"), 0}, {(char *) "gi_frame", (getter)__Pyx_Coroutine_get_frame, NULL, (char*) PyDoc_STR("Frame of the generator"), 0}, - {0, 0, 0, 0, 0} -}; - -static PyTypeObject __pyx_GeneratorType_type = { - PyVarObject_HEAD_INIT(0, 0) - "generator", /*tp_name*/ - sizeof(__pyx_CoroutineObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - (destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - 0, /*tp_compare / tp_as_async*/ - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ - 0, /*tp_doc*/ - (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/ - 0, /*tp_clear*/ - 0, /*tp_richcompare*/ - offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - (iternextfunc) __Pyx_Generator_Next, /*tp_iternext*/ - __pyx_Generator_methods, /*tp_methods*/ - __pyx_Generator_memberlist, /*tp_members*/ - __pyx_Generator_getsets, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - 0, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ + {0, 0, 0, 0, 0} +}; + +static PyTypeObject __pyx_GeneratorType_type = { + PyVarObject_HEAD_INIT(0, 0) + "generator", /*tp_name*/ + sizeof(__pyx_CoroutineObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare / tp_as_async*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + 0, /*tp_doc*/ + (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + (iternextfunc) __Pyx_Generator_Next, /*tp_iternext*/ + __pyx_Generator_methods, /*tp_methods*/ + __pyx_Generator_memberlist, /*tp_members*/ + __pyx_Generator_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ #if CYTHON_USE_TP_FINALIZE - 0, /*tp_del*/ -#else - __Pyx_Coroutine_del, /*tp_del*/ -#endif - 0, /*tp_version_tag*/ + 0, /*tp_del*/ +#else + __Pyx_Coroutine_del, /*tp_del*/ +#endif + 0, /*tp_version_tag*/ #if CYTHON_USE_TP_FINALIZE - __Pyx_Coroutine_del, /*tp_finalize*/ + __Pyx_Coroutine_del, /*tp_finalize*/ #elif PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ -#endif +#endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif + 0, /*tp_vectorcall*/ +#endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ -#endif +#endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 
0, /*tp_pypy_flags*/ #endif -}; - -static int __pyx_Generator_init(void) { - // on Windows, C-API functions can't be used in slots statically +}; + +static int __pyx_Generator_init(void) { + // on Windows, C-API functions can't be used in slots statically __pyx_GeneratorType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - __pyx_GeneratorType_type.tp_iter = PyObject_SelfIter; - - __pyx_GeneratorType = __Pyx_FetchCommonType(&__pyx_GeneratorType_type); - if (unlikely(!__pyx_GeneratorType)) { - return -1; - } - return 0; -} - - -/////////////// ReturnWithStopIteration.proto /////////////// - -#define __Pyx_ReturnWithStopIteration(value) \ - if (value == Py_None) PyErr_SetNone(PyExc_StopIteration); else __Pyx__ReturnWithStopIteration(value) -static void __Pyx__ReturnWithStopIteration(PyObject* value); /*proto*/ - -/////////////// ReturnWithStopIteration /////////////// -//@requires: Exceptions.c::PyErrFetchRestore -//@requires: Exceptions.c::PyThreadStateGet -//@substitute: naming - -// 1) Instantiating an exception just to pass back a value is costly. -// 2) CPython 3.3 <= x < 3.5b1 crash in yield-from when the StopIteration is not instantiated. -// 3) Passing a tuple as value into PyErr_SetObject() passes its items on as arguments. + __pyx_GeneratorType_type.tp_iter = PyObject_SelfIter; + + __pyx_GeneratorType = __Pyx_FetchCommonType(&__pyx_GeneratorType_type); + if (unlikely(!__pyx_GeneratorType)) { + return -1; + } + return 0; +} + + +/////////////// ReturnWithStopIteration.proto /////////////// + +#define __Pyx_ReturnWithStopIteration(value) \ + if (value == Py_None) PyErr_SetNone(PyExc_StopIteration); else __Pyx__ReturnWithStopIteration(value) +static void __Pyx__ReturnWithStopIteration(PyObject* value); /*proto*/ + +/////////////// ReturnWithStopIteration /////////////// +//@requires: Exceptions.c::PyErrFetchRestore +//@requires: Exceptions.c::PyThreadStateGet +//@substitute: naming + +// 1) Instantiating an exception just to pass back a value is costly. +// 2) CPython 3.3 <= x < 3.5b1 crash in yield-from when the StopIteration is not instantiated. +// 3) Passing a tuple as value into PyErr_SetObject() passes its items on as arguments. // 4) Passing an exception as value will interpret it as an exception on unpacking and raise it (or unpack its value). // 5) If there is currently an exception being handled, we need to chain it. 
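For context on the points above: the always-correct baseline is to instantiate StopIteration(value) unconditionally, which is exactly what the non-CPython fallback branch of the function below does. A minimal self-contained sketch of that baseline against the plain C-API, assuming an initialized interpreter; the function name is hypothetical and not part of the patch:

#include <Python.h>

/* Baseline without the fast path: always build StopIteration(value).
 * Calling the exception type with a single argument sidesteps points
 * 3) and 4), because PyErr_SetObject() then receives a ready exception
 * instance rather than a bare tuple or exception value, and point 5)
 * (implicit chaining) is handled inside PyErr_SetObject() itself.
 * What this cannot avoid is the allocation that point 1) calls costly. */
static void return_with_stopiteration_baseline(PyObject *value) {
    PyObject *exc = PyObject_CallFunctionObjArgs(PyExc_StopIteration, value, NULL);
    if (exc == NULL) return;  /* instantiation failed; propagate that error instead */
    PyErr_SetObject(PyExc_StopIteration, exc);
    Py_DECREF(exc);
}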
- -static void __Pyx__ReturnWithStopIteration(PyObject* value) { - PyObject *exc, *args; -#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_PYSTON - __Pyx_PyThreadState_declare + +static void __Pyx__ReturnWithStopIteration(PyObject* value) { + PyObject *exc, *args; +#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_PYSTON + __Pyx_PyThreadState_declare if ((PY_VERSION_HEX >= 0x03030000 && PY_VERSION_HEX < 0x030500B1) || unlikely(PyTuple_Check(value) || PyExceptionInstance_Check(value))) { - args = PyTuple_New(1); - if (unlikely(!args)) return; - Py_INCREF(value); - PyTuple_SET_ITEM(args, 0, value); - exc = PyType_Type.tp_call(PyExc_StopIteration, args, NULL); - Py_DECREF(args); - if (!exc) return; - } else { - // it's safe to avoid instantiating the exception - Py_INCREF(value); - exc = value; - } + args = PyTuple_New(1); + if (unlikely(!args)) return; + Py_INCREF(value); + PyTuple_SET_ITEM(args, 0, value); + exc = PyType_Type.tp_call(PyExc_StopIteration, args, NULL); + Py_DECREF(args); + if (!exc) return; + } else { + // it's safe to avoid instantiating the exception + Py_INCREF(value); + exc = value; + } #if CYTHON_FAST_THREAD_STATE - __Pyx_PyThreadState_assign - #if CYTHON_USE_EXC_INFO_STACK - if (!$local_tstate_cname->exc_info->exc_type) + __Pyx_PyThreadState_assign + #if CYTHON_USE_EXC_INFO_STACK + if (!$local_tstate_cname->exc_info->exc_type) #else if (!$local_tstate_cname->exc_type) #endif { - // no chaining needed => avoid the overhead in PyErr_SetObject() - Py_INCREF(PyExc_StopIteration); - __Pyx_ErrRestore(PyExc_StopIteration, exc, NULL); - return; - } + // no chaining needed => avoid the overhead in PyErr_SetObject() + Py_INCREF(PyExc_StopIteration); + __Pyx_ErrRestore(PyExc_StopIteration, exc, NULL); + return; + } + #endif +#else + args = PyTuple_Pack(1, value); + if (unlikely(!args)) return; + exc = PyObject_Call(PyExc_StopIteration, args, NULL); + Py_DECREF(args); + if (unlikely(!exc)) return; +#endif + PyErr_SetObject(PyExc_StopIteration, exc); + Py_DECREF(exc); +} + + +//////////////////// PatchModuleWithCoroutine.proto //////////////////// + +static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code); /*proto*/ + +//////////////////// PatchModuleWithCoroutine //////////////////// +//@substitute: naming + +static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code) { +#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + int result; + PyObject *globals, *result_obj; + globals = PyDict_New(); if (unlikely(!globals)) goto ignore; + result = PyDict_SetItemString(globals, "_cython_coroutine_type", + #ifdef __Pyx_Coroutine_USED + (PyObject*)__pyx_CoroutineType); + #else + Py_None); + #endif + if (unlikely(result < 0)) goto ignore; + result = PyDict_SetItemString(globals, "_cython_generator_type", + #ifdef __Pyx_Generator_USED + (PyObject*)__pyx_GeneratorType); + #else + Py_None); #endif -#else - args = PyTuple_Pack(1, value); - if (unlikely(!args)) return; - exc = PyObject_Call(PyExc_StopIteration, args, NULL); - Py_DECREF(args); - if (unlikely(!exc)) return; -#endif - PyErr_SetObject(PyExc_StopIteration, exc); - Py_DECREF(exc); -} - - -//////////////////// PatchModuleWithCoroutine.proto //////////////////// - -static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code); /*proto*/ - -//////////////////// PatchModuleWithCoroutine //////////////////// -//@substitute: naming - -static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code) { -#if 
defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - int result; - PyObject *globals, *result_obj; - globals = PyDict_New(); if (unlikely(!globals)) goto ignore; - result = PyDict_SetItemString(globals, "_cython_coroutine_type", - #ifdef __Pyx_Coroutine_USED - (PyObject*)__pyx_CoroutineType); - #else - Py_None); - #endif - if (unlikely(result < 0)) goto ignore; - result = PyDict_SetItemString(globals, "_cython_generator_type", - #ifdef __Pyx_Generator_USED - (PyObject*)__pyx_GeneratorType); - #else - Py_None); - #endif - if (unlikely(result < 0)) goto ignore; - if (unlikely(PyDict_SetItemString(globals, "_module", module) < 0)) goto ignore; - if (unlikely(PyDict_SetItemString(globals, "__builtins__", $builtins_cname) < 0)) goto ignore; - result_obj = PyRun_String(py_code, Py_file_input, globals, globals); - if (unlikely(!result_obj)) goto ignore; - Py_DECREF(result_obj); - Py_DECREF(globals); - return module; - -ignore: - Py_XDECREF(globals); - PyErr_WriteUnraisable(module); - if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch module with custom type", 1) < 0)) { - Py_DECREF(module); - module = NULL; - } -#else - // avoid "unused" warning - py_code++; -#endif - return module; -} - - -//////////////////// PatchGeneratorABC.proto //////////////////// - -// register with Generator/Coroutine ABCs in 'collections.abc' -// see https://bugs.python.org/issue24018 -static int __Pyx_patch_abc(void); /*proto*/ - -//////////////////// PatchGeneratorABC //////////////////// -//@requires: PatchModuleWithCoroutine - + if (unlikely(result < 0)) goto ignore; + if (unlikely(PyDict_SetItemString(globals, "_module", module) < 0)) goto ignore; + if (unlikely(PyDict_SetItemString(globals, "__builtins__", $builtins_cname) < 0)) goto ignore; + result_obj = PyRun_String(py_code, Py_file_input, globals, globals); + if (unlikely(!result_obj)) goto ignore; + Py_DECREF(result_obj); + Py_DECREF(globals); + return module; + +ignore: + Py_XDECREF(globals); + PyErr_WriteUnraisable(module); + if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch module with custom type", 1) < 0)) { + Py_DECREF(module); + module = NULL; + } +#else + // avoid "unused" warning + py_code++; +#endif + return module; +} + + +//////////////////// PatchGeneratorABC.proto //////////////////// + +// register with Generator/Coroutine ABCs in 'collections.abc' +// see https://bugs.python.org/issue24018 +static int __Pyx_patch_abc(void); /*proto*/ + +//////////////////// PatchGeneratorABC //////////////////// +//@requires: PatchModuleWithCoroutine + #ifndef CYTHON_REGISTER_ABCS #define CYTHON_REGISTER_ABCS 1 #endif -#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) -static PyObject* __Pyx_patch_abc_module(PyObject *module); /*proto*/ -static PyObject* __Pyx_patch_abc_module(PyObject *module) { - module = __Pyx_Coroutine_patch_module( - module, CSTRING("""\ -if _cython_generator_type is not None: - try: Generator = _module.Generator - except AttributeError: pass - else: Generator.register(_cython_generator_type) -if _cython_coroutine_type is not None: - try: Coroutine = _module.Coroutine - except AttributeError: pass - else: Coroutine.register(_cython_coroutine_type) -""") - ); - return module; -} -#endif - -static int __Pyx_patch_abc(void) { -#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - static int abc_patched = 0; +#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) +static PyObject* __Pyx_patch_abc_module(PyObject *module); 
/*proto*/ +static PyObject* __Pyx_patch_abc_module(PyObject *module) { + module = __Pyx_Coroutine_patch_module( + module, CSTRING("""\ +if _cython_generator_type is not None: + try: Generator = _module.Generator + except AttributeError: pass + else: Generator.register(_cython_generator_type) +if _cython_coroutine_type is not None: + try: Coroutine = _module.Coroutine + except AttributeError: pass + else: Coroutine.register(_cython_coroutine_type) +""") + ); + return module; +} +#endif + +static int __Pyx_patch_abc(void) { +#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + static int abc_patched = 0; if (CYTHON_REGISTER_ABCS && !abc_patched) { - PyObject *module; + PyObject *module; module = PyImport_ImportModule((PY_MAJOR_VERSION >= 3) ? "collections.abc" : "collections"); - if (!module) { - PyErr_WriteUnraisable(NULL); - if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, + if (!module) { + PyErr_WriteUnraisable(NULL); + if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, ((PY_MAJOR_VERSION >= 3) ? - "Cython module failed to register with collections.abc module" : - "Cython module failed to register with collections module"), 1) < 0)) { - return -1; - } - } else { - module = __Pyx_patch_abc_module(module); - abc_patched = 1; - if (unlikely(!module)) - return -1; - Py_DECREF(module); - } - // also register with "backports_abc" module if available, just in case - module = PyImport_ImportModule("backports_abc"); - if (module) { - module = __Pyx_patch_abc_module(module); - Py_XDECREF(module); - } - if (!module) { - PyErr_Clear(); - } - } -#else - // avoid "unused" warning for __Pyx_Coroutine_patch_module() + "Cython module failed to register with collections.abc module" : + "Cython module failed to register with collections module"), 1) < 0)) { + return -1; + } + } else { + module = __Pyx_patch_abc_module(module); + abc_patched = 1; + if (unlikely(!module)) + return -1; + Py_DECREF(module); + } + // also register with "backports_abc" module if available, just in case + module = PyImport_ImportModule("backports_abc"); + if (module) { + module = __Pyx_patch_abc_module(module); + Py_XDECREF(module); + } + if (!module) { + PyErr_Clear(); + } + } +#else + // avoid "unused" warning for __Pyx_Coroutine_patch_module() if ((0)) __Pyx_Coroutine_patch_module(NULL, NULL); -#endif - return 0; -} - - -//////////////////// PatchAsyncIO.proto //////////////////// - -// run after importing "asyncio" to patch Cython generator support into it -static PyObject* __Pyx_patch_asyncio(PyObject* module); /*proto*/ - -//////////////////// PatchAsyncIO //////////////////// -//@requires: ImportExport.c::Import -//@requires: PatchModuleWithCoroutine -//@requires: PatchInspect - -static PyObject* __Pyx_patch_asyncio(PyObject* module) { -#if PY_VERSION_HEX < 0x030500B2 && \ - (defined(__Pyx_Coroutine_USED) || defined(__Pyx_Generator_USED)) && \ - (!defined(CYTHON_PATCH_ASYNCIO) || CYTHON_PATCH_ASYNCIO) - PyObject *patch_module = NULL; - static int asyncio_patched = 0; - if (unlikely((!asyncio_patched) && module)) { - PyObject *package; - package = __Pyx_Import(PYIDENT("asyncio.coroutines"), NULL, 0); - if (package) { - patch_module = __Pyx_Coroutine_patch_module( - PyObject_GetAttrString(package, "coroutines"), CSTRING("""\ -try: - coro_types = _module._COROUTINE_TYPES -except AttributeError: pass -else: - if _cython_coroutine_type is not None and _cython_coroutine_type not in coro_types: - coro_types = tuple(coro_types) + (_cython_coroutine_type,) - if _cython_generator_type is not None and 
_cython_generator_type not in coro_types: - coro_types = tuple(coro_types) + (_cython_generator_type,) -_module._COROUTINE_TYPES = coro_types -""") - ); - } else { - PyErr_Clear(); +#endif + return 0; +} + + +//////////////////// PatchAsyncIO.proto //////////////////// + +// run after importing "asyncio" to patch Cython generator support into it +static PyObject* __Pyx_patch_asyncio(PyObject* module); /*proto*/ + +//////////////////// PatchAsyncIO //////////////////// +//@requires: ImportExport.c::Import +//@requires: PatchModuleWithCoroutine +//@requires: PatchInspect + +static PyObject* __Pyx_patch_asyncio(PyObject* module) { +#if PY_VERSION_HEX < 0x030500B2 && \ + (defined(__Pyx_Coroutine_USED) || defined(__Pyx_Generator_USED)) && \ + (!defined(CYTHON_PATCH_ASYNCIO) || CYTHON_PATCH_ASYNCIO) + PyObject *patch_module = NULL; + static int asyncio_patched = 0; + if (unlikely((!asyncio_patched) && module)) { + PyObject *package; + package = __Pyx_Import(PYIDENT("asyncio.coroutines"), NULL, 0); + if (package) { + patch_module = __Pyx_Coroutine_patch_module( + PyObject_GetAttrString(package, "coroutines"), CSTRING("""\ +try: + coro_types = _module._COROUTINE_TYPES +except AttributeError: pass +else: + if _cython_coroutine_type is not None and _cython_coroutine_type not in coro_types: + coro_types = tuple(coro_types) + (_cython_coroutine_type,) + if _cython_generator_type is not None and _cython_generator_type not in coro_types: + coro_types = tuple(coro_types) + (_cython_generator_type,) +_module._COROUTINE_TYPES = coro_types +""") + ); + } else { + PyErr_Clear(); // Always enable fallback: even if we compile against 3.4.2, we might be running on 3.4.1 at some point. //#if PY_VERSION_HEX < 0x03040200 - // Py3.4.1 used to have asyncio.tasks instead of asyncio.coroutines - package = __Pyx_Import(PYIDENT("asyncio.tasks"), NULL, 0); - if (unlikely(!package)) goto asyncio_done; - patch_module = __Pyx_Coroutine_patch_module( - PyObject_GetAttrString(package, "tasks"), CSTRING("""\ -if hasattr(_module, 'iscoroutine'): - old_types = getattr(_module.iscoroutine, '_cython_coroutine_types', None) - if old_types is None or not isinstance(old_types, set): - old_types = set() - def cy_wrap(orig_func, type=type, cython_coroutine_types=old_types): - def cy_iscoroutine(obj): return type(obj) in cython_coroutine_types or orig_func(obj) - cy_iscoroutine._cython_coroutine_types = cython_coroutine_types - return cy_iscoroutine - _module.iscoroutine = cy_wrap(_module.iscoroutine) - if _cython_coroutine_type is not None: - old_types.add(_cython_coroutine_type) - if _cython_generator_type is not None: - old_types.add(_cython_generator_type) -""") - ); + // Py3.4.1 used to have asyncio.tasks instead of asyncio.coroutines + package = __Pyx_Import(PYIDENT("asyncio.tasks"), NULL, 0); + if (unlikely(!package)) goto asyncio_done; + patch_module = __Pyx_Coroutine_patch_module( + PyObject_GetAttrString(package, "tasks"), CSTRING("""\ +if hasattr(_module, 'iscoroutine'): + old_types = getattr(_module.iscoroutine, '_cython_coroutine_types', None) + if old_types is None or not isinstance(old_types, set): + old_types = set() + def cy_wrap(orig_func, type=type, cython_coroutine_types=old_types): + def cy_iscoroutine(obj): return type(obj) in cython_coroutine_types or orig_func(obj) + cy_iscoroutine._cython_coroutine_types = cython_coroutine_types + return cy_iscoroutine + _module.iscoroutine = cy_wrap(_module.iscoroutine) + if _cython_coroutine_type is not None: + old_types.add(_cython_coroutine_type) + if _cython_generator_type 
is not None: + old_types.add(_cython_generator_type) +""") + ); //#endif -// Py < 0x03040200 - } - Py_DECREF(package); - if (unlikely(!patch_module)) goto ignore; +// Py < 0x03040200 + } + Py_DECREF(package); + if (unlikely(!patch_module)) goto ignore; //#if PY_VERSION_HEX < 0x03040200 -asyncio_done: - PyErr_Clear(); +asyncio_done: + PyErr_Clear(); //#endif - asyncio_patched = 1; -#ifdef __Pyx_Generator_USED - // now patch inspect.isgenerator() by looking up the imported module in the patched asyncio module - { - PyObject *inspect_module; - if (patch_module) { - inspect_module = PyObject_GetAttr(patch_module, PYIDENT("inspect")); - Py_DECREF(patch_module); - } else { - inspect_module = __Pyx_Import(PYIDENT("inspect"), NULL, 0); - } - if (unlikely(!inspect_module)) goto ignore; - inspect_module = __Pyx_patch_inspect(inspect_module); - if (unlikely(!inspect_module)) { - Py_DECREF(module); - module = NULL; - } - Py_XDECREF(inspect_module); - } -#else - // avoid "unused" warning for __Pyx_patch_inspect() + asyncio_patched = 1; +#ifdef __Pyx_Generator_USED + // now patch inspect.isgenerator() by looking up the imported module in the patched asyncio module + { + PyObject *inspect_module; + if (patch_module) { + inspect_module = PyObject_GetAttr(patch_module, PYIDENT("inspect")); + Py_DECREF(patch_module); + } else { + inspect_module = __Pyx_Import(PYIDENT("inspect"), NULL, 0); + } + if (unlikely(!inspect_module)) goto ignore; + inspect_module = __Pyx_patch_inspect(inspect_module); + if (unlikely(!inspect_module)) { + Py_DECREF(module); + module = NULL; + } + Py_XDECREF(inspect_module); + } +#else + // avoid "unused" warning for __Pyx_patch_inspect() if ((0)) return __Pyx_patch_inspect(module); -#endif - } - return module; -ignore: - PyErr_WriteUnraisable(module); - if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch asyncio package with custom generator type", 1) < 0)) { - Py_DECREF(module); - module = NULL; - } -#else - // avoid "unused" warning for __Pyx_Coroutine_patch_module() +#endif + } + return module; +ignore: + PyErr_WriteUnraisable(module); + if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch asyncio package with custom generator type", 1) < 0)) { + Py_DECREF(module); + module = NULL; + } +#else + // avoid "unused" warning for __Pyx_Coroutine_patch_module() if ((0)) return __Pyx_patch_inspect(__Pyx_Coroutine_patch_module(module, NULL)); -#endif - return module; -} - - -//////////////////// PatchInspect.proto //////////////////// - -// run after importing "inspect" to patch Cython generator support into it -static PyObject* __Pyx_patch_inspect(PyObject* module); /*proto*/ - -//////////////////// PatchInspect //////////////////// -//@requires: PatchModuleWithCoroutine - -static PyObject* __Pyx_patch_inspect(PyObject* module) { -#if defined(__Pyx_Generator_USED) && (!defined(CYTHON_PATCH_INSPECT) || CYTHON_PATCH_INSPECT) - static int inspect_patched = 0; - if (unlikely((!inspect_patched) && module)) { - module = __Pyx_Coroutine_patch_module( - module, CSTRING("""\ -old_types = getattr(_module.isgenerator, '_cython_generator_types', None) -if old_types is None or not isinstance(old_types, set): - old_types = set() - def cy_wrap(orig_func, type=type, cython_generator_types=old_types): - def cy_isgenerator(obj): return type(obj) in cython_generator_types or orig_func(obj) - cy_isgenerator._cython_generator_types = cython_generator_types - return cy_isgenerator - _module.isgenerator = cy_wrap(_module.isgenerator) 
-old_types.add(_cython_generator_type) -""") - ); - inspect_patched = 1; - } -#else - // avoid "unused" warning for __Pyx_Coroutine_patch_module() +#endif + return module; +} + + +//////////////////// PatchInspect.proto //////////////////// + +// run after importing "inspect" to patch Cython generator support into it +static PyObject* __Pyx_patch_inspect(PyObject* module); /*proto*/ + +//////////////////// PatchInspect //////////////////// +//@requires: PatchModuleWithCoroutine + +static PyObject* __Pyx_patch_inspect(PyObject* module) { +#if defined(__Pyx_Generator_USED) && (!defined(CYTHON_PATCH_INSPECT) || CYTHON_PATCH_INSPECT) + static int inspect_patched = 0; + if (unlikely((!inspect_patched) && module)) { + module = __Pyx_Coroutine_patch_module( + module, CSTRING("""\ +old_types = getattr(_module.isgenerator, '_cython_generator_types', None) +if old_types is None or not isinstance(old_types, set): + old_types = set() + def cy_wrap(orig_func, type=type, cython_generator_types=old_types): + def cy_isgenerator(obj): return type(obj) in cython_generator_types or orig_func(obj) + cy_isgenerator._cython_generator_types = cython_generator_types + return cy_isgenerator + _module.isgenerator = cy_wrap(_module.isgenerator) +old_types.add(_cython_generator_type) +""") + ); + inspect_patched = 1; + } +#else + // avoid "unused" warning for __Pyx_Coroutine_patch_module() if ((0)) return __Pyx_Coroutine_patch_module(module, NULL); -#endif - return module; -} - - -//////////////////// StopAsyncIteration.proto //////////////////// - -#define __Pyx_StopAsyncIteration_USED -static PyObject *__Pyx_PyExc_StopAsyncIteration; -static int __pyx_StopAsyncIteration_init(void); /*proto*/ - -//////////////////// StopAsyncIteration //////////////////// - -#if PY_VERSION_HEX < 0x030500B1 -static PyTypeObject __Pyx__PyExc_StopAsyncIteration_type = { - PyVarObject_HEAD_INIT(0, 0) - "StopAsyncIteration", /*tp_name*/ - sizeof(PyBaseExceptionObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - 0, /*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - 0, /*tp_compare / reserved*/ - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - PyDoc_STR("Signal the end from iterator.__anext__()."), /*tp_doc*/ - 0, /*tp_traverse*/ - 0, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - 0, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - 0, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ -#if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ -#endif +#endif + return module; +} + + +//////////////////// StopAsyncIteration.proto //////////////////// + +#define __Pyx_StopAsyncIteration_USED +static PyObject *__Pyx_PyExc_StopAsyncIteration; +static int __pyx_StopAsyncIteration_init(void); /*proto*/ + +//////////////////// StopAsyncIteration //////////////////// + +#if PY_VERSION_HEX < 0x030500B1 +static PyTypeObject __Pyx__PyExc_StopAsyncIteration_type = { + PyVarObject_HEAD_INIT(0, 0) + "StopAsyncIteration", /*tp_name*/ + sizeof(PyBaseExceptionObject), /*tp_basicsize*/ + 0, 
/*tp_itemsize*/ + 0, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare / reserved*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + PyDoc_STR("Signal the end from iterator.__anext__()."), /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ +#if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ +#endif #if CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM+0 >= 0x06000000 0, /*tp_pypy_flags*/ #endif -}; -#endif - -static int __pyx_StopAsyncIteration_init(void) { -#if PY_VERSION_HEX >= 0x030500B1 - __Pyx_PyExc_StopAsyncIteration = PyExc_StopAsyncIteration; -#else - PyObject *builtins = PyEval_GetBuiltins(); - if (likely(builtins)) { - PyObject *exc = PyMapping_GetItemString(builtins, (char*) "StopAsyncIteration"); - if (exc) { - __Pyx_PyExc_StopAsyncIteration = exc; - return 0; - } - } - PyErr_Clear(); - - __Pyx__PyExc_StopAsyncIteration_type.tp_traverse = ((PyTypeObject*)PyExc_BaseException)->tp_traverse; - __Pyx__PyExc_StopAsyncIteration_type.tp_clear = ((PyTypeObject*)PyExc_BaseException)->tp_clear; - __Pyx__PyExc_StopAsyncIteration_type.tp_dictoffset = ((PyTypeObject*)PyExc_BaseException)->tp_dictoffset; - __Pyx__PyExc_StopAsyncIteration_type.tp_base = (PyTypeObject*)PyExc_Exception; - - __Pyx_PyExc_StopAsyncIteration = (PyObject*) __Pyx_FetchCommonType(&__Pyx__PyExc_StopAsyncIteration_type); - if (unlikely(!__Pyx_PyExc_StopAsyncIteration)) - return -1; - if (builtins && unlikely(PyMapping_SetItemString(builtins, (char*) "StopAsyncIteration", __Pyx_PyExc_StopAsyncIteration) < 0)) - return -1; -#endif - return 0; -} +}; +#endif + +static int __pyx_StopAsyncIteration_init(void) { +#if PY_VERSION_HEX >= 0x030500B1 + __Pyx_PyExc_StopAsyncIteration = PyExc_StopAsyncIteration; +#else + PyObject *builtins = PyEval_GetBuiltins(); + if (likely(builtins)) { + PyObject *exc = PyMapping_GetItemString(builtins, (char*) "StopAsyncIteration"); + if (exc) { + __Pyx_PyExc_StopAsyncIteration = exc; + return 0; + } + } + PyErr_Clear(); + + __Pyx__PyExc_StopAsyncIteration_type.tp_traverse = ((PyTypeObject*)PyExc_BaseException)->tp_traverse; + __Pyx__PyExc_StopAsyncIteration_type.tp_clear = ((PyTypeObject*)PyExc_BaseException)->tp_clear; + __Pyx__PyExc_StopAsyncIteration_type.tp_dictoffset = ((PyTypeObject*)PyExc_BaseException)->tp_dictoffset; + __Pyx__PyExc_StopAsyncIteration_type.tp_base = (PyTypeObject*)PyExc_Exception; + + __Pyx_PyExc_StopAsyncIteration = (PyObject*) __Pyx_FetchCommonType(&__Pyx__PyExc_StopAsyncIteration_type); + if (unlikely(!__Pyx_PyExc_StopAsyncIteration)) + return -1; + if (builtins && unlikely(PyMapping_SetItemString(builtins, (char*) "StopAsyncIteration", __Pyx_PyExc_StopAsyncIteration) < 0)) + return -1; +#endif + return 0; +} diff --git a/contrib/tools/cython/Cython/Utility/CpdefEnums.pyx b/contrib/tools/cython/Cython/Utility/CpdefEnums.pyx index 
ad2b0ea427..148d776c29 100644 --- a/contrib/tools/cython/Cython/Utility/CpdefEnums.pyx +++ b/contrib/tools/cython/Cython/Utility/CpdefEnums.pyx @@ -1,66 +1,66 @@ -#################### EnumBase #################### - -cimport cython - -cdef extern from *: - int PY_VERSION_HEX - -cdef object __Pyx_OrderedDict -if PY_VERSION_HEX >= 0x02070000: - from collections import OrderedDict as __Pyx_OrderedDict -else: - __Pyx_OrderedDict = dict - -@cython.internal -cdef class __Pyx_EnumMeta(type): - def __init__(cls, name, parents, dct): - type.__init__(cls, name, parents, dct) - cls.__members__ = __Pyx_OrderedDict() - def __iter__(cls): - return iter(cls.__members__.values()) - def __getitem__(cls, name): - return cls.__members__[name] - -# @cython.internal -cdef object __Pyx_EnumBase -class __Pyx_EnumBase(int): - __metaclass__ = __Pyx_EnumMeta - def __new__(cls, value, name=None): - for v in cls: - if v == value: - return v - if name is None: - raise ValueError("Unknown enum value: '%s'" % value) - res = int.__new__(cls, value) - res.name = name - setattr(cls, name, res) - cls.__members__[name] = res - return res - def __repr__(self): - return "<%s.%s: %d>" % (self.__class__.__name__, self.name, self) - def __str__(self): - return "%s.%s" % (self.__class__.__name__, self.name) - -if PY_VERSION_HEX >= 0x03040000: - from enum import IntEnum as __Pyx_EnumBase - -#################### EnumType #################### -#@requires: EnumBase - -cdef dict __Pyx_globals = globals() -if PY_VERSION_HEX >= 0x03040000: - # create new IntEnum() - {{name}} = __Pyx_EnumBase('{{name}}', __Pyx_OrderedDict([ - {{for item in items}} - ('{{item}}', {{item}}), - {{endfor}} - ])) - {{for item in items}} - __Pyx_globals['{{item}}'] = {{name}}.{{item}} - {{endfor}} -else: - class {{name}}(__Pyx_EnumBase): - pass - {{for item in items}} - __Pyx_globals['{{item}}'] = {{name}}({{item}}, '{{item}}') - {{endfor}} +#################### EnumBase #################### + +cimport cython + +cdef extern from *: + int PY_VERSION_HEX + +cdef object __Pyx_OrderedDict +if PY_VERSION_HEX >= 0x02070000: + from collections import OrderedDict as __Pyx_OrderedDict +else: + __Pyx_OrderedDict = dict + +@cython.internal +cdef class __Pyx_EnumMeta(type): + def __init__(cls, name, parents, dct): + type.__init__(cls, name, parents, dct) + cls.__members__ = __Pyx_OrderedDict() + def __iter__(cls): + return iter(cls.__members__.values()) + def __getitem__(cls, name): + return cls.__members__[name] + +# @cython.internal +cdef object __Pyx_EnumBase +class __Pyx_EnumBase(int): + __metaclass__ = __Pyx_EnumMeta + def __new__(cls, value, name=None): + for v in cls: + if v == value: + return v + if name is None: + raise ValueError("Unknown enum value: '%s'" % value) + res = int.__new__(cls, value) + res.name = name + setattr(cls, name, res) + cls.__members__[name] = res + return res + def __repr__(self): + return "<%s.%s: %d>" % (self.__class__.__name__, self.name, self) + def __str__(self): + return "%s.%s" % (self.__class__.__name__, self.name) + +if PY_VERSION_HEX >= 0x03040000: + from enum import IntEnum as __Pyx_EnumBase + +#################### EnumType #################### +#@requires: EnumBase + +cdef dict __Pyx_globals = globals() +if PY_VERSION_HEX >= 0x03040000: + # create new IntEnum() + {{name}} = __Pyx_EnumBase('{{name}}', __Pyx_OrderedDict([ + {{for item in items}} + ('{{item}}', {{item}}), + {{endfor}} + ])) + {{for item in items}} + __Pyx_globals['{{item}}'] = {{name}}.{{item}} + {{endfor}} +else: + class {{name}}(__Pyx_EnumBase): + pass + {{for 
item in items}} + __Pyx_globals['{{item}}'] = {{name}}({{item}}, '{{item}}') + {{endfor}} diff --git a/contrib/tools/cython/Cython/Utility/CppConvert.pyx b/contrib/tools/cython/Cython/Utility/CppConvert.pyx index 9988f2f49a..5f7859dd0e 100644 --- a/contrib/tools/cython/Cython/Utility/CppConvert.pyx +++ b/contrib/tools/cython/Cython/Utility/CppConvert.pyx @@ -25,16 +25,16 @@ cdef extern from *: char* data() size_t size() -{{for py_type in ['PyObject', 'PyUnicode', 'PyStr', 'PyBytes', 'PyByteArray']}} -cdef extern from *: +{{for py_type in ['PyObject', 'PyUnicode', 'PyStr', 'PyBytes', 'PyByteArray']}} +cdef extern from *: cdef object __Pyx_{{py_type}}_FromStringAndSize(const char*, size_t) -@cname("{{cname.replace("PyObject", py_type, 1)}}") -cdef inline object {{cname.replace("PyObject", py_type, 1)}}(const string& s): - return __Pyx_{{py_type}}_FromStringAndSize(s.data(), s.size()) -{{endfor}} +@cname("{{cname.replace("PyObject", py_type, 1)}}") +cdef inline object {{cname.replace("PyObject", py_type, 1)}}(const string& s): + return __Pyx_{{py_type}}_FromStringAndSize(s.data(), s.size()) +{{endfor}} + - #################### vector.from_py #################### cdef extern from *: @@ -209,34 +209,34 @@ cdef object {{cname}}(const map[X,Y]& s): return o -#################### complex.from_py #################### - -cdef extern from *: - cdef cppclass std_complex "std::complex" [T]: - std_complex() - std_complex(T, T) except + - -@cname("{{cname}}") -cdef std_complex[X] {{cname}}(object o) except *: - cdef double complex z = o - return std_complex[X](<X>z.real, <X>z.imag) - - -#################### complex.to_py #################### - -cdef extern from *: - cdef cppclass std_complex "std::complex" [T]: - X real() - X imag() - -@cname("{{cname}}") -cdef object {{cname}}(const std_complex[X]& z): - cdef double complex tmp - tmp.real = <double>z.real() - tmp.imag = <double>z.imag() - return tmp - - +#################### complex.from_py #################### + +cdef extern from *: + cdef cppclass std_complex "std::complex" [T]: + std_complex() + std_complex(T, T) except + + +@cname("{{cname}}") +cdef std_complex[X] {{cname}}(object o) except *: + cdef double complex z = o + return std_complex[X](<X>z.real, <X>z.imag) + + +#################### complex.to_py #################### + +cdef extern from *: + cdef cppclass std_complex "std::complex" [T]: + X real() + X imag() + +@cname("{{cname}}") +cdef object {{cname}}(const std_complex[X]& z): + cdef double complex tmp + tmp.real = <double>z.real() + tmp.imag = <double>z.imag() + return tmp + + #################### arcadia_TMaybe.from_py #################### cdef extern from *: diff --git a/contrib/tools/cython/Cython/Utility/CppSupport.cpp b/contrib/tools/cython/Cython/Utility/CppSupport.cpp index 9876135dd8..b8fcff0643 100644 --- a/contrib/tools/cython/Cython/Utility/CppSupport.cpp +++ b/contrib/tools/cython/Cython/Utility/CppSupport.cpp @@ -18,8 +18,8 @@ static void __Pyx_CppExn2PyErr() { PyErr_SetString(PyExc_MemoryError, exn.what()); } catch (const std::bad_cast& exn) { PyErr_SetString(PyExc_TypeError, exn.what()); - } catch (const std::bad_typeid& exn) { - PyErr_SetString(PyExc_TypeError, exn.what()); + } catch (const std::bad_typeid& exn) { + PyErr_SetString(PyExc_TypeError, exn.what()); } catch (const std::domain_error& exn) { PyErr_SetString(PyExc_ValueError, exn.what()); } catch (const std::invalid_argument& exn) { diff --git a/contrib/tools/cython/Cython/Utility/CythonFunction.c b/contrib/tools/cython/Cython/Utility/CythonFunction.c index 
e2ae4df14a..d51b308a8d 100644 --- a/contrib/tools/cython/Cython/Utility/CythonFunction.c +++ b/contrib/tools/cython/Cython/Utility/CythonFunction.c @@ -48,8 +48,8 @@ typedef struct { static PyTypeObject *__pyx_CyFunctionType = 0; -#define __Pyx_CyFunction_Check(obj) (__Pyx_TypeCheck(obj, __pyx_CyFunctionType)) - +#define __Pyx_CyFunction_Check(obj) (__Pyx_TypeCheck(obj, __pyx_CyFunctionType)) + static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml, int flags, PyObject* qualname, PyObject *self, @@ -67,7 +67,7 @@ static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, PyObject *dict); -static int __pyx_CyFunction_init(void); +static int __pyx_CyFunction_init(void); //////////////////// CythonFunctionShared //////////////////// @@ -99,7 +99,7 @@ __Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure } static int -__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) +__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp = op->func_doc; if (value == NULL) { @@ -113,7 +113,7 @@ __Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNU } static PyObject * -__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) +__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { if (unlikely(op->func_name == NULL)) { #if PY_MAJOR_VERSION >= 3 @@ -129,16 +129,16 @@ __Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *contex } static int -__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) +__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) + if (unlikely(value == NULL || !PyUnicode_Check(value))) #else - if (unlikely(value == NULL || !PyString_Check(value))) + if (unlikely(value == NULL || !PyString_Check(value))) #endif - { + { PyErr_SetString(PyExc_TypeError, "__name__ must be set to a string object"); return -1; @@ -151,23 +151,23 @@ __Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UN } static PyObject * -__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) +__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { Py_INCREF(op->func_qualname); return op->func_qualname; } static int -__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) +__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp; #if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) + if (unlikely(value == NULL || !PyUnicode_Check(value))) #else - if (unlikely(value == NULL || !PyString_Check(value))) + if (unlikely(value == NULL || !PyString_Check(value))) #endif - { + { PyErr_SetString(PyExc_TypeError, "__qualname__ must be set to a string object"); return -1; @@ -192,7 +192,7 @@ __Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure } static PyObject * -__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) +__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { if (unlikely(op->func_dict == NULL)) { op->func_dict = PyDict_New(); @@ -204,7 +204,7 @@ 
__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *contex } static int -__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) +__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) { PyObject *tmp; @@ -226,21 +226,21 @@ __Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UN } static PyObject * -__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) +__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { Py_INCREF(op->func_globals); return op->func_globals; } static PyObject * -__Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) +__Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { Py_INCREF(Py_None); return Py_None; } static PyObject * -__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) +__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = (op->func_code) ? op->func_code : Py_None; Py_INCREF(result); @@ -249,31 +249,31 @@ __Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *contex static int __Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { - int result = 0; + int result = 0; PyObject *res = op->defaults_getter((PyObject *) op); if (unlikely(!res)) return -1; // Cache result - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS op->defaults_tuple = PyTuple_GET_ITEM(res, 0); Py_INCREF(op->defaults_tuple); op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); Py_INCREF(op->defaults_kwdict); - #else - op->defaults_tuple = PySequence_ITEM(res, 0); - if (unlikely(!op->defaults_tuple)) result = -1; - else { - op->defaults_kwdict = PySequence_ITEM(res, 1); - if (unlikely(!op->defaults_kwdict)) result = -1; - } - #endif + #else + op->defaults_tuple = PySequence_ITEM(res, 0); + if (unlikely(!op->defaults_tuple)) result = -1; + else { + op->defaults_kwdict = PySequence_ITEM(res, 1); + if (unlikely(!op->defaults_kwdict)) result = -1; + } + #endif Py_DECREF(res); - return result; + return result; } static int -__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { +__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { PyObject* tmp; if (!value) { // del => explicit None to prevent rebuilding @@ -291,7 +291,7 @@ __Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHO } static PyObject * -__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { +__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = op->defaults_tuple; if (unlikely(!result)) { if (op->defaults_getter) { @@ -306,7 +306,7 @@ __Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *co } static int -__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { +__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { PyObject* tmp; if (!value) { // del => explicit None to prevent rebuilding @@ -324,7 +324,7 @@ __Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, CYT } static PyObject * 
-__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { +__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = op->defaults_kwdict; if (unlikely(!result)) { if (op->defaults_getter) { @@ -339,7 +339,7 @@ __Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void * } static int -__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { +__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { PyObject* tmp; if (!value || value == Py_None) { value = NULL; @@ -356,7 +356,7 @@ __Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, CY } static PyObject * -__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { +__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { PyObject* result = op->func_annotations; if (unlikely(!result)) { result = PyDict_New(); @@ -369,7 +369,7 @@ __Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, CYTHON_UNUSED void //#if PY_VERSION_HEX >= 0x030400C1 //static PyObject * -//__Pyx_CyFunction_get_signature(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { +//__Pyx_CyFunction_get_signature(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { // PyObject *inspect_module, *signature_class, *signature; // // from inspect import Signature // inspect_module = PyImport_ImportModuleLevelObject(PYIDENT("inspect"), NULL, NULL, NULL, 0); @@ -501,7 +501,7 @@ __Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) for (i = 0; i < m->defaults_pyobjects; i++) Py_XDECREF(pydefaults[i]); - PyObject_Free(m->defaults); + PyObject_Free(m->defaults); m->defaults = NULL; } @@ -561,13 +561,13 @@ static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObj if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) { if (type == NULL) type = (PyObject *)(Py_TYPE(obj)); - return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type))); + return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type))); } if (obj == Py_None) obj = NULL; #endif - return __Pyx_PyMethod_New(func, obj, type); + return __Pyx_PyMethod_New(func, obj, type); } static PyObject* @@ -582,48 +582,48 @@ __Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) #endif } -static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) { - // originally copied from PyCFunction_Call() in CPython's Objects/methodobject.c +static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) { + // originally copied from PyCFunction_Call() in CPython's Objects/methodobject.c PyCFunctionObject* f = (PyCFunctionObject*)func; - PyCFunction meth = f->m_ml->ml_meth; + PyCFunction meth = f->m_ml->ml_meth; Py_ssize_t size; - switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { + switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { case METH_VARARGS: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) + if (likely(kw == NULL || PyDict_Size(kw) == 0)) return (*meth)(self, arg); break; case METH_VARARGS | METH_KEYWORDS: - return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw); + return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw); case METH_NOARGS: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) { + if (likely(kw == NULL || PyDict_Size(kw) == 0)) { size = 
PyTuple_GET_SIZE(arg); - if (likely(size == 0)) + if (likely(size == 0)) return (*meth)(self, NULL); PyErr_Format(PyExc_TypeError, - "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", + "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", f->m_ml->ml_name, size); return NULL; } break; case METH_O: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) { + if (likely(kw == NULL || PyDict_Size(kw) == 0)) { size = PyTuple_GET_SIZE(arg); - if (likely(size == 1)) { + if (likely(size == 1)) { PyObject *result, *arg0; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS arg0 = PyTuple_GET_ITEM(arg, 0); #else arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; #endif - result = (*meth)(self, arg0); + result = (*meth)(self, arg0); #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_DECREF(arg0); + Py_DECREF(arg0); #endif - return result; - } + return result; + } PyErr_Format(PyExc_TypeError, - "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", + "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", f->m_ml->ml_name, size); return NULL; } @@ -639,39 +639,39 @@ static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, Py f->m_ml->ml_name); return NULL; } - -static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { - return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw); + +static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { + return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw); +} + +static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { + PyObject *result; + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; + if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { + Py_ssize_t argc; + PyObject *new_args; + PyObject *self; + + argc = PyTuple_GET_SIZE(args); + new_args = PyTuple_GetSlice(args, 1, argc); + + if (unlikely(!new_args)) + return NULL; + + self = PyTuple_GetItem(args, 0); + if (unlikely(!self)) { + Py_DECREF(new_args); + return NULL; + } + + result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); + Py_DECREF(new_args); + } else { + result = __Pyx_CyFunction_Call(func, args, kw); + } + return result; } -static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { - PyObject *result; - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; - if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { - Py_ssize_t argc; - PyObject *new_args; - PyObject *self; - - argc = PyTuple_GET_SIZE(args); - new_args = PyTuple_GetSlice(args, 1, argc); - - if (unlikely(!new_args)) - return NULL; - - self = PyTuple_GetItem(args, 0); - if (unlikely(!self)) { - Py_DECREF(new_args); - return NULL; - } - - result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); - Py_DECREF(new_args); - } else { - result = __Pyx_CyFunction_Call(func, args, kw); - } - return result; -} - static PyTypeObject __pyx_CyFunctionType_type = { PyVarObject_HEAD_INIT(0, 0) "cython_function_or_method", /*tp_name*/ @@ -691,7 +691,7 @@ static PyTypeObject __pyx_CyFunctionType_type = { 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ - __Pyx_CyFunction_CallAsMethod, /*tp_call*/ + __Pyx_CyFunction_CallAsMethod, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 
0, /*tp_setattro*/ @@ -732,18 +732,18 @@ static PyTypeObject __pyx_CyFunctionType_type = { 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif + 0, /*tp_vectorcall*/ +#endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ -#endif + 0, /*tp_print*/ +#endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif }; -static int __pyx_CyFunction_init(void) { +static int __pyx_CyFunction_init(void) { __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); if (unlikely(__pyx_CyFunctionType == NULL)) { return -1; @@ -754,7 +754,7 @@ static int __pyx_CyFunction_init(void) { static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->defaults = PyObject_Malloc(size); + m->defaults = PyObject_Malloc(size); if (unlikely(!m->defaults)) return PyErr_NoMemory(); memset(m->defaults, 0, size); @@ -813,24 +813,24 @@ static int __Pyx_CyFunction_InitClassCell(PyObject *cyfunctions, PyObject *class //@requires: CythonFunctionShared static int __Pyx_CyFunction_InitClassCell(PyObject *cyfunctions, PyObject *classobj) { - Py_ssize_t i, count = PyList_GET_SIZE(cyfunctions); - - for (i = 0; i < count; i++) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - PyList_GET_ITEM(cyfunctions, i); -#else - PySequence_ITEM(cyfunctions, i); - if (unlikely(!m)) - return -1; -#endif - Py_INCREF(classobj); + Py_ssize_t i, count = PyList_GET_SIZE(cyfunctions); + + for (i = 0; i < count; i++) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + PyList_GET_ITEM(cyfunctions, i); +#else + PySequence_ITEM(cyfunctions, i); + if (unlikely(!m)) + return -1; +#endif + Py_INCREF(classobj); m->func_classobj = classobj; -#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_DECREF((PyObject*)m); -#endif +#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF((PyObject*)m); +#endif } - return 0; + return 0; } @@ -1010,15 +1010,15 @@ __pyx_FusedFunction_getitem(__pyx_FusedFunctionObject *self, PyObject *idx) for (i = 0; i < n; i++) { int ret; PyObject *string; -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS PyObject *item = PyTuple_GET_ITEM(idx, i); -#else +#else PyObject *item = PySequence_ITEM(idx, i); if (unlikely(!item)) goto __pyx_err; -#endif +#endif string = _obj_to_str(item); -#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_DECREF(item); -#endif +#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF(item); +#endif if (unlikely(!string)) goto __pyx_err; ret = PyList_Append(list, string); Py_DECREF(string); @@ -1072,9 +1072,9 @@ __pyx_FusedFunction_callfunction(PyObject *func, PyObject *args, PyObject *kw) !((__pyx_FusedFunctionObject *) func)->__signatures__); if (cyfunc->flags & __Pyx_CYFUNCTION_CCLASS && !static_specialized) { - return __Pyx_CyFunction_CallAsMethod(func, args, kw); + return __Pyx_CyFunction_CallAsMethod(func, args, kw); } else { - return __Pyx_CyFunction_Call(func, args, kw); + return __Pyx_CyFunction_Call(func, args, kw); } } @@ -1104,19 +1104,19 @@ __pyx_FusedFunction_call(PyObject *func, PyObject *args, PyObject *kw) return NULL; 
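
[Editor's note] Both __Pyx_CyFunction_CallAsMethod above and __pyx_FusedFunction_call here peel the first positional argument off the tuple and treat it as `self`. A minimal standalone sketch of that pattern, using only the public C-API; the function name is hypothetical and PyObject_Call stands in for the real dispatch:

#include <Python.h>

/* Hypothetical helper; mirrors the self-peeling done by
 * __Pyx_CyFunction_CallAsMethod / __pyx_FusedFunction_call. */
static PyObject *
call_with_first_arg_as_self(PyObject *func, PyObject *args, PyObject *kw)
{
    Py_ssize_t argc = PyTuple_GET_SIZE(args);
    PyObject *self, *rest, *result;

    if (argc == 0) {
        PyErr_SetString(PyExc_TypeError, "need at least one argument");
        return NULL;
    }
    self = PyTuple_GetItem(args, 0);           /* borrowed reference */
    if (!self)
        return NULL;
    (void)self;  /* the real code passes this separately to the C method */
    rest = PyTuple_GetSlice(args, 1, argc);    /* new reference: args[1:] */
    if (!rest)
        return NULL;
    result = PyObject_Call(func, rest, kw);    /* stand-in for the dispatch */
    Py_DECREF(rest);
    return result;
}

The diff resumes inside __pyx_FusedFunction_call below.
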
self = binding_func->self; -#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) +#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_INCREF(self); +#endif Py_INCREF(self); -#endif - Py_INCREF(self); PyTuple_SET_ITEM(new_args, 0, self); for (i = 0; i < argc; i++) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS PyObject *item = PyTuple_GET_ITEM(args, i); Py_INCREF(item); -#else - PyObject *item = PySequence_ITEM(args, i); if (unlikely(!item)) goto bad; -#endif +#else + PyObject *item = PySequence_ITEM(args, i); if (unlikely(!item)) goto bad; +#endif PyTuple_SET_ITEM(new_args, i + 1, item); } @@ -1127,51 +1127,51 @@ __pyx_FusedFunction_call(PyObject *func, PyObject *args, PyObject *kw) PyErr_SetString(PyExc_TypeError, "Need at least one argument, 0 given."); return NULL; } -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS self = PyTuple_GET_ITEM(args, 0); -#else - self = PySequence_ITEM(args, 0); if (unlikely(!self)) return NULL; -#endif +#else + self = PySequence_ITEM(args, 0); if (unlikely(!self)) return NULL; +#endif } - if (self && !is_classmethod && !is_staticmethod) { - int is_instance = PyObject_IsInstance(self, binding_func->type); - if (unlikely(!is_instance)) { - PyErr_Format(PyExc_TypeError, - "First argument should be of type %.200s, got %.200s.", - ((PyTypeObject *) binding_func->type)->tp_name, + if (self && !is_classmethod && !is_staticmethod) { + int is_instance = PyObject_IsInstance(self, binding_func->type); + if (unlikely(!is_instance)) { + PyErr_Format(PyExc_TypeError, + "First argument should be of type %.200s, got %.200s.", + ((PyTypeObject *) binding_func->type)->tp_name, Py_TYPE(self)->tp_name); - goto bad; - } else if (unlikely(is_instance == -1)) { - goto bad; - } + goto bad; + } else if (unlikely(is_instance == -1)) { + goto bad; + } } -#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_XDECREF(self); - self = NULL; -#endif +#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_XDECREF(self); + self = NULL; +#endif if (binding_func->__signatures__) { - PyObject *tup; - if (is_staticmethod && binding_func->func.flags & __Pyx_CYFUNCTION_CCLASS) { - // FIXME: this seems wrong, but we must currently pass the signatures dict as 'self' argument - tup = PyTuple_Pack(3, args, - kw == NULL ? Py_None : kw, - binding_func->func.defaults_tuple); - if (unlikely(!tup)) goto bad; - new_func = (__pyx_FusedFunctionObject *) __Pyx_CyFunction_CallMethod( - func, binding_func->__signatures__, tup, NULL); - } else { - tup = PyTuple_Pack(4, binding_func->__signatures__, args, - kw == NULL ? Py_None : kw, - binding_func->func.defaults_tuple); - if (unlikely(!tup)) goto bad; - new_func = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_callfunction(func, tup, NULL); - } + PyObject *tup; + if (is_staticmethod && binding_func->func.flags & __Pyx_CYFUNCTION_CCLASS) { + // FIXME: this seems wrong, but we must currently pass the signatures dict as 'self' argument + tup = PyTuple_Pack(3, args, + kw == NULL ? Py_None : kw, + binding_func->func.defaults_tuple); + if (unlikely(!tup)) goto bad; + new_func = (__pyx_FusedFunctionObject *) __Pyx_CyFunction_CallMethod( + func, binding_func->__signatures__, tup, NULL); + } else { + tup = PyTuple_Pack(4, binding_func->__signatures__, args, + kw == NULL ? 
Py_None : kw, + binding_func->func.defaults_tuple); + if (unlikely(!tup)) goto bad; + new_func = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_callfunction(func, tup, NULL); + } Py_DECREF(tup); - if (unlikely(!new_func)) - goto bad; + if (unlikely(!new_func)) + goto bad; Py_XINCREF(binding_func->func.func_classobj); Py_CLEAR(new_func->func.func_classobj); @@ -1181,10 +1181,10 @@ __pyx_FusedFunction_call(PyObject *func, PyObject *args, PyObject *kw) } result = __pyx_FusedFunction_callfunction(func, args, kw); -bad: -#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_XDECREF(self); -#endif +bad: +#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_XDECREF(self); +#endif Py_XDECREF(new_args); Py_XDECREF((PyObject *) new_func); return result; @@ -1264,11 +1264,11 @@ static PyTypeObject __pyx_FusedFunctionType_type = { 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ -#endif + 0, /*tp_vectorcall*/ +#endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ -#endif +#endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif @@ -1287,7 +1287,7 @@ static int __pyx_FusedFunction_init(void) { //////////////////// ClassMethod.proto //////////////////// #include "descrobject.h" -static CYTHON_UNUSED PyObject* __Pyx_Method_ClassMethod(PyObject *method); /*proto*/ +static CYTHON_UNUSED PyObject* __Pyx_Method_ClassMethod(PyObject *method); /*proto*/ //////////////////// ClassMethod //////////////////// @@ -1300,8 +1300,8 @@ static PyObject* __Pyx_Method_ClassMethod(PyObject *method) { #else #if CYTHON_COMPILING_IN_PYSTON || CYTHON_COMPILING_IN_PYPY // special C-API function only in Pyston and PyPy >= 5.9 - if (PyMethodDescr_Check(method)) -#else + if (PyMethodDescr_Check(method)) +#else #if PY_MAJOR_VERSION == 2 // PyMethodDescr_Type is not exposed in the CPython C-API in Py2. 
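
[Editor's note] An aside on the ClassMethod utility being patched here: for plain Python callables the public C-API already provides PyClassMethod_New(); the special-casing above and below exists for the method descriptors of cdef classes, which that call does not cover. A minimal sketch of the simple case:

#include <Python.h>

static PyObject *wrap_as_classmethod(PyObject *callable)
{
    /* Returns a new reference to classmethod(callable), or NULL on error. */
    return PyClassMethod_New(callable);
}

The descriptor handling continues below.
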
static PyTypeObject *methoddescr_type = NULL; @@ -1314,9 +1314,9 @@ static PyObject* __Pyx_Method_ClassMethod(PyObject *method) { #else PyTypeObject *methoddescr_type = &PyMethodDescr_Type; #endif - if (__Pyx_TypeCheck(method, methoddescr_type)) -#endif - { + if (__Pyx_TypeCheck(method, methoddescr_type)) +#endif + { // cdef classes PyMethodDescrObject *descr = (PyMethodDescrObject *)method; #if PY_VERSION_HEX < 0x03020000 diff --git a/contrib/tools/cython/Cython/Utility/Embed.c b/contrib/tools/cython/Cython/Utility/Embed.c index 15b6716ad8..60da8f2330 100644 --- a/contrib/tools/cython/Cython/Utility/Embed.c +++ b/contrib/tools/cython/Cython/Utility/Embed.c @@ -4,12 +4,12 @@ #include <floatingpoint.h> #endif -#if PY_MAJOR_VERSION < 3 -void Py_InitArgcArgv(int argc, char **argv); -#else -void Py_InitArgcArgv(int argc, wchar_t **argv); -#endif - +#if PY_MAJOR_VERSION < 3 +void Py_InitArgcArgv(int argc, char **argv); +#else +void Py_InitArgcArgv(int argc, wchar_t **argv); +#endif + #if PY_MAJOR_VERSION < 3 int %(main_method)s(int argc, char** argv) { #elif defined(WIN32) || defined(MS_WINDOWS) @@ -28,10 +28,10 @@ static int __Pyx_main(int argc, wchar_t **argv) { m = fpgetmask(); fpsetmask(m & ~FP_X_OFL); #endif - if (argc && argv) { - Py_InitArgcArgv(argc, argv); + if (argc && argv) { + Py_InitArgcArgv(argc, argv); Py_SetProgramName(argv[0]); - } + } Py_Initialize(); if (argc && argv) PySys_SetArgv(argc, argv); @@ -122,7 +122,7 @@ __Pyx_char2wchar(char* arg) /* Overallocate; as multi-byte characters are in the argument, the actual output could use less memory. */ argsize = strlen(arg) + 1; - res = (wchar_t *)malloc(argsize*sizeof(wchar_t)); + res = (wchar_t *)malloc(argsize*sizeof(wchar_t)); if (!res) goto oom; in = (unsigned char*)arg; out = res; @@ -138,7 +138,7 @@ __Pyx_char2wchar(char* arg) unless there is a bug in the C library, or I misunderstood how mbrtowc works. */ fprintf(stderr, "unexpected mbrtowc result -2\\n"); - free(res); + free(res); return NULL; } if (converted == (size_t)-1) { @@ -166,7 +166,7 @@ __Pyx_char2wchar(char* arg) /* Cannot use C locale for escaping; manually escape as if charset is ASCII (i.e. escape all bytes > 128. This will still roundtrip correctly in the locale's charset, which must be an ASCII superset. */ - res = (wchar_t *)malloc((strlen(arg)+1)*sizeof(wchar_t)); + res = (wchar_t *)malloc((strlen(arg)+1)*sizeof(wchar_t)); if (!res) goto oom; in = (unsigned char*)arg; out = res; @@ -190,28 +190,28 @@ int return __Pyx_main(0, NULL); } else { - int i, res; + int i, res; wchar_t **argv_copy = (wchar_t **)malloc(sizeof(wchar_t*)*argc); - /* We need a second copy, as Python might modify the first one. */ + /* We need a second copy, as Python might modify the first one. 
*/ wchar_t **argv_copy2 = (wchar_t **)malloc(sizeof(wchar_t*)*argc); - char *oldloc = strdup(setlocale(LC_ALL, NULL)); - if (!argv_copy || !argv_copy2 || !oldloc) { + char *oldloc = strdup(setlocale(LC_ALL, NULL)); + if (!argv_copy || !argv_copy2 || !oldloc) { fprintf(stderr, "out of memory\\n"); - free(argv_copy); - free(argv_copy2); - free(oldloc); + free(argv_copy); + free(argv_copy2); + free(oldloc); return 1; } - res = 0; + res = 0; setlocale(LC_ALL, ""); for (i = 0; i < argc; i++) { argv_copy2[i] = argv_copy[i] = __Pyx_char2wchar(argv[i]); - if (!argv_copy[i]) res = 1; /* failure, but continue to simplify cleanup */ + if (!argv_copy[i]) res = 1; /* failure, but continue to simplify cleanup */ } setlocale(LC_ALL, oldloc); free(oldloc); - if (res == 0) - res = __Pyx_main(argc, argv_copy); + if (res == 0) + res = __Pyx_main(argc, argv_copy); for (i = 0; i < argc; i++) { #if PY_VERSION_HEX < 0x03050000 free(argv_copy2[i]); diff --git a/contrib/tools/cython/Cython/Utility/Exceptions.c b/contrib/tools/cython/Cython/Utility/Exceptions.c index 12c08b4331..b0411f6956 100644 --- a/contrib/tools/cython/Cython/Utility/Exceptions.c +++ b/contrib/tools/cython/Cython/Utility/Exceptions.c @@ -5,34 +5,34 @@ // 'except' statement, curexc_* is moved over to exc_* by // __Pyx_GetException() - -/////////////// PyThreadStateGet.proto /////////////// -//@substitute: naming - -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyThreadState_declare PyThreadState *$local_tstate_cname; + +/////////////// PyThreadStateGet.proto /////////////// +//@substitute: naming + +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *$local_tstate_cname; #define __Pyx_PyThreadState_assign $local_tstate_cname = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() $local_tstate_cname->curexc_type -#else -#define __Pyx_PyThreadState_declare -#define __Pyx_PyThreadState_assign +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() -#endif - - -/////////////// PyErrExceptionMatches.proto /////////////// -//@substitute: naming - -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState($local_tstate_cname, err) -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); -#else -#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) -#endif - -/////////////// PyErrExceptionMatches /////////////// - -#if CYTHON_FAST_THREAD_STATE +#endif + + +/////////////// PyErrExceptionMatches.proto /////////////// +//@substitute: naming + +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState($local_tstate_cname, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/////////////// PyErrExceptionMatches /////////////// + +#if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); @@ -48,50 +48,50 @@ static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple return 0; } -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { - PyObject *exc_type = tstate->curexc_type; - if (exc_type == err) return 1; - if (unlikely(!exc_type)) return 0; +static CYTHON_INLINE int 
__Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { + PyObject *exc_type = tstate->curexc_type; + if (exc_type == err) return 1; + if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); -} -#endif - +} +#endif + /////////////// PyErrFetchRestore.proto /////////////// -//@substitute: naming -//@requires: PyThreadStateGet +//@substitute: naming +//@requires: PyThreadStateGet -#if CYTHON_FAST_THREAD_STATE +#if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) -#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState($local_tstate_cname, type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState($local_tstate_cname, type, value, tb) -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); /*proto*/ -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/ +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState($local_tstate_cname, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState($local_tstate_cname, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); /*proto*/ +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) -#else +#else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) -#endif - +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + /////////////// PyErrFetchRestore /////////////// -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = 
tstate->curexc_type; tmp_value = tstate->curexc_value; @@ -104,14 +104,14 @@ static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObjec Py_XDECREF(tmp_tb); } -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; -} +} #endif /////////////// RaiseException.proto /////////////// @@ -120,7 +120,7 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject /////////////// RaiseException /////////////// //@requires: PyErrFetchRestore -//@requires: PyThreadStateGet +//@requires: PyThreadStateGet // The following function is based on do_raise() from ceval.c. There // are separate versions for Python2 and Python3 as exception handling @@ -129,7 +129,7 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { - __Pyx_PyThreadState_declare + __Pyx_PyThreadState_declare /* 'cause' is only used in Py3 */ Py_XINCREF(type); if (!value || value == Py_None) @@ -177,7 +177,7 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, } } - __Pyx_PyThreadState_assign + __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: @@ -215,13 +215,13 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { - int is_subclass = PyObject_IsSubclass(instance_class, type); - if (!is_subclass) { - instance_class = NULL; - } else if (unlikely(is_subclass == -1)) { - // error on subclass test - goto bad; - } else { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + // error on subclass test + goto bad; + } else { // believe the instance type = instance_class; } @@ -284,13 +284,13 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject PyErr_SetObject(type, value); if (tb) { -#if CYTHON_COMPILING_IN_PYPY - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); - Py_INCREF(tb); - PyErr_Restore(tmp_type, tmp_value, tb); - Py_XDECREF(tmp_tb); -#else +#if CYTHON_COMPILING_IN_PYPY + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { @@ -298,7 +298,7 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } -#endif +#endif } bad: @@ -307,52 +307,52 @@ bad: } #endif - -/////////////// GetTopmostException.proto /////////////// - -#if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); -#endif - -/////////////// GetTopmostException /////////////// - -#if CYTHON_USE_EXC_INFO_STACK -// Copied from errors.c in CPython. 
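
[Editor's note] The __Pyx_ErrFetchInState/__Pyx_ErrRestoreInState pair above is a fast-path version of the public PyErr_Fetch()/PyErr_Restore() API, reading tstate->curexc_* directly instead of going through a function call. A hedged, portable sketch of the same save-and-reraise idiom using only the public API:

#include <Python.h>

/* Save the pending exception, run some cleanup, then re-raise it --
 * the portable equivalent of the __Pyx_ErrFetch/__Pyx_ErrRestore
 * fast path above. `cleanup` is an illustrative callback. */
static void run_cleanup_preserving_error(void (*cleanup)(void))
{
    PyObject *type, *value, *traceback;
    PyErr_Fetch(&type, &value, &traceback);   /* steals the pending error */
    cleanup();                                /* may set/clear its own errors */
    PyErr_Restore(type, value, traceback);    /* re-raises the saved one */
}

The diff resumes with the GetTopmostException helper copied from CPython's errors.c.
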
-static _PyErr_StackItem * -__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) -{ - _PyErr_StackItem *exc_info = tstate->exc_info; - while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && - exc_info->previous_item != NULL) - { - exc_info = exc_info->previous_item; - } - return exc_info; -} -#endif - - + +/////////////// GetTopmostException.proto /////////////// + +#if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); +#endif + +/////////////// GetTopmostException /////////////// + +#if CYTHON_USE_EXC_INFO_STACK +// Copied from errors.c in CPython. +static _PyErr_StackItem * +__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) +{ + _PyErr_StackItem *exc_info = tstate->exc_info; + while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && + exc_info->previous_item != NULL) + { + exc_info = exc_info->previous_item; + } + return exc_info; +} +#endif + + /////////////// GetException.proto /////////////// -//@substitute: naming -//@requires: PyThreadStateGet +//@substitute: naming +//@requires: PyThreadStateGet -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_GetException(type, value, tb) __Pyx__GetException($local_tstate_cname, type, value, tb) -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/ -#else +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException($local_tstate_cname, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/ +#else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ -#endif +#endif /////////////// GetException /////////////// -#if CYTHON_FAST_THREAD_STATE -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) -#endif -{ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) +#endif +{ PyObject *local_type, *local_value, *local_tb; -#if CYTHON_FAST_THREAD_STATE +#if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; @@ -364,7 +364,7 @@ static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); -#if CYTHON_FAST_THREAD_STATE +#if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) @@ -384,17 +384,17 @@ static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) *type = local_type; *value = local_value; *tb = local_tb; -#if CYTHON_FAST_THREAD_STATE - #if CYTHON_USE_EXC_INFO_STACK - { - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = local_type; - exc_info->exc_value = local_value; - exc_info->exc_traceback = local_tb; - } +#if CYTHON_FAST_THREAD_STATE + #if CYTHON_USE_EXC_INFO_STACK + { + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type 
= local_type; + exc_info->exc_value = local_value; + exc_info->exc_traceback = local_tb; + } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; @@ -426,18 +426,18 @@ bad: static CYTHON_INLINE void __Pyx_ReraiseException(void); /*proto*/ -/////////////// ReRaiseException /////////////// -//@requires: GetTopmostException +/////////////// ReRaiseException /////////////// +//@requires: GetTopmostException static CYTHON_INLINE void __Pyx_ReraiseException(void) { PyObject *type = NULL, *value = NULL, *tb = NULL; -#if CYTHON_FAST_THREAD_STATE +#if CYTHON_FAST_THREAD_STATE PyThreadState *tstate = PyThreadState_GET(); - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); - type = exc_info->exc_type; - value = exc_info->exc_value; - tb = exc_info->exc_traceback; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + type = exc_info->exc_type; + value = exc_info->exc_value; + tb = exc_info->exc_traceback; #else type = tstate->exc_type; value = tstate->exc_value; @@ -447,7 +447,7 @@ static CYTHON_INLINE void __Pyx_ReraiseException(void) { PyErr_GetExcInfo(&type, &value, &tb); #endif if (!type || type == Py_None) { -#if !CYTHON_FAST_THREAD_STATE +#if !CYTHON_FAST_THREAD_STATE Py_XDECREF(type); Py_XDECREF(value); Py_XDECREF(tb); @@ -456,7 +456,7 @@ static CYTHON_INLINE void __Pyx_ReraiseException(void) { PyErr_SetString(PyExc_RuntimeError, "No active exception to reraise"); } else { -#if CYTHON_FAST_THREAD_STATE +#if CYTHON_FAST_THREAD_STATE Py_INCREF(type); Py_XINCREF(value); Py_XINCREF(tb); @@ -467,31 +467,31 @@ static CYTHON_INLINE void __Pyx_ReraiseException(void) { } /////////////// SaveResetException.proto /////////////// -//@substitute: naming -//@requires: PyThreadStateGet - -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave($local_tstate_cname, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/ -#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset($local_tstate_cname, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); /*proto*/ - -#else - -#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) -#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) -#endif - +//@substitute: naming +//@requires: PyThreadStateGet + +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave($local_tstate_cname, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/ +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset($local_tstate_cname, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); /*proto*/ + +#else + +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + /////////////// SaveResetException /////////////// -//@requires: GetTopmostException - -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = 
__Pyx_PyErr_GetTopmostException(tstate); - *type = exc_info->exc_type; - *value = exc_info->exc_value; - *tb = exc_info->exc_traceback; +//@requires: GetTopmostException + +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + *type = exc_info->exc_type; + *value = exc_info->exc_value; + *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; @@ -502,17 +502,17 @@ static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject * Py_XINCREF(*tb); } -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = type; - exc_info->exc_value = value; - exc_info->exc_traceback = tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = type; + exc_info->exc_value = value; + exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; @@ -524,35 +524,35 @@ static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); -} +} #endif /////////////// SwapException.proto /////////////// -//@substitute: naming -//@requires: PyThreadStateGet +//@substitute: naming +//@requires: PyThreadStateGet -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap($local_tstate_cname, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/ -#else +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap($local_tstate_cname, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/ +#else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ -#endif +#endif /////////////// SwapException /////////////// -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = *type; - exc_info->exc_value = *value; - exc_info->exc_traceback = *tb; + exc_info->exc_type = *type; + exc_info->exc_value = *value; + 
exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; @@ -562,50 +562,50 @@ static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject * tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif - - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} - + + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} + #else - -static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; + +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } -#endif +#endif /////////////// WriteUnraisableException.proto /////////////// static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, - int full_traceback, int nogil); /*proto*/ + int full_traceback, int nogil); /*proto*/ /////////////// WriteUnraisableException /////////////// //@requires: PyErrFetchRestore -//@requires: PyThreadStateGet +//@requires: PyThreadStateGet static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, - int full_traceback, CYTHON_UNUSED int nogil) { + int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; - __Pyx_PyThreadState_declare -#ifdef WITH_THREAD - PyGILState_STATE state; - if (nogil) - state = PyGILState_Ensure(); -#ifdef _MSC_VER - /* arbitrary, to suppress warning */ - else state = (PyGILState_STATE)-1; -#endif -#endif - __Pyx_PyThreadState_assign + __Pyx_PyThreadState_declare +#ifdef WITH_THREAD + PyGILState_STATE state; + if (nogil) + state = PyGILState_Ensure(); +#ifdef _MSC_VER + /* arbitrary, to suppress warning */ + else state = (PyGILState_STATE)-1; +#endif +#endif + __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); @@ -626,10 +626,10 @@ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } -#ifdef WITH_THREAD - if (nogil) - PyGILState_Release(state); -#endif +#ifdef WITH_THREAD + if (nogil) + PyGILState_Release(state); +#endif } /////////////// CLineInTraceback.proto /////////////// @@ -642,7 +642,7 @@ static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);/*proto*/ /////////////// CLineInTraceback /////////////// //@requires: ObjectHandling.c::PyObjectGetAttrStr -//@requires: ObjectHandling.c::PyDictVersioning +//@requires: ObjectHandling.c::PyDictVersioning //@requires: PyErrFetchRestore //@substitute: naming @@ -653,20 +653,20 @@ static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif - - if (unlikely(!${cython_runtime_cname})) { - // Very early error where the runtime module is not set up yet. - return c_line; - } - + + if (unlikely(!${cython_runtime_cname})) { + // Very early error where the runtime module is not set up yet. 
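
[Editor's note] Stepping back to __Pyx_WriteUnraisable above: its core is the public PyErr_WriteUnraisable() call, which logs the pending exception against a context object instead of propagating it out of a destructor-like path. A hedged sketch; the helper name and label are hypothetical:

#include <Python.h>

static void report_unraisable(const char *where)
{
    PyObject *ctx = PyUnicode_FromString(where);
    PyErr_WriteUnraisable(ctx);   /* logs and clears the pending exception;
                                     a NULL ctx is also accepted */
    Py_XDECREF(ctx);
}

The diff resumes inside __Pyx_CLineForTraceback below.
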
+ return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(${cython_runtime_cname}); if (likely(cython_runtime_dict)) { - __PYX_PY_DICT_LOOKUP_IF_MODIFIED( - use_cline, *cython_runtime_dict, - __Pyx_PyDict_GetItemStr(*cython_runtime_dict, PYIDENT("cline_in_traceback"))) + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, PYIDENT("cline_in_traceback"))) } else #endif { @@ -684,7 +684,7 @@ static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int // No need to handle errors here when we reset the exception state just afterwards. (void) PyObject_SetAttr(${cython_runtime_cname}, PYIDENT("cline_in_traceback"), Py_False); } - else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); @@ -792,7 +792,7 @@ static void __Pyx_AddTraceback(const char *funcname, int c_line, 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; - __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); diff --git a/contrib/tools/cython/Cython/Utility/FunctionArguments.c b/contrib/tools/cython/Cython/Utility/FunctionArguments.c index 4c9b488e89..8333d93666 100644 --- a/contrib/tools/cython/Cython/Utility/FunctionArguments.c +++ b/contrib/tools/cython/Cython/Utility/FunctionArguments.c @@ -104,17 +104,17 @@ static void __Pyx_RaiseDoubleKeywordsError( } -//////////////////// RaiseMappingExpected.proto //////////////////// - -static void __Pyx_RaiseMappingExpectedError(PyObject* arg); /*proto*/ - -//////////////////// RaiseMappingExpected //////////////////// - -static void __Pyx_RaiseMappingExpectedError(PyObject* arg) { - PyErr_Format(PyExc_TypeError, "'%.200s' object is not a mapping", Py_TYPE(arg)->tp_name); -} - - +//////////////////// RaiseMappingExpected.proto //////////////////// + +static void __Pyx_RaiseMappingExpectedError(PyObject* arg); /*proto*/ + +//////////////////// RaiseMappingExpected //////////////////// + +static void __Pyx_RaiseMappingExpectedError(PyObject* arg) { + PyErr_Format(PyExc_TypeError, "'%.200s' object is not a mapping", Py_TYPE(arg)->tp_name); +} + + //////////////////// KeywordStringCheck.proto //////////////////// static int __Pyx_CheckKeywordStrings(PyObject *kwdict, const char* function_name, int kw_allowed); /*proto*/ @@ -295,58 +295,58 @@ invalid_keyword: bad: return -1; } - - -//////////////////// MergeKeywords.proto //////////////////// - -static int __Pyx_MergeKeywords(PyObject *kwdict, PyObject *source_mapping); /*proto*/ - -//////////////////// MergeKeywords //////////////////// -//@requires: RaiseDoubleKeywords -//@requires: Optimize.c::dict_iter - -static int __Pyx_MergeKeywords(PyObject *kwdict, PyObject *source_mapping) { - PyObject *iter, *key = NULL, *value = NULL; - int source_is_dict, result; - Py_ssize_t orig_length, ppos = 0; - - iter = __Pyx_dict_iterator(source_mapping, 0, PYIDENT("items"), &orig_length, &source_is_dict); - if (unlikely(!iter)) { - // slow fallback: try converting to dict, then iterate - PyObject *args; - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; - PyErr_Clear(); - args = PyTuple_Pack(1, source_mapping); - if (likely(args)) { - PyObject *fallback = 
PyObject_Call((PyObject*)&PyDict_Type, args, NULL); - Py_DECREF(args); - if (likely(fallback)) { - iter = __Pyx_dict_iterator(fallback, 1, PYIDENT("items"), &orig_length, &source_is_dict); - Py_DECREF(fallback); - } - } - if (unlikely(!iter)) goto bad; - } - - while (1) { - result = __Pyx_dict_iter_next(iter, orig_length, &ppos, &key, &value, NULL, source_is_dict); - if (unlikely(result < 0)) goto bad; - if (!result) break; - - if (unlikely(PyDict_Contains(kwdict, key))) { - __Pyx_RaiseDoubleKeywordsError("function", key); - result = -1; - } else { - result = PyDict_SetItem(kwdict, key, value); - } - Py_DECREF(key); - Py_DECREF(value); - if (unlikely(result < 0)) goto bad; - } - Py_XDECREF(iter); - return 0; - -bad: - Py_XDECREF(iter); - return -1; -} + + +//////////////////// MergeKeywords.proto //////////////////// + +static int __Pyx_MergeKeywords(PyObject *kwdict, PyObject *source_mapping); /*proto*/ + +//////////////////// MergeKeywords //////////////////// +//@requires: RaiseDoubleKeywords +//@requires: Optimize.c::dict_iter + +static int __Pyx_MergeKeywords(PyObject *kwdict, PyObject *source_mapping) { + PyObject *iter, *key = NULL, *value = NULL; + int source_is_dict, result; + Py_ssize_t orig_length, ppos = 0; + + iter = __Pyx_dict_iterator(source_mapping, 0, PYIDENT("items"), &orig_length, &source_is_dict); + if (unlikely(!iter)) { + // slow fallback: try converting to dict, then iterate + PyObject *args; + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; + PyErr_Clear(); + args = PyTuple_Pack(1, source_mapping); + if (likely(args)) { + PyObject *fallback = PyObject_Call((PyObject*)&PyDict_Type, args, NULL); + Py_DECREF(args); + if (likely(fallback)) { + iter = __Pyx_dict_iterator(fallback, 1, PYIDENT("items"), &orig_length, &source_is_dict); + Py_DECREF(fallback); + } + } + if (unlikely(!iter)) goto bad; + } + + while (1) { + result = __Pyx_dict_iter_next(iter, orig_length, &ppos, &key, &value, NULL, source_is_dict); + if (unlikely(result < 0)) goto bad; + if (!result) break; + + if (unlikely(PyDict_Contains(kwdict, key))) { + __Pyx_RaiseDoubleKeywordsError("function", key); + result = -1; + } else { + result = PyDict_SetItem(kwdict, key, value); + } + Py_DECREF(key); + Py_DECREF(value); + if (unlikely(result < 0)) goto bad; + } + Py_XDECREF(iter); + return 0; + +bad: + Py_XDECREF(iter); + return -1; +} diff --git a/contrib/tools/cython/Cython/Utility/ImportExport.c b/contrib/tools/cython/Cython/Utility/ImportExport.c index 786726fd85..532ec326f6 100644 --- a/contrib/tools/cython/Cython/Utility/ImportExport.c +++ b/contrib/tools/cython/Cython/Utility/ImportExport.c @@ -66,7 +66,7 @@ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); + name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( @@ -105,125 +105,125 @@ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { } -/////////////// ImportStar /////////////// -//@substitute: naming - -/* import_all_from is an unexposed function from ceval.c */ - -static int -__Pyx_import_all_from(PyObject *locals, PyObject *v) -{ - PyObject *all = PyObject_GetAttrString(v, "__all__"); - PyObject *dict, *name, *value; - int skip_leading_underscores = 0; - int pos, err; - - if (all == NULL) { - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) - return -1; /* Unexpected 
error */ - PyErr_Clear(); - dict = PyObject_GetAttrString(v, "__dict__"); - if (dict == NULL) { - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) - return -1; - PyErr_SetString(PyExc_ImportError, - "from-import-* object has no __dict__ and no __all__"); - return -1; - } -#if PY_MAJOR_VERSION < 3 - all = PyObject_CallMethod(dict, (char *)"keys", NULL); -#else - all = PyMapping_Keys(dict); -#endif - Py_DECREF(dict); - if (all == NULL) - return -1; - skip_leading_underscores = 1; - } - - for (pos = 0, err = 0; ; pos++) { - name = PySequence_GetItem(all, pos); - if (name == NULL) { - if (!PyErr_ExceptionMatches(PyExc_IndexError)) - err = -1; - else - PyErr_Clear(); - break; - } - if (skip_leading_underscores && -#if PY_MAJOR_VERSION < 3 +/////////////// ImportStar /////////////// +//@substitute: naming + +/* import_all_from is an unexposed function from ceval.c */ + +static int +__Pyx_import_all_from(PyObject *locals, PyObject *v) +{ + PyObject *all = PyObject_GetAttrString(v, "__all__"); + PyObject *dict, *name, *value; + int skip_leading_underscores = 0; + int pos, err; + + if (all == NULL) { + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) + return -1; /* Unexpected error */ + PyErr_Clear(); + dict = PyObject_GetAttrString(v, "__dict__"); + if (dict == NULL) { + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) + return -1; + PyErr_SetString(PyExc_ImportError, + "from-import-* object has no __dict__ and no __all__"); + return -1; + } +#if PY_MAJOR_VERSION < 3 + all = PyObject_CallMethod(dict, (char *)"keys", NULL); +#else + all = PyMapping_Keys(dict); +#endif + Py_DECREF(dict); + if (all == NULL) + return -1; + skip_leading_underscores = 1; + } + + for (pos = 0, err = 0; ; pos++) { + name = PySequence_GetItem(all, pos); + if (name == NULL) { + if (!PyErr_ExceptionMatches(PyExc_IndexError)) + err = -1; + else + PyErr_Clear(); + break; + } + if (skip_leading_underscores && +#if PY_MAJOR_VERSION < 3 likely(PyString_Check(name)) && - PyString_AS_STRING(name)[0] == '_') -#else + PyString_AS_STRING(name)[0] == '_') +#else likely(PyUnicode_Check(name)) && likely(__Pyx_PyUnicode_GET_LENGTH(name)) && __Pyx_PyUnicode_READ_CHAR(name, 0) == '_') -#endif - { - Py_DECREF(name); - continue; - } - value = PyObject_GetAttr(v, name); - if (value == NULL) - err = -1; - else if (PyDict_CheckExact(locals)) - err = PyDict_SetItem(locals, name, value); - else - err = PyObject_SetItem(locals, name, value); - Py_DECREF(name); - Py_XDECREF(value); - if (err != 0) - break; - } - Py_DECREF(all); - return err; -} - - -static int ${import_star}(PyObject* m) { - - int i; - int ret = -1; - char* s; - PyObject *locals = 0; - PyObject *list = 0; -#if PY_MAJOR_VERSION >= 3 - PyObject *utf8_name = 0; -#endif - PyObject *name; - PyObject *item; - - locals = PyDict_New(); if (!locals) goto bad; - if (__Pyx_import_all_from(locals, m) < 0) goto bad; - list = PyDict_Items(locals); if (!list) goto bad; - - for(i=0; i<PyList_GET_SIZE(list); i++) { - name = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 0); - item = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 1); -#if PY_MAJOR_VERSION >= 3 - utf8_name = PyUnicode_AsUTF8String(name); - if (!utf8_name) goto bad; - s = PyBytes_AS_STRING(utf8_name); - if (${import_star_set}(item, name, s) < 0) goto bad; - Py_DECREF(utf8_name); utf8_name = 0; -#else - s = PyString_AsString(name); - if (!s) goto bad; - if (${import_star_set}(item, name, s) < 0) goto bad; -#endif - } - ret = 0; - -bad: - Py_XDECREF(locals); - Py_XDECREF(list); -#if PY_MAJOR_VERSION >= 3 - Py_XDECREF(utf8_name); -#endif - 
return ret; -} - - +#endif + { + Py_DECREF(name); + continue; + } + value = PyObject_GetAttr(v, name); + if (value == NULL) + err = -1; + else if (PyDict_CheckExact(locals)) + err = PyDict_SetItem(locals, name, value); + else + err = PyObject_SetItem(locals, name, value); + Py_DECREF(name); + Py_XDECREF(value); + if (err != 0) + break; + } + Py_DECREF(all); + return err; +} + + +static int ${import_star}(PyObject* m) { + + int i; + int ret = -1; + char* s; + PyObject *locals = 0; + PyObject *list = 0; +#if PY_MAJOR_VERSION >= 3 + PyObject *utf8_name = 0; +#endif + PyObject *name; + PyObject *item; + + locals = PyDict_New(); if (!locals) goto bad; + if (__Pyx_import_all_from(locals, m) < 0) goto bad; + list = PyDict_Items(locals); if (!list) goto bad; + + for(i=0; i<PyList_GET_SIZE(list); i++) { + name = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 0); + item = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 1); +#if PY_MAJOR_VERSION >= 3 + utf8_name = PyUnicode_AsUTF8String(name); + if (!utf8_name) goto bad; + s = PyBytes_AS_STRING(utf8_name); + if (${import_star_set}(item, name, s) < 0) goto bad; + Py_DECREF(utf8_name); utf8_name = 0; +#else + s = PyString_AsString(name); + if (!s) goto bad; + if (${import_star_set}(item, name, s) < 0) goto bad; +#endif + } + ret = 0; + +bad: + Py_XDECREF(locals); + Py_XDECREF(list); +#if PY_MAJOR_VERSION >= 3 + Py_XDECREF(utf8_name); +#endif + return ret; +} + + /////////////// SetPackagePathFromImportLib.proto /////////////// // PY_VERSION_HEX >= 0x03030000 @@ -310,25 +310,25 @@ set_path: /////////////// TypeImport.proto /////////////// -#ifndef __PYX_HAVE_RT_ImportType_proto -#define __PYX_HAVE_RT_ImportType_proto - -enum __Pyx_ImportType_CheckSize { - __Pyx_ImportType_CheckSize_Error = 0, - __Pyx_ImportType_CheckSize_Warn = 1, - __Pyx_ImportType_CheckSize_Ignore = 2 -}; - -static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); /*proto*/ - -#endif - +#ifndef __PYX_HAVE_RT_ImportType_proto +#define __PYX_HAVE_RT_ImportType_proto + +enum __Pyx_ImportType_CheckSize { + __Pyx_ImportType_CheckSize_Error = 0, + __Pyx_ImportType_CheckSize_Warn = 1, + __Pyx_ImportType_CheckSize_Ignore = 2 +}; + +static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); /*proto*/ + +#endif + /////////////// TypeImport /////////////// #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType -static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, - size_t size, enum __Pyx_ImportType_CheckSize check_size) +static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, + size_t size, enum __Pyx_ImportType_CheckSize check_size) { PyObject *result = 0; char warning[200]; @@ -337,7 +337,7 @@ static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, PyObject *py_basicsize; #endif - result = PyObject_GetAttrString(module, class_name); + result = PyObject_GetAttrString(module, class_name); if (!result) goto bad; if (!PyType_Check(result)) { @@ -358,28 +358,28 @@ static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif - if ((size_t)basicsize < size) { - PyErr_Format(PyExc_ValueError, - "%.200s.%.200s size changed, may indicate binary incompatibility. 
" - "Expected %zd from C header, got %zd from PyObject", - module_name, class_name, size, basicsize); - goto bad; + if ((size_t)basicsize < size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + goto bad; } - if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { + if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, - "%.200s.%.200s size changed, may indicate binary incompatibility. " - "Expected %zd from C header, got %zd from PyObject", - module_name, class_name, size, basicsize); + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); goto bad; } - else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { - PyOS_snprintf(warning, sizeof(warning), - "%s.%s size changed, may indicate binary incompatibility. " - "Expected %zd from C header, got %zd from PyObject", - module_name, class_name, size, basicsize); - if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; - } - /* check_size == __Pyx_ImportType_CheckSize_Ignore does not warn nor error */ + else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + /* check_size == __Pyx_ImportType_CheckSize_Ignore does not warn nor error */ return (PyTypeObject *)result; bad: Py_XDECREF(result); diff --git a/contrib/tools/cython/Cython/Utility/MemoryView.pyx b/contrib/tools/cython/Cython/Utility/MemoryView.pyx index 75e434757a..6ca5fab9ba 100644 --- a/contrib/tools/cython/Cython/Utility/MemoryView.pyx +++ b/contrib/tools/cython/Cython/Utility/MemoryView.pyx @@ -2,10 +2,10 @@ # This utility provides cython.array and cython.view.memoryview -from __future__ import absolute_import +from __future__ import absolute_import + +cimport cython -cimport cython - # from cpython cimport ... 
cdef extern from "Python.h": int PyIndex_Check(object) @@ -19,7 +19,7 @@ cdef extern from "pythread.h": int PyThread_acquire_lock(PyThread_type_lock, int mode) nogil void PyThread_release_lock(PyThread_type_lock) nogil -cdef extern from "<string.h>": +cdef extern from "<string.h>": void *memset(void *b, int c, size_t len) cdef extern from *: @@ -33,8 +33,8 @@ cdef extern from *: void* PyMem_Malloc(size_t n) void PyMem_Free(void *p) - void* PyObject_Malloc(size_t n) - void PyObject_Free(void *p) + void* PyObject_Malloc(size_t n) + void PyObject_Free(void *p) cdef struct __pyx_memoryview "__pyx_memoryview_obj": Py_buffer view @@ -64,7 +64,7 @@ cdef extern from *: PyBUF_WRITABLE PyBUF_STRIDES PyBUF_INDIRECT - PyBUF_ND + PyBUF_ND PyBUF_RECORDS PyBUF_RECORDS_RO @@ -83,13 +83,13 @@ cdef extern from *: size_t sizeof_dtype, int contig_flag, bint dtype_is_object) nogil except * bint slice_is_contig "__pyx_memviewslice_is_contig" ( - {{memviewslice_name}} mvs, char order, int ndim) nogil + {{memviewslice_name}} mvs, char order, int ndim) nogil bint slices_overlap "__pyx_slices_overlap" ({{memviewslice_name}} *slice1, {{memviewslice_name}} *slice2, int ndim, size_t itemsize) nogil -cdef extern from "<stdlib.h>": +cdef extern from "<stdlib.h>": void *malloc(size_t) nogil void free(void *) nogil void *memcpy(void *dest, void *src, size_t n) nogil @@ -135,13 +135,13 @@ cdef class array: if itemsize <= 0: raise ValueError("itemsize <= 0 for cython.array") - if not isinstance(format, bytes): - format = format.encode('ASCII') + if not isinstance(format, bytes): + format = format.encode('ASCII') self._format = format # keep a reference to the byte string self.format = self._format # use single malloc() for both shape and strides - self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) + self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) self._strides = self._shape + self.ndim if not self._shape: @@ -216,20 +216,20 @@ cdef class array: refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, False) free(self.data) - PyObject_Free(self._shape) + PyObject_Free(self._shape) - @property - def memview(self): - return self.get_memview() + @property + def memview(self): + return self.get_memview() - @cname('get_memview') - cdef get_memview(self): - flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - return memoryview(self, flags, self.dtype_is_object) + @cname('get_memview') + cdef get_memview(self): + flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + return memoryview(self, flags, self.dtype_is_object) def __len__(self): return self._shape[0] - + def __getattr__(self, attr): return getattr(self.memview, attr) @@ -308,24 +308,24 @@ cdef void *align_pointer(void *memory, size_t alignment) nogil: return <void *> aligned_p - -# pre-allocate thread locks for reuse -## note that this could be implemented in a more beautiful way in "normal" Cython, -## but this code gets merged into the user module and not everything works there. 
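
[Editor's note] The "pre-allocate thread locks" note above describes the lock pool declared in the Cython code just below. A C-level sketch of the same reuse pattern; names are illustrative, and the counter is assumed to be touched only while holding the GIL, as in the Cython original:

#include <Python.h>
#include "pythread.h"

#define LOCK_POOL_SIZE 8

static PyThread_type_lock lock_pool[LOCK_POOL_SIZE];
static int lock_pool_used = 0;

/* Call once at module init; a C static initializer cannot call
 * PyThread_allocate_lock() the way Cython module-level code can. */
static void init_lock_pool(void)
{
    int i;
    for (i = 0; i < LOCK_POOL_SIZE; i++)
        lock_pool[i] = PyThread_allocate_lock();
}

static PyThread_type_lock take_lock(void)
{
    if (lock_pool_used < LOCK_POOL_SIZE)
        return lock_pool[lock_pool_used++];   /* reuse a preallocated lock */
    return PyThread_allocate_lock();          /* pool exhausted: allocate */
}
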
-DEF THREAD_LOCKS_PREALLOCATED = 8 -cdef int __pyx_memoryview_thread_locks_used = 0 -cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ - PyThread_allocate_lock(), - PyThread_allocate_lock(), - PyThread_allocate_lock(), - PyThread_allocate_lock(), - PyThread_allocate_lock(), - PyThread_allocate_lock(), - PyThread_allocate_lock(), - PyThread_allocate_lock(), -] - - + +# pre-allocate thread locks for reuse +## note that this could be implemented in a more beautiful way in "normal" Cython, +## but this code gets merged into the user module and not everything works there. +DEF THREAD_LOCKS_PREALLOCATED = 8 +cdef int __pyx_memoryview_thread_locks_used = 0 +cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ + PyThread_allocate_lock(), + PyThread_allocate_lock(), + PyThread_allocate_lock(), + PyThread_allocate_lock(), + PyThread_allocate_lock(), + PyThread_allocate_lock(), + PyThread_allocate_lock(), + PyThread_allocate_lock(), +] + + @cname('__pyx_memoryview') cdef class memoryview(object): @@ -351,17 +351,17 @@ cdef class memoryview(object): (<__pyx_buffer *> &self.view).obj = Py_None Py_INCREF(Py_None) - global __pyx_memoryview_thread_locks_used - if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: - self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - __pyx_memoryview_thread_locks_used += 1 - if self.lock is NULL: - self.lock = PyThread_allocate_lock() - if self.lock is NULL: - raise MemoryError + global __pyx_memoryview_thread_locks_used + if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: + self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + __pyx_memoryview_thread_locks_used += 1 + if self.lock is NULL: + self.lock = PyThread_allocate_lock() + if self.lock is NULL: + raise MemoryError if flags & PyBUF_FORMAT: - self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') else: self.dtype_is_object = dtype_is_object @@ -372,23 +372,23 @@ cdef class memoryview(object): def __dealloc__(memoryview self): if self.obj is not None: __Pyx_ReleaseBuffer(&self.view) - elif (<__pyx_buffer *> &self.view).obj == Py_None: - # Undo the incref in __cinit__() above. - (<__pyx_buffer *> &self.view).obj = NULL - Py_DECREF(Py_None) + elif (<__pyx_buffer *> &self.view).obj == Py_None: + # Undo the incref in __cinit__() above. 
+ (<__pyx_buffer *> &self.view).obj = NULL + Py_DECREF(Py_None) - cdef int i - global __pyx_memoryview_thread_locks_used + cdef int i + global __pyx_memoryview_thread_locks_used if self.lock != NULL: - for i in range(__pyx_memoryview_thread_locks_used): - if __pyx_memoryview_thread_locks[i] is self.lock: - __pyx_memoryview_thread_locks_used -= 1 - if i != __pyx_memoryview_thread_locks_used: - __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - break - else: - PyThread_free_lock(self.lock) + for i in range(__pyx_memoryview_thread_locks_used): + if __pyx_memoryview_thread_locks[i] is self.lock: + __pyx_memoryview_thread_locks_used -= 1 + if i != __pyx_memoryview_thread_locks_used: + __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + break + else: + PyThread_free_lock(self.lock) cdef char *get_item_pointer(memoryview self, object index) except NULL: cdef Py_ssize_t dim @@ -431,7 +431,7 @@ cdef class memoryview(object): cdef is_slice(self, obj): if not isinstance(obj, memoryview): try: - obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, self.dtype_is_object) except TypeError: return None @@ -519,7 +519,7 @@ cdef class memoryview(object): if flags & PyBUF_WRITABLE and self.view.readonly: raise ValueError("Cannot create writable memory view from read-only memoryview") - if flags & PyBUF_ND: + if flags & PyBUF_ND: info.shape = self.view.shape else: info.shape = NULL @@ -549,58 +549,58 @@ cdef class memoryview(object): __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # Some properties that have the same semantics as in NumPy - @property - def T(self): - cdef _memoryviewslice result = memoryview_copy(self) - transpose_memslice(&result.from_slice) - return result + @property + def T(self): + cdef _memoryviewslice result = memoryview_copy(self) + transpose_memslice(&result.from_slice) + return result - @property - def base(self): - return self.obj + @property + def base(self): + return self.obj - @property - def shape(self): - return tuple([length for length in self.view.shape[:self.view.ndim]]) + @property + def shape(self): + return tuple([length for length in self.view.shape[:self.view.ndim]]) - @property - def strides(self): - if self.view.strides == NULL: - # Note: we always ask for strides, so if this is not set it's a bug - raise ValueError("Buffer view does not expose strides") + @property + def strides(self): + if self.view.strides == NULL: + # Note: we always ask for strides, so if this is not set it's a bug + raise ValueError("Buffer view does not expose strides") - return tuple([stride for stride in self.view.strides[:self.view.ndim]]) + return tuple([stride for stride in self.view.strides[:self.view.ndim]]) - @property - def suboffsets(self): - if self.view.suboffsets == NULL: - return (-1,) * self.view.ndim + @property + def suboffsets(self): + if self.view.suboffsets == NULL: + return (-1,) * self.view.ndim - return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) + return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) - @property - def ndim(self): - return self.view.ndim + 
@property + def ndim(self): + return self.view.ndim - @property - def itemsize(self): - return self.view.itemsize + @property + def itemsize(self): + return self.view.itemsize - @property - def nbytes(self): - return self.size * self.view.itemsize + @property + def nbytes(self): + return self.size * self.view.itemsize - @property - def size(self): - if self._size is None: - result = 1 + @property + def size(self): + if self._size is None: + result = 1 - for length in self.view.shape[:self.view.ndim]: - result *= length + for length in self.view.shape[:self.view.ndim]: + result *= length - self._size = result + self._size = result - return self._size + return self._size def __len__(self): if self.view.ndim >= 1: @@ -620,13 +620,13 @@ cdef class memoryview(object): cdef {{memviewslice_name}} *mslice cdef {{memviewslice_name}} tmp mslice = get_slice_from_memview(self, &tmp) - return slice_is_contig(mslice[0], 'C', self.view.ndim) + return slice_is_contig(mslice[0], 'C', self.view.ndim) def is_f_contig(self): cdef {{memviewslice_name}} *mslice cdef {{memviewslice_name}} tmp mslice = get_slice_from_memview(self, &tmp) - return slice_is_contig(mslice[0], 'F', self.view.ndim) + return slice_is_contig(mslice[0], 'F', self.view.ndim) def copy(self): cdef {{memviewslice_name}} mslice @@ -698,8 +698,8 @@ cdef tuple _unellipsify(object index, int ndim): return have_slices or nslices, tuple(result) cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - for suboffset in suboffsets[:ndim]: - if suboffset >= 0: + for suboffset in suboffsets[:ndim]: + if suboffset >= 0: raise ValueError("Indirect dimensions not supported") # @@ -787,11 +787,11 @@ cdef memoryview memview_slice(memoryview memview, object indices): ### Slicing in a single dimension of a memoryviewslice # -cdef extern from "<stdlib.h>": +cdef extern from "<stdlib.h>": void abort() nogil void printf(char *s, ...) 
nogil -cdef extern from "<stdio.h>": +cdef extern from "<stdio.h>": ctypedef struct FILE FILE *stderr int fputs(char *s, FILE *stream) @@ -988,9 +988,9 @@ cdef class _memoryviewslice(memoryview): else: memoryview.assign_item_from_object(self, itemp, value) - @property - def base(self): - return self.from_object + @property + def base(self): + return self.from_object __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") @@ -1032,16 +1032,16 @@ cdef memoryview_fromslice({{memviewslice_name}} memviewslice, result.view.shape = <Py_ssize_t *> result.from_slice.shape result.view.strides = <Py_ssize_t *> result.from_slice.strides - # only set suboffsets if actually used, otherwise set to NULL to improve compatibility - result.view.suboffsets = NULL - for suboffset in result.from_slice.suboffsets[:ndim]: - if suboffset >= 0: - result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets - break - + # only set suboffsets if actually used, otherwise set to NULL to improve compatibility + result.view.suboffsets = NULL + for suboffset in result.from_slice.suboffsets[:ndim]: + if suboffset >= 0: + result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets + break + result.view.len = result.view.itemsize - for length in result.view.shape[:ndim]: - result.view.len *= length + for length in result.view.shape[:ndim]: + result.view.len *= length result.to_object_func = to_object_func result.to_dtype_func = to_dtype_func @@ -1074,7 +1074,7 @@ cdef void slice_copy(memoryview memview, {{memviewslice_name}} *dst): for dim in range(memview.view.ndim): dst.shape[dim] = shape[dim] dst.strides[dim] = strides[dim] - dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 + dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 @cname('__pyx_memoryview_copy_object') cdef memoryview_copy(memoryview memview): @@ -1238,7 +1238,7 @@ cdef void *copy_data_to_temp({{memviewslice_name}} *src, if tmpslice.shape[i] == 1: tmpslice.strides[i] = 0 - if slice_is_contig(src[0], order, ndim): + if slice_is_contig(src[0], order, ndim): memcpy(result, src.data, size) else: copy_strided_to_strided(src, tmpslice, ndim, itemsize) @@ -1301,7 +1301,7 @@ cdef int memoryview_copy_contents({{memviewslice_name}} src, if slices_overlap(&src, &dst, ndim, itemsize): # slices overlap, copy to temp, copy temp to dst - if not slice_is_contig(src, order, ndim): + if not slice_is_contig(src, order, ndim): order = get_best_order(&dst, ndim) tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) @@ -1310,10 +1310,10 @@ cdef int memoryview_copy_contents({{memviewslice_name}} src, if not broadcasting: # See if both slices have equal contiguity, in that case perform a # direct copy. This only works when we are not broadcasting. 
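The direct-copy decision at this point leans entirely on slice_is_contig, whose C body (__pyx_memviewslice_is_contig) appears further down in this diff: walk the dimensions innermost-first for 'C' order or outermost-first for 'F' order, require every stride to equal the running element size, and let any indirect dimension (suboffset >= 0) disqualify the slice. Detached from the memviewslice struct, the same test as a self-contained sketch:

    #include <stddef.h>
    #include <stdio.h>

    static int is_contig(const ptrdiff_t *shape, const ptrdiff_t *strides,
                         const ptrdiff_t *suboffsets, int ndim,
                         ptrdiff_t itemsize, char order) {
        int step  = (order == 'F') ? 1 : -1;   /* 'C' walks dims reversed */
        int start = (order == 'F') ? 0 : ndim - 1;
        for (int i = 0; i < ndim; i++) {
            int index = start + step * i;
            if (suboffsets[index] >= 0 || strides[index] != itemsize)
                return 0;
            itemsize *= shape[index];  /* grow by this dimension's extent */
        }
        return 1;
    }

    int main(void) {
        /* A 2x3 C-contiguous array of 8-byte items. */
        ptrdiff_t shape[]      = {2, 3};
        ptrdiff_t strides[]    = {24, 8};
        ptrdiff_t suboffsets[] = {-1, -1};
        printf("C contig: %d, F contig: %d\n",
               is_contig(shape, strides, suboffsets, 2, 8, 'C'),
               is_contig(shape, strides, suboffsets, 2, 8, 'F'));
        return 0;  /* prints "C contig: 1, F contig: 0" */
    }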
- if slice_is_contig(src, 'C', ndim): - direct_copy = slice_is_contig(dst, 'C', ndim) - elif slice_is_contig(src, 'F', ndim): - direct_copy = slice_is_contig(dst, 'F', ndim) + if slice_is_contig(src, 'C', ndim): + direct_copy = slice_is_contig(dst, 'C', ndim) + elif slice_is_contig(src, 'F', ndim): + direct_copy = slice_is_contig(dst, 'F', ndim) if direct_copy: # Contiguous slices with same order @@ -1337,21 +1337,21 @@ cdef int memoryview_copy_contents({{memviewslice_name}} src, return 0 @cname('__pyx_memoryview_broadcast_leading') -cdef void broadcast_leading({{memviewslice_name}} *mslice, +cdef void broadcast_leading({{memviewslice_name}} *mslice, int ndim, int ndim_other) nogil: cdef int i cdef int offset = ndim_other - ndim for i in range(ndim - 1, -1, -1): - mslice.shape[i + offset] = mslice.shape[i] - mslice.strides[i + offset] = mslice.strides[i] - mslice.suboffsets[i + offset] = mslice.suboffsets[i] + mslice.shape[i + offset] = mslice.shape[i] + mslice.strides[i + offset] = mslice.strides[i] + mslice.suboffsets[i + offset] = mslice.suboffsets[i] for i in range(offset): - mslice.shape[i] = 1 - mslice.strides[i] = mslice.strides[0] - mslice.suboffsets[i] = -1 + mslice.shape[i] = 1 + mslice.strides[i] = mslice.strides[0] + mslice.suboffsets[i] = -1 # ### Take care of refcounting the objects in slices. Do this separately from any copying, diff --git a/contrib/tools/cython/Cython/Utility/MemoryView_C.c b/contrib/tools/cython/Cython/Utility/MemoryView_C.c index 3c0e8a2f4b..0a5d8ee2c2 100644 --- a/contrib/tools/cython/Cython/Utility/MemoryView_C.c +++ b/contrib/tools/cython/Cython/Utility/MemoryView_C.c @@ -38,16 +38,16 @@ typedef struct { #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif -#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 +#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 /* msvc */ #include <Windows.h> - #undef __pyx_atomic_int_type + #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS - #pragma message ("Using MSVC atomics") + #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) @@ -713,7 +713,7 @@ static int __pyx_memviewslice_is_contig(const {{memviewslice_name}} mvs, char order, int ndim) { int i, index, step, start; - Py_ssize_t itemsize = mvs.memview->view.itemsize; + Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; @@ -725,10 +725,10 @@ __pyx_memviewslice_is_contig(const {{memviewslice_name}} mvs, char order, int nd for (i = 0; i < ndim; i++) { index = start + step * i; - if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) + if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; - itemsize *= mvs.shape[index]; + itemsize *= mvs.shape[index]; } return 1; @@ -752,11 +752,11 @@ __pyx_memviewslice_index_full(const char *bufp, Py_ssize_t idx, /////////////// MemviewDtypeToObject.proto /////////////// {{if to_py_function}} -static CYTHON_INLINE PyObject *{{get_function}}(const char *itemp); /* proto */ +static CYTHON_INLINE PyObject *{{get_function}}(const char *itemp); /* proto */ {{endif}} {{if from_py_function}} -static CYTHON_INLINE int {{set_function}}(const char *itemp, PyObject *obj); /* proto */ +static CYTHON_INLINE int {{set_function}}(const char *itemp, 
PyObject *obj); /* proto */ {{endif}} /////////////// MemviewDtypeToObject /////////////// @@ -766,13 +766,13 @@ static CYTHON_INLINE int {{set_function}}(const char *itemp, PyObject *obj); /* /* Convert a dtype to or from a Python object */ {{if to_py_function}} -static CYTHON_INLINE PyObject *{{get_function}}(const char *itemp) { +static CYTHON_INLINE PyObject *{{get_function}}(const char *itemp) { return (PyObject *) {{to_py_function}}(*({{dtype}} *) itemp); } {{endif}} {{if from_py_function}} -static CYTHON_INLINE int {{set_function}}(const char *itemp, PyObject *obj) { +static CYTHON_INLINE int {{set_function}}(const char *itemp, PyObject *obj) { {{dtype}} value = {{from_py_function}}(obj); if ({{error_condition}}) return 0; @@ -813,7 +813,7 @@ if (unlikely(__pyx_memoryview_slice_memviewslice( {{src}}.shape[{{dim}}], {{src}}.strides[{{dim}}], {{src}}.suboffsets[{{dim}}], {{dim}}, {{new_ndim}}, - &{{get_suboffset_dim()}}, + &{{get_suboffset_dim()}}, {{start}}, {{stop}}, {{step}}, @@ -838,7 +838,7 @@ if (unlikely(__pyx_memoryview_slice_memviewslice( {{else}} {{dst}}.suboffsets[{{new_ndim}}] = {{src}}.suboffsets[{{dim}}]; if ({{src}}.suboffsets[{{dim}}] >= 0) - {{get_suboffset_dim()}} = {{new_ndim}}; + {{get_suboffset_dim()}} = {{new_ndim}}; {{endif}} @@ -849,42 +849,42 @@ if (unlikely(__pyx_memoryview_slice_memviewslice( { Py_ssize_t __pyx_tmp_idx = {{idx}}; - - {{if wraparound or boundscheck}} - Py_ssize_t __pyx_tmp_shape = {{src}}.shape[{{dim}}]; - {{endif}} - + + {{if wraparound or boundscheck}} + Py_ssize_t __pyx_tmp_shape = {{src}}.shape[{{dim}}]; + {{endif}} + Py_ssize_t __pyx_tmp_stride = {{src}}.strides[{{dim}}]; - {{if wraparound}} - if (__pyx_tmp_idx < 0) - __pyx_tmp_idx += __pyx_tmp_shape; - {{endif}} + {{if wraparound}} + if (__pyx_tmp_idx < 0) + __pyx_tmp_idx += __pyx_tmp_shape; + {{endif}} - {{if boundscheck}} + {{if boundscheck}} if (unlikely(!__Pyx_is_valid_index(__pyx_tmp_idx, __pyx_tmp_shape))) { - {{if not have_gil}} - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); - #endif - {{endif}} + {{if not have_gil}} + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); + #endif + {{endif}} - PyErr_SetString(PyExc_IndexError, - "Index out of bounds (axis {{dim}})"); + PyErr_SetString(PyExc_IndexError, + "Index out of bounds (axis {{dim}})"); - {{if not have_gil}} - #ifdef WITH_THREAD - PyGILState_Release(__pyx_gilstate_save); - #endif - {{endif}} + {{if not have_gil}} + #ifdef WITH_THREAD + PyGILState_Release(__pyx_gilstate_save); + #endif + {{endif}} - {{error_goto}} - } - {{endif}} + {{error_goto}} + } + {{endif}} {{if all_dimensions_direct}} {{dst}}.data += __pyx_tmp_idx * __pyx_tmp_stride; {{else}} - if ({{get_suboffset_dim()}} < 0) { + if ({{get_suboffset_dim()}} < 0) { {{dst}}.data += __pyx_tmp_idx * __pyx_tmp_stride; /* This dimension is the first dimension, or is preceded by */ @@ -906,7 +906,7 @@ if (unlikely(__pyx_memoryview_slice_memviewslice( {{endif}} } else { - {{dst}}.suboffsets[{{get_suboffset_dim()}}] += __pyx_tmp_idx * __pyx_tmp_stride; + {{dst}}.suboffsets[{{get_suboffset_dim()}}] += __pyx_tmp_idx * __pyx_tmp_stride; /* Note: dimension can not be indirect, the compiler will have */ /* issued an error */ diff --git a/contrib/tools/cython/Cython/Utility/ModuleSetupCode.c b/contrib/tools/cython/Cython/Utility/ModuleSetupCode.c index 787b87c525..0c7059b354 100644 --- a/contrib/tools/cython/Cython/Utility/ModuleSetupCode.c +++ b/contrib/tools/cython/Cython/Utility/ModuleSetupCode.c @@ -1,9 +1,9 @@ 
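Before the diff moves on to ModuleSetupCode.c, a gloss on the slice-indexing template just above: with wraparound enabled, a negative index is folded by adding the dimension's extent; with boundscheck enabled, the folded index must land in [0, shape), and the template re-acquires the GIL when needed because raising IndexError requires it. The validity test can be done with a single unsigned comparison, which catches both negative and too-large indices at once. A minimal sketch (checked_index is a hypothetical helper, plain C):

    #include <stddef.h>
    #include <stdio.h>

    static int checked_index(ptrdiff_t idx, ptrdiff_t shape,
                             int wraparound, ptrdiff_t *out) {
        if (wraparound && idx < 0)
            idx += shape;                     /* a[-1] -> a[shape-1] */
        if ((size_t)idx >= (size_t)shape)     /* catches <0 and >=shape */
            return -1;                        /* caller raises IndexError */
        *out = idx;
        return 0;
    }

    int main(void) {
        ptrdiff_t i;
        printf("%d\n", checked_index(-1, 5, 1, &i));  /* 0, i == 4 */
        printf("%d\n", checked_index(7, 5, 1, &i));   /* -1: out of bounds */
        return 0;
    }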
/////////////// CModulePreamble /////////////// -#if defined(__GNUC__) || defined(__clang__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wshadow" -#pragma GCC diagnostic ignored "-Wunused-function" +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wshadow" +#pragma GCC diagnostic ignored "-Wunused-function" #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 // Ignore tp_print initializer. Need for ya make -DUSE_SYSTEM_PYTHON=3.8 #pragma GCC diagnostic ignored "-Wdeprecated-declarations" @@ -12,7 +12,7 @@ #include <stddef.h> /* For offsetof */ #ifndef offsetof - #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) @@ -37,13 +37,13 @@ // For use in DL_IMPORT/DL_EXPORT macros. #define __PYX_COMMA , -#ifndef HAVE_LONG_LONG - // CPython has required PY_LONG_LONG support for years, even if HAVE_LONG_LONG is not defined for us +#ifndef HAVE_LONG_LONG + // CPython has required PY_LONG_LONG support for years, even if HAVE_LONG_LONG is not defined for us #if PY_VERSION_HEX >= 0x02070000 - #define HAVE_LONG_LONG - #endif -#endif - + #define HAVE_LONG_LONG + #endif +#endif + #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif @@ -53,12 +53,12 @@ #endif #ifdef PYPY_VERSION - #define CYTHON_COMPILING_IN_PYPY 1 - #define CYTHON_COMPILING_IN_PYSTON 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 @@ -67,84 +67,84 @@ #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define 
CYTHON_USE_EXC_INFO_STACK 0 - -#elif defined(PYSTON_VERSION) - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_PYSTON 1 - #define CYTHON_COMPILING_IN_CPYTHON 0 - - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 - + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #else - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_PYSTON 0 - #define CYTHON_COMPILING_IN_CPYTHON 1 - - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif #if PY_VERSION_HEX < 0x02070000 // looks like calling _PyType_Lookup() isn't safe in Py<=2.6/3.1 #undef CYTHON_USE_PYTYPE_LOOKUP @@ -152,81 +152,81 @@ #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif - #if PY_MAJOR_VERSION < 3 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #if PY_VERSION_HEX < 0x02070000 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - 
#elif !defined(CYTHON_USE_PYLONG_INTERNALS) - #define CYTHON_USE_PYLONG_INTERNALS 1 - #endif - #ifndef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 1 - #endif - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif #if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2 // Python 3.11a2 hid _PyLong_FormatAdvancedWriter and _PyFloat_FormatAdvancedWriter // therefore disable unicode writer until a better alternative appears - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #elif !defined(CYTHON_USE_UNICODE_WRITER) - #define CYTHON_USE_UNICODE_WRITER 1 - #endif - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #ifndef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 1 - #endif - #ifndef CYTHON_FAST_PYCALL + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL // Python 3.11 deleted localplus argument from frame object, which is used in our // fast_pycall code #define CYTHON_FAST_PYCALL (PY_VERSION_HEX < 0x030B00A1) - #endif + #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) + #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif - #ifndef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) - #endif - #ifndef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) - #endif -#endif - -#if !defined(CYTHON_FAST_PYCCALL) -#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) -#endif - -#if CYTHON_USE_PYLONG_INTERNALS + #ifndef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) + #endif + #ifndef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) + #endif +#endif + +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif + +#if CYTHON_USE_PYLONG_INTERNALS #if PY_MAJOR_VERSION < 3 #include "longintrepr.h" #endif - /* These short defines can easily conflict with other code */ - #undef SHIFT - #undef BASE - #undef MASK - /* Compile-time sanity check that these are indeed 
equal. Github issue #2670. */ - #ifdef SIZEOF_VOID_P - enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; - #endif -#endif - + /* These short defines can easily conflict with other code */ + #undef SHIFT + #undef BASE + #undef MASK + /* Compile-time sanity check that these are indeed equal. Github issue #2670. */ + #ifdef SIZEOF_VOID_P + enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; + #endif +#endif + #ifndef __has_attribute #define __has_attribute(x) 0 #endif @@ -383,8 +383,8 @@ class __Pyx_FakeReference { /////////////// PythonCompatibility /////////////// -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) - #define Py_OptimizeFlag 0 +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" @@ -468,31 +468,31 @@ class __Pyx_FakeReference { } return co; } -#else - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ +#else + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#endif +#endif #define __Pyx_DefaultClassType PyType_Type #endif -#ifndef Py_TPFLAGS_CHECKTYPES +#ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 -#endif -#ifndef Py_TPFLAGS_HAVE_INDEX +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif -#ifndef Py_TPFLAGS_HAVE_NEWBUFFER +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif -#ifndef Py_TPFLAGS_HAVE_FINALIZE +#ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif -#ifndef METH_STACKLESS - // already defined for Stackless Python (all versions) and C-Python >= 3.7 - // value if defined: Stackless Python < 3.6: 0x80 else 0x100 - #define METH_STACKLESS 0 -#endif +#ifndef METH_STACKLESS + // already defined for Stackless Python (all versions) and C-Python >= 3.7 + // value if defined: Stackless Python < 3.6: 0x80 else 0x100 + #define METH_STACKLESS 0 +#endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) // new in CPython 3.6, but changed in 3.7 - see // positional-only parameters: @@ -506,29 +506,29 @@ class __Pyx_FakeReference { // new in CPython 3.7, used to be old signature of _PyCFunctionFast() in 3.6 typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); -#else - #define __Pyx_PyCFunctionFast _PyCFunctionFast +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords -#endif -#if CYTHON_FAST_PYCCALL -#define __Pyx_PyFastCFunction_Check(func) \ - ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) -#else -#define __Pyx_PyFastCFunction_Check(func) 0 -#endif - +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func) \ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif + #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif -#if CYTHON_COMPILING_IN_CPYTHON && 
PY_VERSION_HEX < 0x030400A1 - #define PyMem_RawMalloc(n) PyMem_Malloc(n) - #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) - #define PyMem_RawFree(p) PyMem_Free(p) -#endif - +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 + #define PyMem_RawMalloc(n) PyMem_Malloc(n) + #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) + #define PyMem_RawFree(p) PyMem_Free(p) +#endif + #if CYTHON_COMPILING_IN_PYSTON // special C-API functions only in Pyston #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) @@ -557,7 +557,7 @@ class __Pyx_FakeReference { typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); - return 0; /* PyThread_create_key reports success always */ + return 0; /* PyThread_create_key reports success always */ } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); @@ -582,7 +582,7 @@ static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { } // PyThread_delete_key_value(key) is equalivalent to PyThread_set_key_value(key, NULL) // PyThread_ReInitTLS() is a no-op -#endif /* TSS (Thread Specific Storage) API */ +#endif /* TSS (Thread Specific Storage) API */ #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) @@ -618,37 +618,37 @@ static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 // Avoid calling deprecated C-API functions in Py3.9+ that PEP-623 schedules for removal in Py3.12. // https://www.python.org/dev/peps/pep-0623/ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #endif #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) #endif #else #define CYTHON_PEP393_ENABLED 0 - #define PyUnicode_1BYTE_KIND 1 - #define PyUnicode_2BYTE_KIND 2 - #define PyUnicode_4BYTE_KIND 4 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) /* (void)(k) => avoid unused variable warning due to macro: */ #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY @@ -660,21 +660,21 @@ static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) - #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) -#endif - -#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) - #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) -#endif - -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) - #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) -#endif - -// ("..." % x) must call PyNumber_Remainder() if x is a string subclass that implements "__rmod__()". -#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) -#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif + +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif + +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif + +// ("..." % x) must call PyNumber_Remainder() if x is a string subclass that implements "__rmod__()". +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) @@ -682,10 +682,10 @@ static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif -#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) - #define PyObject_ASCII(o) PyObject_Repr(o) -#endif - +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif + #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject @@ -694,7 +694,7 @@ static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { #define PyString_CheckExact PyUnicode_CheckExact // PyPy3 used to define "PyObject_Unicode" #ifndef PyObject_Unicode - #define PyObject_Unicode PyObject_Str + #define PyObject_Unicode PyObject_Str #endif #endif @@ -748,12 +748,12 @@ static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { #define PyBoolObject PyLongObject #endif -#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY - #ifndef PyUnicode_InternFromString - #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) - #endif -#endif - +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif + #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong @@ -765,57 +765,57 @@ static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) -#else - #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif -// backport of PyAsyncMethods from Py3.5 to older Py3.x versions -// (mis-)using the "tp_reserved" type slot which is re-activated as "tp_as_async" in Py3.5 -#if CYTHON_USE_ASYNC_SLOTS - #if PY_VERSION_HEX >= 0x030500B1 - #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods - #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) - #else +// backport of PyAsyncMethods from Py3.5 to older Py3.x versions +// (mis-)using the "tp_reserved" type slot which is re-activated as "tp_as_async" in Py3.5 +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct - typedef struct { - unaryfunc am_await; - unaryfunc am_aiter; - unaryfunc am_anext; - } __Pyx_PyAsyncMethodsStruct; -#endif - - -/////////////// SmallCodeConfig.proto /////////////// - -#ifndef CYTHON_SMALL_CODE -#if defined(__clang__) - #define CYTHON_SMALL_CODE -#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) - #define CYTHON_SMALL_CODE __attribute__((cold)) -#else - #define CYTHON_SMALL_CODE -#endif -#endif - - + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + + +/////////////// SmallCodeConfig.proto /////////////// + +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && 
__GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + + /////////////// PyModInitFuncType.proto /////////////// - + #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC - + #elif PY_MAJOR_VERSION < 3 // Py2: define this to void manually because PyMODINIT_FUNC adds __declspec(dllexport) to it's definition. #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else #define __Pyx_PyMODINIT_FUNC void -#endif - +#endif + #else // Py3+: define this to PyObject * manually because PyMODINIT_FUNC adds __declspec(dllexport) to it's definition. #ifdef __cplusplus @@ -825,7 +825,7 @@ static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { #endif #endif - + /////////////// FastTypeChecks.proto /////////////// #if CYTHON_COMPILING_IN_CPYTHON @@ -913,48 +913,48 @@ static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, // so far, we only call PyErr_GivenExceptionMatches() with an exception type (not instance) as first argument // => optimise for that case -static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - assert(PyExceptionClass_Check(exc_type)); - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - // the tighter subtype checking in Py3 allows faster out-of-order comparison - for (i=0; i<n; i++) { - if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; - } -#endif - for (i=0; i<n; i++) { - PyObject *t = PyTuple_GET_ITEM(tuple, i); - #if PY_MAJOR_VERSION < 3 - if (likely(exc_type == t)) return 1; - #endif - if (likely(PyExceptionClass_Check(t))) { - if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; - } else { - // FIXME: Py3: PyErr_SetString(PyExc_TypeError, "catching classes that do not inherit from BaseException is not allowed"); - } - } - return 0; -} - +static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + assert(PyExceptionClass_Check(exc_type)); + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + // the tighter subtype checking in Py3 allows faster out-of-order comparison + for (i=0; i<n; i++) { + if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; + } +#endif + for (i=0; i<n; i++) { + PyObject *t = PyTuple_GET_ITEM(tuple, i); + #if PY_MAJOR_VERSION < 3 + if (likely(exc_type == t)) return 1; + #endif + if (likely(PyExceptionClass_Check(t))) { + if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; + } else { + // FIXME: Py3: PyErr_SetString(PyExc_TypeError, "catching classes that do not inherit from BaseException is not allowed"); + } + } + return 0; +} + static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { - if (likely(PyExceptionClass_Check(exc_type))) { - return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); - } else if (likely(PyTuple_Check(exc_type))) { - return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); - } else { - // FIXME: Py3: PyErr_SetString(PyExc_TypeError, "catching classes that do not inherit from BaseException is not allowed"); - } + if (likely(PyExceptionClass_Check(exc_type))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); + } else if (likely(PyTuple_Check(exc_type))) { + return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); + } else { + // FIXME: Py3: PyErr_SetString(PyExc_TypeError, 
"catching classes that do not inherit from BaseException is not allowed"); + } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { - // Only used internally with known exception types => pure safety check assertions. - assert(PyExceptionClass_Check(exc_type1)); - assert(PyExceptionClass_Check(exc_type2)); + // Only used internally with known exception types => pure safety check assertions. + assert(PyExceptionClass_Check(exc_type1)); + assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); @@ -965,36 +965,36 @@ static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObj #endif -/////////////// MathInitCode /////////////// - -#if defined(WIN32) || defined(MS_WINDOWS) - #define _USE_MATH_DEFINES -#endif -#include <math.h> - +/////////////// MathInitCode /////////////// + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include <math.h> + #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { - // Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and - // a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is - // a quiet NaN. + // Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and + // a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is + // a quiet NaN. float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif -#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) -#define __Pyx_truncl trunc -#else -#define __Pyx_truncl truncl -#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + - /////////////// UtilityFunctionPredeclarations.proto /////////////// -typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /////////////// ForceInitThreads.proto /////////////// @@ -1015,39 +1015,39 @@ PyEval_InitThreads(); //@substitute: naming //#if CYTHON_PEP489_MULTI_PHASE_INIT -static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { - #if PY_VERSION_HEX >= 0x030700A1 - static PY_INT64_T main_interpreter_id = -1; - PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); - if (main_interpreter_id == -1) { - main_interpreter_id = current_id; - return (unlikely(current_id == -1)) ? 
-1 : 0; - } else if (unlikely(main_interpreter_id != current_id)) - - #else - static PyInterpreterState *main_interpreter = NULL; - PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; - if (!main_interpreter) { - main_interpreter = current_interpreter; - } else if (unlikely(main_interpreter != current_interpreter)) - #endif - - { - PyErr_SetString( - PyExc_ImportError, - "Interpreter change detected - this module can only be loaded into one interpreter per process."); - return -1; - } - return 0; -} - -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? -1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} + +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { - if (allow_none || value != Py_None) { - result = PyDict_SetItemString(moddict, to_name, value); - } + if (allow_none || value != Py_None) { + result = PyDict_SetItemString(moddict, to_name, value); + } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); @@ -1057,12 +1057,12 @@ static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject return result; } -static CYTHON_SMALL_CODE PyObject* ${pymodule_create_func_cname}(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { +static CYTHON_SMALL_CODE PyObject* ${pymodule_create_func_cname}(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; // For now, we only have exactly one module instance. 
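For orientation while this function is being defined: ${pymodule_create_func_cname} is the Py_mod_create half of PEP 489 multi-phase initialization, which is why it receives the module spec and copies __loader__, __file__, __package__ and __path__ out of it. A minimal multi-phase module, as a hypothetical standalone example (the "demo" name and contents are made up; only the API shape matches):

    #include <Python.h>

    static int demo_exec(PyObject *module) {
        /* runs once per module object, after creation */
        return PyModule_AddIntConstant(module, "answer", 42);
    }

    static PyModuleDef_Slot demo_slots[] = {
        {Py_mod_exec, (void *)demo_exec},
        {0, NULL}
    };

    static struct PyModuleDef demo_def = {
        PyModuleDef_HEAD_INIT,
        "demo",                 /* m_name */
        NULL,                   /* m_doc */
        0,                      /* m_size: >= 0 is required for multi-phase init */
        NULL,                   /* m_methods */
        demo_slots,
        NULL, NULL, NULL        /* m_traverse, m_clear, m_free */
    };

    PyMODINIT_FUNC PyInit_demo(void) {
        /* multi-phase init: return the def, not a module instance */
        return PyModuleDef_Init(&demo_def);
    }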
- if (__Pyx_check_single_interpreter()) - return NULL; + if (__Pyx_check_single_interpreter()) + return NULL; if (${module_cname}) return __Pyx_NewRef(${module_cname}); @@ -1077,10 +1077,10 @@ static CYTHON_SMALL_CODE PyObject* ${pymodule_create_func_cname}(PyObject *spec, if (unlikely(!moddict)) goto bad; // moddict is a borrowed reference - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: @@ -1093,7 +1093,7 @@ bad: /////////////// CodeObjectCache.proto /////////////// typedef struct { - PyCodeObject* code_object; + PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; @@ -1119,7 +1119,7 @@ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int co return count; } while (start < end) { - mid = start + (end - start) / 2; + mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { @@ -1318,9 +1318,9 @@ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; - m = PyImport_ImportModule(modname); + m = PyImport_ImportModule(modname); if (!m) goto end; - p = PyObject_GetAttrString(m, "RefNannyAPI"); + p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: @@ -1348,12 +1348,12 @@ if (!__Pyx_RefNanny) { //@substitute: naming static void ${cleanup_cname}(PyObject *self); /*proto*/ - -#if PY_MAJOR_VERSION < 3 || CYTHON_COMPILING_IN_PYPY + +#if PY_MAJOR_VERSION < 3 || CYTHON_COMPILING_IN_PYPY static int __Pyx_RegisterCleanup(void); /*proto*/ -#else -#define __Pyx_RegisterCleanup() (0) -#endif +#else +#define __Pyx_RegisterCleanup() (0) +#endif /////////////// RegisterModuleCleanup /////////////// //@substitute: naming @@ -1386,7 +1386,7 @@ static int __Pyx_RegisterCleanup(void) { if (!cleanup_func) goto bad; - atexit = PyImport_ImportModule("atexit"); + atexit = PyImport_ImportModule("atexit"); if (!atexit) goto bad; reg = PyObject_GetAttrString(atexit, "_exithandlers"); @@ -1528,9 +1528,9 @@ static CYTHON_INLINE PyThreadState *__Pyx_FastGil_get_tcur(void) { static PyGILState_STATE __Pyx_FastGil_PyGILState_Ensure(void) { int current; - PyThreadState *tcur; + PyThreadState *tcur; __Pyx_FastGIL_Remember0(); - tcur = __Pyx_FastGil_get_tcur(); + tcur = __Pyx_FastGil_get_tcur(); if (tcur == NULL) { // Uninitialized, need to initialize now. 
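Also re-added in this hunk is the code-object cache probe __pyx_bisect_code_objects: the entries stay sorted by code_line and lookups use the overflow-safe midpoint start + (end - start) / 2 rather than (start + end) / 2, which could overflow for large bounds. The same probe as a standalone sketch (bisect_left is an illustrative name):

    #include <stdio.h>

    /* Returns the index of target if present, else its insertion point. */
    static int bisect_left(const int *values, int count, int target) {
        int start = 0, end = count;
        while (start < end) {
            int mid = start + (end - start) / 2;   /* never overflows */
            if (target < values[mid])
                end = mid;
            else if (target > values[mid])
                start = mid + 1;
            else
                return mid;
        }
        return start;
    }

    int main(void) {
        int lines[] = {3, 17, 42, 99};
        printf("%d\n", bisect_left(lines, 4, 42));  /* prints 2 */
        return 0;
    }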
return PyGILState_Ensure(); diff --git a/contrib/tools/cython/Cython/Utility/ObjectHandling.c b/contrib/tools/cython/Cython/Utility/ObjectHandling.c index e362b7b7c3..c1b1c60bda 100644 --- a/contrib/tools/cython/Cython/Utility/ObjectHandling.c +++ b/contrib/tools/cython/Cython/Utility/ObjectHandling.c @@ -195,7 +195,7 @@ static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject* iterator, PyObject* // We always do a quick slot check because calling PyIter_Check() is so wasteful. iternextfunc iternext = Py_TYPE(iterator)->tp_iternext; if (likely(iternext)) { -#if CYTHON_USE_TYPE_SLOTS +#if CYTHON_USE_TYPE_SLOTS next = iternext(iterator); if (likely(next)) return next; @@ -238,7 +238,7 @@ static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ // detects an error that occurred in the iterator, it returns -1. static CYTHON_INLINE int __Pyx_IterFinish(void) { -#if CYTHON_FAST_THREAD_STATE +#if CYTHON_FAST_THREAD_STATE PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { @@ -318,7 +318,7 @@ static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { /////////////// DictGetItem.proto /////////////// -#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key);/*proto*/ #define __Pyx_PyObject_Dict_GetItem(obj, name) \ @@ -338,17 +338,17 @@ static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { - if (unlikely(PyTuple_Check(key))) { - // CPython interprets tuples as separate arguments => must wrap them in another tuple. - PyObject* args = PyTuple_Pack(1, key); - if (likely(args)) { - PyErr_SetObject(PyExc_KeyError, args); - Py_DECREF(args); - } - } else { - // Avoid tuple packing if possible. - PyErr_SetObject(PyExc_KeyError, key); - } + if (unlikely(PyTuple_Check(key))) { + // CPython interprets tuples as separate arguments => must wrap them in another tuple. + PyObject* args = PyTuple_Pack(1, key); + if (likely(args)) { + PyErr_SetObject(PyExc_KeyError, args); + Py_DECREF(args); + } + } else { + // Avoid tuple packing if possible. 
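Why the tuple wrap just above is needed: when the value passed to PyErr_SetObject(PyExc_KeyError, v) is a tuple, exception normalization treats it as the full argument list, effectively KeyError(*v), so a tuple key must be packed into a 1-tuple to survive as a single argument. A sketch of the same logic as a free-standing helper (set_key_error is a hypothetical name):

    #include <Python.h>

    static void set_key_error(PyObject *key) {
        if (PyTuple_Check(key)) {
            PyObject *args = PyTuple_Pack(1, key);  /* (key,) */
            if (args) {
                PyErr_SetObject(PyExc_KeyError, args);
                Py_DECREF(args);
            }
            /* if PyTuple_Pack failed, its MemoryError is already set */
        } else {
            PyErr_SetObject(PyExc_KeyError, key);   /* cheap path */
        }
    }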
+ PyErr_SetObject(PyExc_KeyError, key); + } } return NULL; } @@ -391,14 +391,14 @@ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { {{for type in ['List', 'Tuple']}} static CYTHON_INLINE PyObject *__Pyx_GetItemInt_{{type}}_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += Py{{type}}_GET_SIZE(o); } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, Py{{type}}_GET_SIZE(o)))) { + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, Py{{type}}_GET_SIZE(o)))) { PyObject *r = Py{{type}}_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; @@ -410,13 +410,13 @@ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_{{type}}_Fast(PyObject *o, Py_ss } {{endfor}} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); - if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { + if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; @@ -424,7 +424,7 @@ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { + if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; @@ -439,9 +439,9 @@ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, i += l; } else { // if length > max(Py_ssize_t), maybe the object can wrap around itself? - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; - PyErr_Clear(); + PyErr_Clear(); } } return m->sq_item(o, i); @@ -477,12 +477,12 @@ static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { return r; } -static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, - CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, + CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? 
i : i + PyList_GET_SIZE(o)); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o)))) { + if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o)))) { PyObject* old = PyList_GET_ITEM(o, n); Py_INCREF(v); PyList_SET_ITEM(o, n, v); @@ -499,9 +499,9 @@ static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObje i += l; } else { // if length > max(Py_ssize_t), maybe the object can wrap around itself? - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return -1; - PyErr_Clear(); + PyErr_Clear(); } } return m->sq_ass_item(o, i, v); @@ -509,11 +509,11 @@ static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObje } #else #if CYTHON_COMPILING_IN_PYPY - if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) + if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) #else - if (is_list || PySequence_Check(o)) + if (is_list || PySequence_Check(o)) #endif - { + { return PySequence_SetItem(o, i, v); } #endif @@ -531,7 +531,7 @@ static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObje static int __Pyx_DelItem_Generic(PyObject *o, PyObject *j); static CYTHON_INLINE int __Pyx_DelItemInt_Fast(PyObject *o, Py_ssize_t i, - int is_list, int wraparound); + int is_list, int wraparound); /////////////// DelItemInt /////////////// @@ -544,8 +544,8 @@ static int __Pyx_DelItem_Generic(PyObject *o, PyObject *j) { } static CYTHON_INLINE int __Pyx_DelItemInt_Fast(PyObject *o, Py_ssize_t i, - CYTHON_UNUSED int is_list, CYTHON_NCP_UNUSED int wraparound) { -#if !CYTHON_USE_TYPE_SLOTS + CYTHON_UNUSED int is_list, CYTHON_NCP_UNUSED int wraparound) { +#if !CYTHON_USE_TYPE_SLOTS if (is_list || PySequence_Check(o)) { return PySequence_DelItem(o, i); } @@ -559,9 +559,9 @@ static CYTHON_INLINE int __Pyx_DelItemInt_Fast(PyObject *o, Py_ssize_t i, i += l; } else { // if length > max(Py_ssize_t), maybe the object can wrap around itself? - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return -1; - PyErr_Clear(); + PyErr_Clear(); } } return m->sq_ass_item(o, i, (PyObject *)NULL); @@ -593,14 +593,14 @@ static CYTHON_INLINE int __Pyx_PyObject_SetSlice( /////////////// SliceObject /////////////// {{if access == 'Get'}} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, {{else}} -static CYTHON_INLINE int __Pyx_PyObject_SetSlice(PyObject* obj, PyObject* value, +static CYTHON_INLINE int __Pyx_PyObject_SetSlice(PyObject* obj, PyObject* value, {{endif}} - Py_ssize_t cstart, Py_ssize_t cstop, + Py_ssize_t cstart, Py_ssize_t cstop, PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) { -#if CYTHON_USE_TYPE_SLOTS +#if CYTHON_USE_TYPE_SLOTS PyMappingMethods* mp; #if PY_MAJOR_VERSION < 3 PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; @@ -632,9 +632,9 @@ static CYTHON_INLINE int __Pyx_PyObject_SetSlice(PyObject* obj, PyObject* value, } } else { // if length > max(Py_ssize_t), maybe the object can wrap around itself? 
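The pattern repeated around this comment: to honour negative indices through the sequence protocol, ask the type's sq_length slot for the length, but if the length itself overflows Py_ssize_t, swallow only the OverflowError and hand the negative index through untranslated, on the theory that the object may handle it itself. A standalone rendering of the set-item variant (a sketch that raises TypeError where the generated code has further fallbacks):

    #include <Python.h>

    static int set_item_with_wraparound(PyObject *o, Py_ssize_t i, PyObject *v) {
        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
        if (!m || !m->sq_ass_item) {
            PyErr_SetString(PyExc_TypeError,
                            "object does not support item assignment");
            return -1;
        }
        if (i < 0 && m->sq_length) {
            Py_ssize_t l = m->sq_length(o);
            if (l >= 0) {
                i += l;                      /* fold the negative index */
            } else {
                if (!PyErr_ExceptionMatches(PyExc_OverflowError))
                    return -1;               /* a real error: propagate */
                PyErr_Clear();               /* huge sequence: let sq_ass_item cope */
            }
        }
        return m->sq_ass_item(o, i, v);
    }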
- if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) goto bad; - PyErr_Clear(); + PyErr_Clear(); } } {{if access == 'Get'}} @@ -686,7 +686,7 @@ static CYTHON_INLINE int __Pyx_PyObject_SetSlice(PyObject* obj, PyObject* value, Py_XDECREF(owned_stop); if (unlikely(!py_slice)) goto bad; } -#if CYTHON_USE_TYPE_SLOTS +#if CYTHON_USE_TYPE_SLOTS {{if access == 'Get'}} result = mp->mp_subscript(obj, py_slice); #else @@ -836,11 +836,11 @@ static PyObject *__Pyx_FindInheritedMetaclass(PyObject *bases) { PyObject *metaclass; if (PyTuple_Check(bases) && PyTuple_GET_SIZE(bases) > 0) { PyTypeObject *metatype; -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS PyObject *base = PyTuple_GET_ITEM(bases, 0); -#else - PyObject *base = PySequence_ITEM(bases, 0); -#endif +#else + PyObject *base = PySequence_ITEM(bases, 0); +#endif #if PY_MAJOR_VERSION < 3 PyObject* basetype = __Pyx_PyObject_GetAttrStr(base, PYIDENT("__class__")); if (basetype) { @@ -855,9 +855,9 @@ static PyObject *__Pyx_FindInheritedMetaclass(PyObject *bases) { metatype = Py_TYPE(base); #endif metaclass = __Pyx_CalculateMetaclass(metatype, bases); -#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_DECREF(base); -#endif +#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF(base); +#endif #if PY_MAJOR_VERSION < 3 Py_DECREF(basetype); #endif @@ -1039,7 +1039,7 @@ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { /////////////// CallableCheck.proto /////////////// -#if CYTHON_USE_TYPE_SLOTS && PY_MAJOR_VERSION >= 3 +#if CYTHON_USE_TYPE_SLOTS && PY_MAJOR_VERSION >= 3 #define __Pyx_PyCallable_Check(obj) (Py_TYPE(obj)->tp_call != NULL) #else #define __Pyx_PyCallable_Check(obj) PyCallable_Check(obj) @@ -1047,7 +1047,7 @@ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { /////////////// PyDictContains.proto /////////////// -static CYTHON_INLINE int __Pyx_PyDict_ContainsTF(PyObject* item, PyObject* dict, int eq) { +static CYTHON_INLINE int __Pyx_PyDict_ContainsTF(PyObject* item, PyObject* dict, int eq) { int result = PyDict_Contains(dict, item); return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); } @@ -1085,7 +1085,7 @@ static CYTHON_INLINE int __Pyx_PySet_ContainsTF(PyObject* key, PyObject* set, in /////////////// PySequenceContains.proto /////////////// -static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) { +static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) { int result = PySequence_Contains(seq, item); return unlikely(result < 0) ? 
result : (result == (eq == Py_EQ)); } @@ -1119,8 +1119,8 @@ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { /////////////// GetNameInClass.proto /////////////// -#define __Pyx_GetNameInClass(var, nmspace, name) (var) = __Pyx__GetNameInClass(nmspace, name) -static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name); /*proto*/ +#define __Pyx_GetNameInClass(var, nmspace, name) (var) = __Pyx__GetNameInClass(nmspace, name) +static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name); /*proto*/ /////////////// GetNameInClass /////////////// //@requires: PyObjectGetAttrStr @@ -1130,17 +1130,17 @@ static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name); /*pro //@requires: Exceptions.c::PyErrExceptionMatches static PyObject *__Pyx_GetGlobalNameAfterAttributeLookup(PyObject *name) { - PyObject *result; + PyObject *result; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); - __Pyx_GetModuleGlobalNameUncached(result, name); - return result; + __Pyx_GetModuleGlobalNameUncached(result, name); + return result; } -static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name) { +static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name) { PyObject *result; result = __Pyx_PyObject_GetAttrStr(nmspace, name); if (!result) { @@ -1165,67 +1165,67 @@ static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name) { /////////////// GetModuleGlobalName.proto /////////////// -//@requires: PyDictVersioning -//@substitute: naming - -#if CYTHON_USE_DICT_VERSIONS -#define __Pyx_GetModuleGlobalName(var, name) { \ - static PY_UINT64_T __pyx_dict_version = 0; \ - static PyObject *__pyx_dict_cached_value = NULL; \ - (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION($moddict_cname))) ? \ - (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) : \ - __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value); \ -} -#define __Pyx_GetModuleGlobalNameUncached(var, name) { \ - PY_UINT64_T __pyx_dict_version; \ - PyObject *__pyx_dict_cached_value; \ - (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value); \ -} -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); /*proto*/ -#else -#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) -#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); /*proto*/ -#endif - - +//@requires: PyDictVersioning +//@substitute: naming + +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) { \ + static PY_UINT64_T __pyx_dict_version = 0; \ + static PyObject *__pyx_dict_cached_value = NULL; \ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION($moddict_cname))) ? \ + (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) : \ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value); \ +} +#define __Pyx_GetModuleGlobalNameUncached(var, name) { \ + PY_UINT64_T __pyx_dict_version; \ + PyObject *__pyx_dict_cached_value; \ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value); \ +} +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); /*proto*/ +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); /*proto*/ +#endif + + /////////////// GetModuleGlobalName /////////////// //@requires: GetBuiltinName //@substitute: naming -#if CYTHON_USE_DICT_VERSIONS -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) -#else -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) -#endif -{ +#if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ PyObject *result; -#if !CYTHON_AVOID_BORROWED_REFS +#if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 // Identifier names are always interned and have a pre-calculated hash value. result = _PyDict_GetItem_KnownHash($moddict_cname, name, ((PyASCIIObject *) name)->hash); - __PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version) + __PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version) if (likely(result)) { - return __Pyx_NewRef(result); + return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { - return NULL; - } + return NULL; + } #else result = PyDict_GetItem($moddict_cname, name); - __PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version) + __PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version) if (likely(result)) { - return __Pyx_NewRef(result); - } + return __Pyx_NewRef(result); + } #endif #else result = PyObject_GetItem($moddict_cname, name); - __PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } - PyErr_Clear(); -#endif - return __Pyx_GetBuiltinName(name); + __PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); } //////////////////// GetAttr.proto //////////////////// @@ -1398,7 +1398,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, P /////////////// PyObjectGetAttrStr.proto /////////////// -#if CYTHON_USE_TYPE_SLOTS +#if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);/*proto*/ #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) @@ -1422,7 +1422,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject /////////////// PyObjectSetAttrStr.proto /////////////// -#if CYTHON_USE_TYPE_SLOTS +#if CYTHON_USE_TYPE_SLOTS #define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, 
NULL) static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value);/*proto*/ #else @@ -1446,220 +1446,220 @@ static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr #endif -/////////////// PyObjectGetMethod.proto /////////////// - -static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);/*proto*/ - -/////////////// PyObjectGetMethod /////////////// -//@requires: PyObjectGetAttrStr - -static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) { - PyObject *attr; -#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP - // Copied from _PyObject_GetMethod() in CPython 3.7 - PyTypeObject *tp = Py_TYPE(obj); - PyObject *descr; - descrgetfunc f = NULL; - PyObject **dictptr, *dict; - int meth_found = 0; - - assert (*method == NULL); - - if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) { - attr = __Pyx_PyObject_GetAttrStr(obj, name); - goto try_unpack; - } - if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) { - return 0; - } - - descr = _PyType_Lookup(tp, name); - if (likely(descr != NULL)) { - Py_INCREF(descr); - // Repeating the condition below accommodates for MSVC's inability to test macros inside of macro expansions. -#if PY_MAJOR_VERSION >= 3 - #ifdef __Pyx_CyFunction_USED - if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr))) - #else - if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type))) - #endif -#else - // "PyMethodDescr_Type" is not part of the C-API in Py2. - #ifdef __Pyx_CyFunction_USED - if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr))) - #else - if (likely(PyFunction_Check(descr))) - #endif -#endif - { - meth_found = 1; - } else { - f = Py_TYPE(descr)->tp_descr_get; - if (f != NULL && PyDescr_IsData(descr)) { - attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); - Py_DECREF(descr); - goto try_unpack; - } - } - } - - dictptr = _PyObject_GetDictPtr(obj); - if (dictptr != NULL && (dict = *dictptr) != NULL) { - Py_INCREF(dict); - attr = __Pyx_PyDict_GetItemStr(dict, name); - if (attr != NULL) { - Py_INCREF(attr); - Py_DECREF(dict); - Py_XDECREF(descr); - goto try_unpack; - } - Py_DECREF(dict); - } - - if (meth_found) { - *method = descr; - return 1; - } - - if (f != NULL) { - attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); - Py_DECREF(descr); - goto try_unpack; - } - - if (descr != NULL) { - *method = descr; - return 0; - } - - PyErr_Format(PyExc_AttributeError, -#if PY_MAJOR_VERSION >= 3 - "'%.50s' object has no attribute '%U'", - tp->tp_name, name); -#else - "'%.50s' object has no attribute '%.400s'", - tp->tp_name, PyString_AS_STRING(name)); -#endif - return 0; - -// Generic fallback implementation using normal attribute lookup. -#else - attr = __Pyx_PyObject_GetAttrStr(obj, name); - goto try_unpack; -#endif - -try_unpack: -#if CYTHON_UNPACK_METHODS - // Even if we failed to avoid creating a bound method object, it's still worth unpacking it now, if possible. 
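/*
 * [Editor's note -- illustrative sketch, not part of this diff.] The
 * "try_unpack" step below can be reproduced with public C-API calls
 * alone: given an attribute that turned out to be a method bound to
 * `obj`, strip the wrapper and keep only the underlying function, so the
 * caller can later pass `obj` explicitly and avoid allocating a fresh
 * bound-method object on every call. The helper name is the editor's own:
 */
#include <Python.h>

/* Returns 1 and stores the plain function if `attr` was bound to `obj`,
 * else returns 0 and stores `attr` unchanged. Steals the reference to
 * `attr`; the caller owns the reference stored into *method. */
static int unpack_bound_method(PyObject *obj, PyObject *attr, PyObject **method) {
    if (attr && PyMethod_Check(attr) && PyMethod_GET_SELF(attr) == obj) {
        PyObject *function = PyMethod_GET_FUNCTION(attr);  /* borrowed */
        Py_INCREF(function);
        Py_DECREF(attr);
        *method = function;
        return 1;
    }
    *method = attr;
    return 0;
}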
- if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) { - PyObject *function = PyMethod_GET_FUNCTION(attr); - Py_INCREF(function); - Py_DECREF(attr); - *method = function; - return 1; - } -#endif - *method = attr; - return 0; -} - - -/////////////// UnpackUnboundCMethod.proto /////////////// - -typedef struct { - PyObject *type; - PyObject **method_name; - // "func" is set on first access (direct C function pointer) - PyCFunction func; - // "method" is set on first access (fallback) - PyObject *method; - int flag; -} __Pyx_CachedCFunction; - -/////////////// UnpackUnboundCMethod /////////////// -//@requires: PyObjectGetAttrStr - -static int __Pyx_TryUnpackUnboundCMethod(__Pyx_CachedCFunction* target) { - PyObject *method; - method = __Pyx_PyObject_GetAttrStr(target->type, *target->method_name); - if (unlikely(!method)) - return -1; - target->method = method; -#if CYTHON_COMPILING_IN_CPYTHON - #if PY_MAJOR_VERSION >= 3 - // method dscriptor type isn't exported in Py2.x, cannot easily check the type there +/////////////// PyObjectGetMethod.proto /////////////// + +static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);/*proto*/ + +/////////////// PyObjectGetMethod /////////////// +//@requires: PyObjectGetAttrStr + +static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) { + PyObject *attr; +#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP + // Copied from _PyObject_GetMethod() in CPython 3.7 + PyTypeObject *tp = Py_TYPE(obj); + PyObject *descr; + descrgetfunc f = NULL; + PyObject **dictptr, *dict; + int meth_found = 0; + + assert (*method == NULL); + + if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) { + attr = __Pyx_PyObject_GetAttrStr(obj, name); + goto try_unpack; + } + if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) { + return 0; + } + + descr = _PyType_Lookup(tp, name); + if (likely(descr != NULL)) { + Py_INCREF(descr); + // Repeating the condition below accommodates for MSVC's inability to test macros inside of macro expansions. +#if PY_MAJOR_VERSION >= 3 + #ifdef __Pyx_CyFunction_USED + if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr))) + #else + if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type))) + #endif +#else + // "PyMethodDescr_Type" is not part of the C-API in Py2. 
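/*
 * [Editor's note, not part of this diff:] the descriptor branch above
 * mirrors CPython's generic attribute lookup order -- a *data* descriptor
 * found on the type (tp_descr_get present together with PyDescr_IsData)
 * always wins over the instance __dict__, while a non-data descriptor
 * such as a plain function is consulted only after the instance dict
 * misses. Only plain functions and method descriptors can be left
 * "unpacked" here and called later with an explicit self argument.
 */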
+ #ifdef __Pyx_CyFunction_USED + if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr))) + #else + if (likely(PyFunction_Check(descr))) + #endif +#endif + { + meth_found = 1; + } else { + f = Py_TYPE(descr)->tp_descr_get; + if (f != NULL && PyDescr_IsData(descr)) { + attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); + Py_DECREF(descr); + goto try_unpack; + } + } + } + + dictptr = _PyObject_GetDictPtr(obj); + if (dictptr != NULL && (dict = *dictptr) != NULL) { + Py_INCREF(dict); + attr = __Pyx_PyDict_GetItemStr(dict, name); + if (attr != NULL) { + Py_INCREF(attr); + Py_DECREF(dict); + Py_XDECREF(descr); + goto try_unpack; + } + Py_DECREF(dict); + } + + if (meth_found) { + *method = descr; + return 1; + } + + if (f != NULL) { + attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); + Py_DECREF(descr); + goto try_unpack; + } + + if (descr != NULL) { + *method = descr; + return 0; + } + + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'%.50s' object has no attribute '%U'", + tp->tp_name, name); +#else + "'%.50s' object has no attribute '%.400s'", + tp->tp_name, PyString_AS_STRING(name)); +#endif + return 0; + +// Generic fallback implementation using normal attribute lookup. +#else + attr = __Pyx_PyObject_GetAttrStr(obj, name); + goto try_unpack; +#endif + +try_unpack: +#if CYTHON_UNPACK_METHODS + // Even if we failed to avoid creating a bound method object, it's still worth unpacking it now, if possible. + if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) { + PyObject *function = PyMethod_GET_FUNCTION(attr); + Py_INCREF(function); + Py_DECREF(attr); + *method = function; + return 1; + } +#endif + *method = attr; + return 0; +} + + +/////////////// UnpackUnboundCMethod.proto /////////////// + +typedef struct { + PyObject *type; + PyObject **method_name; + // "func" is set on first access (direct C function pointer) + PyCFunction func; + // "method" is set on first access (fallback) + PyObject *method; + int flag; +} __Pyx_CachedCFunction; + +/////////////// UnpackUnboundCMethod /////////////// +//@requires: PyObjectGetAttrStr + +static int __Pyx_TryUnpackUnboundCMethod(__Pyx_CachedCFunction* target) { + PyObject *method; + method = __Pyx_PyObject_GetAttrStr(target->type, *target->method_name); + if (unlikely(!method)) + return -1; + target->method = method; +#if CYTHON_COMPILING_IN_CPYTHON + #if PY_MAJOR_VERSION >= 3 + // method dscriptor type isn't exported in Py2.x, cannot easily check the type there if (likely(__Pyx_TypeCheck(method, &PyMethodDescr_Type))) - #endif - { - PyMethodDescrObject *descr = (PyMethodDescrObject*) method; - target->func = descr->d_method->ml_meth; - target->flag = descr->d_method->ml_flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_STACKLESS); - } -#endif - return 0; -} - - -/////////////// CallUnboundCMethod0.proto /////////////// -//@substitute: naming - -static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self); /*proto*/ -#if CYTHON_COMPILING_IN_CPYTHON + #endif + { + PyMethodDescrObject *descr = (PyMethodDescrObject*) method; + target->func = descr->d_method->ml_meth; + target->flag = descr->d_method->ml_flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_STACKLESS); + } +#endif + return 0; +} + + +/////////////// CallUnboundCMethod0.proto /////////////// +//@substitute: naming + +static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self); /*proto*/ +#if CYTHON_COMPILING_IN_CPYTHON // FASTCALL methods receive "&empty_tuple" as 
simple "PyObject[0]*" -#define __Pyx_CallUnboundCMethod0(cfunc, self) \ +#define __Pyx_CallUnboundCMethod0(cfunc, self) \ (likely((cfunc)->func) ? \ - (likely((cfunc)->flag == METH_NOARGS) ? (*((cfunc)->func))(self, NULL) : \ + (likely((cfunc)->flag == METH_NOARGS) ? (*((cfunc)->func))(self, NULL) : \ (PY_VERSION_HEX >= 0x030600B1 && likely((cfunc)->flag == METH_FASTCALL) ? \ (PY_VERSION_HEX >= 0x030700A0 ? \ - (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0) : \ - (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0, NULL)) : \ + (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0) : \ + (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0, NULL)) : \ (PY_VERSION_HEX >= 0x030700A0 && (cfunc)->flag == (METH_FASTCALL | METH_KEYWORDS) ? \ - (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0, NULL) : \ - (likely((cfunc)->flag == (METH_VARARGS | METH_KEYWORDS)) ? ((*(PyCFunctionWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, $empty_tuple, NULL)) : \ + (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0, NULL) : \ + (likely((cfunc)->flag == (METH_VARARGS | METH_KEYWORDS)) ? ((*(PyCFunctionWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, $empty_tuple, NULL)) : \ ((cfunc)->flag == METH_VARARGS ? (*((cfunc)->func))(self, $empty_tuple) : \ __Pyx__CallUnboundCMethod0(cfunc, self)))))) : \ - __Pyx__CallUnboundCMethod0(cfunc, self)) -#else -#define __Pyx_CallUnboundCMethod0(cfunc, self) __Pyx__CallUnboundCMethod0(cfunc, self) -#endif - -/////////////// CallUnboundCMethod0 /////////////// -//@requires: UnpackUnboundCMethod -//@requires: PyObjectCall - -static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self) { - PyObject *args, *result = NULL; - if (unlikely(!cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; -#if CYTHON_ASSUME_SAFE_MACROS - args = PyTuple_New(1); - if (unlikely(!args)) goto bad; - Py_INCREF(self); - PyTuple_SET_ITEM(args, 0, self); -#else - args = PyTuple_Pack(1, self); - if (unlikely(!args)) goto bad; -#endif - result = __Pyx_PyObject_Call(cfunc->method, args, NULL); - Py_DECREF(args); -bad: - return result; -} - - -/////////////// CallUnboundCMethod1.proto /////////////// - + __Pyx__CallUnboundCMethod0(cfunc, self)) +#else +#define __Pyx_CallUnboundCMethod0(cfunc, self) __Pyx__CallUnboundCMethod0(cfunc, self) +#endif + +/////////////// CallUnboundCMethod0 /////////////// +//@requires: UnpackUnboundCMethod +//@requires: PyObjectCall + +static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self) { + PyObject *args, *result = NULL; + if (unlikely(!cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; +#if CYTHON_ASSUME_SAFE_MACROS + args = PyTuple_New(1); + if (unlikely(!args)) goto bad; + Py_INCREF(self); + PyTuple_SET_ITEM(args, 0, self); +#else + args = PyTuple_Pack(1, self); + if (unlikely(!args)) goto bad; +#endif + result = __Pyx_PyObject_Call(cfunc->method, args, NULL); + Py_DECREF(args); +bad: + return result; +} + + +/////////////// CallUnboundCMethod1.proto /////////////// + static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg);/*proto*/ - -#if CYTHON_COMPILING_IN_CPYTHON + +#if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* 
__Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg);/*proto*/ -#else -#define __Pyx_CallUnboundCMethod1(cfunc, self, arg) __Pyx__CallUnboundCMethod1(cfunc, self, arg) -#endif - -/////////////// CallUnboundCMethod1 /////////////// -//@requires: UnpackUnboundCMethod -//@requires: PyObjectCall - +#else +#define __Pyx_CallUnboundCMethod1(cfunc, self, arg) __Pyx__CallUnboundCMethod1(cfunc, self, arg) +#endif + +/////////////// CallUnboundCMethod1 /////////////// +//@requires: UnpackUnboundCMethod +//@requires: PyObjectCall + #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg) { if (likely(cfunc->func)) { @@ -1669,51 +1669,51 @@ static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* return (*(cfunc->func))(self, arg); } else if (PY_VERSION_HEX >= 0x030600B1 && flag == METH_FASTCALL) { if (PY_VERSION_HEX >= 0x030700A0) { - return (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)cfunc->func)(self, &arg, 1); + return (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)cfunc->func)(self, &arg, 1); } else { - return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, &arg, 1, NULL); + return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, &arg, 1, NULL); } } else if (PY_VERSION_HEX >= 0x030700A0 && flag == (METH_FASTCALL | METH_KEYWORDS)) { - return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, &arg, 1, NULL); + return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, &arg, 1, NULL); } } return __Pyx__CallUnboundCMethod1(cfunc, self, arg); } #endif -static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg){ - PyObject *args, *result = NULL; +static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg){ + PyObject *args, *result = NULL; if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; -#if CYTHON_COMPILING_IN_CPYTHON - if (cfunc->func && (cfunc->flag & METH_VARARGS)) { - args = PyTuple_New(1); - if (unlikely(!args)) goto bad; - Py_INCREF(arg); - PyTuple_SET_ITEM(args, 0, arg); - if (cfunc->flag & METH_KEYWORDS) - result = (*(PyCFunctionWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, NULL); - else - result = (*cfunc->func)(self, args); - } else { - args = PyTuple_New(2); - if (unlikely(!args)) goto bad; - Py_INCREF(self); - PyTuple_SET_ITEM(args, 0, self); - Py_INCREF(arg); - PyTuple_SET_ITEM(args, 1, arg); - result = __Pyx_PyObject_Call(cfunc->method, args, NULL); - } -#else - args = PyTuple_Pack(2, self, arg); - if (unlikely(!args)) goto bad; - result = __Pyx_PyObject_Call(cfunc->method, args, NULL); -#endif -bad: - Py_XDECREF(args); - return result; -} - - +#if CYTHON_COMPILING_IN_CPYTHON + if (cfunc->func && (cfunc->flag & METH_VARARGS)) { + args = PyTuple_New(1); + if (unlikely(!args)) goto bad; + Py_INCREF(arg); + PyTuple_SET_ITEM(args, 0, arg); + if (cfunc->flag & METH_KEYWORDS) + result = (*(PyCFunctionWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, NULL); + else + result = (*cfunc->func)(self, args); + } else { + args = PyTuple_New(2); + if (unlikely(!args)) goto bad; + Py_INCREF(self); + PyTuple_SET_ITEM(args, 0, self); + Py_INCREF(arg); + PyTuple_SET_ITEM(args, 1, arg); + result = __Pyx_PyObject_Call(cfunc->method, args, NULL); + } +#else + args = PyTuple_Pack(2, self, 
arg); + if (unlikely(!args)) goto bad; + result = __Pyx_PyObject_Call(cfunc->method, args, NULL); +#endif +bad: + Py_XDECREF(args); + return result; +} + + /////////////// CallUnboundCMethod2.proto /////////////// static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2); /*proto*/ @@ -1734,14 +1734,14 @@ static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction * PyObject *args[2] = {arg1, arg2}; if (cfunc->flag == METH_FASTCALL) { #if PY_VERSION_HEX >= 0x030700A0 - return (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)cfunc->func)(self, args, 2); + return (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)cfunc->func)(self, args, 2); #else - return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, 2, NULL); + return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, 2, NULL); #endif } #if PY_VERSION_HEX >= 0x030700A0 if (cfunc->flag == (METH_FASTCALL | METH_KEYWORDS)) - return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, 2, NULL); + return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, 2, NULL); #endif } return __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2); @@ -1760,7 +1760,7 @@ static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObje Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); if (cfunc->flag & METH_KEYWORDS) - result = (*(PyCFunctionWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, NULL); + result = (*(PyCFunctionWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, NULL); else result = (*cfunc->func)(self, args); } else { @@ -1790,19 +1790,19 @@ bad: static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); /*proto*/ /////////////// PyObjectCallMethod0 /////////////// -//@requires: PyObjectGetMethod +//@requires: PyObjectGetMethod //@requires: PyObjectCallOneArg //@requires: PyObjectCallNoArg static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) { - PyObject *method = NULL, *result = NULL; - int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); - if (likely(is_method)) { - result = __Pyx_PyObject_CallOneArg(method, obj); - Py_DECREF(method); - return result; - } - if (unlikely(!method)) goto bad; + PyObject *method = NULL, *result = NULL; + int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); + if (likely(is_method)) { + result = __Pyx_PyObject_CallOneArg(method, obj); + Py_DECREF(method); + return result; + } + if (unlikely(!method)) goto bad; result = __Pyx_PyObject_CallNoArg(method); Py_DECREF(method); bad: @@ -1815,27 +1815,27 @@ bad: static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg); /*proto*/ /////////////// PyObjectCallMethod1 /////////////// -//@requires: PyObjectGetMethod +//@requires: PyObjectGetMethod //@requires: PyObjectCallOneArg -//@requires: PyObjectCall2Args +//@requires: PyObjectCall2Args static PyObject* __Pyx__PyObject_CallMethod1(PyObject* method, PyObject* arg) { - // Separate function to avoid excessive inlining. - PyObject *result = __Pyx_PyObject_CallOneArg(method, arg); - Py_DECREF(method); + // Separate function to avoid excessive inlining. 
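/*
 * [Editor's note -- illustrative sketch, not part of this diff.] Stripped
 * of the method-unpacking and fastcall optimisations, the
 * call-a-method-with-one-argument pattern implemented below reduces to:
 * fetch the attribute, call the bound object with the single argument,
 * release the reference. The helper name is the editor's own:
 */
#include <Python.h>

static PyObject *call_method_1(PyObject *obj, PyObject *name, PyObject *arg) {
    PyObject *method = PyObject_GetAttr(obj, name);
    if (method == NULL)
        return NULL;
    /* The bound method already carries `obj`, so only `arg` is passed. */
    PyObject *result = PyObject_CallFunctionObjArgs(method, arg, NULL);
    Py_DECREF(method);
    return result;
}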
+ PyObject *result = __Pyx_PyObject_CallOneArg(method, arg); + Py_DECREF(method); return result; } static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) { - PyObject *method = NULL, *result; - int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); - if (likely(is_method)) { - result = __Pyx_PyObject_Call2Args(method, obj, arg); - Py_DECREF(method); - return result; - } + PyObject *method = NULL, *result; + int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); + if (likely(is_method)) { + result = __Pyx_PyObject_Call2Args(method, obj, arg); + Py_DECREF(method); + return result; + } if (unlikely(!method)) return NULL; - return __Pyx__PyObject_CallMethod1(method, arg); + return __Pyx__PyObject_CallMethod1(method, arg); } @@ -1845,48 +1845,48 @@ static PyObject* __Pyx_PyObject_CallMethod2(PyObject* obj, PyObject* method_name /////////////// PyObjectCallMethod2 /////////////// //@requires: PyObjectCall -//@requires: PyFunctionFastCall -//@requires: PyCFunctionFastCall -//@requires: PyObjectCall2Args - -static PyObject* __Pyx_PyObject_Call3Args(PyObject* function, PyObject* arg1, PyObject* arg2, PyObject* arg3) { - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(function)) { - PyObject *args[3] = {arg1, arg2, arg3}; - return __Pyx_PyFunction_FastCall(function, args, 3); - } - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(function)) { - PyObject *args[3] = {arg1, arg2, arg3}; - return __Pyx_PyFunction_FastCall(function, args, 3); - } - #endif - - args = PyTuple_New(3); - if (unlikely(!args)) goto done; - Py_INCREF(arg1); - PyTuple_SET_ITEM(args, 0, arg1); - Py_INCREF(arg2); - PyTuple_SET_ITEM(args, 1, arg2); - Py_INCREF(arg3); - PyTuple_SET_ITEM(args, 2, arg3); - - result = __Pyx_PyObject_Call(function, args, NULL); - Py_DECREF(args); - return result; -} - +//@requires: PyFunctionFastCall +//@requires: PyCFunctionFastCall +//@requires: PyObjectCall2Args + +static PyObject* __Pyx_PyObject_Call3Args(PyObject* function, PyObject* arg1, PyObject* arg2, PyObject* arg3) { + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(function)) { + PyObject *args[3] = {arg1, arg2, arg3}; + return __Pyx_PyFunction_FastCall(function, args, 3); + } + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(function)) { + PyObject *args[3] = {arg1, arg2, arg3}; + return __Pyx_PyFunction_FastCall(function, args, 3); + } + #endif + + args = PyTuple_New(3); + if (unlikely(!args)) goto done; + Py_INCREF(arg1); + PyTuple_SET_ITEM(args, 0, arg1); + Py_INCREF(arg2); + PyTuple_SET_ITEM(args, 1, arg2); + Py_INCREF(arg3); + PyTuple_SET_ITEM(args, 2, arg3); + + result = __Pyx_PyObject_Call(function, args, NULL); + Py_DECREF(args); + return result; +} + static PyObject* __Pyx_PyObject_CallMethod2(PyObject* obj, PyObject* method_name, PyObject* arg1, PyObject* arg2) { - PyObject *args, *method = NULL, *result = NULL; - int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); - if (likely(is_method)) { - result = __Pyx_PyObject_Call3Args(method, obj, arg1, arg2); + PyObject *args, *method = NULL, *result = NULL; + int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); + if (likely(is_method)) { + result = __Pyx_PyObject_Call3Args(method, obj, arg1, arg2); Py_DECREF(method); - return result; + return result; } - if (unlikely(!method)) return NULL; - result = __Pyx_PyObject_Call2Args(method, arg1, arg2); + if (unlikely(!method)) return NULL; + result = __Pyx_PyObject_Call2Args(method, arg1, arg2); 
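/*
 * [Editor's note, not part of this diff:] when __Pyx_PyObject_GetMethod()
 * managed to unpack an unbound function, the object itself must be passed
 * as the leading argument (obj, arg1, arg2); when it could not, the
 * returned bound method already carries `self`, so only the remaining
 * arguments are passed. That is why the fast path above calls
 * __Pyx_PyObject_Call3Args() while the fallback uses
 * __Pyx_PyObject_Call2Args().
 */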
Py_DECREF(method); return result; } @@ -1960,286 +1960,286 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject #endif -/////////////// PyFunctionFastCall.proto /////////////// - -#if CYTHON_FAST_PYCALL -#define __Pyx_PyFunction_FastCall(func, args, nargs) \ - __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) - -// let's assume that the non-public C-API function might still change during the 3.6 beta phase -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); -#else -#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) -#endif - -// Backport from Python 3 -// Assert a build-time dependency, as an expression. -// Your compile will fail if the condition isn't true, or can't be evaluated -// by the compiler. This can be used in an expression: its value is 0. -// Example: -// #define foo_to_char(foo) \ -// ((char *)(foo) \ -// + Py_BUILD_ASSERT_EXPR(offsetof(struct foo, string) == 0)) -// -// Written by Rusty Russell, public domain, http://ccodearchive.net/ -#define __Pyx_BUILD_ASSERT_EXPR(cond) \ - (sizeof(char [1 - 2*!(cond)]) - 1) - -#ifndef Py_MEMBER_SIZE -// Get the size of a structure member in bytes -#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) -#endif - +/////////////// PyFunctionFastCall.proto /////////////// + +#if CYTHON_FAST_PYCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs) \ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) + +// let's assume that the non-public C-API function might still change during the 3.6 beta phase +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); +#else +#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) +#endif + +// Backport from Python 3 +// Assert a build-time dependency, as an expression. +// Your compile will fail if the condition isn't true, or can't be evaluated +// by the compiler. This can be used in an expression: its value is 0. +// Example: +// #define foo_to_char(foo) \ +// ((char *)(foo) \ +// + Py_BUILD_ASSERT_EXPR(offsetof(struct foo, string) == 0)) +// +// Written by Rusty Russell, public domain, http://ccodearchive.net/ +#define __Pyx_BUILD_ASSERT_EXPR(cond) \ + (sizeof(char [1 - 2*!(cond)]) - 1) + +#ifndef Py_MEMBER_SIZE +// Get the size of a structure member in bytes +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif + #if CYTHON_FAST_PYCALL - // Initialised by module init code. - static size_t __pyx_pyframe_localsplus_offset = 0; - - #include "frameobject.h" - // This is the long runtime version of - // #define __Pyx_PyFrame_GetLocalsplus(frame) ((frame)->f_localsplus) - // offsetof(PyFrameObject, f_localsplus) differs between regular C-Python and Stackless Python. - // Therefore the offset is computed at run time from PyFrame_type.tp_basicsize. That is feasible, - // because f_localsplus is the last field of PyFrameObject (checked by Py_BUILD_ASSERT_EXPR below). 
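/*
 * [Editor's note -- illustrative sketch, not part of this diff.] The
 * build-time assertion used below rejects a false condition by forming an
 * array type of negative size, and evaluates to 0 otherwise, so it can be
 * folded into any constant expression. The struct and names here are the
 * editor's own, standing in for PyFrameObject/f_localsplus:
 */
#include <stddef.h>

#define BUILD_ASSERT_EXPR(cond) (sizeof(char [1 - 2*!(cond)]) - 1)

struct frame_like { size_t refcnt; char localsplus[8]; };

/* Compiles only when localsplus really is the trailing member, i.e. the
 * struct ends exactly where localsplus ends -- the same property the code
 * below verifies for PyFrameObject before deriving the field's offset
 * from PyFrame_Type.tp_basicsize at run time. */
static const size_t localsplus_offset =
    offsetof(struct frame_like, localsplus)
    + BUILD_ASSERT_EXPR(sizeof(struct frame_like) ==
                        offsetof(struct frame_like, localsplus)
                        + sizeof(((struct frame_like *)0)->localsplus));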
- #define __Pxy_PyFrame_Initialize_Offsets() \ - ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)), \ - (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) - #define __Pyx_PyFrame_GetLocalsplus(frame) \ - (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) + // Initialised by module init code. + static size_t __pyx_pyframe_localsplus_offset = 0; + + #include "frameobject.h" + // This is the long runtime version of + // #define __Pyx_PyFrame_GetLocalsplus(frame) ((frame)->f_localsplus) + // offsetof(PyFrameObject, f_localsplus) differs between regular C-Python and Stackless Python. + // Therefore the offset is computed at run time from PyFrame_type.tp_basicsize. That is feasible, + // because f_localsplus is the last field of PyFrameObject (checked by Py_BUILD_ASSERT_EXPR below). + #define __Pxy_PyFrame_Initialize_Offsets() \ + ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)), \ + (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) + #define __Pyx_PyFrame_GetLocalsplus(frame) \ + (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif // CYTHON_FAST_PYCALL -#endif - - -/////////////// PyFunctionFastCall /////////////// -// copied from CPython 3.6 ceval.c - -#if CYTHON_FAST_PYCALL - -static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, - PyObject *globals) { - PyFrameObject *f; +#endif + + +/////////////// PyFunctionFastCall /////////////// +// copied from CPython 3.6 ceval.c + +#if CYTHON_FAST_PYCALL + +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject **fastlocals; - Py_ssize_t i; - PyObject *result; - - assert(globals != NULL); - /* XXX Perhaps we should create a specialized - PyFrame_New() that doesn't take locals, but does - take builtins without sanity checking them. - */ - assert(tstate != NULL); - f = PyFrame_New(tstate, co, globals, NULL); - if (f == NULL) { - return NULL; - } - - fastlocals = __Pyx_PyFrame_GetLocalsplus(f); - - for (i = 0; i < na; i++) { - Py_INCREF(*args); - fastlocals[i] = *args++; - } - result = PyEval_EvalFrameEx(f,0); - - ++tstate->recursion_depth; - Py_DECREF(f); - --tstate->recursion_depth; - - return result; -} - - -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { - PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); - PyObject *globals = PyFunction_GET_GLOBALS(func); - PyObject *argdefs = PyFunction_GET_DEFAULTS(func); - PyObject *closure; -#if PY_MAJOR_VERSION >= 3 - PyObject *kwdefs; - //#if PY_VERSION_HEX >= 0x03050000 - //PyObject *name, *qualname; - //#endif -#endif - PyObject *kwtuple, **k; - PyObject **d; - Py_ssize_t nd; - Py_ssize_t nk; - PyObject *result; - - assert(kwargs == NULL || PyDict_Check(kwargs)); - nk = kwargs ? 
PyDict_Size(kwargs) : 0; - - if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { - return NULL; - } - - if ( -#if PY_MAJOR_VERSION >= 3 - co->co_kwonlyargcount == 0 && -#endif - likely(kwargs == NULL || nk == 0) && - co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { - /* Fast paths */ - if (argdefs == NULL && co->co_argcount == nargs) { - result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); - goto done; - } - else if (nargs == 0 && argdefs != NULL - && co->co_argcount == Py_SIZE(argdefs)) { - /* function called with no arguments, but all parameters have - a default value: use default values as arguments .*/ - args = &PyTuple_GET_ITEM(argdefs, 0); - result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); - goto done; - } - } - - if (kwargs != NULL) { - Py_ssize_t pos, i; - kwtuple = PyTuple_New(2 * nk); - if (kwtuple == NULL) { - result = NULL; - goto done; - } - - k = &PyTuple_GET_ITEM(kwtuple, 0); - pos = i = 0; - while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { - Py_INCREF(k[i]); - Py_INCREF(k[i+1]); - i += 2; - } - nk = i / 2; - } - else { - kwtuple = NULL; - k = NULL; - } - - closure = PyFunction_GET_CLOSURE(func); -#if PY_MAJOR_VERSION >= 3 - kwdefs = PyFunction_GET_KW_DEFAULTS(func); - //#if PY_VERSION_HEX >= 0x03050000 - //name = ((PyFunctionObject *)func) -> func_name; - //qualname = ((PyFunctionObject *)func) -> func_qualname; - //#endif -#endif - - if (argdefs != NULL) { - d = &PyTuple_GET_ITEM(argdefs, 0); - nd = Py_SIZE(argdefs); - } - else { - d = NULL; - nd = 0; - } - - //#if PY_VERSION_HEX >= 0x03050000 - //return _PyEval_EvalCodeWithName((PyObject*)co, globals, (PyObject *)NULL, - // args, nargs, - // NULL, 0, - // d, nd, kwdefs, - // closure, name, qualname); - //#elif PY_MAJOR_VERSION >= 3 -#if PY_MAJOR_VERSION >= 3 - result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, kwdefs, closure); -#else - result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, closure); -#endif - Py_XDECREF(kwtuple); - -done: - Py_LeaveRecursiveCall(); - return result; -} + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. + */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + + fastlocals = __Pyx_PyFrame_GetLocalsplus(f); + + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + + return result; +} + + +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; + //#if PY_VERSION_HEX >= 0x03050000 + //PyObject *name, *qualname; + //#endif +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? 
PyDict_Size(kwargs) : 0; + + if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { + return NULL; + } + + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + /* Fast paths */ + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); + //#if PY_VERSION_HEX >= 0x03050000 + //name = ((PyFunctionObject *)func) -> func_name; + //qualname = ((PyFunctionObject *)func) -> func_qualname; + //#endif +#endif + + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } + + //#if PY_VERSION_HEX >= 0x03050000 + //return _PyEval_EvalCodeWithName((PyObject*)co, globals, (PyObject *)NULL, + // args, nargs, + // NULL, 0, + // d, nd, kwdefs, + // closure, name, qualname); + //#elif PY_MAJOR_VERSION >= 3 +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); + +done: + Py_LeaveRecursiveCall(); + return result; +} #endif /* CPython < 3.6 */ #endif /* CYTHON_FAST_PYCALL */ - - -/////////////// PyCFunctionFastCall.proto /////////////// - -#if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); -#else -#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) -#endif - -/////////////// PyCFunctionFastCall /////////////// - -#if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { - PyCFunctionObject *func = (PyCFunctionObject*)func_obj; - PyCFunction meth = PyCFunction_GET_FUNCTION(func); - PyObject *self = PyCFunction_GET_SELF(func); + + +/////////////// PyCFunctionFastCall.proto /////////////// + +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); +#else +#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) +#endif + +/////////////// PyCFunctionFastCall /////////////// + +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { + PyCFunctionObject *func = (PyCFunctionObject*)func_obj; + PyCFunction meth = PyCFunction_GET_FUNCTION(func); + PyObject *self = PyCFunction_GET_SELF(func); int flags = 
PyCFunction_GET_FLAGS(func); - - assert(PyCFunction_Check(func)); - assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); - assert(nargs >= 0); - assert(nargs == 0 || args != NULL); - - /* _PyCFunction_FastCallDict() must not be called with an exception set, - because it may clear it (directly or indirectly) and so the - caller loses its exception */ - assert(!PyErr_Occurred()); - + + assert(PyCFunction_Check(func)); + assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); + assert(nargs >= 0); + assert(nargs == 0 || args != NULL); + + /* _PyCFunction_FastCallDict() must not be called with an exception set, + because it may clear it (directly or indirectly) and so the + caller loses its exception */ + assert(!PyErr_Occurred()); + if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { - return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); + return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { - return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); + return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } -} +} #endif /* CYTHON_FAST_PYCCALL */ - - -/////////////// PyObjectCall2Args.proto /////////////// - -static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /*proto*/ - -/////////////// PyObjectCall2Args /////////////// -//@requires: PyObjectCall -//@requires: PyFunctionFastCall -//@requires: PyCFunctionFastCall - -static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { - PyObject *args, *result = NULL; - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(function)) { - PyObject *args[2] = {arg1, arg2}; - return __Pyx_PyFunction_FastCall(function, args, 2); - } - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(function)) { - PyObject *args[2] = {arg1, arg2}; - return __Pyx_PyCFunction_FastCall(function, args, 2); - } - #endif - - args = PyTuple_New(2); - if (unlikely(!args)) goto done; - Py_INCREF(arg1); - PyTuple_SET_ITEM(args, 0, arg1); - Py_INCREF(arg2); - PyTuple_SET_ITEM(args, 1, arg2); - - Py_INCREF(function); - result = __Pyx_PyObject_Call(function, args, NULL); - Py_DECREF(args); - Py_DECREF(function); -done: - return result; -} - - + + +/////////////// PyObjectCall2Args.proto /////////////// + +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /*proto*/ + +/////////////// PyObjectCall2Args /////////////// +//@requires: PyObjectCall +//@requires: PyFunctionFastCall +//@requires: PyCFunctionFastCall + +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { + PyObject *args, *result = NULL; + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyFunction_FastCall(function, args, 2); + } + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyCFunction_FastCall(function, args, 2); + } + #endif + + args = PyTuple_New(2); + if (unlikely(!args)) goto done; + Py_INCREF(arg1); + PyTuple_SET_ITEM(args, 0, arg1); + Py_INCREF(arg2); + PyTuple_SET_ITEM(args, 1, arg2); + + Py_INCREF(function); + result = __Pyx_PyObject_Call(function, args, NULL); + Py_DECREF(args); + Py_DECREF(function); +done: + 
return result; +} + + /////////////// PyObjectCallOneArg.proto /////////////// static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /*proto*/ @@ -2247,8 +2247,8 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObjec /////////////// PyObjectCallOneArg /////////////// //@requires: PyObjectCallMethO //@requires: PyObjectCall -//@requires: PyFunctionFastCall -//@requires: PyCFunctionFastCall +//@requires: PyFunctionFastCall +//@requires: PyCFunctionFastCall #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { @@ -2263,31 +2263,31 @@ static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { -#if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCall(func, &arg, 1); - } -#endif +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, &arg, 1); + } +#endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { // fast and simple case that we are optimising for return __Pyx_PyObject_CallMethO(func, arg); -#if CYTHON_FAST_PYCCALL +#if CYTHON_FAST_PYCCALL } else if (__Pyx_PyFastCFunction_Check(func)) { - return __Pyx_PyCFunction_FastCall(func, &arg, 1); -#endif + return __Pyx_PyCFunction_FastCall(func, &arg, 1); +#endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_Pack(1, arg); - if (unlikely(!args)) return NULL; - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); - return result; + PyObject *result; + PyObject *args = PyTuple_Pack(1, arg); + if (unlikely(!args)) return NULL; + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; } #endif @@ -2305,22 +2305,22 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); /*proto /////////////// PyObjectCallNoArg /////////////// //@requires: PyObjectCallMethO //@requires: PyObjectCall -//@requires: PyFunctionFastCall +//@requires: PyFunctionFastCall //@substitute: naming #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { -#if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCall(func, NULL, 0); - } -#endif +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, NULL, 0); + } +#endif #ifdef __Pyx_CyFunction_USED - if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) + if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) #else - if (likely(PyCFunction_Check(func))) + if (likely(PyCFunction_Check(func))) #endif - { + { if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { // fast and simple case that we are optimising for return __Pyx_PyObject_CallMethO(func, NULL); @@ -2345,35 +2345,35 @@ static PyObject* __Pyx_PyNumber_InPlaceMatrixMultiply(PyObject* x, PyObject* y); /////////////// MatrixMultiply /////////////// //@requires: PyObjectGetAttrStr //@requires: PyObjectCallOneArg -//@requires: PyFunctionFastCall -//@requires: PyCFunctionFastCall +//@requires: PyFunctionFastCall +//@requires: PyCFunctionFastCall #if PY_VERSION_HEX < 0x03050000 static PyObject* __Pyx_PyObject_CallMatrixMethod(PyObject* method, PyObject* arg) { // NOTE: eats the method 
reference PyObject *result = NULL; -#if CYTHON_UNPACK_METHODS +#if CYTHON_UNPACK_METHODS if (likely(PyMethod_Check(method))) { PyObject *self = PyMethod_GET_SELF(method); if (likely(self)) { PyObject *args; PyObject *function = PyMethod_GET_FUNCTION(method); - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(function)) { - PyObject *args[2] = {self, arg}; - result = __Pyx_PyFunction_FastCall(function, args, 2); - goto done; - } - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(function)) { - PyObject *args[2] = {self, arg}; - result = __Pyx_PyCFunction_FastCall(function, args, 2); - goto done; - } - #endif + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(function)) { + PyObject *args[2] = {self, arg}; + result = __Pyx_PyFunction_FastCall(function, args, 2); + goto done; + } + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(function)) { + PyObject *args[2] = {self, arg}; + result = __Pyx_PyCFunction_FastCall(function, args, 2); + goto done; + } + #endif args = PyTuple_New(2); - if (unlikely(!args)) goto done; + if (unlikely(!args)) goto done; Py_INCREF(self); PyTuple_SET_ITEM(args, 0, self); Py_INCREF(arg); @@ -2388,7 +2388,7 @@ static PyObject* __Pyx_PyObject_CallMatrixMethod(PyObject* method, PyObject* arg } #endif result = __Pyx_PyObject_CallOneArg(method, arg); -done: +done: Py_DECREF(method); return result; } @@ -2409,8 +2409,8 @@ done: static PyObject* __Pyx__PyNumber_MatrixMultiply(PyObject* x, PyObject* y, const char* op_name) { int right_is_subtype = PyObject_IsSubclass((PyObject*)Py_TYPE(y), (PyObject*)Py_TYPE(x)); - if (unlikely(right_is_subtype == -1)) - return NULL; + if (unlikely(right_is_subtype == -1)) + return NULL; if (right_is_subtype) { // to allow subtypes to override parent behaviour, try reversed operation first // see note at https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types @@ -2435,63 +2435,63 @@ static PyObject* __Pyx_PyNumber_InPlaceMatrixMultiply(PyObject* x, PyObject* y) #undef __Pyx_TryMatrixMethod #endif - - -/////////////// PyDictVersioning.proto /////////////// - -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) -#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) \ - (version_var) = __PYX_GET_DICT_VERSION(dict); \ - (cache_var) = (value); - -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) { \ - static PY_UINT64_T __pyx_dict_version = 0; \ - static PyObject *__pyx_dict_cached_value = NULL; \ - if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) { \ - (VAR) = __pyx_dict_cached_value; \ - } else { \ - (VAR) = __pyx_dict_cached_value = (LOOKUP); \ - __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT); \ - } \ -} - -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); /*proto*/ -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); /*proto*/ -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); /*proto*/ - -#else -#define __PYX_GET_DICT_VERSION(dict) (0) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); -#endif - -/////////////// PyDictVersioning /////////////// - -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { - PyObject 
*dict = Py_TYPE(obj)->tp_dict; - return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; -} - -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { - PyObject **dictptr = NULL; - Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; - if (offset) { -#if CYTHON_COMPILING_IN_CPYTHON - dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); -#else - dictptr = _PyObject_GetDictPtr(obj); -#endif - } - return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; -} - -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) - return 0; - return obj_dict_version == __Pyx_get_object_dict_version(obj); -} -#endif + + +/////////////// PyDictVersioning.proto /////////////// + +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) \ + (version_var) = __PYX_GET_DICT_VERSION(dict); \ + (cache_var) = (value); + +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) { \ + static PY_UINT64_T __pyx_dict_version = 0; \ + static PyObject *__pyx_dict_cached_value = NULL; \ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) { \ + (VAR) = __pyx_dict_cached_value; \ + } else { \ + (VAR) = __pyx_dict_cached_value = (LOOKUP); \ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT); \ + } \ +} + +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); /*proto*/ +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); /*proto*/ +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); /*proto*/ + +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); +#endif + +/////////////// PyDictVersioning /////////////// + +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} + +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; +} + +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif diff --git a/contrib/tools/cython/Cython/Utility/Optimize.c b/contrib/tools/cython/Cython/Utility/Optimize.c index fb155140ff..d18c9b78ec 100644 --- a/contrib/tools/cython/Cython/Utility/Optimize.c +++ b/contrib/tools/cython/Cython/Utility/Optimize.c @@ -28,7 +28,7 @@ static CYTHON_INLINE int __Pyx_PyObject_Append(PyObject* L, PyObject* x) { /////////////// ListAppend.proto /////////////// -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); @@ -46,7 +46,7 @@ static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { /////////////// ListCompAppend.proto /////////////// -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); @@ -78,18 +78,18 @@ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { /////////////// pop.proto /////////////// -static CYTHON_INLINE PyObject* __Pyx__PyObject_Pop(PyObject* L); /*proto*/ +static CYTHON_INLINE PyObject* __Pyx__PyObject_Pop(PyObject* L); /*proto*/ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE PyObject* __Pyx_PyList_Pop(PyObject* L); /*proto*/ -#define __Pyx_PyObject_Pop(L) (likely(PyList_CheckExact(L)) ? \ - __Pyx_PyList_Pop(L) : __Pyx__PyObject_Pop(L)) - -#else -#define __Pyx_PyList_Pop(L) __Pyx__PyObject_Pop(L) -#define __Pyx_PyObject_Pop(L) __Pyx__PyObject_Pop(L) -#endif - +#define __Pyx_PyObject_Pop(L) (likely(PyList_CheckExact(L)) ? \ + __Pyx_PyList_Pop(L) : __Pyx__PyObject_Pop(L)) + +#else +#define __Pyx_PyList_Pop(L) __Pyx__PyObject_Pop(L) +#define __Pyx_PyObject_Pop(L) __Pyx__PyObject_Pop(L) +#endif + /////////////// pop /////////////// //@requires: ObjectHandling.c::PyObjectCallMethod0 @@ -100,72 +100,72 @@ static CYTHON_INLINE PyObject* __Pyx__PyObject_Pop(PyObject* L) { return __Pyx_PyObject_CallMethod0(L, PYIDENT("pop")); } -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE PyObject* __Pyx_PyList_Pop(PyObject* L) { /* Check that both the size is positive and no reallocation shrinking needs to be done. 
*/ if (likely(PyList_GET_SIZE(L) > (((PyListObject*)L)->allocated >> 1))) { __Pyx_SET_SIZE(L, Py_SIZE(L) - 1); return PyList_GET_ITEM(L, PyList_GET_SIZE(L)); } - return CALL_UNBOUND_METHOD(PyList_Type, "pop", L); -} + return CALL_UNBOUND_METHOD(PyList_Type, "pop", L); +} #endif /////////////// pop_index.proto /////////////// -static PyObject* __Pyx__PyObject_PopNewIndex(PyObject* L, PyObject* py_ix); /*proto*/ -static PyObject* __Pyx__PyObject_PopIndex(PyObject* L, PyObject* py_ix); /*proto*/ - -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static PyObject* __Pyx__PyList_PopIndex(PyObject* L, PyObject* py_ix, Py_ssize_t ix); /*proto*/ - -#define __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) ( \ - (likely(PyList_CheckExact(L) && __Pyx_fits_Py_ssize_t(ix, type, is_signed))) ? \ - __Pyx__PyList_PopIndex(L, py_ix, ix) : ( \ - (unlikely((py_ix) == Py_None)) ? __Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) : \ - __Pyx__PyObject_PopIndex(L, py_ix))) - -#define __Pyx_PyList_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) ( \ +static PyObject* __Pyx__PyObject_PopNewIndex(PyObject* L, PyObject* py_ix); /*proto*/ +static PyObject* __Pyx__PyObject_PopIndex(PyObject* L, PyObject* py_ix); /*proto*/ + +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static PyObject* __Pyx__PyList_PopIndex(PyObject* L, PyObject* py_ix, Py_ssize_t ix); /*proto*/ + +#define __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) ( \ + (likely(PyList_CheckExact(L) && __Pyx_fits_Py_ssize_t(ix, type, is_signed))) ? \ + __Pyx__PyList_PopIndex(L, py_ix, ix) : ( \ + (unlikely((py_ix) == Py_None)) ? __Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) : \ + __Pyx__PyObject_PopIndex(L, py_ix))) + +#define __Pyx_PyList_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) ( \ __Pyx_fits_Py_ssize_t(ix, type, is_signed) ? \ - __Pyx__PyList_PopIndex(L, py_ix, ix) : ( \ - (unlikely((py_ix) == Py_None)) ? __Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) : \ - __Pyx__PyObject_PopIndex(L, py_ix))) - -#else - -#define __Pyx_PyList_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) \ - __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) - -#define __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) ( \ - (unlikely((py_ix) == Py_None)) ? __Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) : \ - __Pyx__PyObject_PopIndex(L, py_ix)) -#endif - + __Pyx__PyList_PopIndex(L, py_ix, ix) : ( \ + (unlikely((py_ix) == Py_None)) ? __Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) : \ + __Pyx__PyObject_PopIndex(L, py_ix))) + +#else + +#define __Pyx_PyList_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) \ + __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) + +#define __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) ( \ + (unlikely((py_ix) == Py_None)) ? 
__Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) : \ + __Pyx__PyObject_PopIndex(L, py_ix)) +#endif + /////////////// pop_index /////////////// //@requires: ObjectHandling.c::PyObjectCallMethod1 -static PyObject* __Pyx__PyObject_PopNewIndex(PyObject* L, PyObject* py_ix) { +static PyObject* __Pyx__PyObject_PopNewIndex(PyObject* L, PyObject* py_ix) { PyObject *r; if (unlikely(!py_ix)) return NULL; - r = __Pyx__PyObject_PopIndex(L, py_ix); + r = __Pyx__PyObject_PopIndex(L, py_ix); Py_DECREF(py_ix); return r; } -static PyObject* __Pyx__PyObject_PopIndex(PyObject* L, PyObject* py_ix) { - return __Pyx_PyObject_CallMethod1(L, PYIDENT("pop"), py_ix); -} - -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static PyObject* __Pyx__PyList_PopIndex(PyObject* L, PyObject* py_ix, Py_ssize_t ix) { +static PyObject* __Pyx__PyObject_PopIndex(PyObject* L, PyObject* py_ix) { + return __Pyx_PyObject_CallMethod1(L, PYIDENT("pop"), py_ix); +} + +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static PyObject* __Pyx__PyList_PopIndex(PyObject* L, PyObject* py_ix, Py_ssize_t ix) { Py_ssize_t size = PyList_GET_SIZE(L); if (likely(size > (((PyListObject*)L)->allocated >> 1))) { Py_ssize_t cix = ix; if (cix < 0) { cix += size; } - if (likely(__Pyx_is_valid_index(cix, size))) { + if (likely(__Pyx_is_valid_index(cix, size))) { PyObject* v = PyList_GET_ITEM(L, cix); __Pyx_SET_SIZE(L, Py_SIZE(L) - 1); size -= 1; @@ -173,12 +173,12 @@ static PyObject* __Pyx__PyList_PopIndex(PyObject* L, PyObject* py_ix, Py_ssize_t return v; } } - if (py_ix == Py_None) { - return __Pyx__PyObject_PopNewIndex(L, PyInt_FromSsize_t(ix)); - } else { - return __Pyx__PyObject_PopIndex(L, py_ix); - } -} + if (py_ix == Py_None) { + return __Pyx__PyObject_PopNewIndex(L, PyInt_FromSsize_t(ix)); + } else { + return __Pyx__PyObject_PopIndex(L, py_ix); + } +} #endif @@ -190,7 +190,7 @@ static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObjec static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObject* default_value) { PyObject* value; -#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (unlikely(PyErr_Occurred())) @@ -238,7 +238,7 @@ static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *ke #else if (is_safe_type == 1 || (is_safe_type == -1 && /* the following builtins presumably have repeatably safe and fast hash functions */ -#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY (PyUnicode_CheckExact(key) || PyString_CheckExact(key) || PyLong_CheckExact(key)))) { value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { @@ -352,9 +352,9 @@ static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* iterable, int is_di return PyObject_GetIter(iterable); } -static CYTHON_INLINE int __Pyx_dict_iter_next( - PyObject* iter_obj, CYTHON_NCP_UNUSED Py_ssize_t orig_length, CYTHON_NCP_UNUSED Py_ssize_t* ppos, - PyObject** pkey, PyObject** pvalue, PyObject** pitem, int source_is_dict) { +static CYTHON_INLINE int __Pyx_dict_iter_next( + PyObject* iter_obj, CYTHON_NCP_UNUSED Py_ssize_t orig_length, CYTHON_NCP_UNUSED Py_ssize_t* ppos, + PyObject** pkey, PyObject** pvalue, PyObject** pitem, int source_is_dict) { PyObject* next_item; #if !CYTHON_COMPILING_IN_PYPY if (source_is_dict) { @@ -438,15 +438,15 @@ static CYTHON_INLINE PyObject* __Pyx_set_iterator(PyObject* iterable, int is_set 
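The dict helpers in the hunks above (__Pyx_PyDict_GetItemDefault, __Pyx_dict_iter_next) lean on CPython's PyDict_Next to walk a real dict without allocating an iterator object. A minimal standalone sketch of that fast path, with a hypothetical name and the error handling reduced to essentials:

#include <Python.h>

/* Sum the values of a dict of ints. PyDict_CheckExact gates the fast
 * path, much as the helpers above gate on the concrete type; the dict
 * must not be mutated while PyDict_Next is walking it. */
static long sum_dict_values(PyObject *obj) {
    PyObject *key, *value;
    Py_ssize_t pos = 0;             /* PyDict_Next cursor, must start at 0 */
    long total = 0;
    if (!PyDict_CheckExact(obj)) {  /* generic-iterator fallback omitted */
        PyErr_SetString(PyExc_TypeError, "expected a dict");
        return -1;
    }
    while (PyDict_Next(obj, &pos, &key, &value)) {  /* borrowed refs */
        total += PyLong_AsLong(value);
        if (PyErr_Occurred()) return -1;
    }
    return total;
}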
#if CYTHON_COMPILING_IN_CPYTHON is_set = is_set || likely(PySet_CheckExact(iterable) || PyFrozenSet_CheckExact(iterable)); *p_source_is_set = is_set; - if (likely(is_set)) { - *p_orig_length = PySet_Size(iterable); - Py_INCREF(iterable); - return iterable; - } + if (likely(is_set)) { + *p_orig_length = PySet_Size(iterable); + Py_INCREF(iterable); + return iterable; + } #else (void)is_set; *p_source_is_set = 0; -#endif +#endif *p_orig_length = 0; return PyObject_GetIter(iterable); } @@ -462,7 +462,7 @@ static CYTHON_INLINE int __Pyx_set_iter_next( } (void)orig_length; (void)ppos; - return 1; + return 1; } #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(PySet_GET_SIZE(iter_obj) != orig_length)) { @@ -482,7 +482,7 @@ static CYTHON_INLINE int __Pyx_set_iter_next( } } #endif - return 0; + return 0; } /////////////// py_set_discard_unhashable /////////////// @@ -599,8 +599,8 @@ static double __Pyx__PyObject_AsDouble(PyObject* obj); /* proto */ static double __Pyx__PyObject_AsDouble(PyObject* obj) { PyObject* float_value; -#if !CYTHON_USE_TYPE_SLOTS - float_value = PyNumber_Float(obj); if ((0)) goto bad; +#if !CYTHON_USE_TYPE_SLOTS + float_value = PyNumber_Float(obj); if ((0)) goto bad; #else PyNumberMethods *nb = Py_TYPE(obj)->tp_as_number; if (likely(nb) && likely(nb->nb_float)) { @@ -649,25 +649,25 @@ static PyObject* __Pyx__PyNumber_PowerOf2(PyObject *two, PyObject *exp, PyObject static PyObject* __Pyx__PyNumber_PowerOf2(PyObject *two, PyObject *exp, PyObject *none, int inplace) { // in CPython, 1<<N is substantially faster than 2**N // see http://bugs.python.org/issue21420 -#if !CYTHON_COMPILING_IN_PYPY +#if !CYTHON_COMPILING_IN_PYPY Py_ssize_t shiftby; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(exp))) { - shiftby = PyInt_AS_LONG(exp); - } else -#endif +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(exp))) { + shiftby = PyInt_AS_LONG(exp); + } else +#endif if (likely(PyLong_CheckExact(exp))) { - #if CYTHON_USE_PYLONG_INTERNALS - const Py_ssize_t size = Py_SIZE(exp); - // tuned to optimise branch prediction - if (likely(size == 1)) { - shiftby = ((PyLongObject*)exp)->ob_digit[0]; - } else if (size == 0) { - return PyInt_FromLong(1L); - } else if (unlikely(size < 0)) { - goto fallback; - } else { - shiftby = PyLong_AsSsize_t(exp); + #if CYTHON_USE_PYLONG_INTERNALS + const Py_ssize_t size = Py_SIZE(exp); + // tuned to optimise branch prediction + if (likely(size == 1)) { + shiftby = ((PyLongObject*)exp)->ob_digit[0]; + } else if (size == 0) { + return PyInt_FromLong(1L); + } else if (unlikely(size < 0)) { + goto fallback; + } else { + shiftby = PyLong_AsSsize_t(exp); } #else shiftby = PyLong_AsSsize_t(exp); @@ -679,17 +679,17 @@ static PyObject* __Pyx__PyNumber_PowerOf2(PyObject *two, PyObject *exp, PyObject if ((size_t)shiftby <= sizeof(long) * 8 - 2) { long value = 1L << shiftby; return PyInt_FromLong(value); -#ifdef HAVE_LONG_LONG - } else if ((size_t)shiftby <= sizeof(unsigned PY_LONG_LONG) * 8 - 1) { - unsigned PY_LONG_LONG value = ((unsigned PY_LONG_LONG)1) << shiftby; - return PyLong_FromUnsignedLongLong(value); -#endif +#ifdef HAVE_LONG_LONG + } else if ((size_t)shiftby <= sizeof(unsigned PY_LONG_LONG) * 8 - 1) { + unsigned PY_LONG_LONG value = ((unsigned PY_LONG_LONG)1) << shiftby; + return PyLong_FromUnsignedLongLong(value); +#endif } else { - PyObject *result, *one = PyInt_FromLong(1L); + PyObject *result, *one = PyInt_FromLong(1L); if (unlikely(!one)) return NULL; - result = PyNumber_Lshift(one, exp); - Py_DECREF(one); - return result; + result = 
PyNumber_Lshift(one, exp); + Py_DECREF(one); + return result; } } else if (shiftby == -1 && PyErr_Occurred()) { PyErr_Clear(); @@ -698,498 +698,498 @@ fallback: #endif return (inplace ? PyNumber_InPlacePower : PyNumber_Power)(two, exp, none); } - - -/////////////// PyIntCompare.proto /////////////// - -{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} -static CYTHON_INLINE {{c_ret_type}} __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, long intval, long inplace); /*proto*/ - -/////////////// PyIntCompare /////////////// - -{{py: pyval, ival = ('op2', 'b') if order == 'CObj' else ('op1', 'a') }} -{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} -{{py: return_true = 'Py_RETURN_TRUE' if ret_type.is_pyobject else 'return 1'}} -{{py: return_false = 'Py_RETURN_FALSE' if ret_type.is_pyobject else 'return 0'}} -{{py: slot_name = op.lower() }} -{{py: c_op = {'Eq': '==', 'Ne': '!='}[op] }} -{{py: -return_compare = ( - (lambda a,b,c_op, return_true=return_true, return_false=return_false: "if ({a} {c_op} {b}) {return_true}; else {return_false};".format( - a=a, b=b, c_op=c_op, return_true=return_true, return_false=return_false)) - if ret_type.is_pyobject else - (lambda a,b,c_op: "return ({a} {c_op} {b});".format(a=a, b=b, c_op=c_op)) - ) -}} - -static CYTHON_INLINE {{c_ret_type}} __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED long inplace) { - if (op1 == op2) { - {{return_true if op == 'Eq' else return_false}}; - } - - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact({{pyval}}))) { - const long {{'a' if order == 'CObj' else 'b'}} = intval; - long {{ival}} = PyInt_AS_LONG({{pyval}}); - {{return_compare('a', 'b', c_op)}} - } - #endif - - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact({{pyval}}))) { - int unequal; - unsigned long uintval; - Py_ssize_t size = Py_SIZE({{pyval}}); - const digit* digits = ((PyLongObject*){{pyval}})->ob_digit; - if (intval == 0) { - // == 0 => Py_SIZE(pyval) == 0 - {{return_compare('size', '0', c_op)}} - } else if (intval < 0) { - // < 0 => Py_SIZE(pyval) < 0 - if (size >= 0) - {{return_false if op == 'Eq' else return_true}}; - // both are negative => can use absolute values now. - intval = -intval; - size = -size; - } else { - // > 0 => Py_SIZE(pyval) > 0 - if (size <= 0) - {{return_false if op == 'Eq' else return_true}}; - } - // After checking that the sign is the same (and excluding 0), now compare the absolute values. - // When inlining, the C compiler should select exactly one line from this unrolled loop. - uintval = (unsigned long) intval; - {{for _size in range(4, 0, -1)}} -#if PyLong_SHIFT * {{_size}} < SIZEOF_LONG*8 - if (uintval >> (PyLong_SHIFT * {{_size}})) { - // The C integer value is between (PyLong_BASE ** _size) and MIN(PyLong_BASE ** _size, LONG_MAX). 
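The digit loops above and below read CPython's PyLong representation directly: Py_SIZE() carries the sign and the number of digits, and ob_digit stores the magnitude in base-2^PyLong_SHIFT limbs. A sketch of the single-digit fast path only (hypothetical name; assumes CYTHON_USE_PYLONG_INTERNALS-style access and the pre-3.12 PyLongObject layout):

#include <Python.h>
#include <longintrepr.h>  /* CPython-internal header: digit, PyLong_SHIFT */

/* Read a PyLong that fits in one digit; returns -1 when the multi-digit
 * path (the unrolled digit joins above) would be needed instead. */
static int small_pylong_as_long(PyObject *op, long *out) {
    Py_ssize_t size = Py_SIZE(op);  /* sign + digit count; 0 encodes 0 */
    const digit *digits = ((PyLongObject *)op)->ob_digit;
    if (size < -1 || size > 1)
        return -1;
    *out = size ? (size < 0 ? -(long)digits[0] : (long)digits[0]) : 0;
    return 0;
}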
- unequal = (size != {{_size+1}}) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) - {{for _i in range(1, _size+1)}} | (digits[{{_i}}] != ((uintval >> ({{_i}} * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)){{endfor}}; - } else -#endif - {{endfor}} - unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK)); - - {{return_compare('unequal', '0', c_op)}} - } - #endif - - if (PyFloat_CheckExact({{pyval}})) { - const long {{'a' if order == 'CObj' else 'b'}} = intval; - double {{ival}} = PyFloat_AS_DOUBLE({{pyval}}); - {{return_compare('(double)a', '(double)b', c_op)}} - } - - return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}( - PyObject_RichCompare(op1, op2, Py_{{op.upper()}})); -} - - -/////////////// PyIntBinop.proto /////////////// - -{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} -#if !CYTHON_COMPILING_IN_PYPY -static {{c_ret_type}} __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); /*proto*/ -#else -#define __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(op1, op2, intval, inplace, zerodivision_check) \ - {{if op in ('Eq', 'Ne')}}{{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}(PyObject_RichCompare(op1, op2, Py_{{op.upper()}})) - {{else}}(inplace ? PyNumber_InPlace{{op}}(op1, op2) : PyNumber_{{op}}(op1, op2)) - {{endif}} -#endif - -/////////////// PyIntBinop /////////////// - -#if !CYTHON_COMPILING_IN_PYPY -{{py: from Cython.Utility import pylong_join }} -{{py: pyval, ival = ('op2', 'b') if order == 'CObj' else ('op1', 'a') }} -{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} -{{py: return_true = 'Py_RETURN_TRUE' if ret_type.is_pyobject else 'return 1'}} -{{py: return_false = 'Py_RETURN_FALSE' if ret_type.is_pyobject else 'return 0'}} -{{py: slot_name = {'TrueDivide': 'true_divide', 'FloorDivide': 'floor_divide'}.get(op, op.lower()) }} -{{py: cfunc_name = '__Pyx_PyInt_%s%s%s' % ('' if ret_type.is_pyobject else 'Bool', op, order)}} -{{py: zerodiv_check = lambda operand, _cfunc_name=cfunc_name: '%s_ZeroDivisionError(%s)' % (_cfunc_name, operand)}} -{{py: -c_op = { - 'Add': '+', 'Subtract': '-', 'Remainder': '%', 'TrueDivide': '/', 'FloorDivide': '/', - 'Or': '|', 'Xor': '^', 'And': '&', 'Rshift': '>>', 'Lshift': '<<', - 'Eq': '==', 'Ne': '!=', - }[op] -}} - -{{if op in ('TrueDivide', 'FloorDivide', 'Remainder')}} -#if PY_MAJOR_VERSION < 3 || CYTHON_USE_PYLONG_INTERNALS -#define {{zerodiv_check('operand')}} \ - if (unlikely(zerodivision_check && ((operand) == 0))) { \ - PyErr_SetString(PyExc_ZeroDivisionError, "integer division{{if op == 'Remainder'}} or modulo{{endif}} by zero"); \ - return NULL; \ - } -#endif -{{endif}} - -static {{c_ret_type}} {{cfunc_name}}(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { - // Prevent "unused" warnings. 
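Several branches of this template (and the long long variants later) have to turn C's truncating % into Python's floor modulo, where the result takes the sign of the divisor. The branchless adjustment used there, as a standalone sketch (py_mod is a hypothetical name; b must be non-zero, which the generated code checks separately):

/* Python-style integer modulo in C, assuming two's complement. */
static long py_mod(long a, long b) {
    long x = a % b;                       /* C: result has the sign of a */
    x += ((x != 0) & ((x ^ b) < 0)) * b;  /* add b when the signs disagree */
    return x;                             /* e.g. py_mod(-7, 3) == 2 */
}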
- (void)inplace; - (void)zerodivision_check; - - {{if op in ('Eq', 'Ne')}} - if (op1 == op2) { - {{return_true if op == 'Eq' else return_false}}; - } - {{endif}} - - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact({{pyval}}))) { - const long {{'a' if order == 'CObj' else 'b'}} = intval; - {{if c_op in '+-%' or op == 'FloorDivide'}} - long x; - {{endif}} - long {{ival}} = PyInt_AS_LONG({{pyval}}); - - {{if op in ('Eq', 'Ne')}} - if (a {{c_op}} b) { - {{return_true}}; - } else { - {{return_false}}; - } - {{elif c_op in '+-'}} - // adapted from intobject.c in Py2.7: - // casts in the line below avoid undefined behaviour on overflow - x = (long)((unsigned long)a {{c_op}} b); - if (likely((x^a) >= 0 || (x^{{ '~' if op == 'Subtract' else '' }}b) >= 0)) - return PyInt_FromLong(x); - return PyLong_Type.tp_as_number->nb_{{slot_name}}(op1, op2); - {{elif c_op == '%'}} - {{zerodiv_check('b')}} - // see ExprNodes.py :: mod_int_utility_code - x = a % b; - x += ((x != 0) & ((x ^ b) < 0)) * b; - return PyInt_FromLong(x); - {{elif op == 'TrueDivide'}} - {{zerodiv_check('b')}} + + +/////////////// PyIntCompare.proto /////////////// + +{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} +static CYTHON_INLINE {{c_ret_type}} __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, long intval, long inplace); /*proto*/ + +/////////////// PyIntCompare /////////////// + +{{py: pyval, ival = ('op2', 'b') if order == 'CObj' else ('op1', 'a') }} +{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} +{{py: return_true = 'Py_RETURN_TRUE' if ret_type.is_pyobject else 'return 1'}} +{{py: return_false = 'Py_RETURN_FALSE' if ret_type.is_pyobject else 'return 0'}} +{{py: slot_name = op.lower() }} +{{py: c_op = {'Eq': '==', 'Ne': '!='}[op] }} +{{py: +return_compare = ( + (lambda a,b,c_op, return_true=return_true, return_false=return_false: "if ({a} {c_op} {b}) {return_true}; else {return_false};".format( + a=a, b=b, c_op=c_op, return_true=return_true, return_false=return_false)) + if ret_type.is_pyobject else + (lambda a,b,c_op: "return ({a} {c_op} {b});".format(a=a, b=b, c_op=c_op)) + ) +}} + +static CYTHON_INLINE {{c_ret_type}} __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED long inplace) { + if (op1 == op2) { + {{return_true if op == 'Eq' else return_false}}; + } + + #if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact({{pyval}}))) { + const long {{'a' if order == 'CObj' else 'b'}} = intval; + long {{ival}} = PyInt_AS_LONG({{pyval}}); + {{return_compare('a', 'b', c_op)}} + } + #endif + + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact({{pyval}}))) { + int unequal; + unsigned long uintval; + Py_ssize_t size = Py_SIZE({{pyval}}); + const digit* digits = ((PyLongObject*){{pyval}})->ob_digit; + if (intval == 0) { + // == 0 => Py_SIZE(pyval) == 0 + {{return_compare('size', '0', c_op)}} + } else if (intval < 0) { + // < 0 => Py_SIZE(pyval) < 0 + if (size >= 0) + {{return_false if op == 'Eq' else return_true}}; + // both are negative => can use absolute values now. + intval = -intval; + size = -size; + } else { + // > 0 => Py_SIZE(pyval) > 0 + if (size <= 0) + {{return_false if op == 'Eq' else return_true}}; + } + // After checking that the sign is the same (and excluding 0), now compare the absolute values. + // When inlining, the C compiler should select exactly one line from this unrolled loop. 
+ uintval = (unsigned long) intval; + {{for _size in range(4, 0, -1)}} +#if PyLong_SHIFT * {{_size}} < SIZEOF_LONG*8 + if (uintval >> (PyLong_SHIFT * {{_size}})) { + // The C integer value is between (PyLong_BASE ** _size) and MIN(PyLong_BASE ** _size, LONG_MAX). + unequal = (size != {{_size+1}}) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) + {{for _i in range(1, _size+1)}} | (digits[{{_i}}] != ((uintval >> ({{_i}} * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)){{endfor}}; + } else +#endif + {{endfor}} + unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK)); + + {{return_compare('unequal', '0', c_op)}} + } + #endif + + if (PyFloat_CheckExact({{pyval}})) { + const long {{'a' if order == 'CObj' else 'b'}} = intval; + double {{ival}} = PyFloat_AS_DOUBLE({{pyval}}); + {{return_compare('(double)a', '(double)b', c_op)}} + } + + return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}( + PyObject_RichCompare(op1, op2, Py_{{op.upper()}})); +} + + +/////////////// PyIntBinop.proto /////////////// + +{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} +#if !CYTHON_COMPILING_IN_PYPY +static {{c_ret_type}} __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); /*proto*/ +#else +#define __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(op1, op2, intval, inplace, zerodivision_check) \ + {{if op in ('Eq', 'Ne')}}{{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}(PyObject_RichCompare(op1, op2, Py_{{op.upper()}})) + {{else}}(inplace ? PyNumber_InPlace{{op}}(op1, op2) : PyNumber_{{op}}(op1, op2)) + {{endif}} +#endif + +/////////////// PyIntBinop /////////////// + +#if !CYTHON_COMPILING_IN_PYPY +{{py: from Cython.Utility import pylong_join }} +{{py: pyval, ival = ('op2', 'b') if order == 'CObj' else ('op1', 'a') }} +{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} +{{py: return_true = 'Py_RETURN_TRUE' if ret_type.is_pyobject else 'return 1'}} +{{py: return_false = 'Py_RETURN_FALSE' if ret_type.is_pyobject else 'return 0'}} +{{py: slot_name = {'TrueDivide': 'true_divide', 'FloorDivide': 'floor_divide'}.get(op, op.lower()) }} +{{py: cfunc_name = '__Pyx_PyInt_%s%s%s' % ('' if ret_type.is_pyobject else 'Bool', op, order)}} +{{py: zerodiv_check = lambda operand, _cfunc_name=cfunc_name: '%s_ZeroDivisionError(%s)' % (_cfunc_name, operand)}} +{{py: +c_op = { + 'Add': '+', 'Subtract': '-', 'Remainder': '%', 'TrueDivide': '/', 'FloorDivide': '/', + 'Or': '|', 'Xor': '^', 'And': '&', 'Rshift': '>>', 'Lshift': '<<', + 'Eq': '==', 'Ne': '!=', + }[op] +}} + +{{if op in ('TrueDivide', 'FloorDivide', 'Remainder')}} +#if PY_MAJOR_VERSION < 3 || CYTHON_USE_PYLONG_INTERNALS +#define {{zerodiv_check('operand')}} \ + if (unlikely(zerodivision_check && ((operand) == 0))) { \ + PyErr_SetString(PyExc_ZeroDivisionError, "integer division{{if op == 'Remainder'}} or modulo{{endif}} by zero"); \ + return NULL; \ + } +#endif +{{endif}} + +static {{c_ret_type}} {{cfunc_name}}(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { + // Prevent "unused" warnings. 
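The Py2 int branch just below adds through unsigned arithmetic and then tests (x^a) >= 0 || (x^b) >= 0. The unsigned cast sidesteps signed-overflow undefined behaviour, and the XOR test flags overflow exactly when the result disagrees in sign with both operands. The same trick as a self-contained sketch (checked_add is a hypothetical name; assumes a two's-complement target, as the surrounding code does):

#include <limits.h>

/* Returns 0 and stores a+b in *out, or -1 on overflow. */
static int checked_add(long a, long b, long *out) {
    long x = (long)((unsigned long)a + (unsigned long)b); /* wraps, no UB */
    if ((x ^ a) >= 0 || (x ^ b) >= 0) {  /* same sign as an operand: ok */
        *out = x;
        return 0;
    }
    return -1;  /* e.g. checked_add(LONG_MAX, 1, &r) reports overflow */
}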
+ (void)inplace; + (void)zerodivision_check; + + {{if op in ('Eq', 'Ne')}} + if (op1 == op2) { + {{return_true if op == 'Eq' else return_false}}; + } + {{endif}} + + #if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact({{pyval}}))) { + const long {{'a' if order == 'CObj' else 'b'}} = intval; + {{if c_op in '+-%' or op == 'FloorDivide'}} + long x; + {{endif}} + long {{ival}} = PyInt_AS_LONG({{pyval}}); + + {{if op in ('Eq', 'Ne')}} + if (a {{c_op}} b) { + {{return_true}}; + } else { + {{return_false}}; + } + {{elif c_op in '+-'}} + // adapted from intobject.c in Py2.7: + // casts in the line below avoid undefined behaviour on overflow + x = (long)((unsigned long)a {{c_op}} b); + if (likely((x^a) >= 0 || (x^{{ '~' if op == 'Subtract' else '' }}b) >= 0)) + return PyInt_FromLong(x); + return PyLong_Type.tp_as_number->nb_{{slot_name}}(op1, op2); + {{elif c_op == '%'}} + {{zerodiv_check('b')}} + // see ExprNodes.py :: mod_int_utility_code + x = a % b; + x += ((x != 0) & ((x ^ b) < 0)) * b; + return PyInt_FromLong(x); + {{elif op == 'TrueDivide'}} + {{zerodiv_check('b')}} if (8 * sizeof(long) <= 53 || likely(labs({{ival}}) <= ((PY_LONG_LONG)1 << 53))) { - return PyFloat_FromDouble((double)a / (double)b); - } - // let Python do the rounding - return PyInt_Type.tp_as_number->nb_{{slot_name}}(op1, op2); - {{elif op == 'FloorDivide'}} - // INT_MIN / -1 is the only case that overflows, b == 0 is an error case - {{zerodiv_check('b')}} - if (unlikely(b == -1 && ((unsigned long)a) == 0-(unsigned long)a)) - return PyInt_Type.tp_as_number->nb_{{slot_name}}(op1, op2); - else { - long q, r; - // see ExprNodes.py :: div_int_utility_code - q = a / b; - r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - x = q; - } - return PyInt_FromLong(x); - {{elif op == 'Lshift'}} - if (likely(b < (int)(sizeof(long)*8) && a == (a << b) >> b) || !a) { - return PyInt_FromLong(a {{c_op}} b); - } - {{else}} - // other operations are safe, no overflow - return PyInt_FromLong(a {{c_op}} b); - {{endif}} - } - #endif - - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact({{pyval}}))) { - const long {{'a' if order == 'CObj' else 'b'}} = intval; - long {{ival}}{{if op not in ('Eq', 'Ne')}}, x{{endif}}; - {{if op not in ('Eq', 'Ne', 'TrueDivide')}} -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG ll{{'a' if order == 'CObj' else 'b'}} = intval; - PY_LONG_LONG ll{{ival}}, llx; -#endif - {{endif}} - const digit* digits = ((PyLongObject*){{pyval}})->ob_digit; - const Py_ssize_t size = Py_SIZE({{pyval}}); - // handle most common case first to avoid indirect branch and optimise branch prediction - if (likely(__Pyx_sst_abs(size) <= 1)) { - {{ival}} = likely(size) ? 
digits[0] : 0; - if (size == -1) {{ival}} = -{{ival}}; - } else { - switch (size) { - {{for _size in range(2, 5)}} - {{for _case in (-_size, _size)}} - case {{_case}}: - if (8 * sizeof(long) - 1 > {{_size}} * PyLong_SHIFT{{if op == 'TrueDivide'}} && {{_size-1}} * PyLong_SHIFT < 53{{endif}}) { - {{ival}} = {{'-' if _case < 0 else ''}}(long) {{pylong_join(_size, 'digits')}}; - break; - {{if op not in ('Eq', 'Ne', 'TrueDivide')}} -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > {{_size}} * PyLong_SHIFT) { - ll{{ival}} = {{'-' if _case < 0 else ''}}(PY_LONG_LONG) {{pylong_join(_size, 'digits', 'unsigned PY_LONG_LONG')}}; - goto long_long; -#endif - {{endif}} - } - // if size doesn't fit into a long or PY_LONG_LONG anymore, fall through to default + return PyFloat_FromDouble((double)a / (double)b); + } + // let Python do the rounding + return PyInt_Type.tp_as_number->nb_{{slot_name}}(op1, op2); + {{elif op == 'FloorDivide'}} + // INT_MIN / -1 is the only case that overflows, b == 0 is an error case + {{zerodiv_check('b')}} + if (unlikely(b == -1 && ((unsigned long)a) == 0-(unsigned long)a)) + return PyInt_Type.tp_as_number->nb_{{slot_name}}(op1, op2); + else { + long q, r; + // see ExprNodes.py :: div_int_utility_code + q = a / b; + r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + x = q; + } + return PyInt_FromLong(x); + {{elif op == 'Lshift'}} + if (likely(b < (int)(sizeof(long)*8) && a == (a << b) >> b) || !a) { + return PyInt_FromLong(a {{c_op}} b); + } + {{else}} + // other operations are safe, no overflow + return PyInt_FromLong(a {{c_op}} b); + {{endif}} + } + #endif + + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact({{pyval}}))) { + const long {{'a' if order == 'CObj' else 'b'}} = intval; + long {{ival}}{{if op not in ('Eq', 'Ne')}}, x{{endif}}; + {{if op not in ('Eq', 'Ne', 'TrueDivide')}} +#ifdef HAVE_LONG_LONG + const PY_LONG_LONG ll{{'a' if order == 'CObj' else 'b'}} = intval; + PY_LONG_LONG ll{{ival}}, llx; +#endif + {{endif}} + const digit* digits = ((PyLongObject*){{pyval}})->ob_digit; + const Py_ssize_t size = Py_SIZE({{pyval}}); + // handle most common case first to avoid indirect branch and optimise branch prediction + if (likely(__Pyx_sst_abs(size) <= 1)) { + {{ival}} = likely(size) ? 
digits[0] : 0; + if (size == -1) {{ival}} = -{{ival}}; + } else { + switch (size) { + {{for _size in range(2, 5)}} + {{for _case in (-_size, _size)}} + case {{_case}}: + if (8 * sizeof(long) - 1 > {{_size}} * PyLong_SHIFT{{if op == 'TrueDivide'}} && {{_size-1}} * PyLong_SHIFT < 53{{endif}}) { + {{ival}} = {{'-' if _case < 0 else ''}}(long) {{pylong_join(_size, 'digits')}}; + break; + {{if op not in ('Eq', 'Ne', 'TrueDivide')}} +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > {{_size}} * PyLong_SHIFT) { + ll{{ival}} = {{'-' if _case < 0 else ''}}(PY_LONG_LONG) {{pylong_join(_size, 'digits', 'unsigned PY_LONG_LONG')}}; + goto long_long; +#endif + {{endif}} + } + // if size doesn't fit into a long or PY_LONG_LONG anymore, fall through to default CYTHON_FALLTHROUGH; - {{endfor}} - {{endfor}} - - {{if op in ('Eq', 'Ne')}} - #if PyLong_SHIFT < 30 && PyLong_SHIFT != 15 - // unusual setup - your fault - default: return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}( - PyLong_Type.tp_richcompare({{'op1, op2' if order == 'ObjC' else 'op2, op1'}}, Py_{{op.upper()}})); - #else - // too large for the long values we allow => definitely not equal - default: {{return_false if op == 'Eq' else return_true}}; - #endif - {{else}} - default: return PyLong_Type.tp_as_number->nb_{{slot_name}}(op1, op2); - {{endif}} - } - } - {{if op in ('Eq', 'Ne')}} - if (a {{c_op}} b) { - {{return_true}}; - } else { - {{return_false}}; - } - {{else}} - {{if c_op == '%'}} - {{zerodiv_check('b')}} - // see ExprNodes.py :: mod_int_utility_code - x = a % b; - x += ((x != 0) & ((x ^ b) < 0)) * b; - {{elif op == 'TrueDivide'}} - {{zerodiv_check('b')}} + {{endfor}} + {{endfor}} + + {{if op in ('Eq', 'Ne')}} + #if PyLong_SHIFT < 30 && PyLong_SHIFT != 15 + // unusual setup - your fault + default: return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}( + PyLong_Type.tp_richcompare({{'op1, op2' if order == 'ObjC' else 'op2, op1'}}, Py_{{op.upper()}})); + #else + // too large for the long values we allow => definitely not equal + default: {{return_false if op == 'Eq' else return_true}}; + #endif + {{else}} + default: return PyLong_Type.tp_as_number->nb_{{slot_name}}(op1, op2); + {{endif}} + } + } + {{if op in ('Eq', 'Ne')}} + if (a {{c_op}} b) { + {{return_true}}; + } else { + {{return_false}}; + } + {{else}} + {{if c_op == '%'}} + {{zerodiv_check('b')}} + // see ExprNodes.py :: mod_int_utility_code + x = a % b; + x += ((x != 0) & ((x ^ b) < 0)) * b; + {{elif op == 'TrueDivide'}} + {{zerodiv_check('b')}} if ((8 * sizeof(long) <= 53 || likely(labs({{ival}}) <= ((PY_LONG_LONG)1 << 53))) - || __Pyx_sst_abs(size) <= 52 / PyLong_SHIFT) { - return PyFloat_FromDouble((double)a / (double)b); - } - return PyLong_Type.tp_as_number->nb_{{slot_name}}(op1, op2); - {{elif op == 'FloorDivide'}} - {{zerodiv_check('b')}} - { - long q, r; - // see ExprNodes.py :: div_int_utility_code - q = a / b; - r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - x = q; - } - {{else}} - x = a {{c_op}} b; - {{if op == 'Lshift'}} -#ifdef HAVE_LONG_LONG - if (unlikely(!(b < (int)(sizeof(long)*8) && a == x >> b)) && a) { - ll{{ival}} = {{ival}}; - goto long_long; - } -#else - if (likely(b < (int)(sizeof(long)*8) && a == x >> b) || !a) /* execute return statement below */ -#endif - {{endif}} - {{endif}} - return PyLong_FromLong(x); - - {{if op != 'TrueDivide'}} -#ifdef HAVE_LONG_LONG - long_long: - {{if c_op == '%'}} - // see ExprNodes.py :: mod_int_utility_code - llx = lla % llb; - llx += ((llx != 0) & ((llx ^ llb) 
< 0)) * llb; - {{elif op == 'FloorDivide'}} - { - PY_LONG_LONG q, r; - // see ExprNodes.py :: div_int_utility_code - q = lla / llb; - r = lla - q*llb; - q -= ((r != 0) & ((r ^ llb) < 0)); - llx = q; - } - {{else}} - llx = lla {{c_op}} llb; - {{if op == 'Lshift'}} - if (likely(lla == llx >> llb)) /* then execute 'return' below */ - {{endif}} - {{endif}} - return PyLong_FromLongLong(llx); -#endif - {{endif}}{{# if op != 'TrueDivide' #}} - {{endif}}{{# if op in ('Eq', 'Ne') #}} - } - #endif - - {{if c_op in '+-' or op in ('TrueDivide', 'Eq', 'Ne')}} - if (PyFloat_CheckExact({{pyval}})) { - const long {{'a' if order == 'CObj' else 'b'}} = intval; - double {{ival}} = PyFloat_AS_DOUBLE({{pyval}}); - {{if op in ('Eq', 'Ne')}} - if ((double)a {{c_op}} (double)b) { - {{return_true}}; - } else { - {{return_false}}; - } - {{else}} - double result; - {{if op == 'TrueDivide'}} - if (unlikely(zerodivision_check && b == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "float division by zero"); - return NULL; - } - {{endif}} - // copied from floatobject.c in Py3.5: + || __Pyx_sst_abs(size) <= 52 / PyLong_SHIFT) { + return PyFloat_FromDouble((double)a / (double)b); + } + return PyLong_Type.tp_as_number->nb_{{slot_name}}(op1, op2); + {{elif op == 'FloorDivide'}} + {{zerodiv_check('b')}} + { + long q, r; + // see ExprNodes.py :: div_int_utility_code + q = a / b; + r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + x = q; + } + {{else}} + x = a {{c_op}} b; + {{if op == 'Lshift'}} +#ifdef HAVE_LONG_LONG + if (unlikely(!(b < (int)(sizeof(long)*8) && a == x >> b)) && a) { + ll{{ival}} = {{ival}}; + goto long_long; + } +#else + if (likely(b < (int)(sizeof(long)*8) && a == x >> b) || !a) /* execute return statement below */ +#endif + {{endif}} + {{endif}} + return PyLong_FromLong(x); + + {{if op != 'TrueDivide'}} +#ifdef HAVE_LONG_LONG + long_long: + {{if c_op == '%'}} + // see ExprNodes.py :: mod_int_utility_code + llx = lla % llb; + llx += ((llx != 0) & ((llx ^ llb) < 0)) * llb; + {{elif op == 'FloorDivide'}} + { + PY_LONG_LONG q, r; + // see ExprNodes.py :: div_int_utility_code + q = lla / llb; + r = lla - q*llb; + q -= ((r != 0) & ((r ^ llb) < 0)); + llx = q; + } + {{else}} + llx = lla {{c_op}} llb; + {{if op == 'Lshift'}} + if (likely(lla == llx >> llb)) /* then execute 'return' below */ + {{endif}} + {{endif}} + return PyLong_FromLongLong(llx); +#endif + {{endif}}{{# if op != 'TrueDivide' #}} + {{endif}}{{# if op in ('Eq', 'Ne') #}} + } + #endif + + {{if c_op in '+-' or op in ('TrueDivide', 'Eq', 'Ne')}} + if (PyFloat_CheckExact({{pyval}})) { + const long {{'a' if order == 'CObj' else 'b'}} = intval; + double {{ival}} = PyFloat_AS_DOUBLE({{pyval}}); + {{if op in ('Eq', 'Ne')}} + if ((double)a {{c_op}} (double)b) { + {{return_true}}; + } else { + {{return_false}}; + } + {{else}} + double result; + {{if op == 'TrueDivide'}} + if (unlikely(zerodivision_check && b == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division by zero"); + return NULL; + } + {{endif}} + // copied from floatobject.c in Py3.5: // PyFPE_START_PROTECT("{{op.lower() if not op.endswith('Divide') else 'divide'}}", return NULL) - result = ((double)a) {{c_op}} (double)b; + result = ((double)a) {{c_op}} (double)b; // PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - {{endif}} - } - {{endif}} - - {{if op in ('Eq', 'Ne')}} - return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}( - PyObject_RichCompare(op1, op2, Py_{{op.upper()}})); - {{else}} - return (inplace ? 
PyNumber_InPlace{{op}} : PyNumber_{{op}})(op1, op2); - {{endif}} -} -#endif - -/////////////// PyFloatBinop.proto /////////////// - -{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} -#if !CYTHON_COMPILING_IN_PYPY -static {{c_ret_type}} __Pyx_PyFloat_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check); /*proto*/ -#else -#define __Pyx_PyFloat_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(op1, op2, floatval, inplace, zerodivision_check) \ - {{if op in ('Eq', 'Ne')}}{{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}(PyObject_RichCompare(op1, op2, Py_{{op.upper()}})) - {{elif op == 'Divide'}}((inplace ? __Pyx_PyNumber_InPlaceDivide(op1, op2) : __Pyx_PyNumber_Divide(op1, op2))) - {{else}}(inplace ? PyNumber_InPlace{{op}}(op1, op2) : PyNumber_{{op}}(op1, op2)) - {{endif}} -#endif - -/////////////// PyFloatBinop /////////////// - -#if !CYTHON_COMPILING_IN_PYPY -{{py: from Cython.Utility import pylong_join }} -{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} -{{py: return_true = 'Py_RETURN_TRUE' if ret_type.is_pyobject else 'return 1'}} -{{py: return_false = 'Py_RETURN_FALSE' if ret_type.is_pyobject else 'return 0'}} -{{py: pyval, fval = ('op2', 'b') if order == 'CObj' else ('op1', 'a') }} -{{py: cfunc_name = '__Pyx_PyFloat_%s%s%s' % ('' if ret_type.is_pyobject else 'Bool', op, order) }} -{{py: zerodiv_check = lambda operand, _cfunc_name=cfunc_name: '%s_ZeroDivisionError(%s)' % (_cfunc_name, operand)}} -{{py: -c_op = { - 'Add': '+', 'Subtract': '-', 'TrueDivide': '/', 'Divide': '/', 'Remainder': '%', - 'Eq': '==', 'Ne': '!=', - }[op] -}} - -{{if order == 'CObj' and c_op in '%/'}} -#define {{zerodiv_check('operand')}} if (unlikely(zerodivision_check && ((operand) == 0))) { \ - PyErr_SetString(PyExc_ZeroDivisionError, "float division{{if op == 'Remainder'}} or modulo{{endif}} by zero"); \ - return NULL; \ -} -{{endif}} - -static {{c_ret_type}} {{cfunc_name}}(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check) { - const double {{'a' if order == 'CObj' else 'b'}} = floatval; - double {{fval}}{{if op not in ('Eq', 'Ne')}}, result{{endif}}; - // Prevent "unused" warnings. 
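For the float % branch further down, C's fmod cannot be used as-is: C keeps the sign of the dividend while Python keeps the sign of the divisor, and a zero result takes the divisor's sign too. The adjustment the template emits, as a standalone sketch (py_fmod is a hypothetical name; b is assumed non-zero, which the generated code checks separately):

#include <math.h>

/* Python-style float modulo. */
static double py_fmod(double a, double b) {
    double r = fmod(a, b);            /* C semantics: sign of a */
    if (r != 0.0) {
        if ((r < 0) != (b < 0))
            r += b;                   /* shift into the divisor's sign */
    } else {
        r = copysign(0.0, b);         /* signed zero follows b */
    }
    return r;                         /* py_fmod(-7.0, 3.0) == 2.0 */
}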
- (void)inplace; - (void)zerodivision_check; - - {{if op in ('Eq', 'Ne')}} - if (op1 == op2) { - {{return_true if op == 'Eq' else return_false}}; - } - {{endif}} - - if (likely(PyFloat_CheckExact({{pyval}}))) { - {{fval}} = PyFloat_AS_DOUBLE({{pyval}}); - {{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check(fval)}}{{endif}} - } else - - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact({{pyval}}))) { - {{fval}} = (double) PyInt_AS_LONG({{pyval}}); - {{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check(fval)}}{{endif}} - } else - #endif - - if (likely(PyLong_CheckExact({{pyval}}))) { - #if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*){{pyval}})->ob_digit; - const Py_ssize_t size = Py_SIZE({{pyval}}); - switch (size) { - case 0: {{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check('0')}}{{else}}{{fval}} = 0.0;{{endif}} break; - case -1: {{fval}} = -(double) digits[0]; break; - case 1: {{fval}} = (double) digits[0]; break; - {{for _size in (2, 3, 4)}} - case -{{_size}}: - case {{_size}}: - if (8 * sizeof(unsigned long) > {{_size}} * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || ({{_size-1}} * PyLong_SHIFT < 53))) { - {{fval}} = (double) {{pylong_join(_size, 'digits')}}; - // let CPython do its own float rounding from 2**53 on (max. consecutive integer in double float) + return PyFloat_FromDouble(result); + {{endif}} + } + {{endif}} + + {{if op in ('Eq', 'Ne')}} + return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}( + PyObject_RichCompare(op1, op2, Py_{{op.upper()}})); + {{else}} + return (inplace ? PyNumber_InPlace{{op}} : PyNumber_{{op}})(op1, op2); + {{endif}} +} +#endif + +/////////////// PyFloatBinop.proto /////////////// + +{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} +#if !CYTHON_COMPILING_IN_PYPY +static {{c_ret_type}} __Pyx_PyFloat_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check); /*proto*/ +#else +#define __Pyx_PyFloat_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(op1, op2, floatval, inplace, zerodivision_check) \ + {{if op in ('Eq', 'Ne')}}{{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}(PyObject_RichCompare(op1, op2, Py_{{op.upper()}})) + {{elif op == 'Divide'}}((inplace ? __Pyx_PyNumber_InPlaceDivide(op1, op2) : __Pyx_PyNumber_Divide(op1, op2))) + {{else}}(inplace ? 
PyNumber_InPlace{{op}}(op1, op2) : PyNumber_{{op}}(op1, op2)) + {{endif}} +#endif + +/////////////// PyFloatBinop /////////////// + +#if !CYTHON_COMPILING_IN_PYPY +{{py: from Cython.Utility import pylong_join }} +{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} +{{py: return_true = 'Py_RETURN_TRUE' if ret_type.is_pyobject else 'return 1'}} +{{py: return_false = 'Py_RETURN_FALSE' if ret_type.is_pyobject else 'return 0'}} +{{py: pyval, fval = ('op2', 'b') if order == 'CObj' else ('op1', 'a') }} +{{py: cfunc_name = '__Pyx_PyFloat_%s%s%s' % ('' if ret_type.is_pyobject else 'Bool', op, order) }} +{{py: zerodiv_check = lambda operand, _cfunc_name=cfunc_name: '%s_ZeroDivisionError(%s)' % (_cfunc_name, operand)}} +{{py: +c_op = { + 'Add': '+', 'Subtract': '-', 'TrueDivide': '/', 'Divide': '/', 'Remainder': '%', + 'Eq': '==', 'Ne': '!=', + }[op] +}} + +{{if order == 'CObj' and c_op in '%/'}} +#define {{zerodiv_check('operand')}} if (unlikely(zerodivision_check && ((operand) == 0))) { \ + PyErr_SetString(PyExc_ZeroDivisionError, "float division{{if op == 'Remainder'}} or modulo{{endif}} by zero"); \ + return NULL; \ +} +{{endif}} + +static {{c_ret_type}} {{cfunc_name}}(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check) { + const double {{'a' if order == 'CObj' else 'b'}} = floatval; + double {{fval}}{{if op not in ('Eq', 'Ne')}}, result{{endif}}; + // Prevent "unused" warnings. + (void)inplace; + (void)zerodivision_check; + + {{if op in ('Eq', 'Ne')}} + if (op1 == op2) { + {{return_true if op == 'Eq' else return_false}}; + } + {{endif}} + + if (likely(PyFloat_CheckExact({{pyval}}))) { + {{fval}} = PyFloat_AS_DOUBLE({{pyval}}); + {{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check(fval)}}{{endif}} + } else + + #if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact({{pyval}}))) { + {{fval}} = (double) PyInt_AS_LONG({{pyval}}); + {{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check(fval)}}{{endif}} + } else + #endif + + if (likely(PyLong_CheckExact({{pyval}}))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*){{pyval}})->ob_digit; + const Py_ssize_t size = Py_SIZE({{pyval}}); + switch (size) { + case 0: {{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check('0')}}{{else}}{{fval}} = 0.0;{{endif}} break; + case -1: {{fval}} = -(double) digits[0]; break; + case 1: {{fval}} = (double) digits[0]; break; + {{for _size in (2, 3, 4)}} + case -{{_size}}: + case {{_size}}: + if (8 * sizeof(unsigned long) > {{_size}} * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || ({{_size-1}} * PyLong_SHIFT < 53))) { + {{fval}} = (double) {{pylong_join(_size, 'digits')}}; + // let CPython do its own float rounding from 2**53 on (max. consecutive integer in double float) if ((8 * sizeof(unsigned long) < 53) || ({{_size}} * PyLong_SHIFT < 53) || ({{fval}} < (double) ((PY_LONG_LONG)1 << 53))) { - if (size == {{-_size}}) - {{fval}} = -{{fval}}; - break; - } - } - // Fall through if size doesn't fit safely into a double anymore. - // It may not be obvious that this is a safe fall-through given the "fval < 2**53" - // check above. However, the number of digits that CPython uses for a given PyLong - // value is minimal, and together with the "(size-1) * SHIFT < 53" check above, - // this should make it safe. + if (size == {{-_size}}) + {{fval}} = -{{fval}}; + break; + } + } + // Fall through if size doesn't fit safely into a double anymore. + // It may not be obvious that this is a safe fall-through given the "fval < 2**53" + // check above. 
However, the number of digits that CPython uses for a given PyLong + // value is minimal, and together with the "(size-1) * SHIFT < 53" check above, + // this should make it safe. CYTHON_FALLTHROUGH; - {{endfor}} - default: - #else - { - #endif - {{if op in ('Eq', 'Ne')}} - return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}( - PyFloat_Type.tp_richcompare({{'op1, op2' if order == 'CObj' else 'op2, op1'}}, Py_{{op.upper()}})); - {{else}} - {{fval}} = PyLong_AsDouble({{pyval}}); - if (unlikely({{fval}} == -1.0 && PyErr_Occurred())) return NULL; - {{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check(fval)}}{{endif}} - {{endif}} - } - } else { - {{if op in ('Eq', 'Ne')}} - return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}( - PyObject_RichCompare(op1, op2, Py_{{op.upper()}})); - {{elif op == 'Divide'}} - return (inplace ? __Pyx_PyNumber_InPlaceDivide(op1, op2) : __Pyx_PyNumber_Divide(op1, op2)); - {{else}} - return (inplace ? PyNumber_InPlace{{op}} : PyNumber_{{op}})(op1, op2); - {{endif}} - } - - {{if op in ('Eq', 'Ne')}} - if (a {{c_op}} b) { - {{return_true}}; - } else { - {{return_false}}; - } - {{else}} - // copied from floatobject.c in Py3.5: - {{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check('b')}}{{endif}} + {{endfor}} + default: + #else + { + #endif + {{if op in ('Eq', 'Ne')}} + return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}( + PyFloat_Type.tp_richcompare({{'op1, op2' if order == 'CObj' else 'op2, op1'}}, Py_{{op.upper()}})); + {{else}} + {{fval}} = PyLong_AsDouble({{pyval}}); + if (unlikely({{fval}} == -1.0 && PyErr_Occurred())) return NULL; + {{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check(fval)}}{{endif}} + {{endif}} + } + } else { + {{if op in ('Eq', 'Ne')}} + return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}( + PyObject_RichCompare(op1, op2, Py_{{op.upper()}})); + {{elif op == 'Divide'}} + return (inplace ? __Pyx_PyNumber_InPlaceDivide(op1, op2) : __Pyx_PyNumber_Divide(op1, op2)); + {{else}} + return (inplace ? PyNumber_InPlace{{op}} : PyNumber_{{op}})(op1, op2); + {{endif}} + } + + {{if op in ('Eq', 'Ne')}} + if (a {{c_op}} b) { + {{return_true}}; + } else { + {{return_false}}; + } + {{else}} + // copied from floatobject.c in Py3.5: + {{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check('b')}}{{endif}} // PyFPE_START_PROTECT("{{op.lower() if not op.endswith('Divide') else 'divide'}}", return NULL) - {{if c_op == '%'}} - result = fmod(a, b); - if (result) - result += ((result < 0) ^ (b < 0)) * b; - else - result = copysign(0.0, b); - {{else}} - result = a {{c_op}} b; - {{endif}} + {{if c_op == '%'}} + result = fmod(a, b); + if (result) + result += ((result < 0) ^ (b < 0)) * b; + else + result = copysign(0.0, b); + {{else}} + result = a {{c_op}} b; + {{endif}} // PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - {{endif}} -} -#endif + return PyFloat_FromDouble(result); + {{endif}} +} +#endif diff --git a/contrib/tools/cython/Cython/Utility/Overflow.c b/contrib/tools/cython/Cython/Utility/Overflow.c index 7add2b822d..0259c58f01 100644 --- a/contrib/tools/cython/Cython/Utility/Overflow.c +++ b/contrib/tools/cython/Cython/Utility/Overflow.c @@ -47,12 +47,12 @@ static int __Pyx_check_twos_complement(void) { #define __Pyx_div_const_no_overflow(a, b, overflow) ((a) / (b)) /////////////// Common.init /////////////// -//@substitute: naming +//@substitute: naming -// FIXME: Propagate the error here instead of just printing it. 
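The check being wired into module init here exists because the wraparound and XOR-sign tricks throughout these helpers assume a two's-complement integer representation. A standalone version of such a sanity check (a sketch only; the exact body of __Pyx_check_twos_complement is not shown in this hunk):

/* Returns 0 on a two's-complement platform, nonzero otherwise. */
static int check_twos_complement(void) {
    if (-1 != (int)~0u)   /* the all-ones pattern must read back as -1 */
        return 1;
    if ((-1 & 3) != 3)    /* ones'-complement/sign-magnitude would fail */
        return 1;
    return 0;
}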
-if (unlikely(__Pyx_check_twos_complement())) { - PyErr_WriteUnraisable($module_cname); -} +// FIXME: Propagate the error here instead of just printing it. +if (unlikely(__Pyx_check_twos_complement())) { + PyErr_WriteUnraisable($module_cname); +} /////////////// BaseCaseUnsigned.proto /////////////// @@ -87,13 +87,13 @@ static CYTHON_INLINE {{UINT}} __Pyx_mul_{{NAME}}_checking_overflow({{UINT}} a, { {{UINT}} r = ({{UINT}}) big_r; *overflow |= big_r != r; return r; -#ifdef HAVE_LONG_LONG +#ifdef HAVE_LONG_LONG } else if ((sizeof({{UINT}}) < sizeof(unsigned PY_LONG_LONG))) { - unsigned PY_LONG_LONG big_r = ((unsigned PY_LONG_LONG) a) * ((unsigned PY_LONG_LONG) b); + unsigned PY_LONG_LONG big_r = ((unsigned PY_LONG_LONG) a) * ((unsigned PY_LONG_LONG) b); {{UINT}} r = ({{UINT}}) big_r; *overflow |= big_r != r; return r; -#endif +#endif } else { {{UINT}} prod = a * b; double dprod = ((double) a) * ((double) b); @@ -143,13 +143,13 @@ static CYTHON_INLINE {{INT}} __Pyx_add_{{NAME}}_checking_overflow({{INT}} a, {{I {{INT}} r = ({{INT}}) big_r; *overflow |= big_r != r; return r; -#ifdef HAVE_LONG_LONG +#ifdef HAVE_LONG_LONG } else if ((sizeof({{INT}}) < sizeof(PY_LONG_LONG))) { - PY_LONG_LONG big_r = ((PY_LONG_LONG) a) + ((PY_LONG_LONG) b); + PY_LONG_LONG big_r = ((PY_LONG_LONG) a) + ((PY_LONG_LONG) b); {{INT}} r = ({{INT}}) big_r; *overflow |= big_r != r; return r; -#endif +#endif } else { // Signed overflow undefined, but unsigned overflow is well defined. {{INT}} r = ({{INT}}) ((unsigned {{INT}}) a + (unsigned {{INT}}) b); @@ -189,13 +189,13 @@ static CYTHON_INLINE {{INT}} __Pyx_mul_{{NAME}}_checking_overflow({{INT}} a, {{I {{INT}} r = ({{INT}}) big_r; *overflow |= big_r != r; return ({{INT}}) r; -#ifdef HAVE_LONG_LONG +#ifdef HAVE_LONG_LONG } else if ((sizeof({{INT}}) < sizeof(PY_LONG_LONG))) { - PY_LONG_LONG big_r = ((PY_LONG_LONG) a) * ((PY_LONG_LONG) b); + PY_LONG_LONG big_r = ((PY_LONG_LONG) a) * ((PY_LONG_LONG) b); {{INT}} r = ({{INT}}) big_r; *overflow |= big_r != r; return ({{INT}}) r; -#endif +#endif } else { {{INT}} prod = a * b; double dprod = ((double) a) * ((double) b); @@ -230,20 +230,20 @@ static CYTHON_INLINE {{INT}} __Pyx_div_{{NAME}}_checking_overflow({{INT}} a, {{I /////////////// SizeCheck.init /////////////// -//@substitute: naming +//@substitute: naming -// FIXME: Propagate the error here instead of just printing it. -if (unlikely(__Pyx_check_sane_{{NAME}}())) { - PyErr_WriteUnraisable($module_cname); -} +// FIXME: Propagate the error here instead of just printing it. 
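The mul_..._checking_overflow hunks above use two strategies: widen to a larger integer type when one exists and check that the exact product still fits, otherwise multiply with defined wraparound and cross-check against the double-precision product, which is accurate enough that a wrapped result lands far from it. Both branches as a sketch for int (hypothetical names; the threshold below is one reasonable choice, not necessarily the constant Cython uses):

#include <math.h>  /* fabs */

/* Widening branch, as in the HAVE_LONG_LONG case above. */
static int mul_checked_widening(int a, int b, int *overflow) {
    long long big = (long long)a * (long long)b;  /* exact for two ints */
    int r = (int)big;
    *overflow |= (big != (long long)r);
    return r;
}

/* Double cross-check branch for when no wider type is available. */
static int mul_checked_double(int a, int b, int *overflow) {
    int prod = (int)((unsigned int)a * (unsigned int)b); /* defined wrap */
    double dprod = (double)a * (double)b;
    /* wraparound shifts prod by a multiple of 2^32, far from dprod */
    *overflow |= (fabs(dprod - (double)prod) > 0.5 * fabs(dprod));
    return prod;
}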
+if (unlikely(__Pyx_check_sane_{{NAME}}())) { + PyErr_WriteUnraisable($module_cname); +} /////////////// SizeCheck.proto /////////////// static int __Pyx_check_sane_{{NAME}}(void) { if (((sizeof({{TYPE}}) <= sizeof(int)) || -#ifdef HAVE_LONG_LONG +#ifdef HAVE_LONG_LONG (sizeof({{TYPE}}) == sizeof(PY_LONG_LONG)) || -#endif +#endif (sizeof({{TYPE}}) == sizeof(long)))) { return 0; } else { @@ -268,10 +268,10 @@ static CYTHON_INLINE {{TYPE}} __Pyx_{{BINOP}}_{{NAME}}_checking_overflow({{TYPE} return ({{TYPE}}) __Pyx_{{BINOP}}_unsigned_int_checking_overflow(a, b, overflow); } else if ((sizeof({{TYPE}}) == sizeof(unsigned long))) { return ({{TYPE}}) __Pyx_{{BINOP}}_unsigned_long_checking_overflow(a, b, overflow); -#ifdef HAVE_LONG_LONG +#ifdef HAVE_LONG_LONG } else if ((sizeof({{TYPE}}) == sizeof(unsigned PY_LONG_LONG))) { return ({{TYPE}}) __Pyx_{{BINOP}}_unsigned_long_long_checking_overflow(a, b, overflow); -#endif +#endif } else { abort(); return 0; /* handled elsewhere */ } @@ -280,10 +280,10 @@ static CYTHON_INLINE {{TYPE}} __Pyx_{{BINOP}}_{{NAME}}_checking_overflow({{TYPE} return ({{TYPE}}) __Pyx_{{BINOP}}_int_checking_overflow(a, b, overflow); } else if ((sizeof({{TYPE}}) == sizeof(long))) { return ({{TYPE}}) __Pyx_{{BINOP}}_long_checking_overflow(a, b, overflow); -#ifdef HAVE_LONG_LONG +#ifdef HAVE_LONG_LONG } else if ((sizeof({{TYPE}}) == sizeof(PY_LONG_LONG))) { return ({{TYPE}}) __Pyx_{{BINOP}}_long_long_checking_overflow(a, b, overflow); -#endif +#endif } else { abort(); return 0; /* handled elsewhere */ } @@ -302,10 +302,10 @@ static CYTHON_INLINE {{TYPE}} __Pyx_lshift_{{NAME}}_checking_overflow({{TYPE}} a } #define __Pyx_lshift_const_{{NAME}}_checking_overflow __Pyx_lshift_{{NAME}}_checking_overflow - -/////////////// UnaryNegOverflows.proto /////////////// - -//FIXME: shouldn't the macro name be prefixed by "__Pyx_" ? Too late now, I guess... -// from intobject.c -#define UNARY_NEG_WOULD_OVERFLOW(x) \ - (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) + +/////////////// UnaryNegOverflows.proto /////////////// + +//FIXME: shouldn't the macro name be prefixed by "__Pyx_" ? Too late now, I guess... +// from intobject.c +#define UNARY_NEG_WOULD_OVERFLOW(x) \ + (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) diff --git a/contrib/tools/cython/Cython/Utility/Profile.c b/contrib/tools/cython/Cython/Utility/Profile.c index 5844bae89d..921eb67529 100644 --- a/contrib/tools/cython/Cython/Utility/Profile.c +++ b/contrib/tools/cython/Cython/Utility/Profile.c @@ -6,21 +6,21 @@ // but maybe some other profilers don't. 
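The macros below feed tstate->c_profilefunc / c_tracefunc, i.e. whatever was installed through CPython's C-level hook; PyTrace_CALL, PyTrace_RETURN and PyTrace_EXCEPTION are the event codes they emit. For context, the shape of such a hook (a sketch, not part of the Cython sources):

#include <Python.h>

static long n_calls = 0, n_returns = 0;

/* Py_tracefunc signature, as expected by PyEval_SetProfile(). */
static int count_events(PyObject *obj, PyFrameObject *frame,
                        int what, PyObject *arg) {
    (void)obj; (void)frame; (void)arg;
    if (what == PyTrace_CALL)   n_calls++;
    if (what == PyTrace_RETURN) n_returns++;
    return 0;  /* nonzero would raise into the traced code */
}

/* Install with PyEval_SetProfile(count_events, NULL);
 * uninstall with PyEval_SetProfile(NULL, NULL). */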
#ifndef CYTHON_PROFILE -#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON - #define CYTHON_PROFILE 0 -#else +#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON + #define CYTHON_PROFILE 0 +#else #define CYTHON_PROFILE 1 #endif -#endif - -#ifndef CYTHON_TRACE_NOGIL - #define CYTHON_TRACE_NOGIL 0 -#else - #if CYTHON_TRACE_NOGIL && !defined(CYTHON_TRACE) - #define CYTHON_TRACE 1 - #endif -#endif - +#endif + +#ifndef CYTHON_TRACE_NOGIL + #define CYTHON_TRACE_NOGIL 0 +#else + #if CYTHON_TRACE_NOGIL && !defined(CYTHON_TRACE) + #define CYTHON_TRACE 1 + #endif +#endif + #ifndef CYTHON_TRACE #define CYTHON_TRACE 0 #endif @@ -41,10 +41,10 @@ #if CYTHON_PROFILE_REUSE_FRAME #define CYTHON_FRAME_MODIFIER static - #define CYTHON_FRAME_DEL(frame) + #define CYTHON_FRAME_DEL(frame) #else #define CYTHON_FRAME_MODIFIER - #define CYTHON_FRAME_DEL(frame) Py_CLEAR(frame) + #define CYTHON_FRAME_DEL(frame) Py_CLEAR(frame) #endif #define __Pyx_TraceDeclarations \ @@ -54,7 +54,7 @@ #define __Pyx_TraceFrameInit(codeobj) \ if (codeobj) $frame_code_cname = (PyCodeObject*) codeobj; - + #if PY_VERSION_HEX >= 0x030b00a2 #define __Pyx_IsTracing(tstate, check_tracing, check_funcs) \ (unlikely((tstate)->cframe->use_tracing) && \ @@ -99,175 +99,175 @@ #endif - #ifdef WITH_THREAD - #define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) \ - if (nogil) { \ - if (CYTHON_TRACE_NOGIL) { \ - PyThreadState *tstate; \ - PyGILState_STATE state = PyGILState_Ensure(); \ + #ifdef WITH_THREAD + #define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) \ + if (nogil) { \ + if (CYTHON_TRACE_NOGIL) { \ + PyThreadState *tstate; \ + PyGILState_STATE state = PyGILState_Ensure(); \ tstate = __Pyx_PyThreadState_Current; \ if (__Pyx_IsTracing(tstate, 1, 1)) { \ __Pyx_use_tracing = __Pyx_TraceSetupAndCall(&$frame_code_cname, &$frame_cname, tstate, funcname, srcfile, firstlineno); \ - } \ - PyGILState_Release(state); \ - if (unlikely(__Pyx_use_tracing < 0)) goto_error; \ - } \ - } else { \ - PyThreadState* tstate = PyThreadState_GET(); \ + } \ + PyGILState_Release(state); \ + if (unlikely(__Pyx_use_tracing < 0)) goto_error; \ + } \ + } else { \ + PyThreadState* tstate = PyThreadState_GET(); \ if (__Pyx_IsTracing(tstate, 1, 1)) { \ __Pyx_use_tracing = __Pyx_TraceSetupAndCall(&$frame_code_cname, &$frame_cname, tstate, funcname, srcfile, firstlineno); \ - if (unlikely(__Pyx_use_tracing < 0)) goto_error; \ - } \ + if (unlikely(__Pyx_use_tracing < 0)) goto_error; \ + } \ } - #else - #define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) \ - { PyThreadState* tstate = PyThreadState_GET(); \ + #else + #define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) \ + { PyThreadState* tstate = PyThreadState_GET(); \ if (__Pyx_IsTracing(tstate, 1, 1)) { \ __Pyx_use_tracing = __Pyx_TraceSetupAndCall(&$frame_code_cname, &$frame_cname, tstate, funcname, srcfile, firstlineno); \ - if (unlikely(__Pyx_use_tracing < 0)) goto_error; \ - } \ - } - #endif + if (unlikely(__Pyx_use_tracing < 0)) goto_error; \ + } \ + } + #endif #define __Pyx_TraceException() \ - if (likely(!__Pyx_use_tracing)); else { \ + if (likely(!__Pyx_use_tracing)); else { \ PyThreadState* tstate = __Pyx_PyThreadState_Current; \ if (__Pyx_IsTracing(tstate, 0, 1)) { \ __Pyx_EnterTracing(tstate); \ - PyObject *exc_info = __Pyx_GetExceptionTuple(tstate); \ - if (exc_info) { \ - if (CYTHON_TRACE && tstate->c_tracefunc) \ - tstate->c_tracefunc( \ - tstate->c_traceobj, $frame_cname, PyTrace_EXCEPTION, exc_info); \ - 
tstate->c_profilefunc( \ - tstate->c_profileobj, $frame_cname, PyTrace_EXCEPTION, exc_info); \ - Py_DECREF(exc_info); \ - } \ + PyObject *exc_info = __Pyx_GetExceptionTuple(tstate); \ + if (exc_info) { \ + if (CYTHON_TRACE && tstate->c_tracefunc) \ + tstate->c_tracefunc( \ + tstate->c_traceobj, $frame_cname, PyTrace_EXCEPTION, exc_info); \ + tstate->c_profilefunc( \ + tstate->c_profileobj, $frame_cname, PyTrace_EXCEPTION, exc_info); \ + Py_DECREF(exc_info); \ + } \ __Pyx_LeaveTracing(tstate); \ } \ } - static void __Pyx_call_return_trace_func(PyThreadState *tstate, PyFrameObject *frame, PyObject *result) { - PyObject *type, *value, *traceback; + static void __Pyx_call_return_trace_func(PyThreadState *tstate, PyFrameObject *frame, PyObject *result) { + PyObject *type, *value, *traceback; __Pyx_ErrFetchInState(tstate, &type, &value, &traceback); __Pyx_EnterTracing(tstate); - if (CYTHON_TRACE && tstate->c_tracefunc) - tstate->c_tracefunc(tstate->c_traceobj, frame, PyTrace_RETURN, result); - if (tstate->c_profilefunc) - tstate->c_profilefunc(tstate->c_profileobj, frame, PyTrace_RETURN, result); - CYTHON_FRAME_DEL(frame); + if (CYTHON_TRACE && tstate->c_tracefunc) + tstate->c_tracefunc(tstate->c_traceobj, frame, PyTrace_RETURN, result); + if (tstate->c_profilefunc) + tstate->c_profilefunc(tstate->c_profileobj, frame, PyTrace_RETURN, result); + CYTHON_FRAME_DEL(frame); __Pyx_LeaveTracing(tstate); __Pyx_ErrRestoreInState(tstate, type, value, traceback); } - #ifdef WITH_THREAD - #define __Pyx_TraceReturn(result, nogil) \ - if (likely(!__Pyx_use_tracing)); else { \ - if (nogil) { \ - if (CYTHON_TRACE_NOGIL) { \ - PyThreadState *tstate; \ - PyGILState_STATE state = PyGILState_Ensure(); \ + #ifdef WITH_THREAD + #define __Pyx_TraceReturn(result, nogil) \ + if (likely(!__Pyx_use_tracing)); else { \ + if (nogil) { \ + if (CYTHON_TRACE_NOGIL) { \ + PyThreadState *tstate; \ + PyGILState_STATE state = PyGILState_Ensure(); \ tstate = __Pyx_PyThreadState_Current; \ if (__Pyx_IsTracing(tstate, 0, 0)) { \ - __Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \ - } \ - PyGILState_Release(state); \ - } \ - } else { \ + __Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \ + } \ + PyGILState_Release(state); \ + } \ + } else { \ PyThreadState* tstate = __Pyx_PyThreadState_Current; \ if (__Pyx_IsTracing(tstate, 0, 0)) { \ - __Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \ - } \ - } \ - } - #else - #define __Pyx_TraceReturn(result, nogil) \ - if (likely(!__Pyx_use_tracing)); else { \ + __Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \ + } \ + } \ + } + #else + #define __Pyx_TraceReturn(result, nogil) \ + if (likely(!__Pyx_use_tracing)); else { \ PyThreadState* tstate = __Pyx_PyThreadState_Current; \ if (__Pyx_IsTracing(tstate, 0, 0)) { \ - __Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \ - } \ - } - #endif - + __Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \ + } \ + } + #endif + static PyCodeObject *__Pyx_createFrameCodeObject(const char *funcname, const char *srcfile, int firstlineno); /*proto*/ static int __Pyx_TraceSetupAndCall(PyCodeObject** code, PyFrameObject** frame, PyThreadState* tstate, const char *funcname, const char *srcfile, int firstlineno); /*proto*/ #else #define __Pyx_TraceDeclarations - #define __Pyx_TraceFrameInit(codeobj) - // mark error label as used to avoid compiler warnings + #define __Pyx_TraceFrameInit(codeobj) + // mark error label as used to avoid compiler 
warnings #define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) if ((1)); else goto_error; #define __Pyx_TraceException() - #define __Pyx_TraceReturn(result, nogil) + #define __Pyx_TraceReturn(result, nogil) #endif /* CYTHON_PROFILE */ #if CYTHON_TRACE - // see call_trace_protected() in CPython's ceval.c - static int __Pyx_call_line_trace_func(PyThreadState *tstate, PyFrameObject *frame, int lineno) { - int ret; - PyObject *type, *value, *traceback; + // see call_trace_protected() in CPython's ceval.c + static int __Pyx_call_line_trace_func(PyThreadState *tstate, PyFrameObject *frame, int lineno) { + int ret; + PyObject *type, *value, *traceback; __Pyx_ErrFetchInState(tstate, &type, &value, &traceback); - __Pyx_PyFrame_SetLineNumber(frame, lineno); + __Pyx_PyFrame_SetLineNumber(frame, lineno); __Pyx_EnterTracing(tstate); - ret = tstate->c_tracefunc(tstate->c_traceobj, frame, PyTrace_LINE, NULL); + ret = tstate->c_tracefunc(tstate->c_traceobj, frame, PyTrace_LINE, NULL); __Pyx_LeaveTracing(tstate); - if (likely(!ret)) { + if (likely(!ret)) { __Pyx_ErrRestoreInState(tstate, type, value, traceback); - } else { - Py_XDECREF(type); - Py_XDECREF(value); - Py_XDECREF(traceback); - } - return ret; - } - - #ifdef WITH_THREAD - #define __Pyx_TraceLine(lineno, nogil, goto_error) \ - if (likely(!__Pyx_use_tracing)); else { \ + } else { + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(traceback); + } + return ret; + } + + #ifdef WITH_THREAD + #define __Pyx_TraceLine(lineno, nogil, goto_error) \ + if (likely(!__Pyx_use_tracing)); else { \ // mark error label as used to avoid compiler warnings \ if ((1)); else goto_error; \ - if (nogil) { \ - if (CYTHON_TRACE_NOGIL) { \ - int ret = 0; \ - PyThreadState *tstate; \ - PyGILState_STATE state = PyGILState_Ensure(); \ + if (nogil) { \ + if (CYTHON_TRACE_NOGIL) { \ + int ret = 0; \ + PyThreadState *tstate; \ + PyGILState_STATE state = PyGILState_Ensure(); \ tstate = __Pyx_PyThreadState_Current; \ if (__Pyx_IsTracing(tstate, 0, 0) && tstate->c_tracefunc && $frame_cname->f_trace) { \ - ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \ - } \ - PyGILState_Release(state); \ + ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \ + } \ + PyGILState_Release(state); \ // XXX https://github.com/cython/cython/issues/2274 \ if (unlikely(ret)) { fprintf(stderr, "cython: line_trace_func returned %d\n", ret); } \ - } \ - } else { \ + } \ + } else { \ PyThreadState* tstate = __Pyx_PyThreadState_Current; \ if (__Pyx_IsTracing(tstate, 0, 0) && tstate->c_tracefunc && $frame_cname->f_trace) { \ - int ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \ + int ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \ // XXX https://github.com/cython/cython/issues/2274 \ if (unlikely(ret)) { fprintf(stderr, "cython: line_trace_func returned %d\n", ret); } \ - } \ - } \ - } - #else - #define __Pyx_TraceLine(lineno, nogil, goto_error) \ - if (likely(!__Pyx_use_tracing)); else { \ + } \ + } \ + } + #else + #define __Pyx_TraceLine(lineno, nogil, goto_error) \ + if (likely(!__Pyx_use_tracing)); else { \ // mark error label as used to avoid compiler warnings \ if ((1)); else goto_error; \ PyThreadState* tstate = __Pyx_PyThreadState_Current; \ if (__Pyx_IsTracing(tstate, 0, 0) && tstate->c_tracefunc && $frame_cname->f_trace) { \ - int ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \ + int ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \ // XXX 
https://github.com/cython/cython/issues/2274 \ if (unlikely(ret)) { fprintf(stderr, "cython: line_trace_func returned %d\n", ret); } \ - } \ + } \ } - #endif + #endif #else - // mark error label as used to avoid compiler warnings + // mark error label as used to avoid compiler warnings #define __Pyx_TraceLine(lineno, nogil, goto_error) if ((1)); else goto_error; #endif @@ -282,7 +282,7 @@ static int __Pyx_TraceSetupAndCall(PyCodeObject** code, const char *funcname, const char *srcfile, int firstlineno) { - PyObject *type, *value, *traceback; + PyObject *type, *value, *traceback; int retval; if (*frame == NULL || !CYTHON_PROFILE_REUSE_FRAME) { if (*code == NULL) { @@ -308,46 +308,46 @@ static int __Pyx_TraceSetupAndCall(PyCodeObject** code, } __Pyx_PyFrame_SetLineNumber(*frame, firstlineno); - retval = 1; + retval = 1; __Pyx_EnterTracing(tstate); __Pyx_ErrFetchInState(tstate, &type, &value, &traceback); #if CYTHON_TRACE if (tstate->c_tracefunc) - retval = tstate->c_tracefunc(tstate->c_traceobj, *frame, PyTrace_CALL, NULL) == 0; - if (retval && tstate->c_profilefunc) + retval = tstate->c_tracefunc(tstate->c_traceobj, *frame, PyTrace_CALL, NULL) == 0; + if (retval && tstate->c_profilefunc) #endif retval = tstate->c_profilefunc(tstate->c_profileobj, *frame, PyTrace_CALL, NULL) == 0; __Pyx_LeaveTracing(tstate); - if (retval) { + if (retval) { __Pyx_ErrRestoreInState(tstate, type, value, traceback); return __Pyx_IsTracing(tstate, 0, 0) && retval; - } else { - Py_XDECREF(type); - Py_XDECREF(value); - Py_XDECREF(traceback); - return -1; - } + } else { + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(traceback); + return -1; + } } static PyCodeObject *__Pyx_createFrameCodeObject(const char *funcname, const char *srcfile, int firstlineno) { - PyCodeObject *py_code = 0; - -#if PY_MAJOR_VERSION >= 3 - py_code = PyCode_NewEmpty(srcfile, funcname, firstlineno); - // make CPython use a fresh dict for "f_locals" at need (see GH #1836) - if (likely(py_code)) { - py_code->co_flags |= CO_OPTIMIZED | CO_NEWLOCALS; - } -#else + PyCodeObject *py_code = 0; + +#if PY_MAJOR_VERSION >= 3 + py_code = PyCode_NewEmpty(srcfile, funcname, firstlineno); + // make CPython use a fresh dict for "f_locals" at need (see GH #1836) + if (likely(py_code)) { + py_code->co_flags |= CO_OPTIMIZED | CO_NEWLOCALS; + } +#else PyObject *py_srcfile = 0; PyObject *py_funcname = 0; py_funcname = PyString_FromString(funcname); - if (unlikely(!py_funcname)) goto bad; + if (unlikely(!py_funcname)) goto bad; py_srcfile = PyString_FromString(srcfile); - if (unlikely(!py_srcfile)) goto bad; + if (unlikely(!py_srcfile)) goto bad; py_code = PyCode_New( 0, /*int argcount,*/ @@ -370,7 +370,7 @@ static PyCodeObject *__Pyx_createFrameCodeObject(const char *funcname, const cha bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); -#endif +#endif return py_code; } diff --git a/contrib/tools/cython/Cython/Utility/StringTools.c b/contrib/tools/cython/Cython/Utility/StringTools.c index 25255f2630..2fdae812a0 100644 --- a/contrib/tools/cython/Cython/Utility/StringTools.c +++ b/contrib/tools/cython/Cython/Utility/StringTools.c @@ -51,12 +51,12 @@ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character); /*proto*/ //////////////////// BytesContains //////////////////// -//@requires: IncludeStringH +//@requires: IncludeStringH static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character) { const Py_ssize_t length = PyBytes_GET_SIZE(bytes); char* char_start = 
PyBytes_AS_STRING(bytes); - return memchr(char_start, (unsigned char)character, (size_t)length) != NULL; + return memchr(char_start, (unsigned char)character, (size_t)length) != NULL; } @@ -146,24 +146,24 @@ static CYTHON_INLINE int __Pyx_UnicodeContainsUCS4(PyObject* unicode, Py_UCS4 ch //////////////////// PyUnicodeContains.proto //////////////////// -static CYTHON_INLINE int __Pyx_PyUnicode_ContainsTF(PyObject* substring, PyObject* text, int eq) { +static CYTHON_INLINE int __Pyx_PyUnicode_ContainsTF(PyObject* substring, PyObject* text, int eq) { int result = PyUnicode_Contains(text, substring); return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); } -//////////////////// CStringEquals.proto //////////////////// - -static CYTHON_INLINE int __Pyx_StrEq(const char *, const char *); /*proto*/ - -//////////////////// CStringEquals //////////////////// - -static CYTHON_INLINE int __Pyx_StrEq(const char *s1, const char *s2) { - while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; } - return *s1 == *s2; -} - - +//////////////////// CStringEquals.proto //////////////////// + +static CYTHON_INLINE int __Pyx_StrEq(const char *, const char *); /*proto*/ + +//////////////////// CStringEquals //////////////////// + +static CYTHON_INLINE int __Pyx_StrEq(const char *s1, const char *s2) { + while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; } + return *s1 == *s2; +} + + //////////////////// StrEquals.proto //////////////////// //@requires: BytesEquals //@requires: UnicodeEquals @@ -263,9 +263,9 @@ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); @@ -359,7 +359,7 @@ static CYTHON_INLINE int __Pyx_GetItemInt_ByteArray_Fast(PyObject* string, Py_ss if (wraparound | boundscheck) { length = PyByteArray_GET_SIZE(string); if (wraparound & unlikely(i < 0)) i += length; - if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) { + if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) { return (unsigned char) (PyByteArray_AS_STRING(string)[i]); } else { PyErr_SetString(PyExc_IndexError, "bytearray index out of range"); @@ -389,7 +389,7 @@ static CYTHON_INLINE int __Pyx_SetItemInt_ByteArray_Fast(PyObject* string, Py_ss if (wraparound | boundscheck) { length = PyByteArray_GET_SIZE(string); if (wraparound & unlikely(i < 0)) i += length; - if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) { + if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) { PyByteArray_AS_STRING(string)[i] = (char) v; return 0; } else { @@ -422,7 +422,7 @@ static CYTHON_INLINE Py_UCS4 __Pyx_GetItemInt_Unicode_Fast(PyObject* ustring, Py if (wraparound | boundscheck) { length = __Pyx_PyUnicode_GET_LENGTH(ustring); if (wraparound & unlikely(i < 0)) i += length; - if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) { + if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) { return __Pyx_PyUnicode_READ_CHAR(ustring, i); } else { PyErr_SetString(PyExc_IndexError, "string index out of range"); @@ -480,13 +480,13 @@ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { - size_t slen = strlen(cstring); - if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { - 
PyErr_SetString(PyExc_OverflowError, - "c-string too long to convert to Python"); - return NULL; - } - length = (Py_ssize_t) slen; + size_t slen = strlen(cstring); + if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { + PyErr_SetString(PyExc_OverflowError, + "c-string too long to convert to Python"); + return NULL; + } + length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) @@ -586,7 +586,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Substring( start = 0; } if (stop < 0) - stop += length; + stop += length; else if (stop > length) stop = length; if (stop <= start) @@ -619,9 +619,9 @@ static CYTHON_INLINE int __Pyx_Py_UNICODE_ISTITLE(Py_UCS4 uchar) static int __Pyx_PyUnicode_Tailmatch( PyObject* s, PyObject* substr, Py_ssize_t start, Py_ssize_t end, int direction); /*proto*/ - -/////////////// unicode_tailmatch /////////////// - + +/////////////// unicode_tailmatch /////////////// + // Python's unicode.startswith() and unicode.endswith() support a // tuple of prefixes/suffixes, whereas it's much more common to // test for a single unicode string. @@ -631,7 +631,7 @@ static int __Pyx_PyUnicode_TailmatchTuple(PyObject* s, PyObject* substrings, Py_ssize_t i, count = PyTuple_GET_SIZE(substrings); for (i = 0; i < count; i++) { Py_ssize_t result; -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS result = PyUnicode_Tailmatch(s, PyTuple_GET_ITEM(substrings, i), start, end, direction); #else @@ -652,21 +652,21 @@ static int __Pyx_PyUnicode_Tailmatch(PyObject* s, PyObject* substr, if (unlikely(PyTuple_Check(substr))) { return __Pyx_PyUnicode_TailmatchTuple(s, substr, start, end, direction); } - return (int) PyUnicode_Tailmatch(s, substr, start, end, direction); + return (int) PyUnicode_Tailmatch(s, substr, start, end, direction); } /////////////// bytes_tailmatch.proto /////////////// -static int __Pyx_PyBytes_SingleTailmatch(PyObject* self, PyObject* arg, - Py_ssize_t start, Py_ssize_t end, int direction); /*proto*/ -static int __Pyx_PyBytes_Tailmatch(PyObject* self, PyObject* substr, - Py_ssize_t start, Py_ssize_t end, int direction); /*proto*/ - -/////////////// bytes_tailmatch /////////////// - -static int __Pyx_PyBytes_SingleTailmatch(PyObject* self, PyObject* arg, - Py_ssize_t start, Py_ssize_t end, int direction) { +static int __Pyx_PyBytes_SingleTailmatch(PyObject* self, PyObject* arg, + Py_ssize_t start, Py_ssize_t end, int direction); /*proto*/ +static int __Pyx_PyBytes_Tailmatch(PyObject* self, PyObject* substr, + Py_ssize_t start, Py_ssize_t end, int direction); /*proto*/ + +/////////////// bytes_tailmatch /////////////// + +static int __Pyx_PyBytes_SingleTailmatch(PyObject* self, PyObject* arg, + Py_ssize_t start, Py_ssize_t end, int direction) { const char* self_ptr = PyBytes_AS_STRING(self); Py_ssize_t self_len = PyBytes_GET_SIZE(self); const char* sub_ptr; @@ -683,7 +683,7 @@ static int __Pyx_PyBytes_SingleTailmatch(PyObject* self, PyObject* arg, #if PY_MAJOR_VERSION < 3 // Python 2.x allows mixing unicode and str else if ( PyUnicode_Check(arg) ) { - return (int) PyUnicode_Tailmatch(self, arg, start, end, direction); + return (int) PyUnicode_Tailmatch(self, arg, start, end, direction); } #endif else { @@ -726,7 +726,7 @@ static int __Pyx_PyBytes_TailmatchTuple(PyObject* self, PyObject* substrings, Py_ssize_t i, count = PyTuple_GET_SIZE(substrings); for (i = 0; i < count; i++) { int result; -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_ASSUME_SAFE_MACROS && 
!CYTHON_AVOID_BORROWED_REFS result = __Pyx_PyBytes_SingleTailmatch(self, PyTuple_GET_ITEM(substrings, i), start, end, direction); #else @@ -755,7 +755,7 @@ static int __Pyx_PyBytes_Tailmatch(PyObject* self, PyObject* substr, /////////////// str_tailmatch.proto /////////////// static CYTHON_INLINE int __Pyx_PyStr_Tailmatch(PyObject* self, PyObject* arg, Py_ssize_t start, - Py_ssize_t end, int direction); /*proto*/ + Py_ssize_t end, int direction); /*proto*/ /////////////// str_tailmatch /////////////// //@requires: bytes_tailmatch @@ -777,16 +777,16 @@ static CYTHON_INLINE int __Pyx_PyStr_Tailmatch(PyObject* self, PyObject* arg, Py /////////////// bytes_index.proto /////////////// -static CYTHON_INLINE char __Pyx_PyBytes_GetItemInt(PyObject* bytes, Py_ssize_t index, int check_bounds); /*proto*/ - -/////////////// bytes_index /////////////// - +static CYTHON_INLINE char __Pyx_PyBytes_GetItemInt(PyObject* bytes, Py_ssize_t index, int check_bounds); /*proto*/ + +/////////////// bytes_index /////////////// + static CYTHON_INLINE char __Pyx_PyBytes_GetItemInt(PyObject* bytes, Py_ssize_t index, int check_bounds) { - if (index < 0) - index += PyBytes_GET_SIZE(bytes); + if (index < 0) + index += PyBytes_GET_SIZE(bytes); if (check_bounds) { Py_ssize_t size = PyBytes_GET_SIZE(bytes); - if (unlikely(!__Pyx_is_valid_index(index, size))) { + if (unlikely(!__Pyx_is_valid_index(index, size))) { PyErr_SetString(PyExc_IndexError, "string index out of range"); return (char) -1; } @@ -825,170 +825,170 @@ static CYTHON_INLINE PyObject* __Pyx_PyBytes_Join(PyObject* sep, PyObject* value #endif -/////////////// JoinPyUnicode.proto /////////////// - -static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, - Py_UCS4 max_char); - -/////////////// JoinPyUnicode /////////////// -//@requires: IncludeStringH -//@substitute: naming - -static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, - CYTHON_UNUSED Py_UCS4 max_char) { -#if CYTHON_USE_UNICODE_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - PyObject *result_uval; - int result_ukind; - Py_ssize_t i, char_pos; - void *result_udata; -#if CYTHON_PEP393_ENABLED - // Py 3.3+ (post PEP-393) - result_uval = PyUnicode_New(result_ulength, max_char); - if (unlikely(!result_uval)) return NULL; - result_ukind = (max_char <= 255) ? PyUnicode_1BYTE_KIND : (max_char <= 65535) ? 
PyUnicode_2BYTE_KIND : PyUnicode_4BYTE_KIND; - result_udata = PyUnicode_DATA(result_uval); -#else - // Py 2.x/3.2 (pre PEP-393) - result_uval = PyUnicode_FromUnicode(NULL, result_ulength); - if (unlikely(!result_uval)) return NULL; - result_ukind = sizeof(Py_UNICODE); - result_udata = PyUnicode_AS_UNICODE(result_uval); -#endif - - char_pos = 0; - for (i=0; i < value_count; i++) { - int ukind; - Py_ssize_t ulength; - void *udata; - PyObject *uval = PyTuple_GET_ITEM(value_tuple, i); - if (unlikely(__Pyx_PyUnicode_READY(uval))) - goto bad; - ulength = __Pyx_PyUnicode_GET_LENGTH(uval); - if (unlikely(!ulength)) - continue; - if (unlikely(char_pos + ulength < 0)) - goto overflow; - ukind = __Pyx_PyUnicode_KIND(uval); - udata = __Pyx_PyUnicode_DATA(uval); - if (!CYTHON_PEP393_ENABLED || ukind == result_ukind) { - memcpy((char *)result_udata + char_pos * result_ukind, udata, (size_t) (ulength * result_ukind)); - } else { +/////////////// JoinPyUnicode.proto /////////////// + +static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, + Py_UCS4 max_char); + +/////////////// JoinPyUnicode /////////////// +//@requires: IncludeStringH +//@substitute: naming + +static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, + CYTHON_UNUSED Py_UCS4 max_char) { +#if CYTHON_USE_UNICODE_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + PyObject *result_uval; + int result_ukind; + Py_ssize_t i, char_pos; + void *result_udata; +#if CYTHON_PEP393_ENABLED + // Py 3.3+ (post PEP-393) + result_uval = PyUnicode_New(result_ulength, max_char); + if (unlikely(!result_uval)) return NULL; + result_ukind = (max_char <= 255) ? PyUnicode_1BYTE_KIND : (max_char <= 65535) ? 
PyUnicode_2BYTE_KIND : PyUnicode_4BYTE_KIND; + result_udata = PyUnicode_DATA(result_uval); +#else + // Py 2.x/3.2 (pre PEP-393) + result_uval = PyUnicode_FromUnicode(NULL, result_ulength); + if (unlikely(!result_uval)) return NULL; + result_ukind = sizeof(Py_UNICODE); + result_udata = PyUnicode_AS_UNICODE(result_uval); +#endif + + char_pos = 0; + for (i=0; i < value_count; i++) { + int ukind; + Py_ssize_t ulength; + void *udata; + PyObject *uval = PyTuple_GET_ITEM(value_tuple, i); + if (unlikely(__Pyx_PyUnicode_READY(uval))) + goto bad; + ulength = __Pyx_PyUnicode_GET_LENGTH(uval); + if (unlikely(!ulength)) + continue; + if (unlikely(char_pos + ulength < 0)) + goto overflow; + ukind = __Pyx_PyUnicode_KIND(uval); + udata = __Pyx_PyUnicode_DATA(uval); + if (!CYTHON_PEP393_ENABLED || ukind == result_ukind) { + memcpy((char *)result_udata + char_pos * result_ukind, udata, (size_t) (ulength * result_ukind)); + } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030300F0 || defined(_PyUnicode_FastCopyCharacters) - _PyUnicode_FastCopyCharacters(result_uval, char_pos, uval, 0, ulength); - #else - Py_ssize_t j; - for (j=0; j < ulength; j++) { - Py_UCS4 uchar = __Pyx_PyUnicode_READ(ukind, udata, j); - __Pyx_PyUnicode_WRITE(result_ukind, result_udata, char_pos+j, uchar); - } - #endif - } - char_pos += ulength; - } - return result_uval; -overflow: - PyErr_SetString(PyExc_OverflowError, "join() result is too long for a Python string"); -bad: - Py_DECREF(result_uval); - return NULL; -#else - // non-CPython fallback - result_ulength++; - value_count++; - return PyUnicode_Join($empty_unicode, value_tuple); -#endif -} - - -/////////////// BuildPyUnicode.proto /////////////// - -static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength, - int prepend_sign, char padding_char); - -/////////////// BuildPyUnicode /////////////// - -// Create a PyUnicode object from an ASCII char*, e.g. a formatted number. 
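/* [editor's note: illustrative sketch, not part of the diff. The
 * JoinPyUnicode hunk above allocates the result in the narrowest PEP-393
 * representation that fits max_char, then bulk-copies any input of the same
 * kind with one unaligned memcpy at a character offset. The same arithmetic
 * outside of CPython, with uint16_t standing in for a 2-byte kind: ]
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* bytes per character: 1 for Latin-1, 2 for the BMP, 4 otherwise */
static int kind_for_max_char(uint32_t max_char) {
    return (max_char <= 255) ? 1 : (max_char <= 65535) ? 2 : 4;
}

int main(void) {
    assert(kind_for_max_char('a') == 1);
    assert(kind_for_max_char(0x20AC) == 2);    /* U+20AC, inside the BMP */
    assert(kind_for_max_char(0x1F600) == 4);   /* astral plane */

    /* same-kind append: memcpy at char_pos * kind, exactly as in the hunk */
    uint16_t result[8] = {0};
    const uint16_t a[] = {'h', 'i'}, b[] = {0x20AC};
    size_t char_pos = 0, kind = sizeof(uint16_t);
    memcpy((char *)result + char_pos * kind, a, sizeof(a)); char_pos += 2;
    memcpy((char *)result + char_pos * kind, b, sizeof(b)); char_pos += 1;
    assert(result[2] == 0x20AC && char_pos == 3);
    return 0;
}
/* [end of editor's note; the diff resumes below] */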
- -static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength, - int prepend_sign, char padding_char) { - PyObject *uval; - Py_ssize_t uoffset = ulength - clength; -#if CYTHON_USE_UNICODE_INTERNALS - Py_ssize_t i; -#if CYTHON_PEP393_ENABLED - // Py 3.3+ (post PEP-393) - void *udata; - uval = PyUnicode_New(ulength, 127); - if (unlikely(!uval)) return NULL; - udata = PyUnicode_DATA(uval); -#else - // Py 2.x/3.2 (pre PEP-393) - Py_UNICODE *udata; - uval = PyUnicode_FromUnicode(NULL, ulength); - if (unlikely(!uval)) return NULL; - udata = PyUnicode_AS_UNICODE(uval); -#endif - if (uoffset > 0) { - i = 0; - if (prepend_sign) { - __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, 0, '-'); - i++; - } - for (; i < uoffset; i++) { - __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, i, padding_char); - } - } - for (i=0; i < clength; i++) { - __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, uoffset+i, chars[i]); - } - -#else - // non-CPython - { - PyObject *sign = NULL, *padding = NULL; - uval = NULL; - if (uoffset > 0) { - prepend_sign = !!prepend_sign; - if (uoffset > prepend_sign) { - padding = PyUnicode_FromOrdinal(padding_char); - if (likely(padding) && uoffset > prepend_sign + 1) { - PyObject *tmp; - PyObject *repeat = PyInt_FromSize_t(uoffset - prepend_sign); - if (unlikely(!repeat)) goto done_or_error; - tmp = PyNumber_Multiply(padding, repeat); - Py_DECREF(repeat); - Py_DECREF(padding); - padding = tmp; - } - if (unlikely(!padding)) goto done_or_error; - } - if (prepend_sign) { - sign = PyUnicode_FromOrdinal('-'); - if (unlikely(!sign)) goto done_or_error; - } - } - - uval = PyUnicode_DecodeASCII(chars, clength, NULL); - if (likely(uval) && padding) { - PyObject *tmp = PyNumber_Add(padding, uval); - Py_DECREF(uval); - uval = tmp; - } - if (likely(uval) && sign) { - PyObject *tmp = PyNumber_Add(sign, uval); - Py_DECREF(uval); - uval = tmp; - } -done_or_error: - Py_XDECREF(padding); - Py_XDECREF(sign); - } -#endif - - return uval; -} - - + _PyUnicode_FastCopyCharacters(result_uval, char_pos, uval, 0, ulength); + #else + Py_ssize_t j; + for (j=0; j < ulength; j++) { + Py_UCS4 uchar = __Pyx_PyUnicode_READ(ukind, udata, j); + __Pyx_PyUnicode_WRITE(result_ukind, result_udata, char_pos+j, uchar); + } + #endif + } + char_pos += ulength; + } + return result_uval; +overflow: + PyErr_SetString(PyExc_OverflowError, "join() result is too long for a Python string"); +bad: + Py_DECREF(result_uval); + return NULL; +#else + // non-CPython fallback + result_ulength++; + value_count++; + return PyUnicode_Join($empty_unicode, value_tuple); +#endif +} + + +/////////////// BuildPyUnicode.proto /////////////// + +static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength, + int prepend_sign, char padding_char); + +/////////////// BuildPyUnicode /////////////// + +// Create a PyUnicode object from an ASCII char*, e.g. a formatted number. 
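/* [editor's note: illustrative sketch, not part of the diff.
 * __Pyx_PyUnicode_BuildFromAscii (whose re-added body follows) right-aligns
 * the digits in a field of ulength characters: an optional '-' first, then
 * padding_char up to the digit offset, then the ASCII digits. The layout in
 * plain char arithmetic, assuming ulength >= clength + prepend_sign: ]
 */
#include <assert.h>
#include <string.h>

static void build_padded(char *out, size_t ulength, const char *chars,
                         size_t clength, int prepend_sign, char padding_char) {
    size_t i = 0, uoffset = ulength - clength;  /* where the digits start */
    if (uoffset > 0) {
        if (prepend_sign) out[i++] = '-';
        for (; i < uoffset; i++) out[i] = padding_char;
    }
    memcpy(out + uoffset, chars, clength);
    out[ulength] = '\0';
}

int main(void) {
    char buf[16];
    build_padded(buf, 6, "42", 2, 1, '0');  /* like "%06d" % -42 */
    assert(strcmp(buf, "-00042") == 0);
    build_padded(buf, 5, "42", 2, 0, ' ');  /* like "%5d" % 42 */
    assert(strcmp(buf, "   42") == 0);
    return 0;
}
/* [end of editor's note; diff resumes below] */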
+ +static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength, + int prepend_sign, char padding_char) { + PyObject *uval; + Py_ssize_t uoffset = ulength - clength; +#if CYTHON_USE_UNICODE_INTERNALS + Py_ssize_t i; +#if CYTHON_PEP393_ENABLED + // Py 3.3+ (post PEP-393) + void *udata; + uval = PyUnicode_New(ulength, 127); + if (unlikely(!uval)) return NULL; + udata = PyUnicode_DATA(uval); +#else + // Py 2.x/3.2 (pre PEP-393) + Py_UNICODE *udata; + uval = PyUnicode_FromUnicode(NULL, ulength); + if (unlikely(!uval)) return NULL; + udata = PyUnicode_AS_UNICODE(uval); +#endif + if (uoffset > 0) { + i = 0; + if (prepend_sign) { + __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, 0, '-'); + i++; + } + for (; i < uoffset; i++) { + __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, i, padding_char); + } + } + for (i=0; i < clength; i++) { + __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, uoffset+i, chars[i]); + } + +#else + // non-CPython + { + PyObject *sign = NULL, *padding = NULL; + uval = NULL; + if (uoffset > 0) { + prepend_sign = !!prepend_sign; + if (uoffset > prepend_sign) { + padding = PyUnicode_FromOrdinal(padding_char); + if (likely(padding) && uoffset > prepend_sign + 1) { + PyObject *tmp; + PyObject *repeat = PyInt_FromSize_t(uoffset - prepend_sign); + if (unlikely(!repeat)) goto done_or_error; + tmp = PyNumber_Multiply(padding, repeat); + Py_DECREF(repeat); + Py_DECREF(padding); + padding = tmp; + } + if (unlikely(!padding)) goto done_or_error; + } + if (prepend_sign) { + sign = PyUnicode_FromOrdinal('-'); + if (unlikely(!sign)) goto done_or_error; + } + } + + uval = PyUnicode_DecodeASCII(chars, clength, NULL); + if (likely(uval) && padding) { + PyObject *tmp = PyNumber_Add(padding, uval); + Py_DECREF(uval); + uval = tmp; + } + if (likely(uval) && sign) { + PyObject *tmp = PyNumber_Add(sign, uval); + Py_DECREF(uval); + uval = tmp; + } +done_or_error: + Py_XDECREF(padding); + Py_XDECREF(sign); + } +#endif + + return uval; +} + + //////////////////// ByteArrayAppendObject.proto //////////////////// static CYTHON_INLINE int __Pyx_PyByteArray_AppendObject(PyObject* bytearray, PyObject* value); @@ -1006,7 +1006,7 @@ static CYTHON_INLINE int __Pyx_PyByteArray_AppendObject(PyObject* bytearray, PyO } ival = (unsigned char) (PyString_AS_STRING(value)[0]); } else -#endif +#endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(value)) && likely(Py_SIZE(value) == 1 || Py_SIZE(value) == 0)) { if (Py_SIZE(value) == 0) { @@ -1020,7 +1020,7 @@ static CYTHON_INLINE int __Pyx_PyByteArray_AppendObject(PyObject* bytearray, PyO { // CPython calls PyNumber_Index() internally ival = __Pyx_PyIndex_AsSsize_t(value); - if (unlikely(!__Pyx_is_valid_index(ival, 256))) { + if (unlikely(!__Pyx_is_valid_index(ival, 256))) { if (ival == -1 && PyErr_Occurred()) return -1; goto bad_range; @@ -1042,7 +1042,7 @@ static CYTHON_INLINE int __Pyx_PyByteArray_Append(PyObject* bytearray, int value static CYTHON_INLINE int __Pyx_PyByteArray_Append(PyObject* bytearray, int value) { PyObject *pyval, *retval; #if CYTHON_COMPILING_IN_CPYTHON - if (likely(__Pyx_is_valid_index(value, 256))) { + if (likely(__Pyx_is_valid_index(value, 256))) { Py_ssize_t n = Py_SIZE(bytearray); if (likely(n != PY_SSIZE_T_MAX)) { if (unlikely(PyByteArray_Resize(bytearray, n + 1) < 0)) @@ -1065,110 +1065,110 @@ static CYTHON_INLINE int __Pyx_PyByteArray_Append(PyObject* bytearray, int value Py_DECREF(retval); return 0; } - - -//////////////////// PyObjectFormat.proto //////////////////// - -#if 
CYTHON_USE_UNICODE_WRITER -static PyObject* __Pyx_PyObject_Format(PyObject* s, PyObject* f); -#else -#define __Pyx_PyObject_Format(s, f) PyObject_Format(s, f) -#endif - -//////////////////// PyObjectFormat //////////////////// - -#if CYTHON_USE_UNICODE_WRITER -static PyObject* __Pyx_PyObject_Format(PyObject* obj, PyObject* format_spec) { - int ret; - _PyUnicodeWriter writer; - - if (likely(PyFloat_CheckExact(obj))) { - // copied from CPython 3.5 "float__format__()" in floatobject.c -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x03040000 - _PyUnicodeWriter_Init(&writer, 0); -#else - _PyUnicodeWriter_Init(&writer); -#endif - ret = _PyFloat_FormatAdvancedWriter( - &writer, - obj, - format_spec, 0, PyUnicode_GET_LENGTH(format_spec)); - } else if (likely(PyLong_CheckExact(obj))) { - // copied from CPython 3.5 "long__format__()" in longobject.c -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x03040000 - _PyUnicodeWriter_Init(&writer, 0); -#else - _PyUnicodeWriter_Init(&writer); -#endif - ret = _PyLong_FormatAdvancedWriter( - &writer, - obj, - format_spec, 0, PyUnicode_GET_LENGTH(format_spec)); - } else { - return PyObject_Format(obj, format_spec); - } - - if (unlikely(ret == -1)) { - _PyUnicodeWriter_Dealloc(&writer); - return NULL; - } - return _PyUnicodeWriter_Finish(&writer); -} -#endif - - -//////////////////// PyObjectFormatSimple.proto //////////////////// - -#if CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyObject_FormatSimple(s, f) ( \ - likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \ - PyObject_Format(s, f)) -#elif PY_MAJOR_VERSION < 3 - // str is common in Py2, but formatting must return a Unicode string - #define __Pyx_PyObject_FormatSimple(s, f) ( \ - likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \ - likely(PyString_CheckExact(s)) ? PyUnicode_FromEncodedObject(s, NULL, "strict") : \ - PyObject_Format(s, f)) -#elif CYTHON_USE_TYPE_SLOTS - // Py3 nicely returns unicode strings from str() which makes this quite efficient for builtin types - #define __Pyx_PyObject_FormatSimple(s, f) ( \ - likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \ - likely(PyLong_CheckExact(s)) ? PyLong_Type.tp_str(s) : \ - likely(PyFloat_CheckExact(s)) ? PyFloat_Type.tp_str(s) : \ - PyObject_Format(s, f)) -#else - #define __Pyx_PyObject_FormatSimple(s, f) ( \ - likely(PyUnicode_CheckExact(s)) ? 
(Py_INCREF(s), s) : \ - PyObject_Format(s, f)) -#endif - - -//////////////////// PyObjectFormatAndDecref.proto //////////////////// - -static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatSimpleAndDecref(PyObject* s, PyObject* f); -static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObject* f); - -//////////////////// PyObjectFormatAndDecref //////////////////// - -static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatSimpleAndDecref(PyObject* s, PyObject* f) { - if (unlikely(!s)) return NULL; - if (likely(PyUnicode_CheckExact(s))) return s; - #if PY_MAJOR_VERSION < 3 - // str is common in Py2, but formatting must return a Unicode string - if (likely(PyString_CheckExact(s))) { - PyObject *result = PyUnicode_FromEncodedObject(s, NULL, "strict"); - Py_DECREF(s); - return result; - } - #endif - return __Pyx_PyObject_FormatAndDecref(s, f); -} - -static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObject* f) { - PyObject *result = PyObject_Format(s, f); - Py_DECREF(s); - return result; -} + + +//////////////////// PyObjectFormat.proto //////////////////// + +#if CYTHON_USE_UNICODE_WRITER +static PyObject* __Pyx_PyObject_Format(PyObject* s, PyObject* f); +#else +#define __Pyx_PyObject_Format(s, f) PyObject_Format(s, f) +#endif + +//////////////////// PyObjectFormat //////////////////// + +#if CYTHON_USE_UNICODE_WRITER +static PyObject* __Pyx_PyObject_Format(PyObject* obj, PyObject* format_spec) { + int ret; + _PyUnicodeWriter writer; + + if (likely(PyFloat_CheckExact(obj))) { + // copied from CPython 3.5 "float__format__()" in floatobject.c +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x03040000 + _PyUnicodeWriter_Init(&writer, 0); +#else + _PyUnicodeWriter_Init(&writer); +#endif + ret = _PyFloat_FormatAdvancedWriter( + &writer, + obj, + format_spec, 0, PyUnicode_GET_LENGTH(format_spec)); + } else if (likely(PyLong_CheckExact(obj))) { + // copied from CPython 3.5 "long__format__()" in longobject.c +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x03040000 + _PyUnicodeWriter_Init(&writer, 0); +#else + _PyUnicodeWriter_Init(&writer); +#endif + ret = _PyLong_FormatAdvancedWriter( + &writer, + obj, + format_spec, 0, PyUnicode_GET_LENGTH(format_spec)); + } else { + return PyObject_Format(obj, format_spec); + } + + if (unlikely(ret == -1)) { + _PyUnicodeWriter_Dealloc(&writer); + return NULL; + } + return _PyUnicodeWriter_Finish(&writer); +} +#endif + + +//////////////////// PyObjectFormatSimple.proto //////////////////// + +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyObject_FormatSimple(s, f) ( \ + likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \ + PyObject_Format(s, f)) +#elif PY_MAJOR_VERSION < 3 + // str is common in Py2, but formatting must return a Unicode string + #define __Pyx_PyObject_FormatSimple(s, f) ( \ + likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \ + likely(PyString_CheckExact(s)) ? PyUnicode_FromEncodedObject(s, NULL, "strict") : \ + PyObject_Format(s, f)) +#elif CYTHON_USE_TYPE_SLOTS + // Py3 nicely returns unicode strings from str() which makes this quite efficient for builtin types + #define __Pyx_PyObject_FormatSimple(s, f) ( \ + likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \ + likely(PyLong_CheckExact(s)) ? PyLong_Type.tp_str(s) : \ + likely(PyFloat_CheckExact(s)) ? PyFloat_Type.tp_str(s) : \ + PyObject_Format(s, f)) +#else + #define __Pyx_PyObject_FormatSimple(s, f) ( \ + likely(PyUnicode_CheckExact(s)) ? 
(Py_INCREF(s), s) : \ + PyObject_Format(s, f)) +#endif + + +//////////////////// PyObjectFormatAndDecref.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatSimpleAndDecref(PyObject* s, PyObject* f); +static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObject* f); + +//////////////////// PyObjectFormatAndDecref //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatSimpleAndDecref(PyObject* s, PyObject* f) { + if (unlikely(!s)) return NULL; + if (likely(PyUnicode_CheckExact(s))) return s; + #if PY_MAJOR_VERSION < 3 + // str is common in Py2, but formatting must return a Unicode string + if (likely(PyString_CheckExact(s))) { + PyObject *result = PyUnicode_FromEncodedObject(s, NULL, "strict"); + Py_DECREF(s); + return result; + } + #endif + return __Pyx_PyObject_FormatAndDecref(s, f); +} + +static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObject* f) { + PyObject *result = PyObject_Format(s, f); + Py_DECREF(s); + return result; +} //////////////////// PyUnicode_Unicode.proto //////////////////// diff --git a/contrib/tools/cython/Cython/Utility/TypeConversion.c b/contrib/tools/cython/Cython/Utility/TypeConversion.c index b7456a33a6..7a7bf0f799 100644 --- a/contrib/tools/cython/Cython/Utility/TypeConversion.c +++ b/contrib/tools/cython/Cython/Utility/TypeConversion.c @@ -2,9 +2,9 @@ /* Type Conversion Predeclarations */ -#define __Pyx_uchar_cast(c) ((unsigned char)c) -#define __Pyx_long_cast(x) ((long)x) - +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) + #define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \ (sizeof(type) < sizeof(Py_ssize_t)) || \ (sizeof(type) > sizeof(Py_ssize_t) && \ @@ -16,35 +16,35 @@ (is_signed || likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX))) ) -static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { - // Optimisation from Section 14.2 "Bounds Checking" in - // https://www.agner.org/optimize/optimizing_cpp.pdf - // See https://bugs.python.org/issue28397 - // The cast to unsigned effectively tests for "0 <= i < limit". - return (size_t) i < (size_t) limit; -} - -// fast and unsafe abs(Py_ssize_t) that ignores the overflow for (-PY_SSIZE_T_MAX-1) -#if defined (__cplusplus) && __cplusplus >= 201103L - #include <cstdlib> - #define __Pyx_sst_abs(value) std::abs(value) -#elif SIZEOF_INT >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) abs(value) -#elif SIZEOF_LONG >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) labs(value) +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + // Optimisation from Section 14.2 "Bounds Checking" in + // https://www.agner.org/optimize/optimizing_cpp.pdf + // See https://bugs.python.org/issue28397 + // The cast to unsigned effectively tests for "0 <= i < limit". + return (size_t) i < (size_t) limit; +} + +// fast and unsafe abs(Py_ssize_t) that ignores the overflow for (-PY_SSIZE_T_MAX-1) +#if defined (__cplusplus) && __cplusplus >= 201103L + #include <cstdlib> + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) - // abs() is defined for long, but 64-bits type on MSVC is long long. - // Use MS-specific _abs64 instead. + // abs() is defined for long, but 64-bits type on MSVC is long long. + // Use MS-specific _abs64 instead. 
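/* [editor's note: illustrative sketch, not part of the diff. The
 * __Pyx_is_valid_index hunk above (TypeConversion.c) folds the two
 * comparisons of "0 <= i && i < limit" into a single unsigned compare: a
 * negative i converts to a huge size_t and so fails the "< limit" test.
 * Stand-alone, with ptrdiff_t as a portable stand-in for Py_ssize_t: ]
 */
#include <assert.h>
#include <stddef.h>

static int is_valid_index(ptrdiff_t i, ptrdiff_t limit) {
    return (size_t) i < (size_t) limit;   /* one branch checks both bounds */
}

int main(void) {
    assert( is_valid_index(0, 3));
    assert( is_valid_index(2, 3));
    assert(!is_valid_index(3, 3));    /* upper bound is exclusive */
    assert(!is_valid_index(-1, 3));   /* (size_t)-1 is SIZE_MAX */
    return 0;
}
/* [end of editor's note; diff resumes below] */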
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) -#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define __Pyx_sst_abs(value) llabs(value) -#elif defined (__GNUC__) - // gcc or clang on 64 bit windows. - #define __Pyx_sst_abs(value) __builtin_llabs(value) -#else - #define __Pyx_sst_abs(value) ((value<0) ? -value : value) -#endif - +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + // gcc or clang on 64 bit windows. + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? -value : value) +#endif + static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); @@ -73,11 +73,11 @@ static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) -#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) -#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) -#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) -#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) // There used to be a Py_UNICODE_strlen() in CPython 3.x, but it is deprecated since Py3.3. static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { @@ -90,12 +90,12 @@ static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode -#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) -#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj) \ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) @@ -104,20 +104,20 @@ static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*); -#if CYTHON_ASSUME_SAFE_MACROS +#if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) -#else -#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) -#endif -#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) - +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) + #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { @@ -128,7 +128,7 @@ static int __Pyx_init_sys_getdefaultencoding_params(void) { const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); @@ -163,7 +163,7 @@ bad: Py_XDECREF(ascii_chars_b); return -1; } -#endif +#endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) @@ -187,7 +187,7 @@ static int __Pyx_init_sys_getdefaultencoding_params(void) { if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; - __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); @@ -271,7 +271,7 @@ static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ } else #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */ -#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); @@ -295,14 +295,14 @@ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { else return PyObject_IsTrue(x); } -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { - int retval; - if (unlikely(!x)) return -1; - retval = __Pyx_PyObject_IsTrue(x); - Py_DECREF(x); - return retval; -} - +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} + static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { @@ -325,10 +325,10 @@ static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const return NULL; } -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { -#if 
CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; -#endif +#endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 @@ -336,10 +336,10 @@ static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #else if (likely(PyLong_Check(x))) #endif - return __Pyx_NewRef(x); -#if CYTHON_USE_TYPE_SLOTS + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; - #if PY_MAJOR_VERSION < 3 + #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); @@ -348,13 +348,13 @@ static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { name = "long"; res = m->nb_long(x); } - #else + #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } - #endif -#else + #endif +#else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } @@ -375,41 +375,41 @@ static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { return res; } -{{py: from Cython.Utility import pylong_join }} - +{{py: from Cython.Utility import pylong_join }} + static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(b))) { - if (sizeof(Py_ssize_t) >= sizeof(long)) - return PyInt_AS_LONG(b); - else - return PyInt_AsSsize_t(b); - } + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } #endif if (likely(PyLong_CheckExact(b))) { - #if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)b)->ob_digit; - const Py_ssize_t size = Py_SIZE(b); - // handle most common case first to avoid indirect branch and optimise branch prediction - if (likely(__Pyx_sst_abs(size) <= 1)) { - ival = likely(size) ? digits[0] : 0; - if (size == -1) ival = -ival; - return ival; - } else { - switch (size) { - {{for _size in (2, 3, 4)}} - {{for _case in (_size, -_size)}} - case {{_case}}: - if (8 * sizeof(Py_ssize_t) > {{_size}} * PyLong_SHIFT) { - return {{'-' if _case < 0 else ''}}(Py_ssize_t) {{pylong_join(_size, 'digits', 'size_t')}}; - } - break; - {{endfor}} - {{endfor}} - } - } + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + // handle most common case first to avoid indirect branch and optimise branch prediction + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + {{for _size in (2, 3, 4)}} + {{for _case in (_size, -_size)}} + case {{_case}}: + if (8 * sizeof(Py_ssize_t) > {{_size}} * PyLong_SHIFT) { + return {{'-' if _case < 0 else ''}}(Py_ssize_t) {{pylong_join(_size, 'digits', 'size_t')}}; + } + break; + {{endfor}} + {{endfor}} + } + } #endif return PyLong_AsSsize_t(b); } @@ -420,7 +420,7 @@ static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { return ival; } - + static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) { if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) { return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o); @@ -440,11 +440,11 @@ static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) { } -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { - return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); -} - - +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} + + static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } @@ -459,135 +459,135 @@ static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #endif -/////////////// ToPyCTupleUtility.proto /////////////// -static PyObject* {{funcname}}({{struct_type_decl}}); - -/////////////// ToPyCTupleUtility /////////////// -static PyObject* {{funcname}}({{struct_type_decl}} value) { - PyObject* item = NULL; - PyObject* result = PyTuple_New({{size}}); - if (!result) goto bad; - - {{for ix, component in enumerate(components):}} - {{py:attr = "value.f%s" % ix}} - item = {{component.to_py_function}}({{attr}}); - if (!item) goto bad; - PyTuple_SET_ITEM(result, {{ix}}, item); - {{endfor}} - - return result; -bad: - Py_XDECREF(item); - Py_XDECREF(result); - return NULL; -} - - -/////////////// FromPyCTupleUtility.proto /////////////// +/////////////// ToPyCTupleUtility.proto /////////////// +static PyObject* {{funcname}}({{struct_type_decl}}); + +/////////////// ToPyCTupleUtility /////////////// +static PyObject* {{funcname}}({{struct_type_decl}} value) { + PyObject* item = NULL; + PyObject* result = PyTuple_New({{size}}); + if (!result) goto bad; + + {{for ix, component in enumerate(components):}} + {{py:attr = "value.f%s" % ix}} + item = {{component.to_py_function}}({{attr}}); + if (!item) goto bad; + PyTuple_SET_ITEM(result, {{ix}}, item); + {{endfor}} + + return result; +bad: + Py_XDECREF(item); + Py_XDECREF(result); + return NULL; +} + + +/////////////// FromPyCTupleUtility.proto /////////////// static {{struct_type_decl}} {{funcname}}(PyObject *); -/////////////// FromPyCTupleUtility /////////////// +/////////////// FromPyCTupleUtility /////////////// static {{struct_type_decl}} {{funcname}}(PyObject * o) { {{struct_type_decl}} result; - if (!PyTuple_Check(o) || PyTuple_GET_SIZE(o) != {{size}}) { - PyErr_Format(PyExc_TypeError, "Expected %.16s of size %d, got %.200s", "a tuple", {{size}}, Py_TYPE(o)->tp_name); + if (!PyTuple_Check(o) || PyTuple_GET_SIZE(o) != {{size}}) { + PyErr_Format(PyExc_TypeError, "Expected %.16s of size %d, got %.200s", "a tuple", {{size}}, Py_TYPE(o)->tp_name); goto bad; } -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - {{for ix, component in enumerate(components):}} - {{py:attr = "result.f%s" % ix}} - {{attr}} = {{component.from_py_function}}(PyTuple_GET_ITEM(o, {{ix}})); - if ({{component.error_condition(attr)}}) goto bad; +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + {{for ix, component in enumerate(components):}} + {{py:attr = "result.f%s" % ix}} + {{attr}} = {{component.from_py_function}}(PyTuple_GET_ITEM(o, {{ix}})); + if ({{component.error_condition(attr)}}) goto bad; + {{endfor}} +#else + { + PyObject *item; + {{for ix, component in enumerate(components):}} + {{py:attr = "result.f%s" % ix}} + item = PySequence_ITEM(o, {{ix}}); if (unlikely(!item)) goto bad; + {{attr}} = {{component.from_py_function}}(item); + Py_DECREF(item); + if ({{component.error_condition(attr)}}) goto bad; {{endfor}} -#else - { - PyObject *item; - {{for ix, component in enumerate(components):}} - {{py:attr = "result.f%s" % ix}} - item = PySequence_ITEM(o, {{ix}}); if (unlikely(!item)) goto bad; - {{attr}} = {{component.from_py_function}}(item); - Py_DECREF(item); - if ({{component.error_condition(attr)}}) goto bad; - {{endfor}} - } -#endif + } +#endif return result; bad: return result; } - -/////////////// UnicodeAsUCS4.proto /////////////// - -static 
CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject*); - -/////////////// UnicodeAsUCS4 /////////////// - -static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject* x) { - Py_ssize_t length; - #if CYTHON_PEP393_ENABLED - length = PyUnicode_GET_LENGTH(x); - if (likely(length == 1)) { - return PyUnicode_READ_CHAR(x, 0); - } - #else - length = PyUnicode_GET_SIZE(x); - if (likely(length == 1)) { - return PyUnicode_AS_UNICODE(x)[0]; - } - #if Py_UNICODE_SIZE == 2 - else if (PyUnicode_GET_SIZE(x) == 2) { - Py_UCS4 high_val = PyUnicode_AS_UNICODE(x)[0]; - if (high_val >= 0xD800 && high_val <= 0xDBFF) { - Py_UCS4 low_val = PyUnicode_AS_UNICODE(x)[1]; - if (low_val >= 0xDC00 && low_val <= 0xDFFF) { - return 0x10000 + (((high_val & ((1<<10)-1)) << 10) | (low_val & ((1<<10)-1))); - } - } - } - #endif - #endif - PyErr_Format(PyExc_ValueError, - "only single character unicode strings can be converted to Py_UCS4, " - "got length %" CYTHON_FORMAT_SSIZE_T "d", length); - return (Py_UCS4)-1; -} - - + +/////////////// UnicodeAsUCS4.proto /////////////// + +static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject*); + +/////////////// UnicodeAsUCS4 /////////////// + +static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject* x) { + Py_ssize_t length; + #if CYTHON_PEP393_ENABLED + length = PyUnicode_GET_LENGTH(x); + if (likely(length == 1)) { + return PyUnicode_READ_CHAR(x, 0); + } + #else + length = PyUnicode_GET_SIZE(x); + if (likely(length == 1)) { + return PyUnicode_AS_UNICODE(x)[0]; + } + #if Py_UNICODE_SIZE == 2 + else if (PyUnicode_GET_SIZE(x) == 2) { + Py_UCS4 high_val = PyUnicode_AS_UNICODE(x)[0]; + if (high_val >= 0xD800 && high_val <= 0xDBFF) { + Py_UCS4 low_val = PyUnicode_AS_UNICODE(x)[1]; + if (low_val >= 0xDC00 && low_val <= 0xDFFF) { + return 0x10000 + (((high_val & ((1<<10)-1)) << 10) | (low_val & ((1<<10)-1))); + } + } + } + #endif + #endif + PyErr_Format(PyExc_ValueError, + "only single character unicode strings can be converted to Py_UCS4, " + "got length %" CYTHON_FORMAT_SSIZE_T "d", length); + return (Py_UCS4)-1; +} + + /////////////// ObjectAsUCS4.proto /////////////// -//@requires: UnicodeAsUCS4 +//@requires: UnicodeAsUCS4 -#define __Pyx_PyObject_AsPy_UCS4(x) \ - (likely(PyUnicode_Check(x)) ? __Pyx_PyUnicode_AsPy_UCS4(x) : __Pyx__PyObject_AsPy_UCS4(x)) -static Py_UCS4 __Pyx__PyObject_AsPy_UCS4(PyObject*); +#define __Pyx_PyObject_AsPy_UCS4(x) \ + (likely(PyUnicode_Check(x)) ? 
__Pyx_PyUnicode_AsPy_UCS4(x) : __Pyx__PyObject_AsPy_UCS4(x)) +static Py_UCS4 __Pyx__PyObject_AsPy_UCS4(PyObject*); /////////////// ObjectAsUCS4 /////////////// -static Py_UCS4 __Pyx__PyObject_AsPy_UCS4_raise_error(long ival) { - if (ival < 0) { +static Py_UCS4 __Pyx__PyObject_AsPy_UCS4_raise_error(long ival) { + if (ival < 0) { if (!PyErr_Occurred()) PyErr_SetString(PyExc_OverflowError, "cannot convert negative value to Py_UCS4"); - } else { + } else { PyErr_SetString(PyExc_OverflowError, "value too large to convert to Py_UCS4"); } - return (Py_UCS4)-1; -} - -static Py_UCS4 __Pyx__PyObject_AsPy_UCS4(PyObject* x) { - long ival; - ival = __Pyx_PyInt_As_long(x); - if (unlikely(!__Pyx_is_valid_index(ival, 1114111 + 1))) { - return __Pyx__PyObject_AsPy_UCS4_raise_error(ival); - } + return (Py_UCS4)-1; +} + +static Py_UCS4 __Pyx__PyObject_AsPy_UCS4(PyObject* x) { + long ival; + ival = __Pyx_PyInt_As_long(x); + if (unlikely(!__Pyx_is_valid_index(ival, 1114111 + 1))) { + return __Pyx__PyObject_AsPy_UCS4_raise_error(ival); + } return (Py_UCS4)ival; } - + /////////////// ObjectAsPyUnicode.proto /////////////// static CYTHON_INLINE Py_UNICODE __Pyx_PyObject_AsPy_UNICODE(PyObject*); @@ -624,16 +624,16 @@ static CYTHON_INLINE Py_UNICODE __Pyx_PyObject_AsPy_UNICODE(PyObject* x) { #endif ival = __Pyx_PyInt_As_long(x); } - if (unlikely(!__Pyx_is_valid_index(ival, maxval + 1))) { - if (ival < 0) { - if (!PyErr_Occurred()) - PyErr_SetString(PyExc_OverflowError, - "cannot convert negative value to Py_UNICODE"); - return (Py_UNICODE)-1; - } else { + if (unlikely(!__Pyx_is_valid_index(ival, maxval + 1))) { + if (ival < 0) { + if (!PyErr_Occurred()) + PyErr_SetString(PyExc_OverflowError, + "cannot convert negative value to Py_UNICODE"); + return (Py_UNICODE)-1; + } else { PyErr_SetString(PyExc_OverflowError, - "value too large to convert to Py_UNICODE"); - } + "value too large to convert to Py_UNICODE"); + } return (Py_UNICODE)-1; } return (Py_UNICODE)ival; @@ -652,7 +652,7 @@ static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value) { #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif - const {{TYPE}} neg_one = ({{TYPE}}) -1, const_zero = ({{TYPE}}) 0; + const {{TYPE}} neg_one = ({{TYPE}}) -1, const_zero = ({{TYPE}}) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif @@ -662,18 +662,18 @@ static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value) { return PyInt_FromLong((long) value); } else if (sizeof({{TYPE}}) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof({{TYPE}}) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif +#ifdef HAVE_LONG_LONG + } else if (sizeof({{TYPE}}) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif } } else { if (sizeof({{TYPE}}) <= sizeof(long)) { return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof({{TYPE}}) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif +#ifdef HAVE_LONG_LONG + } else if (sizeof({{TYPE}}) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif } } { @@ -685,175 +685,175 @@ static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value) { } -/////////////// CIntToDigits /////////////// - -static const char DIGIT_PAIRS_10[2*10*10+1] = { - "00010203040506070809" - "10111213141516171819" - "20212223242526272829" - 
"30313233343536373839" - "40414243444546474849" - "50515253545556575859" - "60616263646566676869" - "70717273747576777879" - "80818283848586878889" - "90919293949596979899" -}; - -static const char DIGIT_PAIRS_8[2*8*8+1] = { - "0001020304050607" - "1011121314151617" - "2021222324252627" - "3031323334353637" - "4041424344454647" - "5051525354555657" - "6061626364656667" - "7071727374757677" -}; - -static const char DIGITS_HEX[2*16+1] = { +/////////////// CIntToDigits /////////////// + +static const char DIGIT_PAIRS_10[2*10*10+1] = { + "00010203040506070809" + "10111213141516171819" + "20212223242526272829" + "30313233343536373839" + "40414243444546474849" + "50515253545556575859" + "60616263646566676869" + "70717273747576777879" + "80818283848586878889" + "90919293949596979899" +}; + +static const char DIGIT_PAIRS_8[2*8*8+1] = { + "0001020304050607" + "1011121314151617" + "2021222324252627" + "3031323334353637" + "4041424344454647" + "5051525354555657" + "6061626364656667" + "7071727374757677" +}; + +static const char DIGITS_HEX[2*16+1] = { "0123456789abcdef" "0123456789ABCDEF" -}; - - -/////////////// CIntToPyUnicode.proto /////////////// - -static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value, Py_ssize_t width, char padding_char, char format_char); - -/////////////// CIntToPyUnicode /////////////// +}; + + +/////////////// CIntToPyUnicode.proto /////////////// + +static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value, Py_ssize_t width, char padding_char, char format_char); + +/////////////// CIntToPyUnicode /////////////// //@requires: StringTools.c::IncludeStringH -//@requires: StringTools.c::BuildPyUnicode -//@requires: CIntToDigits +//@requires: StringTools.c::BuildPyUnicode +//@requires: CIntToDigits //@requires: GCCDiagnostics - -// NOTE: inlining because most arguments are constant, which collapses lots of code below - -static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value, Py_ssize_t width, char padding_char, char format_char) { - // simple and conservative C string allocation on the stack: each byte gives at most 3 digits, plus sign - char digits[sizeof({{TYPE}})*3+2]; - // 'dpos' points to end of digits array + 1 initially to allow for pre-decrement looping - char *dpos, *end = digits + sizeof({{TYPE}})*3+2; - const char *hex_digits = DIGITS_HEX; - Py_ssize_t length, ulength; - int prepend_sign, last_one_off; - {{TYPE}} remaining; + +// NOTE: inlining because most arguments are constant, which collapses lots of code below + +static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value, Py_ssize_t width, char padding_char, char format_char) { + // simple and conservative C string allocation on the stack: each byte gives at most 3 digits, plus sign + char digits[sizeof({{TYPE}})*3+2]; + // 'dpos' points to end of digits array + 1 initially to allow for pre-decrement looping + char *dpos, *end = digits + sizeof({{TYPE}})*3+2; + const char *hex_digits = DIGITS_HEX; + Py_ssize_t length, ulength; + int prepend_sign, last_one_off; + {{TYPE}} remaining; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const {{TYPE}} neg_one = ({{TYPE}}) -1, const_zero = ({{TYPE}}) 0; +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const {{TYPE}} neg_one = ({{TYPE}}) -1, const_zero = ({{TYPE}}) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - - if (format_char == 'X') { - hex_digits += 16; - 
format_char = 'x'; +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + + if (format_char == 'X') { + hex_digits += 16; + format_char = 'x'; } - - // surprise: even trivial sprintf() calls don't get optimised in gcc (4.8) - remaining = value; /* not using abs(value) to avoid overflow problems */ - last_one_off = 0; - dpos = end; + + // surprise: even trivial sprintf() calls don't get optimised in gcc (4.8) + remaining = value; /* not using abs(value) to avoid overflow problems */ + last_one_off = 0; + dpos = end; do { - int digit_pos; - switch (format_char) { - case 'o': + int digit_pos; + switch (format_char) { + case 'o': digit_pos = abs((int)(remaining % (8*8))); remaining = ({{TYPE}}) (remaining / (8*8)); - dpos -= 2; + dpos -= 2; memcpy(dpos, DIGIT_PAIRS_8 + digit_pos * 2, 2); /* copy 2 digits at a time, unaligned */ - last_one_off = (digit_pos < 8); - break; - case 'd': + last_one_off = (digit_pos < 8); + break; + case 'd': digit_pos = abs((int)(remaining % (10*10))); remaining = ({{TYPE}}) (remaining / (10*10)); - dpos -= 2; + dpos -= 2; memcpy(dpos, DIGIT_PAIRS_10 + digit_pos * 2, 2); /* copy 2 digits at a time, unaligned */ - last_one_off = (digit_pos < 10); - break; - case 'x': + last_one_off = (digit_pos < 10); + break; + case 'x': *(--dpos) = hex_digits[abs((int)(remaining % 16))]; remaining = ({{TYPE}}) (remaining / 16); - break; - default: - assert(0); - break; - } + break; + default: + assert(0); + break; + } } while (unlikely(remaining != 0)); - if (last_one_off) { - assert(*dpos == '0'); - dpos++; - } - length = end - dpos; - ulength = length; - prepend_sign = 0; - if (!is_unsigned && value <= neg_one) { - if (padding_char == ' ' || width <= length + 1) { - *(--dpos) = '-'; - ++length; - } else { - prepend_sign = 1; - } - ++ulength; - } - if (width > ulength) { - ulength = width; - } - // single character unicode strings are cached in CPython => use PyUnicode_FromOrdinal() for them - if (ulength == 1) { - return PyUnicode_FromOrdinal(*dpos); - } - return __Pyx_PyUnicode_BuildFromAscii(ulength, dpos, (int) length, prepend_sign, padding_char); -} - - -/////////////// CBIntToPyUnicode.proto /////////////// - -#define {{TO_PY_FUNCTION}}(value) \ - ((value) ? 
__Pyx_NewRef({{TRUE_CONST}}) : __Pyx_NewRef({{FALSE_CONST}})) - - -/////////////// PyIntFromDouble.proto /////////////// - -#if PY_MAJOR_VERSION < 3 -static CYTHON_INLINE PyObject* __Pyx_PyInt_FromDouble(double value); -#else -#define __Pyx_PyInt_FromDouble(value) PyLong_FromDouble(value) -#endif - -/////////////// PyIntFromDouble /////////////// - -#if PY_MAJOR_VERSION < 3 -static CYTHON_INLINE PyObject* __Pyx_PyInt_FromDouble(double value) { - if (value >= (double)LONG_MIN && value <= (double)LONG_MAX) { - return PyInt_FromLong((long)value); - } - return PyLong_FromDouble(value); -} -#endif - - + if (last_one_off) { + assert(*dpos == '0'); + dpos++; + } + length = end - dpos; + ulength = length; + prepend_sign = 0; + if (!is_unsigned && value <= neg_one) { + if (padding_char == ' ' || width <= length + 1) { + *(--dpos) = '-'; + ++length; + } else { + prepend_sign = 1; + } + ++ulength; + } + if (width > ulength) { + ulength = width; + } + // single character unicode strings are cached in CPython => use PyUnicode_FromOrdinal() for them + if (ulength == 1) { + return PyUnicode_FromOrdinal(*dpos); + } + return __Pyx_PyUnicode_BuildFromAscii(ulength, dpos, (int) length, prepend_sign, padding_char); +} + + +/////////////// CBIntToPyUnicode.proto /////////////// + +#define {{TO_PY_FUNCTION}}(value) \ + ((value) ? __Pyx_NewRef({{TRUE_CONST}}) : __Pyx_NewRef({{FALSE_CONST}})) + + +/////////////// PyIntFromDouble.proto /////////////// + +#if PY_MAJOR_VERSION < 3 +static CYTHON_INLINE PyObject* __Pyx_PyInt_FromDouble(double value); +#else +#define __Pyx_PyInt_FromDouble(value) PyLong_FromDouble(value) +#endif + +/////////////// PyIntFromDouble /////////////// + +#if PY_MAJOR_VERSION < 3 +static CYTHON_INLINE PyObject* __Pyx_PyInt_FromDouble(double value) { + if (value >= (double)LONG_MIN && value <= (double)LONG_MAX) { + return PyInt_FromLong((long)value); + } + return PyLong_FromDouble(value); +} +#endif + + /////////////// CIntFromPyVerify /////////////// // see CIntFromPy #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value) \ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) - -#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value) \ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) - -#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc) \ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) + +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value) \ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) + +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc) \ { \ func_type value = func_value; \ if (sizeof(target_type) < sizeof(func_type)) { \ if (unlikely(value != (func_type) (target_type) value)) { \ func_type zero = 0; \ - if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred())) \ - return (target_type) -1; \ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred())) \ + return (target_type) -1; \ if (is_unsigned && unlikely(value < zero)) \ goto raise_neg_overflow; \ else \ @@ -872,14 +872,14 @@ static CYTHON_INLINE {{TYPE}} {{FROM_PY_FUNCTION}}(PyObject *); //@requires: CIntFromPyVerify //@requires: GCCDiagnostics -{{py: from Cython.Utility import pylong_join }} - +{{py: from Cython.Utility import pylong_join }} + static CYTHON_INLINE {{TYPE}} {{FROM_PY_FUNCTION}}(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif - const {{TYPE}} neg_one = ({{TYPE}}) 
-1, const_zero = ({{TYPE}}) 0; + const {{TYPE}} neg_one = ({{TYPE}}) -1, const_zero = ({{TYPE}}) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif @@ -899,74 +899,74 @@ static CYTHON_INLINE {{TYPE}} {{FROM_PY_FUNCTION}}(PyObject *x) { #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { - case 0: return ({{TYPE}}) 0; - case 1: __PYX_VERIFY_RETURN_INT({{TYPE}}, digit, digits[0]) - {{for _size in (2, 3, 4)}} - case {{_size}}: - if (8 * sizeof({{TYPE}}) > {{_size-1}} * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > {{_size}} * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT({{TYPE}}, unsigned long, {{pylong_join(_size, 'digits')}}) - } else if (8 * sizeof({{TYPE}}) >= {{_size}} * PyLong_SHIFT) { - return ({{TYPE}}) {{pylong_join(_size, 'digits', TYPE)}}; - } - } - break; - {{endfor}} + case 0: return ({{TYPE}}) 0; + case 1: __PYX_VERIFY_RETURN_INT({{TYPE}}, digit, digits[0]) + {{for _size in (2, 3, 4)}} + case {{_size}}: + if (8 * sizeof({{TYPE}}) > {{_size-1}} * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > {{_size}} * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT({{TYPE}}, unsigned long, {{pylong_join(_size, 'digits')}}) + } else if (8 * sizeof({{TYPE}}) >= {{_size}} * PyLong_SHIFT) { + return ({{TYPE}}) {{pylong_join(_size, 'digits', TYPE)}}; + } + } + break; + {{endfor}} } #endif -#if CYTHON_COMPILING_IN_CPYTHON +#if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } -#else - { - // misuse Py_False as a quick way to compare to a '0' int object in PyPy - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return ({{TYPE}}) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif +#else + { + // misuse Py_False as a quick way to compare to a '0' int object in PyPy + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return ({{TYPE}}) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif if (sizeof({{TYPE}}) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof({{TYPE}}) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif + __PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof({{TYPE}}) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif } } else { - // signed -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; + // signed +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { - case 0: return ({{TYPE}}) 0; - case -1: __PYX_VERIFY_RETURN_INT({{TYPE}}, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT({{TYPE}}, digit, +digits[0]) - {{for _size in (2, 3, 4)}} - {{for _case in (-_size, _size)}} - case {{_case}}: - if (8 * sizeof({{TYPE}}){{' - 1' if _case < 0 else ''}} > {{_size-1}} * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > {{_size}} * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT({{TYPE}}, {{'long' if _case < 0 else 'unsigned long'}}, {{'-(long) ' if _case < 0 else ''}}{{pylong_join(_size, 
'digits')}}) - } else if (8 * sizeof({{TYPE}}) - 1 > {{_size}} * PyLong_SHIFT) { - return ({{TYPE}}) ({{'((%s)-1)*' % TYPE if _case < 0 else ''}}{{pylong_join(_size, 'digits', TYPE)}}); - } - } - break; - {{endfor}} - {{endfor}} + case 0: return ({{TYPE}}) 0; + case -1: __PYX_VERIFY_RETURN_INT({{TYPE}}, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT({{TYPE}}, digit, +digits[0]) + {{for _size in (2, 3, 4)}} + {{for _case in (-_size, _size)}} + case {{_case}}: + if (8 * sizeof({{TYPE}}){{' - 1' if _case < 0 else ''}} > {{_size-1}} * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > {{_size}} * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT({{TYPE}}, {{'long' if _case < 0 else 'unsigned long'}}, {{'-(long) ' if _case < 0 else ''}}{{pylong_join(_size, 'digits')}}) + } else if (8 * sizeof({{TYPE}}) - 1 > {{_size}} * PyLong_SHIFT) { + return ({{TYPE}}) ({{'((%s)-1)*' % TYPE if _case < 0 else ''}}{{pylong_join(_size, 'digits', TYPE)}}); + } + } + break; + {{endfor}} + {{endfor}} } #endif if (sizeof({{TYPE}}) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof({{TYPE}}) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif + __PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof({{TYPE}}) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif } } { @@ -975,7 +975,7 @@ static CYTHON_INLINE {{TYPE}} {{FROM_PY_FUNCTION}}(PyObject *x) { "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else {{TYPE}} val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); + PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; @@ -998,7 +998,7 @@ static CYTHON_INLINE {{TYPE}} {{FROM_PY_FUNCTION}}(PyObject *x) { } } else { {{TYPE}} val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return ({{TYPE}}) -1; val = {{FROM_PY_FUNCTION}}(tmp); Py_DECREF(tmp); diff --git a/contrib/tools/cython/Cython/Utility/__init__.py b/contrib/tools/cython/Cython/Utility/__init__.py index 95094222e6..73ccc1e2cb 100644 --- a/contrib/tools/cython/Cython/Utility/__init__.py +++ b/contrib/tools/cython/Cython/Utility/__init__.py @@ -1,29 +1,29 @@ - -def pylong_join(count, digits_ptr='digits', join_type='unsigned long'): - """ - Generate an unrolled shift-then-or loop over the first 'count' digits. - Assumes that they fit into 'join_type'. - - (((d[2] << n) | d[1]) << n) | d[0] - """ - return ('(' * (count * 2) + ' | '.join( - "(%s)%s[%d])%s)" % (join_type, digits_ptr, _i, " << PyLong_SHIFT" if _i else '') - for _i in range(count-1, -1, -1))) - - -# although it could potentially make use of data independence, -# this implementation is a bit slower than the simpler one above -def _pylong_join(count, digits_ptr='digits', join_type='unsigned long'): - """ - Generate an or-ed series of shifts for the first 'count' digits. - Assumes that they fit into 'join_type'. - - (d[2] << 2*n) | (d[1] << 1*n) | d[0] - """ - def shift(n): - # avoid compiler warnings for overly large shifts that will be discarded anyway - return " << (%d * PyLong_SHIFT < 8 * sizeof(%s) ? 
%d * PyLong_SHIFT : 0)" % (n, join_type, n) if n else '' - - return '(%s)' % ' | '.join( - "(((%s)%s[%d])%s)" % (join_type, digits_ptr, i, shift(i)) - for i in range(count-1, -1, -1)) + +def pylong_join(count, digits_ptr='digits', join_type='unsigned long'): + """ + Generate an unrolled shift-then-or loop over the first 'count' digits. + Assumes that they fit into 'join_type'. + + (((d[2] << n) | d[1]) << n) | d[0] + """ + return ('(' * (count * 2) + ' | '.join( + "(%s)%s[%d])%s)" % (join_type, digits_ptr, _i, " << PyLong_SHIFT" if _i else '') + for _i in range(count-1, -1, -1))) + + +# although it could potentially make use of data independence, +# this implementation is a bit slower than the simpler one above +def _pylong_join(count, digits_ptr='digits', join_type='unsigned long'): + """ + Generate an or-ed series of shifts for the first 'count' digits. + Assumes that they fit into 'join_type'. + + (d[2] << 2*n) | (d[1] << 1*n) | d[0] + """ + def shift(n): + # avoid compiler warnings for overly large shifts that will be discarded anyway + return " << (%d * PyLong_SHIFT < 8 * sizeof(%s) ? %d * PyLong_SHIFT : 0)" % (n, join_type, n) if n else '' + + return '(%s)' % ' | '.join( + "(((%s)%s[%d])%s)" % (join_type, digits_ptr, i, shift(i)) + for i in range(count-1, -1, -1)) diff --git a/contrib/tools/cython/Cython/Utility/arrayarray.h b/contrib/tools/cython/Cython/Utility/arrayarray.h index e9558ca6c3..a9e4923785 100644 --- a/contrib/tools/cython/Cython/Utility/arrayarray.h +++ b/contrib/tools/cython/Cython/Utility/arrayarray.h @@ -125,20 +125,20 @@ static CYTHON_INLINE int resize(arrayobject *self, Py_ssize_t n) { static CYTHON_INLINE int resize_smart(arrayobject *self, Py_ssize_t n) { void *items = (void*) self->data.ob_item; Py_ssize_t newsize; - if (n < self->allocated && n*4 > self->allocated) { + if (n < self->allocated && n*4 > self->allocated) { __Pyx_SET_SIZE(self, n); - return 0; + return 0; + } + newsize = n + (n / 2) + 1; + if (newsize <= n) { /* overflow */ + PyErr_NoMemory(); + return -1; } - newsize = n + (n / 2) + 1; - if (newsize <= n) { /* overflow */ - PyErr_NoMemory(); - return -1; - } PyMem_Resize(items, char, (size_t)(newsize * self->ob_descr->itemsize)); if (items == NULL) { PyErr_NoMemory(); return -1; - } + } self->data.ob_item = (char*) items; __Pyx_SET_SIZE(self, n); self->allocated = newsize; diff --git a/contrib/tools/cython/Cython/Utils.py b/contrib/tools/cython/Cython/Utils.py index aa49e05c1b..d59d67d78b 100644 --- a/contrib/tools/cython/Cython/Utils.py +++ b/contrib/tools/cython/Cython/Utils.py @@ -3,44 +3,44 @@ # anywhere else in particular # -from __future__ import absolute_import - -try: - from __builtin__ import basestring -except ImportError: - basestring = str - -try: - FileNotFoundError -except NameError: - FileNotFoundError = OSError - +from __future__ import absolute_import + +try: + from __builtin__ import basestring +except ImportError: + basestring = str + +try: + FileNotFoundError +except NameError: + FileNotFoundError = OSError + import os import sys import re import io import codecs -import shutil +import shutil import tempfile from contextlib import contextmanager modification_time = os.path.getmtime -_function_caches = [] -def clear_function_caches(): - for cache in _function_caches: - cache.clear() +_function_caches = [] +def clear_function_caches(): + for cache in _function_caches: + cache.clear() def cached_function(f): cache = {} - _function_caches.append(cache) + _function_caches.append(cache) uncomputed = object() def wrapper(*args): res 
= cache.get(args, uncomputed) if res is uncomputed: res = cache[args] = f(*args) return res - wrapper.uncached = f + wrapper.uncached = f return wrapper def cached_method(f): @@ -96,34 +96,34 @@ def file_newer_than(path, time): ftime = modification_time(path) return ftime > time - -def safe_makedirs(path): - try: - os.makedirs(path) - except OSError: - if not os.path.isdir(path): - raise - - -def copy_file_to_dir_if_newer(sourcefile, destdir): - """ - Copy file sourcefile to directory destdir (creating it if needed), - preserving metadata. If the destination file exists and is not - older than the source file, the copying is skipped. - """ - destfile = os.path.join(destdir, os.path.basename(sourcefile)) - try: - desttime = modification_time(destfile) - except OSError: - # New file does not exist, destdir may or may not exist - safe_makedirs(destdir) - else: - # New file already exists - if not file_newer_than(sourcefile, desttime): - return - shutil.copy2(sourcefile, destfile) - - + +def safe_makedirs(path): + try: + os.makedirs(path) + except OSError: + if not os.path.isdir(path): + raise + + +def copy_file_to_dir_if_newer(sourcefile, destdir): + """ + Copy file sourcefile to directory destdir (creating it if needed), + preserving metadata. If the destination file exists and is not + older than the source file, the copying is skipped. + """ + destfile = os.path.join(destdir, os.path.basename(sourcefile)) + try: + desttime = modification_time(destfile) + except OSError: + # New file does not exist, destdir may or may not exist + safe_makedirs(destdir) + else: + # New file already exists + if not file_newer_than(sourcefile, desttime): + return + shutil.copy2(sourcefile, destfile) + + @cached_function def find_root_package_dir(file_path): dir = os.path.dirname(file_path) @@ -179,40 +179,40 @@ def path_exists(path): # file name encodings def decode_filename(filename): - if isinstance(filename, bytes): - try: - filename_encoding = sys.getfilesystemencoding() - if filename_encoding is None: - filename_encoding = sys.getdefaultencoding() - filename = filename.decode(filename_encoding) - except UnicodeDecodeError: - pass + if isinstance(filename, bytes): + try: + filename_encoding = sys.getfilesystemencoding() + if filename_encoding is None: + filename_encoding = sys.getdefaultencoding() + filename = filename.decode(filename_encoding) + except UnicodeDecodeError: + pass return filename # support for source file encoding detection -_match_file_encoding = re.compile(br"(\w*coding)[:=]\s*([-\w.]+)").search +_match_file_encoding = re.compile(br"(\w*coding)[:=]\s*([-\w.]+)").search def detect_opened_file_encoding(f): # PEPs 263 and 3120 - # Most of the time the first two lines fall in the first couple of hundred chars, + # Most of the time the first two lines fall in the first couple of hundred chars, # and this bulk read/split is much faster. 
- lines = () - start = b'' - while len(lines) < 3: - data = f.read(500) - start += data - lines = start.split(b"\n") - if not data: - break - m = _match_file_encoding(lines[0]) - if m and m.group(1) != b'c_string_encoding': - return m.group(2).decode('iso8859-1') - elif len(lines) > 1: - m = _match_file_encoding(lines[1]) + lines = () + start = b'' + while len(lines) < 3: + data = f.read(500) + start += data + lines = start.split(b"\n") + if not data: + break + m = _match_file_encoding(lines[0]) + if m and m.group(1) != b'c_string_encoding': + return m.group(2).decode('iso8859-1') + elif len(lines) > 1: + m = _match_file_encoding(lines[1]) if m: - return m.group(2).decode('iso8859-1') + return m.group(2).decode('iso8859-1') return "UTF-8" @@ -226,40 +226,40 @@ def skip_bom(f): f.seek(0) -def open_source_file(source_filename, encoding=None, error_handling=None): - stream = None - try: - if encoding is None: - # Most of the time the encoding is not specified, so try hard to open the file only once. - f = io.open(source_filename, 'rb') - encoding = detect_opened_file_encoding(f) +def open_source_file(source_filename, encoding=None, error_handling=None): + stream = None + try: + if encoding is None: + # Most of the time the encoding is not specified, so try hard to open the file only once. + f = io.open(source_filename, 'rb') + encoding = detect_opened_file_encoding(f) f.seek(0) - stream = io.TextIOWrapper(f, encoding=encoding, errors=error_handling) + stream = io.TextIOWrapper(f, encoding=encoding, errors=error_handling) else: - stream = io.open(source_filename, encoding=encoding, errors=error_handling) + stream = io.open(source_filename, encoding=encoding, errors=error_handling) - except OSError: - if os.path.exists(source_filename): - raise # File is there, but something went wrong reading from it. - # Allow source files to be in zip files etc. + except OSError: + if os.path.exists(source_filename): + raise # File is there, but something went wrong reading from it. + # Allow source files to be in zip files etc. 
try: loader = __loader__ if source_filename.startswith(loader.archive): - stream = open_source_from_loader( + stream = open_source_from_loader( loader, source_filename, - encoding, error_handling) + encoding, error_handling) except (NameError, AttributeError): pass - if stream is None: - raise FileNotFoundError(source_filename) + if stream is None: + raise FileNotFoundError(source_filename) skip_bom(stream) return stream def open_source_from_loader(loader, source_filename, - encoding=None, error_handling=None): + encoding=None, error_handling=None): nrmpath = os.path.normpath(source_filename) arcname = nrmpath[len(loader.archive)+1:] data = loader.get_data(arcname) @@ -270,22 +270,22 @@ def open_source_from_loader(loader, def str_to_number(value): # note: this expects a string as input that was accepted by the - # parser already, with an optional "-" sign in front - is_neg = False - if value[:1] == '-': - is_neg = True - value = value[1:] + # parser already, with an optional "-" sign in front + is_neg = False + if value[:1] == '-': + is_neg = True + value = value[1:] if len(value) < 2: value = int(value, 0) elif value[0] == '0': - literal_type = value[1] # 0'o' - 0'b' - 0'x' - if literal_type in 'xX': + literal_type = value[1] # 0'o' - 0'b' - 0'x' + if literal_type in 'xX': # hex notation ('0x1AF') value = int(value[2:], 16) - elif literal_type in 'oO': + elif literal_type in 'oO': # Py3 octal notation ('0o136') value = int(value[2:], 8) - elif literal_type in 'bB': + elif literal_type in 'bB': # Py3 binary notation ('0b101') value = int(value[2:], 2) else: @@ -293,7 +293,7 @@ def str_to_number(value): value = int(value, 8) else: value = int(value, 0) - return -value if is_neg else value + return -value if is_neg else value def long_literal(value): @@ -304,8 +304,8 @@ def long_literal(value): @cached_function def get_cython_cache_dir(): - r""" - Return the base directory containing Cython's caches. + r""" + Return the base directory containing Cython's caches. Priority: @@ -358,9 +358,9 @@ def captured_fd(stream=2, encoding=None): os.close(orig_stream) -def print_bytes(s, header_text=None, end=b'\n', file=sys.stdout, flush=True): - if header_text: - file.write(header_text) # note: text! => file.write() instead of out.write() +def print_bytes(s, header_text=None, end=b'\n', file=sys.stdout, flush=True): + if header_text: + file.write(header_text) # note: text! => file.write() instead of out.write() file.flush() try: out = file.buffer # Py3 @@ -384,7 +384,7 @@ class LazyStr: def __radd__(self, left): return left + self.callback() - + class OrderedSet(object): def __init__(self, elements=()): self._list = [] @@ -401,49 +401,49 @@ class OrderedSet(object): self._set.add(e) -# Class decorator that adds a metaclass and recreates the class with it. -# Copied from 'six'. 
-def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - - -def raise_error_if_module_name_forbidden(full_module_name): - #it is bad idea to call the pyx-file cython.pyx, so fail early - if full_module_name == 'cython' or full_module_name.startswith('cython.'): - raise ValueError('cython is a special module, cannot be used as a module name') - - -def build_hex_version(version_string): - """ +# Class decorator that adds a metaclass and recreates the class with it. +# Copied from 'six'. +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def raise_error_if_module_name_forbidden(full_module_name): + #it is bad idea to call the pyx-file cython.pyx, so fail early + if full_module_name == 'cython' or full_module_name.startswith('cython.'): + raise ValueError('cython is a special module, cannot be used as a module name') + + +def build_hex_version(version_string): + """ Parse and translate '4.3a1' into the readable hex representation '0x040300A1' (like PY_VERSION_HEX). - """ - # First, parse '4.12a1' into [4, 12, 0, 0xA01]. - digits = [] - release_status = 0xF0 - for digit in re.split('([.abrc]+)', version_string): - if digit in ('a', 'b', 'rc'): - release_status = {'a': 0xA0, 'b': 0xB0, 'rc': 0xC0}[digit] - digits = (digits + [0, 0])[:3] # 1.2a1 -> 1.2.0a1 - elif digit != '.': - digits.append(int(digit)) - digits = (digits + [0] * 3)[:4] - digits[3] += release_status - - # Then, build a single hex value, two hex digits per version part. - hexversion = 0 - for digit in digits: - hexversion = (hexversion << 8) + digit - - return '0x%08X' % hexversion + """ + # First, parse '4.12a1' into [4, 12, 0, 0xA01]. + digits = [] + release_status = 0xF0 + for digit in re.split('([.abrc]+)', version_string): + if digit in ('a', 'b', 'rc'): + release_status = {'a': 0xA0, 'b': 0xB0, 'rc': 0xC0}[digit] + digits = (digits + [0, 0])[:3] # 1.2a1 -> 1.2.0a1 + elif digit != '.': + digits.append(int(digit)) + digits = (digits + [0] * 3)[:4] + digits[3] += release_status + + # Then, build a single hex value, two hex digits per version part. + hexversion = 0 + for digit in digits: + hexversion = (hexversion << 8) + digit + + return '0x%08X' % hexversion diff --git a/contrib/tools/cython/Cython/__init__.py b/contrib/tools/cython/Cython/__init__.py index 98fc8d1a9a..549246b8a3 100644 --- a/contrib/tools/cython/Cython/__init__.py +++ b/contrib/tools/cython/Cython/__init__.py @@ -1,12 +1,12 @@ -from __future__ import absolute_import +from __future__ import absolute_import + +from .Shadow import __version__ -from .Shadow import __version__ - # Void cython.* directives (for case insensitive operating systems). 
-from .Shadow import *
+from .Shadow import *
 
 
 def load_ipython_extension(ip):
     """Load the extension in IPython."""
-    from .Build.IpythonMagic import CythonMagics  # pylint: disable=cyclic-import
+    from .Build.IpythonMagic import CythonMagics  # pylint: disable=cyclic-import
     ip.register_magics(CythonMagics)
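A few of the helpers restored by this diff are worth illustrating. The CIntToPyUnicode code above converts integers to text two decimal digits per division, copying pairs out of the DIGIT_PAIRS_10 table, because (as its comment notes) even trivial sprintf() calls don't get optimised well by gcc. A minimal Python sketch of the same trick follows; the helper name format_uint is made up for illustration, and unlike the C code it handles only non-negative values:

    # 100 two-character entries: "00", "01", ..., "99"
    DIGIT_PAIRS_10 = ''.join('%d%d' % (i, j) for i in range(10) for j in range(10))

    def format_uint(value):
        # emit two decimal digits per division, least significant pair first
        assert value >= 0
        pairs = []
        while True:
            value, pair = divmod(value, 100)
            pairs.append(DIGIT_PAIRS_10[pair * 2: pair * 2 + 2])
            if not value:
                break
        # the most significant pair may carry a leading zero
        # (what the C code tracks as 'last_one_off')
        pairs[-1] = pairs[-1].lstrip('0') or '0'
        return ''.join(reversed(pairs))

    assert format_uint(30217) == '30217'
    assert format_uint(0) == '0'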
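The {{pylong_join(_size, 'digits')}} substitutions in CIntFromPy above are produced by pylong_join() from Cython/Utility/__init__.py, also part of this diff. Running the helper directly (copied verbatim) shows the shape of the unrolled C expression it emits:

    def pylong_join(count, digits_ptr='digits', join_type='unsigned long'):
        # copied verbatim from Cython/Utility/__init__.py above
        return ('(' * (count * 2) + ' | '.join(
            "(%s)%s[%d])%s)" % (join_type, digits_ptr, _i, " << PyLong_SHIFT" if _i else '')
            for _i in range(count - 1, -1, -1)))

    print(pylong_join(3))
    # ((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))

That is the shift-then-or pattern (((d[2] << n) | d[1]) << n) | d[0] from the docstring, with each digit cast up front so the shifts and ors happen in the target width.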
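resize_smart() in arrayarray.h, also touched above, uses a standard amortised growth policy: keep the current allocation while it is less than four times too large, otherwise grow to roughly 1.5*n, guarding the size arithmetic against overflow. A sketch of just the policy, where smart_newsize is a hypothetical stand-in for the C logic:

    def smart_newsize(n, allocated):
        # keep the buffer while n stays strictly between allocated/4 and allocated
        if n < allocated and n * 4 > allocated:
            return allocated
        # otherwise grow to ~1.5*n + 1; the C code additionally fails with
        # PyErr_NoMemory() if this addition overflows Py_ssize_t
        return n + (n // 2) + 1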
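detect_opened_file_encoding() in Cython/Utils.py finds the PEP 263 coding cookie with the _match_file_encoding regex shown above, reading the file in 500-byte chunks until it has the first two complete lines. Two quick checks of the pattern, assuming the same regex as in the diff:

    import re

    _match_file_encoding = re.compile(br"(\w*coding)[:=]\s*([-\w.]+)").search

    m = _match_file_encoding(b"# -*- coding: iso-8859-15 -*-")
    assert m.group(2).decode('iso8859-1') == 'iso-8859-15'

    # the function deliberately skips Cython's own directive, which the
    # pattern also matches because 'c_string_encoding' ends in 'coding':
    m = _match_file_encoding(b"# cython: c_string_encoding=ascii")
    assert m.group(1) == b'c_string_encoding'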
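Finally, build_hex_version() packs a release string into a PY_VERSION_HEX-style layout: one byte per version part, with the alpha/beta/rc marker folded into the low byte. Copied verbatim from the diff, plus a few asserts derived from its docstring and packing logic:

    import re

    def build_hex_version(version_string):
        # copied verbatim from Cython/Utils.py above
        digits = []
        release_status = 0xF0
        for digit in re.split('([.abrc]+)', version_string):
            if digit in ('a', 'b', 'rc'):
                release_status = {'a': 0xA0, 'b': 0xB0, 'rc': 0xC0}[digit]
                digits = (digits + [0, 0])[:3]  # 1.2a1 -> 1.2.0a1
            elif digit != '.':
                digits.append(int(digit))
        digits = (digits + [0] * 3)[:4]
        digits[3] += release_status
        hexversion = 0
        for digit in digits:
            hexversion = (hexversion << 8) + digit
        return '0x%08X' % hexversion

    assert build_hex_version('4.3a1') == '0x040300A1'   # the docstring's example
    assert build_hex_version('4.12a1') == '0x040C00A1'  # digits pack to [4, 12, 0, 0xA1]
    assert build_hex_version('1.2') == '0x010200F0'     # final releases keep 0xF0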