author | shadchin <shadchin@yandex-team.ru> | 2022-02-10 16:44:39 +0300
committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:44:39 +0300
commit | e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch)
tree | 64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/python/Pygments/py3/pygments/lexers/python.py
parent | 2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff)
download | ydb-e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/python/Pygments/py3/pygments/lexers/python.py')
-rw-r--r-- | contrib/python/Pygments/py3/pygments/lexers/python.py | 920
1 file changed, 460 insertions(+), 460 deletions(-)
diff --git a/contrib/python/Pygments/py3/pygments/lexers/python.py b/contrib/python/Pygments/py3/pygments/lexers/python.py index 0d9478dc8d..2901d7b982 100644 --- a/contrib/python/Pygments/py3/pygments/lexers/python.py +++ b/contrib/python/Pygments/py3/pygments/lexers/python.py @@ -4,7 +4,7 @@ Lexers for Python and related languages. - :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -19,196 +19,196 @@ from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ from pygments import unistring as uni __all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer', - 'Python2Lexer', 'Python2TracebackLexer', - 'CythonLexer', 'DgLexer', 'NumPyLexer'] + 'Python2Lexer', 'Python2TracebackLexer', + 'CythonLexer', 'DgLexer', 'NumPyLexer'] line_re = re.compile('.*?\n') class PythonLexer(RegexLexer): """ - For `Python <http://www.python.org>`_ source code (version 3.x). - - .. versionadded:: 0.10 - - .. versionchanged:: 2.5 - This is now the default ``PythonLexer``. It is still available as the - alias ``Python3Lexer``. + For `Python <http://www.python.org>`_ source code (version 3.x). + + .. versionadded:: 0.10 + + .. versionchanged:: 2.5 + This is now the default ``PythonLexer``. It is still available as the + alias ``Python3Lexer``. """ name = 'Python' - aliases = ['python', 'py', 'sage', 'python3', 'py3'] - filenames = [ - '*.py', - '*.pyw', - # Jython - '*.jy', - # Sage - '*.sage', - # SCons - '*.sc', - 'SConstruct', - 'SConscript', - # Skylark/Starlark (used by Bazel, Buck, and Pants) - '*.bzl', - 'BUCK', - 'BUILD', - 'BUILD.bazel', - 'WORKSPACE', - # Twisted Application infrastructure - '*.tac', - ] - mimetypes = ['text/x-python', 'application/x-python', - 'text/x-python3', 'application/x-python3'] - - flags = re.MULTILINE | re.UNICODE - - uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue) - + aliases = ['python', 'py', 'sage', 'python3', 'py3'] + filenames = [ + '*.py', + '*.pyw', + # Jython + '*.jy', + # Sage + '*.sage', + # SCons + '*.sc', + 'SConstruct', + 'SConscript', + # Skylark/Starlark (used by Bazel, Buck, and Pants) + '*.bzl', + 'BUCK', + 'BUILD', + 'BUILD.bazel', + 'WORKSPACE', + # Twisted Application infrastructure + '*.tac', + ] + mimetypes = ['text/x-python', 'application/x-python', + 'text/x-python3', 'application/x-python3'] + + flags = re.MULTILINE | re.UNICODE + + uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue) + def innerstring_rules(ttype): return [ - # the old style '%s' % (...) string formatting (still valid in Py3) - (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' - '[hlL]?[E-GXc-giorsaux%]', String.Interpol), - # the new style '{}'.format(...) string formatting - (r'\{' - r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name - r'(\![sra])?' # conversion - r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?' - r'\}', String.Interpol), - - # backslashes, quotes and formatting signs must be parsed one at a time - (r'[^\\\'"%{\n]+', ttype), - (r'[\'"\\]', ttype), - # unhandled string formatting sign - (r'%|(\{{1,2})', ttype) - # newlines are an error (use "nl" state) - ] - - def fstring_rules(ttype): - return [ - # Assuming that a '}' is the closing brace after format specifier. - # Sadly, this means that we won't detect syntax error. But it's - # more important to parse correct syntax correctly, than to - # highlight invalid syntax. 
- (r'\}', String.Interpol), - (r'\{', String.Interpol, 'expr-inside-fstring'), - # backslashes, quotes and formatting signs must be parsed one at a time - (r'[^\\\'"{}\n]+', ttype), - (r'[\'"\\]', ttype), - # newlines are an error (use "nl" state) - ] - - tokens = { - 'root': [ - (r'\n', Text), - (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")', - bygroups(Text, String.Affix, String.Doc)), - (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')", - bygroups(Text, String.Affix, String.Doc)), - (r'\A#!.+$', Comment.Hashbang), - (r'#.*$', Comment.Single), - (r'\\\n', Text), - (r'\\', Text), - include('keywords'), + # the old style '%s' % (...) string formatting (still valid in Py3) + (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' + '[hlL]?[E-GXc-giorsaux%]', String.Interpol), + # the new style '{}'.format(...) string formatting + (r'\{' + r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name + r'(\![sra])?' # conversion + r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?' + r'\}', String.Interpol), + + # backslashes, quotes and formatting signs must be parsed one at a time + (r'[^\\\'"%{\n]+', ttype), + (r'[\'"\\]', ttype), + # unhandled string formatting sign + (r'%|(\{{1,2})', ttype) + # newlines are an error (use "nl" state) + ] + + def fstring_rules(ttype): + return [ + # Assuming that a '}' is the closing brace after format specifier. + # Sadly, this means that we won't detect syntax error. But it's + # more important to parse correct syntax correctly, than to + # highlight invalid syntax. + (r'\}', String.Interpol), + (r'\{', String.Interpol, 'expr-inside-fstring'), + # backslashes, quotes and formatting signs must be parsed one at a time + (r'[^\\\'"{}\n]+', ttype), + (r'[\'"\\]', ttype), + # newlines are an error (use "nl" state) + ] + + tokens = { + 'root': [ + (r'\n', Text), + (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")', + bygroups(Text, String.Affix, String.Doc)), + (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')", + bygroups(Text, String.Affix, String.Doc)), + (r'\A#!.+$', Comment.Hashbang), + (r'#.*$', Comment.Single), + (r'\\\n', Text), + (r'\\', Text), + include('keywords'), include('soft-keywords'), - (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'), - (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'), - (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), - 'fromimport'), - (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), - 'import'), - include('expr'), - ], - 'expr': [ - # raw f-strings - ('(?i)(rf|fr)(""")', - bygroups(String.Affix, String.Double), - combined('rfstringescape', 'tdqf')), - ("(?i)(rf|fr)(''')", - bygroups(String.Affix, String.Single), - combined('rfstringescape', 'tsqf')), - ('(?i)(rf|fr)(")', - bygroups(String.Affix, String.Double), - combined('rfstringescape', 'dqf')), - ("(?i)(rf|fr)(')", - bygroups(String.Affix, String.Single), - combined('rfstringescape', 'sqf')), - # non-raw f-strings - ('([fF])(""")', bygroups(String.Affix, String.Double), - combined('fstringescape', 'tdqf')), - ("([fF])(''')", bygroups(String.Affix, String.Single), - combined('fstringescape', 'tsqf')), - ('([fF])(")', bygroups(String.Affix, String.Double), - combined('fstringescape', 'dqf')), - ("([fF])(')", bygroups(String.Affix, String.Single), - combined('fstringescape', 'sqf')), - # raw strings - ('(?i)(rb|br|r)(""")', - bygroups(String.Affix, String.Double), 'tdqs'), - ("(?i)(rb|br|r)(''')", - bygroups(String.Affix, String.Single), 'tsqs'), - ('(?i)(rb|br|r)(")', - bygroups(String.Affix, String.Double), 'dqs'), - ("(?i)(rb|br|r)(')", - 
bygroups(String.Affix, String.Single), 'sqs'), - # non-raw strings - ('([uUbB]?)(""")', bygroups(String.Affix, String.Double), - combined('stringescape', 'tdqs')), - ("([uUbB]?)(''')", bygroups(String.Affix, String.Single), - combined('stringescape', 'tsqs')), - ('([uUbB]?)(")', bygroups(String.Affix, String.Double), - combined('stringescape', 'dqs')), - ("([uUbB]?)(')", bygroups(String.Affix, String.Single), - combined('stringescape', 'sqs')), - (r'[^\S\n]+', Text), - include('numbers'), - (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator), - (r'[]{}:(),;[]', Punctuation), - (r'(in|is|and|or|not)\b', Operator.Word), - include('expr-keywords'), - include('builtins'), - include('magicfuncs'), - include('magicvars'), - include('name'), - ], - 'expr-inside-fstring': [ - (r'[{([]', Punctuation, 'expr-inside-fstring-inner'), - # without format specifier - (r'(=\s*)?' # debug (https://bugs.python.org/issue36817) - r'(\![sraf])?' # conversion - r'\}', String.Interpol, '#pop'), - # with format specifier - # we'll catch the remaining '}' in the outer scope - (r'(=\s*)?' # debug (https://bugs.python.org/issue36817) - r'(\![sraf])?' # conversion - r':', String.Interpol, '#pop'), - (r'\s+', Text), # allow new lines - include('expr'), - ], - 'expr-inside-fstring-inner': [ - (r'[{([]', Punctuation, 'expr-inside-fstring-inner'), - (r'[])}]', Punctuation, '#pop'), - (r'\s+', Text), # allow new lines - include('expr'), - ], - 'expr-keywords': [ - # Based on https://docs.python.org/3/reference/expressions.html - (words(( - 'async for', 'await', 'else', 'for', 'if', 'lambda', - 'yield', 'yield from'), suffix=r'\b'), - Keyword), - (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), - ], - 'keywords': [ - (words(( - 'assert', 'async', 'await', 'break', 'continue', 'del', 'elif', - 'else', 'except', 'finally', 'for', 'global', 'if', 'lambda', - 'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield', - 'yield from', 'as', 'with'), suffix=r'\b'), - Keyword), - (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), - ], + (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'), + (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'), + (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'fromimport'), + (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'import'), + include('expr'), + ], + 'expr': [ + # raw f-strings + ('(?i)(rf|fr)(""")', + bygroups(String.Affix, String.Double), + combined('rfstringescape', 'tdqf')), + ("(?i)(rf|fr)(''')", + bygroups(String.Affix, String.Single), + combined('rfstringescape', 'tsqf')), + ('(?i)(rf|fr)(")', + bygroups(String.Affix, String.Double), + combined('rfstringescape', 'dqf')), + ("(?i)(rf|fr)(')", + bygroups(String.Affix, String.Single), + combined('rfstringescape', 'sqf')), + # non-raw f-strings + ('([fF])(""")', bygroups(String.Affix, String.Double), + combined('fstringescape', 'tdqf')), + ("([fF])(''')", bygroups(String.Affix, String.Single), + combined('fstringescape', 'tsqf')), + ('([fF])(")', bygroups(String.Affix, String.Double), + combined('fstringescape', 'dqf')), + ("([fF])(')", bygroups(String.Affix, String.Single), + combined('fstringescape', 'sqf')), + # raw strings + ('(?i)(rb|br|r)(""")', + bygroups(String.Affix, String.Double), 'tdqs'), + ("(?i)(rb|br|r)(''')", + bygroups(String.Affix, String.Single), 'tsqs'), + ('(?i)(rb|br|r)(")', + bygroups(String.Affix, String.Double), 'dqs'), + ("(?i)(rb|br|r)(')", + bygroups(String.Affix, String.Single), 'sqs'), + # non-raw strings + 
('([uUbB]?)(""")', bygroups(String.Affix, String.Double), + combined('stringescape', 'tdqs')), + ("([uUbB]?)(''')", bygroups(String.Affix, String.Single), + combined('stringescape', 'tsqs')), + ('([uUbB]?)(")', bygroups(String.Affix, String.Double), + combined('stringescape', 'dqs')), + ("([uUbB]?)(')", bygroups(String.Affix, String.Single), + combined('stringescape', 'sqs')), + (r'[^\S\n]+', Text), + include('numbers'), + (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator), + (r'[]{}:(),;[]', Punctuation), + (r'(in|is|and|or|not)\b', Operator.Word), + include('expr-keywords'), + include('builtins'), + include('magicfuncs'), + include('magicvars'), + include('name'), + ], + 'expr-inside-fstring': [ + (r'[{([]', Punctuation, 'expr-inside-fstring-inner'), + # without format specifier + (r'(=\s*)?' # debug (https://bugs.python.org/issue36817) + r'(\![sraf])?' # conversion + r'\}', String.Interpol, '#pop'), + # with format specifier + # we'll catch the remaining '}' in the outer scope + (r'(=\s*)?' # debug (https://bugs.python.org/issue36817) + r'(\![sraf])?' # conversion + r':', String.Interpol, '#pop'), + (r'\s+', Text), # allow new lines + include('expr'), + ], + 'expr-inside-fstring-inner': [ + (r'[{([]', Punctuation, 'expr-inside-fstring-inner'), + (r'[])}]', Punctuation, '#pop'), + (r'\s+', Text), # allow new lines + include('expr'), + ], + 'expr-keywords': [ + # Based on https://docs.python.org/3/reference/expressions.html + (words(( + 'async for', 'await', 'else', 'for', 'if', 'lambda', + 'yield', 'yield from'), suffix=r'\b'), + Keyword), + (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), + ], + 'keywords': [ + (words(( + 'assert', 'async', 'await', 'break', 'continue', 'del', 'elif', + 'else', 'except', 'finally', 'for', 'global', 'if', 'lambda', + 'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield', + 'yield from', 'as', 'with'), suffix=r'\b'), + Keyword), + (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), + ], 'soft-keywords': [ # `match`, `case` and `_` soft keywords (r'(^[ \t]*)' # at beginning of line + possible indentation @@ -223,201 +223,201 @@ class PythonLexer(RegexLexer): (r'(\s+)([^\n_]*)(_\b)', bygroups(Text, using(this), Keyword)), default('#pop') ], - 'builtins': [ - (words(( - '__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray', + 'builtins': [ + (words(( + '__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray', 'breakpoint', 'bytes', 'chr', 'classmethod', 'compile', 'complex', - 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'filter', - 'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr', - 'hash', 'hex', 'id', 'input', 'int', 'isinstance', 'issubclass', - 'iter', 'len', 'list', 'locals', 'map', 'max', 'memoryview', - 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'print', - 'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr', - 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', - 'type', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'), - Name.Builtin), - (r'(?<!\.)(self|Ellipsis|NotImplemented|cls)\b', Name.Builtin.Pseudo), - (words(( - 'ArithmeticError', 'AssertionError', 'AttributeError', - 'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning', - 'EOFError', 'EnvironmentError', 'Exception', 'FloatingPointError', - 'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError', - 'ImportWarning', 'IndentationError', 'IndexError', 'KeyError', - 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError', - 'NotImplementedError', 
'OSError', 'OverflowError', - 'PendingDeprecationWarning', 'ReferenceError', 'ResourceWarning', - 'RuntimeError', 'RuntimeWarning', 'StopIteration', - 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', - 'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', - 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', - 'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', - 'Warning', 'WindowsError', 'ZeroDivisionError', - # new builtin exceptions from PEP 3151 - 'BlockingIOError', 'ChildProcessError', 'ConnectionError', - 'BrokenPipeError', 'ConnectionAbortedError', 'ConnectionRefusedError', - 'ConnectionResetError', 'FileExistsError', 'FileNotFoundError', - 'InterruptedError', 'IsADirectoryError', 'NotADirectoryError', - 'PermissionError', 'ProcessLookupError', 'TimeoutError', - # others new in Python 3 - 'StopAsyncIteration', 'ModuleNotFoundError', 'RecursionError'), - prefix=r'(?<!\.)', suffix=r'\b'), - Name.Exception), - ], - 'magicfuncs': [ - (words(( - '__abs__', '__add__', '__aenter__', '__aexit__', '__aiter__', - '__and__', '__anext__', '__await__', '__bool__', '__bytes__', - '__call__', '__complex__', '__contains__', '__del__', '__delattr__', - '__delete__', '__delitem__', '__dir__', '__divmod__', '__enter__', - '__eq__', '__exit__', '__float__', '__floordiv__', '__format__', - '__ge__', '__get__', '__getattr__', '__getattribute__', - '__getitem__', '__gt__', '__hash__', '__iadd__', '__iand__', - '__ifloordiv__', '__ilshift__', '__imatmul__', '__imod__', - '__imul__', '__index__', '__init__', '__instancecheck__', - '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__', - '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', - '__len__', '__length_hint__', '__lshift__', '__lt__', '__matmul__', - '__missing__', '__mod__', '__mul__', '__ne__', '__neg__', - '__new__', '__next__', '__or__', '__pos__', '__pow__', - '__prepare__', '__radd__', '__rand__', '__rdivmod__', '__repr__', - '__reversed__', '__rfloordiv__', '__rlshift__', '__rmatmul__', - '__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__', - '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__', - '__rxor__', '__set__', '__setattr__', '__setitem__', '__str__', - '__sub__', '__subclasscheck__', '__truediv__', - '__xor__'), suffix=r'\b'), - Name.Function.Magic), - ], - 'magicvars': [ - (words(( - '__annotations__', '__bases__', '__class__', '__closure__', - '__code__', '__defaults__', '__dict__', '__doc__', '__file__', - '__func__', '__globals__', '__kwdefaults__', '__module__', - '__mro__', '__name__', '__objclass__', '__qualname__', - '__self__', '__slots__', '__weakref__'), suffix=r'\b'), - Name.Variable.Magic), - ], - 'numbers': [ - (r'(\d(?:_?\d)*\.(?:\d(?:_?\d)*)?|(?:\d(?:_?\d)*)?\.\d(?:_?\d)*)' - r'([eE][+-]?\d(?:_?\d)*)?', Number.Float), - (r'\d(?:_?\d)*[eE][+-]?\d(?:_?\d)*j?', Number.Float), - (r'0[oO](?:_?[0-7])+', Number.Oct), - (r'0[bB](?:_?[01])+', Number.Bin), - (r'0[xX](?:_?[a-fA-F0-9])+', Number.Hex), - (r'\d(?:_?\d)*', Number.Integer), - ], - 'name': [ - (r'@' + uni_name, Name.Decorator), - (r'@', Operator), # new matrix multiplication operator - (uni_name, Name), - ], - 'funcname': [ - include('magicfuncs'), - (uni_name, Name.Function, '#pop'), - default('#pop'), - ], - 'classname': [ - (uni_name, Name.Class, '#pop'), - ], - 'import': [ - (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)), - (r'\.', Name.Namespace), - (uni_name, Name.Namespace), - (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)), - default('#pop') # all else: go back - ], - 
'fromimport': [ - (r'(\s+)(import)\b', bygroups(Text, Keyword.Namespace), '#pop'), - (r'\.', Name.Namespace), - # if None occurs here, it's "raise x from None", since None can - # never be a module name - (r'None\b', Name.Builtin.Pseudo, '#pop'), - (uni_name, Name.Namespace), - default('#pop'), - ], - 'rfstringescape': [ - (r'\{\{', String.Escape), - (r'\}\}', String.Escape), - ], - 'fstringescape': [ - include('rfstringescape'), - include('stringescape'), - ], - 'stringescape': [ - (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|' - r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) - ], - 'fstrings-single': fstring_rules(String.Single), - 'fstrings-double': fstring_rules(String.Double), - 'strings-single': innerstring_rules(String.Single), - 'strings-double': innerstring_rules(String.Double), - 'dqf': [ - (r'"', String.Double, '#pop'), - (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings - include('fstrings-double') - ], - 'sqf': [ - (r"'", String.Single, '#pop'), - (r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings - include('fstrings-single') - ], - 'dqs': [ - (r'"', String.Double, '#pop'), - (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings - include('strings-double') - ], - 'sqs': [ - (r"'", String.Single, '#pop'), - (r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings - include('strings-single') - ], - 'tdqf': [ - (r'"""', String.Double, '#pop'), - include('fstrings-double'), - (r'\n', String.Double) - ], - 'tsqf': [ - (r"'''", String.Single, '#pop'), - include('fstrings-single'), - (r'\n', String.Single) - ], - 'tdqs': [ - (r'"""', String.Double, '#pop'), - include('strings-double'), - (r'\n', String.Double) - ], - 'tsqs': [ - (r"'''", String.Single, '#pop'), - include('strings-single'), - (r'\n', String.Single) - ], - } - - def analyse_text(text): - return shebang_matches(text, r'pythonw?(3(\.\d)?)?') or \ - 'import ' in text[:1000] - - -Python3Lexer = PythonLexer - - -class Python2Lexer(RegexLexer): - """ - For `Python 2.x <http://www.python.org>`_ source code. - - .. versionchanged:: 2.5 - This class has been renamed from ``PythonLexer``. ``PythonLexer`` now - refers to the Python 3 variant. File name patterns like ``*.py`` have - been moved to Python 3 as well. 
- """ - - name = 'Python 2.x' - aliases = ['python2', 'py2'] - filenames = [] # now taken over by PythonLexer (3.x) - mimetypes = ['text/x-python2', 'application/x-python2'] - - def innerstring_rules(ttype): - return [ + 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'filter', + 'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr', + 'hash', 'hex', 'id', 'input', 'int', 'isinstance', 'issubclass', + 'iter', 'len', 'list', 'locals', 'map', 'max', 'memoryview', + 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'print', + 'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr', + 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', + 'type', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'), + Name.Builtin), + (r'(?<!\.)(self|Ellipsis|NotImplemented|cls)\b', Name.Builtin.Pseudo), + (words(( + 'ArithmeticError', 'AssertionError', 'AttributeError', + 'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning', + 'EOFError', 'EnvironmentError', 'Exception', 'FloatingPointError', + 'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError', + 'ImportWarning', 'IndentationError', 'IndexError', 'KeyError', + 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError', + 'NotImplementedError', 'OSError', 'OverflowError', + 'PendingDeprecationWarning', 'ReferenceError', 'ResourceWarning', + 'RuntimeError', 'RuntimeWarning', 'StopIteration', + 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', + 'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', + 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', + 'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', + 'Warning', 'WindowsError', 'ZeroDivisionError', + # new builtin exceptions from PEP 3151 + 'BlockingIOError', 'ChildProcessError', 'ConnectionError', + 'BrokenPipeError', 'ConnectionAbortedError', 'ConnectionRefusedError', + 'ConnectionResetError', 'FileExistsError', 'FileNotFoundError', + 'InterruptedError', 'IsADirectoryError', 'NotADirectoryError', + 'PermissionError', 'ProcessLookupError', 'TimeoutError', + # others new in Python 3 + 'StopAsyncIteration', 'ModuleNotFoundError', 'RecursionError'), + prefix=r'(?<!\.)', suffix=r'\b'), + Name.Exception), + ], + 'magicfuncs': [ + (words(( + '__abs__', '__add__', '__aenter__', '__aexit__', '__aiter__', + '__and__', '__anext__', '__await__', '__bool__', '__bytes__', + '__call__', '__complex__', '__contains__', '__del__', '__delattr__', + '__delete__', '__delitem__', '__dir__', '__divmod__', '__enter__', + '__eq__', '__exit__', '__float__', '__floordiv__', '__format__', + '__ge__', '__get__', '__getattr__', '__getattribute__', + '__getitem__', '__gt__', '__hash__', '__iadd__', '__iand__', + '__ifloordiv__', '__ilshift__', '__imatmul__', '__imod__', + '__imul__', '__index__', '__init__', '__instancecheck__', + '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__', + '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', + '__len__', '__length_hint__', '__lshift__', '__lt__', '__matmul__', + '__missing__', '__mod__', '__mul__', '__ne__', '__neg__', + '__new__', '__next__', '__or__', '__pos__', '__pow__', + '__prepare__', '__radd__', '__rand__', '__rdivmod__', '__repr__', + '__reversed__', '__rfloordiv__', '__rlshift__', '__rmatmul__', + '__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__', + '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__', + '__rxor__', '__set__', '__setattr__', '__setitem__', '__str__', + '__sub__', '__subclasscheck__', '__truediv__', + '__xor__'), 
suffix=r'\b'), + Name.Function.Magic), + ], + 'magicvars': [ + (words(( + '__annotations__', '__bases__', '__class__', '__closure__', + '__code__', '__defaults__', '__dict__', '__doc__', '__file__', + '__func__', '__globals__', '__kwdefaults__', '__module__', + '__mro__', '__name__', '__objclass__', '__qualname__', + '__self__', '__slots__', '__weakref__'), suffix=r'\b'), + Name.Variable.Magic), + ], + 'numbers': [ + (r'(\d(?:_?\d)*\.(?:\d(?:_?\d)*)?|(?:\d(?:_?\d)*)?\.\d(?:_?\d)*)' + r'([eE][+-]?\d(?:_?\d)*)?', Number.Float), + (r'\d(?:_?\d)*[eE][+-]?\d(?:_?\d)*j?', Number.Float), + (r'0[oO](?:_?[0-7])+', Number.Oct), + (r'0[bB](?:_?[01])+', Number.Bin), + (r'0[xX](?:_?[a-fA-F0-9])+', Number.Hex), + (r'\d(?:_?\d)*', Number.Integer), + ], + 'name': [ + (r'@' + uni_name, Name.Decorator), + (r'@', Operator), # new matrix multiplication operator + (uni_name, Name), + ], + 'funcname': [ + include('magicfuncs'), + (uni_name, Name.Function, '#pop'), + default('#pop'), + ], + 'classname': [ + (uni_name, Name.Class, '#pop'), + ], + 'import': [ + (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)), + (r'\.', Name.Namespace), + (uni_name, Name.Namespace), + (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)), + default('#pop') # all else: go back + ], + 'fromimport': [ + (r'(\s+)(import)\b', bygroups(Text, Keyword.Namespace), '#pop'), + (r'\.', Name.Namespace), + # if None occurs here, it's "raise x from None", since None can + # never be a module name + (r'None\b', Name.Builtin.Pseudo, '#pop'), + (uni_name, Name.Namespace), + default('#pop'), + ], + 'rfstringescape': [ + (r'\{\{', String.Escape), + (r'\}\}', String.Escape), + ], + 'fstringescape': [ + include('rfstringescape'), + include('stringescape'), + ], + 'stringescape': [ + (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|' + r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) + ], + 'fstrings-single': fstring_rules(String.Single), + 'fstrings-double': fstring_rules(String.Double), + 'strings-single': innerstring_rules(String.Single), + 'strings-double': innerstring_rules(String.Double), + 'dqf': [ + (r'"', String.Double, '#pop'), + (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings + include('fstrings-double') + ], + 'sqf': [ + (r"'", String.Single, '#pop'), + (r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings + include('fstrings-single') + ], + 'dqs': [ + (r'"', String.Double, '#pop'), + (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings + include('strings-double') + ], + 'sqs': [ + (r"'", String.Single, '#pop'), + (r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings + include('strings-single') + ], + 'tdqf': [ + (r'"""', String.Double, '#pop'), + include('fstrings-double'), + (r'\n', String.Double) + ], + 'tsqf': [ + (r"'''", String.Single, '#pop'), + include('fstrings-single'), + (r'\n', String.Single) + ], + 'tdqs': [ + (r'"""', String.Double, '#pop'), + include('strings-double'), + (r'\n', String.Double) + ], + 'tsqs': [ + (r"'''", String.Single, '#pop'), + include('strings-single'), + (r'\n', String.Single) + ], + } + + def analyse_text(text): + return shebang_matches(text, r'pythonw?(3(\.\d)?)?') or \ + 'import ' in text[:1000] + + +Python3Lexer = PythonLexer + + +class Python2Lexer(RegexLexer): + """ + For `Python 2.x <http://www.python.org>`_ source code. + + .. versionchanged:: 2.5 + This class has been renamed from ``PythonLexer``. ``PythonLexer`` now + refers to the Python 3 variant. File name patterns like ``*.py`` have + been moved to Python 3 as well. 
+ """ + + name = 'Python 2.x' + aliases = ['python2', 'py2'] + filenames = [] # now taken over by PythonLexer (3.x) + mimetypes = ['text/x-python2', 'application/x-python2'] + + def innerstring_rules(ttype): + return [ # the old style '%s' % (...) string formatting (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' '[hlL]?[E-GXc-giorsux%]', String.Interpol), @@ -505,15 +505,15 @@ class Python2Lexer(RegexLexer): 'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError', 'ImportWarning', 'IndentationError', 'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError', - 'MemoryError', 'NameError', - 'NotImplementedError', 'OSError', 'OverflowError', 'OverflowWarning', - 'PendingDeprecationWarning', 'ReferenceError', - 'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration', - 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', - 'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', - 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', - 'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', 'Warning', - 'WindowsError', 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'), + 'MemoryError', 'NameError', + 'NotImplementedError', 'OSError', 'OverflowError', 'OverflowWarning', + 'PendingDeprecationWarning', 'ReferenceError', + 'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration', + 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', + 'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', + 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', + 'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', 'Warning', + 'WindowsError', 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'), Name.Exception), ], 'magicfuncs': [ @@ -619,7 +619,7 @@ class Python2Lexer(RegexLexer): } def analyse_text(text): - return shebang_matches(text, r'pythonw?2(\.\d)?') + return shebang_matches(text, r'pythonw?2(\.\d)?') class PythonConsoleLexer(Lexer): @@ -639,27 +639,27 @@ class PythonConsoleLexer(Lexer): Additional options: `python3` - Use Python 3 lexer for code. Default is ``True``. + Use Python 3 lexer for code. Default is ``True``. .. versionadded:: 1.0 - .. versionchanged:: 2.5 - Now defaults to ``True``. + .. versionchanged:: 2.5 + Now defaults to ``True``. """ name = 'Python console session' aliases = ['pycon'] mimetypes = ['text/x-python-doctest'] def __init__(self, **options): - self.python3 = get_bool_opt(options, 'python3', True) + self.python3 = get_bool_opt(options, 'python3', True) Lexer.__init__(self, **options) def get_tokens_unprocessed(self, text): if self.python3: pylexer = PythonLexer(**self.options) tblexer = PythonTracebackLexer(**self.options) - else: - pylexer = Python2Lexer(**self.options) - tblexer = Python2TracebackLexer(**self.options) + else: + pylexer = Python2Lexer(**self.options) + tblexer = Python2TracebackLexer(**self.options) curcode = '' insertions = [] @@ -668,26 +668,26 @@ class PythonConsoleLexer(Lexer): tb = 0 for match in line_re.finditer(text): line = match.group() - if line.startswith('>>> ') or line.startswith('... '): + if line.startswith('>>> ') or line.startswith('... '): tb = 0 insertions.append((len(curcode), [(0, Generic.Prompt, line[:4])])) curcode += line[4:] - elif line.rstrip() == '...' and not tb: + elif line.rstrip() == '...' 
and not tb: # only a new >>> prompt can end an exception block # otherwise an ellipsis in place of the traceback frames # will be mishandled insertions.append((len(curcode), - [(0, Generic.Prompt, '...')])) + [(0, Generic.Prompt, '...')])) curcode += line[3:] else: if curcode: - yield from do_insertions( - insertions, pylexer.get_tokens_unprocessed(curcode)) + yield from do_insertions( + insertions, pylexer.get_tokens_unprocessed(curcode)) curcode = '' insertions = [] - if (line.startswith('Traceback (most recent call last):') or - re.match(' File "[^"]+", line \\d+\\n$', line)): + if (line.startswith('Traceback (most recent call last):') or + re.match(' File "[^"]+", line \\d+\\n$', line)): tb = 1 curtb = line tbindex = match.start() @@ -695,7 +695,7 @@ class PythonConsoleLexer(Lexer): yield match.start(), Name.Class, line elif tb: curtb += line - if not (line.startswith(' ') or line.strip() == '...'): + if not (line.startswith(' ') or line.strip() == '...'): tb = 0 for i, t, v in tblexer.get_tokens_unprocessed(curtb): yield tbindex+i, t, v @@ -703,8 +703,8 @@ class PythonConsoleLexer(Lexer): else: yield match.start(), Generic.Output, line if curcode: - yield from do_insertions(insertions, - pylexer.get_tokens_unprocessed(curcode)) + yield from do_insertions(insertions, + pylexer.get_tokens_unprocessed(curcode)) if curtb: for i, t, v in tblexer.get_tokens_unprocessed(curtb): yield tbindex+i, t, v @@ -712,28 +712,28 @@ class PythonConsoleLexer(Lexer): class PythonTracebackLexer(RegexLexer): """ - For Python 3.x tracebacks, with support for chained exceptions. + For Python 3.x tracebacks, with support for chained exceptions. + + .. versionadded:: 1.0 - .. versionadded:: 1.0 - - .. versionchanged:: 2.5 - This is now the default ``PythonTracebackLexer``. It is still available - as the alias ``Python3TracebackLexer``. + .. versionchanged:: 2.5 + This is now the default ``PythonTracebackLexer``. It is still available + as the alias ``Python3TracebackLexer``. """ name = 'Python Traceback' - aliases = ['pytb', 'py3tb'] - filenames = ['*.pytb', '*.py3tb'] - mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback'] + aliases = ['pytb', 'py3tb'] + filenames = ['*.pytb', '*.py3tb'] + mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback'] tokens = { 'root': [ - (r'\n', Text), - (r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'), - (r'^During handling of the above exception, another ' - r'exception occurred:\n\n', Generic.Traceback), - (r'^The above exception was the direct cause of the ' - r'following exception:\n\n', Generic.Traceback), + (r'\n', Text), + (r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'), + (r'^During handling of the above exception, another ' + r'exception occurred:\n\n', Generic.Traceback), + (r'^The above exception was the direct cause of the ' + r'following exception:\n\n', Generic.Traceback), (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), (r'^.*\n', Other), ], @@ -743,54 +743,54 @@ class PythonTracebackLexer(RegexLexer): (r'^( File )("[^"]+")(, line )(\d+)(\n)', bygroups(Text, Name.Builtin, Text, Number, Text)), (r'^( )(.+)(\n)', - bygroups(Text, using(PythonLexer), Text), 'markers'), + bygroups(Text, using(PythonLexer), Text), 'markers'), (r'^([ \t]*)(\.\.\.)(\n)', bygroups(Text, Comment, Text)), # for doctests... 
(r'^([^:]+)(: )(.+)(\n)', bygroups(Generic.Error, Text, Name, Text), '#pop'), - (r'^([a-zA-Z_][\w.]*)(:?\n)', + (r'^([a-zA-Z_][\w.]*)(:?\n)', bygroups(Generic.Error, Text), '#pop') ], - 'markers': [ - # Either `PEP 657 <https://www.python.org/dev/peps/pep-0657/>` - # error locations in Python 3.11+, or single-caret markers - # for syntax errors before that. + 'markers': [ + # Either `PEP 657 <https://www.python.org/dev/peps/pep-0657/>` + # error locations in Python 3.11+, or single-caret markers + # for syntax errors before that. (r'^( {4,})([~^]+)(\n)', - bygroups(Text, Punctuation.Marker, Text), - '#pop'), - default('#pop'), - ], + bygroups(Text, Punctuation.Marker, Text), + '#pop'), + default('#pop'), + ], } -Python3TracebackLexer = PythonTracebackLexer - - -class Python2TracebackLexer(RegexLexer): +Python3TracebackLexer = PythonTracebackLexer + + +class Python2TracebackLexer(RegexLexer): """ - For Python tracebacks. + For Python tracebacks. - .. versionadded:: 0.7 - - .. versionchanged:: 2.5 - This class has been renamed from ``PythonTracebackLexer``. - ``PythonTracebackLexer`` now refers to the Python 3 variant. + .. versionadded:: 0.7 + + .. versionchanged:: 2.5 + This class has been renamed from ``PythonTracebackLexer``. + ``PythonTracebackLexer`` now refers to the Python 3 variant. """ - name = 'Python 2.x Traceback' - aliases = ['py2tb'] - filenames = ['*.py2tb'] - mimetypes = ['text/x-python2-traceback'] + name = 'Python 2.x Traceback' + aliases = ['py2tb'] + filenames = ['*.py2tb'] + mimetypes = ['text/x-python2-traceback'] tokens = { 'root': [ - # Cover both (most recent call last) and (innermost last) - # The optional ^C allows us to catch keyboard interrupt signals. - (r'^(\^C)?(Traceback.*\n)', - bygroups(Text, Generic.Traceback), 'intb'), - # SyntaxError starts with this. + # Cover both (most recent call last) and (innermost last) + # The optional ^C allows us to catch keyboard interrupt signals. + (r'^(\^C)?(Traceback.*\n)', + bygroups(Text, Generic.Traceback), 'intb'), + # SyntaxError starts with this. (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), - (r'^.*\n', Other), + (r'^.*\n', Other), ], 'intb': [ (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', @@ -798,7 +798,7 @@ class Python2TracebackLexer(RegexLexer): (r'^( File )("[^"]+")(, line )(\d+)(\n)', bygroups(Text, Name.Builtin, Text, Number, Text)), (r'^( )(.+)(\n)', - bygroups(Text, using(Python2Lexer), Text), 'marker'), + bygroups(Text, using(Python2Lexer), Text), 'marker'), (r'^([ \t]*)(\.\.\.)(\n)', bygroups(Text, Comment, Text)), # for doctests... (r'^([^:]+)(: )(.+)(\n)', @@ -806,11 +806,11 @@ class Python2TracebackLexer(RegexLexer): (r'^([a-zA-Z_]\w*)(:?\n)', bygroups(Generic.Error, Text), '#pop') ], - 'marker': [ - # For syntax errors. - (r'( {4,})(\^)', bygroups(Text, Punctuation.Marker), '#pop'), - default('#pop'), - ], + 'marker': [ + # For syntax errors. 
+ (r'( {4,})(\^)', bygroups(Text, Punctuation.Marker), '#pop'), + default('#pop'), + ], } @@ -866,7 +866,7 @@ class CythonLexer(RegexLexer): ], 'keywords': [ (words(( - 'assert', 'async', 'await', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif', + 'assert', 'async', 'await', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif', 'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil', 'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print', 'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'), @@ -875,14 +875,14 @@ class CythonLexer(RegexLexer): ], 'builtins': [ (words(( - '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', 'bint', + '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', 'bint', 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float', 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max', - 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', 'Py_ssize_t', + 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', 'Py_ssize_t', 'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', 'unsigned', @@ -1106,7 +1106,7 @@ class NumPyLexer(PythonLexer): mimetypes = [] filenames = [] - EXTRA_KEYWORDS = { + EXTRA_KEYWORDS = { 'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose', 'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append', 'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh', @@ -1171,7 +1171,7 @@ class NumPyLexer(PythonLexer): 'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index', 'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises', 'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like' - } + } def get_tokens_unprocessed(self, text): for index, token, value in \ @@ -1182,7 +1182,7 @@ class NumPyLexer(PythonLexer): yield index, token, value def analyse_text(text): - ltext = text[:1000] - return (shebang_matches(text, r'pythonw?(3(\.\d)?)?') or - 'import ' in ltext) \ - and ('import numpy' in ltext or 'from numpy import' in ltext) + ltext = text[:1000] + return (shebang_matches(text, r'pythonw?(3(\.\d)?)?') or + 'import ' in ltext) \ + and ('import numpy' in ltext or 'from numpy import' in ltext) |
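The classes touched above are ordinary Pygments lexers, so the change is easy to smoke-test directly. The sketch below is illustrative and not part of the commit: it assumes only the public Pygments API (`highlight`, `get_tokens`, `analyse_text`) and uses invented sample strings to exercise `PythonLexer`'s f-string handling, `PythonConsoleLexer`'s prompt handling, and `NumPyLexer.analyse_text`.

```python
# Minimal smoke test for the lexers in this diff; the sample inputs are invented.
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.python import PythonLexer, PythonConsoleLexer, NumPyLexer

code = 'f"{value!r:>10}"  # f-string with conversion and format spec\n'
session = ">>> 1 + 1\n2\n"

# PythonLexer routes the f-string braces through the 'expr-inside-fstring' state.
print(highlight(code, PythonLexer(), TerminalFormatter()), end="")

# PythonConsoleLexer separates prompts, code and output ('python3' defaults to True).
for token_type, value in PythonConsoleLexer().get_tokens(session):
    print(token_type, repr(value))

# NumPyLexer.analyse_text needs both a plain import and a numpy import near the top.
print(NumPyLexer.analyse_text("import numpy as np\nnp.zeros(3)\n"))   # > 0: NumPy-flavoured source
print(NumPyLexer.analyse_text("print('no numpy here')\n"))            # 0.0: not recognised
```

Because the commit only restores authorship annotations (the 460 deleted lines are re-added with identical content), the token stream produced by these lexers is expected to be the same before and after it.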