author     shadchin <shadchin@yandex-team.ru>           2022-02-10 16:44:39 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru> 2022-02-10 16:44:39 +0300
commit     e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch)
tree       64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/python/Pygments/py3/pygments/lexer.py
parent     2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff)
download   ydb-e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/python/Pygments/py3/pygments/lexer.py')
-rw-r--r--  contrib/python/Pygments/py3/pygments/lexer.py | 64
1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/contrib/python/Pygments/py3/pygments/lexer.py b/contrib/python/Pygments/py3/pygments/lexer.py
index 4aa3105f39..33d738a8d6 100644
--- a/contrib/python/Pygments/py3/pygments/lexer.py
+++ b/contrib/python/Pygments/py3/pygments/lexer.py
@@ -4,7 +4,7 @@
Base lexer classes.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -16,7 +16,7 @@ from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
- make_analysator, Future, guess_decode
+ make_analysator, Future, guess_decode
from pygments.regexopt import regex_opt
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
@@ -45,7 +45,7 @@ class LexerMeta(type):
return type.__new__(mcs, name, bases, d)
-class Lexer(metaclass=LexerMeta):
+class Lexer(metaclass=LexerMeta):
"""
Lexer for a specific language.
@@ -141,16 +141,16 @@ class Lexer(metaclass=LexerMeta):
    Also preprocess the text, i.e. expand tabs and strip it if
    wanted, and apply registered filters.
"""
- if not isinstance(text, str):
+ if not isinstance(text, str):
if self.encoding == 'guess':
text, _ = guess_decode(text)
elif self.encoding == 'chardet':
try:
import chardet
- except ImportError as e:
+ except ImportError as e:
raise ImportError('To enable chardet encoding guessing, '
'please install the chardet library '
- 'from http://chardet.feedparser.org/') from e
+ 'from http://chardet.feedparser.org/') from e
# check for BOM first
decoded = None
for bom, encoding in _encoding_map:
@@ -165,11 +165,11 @@ class Lexer(metaclass=LexerMeta):
text = decoded
else:
text = text.decode(self.encoding)
- if text.startswith('\ufeff'):
- text = text[len('\ufeff'):]
+ if text.startswith('\ufeff'):
+ text = text[len('\ufeff'):]
else:
- if text.startswith('\ufeff'):
- text = text[len('\ufeff'):]
+ if text.startswith('\ufeff'):
+ text = text[len('\ufeff'):]
# text now *is* a unicode string
text = text.replace('\r\n', '\n')
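
For context on the decode path this hunk touches: byte input is decoded before lexing, and a leading U+FEFF BOM is dropped once the text is a str. A minimal sketch of that behavior; TextLexer is used purely as an arbitrary concrete lexer, and the printed result is indicative only:

from pygments.lexers import TextLexer

# Bytes in, string tokens out: with an explicit encoding the input is
# decoded and a leading U+FEFF BOM is stripped, per the branch above.
lexer = TextLexer(encoding='utf-8')
print(list(lexer.get_tokens(b'\xef\xbb\xbfhello\n')))
# expected output on the order of: [(Token.Text, 'hello\n')]
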
@@ -248,7 +248,7 @@ class include(str): # pylint: disable=invalid-name
pass
-class _inherit:
+class _inherit:
"""
    Indicates that a state should inherit from its superclass.
"""
@@ -271,7 +271,7 @@ class combined(tuple): # pylint: disable=invalid-name
pass
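
Both helpers changed above are used inside a lexer's tokens table: include() splices another state's rules in place, while combined() builds an anonymous state out of several named ones. A hypothetical sketch (MiniLexer and all of its rules are invented for illustration):

from pygments.lexer import RegexLexer, include, combined
from pygments.token import Comment, Keyword, String, Text

class MiniLexer(RegexLexer):
    """Hypothetical lexer showing include() and combined()."""
    name = 'Mini'
    tokens = {
        'whitespace': [
            (r'\s+', Text),
            (r'#.*$', Comment.Single),
        ],
        'root': [
            include('whitespace'),   # splice the 'whitespace' rules here
            (r'\b(if|else)\b', Keyword),
            # combined() creates an anonymous state from the two named ones
            (r'"', String, combined('escapes', 'string')),
        ],
        'escapes': [(r'\\.', String.Escape)],
        'string': [
            (r'"', String, '#pop'),
            (r'[^"\\]+', String),
        ],
    }
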
-class _PseudoMatch:
+class _PseudoMatch:
"""
A pseudo match object constructed from a string.
"""
@@ -324,12 +324,12 @@ def bygroups(*args):
return callback
-class _This:
+class _This:
"""
Special singleton used for indicating the caller class.
Used by ``using``.
"""
-
+
this = _This()
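
bygroups() from the earlier hunk and the this singleton above are commonly paired: bygroups fans the groups of one match out to separate tokens, and using(this) hands a group back to the current lexer for recursive lexing. A hedged sketch; AssignLexer and its grammar are invented:

from pygments.lexer import RegexLexer, bygroups, using, this
from pygments.token import Name, Operator, Text

class AssignLexer(RegexLexer):
    """Hypothetical: lex 'x = y + z' with the right-hand side re-lexed."""
    name = 'Assign'
    tokens = {
        'root': [
            # groups 1-4 get fixed token types; group 5 is re-lexed
            # by this same lexer via using(this)
            (r'(\w+)(\s*)(=)(\s*)(.+)',
             bygroups(Name.Variable, Text, Operator, Text, using(this))),
            (r'\w+', Name),
            (r'[-+*/]', Operator),
            (r'\s+', Text),
        ],
    }
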
@@ -466,7 +466,7 @@ class RegexLexerMeta(LexerMeta):
def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
- assert type(state) is str, "wrong state name %r" % state
+ assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
@@ -495,7 +495,7 @@ class RegexLexerMeta(LexerMeta):
rex = cls._process_regex(tdef[0], rflags, state)
except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
- (tdef[0], state, cls, err)) from err
+ (tdef[0], state, cls, err)) from err
token = cls._process_token(tdef[1])
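
That ValueError surfaces lazily: token definitions are compiled on first instantiation of the lexer class, not when the class body is defined. A small sketch of triggering it (BrokenLexer is deliberately invalid):

from pygments.lexer import RegexLexer
from pygments.token import Text

class BrokenLexer(RegexLexer):
    name = 'Broken'
    tokens = {'root': [(r'(unclosed', Text)]}  # unbalanced parenthesis

try:
    BrokenLexer()  # token tables are compiled here, on first use
except ValueError as err:
    print(err)  # uncompilable regex '(unclosed' in state 'root' of ...
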
@@ -533,7 +533,7 @@ class RegexLexerMeta(LexerMeta):
for c in cls.__mro__:
toks = c.__dict__.get('tokens', {})
- for state, items in toks.items():
+ for state, items in toks.items():
curitems = tokens.get(state)
if curitems is None:
# N.b. because this is assigned by reference, sufficiently
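
The __mro__ walk above is what makes token-table inheritance work; the inherit marker (from the _inherit hunk earlier) tells the metaclass where to splice the parent's rules into an overridden state. A hypothetical subclass pair:

from pygments.lexer import RegexLexer, inherit
from pygments.token import Keyword, Name, Text

class BaseLangLexer(RegexLexer):
    name = 'BaseLang'
    tokens = {
        'root': [
            (r'[a-z]+', Name),
            (r'\s+', Text),
        ],
    }

class ExtLangLexer(BaseLangLexer):
    """Adds keywords, then defers to the parent rules via inherit."""
    name = 'ExtLang'
    tokens = {
        'root': [
            (r'\b(if|while)\b', Keyword),
            inherit,  # the parent's 'root' rules are merged in here
        ],
    }
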
@@ -579,7 +579,7 @@ class RegexLexerMeta(LexerMeta):
return type.__call__(cls, *args, **kwds)
-class RegexLexer(Lexer, metaclass=RegexLexerMeta):
+class RegexLexer(Lexer, metaclass=RegexLexerMeta):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
@@ -632,7 +632,7 @@ class RegexLexer(Lexer, metaclass=RegexLexerMeta):
if type(action) is _TokenType:
yield pos, action, m.group()
else:
- yield from action(self, m)
+ yield from action(self, m)
pos = m.end()
if new_state is not None:
# state transition
@@ -667,7 +667,7 @@ class RegexLexer(Lexer, metaclass=RegexLexerMeta):
# at EOL, reset state to "root"
statestack = ['root']
statetokens = tokendefs['root']
- yield pos, Text, '\n'
+ yield pos, Text, '\n'
pos += 1
continue
yield pos, Error, text[pos]
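
The loop in these hunks is the whole RegexLexer engine: try each rule of the current state at pos, yield the rule's token (or defer to its callback), apply its state transition to the stack, and on an unmatched newline fall back to ['root']. A hypothetical lexer exercising the '#push'/'#pop' transitions:

from pygments.lexer import RegexLexer
from pygments.token import Comment, Text

class NestedCommentLexer(RegexLexer):
    """Hypothetical: nested (* ... *) comments via the state stack."""
    name = 'NestedComment'
    tokens = {
        'root': [
            (r'\(\*', Comment.Multiline, 'comment'),  # push 'comment'
            (r'[^(]+', Text),
            (r'\(', Text),
        ],
        'comment': [
            (r'\(\*', Comment.Multiline, '#push'),    # nest one level
            (r'\*\)', Comment.Multiline, '#pop'),     # close one level
            (r'[^(*]+', Comment.Multiline),
            (r'[(*]', Comment.Multiline),
        ],
    }

for pos, tok, val in NestedCommentLexer().get_tokens_unprocessed('a (* b (* c *) *) d'):
    print(pos, tok, val)
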
@@ -676,7 +676,7 @@ class RegexLexer(Lexer, metaclass=RegexLexerMeta):
break
-class LexerContext:
+class LexerContext:
"""
A helper object that holds lexer position data.
"""
@@ -719,7 +719,7 @@ class ExtendedRegexLexer(RegexLexer):
yield ctx.pos, action, m.group()
ctx.pos = m.end()
else:
- yield from action(self, m, ctx)
+ yield from action(self, m, ctx)
if not new_state:
# altered the state stack?
statetokens = tokendefs[ctx.stack[-1]]
@@ -755,7 +755,7 @@ class ExtendedRegexLexer(RegexLexer):
# at EOL, reset state to "root"
ctx.stack = ['root']
statetokens = tokendefs['root']
- yield ctx.pos, Text, '\n'
+ yield ctx.pos, Text, '\n'
ctx.pos += 1
continue
yield ctx.pos, Error, text[ctx.pos]
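
What sets ExtendedRegexLexer apart is visible above: callbacks receive the LexerContext as a third argument and may move ctx.pos themselves, and the engine resumes from wherever they leave it. A hedged sketch of such a callback (ShoutLexer and its behavior are invented):

from pygments.lexer import ExtendedRegexLexer
from pygments.token import Name, Text

def upper_run(lexer, match, ctx):
    """Hypothetical callback: emit the match, then skip trailing spaces."""
    yield match.start(), Name.Constant, match.group()
    ctx.pos = match.end()
    # a callback may push ctx.pos past the match it was handed
    while ctx.pos < len(ctx.text) and ctx.text[ctx.pos] == ' ':
        ctx.pos += 1

class ShoutLexer(ExtendedRegexLexer):
    name = 'Shout'
    tokens = {
        'root': [
            (r'[A-Z]+', upper_run),
            (r'[a-z]+', Name),
            (r'\s+', Text),
        ],
    }
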
@@ -783,7 +783,7 @@ def do_insertions(insertions, tokens):
index, itokens = next(insertions)
except StopIteration:
# no insertions
- yield from tokens
+ yield from tokens
return
realpos = None
@@ -798,9 +798,9 @@ def do_insertions(insertions, tokens):
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
- if tmpval:
- yield realpos, t, tmpval
- realpos += len(tmpval)
+ if tmpval:
+ yield realpos, t, tmpval
+ realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
@@ -810,9 +810,9 @@ def do_insertions(insertions, tokens):
except StopIteration:
insleft = False
break # not strictly necessary
- if oldi < len(v):
- yield realpos, t, v[oldi:]
- realpos += len(v) - oldi
+ if oldi < len(v):
+ yield realpos, t, v[oldi:]
+ realpos += len(v) - oldi
# leftover tokens
while insleft:
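
do_insertions(), patched in this hunk, merges a secondary token stream into a primary one at given character offsets; console-session lexers use it to splice prompt tokens into output from a sublexer. A hedged usage sketch with hand-picked offsets:

from pygments.lexer import do_insertions
from pygments.lexers import PythonLexer
from pygments.token import Generic

code = 'x = 1\ny = 2\n'
# Each insertion is (char_offset, [(index, token, value), ...]);
# here a fake '>>> ' prompt is inserted before each source line.
insertions = [
    (0, [(0, Generic.Prompt, '>>> ')]),
    (6, [(0, Generic.Prompt, '>>> ')]),
]
stream = PythonLexer().get_tokens_unprocessed(code)
for pos, tok, val in do_insertions(insertions, stream):
    print(pos, tok, val)
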
@@ -850,7 +850,7 @@ class ProfilingRegexLexerMeta(RegexLexerMeta):
return match_func
-class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
+class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
"""Drop-in replacement for RegexLexer that does profiling of its regexes."""
_prof_data = []
@@ -859,7 +859,7 @@ class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
def get_tokens_unprocessed(self, text, stack=('root',)):
# this needs to be a stack, since using(this) will produce nested calls
self.__class__._prof_data.append({})
- yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
+ yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
rawdata = self.__class__._prof_data.pop()
data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
n, 1000 * t, 1000 * t / n)
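
ProfilingRegexLexer accumulates per-regex call counts and timings in _prof_data and reports them once the outermost get_tokens_unprocessed() call finishes. A hedged usage sketch; deriving from both the profiler and a concrete lexer is the intended pattern, but ProfilingPythonLexer itself is hypothetical:

from pygments.lexer import ProfilingRegexLexer
from pygments.lexers import PythonLexer

class ProfilingPythonLexer(ProfilingRegexLexer, PythonLexer):
    """Hypothetical: PythonLexer with per-regex profiling attached."""
    pass

# Exhaust the generator; the profile table is printed when the
# outermost get_tokens_unprocessed() call completes.
list(ProfilingPythonLexer().get_tokens('print("hello")\n'))
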