path: root/contrib/python/Pygments/py3/pygments/lexers/special.py
author    ilezhankin <ilezhankin@yandex-team.ru>    2022-02-10 16:45:56 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>    2022-02-10 16:45:56 +0300
commit    62a805381e41500fbc7914c37c71ab040a098f4e (patch)
tree      1a2c5ffcf89eb53ecd79dbc9bc0a195c27404d0c /contrib/python/Pygments/py3/pygments/lexers/special.py
parent    1d125034f06575234f83f24f08677955133f140e (diff)
download  ydb-62a805381e41500fbc7914c37c71ab040a098f4e.tar.gz
Restoring authorship annotation for <ilezhankin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/python/Pygments/py3/pygments/lexers/special.py')
-rw-r--r--  contrib/python/Pygments/py3/pygments/lexers/special.py  142
1 file changed, 71 insertions(+), 71 deletions(-)
diff --git a/contrib/python/Pygments/py3/pygments/lexers/special.py b/contrib/python/Pygments/py3/pygments/lexers/special.py
index b108cf1aef..bff6652c56 100644
--- a/contrib/python/Pygments/py3/pygments/lexers/special.py
+++ b/contrib/python/Pygments/py3/pygments/lexers/special.py
@@ -1,40 +1,40 @@
-"""
- pygments.lexers.special
- ~~~~~~~~~~~~~~~~~~~~~~~
-
- Special lexers.
-
+"""
+ pygments.lexers.special
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Special lexers.
+
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
+ :license: BSD, see LICENSE for details.
+"""
+
import ast
-import re
-
-from pygments.lexer import Lexer
+import re
+
+from pygments.lexer import Lexer
from pygments.token import Token, Error, Text, Generic
from pygments.util import get_choice_opt
-
-
+
+
__all__ = ['TextLexer', 'OutputLexer', 'RawTokenLexer']
-
-
-class TextLexer(Lexer):
- """
- "Null" lexer, doesn't highlight anything.
- """
- name = 'Text only'
- aliases = ['text']
- filenames = ['*.txt']
- mimetypes = ['text/plain']
+
+
+class TextLexer(Lexer):
+ """
+ "Null" lexer, doesn't highlight anything.
+ """
+ name = 'Text only'
+ aliases = ['text']
+ filenames = ['*.txt']
+ mimetypes = ['text/plain']
priority = 0.01
-
- def get_tokens_unprocessed(self, text):
- yield 0, Text, text
-
+
+ def get_tokens_unprocessed(self, text):
+ yield 0, Text, text
+
def analyse_text(text):
return TextLexer.priority
-
+
class OutputLexer(Lexer):
"""
@@ -49,32 +49,32 @@ class OutputLexer(Lexer):
yield 0, Generic.Output, text
-_ttype_cache = {}
-
+_ttype_cache = {}
+
line_re = re.compile('.*?\n')
-
-
-class RawTokenLexer(Lexer):
- """
+
+
+class RawTokenLexer(Lexer):
+ """
Recreate a token stream formatted with the `RawTokenFormatter`.
-
- Additional options accepted:
-
- `compress`
- If set to ``"gz"`` or ``"bz2"``, decompress the token stream with
- the given compression algorithm before lexing (default: ``""``).
- """
- name = 'Raw token data'
+
+ Additional options accepted:
+
+ `compress`
+ If set to ``"gz"`` or ``"bz2"``, decompress the token stream with
+ the given compression algorithm before lexing (default: ``""``).
+ """
+ name = 'Raw token data'
aliases = []
- filenames = []
- mimetypes = ['application/x-pygments-tokens']
-
- def __init__(self, **options):
- self.compress = get_choice_opt(options, 'compress',
- ['', 'none', 'gz', 'bz2'], '')
- Lexer.__init__(self, **options)
-
- def get_tokens(self, text):
+ filenames = []
+ mimetypes = ['application/x-pygments-tokens']
+
+ def __init__(self, **options):
+ self.compress = get_choice_opt(options, 'compress',
+ ['', 'none', 'gz', 'bz2'], '')
+ Lexer.__init__(self, **options)
+
+ def get_tokens(self, text):
if self.compress:
if isinstance(text, str):
text = text.encode('latin1')
@@ -89,31 +89,31 @@ class RawTokenLexer(Lexer):
yield Error, text.decode('latin1')
if isinstance(text, bytes):
text = text.decode('latin1')
-
+
# do not call Lexer.get_tokens() because stripping is not optional.
text = text.strip('\n') + '\n'
- for i, t, v in self.get_tokens_unprocessed(text):
- yield t, v
-
- def get_tokens_unprocessed(self, text):
- length = 0
- for match in line_re.finditer(text):
- try:
+ for i, t, v in self.get_tokens_unprocessed(text):
+ yield t, v
+
+ def get_tokens_unprocessed(self, text):
+ length = 0
+ for match in line_re.finditer(text):
+ try:
ttypestr, val = match.group().rstrip().split('\t', 1)
- ttype = _ttype_cache.get(ttypestr)
- if not ttype:
- ttype = Token
- ttypes = ttypestr.split('.')[1:]
- for ttype_ in ttypes:
- if not ttype_ or not ttype_[0].isupper():
- raise ValueError('malformed token name')
- ttype = getattr(ttype, ttype_)
- _ttype_cache[ttypestr] = ttype
+ ttype = _ttype_cache.get(ttypestr)
+ if not ttype:
+ ttype = Token
+ ttypes = ttypestr.split('.')[1:]
+ for ttype_ in ttypes:
+ if not ttype_ or not ttype_[0].isupper():
+ raise ValueError('malformed token name')
+ ttype = getattr(ttype, ttype_)
+ _ttype_cache[ttypestr] = ttype
val = ast.literal_eval(val)
if not isinstance(val, str):
raise ValueError('expected str')
except (SyntaxError, ValueError):
val = match.group()
ttype = Error
- yield length, ttype, val
- length += len(val)
+ yield length, ttype, val
+ length += len(val)
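
For context beyond the patch: TextLexer.analyse_text returns the fixed
priority 0.01, which makes plain text the fallback guess when no other
lexer claims the input. A minimal sketch of that behavior, assuming a
standard Pygments installation:

    from pygments.lexers import guess_lexer
    from pygments.lexers.special import TextLexer

    # With no other lexer scoring higher, guess_lexer falls back to TextLexer.
    lexer = guess_lexer("just a few plain words, nothing to highlight")
    print(type(lexer).__name__)  # expected: TextLexer

    # The "null" lexer emits the whole input as one Text token at offset 0.
    print(list(TextLexer().get_tokens_unprocessed("hello\n")))
    # [(0, Token.Text, 'hello\n')]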
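The RawTokenLexer shown in the diff pairs with RawTokenFormatter: the
formatter serializes a token stream into tab-separated "Token.Type\t'value'"
lines, and this lexer parses them back via ast.literal_eval. A hedged
round-trip sketch, again assuming a standard Pygments installation:

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.lexers.special import RawTokenLexer
    from pygments.formatters import RawTokenFormatter

    # Serialize: RawTokenFormatter produces bytes.
    raw = highlight("x = 1\n", PythonLexer(), RawTokenFormatter())
    print(raw.splitlines()[0])  # e.g. b"Token.Name\t'x'"

    # Deserialize: bytes are decoded as latin1; malformed lines come back
    # as Error tokens instead of raising.
    for ttype, value in RawTokenLexer().get_tokens(raw):
        print(ttype, repr(value))

The compress option documented in the docstring applies to both ends:
output written with RawTokenFormatter(compress='gz') is readable with
RawTokenLexer(compress='gz'), and likewise for 'bz2'.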
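The getattr chain in get_tokens_unprocessed works because Pygments token
types are singletons created on attribute access of the root Token object;
_ttype_cache merely memoizes that walk per raw string. An illustrative
sketch of the same reconstruction:

    from pygments.token import Token

    # Rebuild a token type from its dotted name, as the lexer does.
    ttype = Token
    for part in 'Token.Literal.Number.Integer'.split('.')[1:]:
        ttype = getattr(ttype, part)
    print(ttype is Token.Literal.Number.Integer)  # True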