author      shadchin <shadchin@yandex-team.ru>    2022-02-10 16:44:30 +0300
committer   Daniil Cherednik <dcherednik@yandex-team.ru>    2022-02-10 16:44:30 +0300
commit      2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch)
tree        012bb94d777798f1f56ac1cec429509766d05181 /contrib/python/Pygments/py3/pygments/lexers/special.py
parent      6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff)
download    ydb-2598ef1d0aee359b4b6d5fdd1758916d5907d04f.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/python/Pygments/py3/pygments/lexers/special.py')
-rw-r--r--  contrib/python/Pygments/py3/pygments/lexers/special.py  |  92
1 file changed, 46 insertions, 46 deletions
diff --git a/contrib/python/Pygments/py3/pygments/lexers/special.py b/contrib/python/Pygments/py3/pygments/lexers/special.py
index bff6652c56..7bdfaa81e0 100644
--- a/contrib/python/Pygments/py3/pygments/lexers/special.py
+++ b/contrib/python/Pygments/py3/pygments/lexers/special.py
@@ -4,19 +4,19 @@
Special lexers.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-import ast
+import ast
import re
from pygments.lexer import Lexer
-from pygments.token import Token, Error, Text, Generic
-from pygments.util import get_choice_opt
+from pygments.token import Token, Error, Text, Generic
+from pygments.util import get_choice_opt
-__all__ = ['TextLexer', 'OutputLexer', 'RawTokenLexer']
+__all__ = ['TextLexer', 'OutputLexer', 'RawTokenLexer']
class TextLexer(Lexer):
@@ -35,28 +35,28 @@ class TextLexer(Lexer):
def analyse_text(text):
return TextLexer.priority
-
-class OutputLexer(Lexer):
- """
- Simple lexer that highlights everything as ``Token.Generic.Output``.
-
- .. versionadded:: 2.10
- """
- name = 'Text output'
- aliases = ['output']
-
- def get_tokens_unprocessed(self, text):
- yield 0, Generic.Output, text
-
-
+
+class OutputLexer(Lexer):
+ """
+ Simple lexer that highlights everything as ``Token.Generic.Output``.
+
+ .. versionadded:: 2.10
+ """
+ name = 'Text output'
+ aliases = ['output']
+
+ def get_tokens_unprocessed(self, text):
+ yield 0, Generic.Output, text
+
+
_ttype_cache = {}
-line_re = re.compile('.*?\n')
+line_re = re.compile('.*?\n')
class RawTokenLexer(Lexer):
"""
- Recreate a token stream formatted with the `RawTokenFormatter`.
+ Recreate a token stream formatted with the `RawTokenFormatter`.
Additional options accepted:
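
The OutputLexer re-added above is intentionally trivial: the whole input comes back as a single ``Token.Generic.Output`` token. A minimal usage sketch (assuming a standard Pygments install; the sample shell transcript is made up):

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers.special import OutputLexer

    # Every character of the input is emitted as one Generic.Output token,
    # so the formatter styles it uniformly as program output.
    print(highlight("$ make\ncc -c main.c\n", OutputLexer(), TerminalFormatter()))
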
@@ -65,7 +65,7 @@ class RawTokenLexer(Lexer):
the given compression algorithm before lexing (default: ``""``).
"""
name = 'Raw token data'
- aliases = []
+ aliases = []
filenames = []
mimetypes = ['application/x-pygments-tokens']
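
For context, the format this lexer consumes is one ``<token type>\t<repr of value>`` pair per line, as written by ``RawTokenFormatter``. A hand-made sample (illustrative only, not taken from the diff):

    from pygments.lexers.special import RawTokenLexer

    # Three hand-written raw-token lines; real input would normally come
    # from RawTokenFormatter rather than a string literal.
    sample = "Token.Keyword\t'def'\nToken.Text\t' '\nToken.Name.Function\t'f'\n"
    for ttype, value in RawTokenLexer().get_tokens(sample):
        print(ttype, repr(value))
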
@@ -75,23 +75,23 @@ class RawTokenLexer(Lexer):
Lexer.__init__(self, **options)
def get_tokens(self, text):
- if self.compress:
- if isinstance(text, str):
- text = text.encode('latin1')
- try:
- if self.compress == 'gz':
- import gzip
- text = gzip.decompress(text)
- elif self.compress == 'bz2':
- import bz2
- text = bz2.decompress(text)
- except OSError:
- yield Error, text.decode('latin1')
- if isinstance(text, bytes):
- text = text.decode('latin1')
-
- # do not call Lexer.get_tokens() because stripping is not optional.
- text = text.strip('\n') + '\n'
+ if self.compress:
+ if isinstance(text, str):
+ text = text.encode('latin1')
+ try:
+ if self.compress == 'gz':
+ import gzip
+ text = gzip.decompress(text)
+ elif self.compress == 'bz2':
+ import bz2
+ text = bz2.decompress(text)
+ except OSError:
+ yield Error, text.decode('latin1')
+ if isinstance(text, bytes):
+ text = text.decode('latin1')
+
+ # do not call Lexer.get_tokens() because stripping is not optional.
+ text = text.strip('\n') + '\n'
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
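
The decompression branch restored above pairs with ``RawTokenFormatter``'s own ``compress`` option. A hedged round-trip sketch (``x = 1`` is an arbitrary sample input):

    from pygments import highlight
    from pygments.formatters import RawTokenFormatter
    from pygments.lexers import PythonLexer
    from pygments.lexers.special import RawTokenLexer

    # RawTokenFormatter returns bytes; with compress='gz' they are gzipped.
    blob = highlight("x = 1\n", PythonLexer(), RawTokenFormatter(compress='gz'))
    # The lexer's matching compress option gunzips before parsing.
    for ttype, value in RawTokenLexer(compress='gz').get_tokens(blob):
        print(ttype, repr(value))
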
@@ -99,7 +99,7 @@ class RawTokenLexer(Lexer):
length = 0
for match in line_re.finditer(text):
try:
- ttypestr, val = match.group().rstrip().split('\t', 1)
+ ttypestr, val = match.group().rstrip().split('\t', 1)
ttype = _ttype_cache.get(ttypestr)
if not ttype:
ttype = Token
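
The ``_ttype_cache`` consulted here avoids repeating the attribute walk restored in the next hunk; that lookup amounts to the following (a hedged illustration, not part of the diff):

    from pygments.token import Token

    # "Token.Name.Function" resolves one capitalized attribute at a time,
    # starting from the root Token object.
    ttype = Token
    for part in "Token.Name.Function".split('.')[1:]:
        ttype = getattr(ttype, part)
    assert ttype is Token.Name.Function
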
@@ -109,11 +109,11 @@ class RawTokenLexer(Lexer):
raise ValueError('malformed token name')
ttype = getattr(ttype, ttype_)
_ttype_cache[ttypestr] = ttype
- val = ast.literal_eval(val)
- if not isinstance(val, str):
- raise ValueError('expected str')
- except (SyntaxError, ValueError):
- val = match.group()
- ttype = Error
+ val = ast.literal_eval(val)
+ if not isinstance(val, str):
+ raise ValueError('expected str')
+ except (SyntaxError, ValueError):
+ val = match.group()
+ ttype = Error
yield length, ttype, val
length += len(val)
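
Finally, the ``Error`` fallback restored at the end means malformed lines are passed through rather than dropped; a quick sketch of that behavior, assumed from the code above:

    from pygments.lexers.special import RawTokenLexer
    from pygments.token import Error

    # A line with no tab-separated repr fails the split/literal_eval above
    # and is yielded verbatim as an Error token at offset 0.
    bad = "not a raw token line\n"
    assert list(RawTokenLexer().get_tokens_unprocessed(bad)) == [(0, Error, bad)]
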