about summary refs log tree commit diff stats
path: root/contrib/python/Pygments/py3/pygments/lexers/erlang.py
diff options
context:
space:
mode:
authorshadchin <shadchin@yandex-team.ru>2022-02-10 16:44:39 +0300
committerDaniil Cherednik <dcherednik@yandex-team.ru>2022-02-10 16:44:39 +0300
commite9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch)
tree64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/python/Pygments/py3/pygments/lexers/erlang.py
parent2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff)
downloadydb-e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/python/Pygments/py3/pygments/lexers/erlang.py')
-rw-r--r--contrib/python/Pygments/py3/pygments/lexers/erlang.py56
1 file changed, 28 insertions, 28 deletions
diff --git a/contrib/python/Pygments/py3/pygments/lexers/erlang.py b/contrib/python/Pygments/py3/pygments/lexers/erlang.py
index 1e496729e1..2563ffc263 100644
--- a/contrib/python/Pygments/py3/pygments/lexers/erlang.py
+++ b/contrib/python/Pygments/py3/pygments/lexers/erlang.py
@@ -4,7 +4,7 @@
Lexers for Erlang.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -162,7 +162,7 @@ class ErlangShellLexer(Lexer):
filenames = ['*.erl-sh']
mimetypes = ['text/x-erl-shellsession']
- _prompt_re = re.compile(r'(?:\([\w@_.]+\))?\d+>(?=\s|\Z)')
+ _prompt_re = re.compile(r'(?:\([\w@_.]+\))?\d+>(?=\s|\Z)')
def get_tokens_unprocessed(self, text):
erlexer = ErlangLexer(**self.options)
@@ -179,8 +179,8 @@ class ErlangShellLexer(Lexer):
curcode += line[end:]
else:
if curcode:
- yield from do_insertions(insertions,
- erlexer.get_tokens_unprocessed(curcode))
+ yield from do_insertions(insertions,
+ erlexer.get_tokens_unprocessed(curcode))
curcode = ''
insertions = []
if line.startswith('*'):
@@ -188,8 +188,8 @@ class ErlangShellLexer(Lexer):
else:
yield match.start(), Generic.Output, line
if curcode:
- yield from do_insertions(insertions,
- erlexer.get_tokens_unprocessed(curcode))
+ yield from do_insertions(insertions,
+ erlexer.get_tokens_unprocessed(curcode))
def gen_elixir_string_rules(name, symbol, token):
@@ -204,10 +204,10 @@ def gen_elixir_string_rules(name, symbol, token):
return states
-def gen_elixir_sigstr_rules(term, term_class, token, interpol=True):
+def gen_elixir_sigstr_rules(term, term_class, token, interpol=True):
if interpol:
return [
- (r'[^#%s\\]+' % (term_class,), token),
+ (r'[^#%s\\]+' % (term_class,), token),
include('escapes'),
(r'\\.', token),
(r'%s[a-zA-Z]*' % (term,), token, '#pop'),
@@ -215,7 +215,7 @@ def gen_elixir_sigstr_rules(term, term_class, token, interpol=True):
]
else:
return [
- (r'[^%s\\]+' % (term_class,), token),
+ (r'[^%s\\]+' % (term_class,), token),
(r'\\.', token),
(r'%s[a-zA-Z]*' % (term,), token, '#pop'),
]
@@ -230,7 +230,7 @@ class ElixirLexer(RegexLexer):
name = 'Elixir'
aliases = ['elixir', 'ex', 'exs']
- filenames = ['*.ex', '*.eex', '*.exs', '*.leex']
+ filenames = ['*.ex', '*.eex', '*.exs', '*.leex']
mimetypes = ['text/x-elixir']
KEYWORD = ('fn', 'do', 'end', 'after', 'else', 'rescue', 'catch')
@@ -288,14 +288,14 @@ class ElixirLexer(RegexLexer):
def gen_elixir_sigil_rules():
# all valid sigil terminators (excluding heredocs)
terminators = [
- (r'\{', r'\}', '}', 'cb'),
- (r'\[', r'\]', r'\]', 'sb'),
- (r'\(', r'\)', ')', 'pa'),
- ('<', '>', '>', 'ab'),
- ('/', '/', '/', 'slas'),
- (r'\|', r'\|', '|', 'pipe'),
- ('"', '"', '"', 'quot'),
- ("'", "'", "'", 'apos'),
+ (r'\{', r'\}', '}', 'cb'),
+ (r'\[', r'\]', r'\]', 'sb'),
+ (r'\(', r'\)', ')', 'pa'),
+ ('<', '>', '>', 'ab'),
+ ('/', '/', '/', 'slas'),
+ (r'\|', r'\|', '|', 'pipe'),
+ ('"', '"', '"', 'quot'),
+ ("'", "'", "'", 'apos'),
]
# heredocs have slightly different rules
@@ -325,15 +325,15 @@ class ElixirLexer(RegexLexer):
include('heredoc_no_interpol'),
]
- for lterm, rterm, rterm_class, name in terminators:
+ for lterm, rterm, rterm_class, name in terminators:
states['sigils'] += [
(r'~[a-z]' + lterm, token, name + '-intp'),
(r'~[A-Z]' + lterm, token, name + '-no-intp'),
]
- states[name + '-intp'] = \
- gen_elixir_sigstr_rules(rterm, rterm_class, token)
+ states[name + '-intp'] = \
+ gen_elixir_sigstr_rules(rterm, rterm_class, token)
states[name + '-no-intp'] = \
- gen_elixir_sigstr_rules(rterm, rterm_class, token, interpol=False)
+ gen_elixir_sigstr_rules(rterm, rterm_class, token, interpol=False)
return states
@@ -495,7 +495,7 @@ class ElixirConsoleLexer(Lexer):
aliases = ['iex']
mimetypes = ['text/x-elixir-shellsession']
- _prompt_re = re.compile(r'(iex|\.{3})((?:\([\w@_.]+\))?\d+|\(\d+\))?> ')
+ _prompt_re = re.compile(r'(iex|\.{3})((?:\([\w@_.]+\))?\d+|\(\d+\))?> ')
def get_tokens_unprocessed(self, text):
exlexer = ElixirLexer(**self.options)
@@ -505,7 +505,7 @@ class ElixirConsoleLexer(Lexer):
insertions = []
for match in line_re.finditer(text):
line = match.group()
- if line.startswith('** '):
+ if line.startswith('** '):
in_error = True
insertions.append((len(curcode),
[(0, Generic.Error, line[:-1])]))
@@ -520,12 +520,12 @@ class ElixirConsoleLexer(Lexer):
curcode += line[end:]
else:
if curcode:
- yield from do_insertions(
- insertions, exlexer.get_tokens_unprocessed(curcode))
+ yield from do_insertions(
+ insertions, exlexer.get_tokens_unprocessed(curcode))
curcode = ''
insertions = []
token = Generic.Error if in_error else Generic.Output
yield match.start(), token, line
if curcode:
- yield from do_insertions(
- insertions, exlexer.get_tokens_unprocessed(curcode))
+ yield from do_insertions(
+ insertions, exlexer.get_tokens_unprocessed(curcode))