path: root/contrib/python/Pygments/py3/pygments/lexers/dotnet.py
author     shadchin <shadchin@yandex-team.ru>              2022-02-10 16:44:39 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>    2022-02-10 16:44:39 +0300
commit     e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch)
tree       64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/python/Pygments/py3/pygments/lexers/dotnet.py
parent     2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff)
download   ydb-e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/python/Pygments/py3/pygments/lexers/dotnet.py')
-rw-r--r--  contrib/python/Pygments/py3/pygments/lexers/dotnet.py  68
1 file changed, 34 insertions(+), 34 deletions(-)
diff --git a/contrib/python/Pygments/py3/pygments/lexers/dotnet.py b/contrib/python/Pygments/py3/pygments/lexers/dotnet.py
index 29f6015152..c04d2a0a92 100644
--- a/contrib/python/Pygments/py3/pygments/lexers/dotnet.py
+++ b/contrib/python/Pygments/py3/pygments/lexers/dotnet.py
@@ -4,7 +4,7 @@
Lexers for .net languages.
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
@@ -13,7 +13,7 @@ from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this, default, words
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String, Number, Literal, Other, Whitespace
-from pygments.util import get_choice_opt
+from pygments.util import get_choice_opt
from pygments import unistring as uni
from pygments.lexers.html import XmlLexer
@@ -70,7 +70,7 @@ class CSharpLexer(RegexLexer):
tokens = {}
token_variants = True
- for levelname, cs_ident in levels.items():
+ for levelname, cs_ident in levels.items():
tokens[levelname] = {
'root': [
# method names
@@ -88,7 +88,7 @@ class CSharpLexer(RegexLexer):
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
- (r'\$?"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String),
+ (r'\$?"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
@@ -185,7 +185,7 @@ class NemerleLexer(RegexLexer):
tokens = {}
token_variants = True
- for levelname, cs_ident in levels.items():
+ for levelname, cs_ident in levels.items():
tokens[levelname] = {
'root': [
# method names
@@ -218,7 +218,7 @@ class NemerleLexer(RegexLexer):
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
- (r'"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String),
+ (r'"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number),
@@ -304,17 +304,17 @@ class NemerleLexer(RegexLexer):
RegexLexer.__init__(self, **options)
- def analyse_text(text):
- """Nemerle is quite similar to Python, but @if is relatively uncommon
- elsewhere."""
- result = 0
-
- if '@if' in text:
- result += 0.1
-
- return result
-
-
+ def analyse_text(text):
+ """Nemerle is quite similar to Python, but @if is relatively uncommon
+ elsewhere."""
+ result = 0
+
+ if '@if' in text:
+ result += 0.1
+
+ return result
+
+
class BooLexer(RegexLexer):
"""
For `Boo <http://boo.codehaus.org/>`_ source code.
@@ -334,8 +334,8 @@ class BooLexer(RegexLexer):
(r'(\\)(\n)', bygroups(Text, Whitespace)),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
- (r'/(\\\\|\\[^\\]|[^/\\\s])/', String.Regex),
- (r'@/(\\\\|\\[^\\]|[^/\\])*/', String.Regex),
+ (r'/(\\\\|\\[^\\]|[^/\\\s])/', String.Regex),
+ (r'@/(\\\\|\\[^\\]|[^/\\])*/', String.Regex),
(r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
(r'(as|abstract|callable|constructor|destructor|do|import|'
r'enum|event|final|get|interface|internal|of|override|'
@@ -354,8 +354,8 @@ class BooLexer(RegexLexer):
r'rawArrayIndexing|required|typeof|unchecked|using|'
r'yieldAll|zip)\b', Name.Builtin),
(r'"""(\\\\|\\"|.*?)"""', String.Double),
- (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
- (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
(r'[a-zA-Z_]\w*', Name),
(r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
(r'[0-9][0-9.]*(ms?|d|h|s)', Number),
@@ -527,7 +527,7 @@ class CSharpAspxLexer(DelegatingLexer):
mimetypes = []
def __init__(self, **options):
- super().__init__(CSharpLexer, GenericAspxLexer, **options)
+ super().__init__(CSharpLexer, GenericAspxLexer, **options)
def analyse_text(text):
if re.search(r'Page\s*Language="C#"', text, re.I) is not None:
@@ -547,7 +547,7 @@ class VbNetAspxLexer(DelegatingLexer):
mimetypes = []
def __init__(self, **options):
- super().__init__(VbNetLexer, GenericAspxLexer, **options)
+ super().__init__(VbNetLexer, GenericAspxLexer, **options)
def analyse_text(text):
if re.search(r'Page\s*Language="Vb"', text, re.I) is not None:
@@ -705,14 +705,14 @@ class FSharpLexer(RegexLexer):
(r'"', String),
],
}
-
- def analyse_text(text):
- """F# doesn't have that many unique features -- |> and <| are weak
- indicators."""
- result = 0
- if '|>' in text:
- result += 0.05
- if '<|' in text:
- result += 0.05
-
- return result
+
+ def analyse_text(text):
+ """F# doesn't have that many unique features -- |> and <| are weak
+ indicators."""
+ result = 0
+ if '|>' in text:
+ result += 0.05
+ if '<|' in text:
+ result += 0.05
+
+ return result
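
The hunks above restore whitespace-only differences around the analyse_text() heuristics of NemerleLexer and FSharpLexer. As a minimal sketch (not part of this commit, assuming only a standard Pygments installation), the snippet below shows how such heuristics are consumed: guess_lexer() asks each registered lexer for a score via analyse_text() and prefers higher-scoring lexers, and the '|>' check restored above contributes 0.05 to FSharpLexer's score.

    # Minimal illustration of the analyse_text() heuristics shown in this diff.
    from pygments.lexers import guess_lexer
    from pygments.lexers.dotnet import FSharpLexer

    snippet = """
    let doubled =
        [1; 2; 3]
        |> List.map (fun x -> x * 2)
    """

    # Only '|>' occurs in the snippet, so the restored heuristic yields 0.05.
    print(FSharpLexer.analyse_text(snippet))  # 0.05

    # guess_lexer() compares such scores across all lexers, so the winner is
    # not guaranteed to be FSharpLexer for a snippet this small.
    print(guess_lexer(snippet).name)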