path: root/contrib/python/Pygments/py3/pygments/lexers/rdf.py
author    orivej <orivej@yandex-team.ru>  2022-02-10 16:45:01 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:01 +0300
commit    2d37894b1b037cf24231090eda8589bbb44fb6fc (patch)
tree      be835aa92c6248212e705f25388ebafcf84bc7a1 /contrib/python/Pygments/py3/pygments/lexers/rdf.py
parent    718c552901d703c502ccbefdfc3c9028d608b947 (diff)
download  ydb-2d37894b1b037cf24231090eda8589bbb44fb6fc.tar.gz
Restoring authorship annotation for <orivej@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/python/Pygments/py3/pygments/lexers/rdf.py')
-rw-r--r--  contrib/python/Pygments/py3/pygments/lexers/rdf.py | 70
1 file changed, 35 insertions(+), 35 deletions(-)
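For orientation, the file touched below defines the SPARQL and Turtle lexers that Pygments exposes through its public API. A minimal sketch of how SparqlLexer is typically consumed (the query string and the choice of terminal formatter are illustrative, not part of this change):

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers.rdf import SparqlLexer

    # Illustrative query; any SPARQL text would do.
    query = 'SELECT ?name WHERE { ?person foaf:name ?name } LIMIT 10'

    # highlight() tokenizes the query with SparqlLexer (using the regex
    # terminal productions shown in the diff below) and renders it
    # as ANSI-colored output.
    print(highlight(query, SparqlLexer(), TerminalFormatter()))
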
diff --git a/contrib/python/Pygments/py3/pygments/lexers/rdf.py b/contrib/python/Pygments/py3/pygments/lexers/rdf.py
index bd00da6b61..bd7a4f690c 100644
--- a/contrib/python/Pygments/py3/pygments/lexers/rdf.py
+++ b/contrib/python/Pygments/py3/pygments/lexers/rdf.py
@@ -28,8 +28,8 @@ class SparqlLexer(RegexLexer):
filenames = ['*.rq', '*.sparql']
mimetypes = ['application/sparql-query']
- # character group definitions ::
-
+ # character group definitions ::
+
PN_CHARS_BASE_GRP = ('a-zA-Z'
'\u00c0-\u00d6'
'\u00d8-\u00f6'
@@ -42,38 +42,38 @@ class SparqlLexer(RegexLexer):
'\u3001-\ud7ff'
'\uf900-\ufdcf'
'\ufdf0-\ufffd')
-
- PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_')
-
- PN_CHARS_GRP = (PN_CHARS_U_GRP +
- r'\-' +
- r'0-9' +
+
+ PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_')
+
+ PN_CHARS_GRP = (PN_CHARS_U_GRP +
+ r'\-' +
+ r'0-9' +
'\u00b7' +
'\u0300-\u036f' +
'\u203f-\u2040')
-
- HEX_GRP = '0-9A-Fa-f'
-
- PN_LOCAL_ESC_CHARS_GRP = r' _~.\-!$&"()*+,;=/?#@%'
-
+
+ HEX_GRP = '0-9A-Fa-f'
+
+ PN_LOCAL_ESC_CHARS_GRP = r' _~.\-!$&"()*+,;=/?#@%'
+
# terminal productions ::
- PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']'
+ PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']'
- PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']'
+ PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']'
- PN_CHARS = '[' + PN_CHARS_GRP + ']'
+ PN_CHARS = '[' + PN_CHARS_GRP + ']'
- HEX = '[' + HEX_GRP + ']'
+ HEX = '[' + HEX_GRP + ']'
- PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']'
+ PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']'
IRIREF = r'<(?:[^<>"{}|^`\\\x00-\x20])*>'
- BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \
- '.]*' + PN_CHARS + ')?'
+ BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \
+ '.]*' + PN_CHARS + ')?'
- PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?'
+ PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?'
VARNAME = '[0-9' + PN_CHARS_U_GRP + '][' + PN_CHARS_U_GRP + \
'0-9\u00b7\u0300-\u036f\u203f-\u2040]*'
@@ -84,9 +84,9 @@ class SparqlLexer(RegexLexer):
PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')'
- PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' +
- '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' +
- PN_CHARS_GRP + ':]|' + PLX + '))?')
+ PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' +
+ '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' +
+ PN_CHARS_GRP + ':]|' + PLX + '))?')
EXPONENT = r'[eE][+-]?\d+'
@@ -96,7 +96,7 @@ class SparqlLexer(RegexLexer):
'root': [
(r'\s+', Text),
# keywords ::
- (r'(?i)(select|construct|describe|ask|where|filter|group\s+by|minus|'
+ (r'(?i)(select|construct|describe|ask|where|filter|group\s+by|minus|'
r'distinct|reduced|from\s+named|from|order\s+by|desc|asc|limit|'
r'offset|values|bindings|load|into|clear|drop|create|add|move|copy|'
r'insert\s+data|delete\s+data|delete\s+where|with|delete|insert|'
@@ -110,10 +110,10 @@ class SparqlLexer(RegexLexer):
# # variables ::
('[?$]' + VARNAME, Name.Variable),
# prefixed names ::
- (r'(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + r')?',
+ (r'(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + r')?',
bygroups(Name.Namespace, Punctuation, Name.Tag)),
# function names ::
- (r'(?i)(str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|'
+ (r'(?i)(str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|'
r'ceil|floor|round|concat|strlen|ucase|lcase|encode_for_uri|'
r'contains|strstarts|strends|strbefore|strafter|year|month|day|'
r'hours|minutes|seconds|timezone|tz|now|uuid|struuid|md5|sha1|sha256|sha384|'
@@ -124,7 +124,7 @@ class SparqlLexer(RegexLexer):
# boolean literals ::
(r'(true|false)', Keyword.Constant),
# double literals ::
- (r'[+\-]?(\d+\.\d*' + EXPONENT + r'|\.?\d+' + EXPONENT + ')', Number.Float),
+ (r'[+\-]?(\d+\.\d*' + EXPONENT + r'|\.?\d+' + EXPONENT + ')', Number.Float),
# decimal literals ::
(r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float),
# integer literals ::
@@ -307,13 +307,13 @@ class TurtleLexer(RegexLexer):
],
}
-
- # Turtle and Tera Term macro files share the same file extension
- # but each has a recognizable and distinct syntax.
- def analyse_text(text):
- for t in ('@base ', 'BASE ', '@prefix ', 'PREFIX '):
- if re.search(r'^\s*%s' % t, text):
- return 0.80
+
+ # Turtle and Tera Term macro files share the same file extension
+ # but each has a recognizable and distinct syntax.
+ def analyse_text(text):
+ for t in ('@base ', 'BASE ', '@prefix ', 'PREFIX '):
+ if re.search(r'^\s*%s' % t, text):
+ return 0.80
class ShExCLexer(RegexLexer):
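
The analyse_text hook restored in the last hunk is what lets Pygments tell Turtle content apart from Tera Term macro files that share the same extension. A minimal sketch of that detection path, assuming a sample document starting with '@prefix ' (the snippet is illustrative):

    from pygments.lexers import guess_lexer

    # A document beginning with '@prefix ' should trip TurtleLexer.analyse_text,
    # which returns 0.80 and outranks other candidates for the same text.
    sample = '@prefix foaf: <http://xmlns.com/foaf/0.1/> .\n'

    lexer = guess_lexer(sample)
    print(lexer.name)  # expected to report the Turtle lexer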