"""
pygments.lexers.dns
~~~~~~~~~~~~~~~~~~~
Pygments lexers for DNS
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace, Literal
from pygments.lexer import RegexLexer, bygroups, include
__all__ = ['DnsZoneLexer']

CLASSES = [
    "IN",
    "CS",
    "CH",
    "HS",
]

CLASSES_RE = "(" + "|".join(CLASSES) + ')'
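# CLASSES_RE expands to the alternation "(IN|CS|CH|HS)"; it is spliced into the
# record patterns below to match the optional <class> field.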


class DnsZoneLexer(RegexLexer):
    """
    Lexer for DNS zone files.

    .. versionadded:: 2.16
    """

    flags = re.MULTILINE

    name = 'Zone'
    aliases = ['zone']
    filenames = ["*.zone"]
    url = "https://datatracker.ietf.org/doc/html/rfc1035"
    mimetypes = ['text/dns']

    tokens = {
        'root': [
            # Empty/comment line:
            (r'([ \t]*)(;.*)(\n)', bygroups(Whitespace, Comment.Single, Whitespace)),
            # Special directives:
            (r'^\$ORIGIN\b', Keyword, 'values'),
            (r'^\$TTL\b', Keyword, 'values'),
            (r'^\$INCLUDE\b', Comment.Preproc, 'include'),
            # TODO: $GENERATE https://bind9.readthedocs.io/en/v9.18.14/chapter3.html#soa-rr
            (r'^\$[A-Z]+\b', Keyword, 'values'),
            # Records:
            # <domain-name> [<TTL>] [<class>] <type> <RDATA> [<comment>]
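            # For reference (illustrative sample data), the patterns below are
            # meant to match record lines such as:
            #   @    3600 IN SOA  ns1.example.com. admin.example.com. ( 1 7200 900 1209600 86400 )
            #   www  IN   A      192.0.2.1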
            (r'^(@)([ \t]+)(?:([0-9]+[smhdw]?)([ \t]+))?(?:' + CLASSES_RE + "([ \t]+))?([A-Z]+)([ \t]+)",
                bygroups(Operator, Whitespace, Number.Integer, Whitespace, Name.Class, Whitespace, Keyword.Type, Whitespace),
                "values"),
            (r'^([^ \t\n]*)([ \t]+)(?:([0-9]+[smhdw]?)([ \t]+))?(?:' + CLASSES_RE + "([ \t]+))?([A-Z]+)([ \t]+)",
                bygroups(Name, Whitespace, Number.Integer, Whitespace, Name.Class, Whitespace, Keyword.Type, Whitespace),
                "values"),
            # <domain-name> [<class>] [<TTL>] <type> <RDATA> [<comment>]
            (r'^(@)([ \t]+)(?:' + CLASSES_RE + "([ \t]+))?(?:([0-9]+[smhdw]?)([ \t]+))?([A-Z]+)([ \t]+)",
                bygroups(Operator, Whitespace, Name.Class, Whitespace, Number.Integer, Whitespace, Keyword.Type, Whitespace),
                "values"),
            (r'^([^ \t\n]*)([ \t]+)(?:' + CLASSES_RE + "([ \t]+))?(?:([0-9]+[smhdw]?)([ \t]+))?([A-Z]+)([ \t]+)",
                bygroups(Name, Whitespace, Name.Class, Whitespace, Number.Integer, Whitespace, Keyword.Type, Whitespace),
                "values"),
        ],
        # Parsing values:
        'values': [
            (r'\n', Whitespace, "#pop"),
            (r'\(', Punctuation, 'nested'),
            include('simple-values'),
        ],
        # Parsing nested values (...):
        'nested': [
            (r'\)', Punctuation, "#pop"),
            include('simple-values'),
        ],
        # Parsing simple values:
        'simple-values': [
            (r'(;.*)(\n)', bygroups(Comment.Single, Whitespace)),
            (r'[ \t]+', Whitespace),
            (r"@\b", Operator),
            ('"', String, 'string'),
            (r'[0-9]+[smhdw]?$', Number.Integer),
            (r'([0-9]+[smhdw]?)([ \t]+)', bygroups(Number.Integer, Whitespace)),
            (r'\S+', Literal),
        ],
        'include': [
            (r'([ \t]+)([^ \t\n]+)([ \t]+)([-\._a-zA-Z]+)([ \t]+)(;.*)?$',
             bygroups(Whitespace, Comment.PreprocFile, Whitespace, Name, Whitespace, Comment.Single), '#pop'),
            (r'([ \t]+)([^ \t\n]+)([ \t\n]+)$', bygroups(Whitespace, Comment.PreprocFile, Whitespace), '#pop'),
        ],
        "string": [
            (r'\\"', String),
            (r'"', String, "#pop"),
            (r'[^"]+', String),
        ],
    }

    def analyse_text(text):
        return text.startswith("$ORIGIN")
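

# A minimal usage sketch (not part of the original module), assuming the file is
# importable as pygments.lexers.dns: the lexer can be exercised directly with
# pygments.highlight, for example:
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#
#     zone = "$ORIGIN example.com.\n@ 3600 IN A 192.0.2.1\n"
#     print(highlight(zone, DnsZoneLexer(), TerminalFormatter()))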