author | shadchin <shadchin@yandex-team.com> | 2025-06-13 00:05:26 +0300 |
---|---|---|
committer | shadchin <shadchin@yandex-team.com> | 2025-06-13 00:35:30 +0300 |
commit | 796b9088366b10b4cd42885101fc20c0b5709b07 (patch) | |
tree | f287eacb0b95ffd7cabf95b16cafb4788645dc38 /contrib/tools/python3/Lib/tokenize.py | |
parent | c72bca862651e507d2ff4980ef7f4ff7267a7227 (diff) | |
download | ydb-796b9088366b10b4cd42885101fc20c0b5709b07.tar.gz | |
Update Python 3 to 3.12.10
commit_hash:dd2398e159fe1d72ea6b12da52fccc933a41a785
Diffstat (limited to 'contrib/tools/python3/Lib/tokenize.py')
-rw-r--r-- | contrib/tools/python3/Lib/tokenize.py | 26 |
1 file changed, 20 insertions, 6 deletions
diff --git a/contrib/tools/python3/Lib/tokenize.py b/contrib/tools/python3/Lib/tokenize.py
index 553c1ca4388..7cf92acb192 100644
--- a/contrib/tools/python3/Lib/tokenize.py
+++ b/contrib/tools/python3/Lib/tokenize.py
@@ -171,6 +171,7 @@ class Untokenizer:
         self.prev_row = 1
         self.prev_col = 0
         self.prev_type = None
+        self.prev_line = ""
         self.encoding = None
 
     def add_whitespace(self, start):
@@ -178,14 +179,28 @@ class Untokenizer:
         if row < self.prev_row or row == self.prev_row and col < self.prev_col:
             raise ValueError("start ({},{}) precedes previous end ({},{})"
                              .format(row, col, self.prev_row, self.prev_col))
-        row_offset = row - self.prev_row
-        if row_offset:
-            self.tokens.append("\\\n" * row_offset)
-            self.prev_col = 0
+        self.add_backslash_continuation(start)
         col_offset = col - self.prev_col
         if col_offset:
             self.tokens.append(" " * col_offset)
 
+    def add_backslash_continuation(self, start):
+        """Add backslash continuation characters if the row has increased
+        without encountering a newline token.
+
+        This also inserts the correct amount of whitespace before the backslash.
+        """
+        row = start[0]
+        row_offset = row - self.prev_row
+        if row_offset == 0:
+            return
+
+        newline = '\r\n' if self.prev_line.endswith('\r\n') else '\n'
+        line = self.prev_line.rstrip('\\\r\n')
+        ws = ''.join(_itertools.takewhile(str.isspace, reversed(line)))
+        self.tokens.append(ws + f"\\{newline}" * row_offset)
+        self.prev_col = 0
+
     def escape_brackets(self, token):
         characters = []
         consume_until_next_bracket = False
@@ -245,8 +260,6 @@ class Untokenizer:
             end_line, end_col = end
             extra_chars = last_line.count("{{") + last_line.count("}}")
             end = (end_line, end_col + extra_chars)
-        elif tok_type in (STRING, FSTRING_START) and self.prev_type in (STRING, FSTRING_END):
-            self.tokens.append(" ")
 
         self.add_whitespace(start)
         self.tokens.append(token)
@@ -255,6 +268,7 @@ class Untokenizer:
                 self.prev_row += 1
                 self.prev_col = 0
             self.prev_type = tok_type
+            self.prev_line = line
         return "".join(self.tokens)
 
     def compat(self, token, iterable):
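The effect of the new `add_backslash_continuation` helper is easiest to see on a tokenize/untokenize round trip of source that uses a backslash line continuation. The sketch below is not part of the commit: the `source` string is an invented example, and the claim that the space before the backslash survives the round trip is an assumption based on the `prev_line` and `_itertools.takewhile` logic shown in the patch. Only standard-library calls (`io.StringIO`, `tokenize.generate_tokens`, `tokenize.untokenize`) are used.

```python
# Minimal round-trip sketch (not part of the commit) for source that uses a
# backslash line continuation.
import io
import tokenize

source = "value = 1 + \\\n    2\n"

tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
roundtrip = tokenize.untokenize(tokens)

# With the patched Untokenizer, prev_line lets add_backslash_continuation
# re-emit the whitespace that preceded the backslash, so the round trip is
# expected to keep "1 + \" intact instead of collapsing the space before
# the backslash as the old row_offset branch did.
print(repr(roundtrip))
```

Note that the helper relies on the module-level `import itertools as _itertools` alias that tokenize.py already defines, which is why the hunk can call `_itertools.takewhile` without adding an import.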