about summary refs log tree commit diff stats
path: root/contrib/python/ipython/py3/IPython/core/inputsplitter.py
diff options
context:
space:
mode:
author robot-contrib <robot-contrib@yandex-team.com> 2023-09-30 10:27:28 +0300
committer robot-contrib <robot-contrib@yandex-team.com> 2023-09-30 10:47:10 +0300
commit 5a6373c9d09bbfb7094f9992a4531477bb97829e (patch)
tree ebea8fd55fee858876743312cdf789a1f01487b5 /contrib/python/ipython/py3/IPython/core/inputsplitter.py
parent 15f3c7493474de25a6b23296878bb8f49470d2e6 (diff)
download ydb-5a6373c9d09bbfb7094f9992a4531477bb97829e.tar.gz
Update contrib/python/ipython/py3 to 8.15.0
Diffstat (limited to 'contrib/python/ipython/py3/IPython/core/inputsplitter.py')
-rw-r--r-- contrib/python/ipython/py3/IPython/core/inputsplitter.py 18
1 files changed, 16 insertions, 2 deletions
diff --git a/contrib/python/ipython/py3/IPython/core/inputsplitter.py b/contrib/python/ipython/py3/IPython/core/inputsplitter.py
index 10707d3d6b..a4401184bd 100644
--- a/contrib/python/ipython/py3/IPython/core/inputsplitter.py
+++ b/contrib/python/ipython/py3/IPython/core/inputsplitter.py
@@ -44,6 +44,7 @@ from IPython.core.inputtransformer import (leading_indent,
assign_from_system,
assemble_python_lines,
)
+from IPython.utils import tokenutil
# These are available in this module for backwards compatibility.
from IPython.core.inputtransformer import (ESC_SHELL, ESC_SH_CAP, ESC_HELP,
@@ -128,7 +129,7 @@ def partial_tokens(s):
readline = io.StringIO(s).readline
token = tokenize.TokenInfo(tokenize.NEWLINE, '', (1, 0), (1, 0), '')
try:
- for token in tokenize.generate_tokens(readline):
+ for token in tokenutil.generate_tokens_catch_errors(readline):
yield token
except tokenize.TokenError as e:
# catch EOF error
@@ -150,9 +151,22 @@ def find_next_indent(code):
tokens.pop()
if not tokens:
return 0
- while (tokens[-1].type in {tokenize.DEDENT, tokenize.NEWLINE, tokenize.COMMENT}):
+
+ while tokens[-1].type in {
+ tokenize.DEDENT,
+ tokenize.NEWLINE,
+ tokenize.COMMENT,
+ tokenize.ERRORTOKEN,
+ }:
tokens.pop()
+ # Starting in Python 3.12, the tokenize module adds implicit newlines at the end
+ # of input. We need to remove those if we're in a multiline statement
+ if tokens[-1].type == IN_MULTILINE_STATEMENT:
+ while tokens[-2].type in {tokenize.NL}:
+ tokens.pop(-2)
+
+
if tokens[-1].type == INCOMPLETE_STRING:
# Inside a multiline string
return 0