author    robot-piglet <robot-piglet@yandex-team.com>  2025-02-15 09:34:32 +0300
committer robot-piglet <robot-piglet@yandex-team.com>  2025-02-15 11:36:46 +0300
commit    b78775e5a25dfb7551cdc06dba96cdfe6e9bd6fb (patch)
tree      8fdac4a27404b9036f50883e9afe4e3f37c4c39a /contrib
parent    784038d7404cb679026c8cc19204497e8411c75a (diff)
download  ydb-b78775e5a25dfb7551cdc06dba96cdfe6e9bd6fb.tar.gz
Intermediate changes
commit_hash:293c725da86af9df83cab900e86bf2b75cc6b4e8
Diffstat (limited to 'contrib')
-rw-r--r--  contrib/python/ipython/py3/.dist-info/METADATA                            7
-rw-r--r--  contrib/python/ipython/py3/IPython/core/interactiveshell.py               6
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/execution.py              16
-rw-r--r--  contrib/python/ipython/py3/IPython/core/magics/script.py                 35
-rw-r--r--  contrib/python/ipython/py3/IPython/core/release.py                        2
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/interactiveshell.py          49
-rw-r--r--  contrib/python/ipython/py3/IPython/terminal/shortcuts/auto_suggest.py   279
-rw-r--r--  contrib/python/ipython/py3/IPython/utils/_sysinfo.py                      2
-rw-r--r--  contrib/python/ipython/py3/ya.make                                         2
9 files changed, 365 insertions(+), 33 deletions(-)
diff --git a/contrib/python/ipython/py3/.dist-info/METADATA b/contrib/python/ipython/py3/.dist-info/METADATA
index b6a222768c..590a77f53f 100644
--- a/contrib/python/ipython/py3/.dist-info/METADATA
+++ b/contrib/python/ipython/py3/.dist-info/METADATA
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
Name: ipython
-Version: 8.31.0
+Version: 8.32.0
Summary: IPython: Productive Interactive Computing
Author: The IPython Development Team
Author-email: ipython-dev@python.org
@@ -85,6 +85,9 @@ Requires-Dist: matplotlib; extra == "matplotlib"
Provides-Extra: all
Requires-Dist: ipython[black,doc,kernel,matplotlib,nbconvert,nbformat,notebook,parallel,qtconsole]; extra == "all"
Requires-Dist: ipython[test,test_extra]; extra == "all"
+Dynamic: author
+Dynamic: author-email
+Dynamic: license
IPython provides a rich toolkit to help you make the most out of using Python
interactively. Its main components are:
diff --git a/contrib/python/ipython/py3/IPython/core/interactiveshell.py b/contrib/python/ipython/py3/IPython/core/interactiveshell.py
index 07fb807760..a341ab053a 100644
--- a/contrib/python/ipython/py3/IPython/core/interactiveshell.py
+++ b/contrib/python/ipython/py3/IPython/core/interactiveshell.py
@@ -900,7 +900,7 @@ class InteractiveShell(SingletonConfigurable):
return
p = Path(sys.executable)
- p_venv = Path(os.environ["VIRTUAL_ENV"])
+ p_venv = Path(os.environ["VIRTUAL_ENV"]).resolve()
# fallback venv detection:
# stdlib venv may symlink sys.executable, so we can't use realpath.
@@ -913,7 +913,7 @@ class InteractiveShell(SingletonConfigurable):
drive_name = p_venv.parts[2]
p_venv = (drive_name + ":/") / Path(*p_venv.parts[3:])
- if any(p_venv == p.parents[1] for p in paths):
+ if any(p_venv == p.parents[1].resolve() for p in paths):
# Our exe is inside or has access to the virtualenv, don't need to do anything.
return
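A note on the two `.resolve()` calls in the hunk above: `$VIRTUAL_ENV` may point at (or through) a symlink, in which case an equality test between unresolved paths fails even though the interpreter really does live inside the venv. A minimal sketch of the comparison, assuming a hypothetical layout where /opt/venvs/app symlinks to /data/venvs/app:

    import os
    from pathlib import Path

    # Hypothetical: /opt/venvs/app -> /data/venvs/app (symlink).
    os.environ["VIRTUAL_ENV"] = "/opt/venvs/app"
    p_venv = Path(os.environ["VIRTUAL_ENV"]).resolve()  # -> /data/venvs/app
    exe = Path("/data/venvs/app/bin/python")
    # parents[1] strips "bin/python"; resolving both sides makes them canonical.
    print(p_venv == exe.parents[1].resolve())           # True only thanks to .resolve()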
@@ -2093,6 +2093,8 @@ class InteractiveShell(SingletonConfigurable):
sys.last_type = etype
sys.last_value = value
sys.last_traceback = tb
+ if sys.version_info >= (3, 12):
+ sys.last_exc = value
return etype, value, tb
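Python 3.12 introduced `sys.last_exc` as a single-object companion to the older `sys.last_type` / `sys.last_value` / `sys.last_traceback` triple, which is why the assignment above is version-guarded. The same pattern outside IPython, as a minimal sketch:

    import sys

    def record_last_exception(exc: BaseException) -> None:
        # Legacy triple, present on all supported Python versions.
        sys.last_type = type(exc)
        sys.last_value = exc
        sys.last_traceback = exc.__traceback__
        # Single-object form, only available on Python 3.12+.
        if sys.version_info >= (3, 12):
            sys.last_exc = exc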
diff --git a/contrib/python/ipython/py3/IPython/core/magics/execution.py b/contrib/python/ipython/py3/IPython/core/magics/execution.py
index 3aa0a27fc2..ec17d0a497 100644
--- a/contrib/python/ipython/py3/IPython/core/magics/execution.py
+++ b/contrib/python/ipython/py3/IPython/core/magics/execution.py
@@ -977,7 +977,21 @@ class ExecutionMagics(Magics):
break
finally:
sys.settrace(trace)
-
+
+    # Perform proper cleanup of the session in case it exited
+    # with the "continue" rather than the "quit" command
+    if hasattr(deb, "rcLines"):
+        # Run this code defensively, in case a custom debugger
+        # class does not implement rcLines, which, although public,
+        # is an implementation detail of `pdb.Pdb` and not part of
+        # the more generic basic debugger framework (`bdb.Bdb`).
+ deb.set_quit()
+ deb.rcLines.extend(["q"])
+ try:
+ deb.run("", code_ns, local_ns)
+ except StopIteration:
+            # StopIteration is raised on the quit command
+ pass
except:
etype, value, tb = sys.exc_info()
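For readers unfamiliar with the cleanup above: `rcLines` is the list of startup commands that `pdb.Pdb` replays before prompting (normally loaded from `.pdbrc`), so queuing `"q"` and re-entering the debugger forces a quit even when the user left the session with `continue`. A rough standalone sketch of the same idea, assuming an already-used `pdb.Pdb` instance:

    import bdb
    import pdb

    deb = pdb.Pdb()
    # ... imagine a debugging session here that the user left with "continue" ...
    if hasattr(deb, "rcLines"):    # rcLines is a pdb.Pdb detail, absent from bdb.Bdb
        deb.set_quit()
        deb.rcLines.extend(["q"])  # replayed as a startup command on the next run
        try:
            deb.run("")            # drains rcLines; quitting surfaces as an exception
        except (bdb.BdbQuit, StopIteration):
            pass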
diff --git a/contrib/python/ipython/py3/IPython/core/magics/script.py b/contrib/python/ipython/py3/IPython/core/magics/script.py
index 0c405ef420..3bfc4d8d67 100644
--- a/contrib/python/ipython/py3/IPython/core/magics/script.py
+++ b/contrib/python/ipython/py3/IPython/core/magics/script.py
@@ -67,6 +67,10 @@ def script_args(f):
return f
+class RaiseAfterInterrupt(Exception):
+ pass
+
+
@magics_class
class ScriptMagics(Magics):
"""Magics for talking to scripts
@@ -176,6 +180,10 @@ class ScriptMagics(Magics):
The rest of the cell is run by that program.
+ .. versionchanged:: 9.0
+      Interrupting a script executed without `--bg` now ends in an
+      exception being raised (unless `--no-raise-error` is passed).
+
Examples
--------
::
@@ -212,7 +220,7 @@ class ScriptMagics(Magics):
async def _readchunk(stream):
try:
- return await stream.readuntil(b"\n")
+ return await stream.read(100)
except asyncio.exceptions.IncompleteReadError as e:
return e.partial
except asyncio.exceptions.LimitOverrunError as e:
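The `_readchunk` change above trades `readuntil(b"\n")` for `read(100)`, presumably so that subprocess output without a trailing newline (a prompt, a progress bar) still streams promptly instead of waiting for a line terminator. The behavioural difference, roughly, under that reading:

    import asyncio

    async def by_line(stream: asyncio.StreamReader) -> bytes:
        # Waits for b"\n"; a dangling prompt like b"password: " stays stuck.
        return await stream.readuntil(b"\n")

    async def by_chunk(stream: asyncio.StreamReader) -> bytes:
        # Returns up to 100 bytes as soon as anything is available, newline or not.
        return await stream.read(100)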
@@ -292,20 +300,33 @@ class ScriptMagics(Magics):
p.send_signal(signal.SIGINT)
in_thread(asyncio.wait_for(p.wait(), timeout=0.1))
if p.returncode is not None:
- print("Process is interrupted.")
- return
+ print("Process was interrupted.")
+ if args.raise_error:
+ raise RaiseAfterInterrupt()
+ else:
+ return
p.terminate()
in_thread(asyncio.wait_for(p.wait(), timeout=0.1))
if p.returncode is not None:
- print("Process is terminated.")
- return
+ print("Process was terminated.")
+ if args.raise_error:
+ raise RaiseAfterInterrupt()
+ else:
+ return
p.kill()
- print("Process is killed.")
+ print("Process was killed.")
+ if args.raise_error:
+ raise RaiseAfterInterrupt()
+ except RaiseAfterInterrupt:
+ pass
except OSError:
pass
except Exception as e:
print("Error while terminating subprocess (pid=%i): %s" % (p.pid, e))
- return
+ if args.raise_error:
+ raise CalledProcessError(p.returncode, cell) from None
+ else:
+ return
if args.raise_error and p.returncode != 0:
# If we get here and p.returncode is still None, we must have
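Taken together, the script.py changes mean that interrupting a foreground `%%script` cell now raises instead of returning silently, matching the `.. versionchanged:: 9.0` note above. A hedged usage sketch, as two separate IPython cells:

    %%script bash
    # Ctrl-C here now ends in an exception being raised...
    sleep 60

    %%script --no-raise-error bash
    # ...unless --no-raise-error is passed, restoring the old silent return.
    sleep 60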
diff --git a/contrib/python/ipython/py3/IPython/core/release.py b/contrib/python/ipython/py3/IPython/core/release.py
index 06917bb8ae..a21f446949 100644
--- a/contrib/python/ipython/py3/IPython/core/release.py
+++ b/contrib/python/ipython/py3/IPython/core/release.py
@@ -16,7 +16,7 @@
# release. 'dev' as a _version_extra string means this is a development
# version
_version_major = 8
-_version_minor = 31
+_version_minor = 32
_version_patch = 0
_version_extra = ".dev"
# _version_extra = "rc1"
diff --git a/contrib/python/ipython/py3/IPython/terminal/interactiveshell.py b/contrib/python/ipython/py3/IPython/terminal/interactiveshell.py
index ef4f5cd3f6..ba9a31135a 100644
--- a/contrib/python/ipython/py3/IPython/terminal/interactiveshell.py
+++ b/contrib/python/ipython/py3/IPython/terminal/interactiveshell.py
@@ -26,7 +26,10 @@ from traitlets import (
Any,
validate,
Float,
+ DottedObjectName,
)
+from traitlets.utils.importstring import import_item
+
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.enums import DEFAULT_BUFFER, EditingMode
@@ -214,7 +217,9 @@ class TerminalInteractiveShell(InteractiveShell):
pt_app: UnionType[PromptSession, None] = None
auto_suggest: UnionType[
- AutoSuggestFromHistory, NavigableAutoSuggestFromHistory, None
+ AutoSuggestFromHistory,
+ NavigableAutoSuggestFromHistory,
+ None,
] = None
debugger_history = None
@@ -421,6 +426,37 @@ class TerminalInteractiveShell(InteractiveShell):
allow_none=True,
).tag(config=True)
+ llm_provider_class = DottedObjectName(
+ None,
+ allow_none=True,
+ help="""\
+ Provisional:
+        This is a provisional API in IPython 8.32; before stabilisation
+        in 9.0 it may change without warning.
+
+        Class to use with `NavigableAutoSuggestFromHistory` to request
+        completions from an LLM; it should inherit from
+        `jupyter_ai_magics:BaseProvider` and implement
+        `stream_inline_completions`.
+ """,
+ ).tag(config=True)
+
+ @observe("llm_provider_class")
+ def _llm_provider_class_changed(self, change):
+ provider_class = change.new
+ if provider_class is not None:
+ warn(
+ "TerminalInteractiveShell.llm_provider_class is a provisional"
+ " API as of IPython 8.32, and may change without warnings."
+ )
+ if isinstance(self.auto_suggest, NavigableAutoSuggestFromHistory):
+ self.auto_suggest._llm_provider = provider_class()
+ else:
+            self.log.warning(
+                "llm_provider_class only has an effect when using "
+                "`NavigableAutoSuggestFromHistory` as auto_suggest."
+ )
+
def _set_autosuggestions(self, provider):
# disconnect old handler
if self.auto_suggest and isinstance(
@@ -432,7 +468,15 @@ class TerminalInteractiveShell(InteractiveShell):
elif provider == "AutoSuggestFromHistory":
self.auto_suggest = AutoSuggestFromHistory()
elif provider == "NavigableAutoSuggestFromHistory":
+            # LLM support is all provisional in 8.32
+ if self.llm_provider_class:
+ llm_provider_constructor = import_item(self.llm_provider_class)
+ llm_provider = llm_provider_constructor()
+ else:
+ llm_provider = None
self.auto_suggest = NavigableAutoSuggestFromHistory()
+            # Provisional in 8.32
+ self.auto_suggest._llm_provider = llm_provider
else:
raise ValueError("No valid provider.")
if self.pt_app:
@@ -815,7 +859,8 @@ class TerminalInteractiveShell(InteractiveShell):
& ~IsDone()
& Condition(
lambda: isinstance(
- self.auto_suggest, NavigableAutoSuggestFromHistory
+ self.auto_suggest,
+ NavigableAutoSuggestFromHistory,
)
),
),
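For anyone who wants to try the provisional trait added above, configuration would look roughly like this; the dotted name is a placeholder, and any class inheriting `jupyter_ai_magics:BaseProvider` and implementing `stream_inline_completions` should work:

    # ~/.ipython/profile_default/ipython_config.py
    c = get_config()  # noqa
    c.TerminalInteractiveShell.autosuggestions_provider = "NavigableAutoSuggestFromHistory"
    # Placeholder dotted name -- substitute a real jupyter-ai provider class.
    c.TerminalInteractiveShell.llm_provider_class = "my_pkg.providers.MyInlineProvider"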
diff --git a/contrib/python/ipython/py3/IPython/terminal/shortcuts/auto_suggest.py b/contrib/python/ipython/py3/IPython/terminal/shortcuts/auto_suggest.py
index 94a94a88c1..bcba5622e4 100644
--- a/contrib/python/ipython/py3/IPython/terminal/shortcuts/auto_suggest.py
+++ b/contrib/python/ipython/py3/IPython/terminal/shortcuts/auto_suggest.py
@@ -1,13 +1,15 @@
import re
+import asyncio
import tokenize
from io import StringIO
-from typing import Callable, List, Optional, Union, Generator, Tuple
+from typing import Callable, List, Optional, Union, Generator, Tuple, ClassVar, Any
import warnings
+import prompt_toolkit
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.key_binding import KeyPressEvent
from prompt_toolkit.key_binding.bindings import named_commands as nc
-from prompt_toolkit.auto_suggest import AutoSuggestFromHistory, Suggestion
+from prompt_toolkit.auto_suggest import AutoSuggestFromHistory, Suggestion, AutoSuggest
from prompt_toolkit.document import Document
from prompt_toolkit.history import History
from prompt_toolkit.shortcuts import PromptSession
@@ -22,6 +24,12 @@ from IPython.utils.tokenutil import generate_tokens
from .filters import pass_through
+try:
+ import jupyter_ai_magics
+ import jupyter_ai.completions.models as jai_models
+except ModuleNotFoundError:
+ jai_models = None
+
def _get_query(document: Document):
return document.lines[document.cursor_position_row]
@@ -31,26 +39,124 @@ class AppendAutoSuggestionInAnyLine(Processor):
"""
Append the auto suggestion to lines other than the last (appending to the
last line is natively supported by the prompt toolkit).
+
+    This has a private `_debug` attribute that can be set to True to display
+    debug information as a virtual suggestion at the end of any line. You can
+    do so with:
+
+ >>> from IPython.terminal.shortcuts.auto_suggest import AppendAutoSuggestionInAnyLine
+ >>> AppendAutoSuggestionInAnyLine._debug = True
+
"""
+ _debug: ClassVar[bool] = False
+
def __init__(self, style: str = "class:auto-suggestion") -> None:
self.style = style
def apply_transformation(self, ti: TransformationInput) -> Transformation:
- is_last_line = ti.lineno == ti.document.line_count - 1
- is_active_line = ti.lineno == ti.document.cursor_position_row
+ """
+ Apply transformation to the line that is currently being edited.
- if not is_last_line and is_active_line:
- buffer = ti.buffer_control.buffer
+        This is a variation of the original implementation in prompt toolkit
+        that not only appends suggestions to any line, but also shows
+        multi-line suggestions.
- if buffer.suggestion and ti.document.is_cursor_at_the_end_of_line:
- suggestion = buffer.suggestion.text
- else:
- suggestion = ""
+        As transformations are applied on a line-by-line basis, we need to
+        trick a bit, and elide any line that is after the line we are
+        currently editing, until we run out of completions. We cannot shift
+        the existing lines.
+
+        There are multiple cases to handle:
+
+        The completion ends before the end of the buffer:
+            We can resume showing the normal line, and say that some code may
+            be hidden.
+
+        The completion ends at the end of the buffer:
+            We can just say that some code may be hidden.
+
+        And separately:
+
+        The completion ends beyond the end of the buffer:
+            We need to both say that some code may be hidden, and that some
+            lines are not shown.
+
+ """
+ last_line_number = ti.document.line_count - 1
+ is_last_line = ti.lineno == last_line_number
+
+ noop = lambda text: Transformation(
+ fragments=ti.fragments + [(self.style, " " + text if self._debug else "")]
+ )
+ if ti.document.line_count == 1:
+ return noop("noop:oneline")
+ if ti.document.cursor_position_row == last_line_number and is_last_line:
+ # prompt toolkit already appends something; just leave it be
+ return noop("noop:last line and cursor")
+
+ # first everything before the current line is unchanged.
+ if ti.lineno < ti.document.cursor_position_row:
+ return noop("noop:before cursor")
+
+ buffer = ti.buffer_control.buffer
+ if not buffer.suggestion or not ti.document.is_cursor_at_the_end_of_line:
+ return noop("noop:not eol")
+
+ delta = ti.lineno - ti.document.cursor_position_row
+ suggestions = buffer.suggestion.text.splitlines()
+ if len(suggestions) == 0:
+ return noop("noop: no suggestions")
+
+ suggestions_longer_than_buffer: bool = (
+ len(suggestions) + ti.document.cursor_position_row > ti.document.line_count
+ )
+
+ if len(suggestions) >= 1 and prompt_toolkit.VERSION < (3, 0, 49):
+ if ti.lineno == ti.document.cursor_position_row:
+ return Transformation(
+ fragments=ti.fragments
+ + [
+ (
+ "red",
+ "(Cannot show multiline suggestion; requires prompt_toolkit > 3.0.49)",
+ )
+ ]
+ )
+ else:
+ return Transformation(fragments=ti.fragments)
+ if delta == 0:
+ suggestion = suggestions[0]
return Transformation(fragments=ti.fragments + [(self.style, suggestion)])
+ if is_last_line:
+ if delta < len(suggestions):
+ suggestion = f"… rest of suggestion ({len(suggestions) - delta} lines) and code hidden"
+ return Transformation([(self.style, suggestion)])
+
+ n_elided = len(suggestions)
+ for i in range(len(suggestions)):
+ ll = ti.get_line(last_line_number - i)
+ el = "".join(l[1] for l in ll).strip()
+ if el:
+ break
+ else:
+ n_elided -= 1
+ if n_elided:
+ return Transformation([(self.style, f"… {n_elided} line(s) hidden")])
+ else:
+ return Transformation(
+ ti.get_line(last_line_number - len(suggestions) + 1)
+ + ([(self.style, "shift-last-line")] if self._debug else [])
+ )
+
+ elif delta < len(suggestions):
+ suggestion = suggestions[delta]
+ return Transformation([(self.style, suggestion)])
else:
- return Transformation(fragments=ti.fragments)
+ shift = ti.lineno - len(suggestions) + 1
+ return Transformation(ti.get_line(shift))
class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory):
@@ -60,16 +166,29 @@ class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory):
state need to carefully be cleared on the right events.
"""
- def __init__(
- self,
- ):
+ skip_lines: int
+ _connected_apps: list[PromptSession]
+
+    # Handle to the currently running llm task that appends suggestions to
+    # the current buffer; we keep it so that we can cancel it when there is
+    # a cursor movement or another request.
+ _llm_task: asyncio.Task | None = None
+
+ # This is the instance of the LLM provider from jupyter-ai to which we forward the request
+ # to generate inline completions.
+ _llm_provider: Any | None
+
+ def __init__(self):
+ super().__init__()
self.skip_lines = 0
self._connected_apps = []
+ self._llm_provider = None
def reset_history_position(self, _: Buffer):
self.skip_lines = 0
- def disconnect(self):
+ def disconnect(self) -> None:
+ self._cancel_running_llm_task()
for pt_app in self._connected_apps:
text_insert_event = pt_app.default_buffer.on_text_insert
text_insert_event.remove_handler(self.reset_history_position)
@@ -94,7 +213,8 @@ class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory):
return None
- def _dismiss(self, buffer, *args, **kwargs):
+ def _dismiss(self, buffer, *args, **kwargs) -> None:
+ self._cancel_running_llm_task()
buffer.suggestion = None
def _find_match(
@@ -149,6 +269,7 @@ class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory):
)
def up(self, query: str, other_than: str, history: History) -> None:
+ self._cancel_running_llm_task()
for suggestion, line_number in self._find_next_match(
query, self.skip_lines, history
):
@@ -165,6 +286,7 @@ class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory):
self.skip_lines = 0
def down(self, query: str, other_than: str, history: History) -> None:
+ self._cancel_running_llm_task()
for suggestion, line_number in self._find_previous_match(
query, self.skip_lines, history
):
@@ -180,6 +302,131 @@ class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory):
self.skip_lines = line_number
break
+ def _cancel_running_llm_task(self) -> None:
+ """
+        Try to cancel the currently running llm_task, if any, and set it to None.
+ """
+ if self._llm_task is not None:
+ if self._llm_task.done():
+ self._llm_task = None
+ return
+ cancelled = self._llm_task.cancel()
+ if cancelled:
+ self._llm_task = None
+        else:
+ warnings.warn(
+ "LLM task not cancelled, does your provider support cancellation?"
+ )
+
+ async def _trigger_llm(self, buffer) -> None:
+ """
+        Ask the current llm provider for a suggestion for the current buffer.
+
+ If there is a currently running llm task, it will cancel it.
+ """
+ # we likely want to store the current cursor position, and cancel if the cursor has moved.
+ if not self._llm_provider:
+ warnings.warn("No LLM provider found, cannot trigger LLM completions")
+ return
+        if jai_models is None:
+            warnings.warn(
+                "LLM Completion requires `jupyter_ai_magics` and `jupyter_ai` to be installed"
+            )
+            return
+
+ self._cancel_running_llm_task()
+
+ async def error_catcher(buffer):
+ """
+            This catches and logs any errors, which would otherwise just be
+            lost in the void of the running future task.
+ """
+ try:
+ await self._trigger_llm_core(buffer)
+ except Exception as e:
+                get_ipython().log.error("Error while fetching LLM completion: %s", e)
+ raise
+
+        # here we need a cancellable task, so we can't just await the error catcher
+ self._llm_task = asyncio.create_task(error_catcher(buffer))
+ await self._llm_task
+
+ async def _trigger_llm_core(self, buffer: Buffer):
+ """
+ This is the core of the current llm request.
+
+        Here we build a compatible `InlineCompletionRequest` and ask the llm
+        provider to stream its response back to us, iteratively setting it as
+        the suggestion on the current buffer.
+
+        Unlike with JupyterAI, as we do not have multiple cells, the cell
+        number is always set to `0`; note that we _could_ set it to a new
+        number each time and ignore replies from past numbers.
+
+        We set the prefix to the current cell content, but could also insert
+        the rest of the history, or even just the non-failing history.
+
+        In the same way, we do not have a cell id.
+
+        The LLM provider may return multiple suggestion streams, but for the
+        time being we only support one.
+
+        Here we make the assumption that the provider implements
+        stream_inline_completions; I'm not sure it is the case for all
+        providers.
+ """
+
+ request = jai_models.InlineCompletionRequest(
+ number=0,
+ prefix=buffer.document.text,
+ suffix="",
+ mime="text/x-python",
+ stream=True,
+ path=None,
+ language="python",
+ cell_id=None,
+ )
+
+ async for reply_and_chunks in self._llm_provider.stream_inline_completions(
+ request
+ ):
+ if isinstance(reply_and_chunks, jai_models.InlineCompletionReply):
+ if len(reply_and_chunks.list.items) > 1:
+ raise ValueError(
+ "Terminal IPython cannot deal with multiple LLM suggestions at once"
+ )
+ buffer.suggestion = Suggestion(
+ reply_and_chunks.list.items[0].insertText
+ )
+ buffer.on_suggestion_set.fire()
+ elif isinstance(reply_and_chunks, jai_models.InlineCompletionStreamChunk):
+ buffer.suggestion = Suggestion(reply_and_chunks.response.insertText)
+ buffer.on_suggestion_set.fire()
+ return
+
+
+_MIN_LINES = 5
+
+
+async def llm_autosuggestion(event: KeyPressEvent):
+ """
+    Ask the history AutoSuggester to delegate to an LLM for completion.
+
+    This will first make sure that the current buffer has _MIN_LINES (5)
+    available lines in which to insert the LLM completion.
+
+    Provisional as of 8.32, may change without warning.
+
+ """
+ provider = get_ipython().auto_suggest
+ if not isinstance(provider, NavigableAutoSuggestFromHistory):
+ return
+ doc = event.current_buffer.document
+ lines_to_insert = max(0, _MIN_LINES - doc.line_count + doc.cursor_position_row)
+ for _ in range(lines_to_insert):
+ event.current_buffer.insert_text("\n", move_cursor=False)
+
+ await provider._trigger_llm(event.current_buffer)
+
def accept_or_jump_to_end(event: KeyPressEvent):
"""Apply autosuggestion or jump to end of line."""
diff --git a/contrib/python/ipython/py3/IPython/utils/_sysinfo.py b/contrib/python/ipython/py3/IPython/utils/_sysinfo.py
index 44fbbc4530..fbb89d3aa8 100644
--- a/contrib/python/ipython/py3/IPython/utils/_sysinfo.py
+++ b/contrib/python/ipython/py3/IPython/utils/_sysinfo.py
@@ -1,2 +1,2 @@
# GENERATED BY setup.py
-commit = "22d6a1c16"
+commit = "56a70e42d"
diff --git a/contrib/python/ipython/py3/ya.make b/contrib/python/ipython/py3/ya.make
index 950e693736..adc99f5b64 100644
--- a/contrib/python/ipython/py3/ya.make
+++ b/contrib/python/ipython/py3/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(8.31.0)
+VERSION(8.32.0)
LICENSE(BSD-3-Clause)