author     robot-piglet <robot-piglet@yandex-team.com>  2024-04-20 09:18:55 +0300
committer  robot-piglet <robot-piglet@yandex-team.com>  2024-04-20 09:39:03 +0300
commit     19be936dfe8ff1852437f6a73bb7919cfb06b8be (patch)
tree       c98e3f559152be2d96b22c2741d19212cb0bf63f
parent     aed0d7a803f63c28bb7eb37540614fecd7676220 (diff)
download   ydb-19be936dfe8ff1852437f6a73bb7919cfb06b8be.tar.gz
Intermediate changes
18 files changed, 705 insertions, 75 deletions
diff --git a/contrib/python/fonttools/.dist-info/METADATA b/contrib/python/fonttools/.dist-info/METADATA
index b374ebc499..60b6e6df80 100644
--- a/contrib/python/fonttools/.dist-info/METADATA
+++ b/contrib/python/fonttools/.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: fonttools
-Version: 4.50.0
+Version: 4.51.0
 Summary: Tools to manipulate font files
 Home-page: http://github.com/fonttools/fonttools
 Author: Just van Rossum
@@ -375,6 +375,12 @@ Have fun!
 Changelog
 ~~~~~~~~~
 
+4.51.0 (released 2024-04-05)
+----------------------------
+
+- [ttLib] Optimization on loading aux fields (#3464).
+- [ttFont] Add reorderGlyphs (#3468).
+
 4.50.0 (released 2024-03-15)
 ----------------------------
 
diff --git a/contrib/python/fonttools/fontTools/__init__.py b/contrib/python/fonttools/fontTools/__init__.py
index ead49e26c8..5621f391f9 100644
--- a/contrib/python/fonttools/fontTools/__init__.py
+++ b/contrib/python/fonttools/fontTools/__init__.py
@@ -3,6 +3,6 @@ from fontTools.misc.loggingTools import configLogger
 
 log = logging.getLogger(__name__)
 
-version = __version__ = "4.50.0"
+version = __version__ = "4.51.0"
 
 __all__ = ["version", "log", "configLogger"]
diff --git a/contrib/python/fonttools/fontTools/ttLib/reorderGlyphs.py b/contrib/python/fonttools/fontTools/ttLib/reorderGlyphs.py
new file mode 100644
index 0000000000..3221261f16
--- /dev/null
+++ b/contrib/python/fonttools/fontTools/ttLib/reorderGlyphs.py
@@ -0,0 +1,278 @@
+"""Reorder glyphs in a font."""
+
+__author__ = "Rod Sheeter"
+
+# See https://docs.google.com/document/d/1h9O-C_ndods87uY0QeIIcgAMiX2gDTpvO_IhMJsKAqs/
+# for details.
+
+
+from fontTools import ttLib
+from fontTools.ttLib.tables import otBase
+from fontTools.ttLib.tables import otTables as ot
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from collections import deque
+from typing import (
+    Optional,
+    Any,
+    Callable,
+    Deque,
+    Iterable,
+    List,
+    NamedTuple,
+    Tuple,
+    Union,
+)
+
+
+_COVERAGE_ATTR = "Coverage"  # tables that have one coverage use this name
+
+
+def _sort_by_gid(
+    get_glyph_id: Callable[[str], int],
+    glyphs: List[str],
+    parallel_list: Optional[List[Any]],
+):
+    if parallel_list:
+        reordered = sorted(
+            ((g, e) for g, e in zip(glyphs, parallel_list)),
+            key=lambda t: get_glyph_id(t[0]),
+        )
+        sorted_glyphs, sorted_parallel_list = map(list, zip(*reordered))
+        parallel_list[:] = sorted_parallel_list
+    else:
+        sorted_glyphs = sorted(glyphs, key=get_glyph_id)
+
+    glyphs[:] = sorted_glyphs
+
+
+def _get_dotted_attr(value: Any, dotted_attr: str) -> Any:
+    attr_names = dotted_attr.split(".")
+    assert attr_names
+
+    while attr_names:
+        attr_name = attr_names.pop(0)
+        value = getattr(value, attr_name)
+    return value
+
+
+class ReorderRule(ABC):
+    """A rule to reorder something in a font to match the fonts glyph order."""
+
+    @abstractmethod
+    def apply(self, font: ttLib.TTFont, value: otBase.BaseTable) -> None: ...
+
+
+@dataclass(frozen=True)
+class ReorderCoverage(ReorderRule):
+    """Reorder a Coverage table, and optionally a list that is sorted parallel to it."""
+
+    # A list that is parallel to Coverage
+    parallel_list_attr: Optional[str] = None
+    coverage_attr: str = _COVERAGE_ATTR
+
+    def apply(self, font: ttLib.TTFont, value: otBase.BaseTable) -> None:
+        coverage = _get_dotted_attr(value, self.coverage_attr)
+
+        if type(coverage) is not list:
+            # Normal path, process one coverage that might have a parallel list
+            parallel_list = None
+            if self.parallel_list_attr:
+                parallel_list = _get_dotted_attr(value, self.parallel_list_attr)
+                assert (
+                    type(parallel_list) is list
+                ), f"{self.parallel_list_attr} should be a list"
+                assert len(parallel_list) == len(coverage.glyphs), "Nothing makes sense"
+
+            _sort_by_gid(font.getGlyphID, coverage.glyphs, parallel_list)
+
+        else:
+            # A few tables have a list of coverage. No parallel list can exist.
+            assert (
+                not self.parallel_list_attr
+            ), f"Can't have multiple coverage AND a parallel list; {self}"
+            for coverage_entry in coverage:
+                _sort_by_gid(font.getGlyphID, coverage_entry.glyphs, None)
+
+
+@dataclass(frozen=True)
+class ReorderList(ReorderRule):
+    """Reorder the items within a list to match the updated glyph order.
+
+    Useful when a list ordered by coverage itself contains something ordered by a gid.
+    For example, the PairSet table of https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#lookup-type-2-pair-adjustment-positioning-subtable.
+    """
+
+    list_attr: str
+    key: str
+
+    def apply(self, font: ttLib.TTFont, value: otBase.BaseTable) -> None:
+        lst = _get_dotted_attr(value, self.list_attr)
+        assert isinstance(lst, list), f"{self.list_attr} should be a list"
+        lst.sort(key=lambda v: font.getGlyphID(getattr(v, self.key)))
+
+
+# (Type, Optional Format) => List[ReorderRule]
+# Encodes the relationships Cosimo identified
+_REORDER_RULES = {
+    # GPOS
+    (ot.SinglePos, 1): [ReorderCoverage()],
+    (ot.SinglePos, 2): [ReorderCoverage(parallel_list_attr="Value")],
+    (ot.PairPos, 1): [ReorderCoverage(parallel_list_attr="PairSet")],
+    (ot.PairSet, None): [ReorderList("PairValueRecord", key="SecondGlyph")],
+    (ot.PairPos, 2): [ReorderCoverage()],
+    (ot.CursivePos, 1): [ReorderCoverage(parallel_list_attr="EntryExitRecord")],
+    (ot.MarkBasePos, 1): [
+        ReorderCoverage(
+            coverage_attr="MarkCoverage", parallel_list_attr="MarkArray.MarkRecord"
+        ),
+        ReorderCoverage(
+            coverage_attr="BaseCoverage", parallel_list_attr="BaseArray.BaseRecord"
+        ),
+    ],
+    (ot.MarkLigPos, 1): [
+        ReorderCoverage(
+            coverage_attr="MarkCoverage", parallel_list_attr="MarkArray.MarkRecord"
+        ),
+        ReorderCoverage(
+            coverage_attr="LigatureCoverage",
+            parallel_list_attr="LigatureArray.LigatureAttach",
+        ),
+    ],
+    (ot.MarkMarkPos, 1): [
+        ReorderCoverage(
+            coverage_attr="Mark1Coverage", parallel_list_attr="Mark1Array.MarkRecord"
+        ),
+        ReorderCoverage(
+            coverage_attr="Mark2Coverage", parallel_list_attr="Mark2Array.Mark2Record"
+        ),
+    ],
+    (ot.ContextPos, 1): [ReorderCoverage(parallel_list_attr="PosRuleSet")],
+    (ot.ContextPos, 2): [ReorderCoverage()],
+    (ot.ContextPos, 3): [ReorderCoverage()],
+    (ot.ChainContextPos, 1): [ReorderCoverage(parallel_list_attr="ChainPosRuleSet")],
+    (ot.ChainContextPos, 2): [ReorderCoverage()],
+    (ot.ChainContextPos, 3): [
+        ReorderCoverage(coverage_attr="BacktrackCoverage"),
+        ReorderCoverage(coverage_attr="InputCoverage"),
+        ReorderCoverage(coverage_attr="LookAheadCoverage"),
+    ],
+    # GSUB
+    (ot.ContextSubst, 1): [ReorderCoverage(parallel_list_attr="SubRuleSet")],
+    (ot.ContextSubst, 2): [ReorderCoverage()],
+    (ot.ContextSubst, 3): [ReorderCoverage()],
+    (ot.ChainContextSubst, 1): [ReorderCoverage(parallel_list_attr="ChainSubRuleSet")],
+    (ot.ChainContextSubst, 2): [ReorderCoverage()],
+    (ot.ChainContextSubst, 3): [
+        ReorderCoverage(coverage_attr="BacktrackCoverage"),
+        ReorderCoverage(coverage_attr="InputCoverage"),
+        ReorderCoverage(coverage_attr="LookAheadCoverage"),
+    ],
+    (ot.ReverseChainSingleSubst, 1): [
+        ReorderCoverage(parallel_list_attr="Substitute"),
+        ReorderCoverage(coverage_attr="BacktrackCoverage"),
+        ReorderCoverage(coverage_attr="LookAheadCoverage"),
+    ],
+    # GDEF
+    (ot.AttachList, None): [ReorderCoverage(parallel_list_attr="AttachPoint")],
+    (ot.LigCaretList, None): [ReorderCoverage(parallel_list_attr="LigGlyph")],
+    (ot.MarkGlyphSetsDef, None): [ReorderCoverage()],
+    # MATH
+    (ot.MathGlyphInfo, None): [ReorderCoverage(coverage_attr="ExtendedShapeCoverage")],
+    (ot.MathItalicsCorrectionInfo, None): [
+        ReorderCoverage(parallel_list_attr="ItalicsCorrection")
+    ],
+    (ot.MathTopAccentAttachment, None): [
+        ReorderCoverage(
+            coverage_attr="TopAccentCoverage", parallel_list_attr="TopAccentAttachment"
+        )
+    ],
+    (ot.MathKernInfo, None): [
+        ReorderCoverage(
+            coverage_attr="MathKernCoverage", parallel_list_attr="MathKernInfoRecords"
+        )
+    ],
+    (ot.MathVariants, None): [
+        ReorderCoverage(
+            coverage_attr="VertGlyphCoverage",
+            parallel_list_attr="VertGlyphConstruction",
+        ),
+        ReorderCoverage(
+            coverage_attr="HorizGlyphCoverage",
+            parallel_list_attr="HorizGlyphConstruction",
+        ),
+    ],
+}
+
+
+# TODO Port to otTraverse
+
+SubTablePath = Tuple[otBase.BaseTable.SubTableEntry, ...]
+
+
+def _bfs_base_table(
+    root: otBase.BaseTable, root_accessor: str
+) -> Iterable[SubTablePath]:
+    yield from _traverse_ot_data(
+        root, root_accessor, lambda frontier, new: frontier.extend(new)
+    )
+
+
+# Given f(current frontier, new entries) add new entries to frontier
+AddToFrontierFn = Callable[[Deque[SubTablePath], List[SubTablePath]], None]
+
+
+def _traverse_ot_data(
+    root: otBase.BaseTable, root_accessor: str, add_to_frontier_fn: AddToFrontierFn
+) -> Iterable[SubTablePath]:
+    # no visited because general otData is forward-offset only and thus cannot cycle
+
+    frontier: Deque[SubTablePath] = deque()
+    frontier.append((otBase.BaseTable.SubTableEntry(root_accessor, root),))
+    while frontier:
+        # path is (value, attr_name) tuples. attr_name is attr of parent to get value
+        path = frontier.popleft()
+        current = path[-1].value
+
+        yield path
+
+        new_entries = []
+        for subtable_entry in current.iterSubTables():
+            new_entries.append(path + (subtable_entry,))
+
+        add_to_frontier_fn(frontier, new_entries)
+
+
+def reorderGlyphs(font: ttLib.TTFont, new_glyph_order: List[str]):
+    old_glyph_order = font.getGlyphOrder()
+    if len(new_glyph_order) != len(old_glyph_order):
+        raise ValueError(
+            f"New glyph order contains {len(new_glyph_order)} glyphs, "
+            f"but font has {len(old_glyph_order)} glyphs"
+        )
+
+    if set(old_glyph_order) != set(new_glyph_order):
+        raise ValueError(
+            "New glyph order does not contain the same set of glyphs as the font:\n"
+            f"* only in new: {set(new_glyph_order) - set(old_glyph_order)}\n"
+            f"* only in old: {set(old_glyph_order) - set(new_glyph_order)}"
+        )
+
+    # Changing the order of glyphs in a TTFont requires that all tables that use
+    # glyph indexes have been fully.
+    # Cf. https://github.com/fonttools/fonttools/issues/2060
+    font.ensureDecompiled()
+    not_loaded = sorted(t for t in font.keys() if not font.isLoaded(t))
+    if not_loaded:
+        raise ValueError(f"Everything should be loaded, following aren't: {not_loaded}")
+
+    font.setGlyphOrder(new_glyph_order)
+
+    coverage_containers = {"GDEF", "GPOS", "GSUB", "MATH"}
+    for tag in coverage_containers:
+        if tag in font.keys():
+            for path in _bfs_base_table(font[tag].table, f'font["{tag}"]'):
+                value = path[-1].value
+                reorder_key = (type(value), getattr(value, "Format", None))
+                for reorder in _REORDER_RULES.get(reorder_key, []):
+                    reorder.apply(font, value)
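
For orientation, here is a minimal sketch of how the new module can be driven; the font file names and the particular permutation are hypothetical, and reorderGlyphs requires the new order to be a permutation of the font's existing glyph set. The ttFont.py hunk further down exposes the same operation as a TTFont.reorderGlyphs method.

    from fontTools.ttLib import TTFont
    from fontTools.ttLib.reorderGlyphs import reorderGlyphs

    font = TTFont("MyFont.ttf")  # hypothetical input font

    # Build a permutation of the current order: keep .notdef at index 0,
    # reverse everything else. Any permutation of the same glyph set works.
    old_order = font.getGlyphOrder()
    new_order = [old_order[0]] + list(reversed(old_order[1:]))

    reorderGlyphs(font, new_order)  # re-sorts Coverage-ordered GSUB/GPOS/GDEF/MATH data
    font.save("MyFont-reordered.ttf")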
diff --git a/contrib/python/fonttools/fontTools/ttLib/tables/otConverters.py b/contrib/python/fonttools/fontTools/ttLib/tables/otConverters.py
index afe4e538f4..a2f672567e 100644
--- a/contrib/python/fonttools/fontTools/ttLib/tables/otConverters.py
+++ b/contrib/python/fonttools/fontTools/ttLib/tables/otConverters.py
@@ -153,6 +153,8 @@ class BaseConverter(object):
         self.name = name
         self.repeat = repeat
         self.aux = aux
+        if self.aux and not self.repeat:
+            self.aux = compile(self.aux, "<string>", "eval")
         self.tableClass = tableClass
         self.isCount = name.endswith("Count") or name in [
             "DesignAxisRecordSize",
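
The two added lines above are the #3464 optimization: an aux entry is a small Python expression that would otherwise be re-parsed by eval() on every field load, so BaseConverter now compiles it to a code object once at construction. A standalone sketch of the pattern (the expression and namespace are illustrative, not fontTools API):

    import timeit

    expr = "version >= 0x00010002"  # the kind of tiny expression an aux field holds
    ns = {"version": 0x00010000}

    code = compile(expr, "<string>", "eval")

    print(timeit.timeit(lambda: eval(expr, ns), number=100_000))  # parses the string every call
    print(timeit.timeit(lambda: eval(code, ns), number=100_000))  # reuses the compiled code

On CPython the precompiled variant is typically several times faster, which adds up when a converter is evaluated once per field per table record.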
diff --git a/contrib/python/fonttools/fontTools/ttLib/ttFont.py b/contrib/python/fonttools/fontTools/ttLib/ttFont.py
index ad62a187de..52e048b5f1 100644
--- a/contrib/python/fonttools/fontTools/ttLib/ttFont.py
+++ b/contrib/python/fonttools/fontTools/ttLib/ttFont.py
@@ -840,6 +840,11 @@ class TTFont(object):
         """
         return self["cmap"].getBestCmap(cmapPreferences=cmapPreferences)
 
+    def reorderGlyphs(self, new_glyph_order):
+        from .reorderGlyphs import reorderGlyphs
+
+        reorderGlyphs(self, new_glyph_order)
+
 
 class GlyphOrder(object):
     """A pseudo table. The glyph order isn't in the font as a separate
diff --git a/contrib/python/fonttools/ya.make b/contrib/python/fonttools/ya.make
index a6872287a9..91be3261df 100644
--- a/contrib/python/fonttools/ya.make
+++ b/contrib/python/fonttools/ya.make
@@ -2,7 +2,7 @@
 
 PY3_LIBRARY()
 
-VERSION(4.50.0)
+VERSION(4.51.0)
 
 LICENSE(MIT)
 
@@ -163,6 +163,7 @@ PY_SRCS(
     fontTools/ttLib/__main__.py
     fontTools/ttLib/macUtils.py
     fontTools/ttLib/removeOverlaps.py
+    fontTools/ttLib/reorderGlyphs.py
     fontTools/ttLib/scaleUpem.py
     fontTools/ttLib/sfnt.py
     fontTools/ttLib/standardGlyphOrder.py
diff --git a/contrib/python/parso/py3/.dist-info/METADATA b/contrib/python/parso/py3/.dist-info/METADATA
index 331fef3a49..10f9cb843e 100644
--- a/contrib/python/parso/py3/.dist-info/METADATA
+++ b/contrib/python/parso/py3/.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: parso
-Version: 0.8.3
+Version: 0.8.4
 Summary: A Python Parser
 Home-page: https://github.com/davidhalter/parso
 Author: David Halter
@@ -26,11 +26,12 @@ Classifier: Topic :: Utilities
 Classifier: Typing :: Typed
 Requires-Python: >=3.6
 Provides-Extra: qa
-Requires-Dist: flake8 (==3.8.3) ; extra == 'qa'
-Requires-Dist: mypy (==0.782) ; extra == 'qa'
+Requires-Dist: flake8 (==5.0.4) ; extra == 'qa'
+Requires-Dist: mypy (==0.971) ; extra == 'qa'
+Requires-Dist: types-setuptools (==67.2.0.1) ; extra == 'qa'
 Provides-Extra: testing
 Requires-Dist: docopt ; extra == 'testing'
-Requires-Dist: pytest (<6.0.0) ; extra == 'testing'
+Requires-Dist: pytest ; extra == 'testing'
 
 ###################################################################
 parso - A Python Parser
@@ -137,6 +138,11 @@ Changelog
 Unreleased
 ++++++++++
 
+0.8.4 (2024-04-05)
+++++++++++++++++++
+
+- Add basic support for Python 3.13
+
 0.8.3 (2021-11-30)
 ++++++++++++++++++
 
diff --git a/contrib/python/parso/py3/parso/__init__.py b/contrib/python/parso/py3/parso/__init__.py
index 0cceabedca..354aff5c25 100644
--- a/contrib/python/parso/py3/parso/__init__.py
+++ b/contrib/python/parso/py3/parso/__init__.py
@@ -43,7 +43,7 @@ from parso.grammar import Grammar, load_grammar
 from parso.utils import split_lines, python_bytes_to_unicode
 
 
-__version__ = '0.8.3'
+__version__ = '0.8.4'
 
 
 def parse(code=None, **kwargs):
diff --git a/contrib/python/parso/py3/parso/grammar.py b/contrib/python/parso/py3/parso/grammar.py
index 1f81148682..9d6f1a1ea0 100644
--- a/contrib/python/parso/py3/parso/grammar.py
+++ b/contrib/python/parso/py3/parso/grammar.py
@@ -107,14 +107,14 @@ class Grammar(Generic[_NodeT]):
 
         if file_io is None:
             if code is None:
-                file_io = FileIO(path)  # type: ignore
+                file_io = FileIO(path)  # type: ignore[arg-type]
             else:
                 file_io = KnownContentFileIO(path, code)
 
         if cache and file_io.path is not None:
             module_node = load_module(self._hashed, file_io, cache_path=cache_path)
             if module_node is not None:
-                return module_node  # type: ignore
+                return module_node  # type: ignore[no-any-return]
 
         if code is None:
             code = file_io.read()
@@ -133,7 +133,7 @@ class Grammar(Generic[_NodeT]):
             module_node = module_cache_item.node
             old_lines = module_cache_item.lines
             if old_lines == lines:
-                return module_node  # type: ignore
+                return module_node  # type: ignore[no-any-return]
 
             new_node = self._diff_parser(
                 self._pgen_grammar, self._tokenizer, module_node
@@ -145,7 +145,7 @@ class Grammar(Generic[_NodeT]):
                 # Never pickle in pypy, it's slow as hell.
                 pickling=cache and not is_pypy,
                 cache_path=cache_path)
-            return new_node  # type: ignore
+            return new_node  # type: ignore[no-any-return]
 
         tokens = self._tokenizer(lines)
 
@@ -161,7 +161,7 @@ class Grammar(Generic[_NodeT]):
             # Never pickle in pypy, it's slow as hell.
             pickling=cache and not is_pypy,
             cache_path=cache_path)
-        return root_node  # type: ignore
+        return root_node  # type: ignore[no-any-return]
 
     def _get_token_namespace(self):
         ns = self._token_namespace
diff --git a/contrib/python/parso/py3/parso/pgen2/generator.py b/contrib/python/parso/py3/parso/pgen2/generator.py
index db6e1cb326..30f0b546b8 100644
--- a/contrib/python/parso/py3/parso/pgen2/generator.py
+++ b/contrib/python/parso/py3/parso/pgen2/generator.py
@@ -276,7 +276,7 @@ def generate_grammar(bnf_grammar: str, token_namespace) -> Grammar:
                 dfa_state.transitions[transition] = DFAPlan(next_dfa)
 
     _calculate_tree_traversal(rule_to_dfas)
-    return Grammar(start_nonterminal, rule_to_dfas, reserved_strings)  # type: ignore
+    return Grammar(start_nonterminal, rule_to_dfas, reserved_strings)  # type: ignore[arg-type]
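
These hunks only narrow bare "# type: ignore" comments to error-code-qualified ones. A toy illustration of the difference (the functions are hypothetical; no-any-return is reported by mypy when --warn-return-any is enabled):

    from typing import Any

    def parse(path: str) -> Any:
        ...

    def load(path: str) -> int:
        # A bare "# type: ignore" would silence every mypy error on this line;
        # the bracketed form silences only the named error code and still
        # lets mypy report anything else it finds here.
        return parse(path)  # type: ignore[no-any-return]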
diff --git a/contrib/python/parso/py3/parso/python/errors.py b/contrib/python/parso/py3/parso/python/errors.py
index 5da046ab01..09c5047b61 100644
--- a/contrib/python/parso/py3/parso/python/errors.py
+++ b/contrib/python/parso/py3/parso/python/errors.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 import codecs
+import sys
 import warnings
 import re
 from contextlib import contextmanager
@@ -33,7 +34,10 @@ def _get_rhs_name(node, version):
             return "literal"
         else:
             if second.children[1] == ":" or second.children[0] == "**":
-                return "dict display"
+                if version < (3, 10):
+                    return "dict display"
+                else:
+                    return "dict literal"
             else:
                 return "set display"
     elif (
@@ -47,7 +51,10 @@ def _get_rhs_name(node, version):
     elif first == "[":
         return "list"
     elif first == "{" and second == "}":
-        return "dict display"
+        if version < (3, 10):
+            return "dict display"
+        else:
+            return "dict literal"
     elif first == "{" and len(node.children) > 2:
         return "set display"
     elif type_ == "keyword":
@@ -58,7 +65,10 @@ def _get_rhs_name(node, version):
         else:
             return str(node.value)
     elif type_ == "operator" and node.value == "...":
-        return "Ellipsis"
+        if version < (3, 10):
+            return "Ellipsis"
+        else:
+            return "ellipsis"
     elif type_ == "comparison":
         return "comparison"
     elif type_ in ("string", "number", "strings"):
@@ -83,7 +93,10 @@ def _get_rhs_name(node, version):
         or "_test" in type_
         or type_ in ("term", "factor")
     ):
-        return "operator"
+        if version < (3, 10):
+            return "operator"
+        else:
+            return "expression"
    elif type_ == "star_expr":
         return "starred"
     elif type_ == "testlist_star_expr":
@@ -610,7 +623,10 @@ class _NameChecks(SyntaxRule):
 
 @ErrorFinder.register_rule(type='string')
 class _StringChecks(SyntaxRule):
-    message = "bytes can only contain ASCII literal characters."
+    if sys.version_info < (3, 10):
+        message = "bytes can only contain ASCII literal characters."
+    else:
+        message = "bytes can only contain ASCII literal characters"
 
     def is_issue(self, leaf):
         string_prefix = leaf.string_prefix.lower()
@@ -1043,14 +1059,20 @@ class _CheckAssignmentRule(SyntaxRule):
                     error = 'literal'
                 else:
                     if second.children[1] == ':':
-                        error = 'dict display'
+                        if self._normalizer.version < (3, 10):
+                            error = 'dict display'
+                        else:
+                            error = 'dict literal'
                     else:
                         error = 'set display'
             elif first == "{" and second == "}":
                 if self._normalizer.version < (3, 8):
                     error = 'literal'
                 else:
-                    error = "dict display"
+                    if self._normalizer.version < (3, 10):
+                        error = "dict display"
+                    else:
+                        error = "dict literal"
             elif first == "{" and len(node.children) > 2:
                 if self._normalizer.version < (3, 8):
                     error = 'literal'
@@ -1083,7 +1105,10 @@ class _CheckAssignmentRule(SyntaxRule):
                 error = str(node.value)
         elif type_ == 'operator':
             if node.value == '...':
-                error = 'Ellipsis'
+                if self._normalizer.version < (3, 10):
+                    error = 'Ellipsis'
+                else:
+                    error = 'ellipsis'
         elif type_ == 'comparison':
             error = 'comparison'
         elif type_ in ('string', 'number', 'strings'):
@@ -1098,7 +1123,10 @@ class _CheckAssignmentRule(SyntaxRule):
                 if node.children[0] == 'await':
                     error = 'await expression'
                 elif node.children[-2] == '**':
-                    error = 'operator'
+                    if self._normalizer.version < (3, 10):
+                        error = 'operator'
+                    else:
+                        error = 'expression'
                 else:
                     # Has a trailer
                     trailer = node.children[-1]
@@ -1120,7 +1148,10 @@ class _CheckAssignmentRule(SyntaxRule):
         elif ('expr' in type_ and type_ != 'star_expr'  # is a substring
               or '_test' in type_
               or type_ in ('term', 'factor')):
-            error = 'operator'
+            if self._normalizer.version < (3, 10):
+                error = 'operator'
+            else:
+                error = 'expression'
         elif type_ == "star_expr":
             if is_deletion:
                 if self._normalizer.version >= (3, 9):
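
The version gates above track CPython's error-message rewording in 3.10 ("dict display" became "dict literal", "Ellipsis" became "ellipsis", "operator" became "expression"). A hedged sketch of how the difference surfaces through parso's public API; the exact message text is approximate:

    import parso

    code = "{1: 2} = x\n"  # assignment to a dict is always a syntax error
    for version in ("3.9", "3.10"):
        grammar = parso.load_grammar(version=version)
        for issue in grammar.iter_errors(grammar.parse(code)):
            print(version, issue.message)
    # expected along the lines of:
    #   3.9  SyntaxError: cannot assign to dict display
    #   3.10 SyntaxError: cannot assign to dict literal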
diff --git a/contrib/python/parso/py3/parso/python/grammar313.txt b/contrib/python/parso/py3/parso/python/grammar313.txt
new file mode 100644
index 0000000000..f092050d88
--- /dev/null
+++ b/contrib/python/parso/py3/parso/python/grammar313.txt
@@ -0,0 +1,169 @@
+# Grammar for Python
+
+# NOTE WELL: You should also follow all the steps listed at
+# https://devguide.python.org/grammar/
+
+# Start symbols for the grammar:
+#       single_input is a single interactive statement;
+#       file_input is a module or sequence of commands read from an input file;
+#       eval_input is the input for the eval() functions.
+# NB: compound_stmt in single_input is followed by extra NEWLINE!
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+file_input: stmt* ENDMARKER
+eval_input: testlist NEWLINE* ENDMARKER
+
+decorator: '@' namedexpr_test NEWLINE
+decorators: decorator+
+decorated: decorators (classdef | funcdef | async_funcdef)
+
+async_funcdef: 'async' funcdef
+funcdef: 'def' NAME parameters ['->' test] ':' suite
+
+parameters: '(' [typedargslist] ')'
+typedargslist: (
+  (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] (
+        ',' tfpdef ['=' test])* ([',' [
+        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+      | '**' tfpdef [',']]])
+  | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]])
+  | '**' tfpdef [',']]] )
+|  (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
+   '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+  | '**' tfpdef [',']]]
+  | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+  | '**' tfpdef [','])
+)
+tfpdef: NAME [':' test]
+varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
+    '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+    | '**' vfpdef [',']]]
+    | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+    | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
+    '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+    | '**' vfpdef [',']]]
+  | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+  | '**' vfpdef [',']
+)
+vfpdef: NAME
+
+stmt: simple_stmt | compound_stmt | NEWLINE
+simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
+             import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
+expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
+                     ('=' (yield_expr|testlist_star_expr))*)
+annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
+testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+            '<<=' | '>>=' | '**=' | '//=')
+# For normal and annotated assignments, additional restrictions enforced by the interpreter
+del_stmt: 'del' exprlist
+pass_stmt: 'pass'
+flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+break_stmt: 'break'
+continue_stmt: 'continue'
+return_stmt: 'return' [testlist_star_expr]
+yield_stmt: yield_expr
+raise_stmt: 'raise' [test ['from' test]]
+import_stmt: import_name | import_from
+import_name: 'import' dotted_as_names
+# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
+import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
+              'import' ('*' | '(' import_as_names ')' | import_as_names))
+import_as_name: NAME ['as' NAME]
+dotted_as_name: dotted_name ['as' NAME]
+import_as_names: import_as_name (',' import_as_name)* [',']
+dotted_as_names: dotted_as_name (',' dotted_as_name)*
+dotted_name: NAME ('.' NAME)*
+global_stmt: 'global' NAME (',' NAME)*
+nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
+assert_stmt: 'assert' test [',' test]
+
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
+async_stmt: 'async' (funcdef | with_stmt | for_stmt)
+if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
+while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
+for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+try_stmt: ('try' ':' suite
+           ((except_clause ':' suite)+
+            ['else' ':' suite]
+            ['finally' ':' suite] |
+           'finally' ':' suite))
+with_stmt: 'with' with_item (',' with_item)* ':' suite
+with_item: test ['as' expr]
+# NB compile.c makes sure that the default except clause is last
+except_clause: 'except' [test ['as' NAME]]
+suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+
+namedexpr_test: test [':=' test]
+test: or_test ['if' or_test 'else' test] | lambdef
+lambdef: 'lambda' [varargslist] ':' test
+or_test: and_test ('or' and_test)*
+and_test: not_test ('and' not_test)*
+not_test: 'not' not_test | comparison
+comparison: expr (comp_op expr)*
+# <> isn't actually a valid comparison operator in Python. It's here for the
+# sake of a __future__ import described in PEP 401 (which really works :-)
+comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+star_expr: '*' expr
+expr: xor_expr ('|' xor_expr)*
+xor_expr: and_expr ('^' and_expr)*
+and_expr: shift_expr ('&' shift_expr)*
+shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+arith_expr: term (('+'|'-') term)*
+term: factor (('*'|'@'|'/'|'%'|'//') factor)*
+factor: ('+'|'-'|'~') factor | power
+power: atom_expr ['**' factor]
+atom_expr: ['await'] atom trailer*
+atom: ('(' [yield_expr|testlist_comp] ')' |
+       '[' [testlist_comp] ']' |
+       '{' [dictorsetmaker] '}' |
+       NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
+testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )
+trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+subscriptlist: subscript (',' subscript)* [',']
+subscript: test [':=' test] | [test] ':' [test] [sliceop]
+sliceop: ':' [test]
+exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
+testlist: test (',' test)* [',']
+dictorsetmaker: ( ((test ':' test | '**' expr)
+                   (comp_for | (',' (test ':' test | '**' expr))* [','])) |
+                  ((test [':=' test] | star_expr)
+                   (comp_for | (',' (test [':=' test] | star_expr))* [','])) )
+
+classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+arglist: argument (',' argument)* [',']
+
+# The reason that keywords are test nodes instead of NAME is that using NAME
+# results in an ambiguity. ast.c makes sure it's a NAME.
+# "test '=' test" is really "keyword '=' test", but we have no such token.
+# These need to be in a single rule to avoid grammar that is ambiguous
+# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
+# we explicitly match '*' here, too, to give it proper precedence.
+# Illegal combinations and orderings are blocked in ast.c:
+# multiple (test comp_for) arguments are blocked; keyword unpackings
+# that precede iterable unpackings are blocked; etc.
+argument: ( test [comp_for] |
+            test ':=' test |
+            test '=' test |
+            '**' test |
+            '*' test )
+
+comp_iter: comp_for | comp_if
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_for: ['async'] sync_comp_for
+comp_if: 'if' or_test [comp_iter]
+
+# not used in grammar, but may appear in "node" passed from Parser to Compiler
+encoding_decl: NAME
+
+yield_expr: 'yield' [yield_arg]
+yield_arg: 'from' test | testlist_star_expr
+
+strings: (STRING | fstring)+
+fstring: FSTRING_START fstring_content* FSTRING_END
+fstring_content: FSTRING_STRING | fstring_expr
+fstring_conversion: '!' NAME
+fstring_expr: '{' (testlist_comp | yield_expr) ['='] [ fstring_conversion ] [ fstring_format_spec ] '}'
+fstring_format_spec: ':' fstring_content*
diff --git a/contrib/python/parso/py3/parso/python/tree.py b/contrib/python/parso/py3/parso/python/tree.py
index ebb4087030..0624e6755d 100644
--- a/contrib/python/parso/py3/parso/python/tree.py
+++ b/contrib/python/parso/py3/parso/python/tree.py
@@ -295,6 +295,8 @@ class FStringEnd(PythonLeaf):
 
 
 class _StringComparisonMixin:
+    __slots__ = ()
+
     def __eq__(self, other):
         """
         Make comparisons with strings easy.
@@ -544,6 +546,7 @@ class Function(ClassOrFunc):
         4. annotation (if present)
     """
     type = 'funcdef'
+    __slots__ = ()
 
     def __init__(self, children):
         super().__init__(children)
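
The two __slots__ = () additions above are the usual memory fix: a class with __slots__ only avoids a per-instance __dict__ if every class in its MRO also defines __slots__, so mixins must declare an empty one. A self-contained illustration (the class names are made up):

    class Slotted:
        __slots__ = ("value",)

    class PlainMixin:          # no __slots__: quietly reintroduces __dict__
        pass

    class LeakyLeaf(PlainMixin, Slotted):
        __slots__ = ()

    print(hasattr(LeakyLeaf(), "__dict__"))  # True - the mixin defeated __slots__

    class LeanMixin:
        __slots__ = ()         # an empty __slots__ keeps the mixin dict-free

    class LeanLeaf(LeanMixin, Slotted):
        __slots__ = ()

    print(hasattr(LeanLeaf(), "__dict__"))   # False - instances stay compact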
diff --git a/contrib/python/parso/py3/tests/test_python_errors.py b/contrib/python/parso/py3/tests/test_python_errors.py
index fe43a301ad..b4986d33f6 100644
--- a/contrib/python/parso/py3/tests/test_python_errors.py
+++ b/contrib/python/parso/py3/tests/test_python_errors.py
@@ -1,6 +1,7 @@
 """
 Testing if parso finds syntax errors and indentation errors.
 """
+import re
 import sys
 import warnings
 
@@ -136,6 +137,28 @@ def _get_actual_exception(code):
         wanted = 'SyntaxError: invalid syntax'
     elif wanted == "SyntaxError: f-string: single '}' is not allowed":
         wanted = 'SyntaxError: invalid syntax'
+    elif "Maybe you meant '==' instead of '='?" in wanted:
+        wanted = wanted.removesuffix(" here. Maybe you meant '==' instead of '='?")
+    elif re.match(
+        r"SyntaxError: unterminated string literal \(detected at line \d+\)", wanted
+    ):
+        wanted = "SyntaxError: EOL while scanning string literal"
+    elif re.match(
+        r"SyntaxError: unterminated triple-quoted string literal \(detected at line \d+\)",
+        wanted,
+    ):
+        wanted = 'SyntaxError: EOF while scanning triple-quoted string literal'
+    elif wanted == 'SyntaxError: cannot use starred expression here':
+        wanted = "SyntaxError: can't use starred expression here"
+    elif wanted == 'SyntaxError: f-string: cannot use starred expression here':
+        wanted = "SyntaxError: f-string: can't use starred expression here"
+    elif re.match(
+        r"IndentationError: expected an indented block after '[^']*' statement on line \d",
+        wanted,
+    ):
+        wanted = 'IndentationError: expected an indented block'
+    elif wanted == 'SyntaxError: unterminated string literal':
+        wanted = 'SyntaxError: EOL while scanning string literal'
 
     return [wanted], line_nr
diff --git a/contrib/python/parso/py3/ya.make b/contrib/python/parso/py3/ya.make
index fa4210f7c5..4a388e26b2 100644
--- a/contrib/python/parso/py3/ya.make
+++ b/contrib/python/parso/py3/ya.make
@@ -2,7 +2,7 @@
 
 PY3_LIBRARY()
 
-VERSION(0.8.3)
+VERSION(0.8.4)
 
 LICENSE(PSF-2.0)
 
@@ -41,6 +41,7 @@ RESOURCE_FILES(
     parso/python/grammar310.txt
     parso/python/grammar311.txt
     parso/python/grammar312.txt
+    parso/python/grammar313.txt
     parso/python/grammar36.txt
     parso/python/grammar37.txt
     parso/python/grammar38.txt
diff --git a/contrib/python/typing-extensions/py3/.dist-info/METADATA b/contrib/python/typing-extensions/py3/.dist-info/METADATA
index 13d06e24b7..5089b4ddde 100644
--- a/contrib/python/typing-extensions/py3/.dist-info/METADATA
+++ b/contrib/python/typing-extensions/py3/.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: typing_extensions
-Version: 4.10.0
+Version: 4.11.0
 Summary: Backported and Experimental Type Hints for Python 3.8+
 Keywords: annotations,backport,checker,checking,function,hinting,hints,type,typechecking,typehinting,typehints,typing
 Author-email: "Guido van Rossum, Jukka Lehtosalo, Ɓukasz Langa, Michael Lee" <levkivskyi@gmail.com>
- """ - if not elen: - raise TypeError(f"{cls} is not a generic class") - if elen is _marker: - if not hasattr(cls, "__parameters__") or not cls.__parameters__: - raise TypeError(f"{cls} is not a generic class") - elen = len(cls.__parameters__) - alen = len(parameters) - if alen != elen: - if hasattr(cls, "__parameters__"): - parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] - num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters) - if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples): - return - raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};" - f" actual {alen}, expected {elen}") - - if sys.version_info >= (3, 10): def _should_collect_from_parameters(t): return isinstance( @@ -181,27 +160,6 @@ else: return isinstance(t, typing._GenericAlias) and not t._special -def _collect_type_vars(types, typevar_types=None): - """Collect all type variable contained in types in order of - first appearance (lexicographic order). For example:: - - _collect_type_vars((T, List[S, T])) == (T, S) - """ - if typevar_types is None: - typevar_types = typing.TypeVar - tvars = [] - for t in types: - if ( - isinstance(t, typevar_types) and - t not in tvars and - not _is_unpack(t) - ): - tvars.append(t) - if _should_collect_from_parameters(t): - tvars.extend([t for t in t.__parameters__ if t not in tvars]) - return tuple(tvars) - - NoReturn = typing.NoReturn # Some unconstrained type variables. These are used by the container types. @@ -834,7 +792,11 @@ def _ensure_subclassable(mro_entries): return inner -if hasattr(typing, "ReadOnly"): +# Update this to something like >=3.13.0b1 if and when +# PEP 728 is implemented in CPython +_PEP_728_IMPLEMENTED = False + +if _PEP_728_IMPLEMENTED: # The standard library TypedDict in Python 3.8 does not store runtime information # about which (if any) keys are optional. See https://bugs.python.org/issue38834 # The standard library TypedDict in Python 3.9.0/1 does not honour the "total" @@ -845,7 +807,8 @@ if hasattr(typing, "ReadOnly"): # Aaaand on 3.12 we add __orig_bases__ to TypedDict # to enable better runtime introspection. # On 3.13 we deprecate some odd ways of creating TypedDicts. - # PEP 705 proposes adding the ReadOnly[] qualifier. + # Also on 3.13, PEP 705 adds the ReadOnly[] qualifier. + # PEP 728 (still pending) makes more changes. TypedDict = typing.TypedDict _TypedDictMeta = typing._TypedDictMeta is_typeddict = typing.is_typeddict @@ -1122,15 +1085,15 @@ else: return val -if hasattr(typing, "Required"): # 3.11+ +if hasattr(typing, "ReadOnly"): # 3.13+ get_type_hints = typing.get_type_hints -else: # <=3.10 +else: # <=3.13 # replaces _strip_annotations() def _strip_extras(t): """Strips Annotated, Required and NotRequired from a given type.""" if isinstance(t, _AnnotatedAlias): return _strip_extras(t.__origin__) - if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired): + if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired, ReadOnly): return _strip_extras(t.__args__[0]) if isinstance(t, typing._GenericAlias): stripped_args = tuple(_strip_extras(a) for a in t.__args__) @@ -2689,9 +2652,151 @@ else: # counting generic parameters, so that when we subscript a generic, # the runtime doesn't try to substitute the Unpack with the subscripted type. if not hasattr(typing, "TypeVarTuple"): + def _check_generic(cls, parameters, elen=_marker): + """Check correct count for parameters of a generic cls (internal helper). + + This gives a nice error message in case of count mismatch. 
+ """ + if not elen: + raise TypeError(f"{cls} is not a generic class") + if elen is _marker: + if not hasattr(cls, "__parameters__") or not cls.__parameters__: + raise TypeError(f"{cls} is not a generic class") + elen = len(cls.__parameters__) + alen = len(parameters) + if alen != elen: + expect_val = elen + if hasattr(cls, "__parameters__"): + parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] + num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters) + if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples): + return + + # deal with TypeVarLike defaults + # required TypeVarLikes cannot appear after a defaulted one. + if alen < elen: + # since we validate TypeVarLike default in _collect_type_vars + # or _collect_parameters we can safely check parameters[alen] + if getattr(parameters[alen], '__default__', None) is not None: + return + + num_default_tv = sum(getattr(p, '__default__', None) + is not None for p in parameters) + + elen -= num_default_tv + + expect_val = f"at least {elen}" + + things = "arguments" if sys.version_info >= (3, 10) else "parameters" + raise TypeError(f"Too {'many' if alen > elen else 'few'} {things}" + f" for {cls}; actual {alen}, expected {expect_val}") +else: + # Python 3.11+ + + def _check_generic(cls, parameters, elen): + """Check correct count for parameters of a generic cls (internal helper). + + This gives a nice error message in case of count mismatch. + """ + if not elen: + raise TypeError(f"{cls} is not a generic class") + alen = len(parameters) + if alen != elen: + expect_val = elen + if hasattr(cls, "__parameters__"): + parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] + + # deal with TypeVarLike defaults + # required TypeVarLikes cannot appear after a defaulted one. + if alen < elen: + # since we validate TypeVarLike default in _collect_type_vars + # or _collect_parameters we can safely check parameters[alen] + if getattr(parameters[alen], '__default__', None) is not None: + return + + num_default_tv = sum(getattr(p, '__default__', None) + is not None for p in parameters) + + elen -= num_default_tv + + expect_val = f"at least {elen}" + + raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments" + f" for {cls}; actual {alen}, expected {expect_val}") + +typing._check_generic = _check_generic + +# Python 3.11+ _collect_type_vars was renamed to _collect_parameters +if hasattr(typing, '_collect_type_vars'): + def _collect_type_vars(types, typevar_types=None): + """Collect all type variable contained in types in order of + first appearance (lexicographic order). 
+    def _collect_type_vars(types, typevar_types=None):
+        """Collect all type variable contained in types in order of
+        first appearance (lexicographic order).
+
+        For example::
+
+            _collect_type_vars((T, List[S, T])) == (T, S)
+        """
+        if typevar_types is None:
+            typevar_types = typing.TypeVar
+        tvars = []
+        # required TypeVarLike cannot appear after TypeVarLike with default
+        default_encountered = False
+        for t in types:
+            if (
+                isinstance(t, typevar_types) and
+                t not in tvars and
+                not _is_unpack(t)
+            ):
+                if getattr(t, '__default__', None) is not None:
+                    default_encountered = True
+                elif default_encountered:
+                    raise TypeError(f'Type parameter {t!r} without a default'
+                                    ' follows type parameter with a default')
+
+                tvars.append(t)
+            if _should_collect_from_parameters(t):
+                tvars.extend([t for t in t.__parameters__ if t not in tvars])
+        return tuple(tvars)
+
     typing._collect_type_vars = _collect_type_vars
-    typing._check_generic = _check_generic
+else:
+    def _collect_parameters(args):
+        """Collect all type variables and parameter specifications in args
+        in order of first appearance (lexicographic order).
+
+        For example::
+
+            assert _collect_parameters((T, Callable[P, T])) == (T, P)
+        """
+        parameters = []
+        # required TypeVarLike cannot appear after TypeVarLike with default
+        default_encountered = False
+        for t in args:
+            if isinstance(t, type):
+                # We don't want __parameters__ descriptor of a bare Python class.
+                pass
+            elif isinstance(t, tuple):
+                # `t` might be a tuple, when `ParamSpec` is substituted with
+                # `[T, int]`, or `[int, *Ts]`, etc.
+                for x in t:
+                    for collected in _collect_parameters([x]):
+                        if collected not in parameters:
+                            parameters.append(collected)
+            elif hasattr(t, '__typing_subst__'):
+                if t not in parameters:
+                    if getattr(t, '__default__', None) is not None:
+                        default_encountered = True
+                    elif default_encountered:
+                        raise TypeError(f'Type parameter {t!r} without a default'
+                                        ' follows type parameter with a default')
+
+                    parameters.append(t)
+            else:
+                for x in getattr(t, '__parameters__', ()):
+                    if x not in parameters:
+                        parameters.append(x)
+
+        return tuple(parameters)
+
+    typing._collect_parameters = _collect_parameters
 
 # Backport typing.NamedTuple as it exists in Python 3.13.
 # In 3.11, the ability to define generic `NamedTuple`s was supported.
diff --git a/contrib/python/typing-extensions/py3/ya.make b/contrib/python/typing-extensions/py3/ya.make
index 6a099000e4..293ccb585e 100644
--- a/contrib/python/typing-extensions/py3/ya.make
+++ b/contrib/python/typing-extensions/py3/ya.make
@@ -2,7 +2,7 @@
 
 PY3_LIBRARY()
 
-VERSION(4.10.0)
+VERSION(4.11.0)
 
 LICENSE(PSF-2.0)
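
Most of the typing_extensions churn above exists to honour PEP 696 type-parameter defaults inside the patched arity check: a missing subscript argument is no longer an error when the corresponding parameter declares a default. A hedged sketch (requires a typing_extensions release with PEP 696 support; the exact error wording varies by Python version):

    from typing import Generic
    from typing_extensions import TypeVar

    T = TypeVar("T")
    U = TypeVar("U", default=int)   # PEP 696 default

    class Pair(Generic[T, U]):
        pass

    Pair[str]         # accepted: U falls back to its default
    Pair[str, float]  # accepted: both parameters explicit

    # Without the default on U, Pair[str] would raise something like:
    # TypeError: Too few arguments for <class 'Pair'>; actual 1, expected 2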