author     robot-piglet <[email protected]>  2025-05-07 16:01:33 +0300
committer  robot-piglet <[email protected]>  2025-05-07 16:40:48 +0300
commit     bab8f9e181aeff66a037d892252ff3d972922bd6 (patch)
tree       560f3a6e6e512e2b8e303de2d0a49bad9c9fc9d5 /contrib/python/more-itertools/py3
parent     6c0a53e80dc1a7177112f16be38041f54df54af7 (diff)
Intermediate changes
commit_hash:b9d0dab354258c30c639c4cd045b559a38836838
Diffstat (limited to 'contrib/python/more-itertools/py3')
-rw-r--r--  contrib/python/more-itertools/py3/.dist-info/METADATA          |   7
-rw-r--r--  contrib/python/more-itertools/py3/README.rst                   |   3
-rw-r--r--  contrib/python/more-itertools/py3/more_itertools/__init__.py   |   2
-rw-r--r--  contrib/python/more-itertools/py3/more_itertools/more.py       | 182
-rw-r--r--  contrib/python/more-itertools/py3/more_itertools/more.pyi      |  18
-rw-r--r--  contrib/python/more-itertools/py3/more_itertools/recipes.py    | 270
-rw-r--r--  contrib/python/more-itertools/py3/more_itertools/recipes.pyi   |   2
-rw-r--r--  contrib/python/more-itertools/py3/tests/test_more.py           |  35
-rw-r--r--  contrib/python/more-itertools/py3/tests/test_recipes.py        |  55
-rw-r--r--  contrib/python/more-itertools/py3/ya.make                      |   2
10 files changed, 382 insertions, 194 deletions
diff --git a/contrib/python/more-itertools/py3/.dist-info/METADATA b/contrib/python/more-itertools/py3/.dist-info/METADATA
index 72be20349f3..847e174a706 100644
--- a/contrib/python/more-itertools/py3/.dist-info/METADATA
+++ b/contrib/python/more-itertools/py3/.dist-info/METADATA
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.3
Name: more-itertools
-Version: 10.6.0
+Version: 10.7.0
Summary: More routines for operating on iterables, beyond itertools
Keywords: itertools,iterator,iteration,filter,peek,peekable,chunk,chunked
Author-email: Erik Rose <[email protected]>
@@ -144,8 +144,9 @@ Python iterables.
| | `dotproduct <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.dotproduct>`_, |
| | `factor <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.factor>`_, |
| | `is_prime <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.is_prime>`_, |
-| | `nth_prime <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.nth_prime>`_, |
| | `matmul <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.matmul>`_, |
+| | `multinomial <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.multinomial>`_, |
+| | `nth_prime <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.nth_prime>`_, |
| | `polynomial_from_roots <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.polynomial_from_roots>`_, |
| | `polynomial_derivative <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.polynomial_derivative>`_, |
| | `polynomial_eval <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.polynomial_eval>`_, |
diff --git a/contrib/python/more-itertools/py3/README.rst b/contrib/python/more-itertools/py3/README.rst
index 059fda91262..c82a5905e80 100644
--- a/contrib/python/more-itertools/py3/README.rst
+++ b/contrib/python/more-itertools/py3/README.rst
@@ -119,8 +119,9 @@ Python iterables.
| | `dotproduct <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.dotproduct>`_, |
| | `factor <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.factor>`_, |
| | `is_prime <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.is_prime>`_, |
-| | `nth_prime <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.nth_prime>`_, |
| | `matmul <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.matmul>`_, |
+| | `multinomial <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.multinomial>`_, |
+| | `nth_prime <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.nth_prime>`_, |
| | `polynomial_from_roots <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.polynomial_from_roots>`_, |
| | `polynomial_derivative <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.polynomial_derivative>`_, |
| | `polynomial_eval <https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.polynomial_eval>`_, |
diff --git a/contrib/python/more-itertools/py3/more_itertools/__init__.py b/contrib/python/more-itertools/py3/more_itertools/__init__.py
index 4d43d11a637..9fec1d275b4 100644
--- a/contrib/python/more-itertools/py3/more_itertools/__init__.py
+++ b/contrib/python/more-itertools/py3/more_itertools/__init__.py
@@ -3,4 +3,4 @@
from .more import * # noqa
from .recipes import * # noqa
-__version__ = '10.6.0'
+__version__ = '10.7.0'
diff --git a/contrib/python/more-itertools/py3/more_itertools/more.py b/contrib/python/more-itertools/py3/more_itertools/more.py
index 2228687ea27..0f3d1074e87 100644
--- a/contrib/python/more-itertools/py3/more_itertools/more.py
+++ b/contrib/python/more-itertools/py3/more_itertools/more.py
@@ -159,7 +159,34 @@ __all__ = [
]
# math.sumprod is available for Python 3.12+
-_fsumprod = getattr(math, 'sumprod', lambda x, y: fsum(map(mul, x, y)))
+try:
+ from math import sumprod as _fsumprod
+
+except ImportError: # pragma: no cover
+ # Extended precision algorithms from T. J. Dekker,
+ # "A Floating-Point Technique for Extending the Available Precision"
+ # https://csclub.uwaterloo.ca/~pbarfuss/dekker1971.pdf
+ # Formulas: (5.5) (5.6) and (5.8). Code: mul12()
+
+ def dl_split(x: float):
+ "Split a float into two half-precision components."
+ t = x * 134217729.0 # Veltkamp constant = 2.0 ** 27 + 1
+ hi = t - (t - x)
+ lo = x - hi
+ return hi, lo
+
+ def dl_mul(x, y):
+ "Lossless multiplication."
+ xx_hi, xx_lo = dl_split(x)
+ yy_hi, yy_lo = dl_split(y)
+ p = xx_hi * yy_hi
+ q = xx_hi * yy_lo + xx_lo * yy_hi
+ z = p + q
+ zz = p - z + q + xx_lo * yy_lo
+ return z, zz
+
+ def _fsumprod(p, q):
+ return fsum(chain.from_iterable(map(dl_mul, p, q)))
def chunked(iterable, n, strict=False):
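
Editor's note: the "lossless multiplication" claim of the Dekker fallback above is easy to check against exact rational arithmetic. The sketch below copies dl_split/dl_mul from the hunk and verifies that z + zz equals the true product; the Fraction-based assertion is an illustrative addition, not part of the library.

# Standalone sketch of the Dekker fallback above: dl_mul(x, y) returns the
# rounded product z plus its exact rounding error zz, so z + zz equals the
# true product of the two floats (verified here with Fraction arithmetic).
from fractions import Fraction

def dl_split(x):
    t = x * 134217729.0  # Veltkamp constant = 2.0 ** 27 + 1
    hi = t - (t - x)
    lo = x - hi
    return hi, lo

def dl_mul(x, y):
    xx_hi, xx_lo = dl_split(x)
    yy_hi, yy_lo = dl_split(y)
    p = xx_hi * yy_hi
    q = xx_hi * yy_lo + xx_lo * yy_hi
    z = p + q
    zz = p - z + q + xx_lo * yy_lo
    return z, zz

for x, y in [(0.1, 0.3), (1 / 3, 3.0), (2.5, -7.125)]:
    z, zz = dl_mul(x, y)
    assert Fraction(z) + Fraction(zz) == Fraction(x) * Fraction(y)
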
@@ -469,10 +496,20 @@ def consumer(func):
def ilen(iterable):
"""Return the number of items in *iterable*.
- >>> ilen(x for x in range(1000000) if x % 3 == 0)
- 333334
+ For example, there are 168 prime numbers below 1,000:
- This consumes the iterable, so handle with care.
+ >>> ilen(sieve(1000))
+ 168
+
+ Equivalent to, but faster than::
+
+ def ilen(iterable):
+ count = 0
+ for _ in iterable:
+ count += 1
+ return count
+
+ This fully consumes the iterable, so handle with care.
"""
# This is the "most beautiful of the fast variants" of this function.
@@ -484,17 +521,21 @@ def ilen(iterable):
def iterate(func, start):
"""Return ``start``, ``func(start)``, ``func(func(start))``, ...
- >>> from itertools import islice
- >>> list(islice(iterate(lambda x: 2*x, 1), 10))
+ Produces an infinite iterator. To add a stopping condition,
+ use :func:`take`, ``takewhile``, or :func:`takewhile_inclusive`:.
+
+ >>> take(10, iterate(lambda x: 2*x, 1))
[1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+ >>> collatz = lambda x: 3*x + 1 if x%2==1 else x // 2
+ >>> list(takewhile_inclusive(lambda x: x!=1, iterate(collatz, 10)))
+ [10, 5, 16, 8, 4, 2, 1]
+
"""
- while True:
- yield start
- try:
+ with suppress(StopIteration):
+ while True:
+ yield start
start = func(start)
- except StopIteration:
- break
def with_iter(context_manager):
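
Editor's note: as the suppress(StopIteration) wrapper above suggests, the step function can also end the stream by raising StopIteration itself (the old try/except version behaved the same way). A minimal sketch, assuming this version of more_itertools is importable:

from more_itertools import iterate

def halve_while_even(x):
    # Raising StopIteration from the step function ends the generator.
    if x % 2:
        raise StopIteration
    return x // 2

assert list(iterate(halve_while_even, 40)) == [40, 20, 10, 5]
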
@@ -528,7 +569,7 @@ def one(iterable, too_short=None, too_long=None):
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
- ValueError: too many items in iterable (expected 1)'
+ ValueError: too few items in iterable (expected 1)'
>>> too_short = IndexError('too few items')
>>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
@@ -556,27 +597,16 @@ def one(iterable, too_short=None, too_long=None):
contents less destructively.
"""
- it = iter(iterable)
-
- try:
- first_value = next(it)
- except StopIteration as exc:
- raise (
- too_short or ValueError('too few items in iterable (expected 1)')
- ) from exc
-
- try:
- second_value = next(it)
- except StopIteration:
- pass
- else:
- msg = (
- f'Expected exactly one item in iterable, but got {first_value!r}, '
- f'{second_value!r}, and perhaps more.'
- )
- raise too_long or ValueError(msg)
-
- return first_value
+ iterator = iter(iterable)
+ for first in iterator:
+ for second in iterator:
+ msg = (
+ f'Expected exactly one item in iterable, but got {first!r}, '
+ f'{second!r}, and perhaps more.'
+ )
+ raise too_long or ValueError(msg)
+ return first
+ raise too_short or ValueError('too few items in iterable (expected 1)')
def raise_(exception, *args):
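
Editor's note: the nested for-loops above replace the explicit next()/StopIteration plumbing while keeping the same error messages. A quick behavioral check of the rewritten one(), assuming this version of more_itertools is importable:

from more_itertools import one

assert one(['ball']) == 'ball'

try:
    one([])
except ValueError as exc:
    assert 'too few items' in str(exc)

try:
    one(iter('ab'))
except ValueError as exc:
    assert 'Expected exactly one item' in str(exc)
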
@@ -643,21 +673,19 @@ def strictly_n(iterable, n, too_short=None, too_long=None):
)
it = iter(iterable)
- for i in range(n):
- try:
- item = next(it)
- except StopIteration:
- too_short(i)
- return
- else:
- yield item
- try:
- next(it)
- except StopIteration:
- pass
- else:
+ sent = 0
+ for item in islice(it, n):
+ yield item
+ sent += 1
+
+ if sent < n:
+ too_short(sent)
+ return
+
+ for item in it:
too_long(n + 1)
+ return
def distinct_permutations(iterable, r=None):
@@ -674,7 +702,7 @@ def distinct_permutations(iterable, r=None):
input iterable. The number of items returned is
`n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
items input, and each `x_i` is the count of a distinct item in the input
- sequence.
+ sequence. The function :func:`multinomial` computes this directly.
If *r* is given, only the *r*-length permutations are yielded.
@@ -1074,7 +1102,7 @@ class bucket:
if self._validator(item_value):
self._cache[item_value].append(item)
- yield from self._cache.keys()
+ return iter(self._cache)
def __getitem__(self, value):
if not self._validator(value):
@@ -2984,7 +3012,7 @@ def exactly_n(iterable, n, predicate=bool):
so avoid calling it on infinite iterables.
"""
- return len(take(n + 1, filter(predicate, iterable))) == n
+ return ilen(islice(filter(predicate, iterable), n + 1)) == n
def circular_shifts(iterable, steps=1):
@@ -3428,22 +3456,18 @@ def only(iterable, default=None, too_long=None):
Note that :func:`only` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check
iterable contents less destructively.
- """
- it = iter(iterable)
- first_value = next(it, default)
-
- try:
- second_value = next(it)
- except StopIteration:
- pass
- else:
- msg = (
- f'Expected exactly one item in iterable, but got {first_value!r}, '
- f'{second_value!r}, and perhaps more.'
- )
- raise too_long or ValueError(msg)
- return first_value
+ """
+ iterator = iter(iterable)
+ for first in iterator:
+ for second in iterator:
+ msg = (
+ f'Expected exactly one item in iterable, but got {first!r}, '
+ f'{second!r}, and perhaps more.'
+ )
+ raise too_long or ValueError(msg)
+ return first
+ return default
def _ichunk(iterable, n):
@@ -3451,16 +3475,12 @@ def _ichunk(iterable, n):
chunk = islice(iterable, n)
def generator():
- while True:
- if cache:
- yield cache.popleft()
- else:
- try:
- item = next(chunk)
- except StopIteration:
- return
+ with suppress(StopIteration):
+ while True:
+ if cache:
+ yield cache.popleft()
else:
- yield item
+ yield next(chunk)
def materialize_next(n=1):
# if n not specified materialize everything
@@ -3784,7 +3804,7 @@ def sample(iterable, k, weights=None, *, counts=None, strict=False):
return []
if weights is not None and counts is not None:
- raise TypeError('weights and counts are mutally exclusive')
+ raise TypeError('weights and counts are mutually exclusive')
elif weights is not None:
weights = iter(weights)
@@ -4091,7 +4111,7 @@ def nth_permutation(iterable, r, index):
raise ValueError
else:
c = perm(n, r)
- assert c > 0 # factortial(n)>0, and r<n so perm(n,r) is never zero
+ assert c > 0 # factorial(n)>0, and r<n so perm(n,r) is never zero
if index < 0:
index += c
@@ -4886,9 +4906,11 @@ def powerset_of_sets(iterable):
:func:`powerset_of_sets` takes care to minimize the number
of hash operations performed.
"""
- sets = tuple(map(set, dict.fromkeys(map(frozenset, zip(iterable)))))
- for r in range(len(sets) + 1):
- yield from starmap(set().union, combinations(sets, r))
+ sets = tuple(dict.fromkeys(map(frozenset, zip(iterable))))
+ return chain.from_iterable(
+ starmap(set().union, combinations(sets, r))
+ for r in range(len(sets) + 1)
+ )
def join_mappings(**field_to_map):
@@ -4922,7 +4944,7 @@ def _complex_sumprod(v1, v2):
def dft(xarr):
- """Discrete Fourier Tranform. *xarr* is a sequence of complex numbers.
+ """Discrete Fourier Transform. *xarr* is a sequence of complex numbers.
Yields the components of the corresponding transformed output vector.
>>> import cmath
@@ -4941,7 +4963,7 @@ def dft(xarr):
def idft(Xarr):
- """Inverse Discrete Fourier Tranform. *Xarr* is a sequence of
+ """Inverse Discrete Fourier Transform. *Xarr* is a sequence of
complex numbers. Yields the components of the corresponding
inverse-transformed output vector.
diff --git a/contrib/python/more-itertools/py3/more_itertools/more.pyi b/contrib/python/more-itertools/py3/more_itertools/more.pyi
index b8406906154..dea9abc1cd1 100644
--- a/contrib/python/more-itertools/py3/more_itertools/more.pyi
+++ b/contrib/python/more-itertools/py3/more_itertools/more.pyi
@@ -468,42 +468,42 @@ def groupby_transform(
keyfunc: None,
valuefunc: Callable[[_T], _V],
reducefunc: None,
-) -> Iterable[tuple[_T, Iterable[_V]]]: ...
+) -> Iterator[tuple[_T, Iterator[_V]]]: ...
@overload
def groupby_transform(
iterable: Iterable[_T],
keyfunc: Callable[[_T], _U],
valuefunc: Callable[[_T], _V],
reducefunc: None,
-) -> Iterable[tuple[_U, Iterator[_V]]]: ...
+) -> Iterator[tuple[_U, Iterator[_V]]]: ...
@overload
def groupby_transform(
iterable: Iterable[_T],
keyfunc: None,
valuefunc: None,
reducefunc: Callable[[Iterator[_T]], _W],
-) -> Iterable[tuple[_T, _W]]: ...
+) -> Iterator[tuple[_T, _W]]: ...
@overload
def groupby_transform(
iterable: Iterable[_T],
keyfunc: Callable[[_T], _U],
valuefunc: None,
reducefunc: Callable[[Iterator[_T]], _W],
-) -> Iterable[tuple[_U, _W]]: ...
+) -> Iterator[tuple[_U, _W]]: ...
@overload
def groupby_transform(
iterable: Iterable[_T],
keyfunc: None,
valuefunc: Callable[[_T], _V],
- reducefunc: Callable[[Iterable[_V]], _W],
-) -> Iterable[tuple[_T, _W]]: ...
+ reducefunc: Callable[[Iterator[_V]], _W],
+) -> Iterator[tuple[_T, _W]]: ...
@overload
def groupby_transform(
iterable: Iterable[_T],
keyfunc: Callable[[_T], _U],
valuefunc: Callable[[_T], _V],
- reducefunc: Callable[[Iterable[_V]], _W],
-) -> Iterable[tuple[_U, _W]]: ...
+ reducefunc: Callable[[Iterator[_V]], _W],
+) -> Iterator[tuple[_U, _W]]: ...
class numeric_range(Generic[_T, _U], Sequence[_T], Hashable, Reversible[_T]):
@overload
@@ -919,7 +919,7 @@ def filter_map(
) -> Iterator[_V]: ...
def powerset_of_sets(iterable: Iterable[_T]) -> Iterator[set[_T]]: ...
def join_mappings(
- **field_to_map: Mapping[_T, _V]
+ **field_to_map: Mapping[_T, _V],
) -> dict[_T, dict[str, _V]]: ...
def doublestarmap(
func: Callable[..., _T],
diff --git a/contrib/python/more-itertools/py3/more_itertools/recipes.py b/contrib/python/more-itertools/py3/more_itertools/recipes.py
index 5a11fc5a304..51bb93b1efc 100644
--- a/contrib/python/more-itertools/py3/more_itertools/recipes.py
+++ b/contrib/python/more-itertools/py3/more_itertools/recipes.py
@@ -8,13 +8,14 @@ Some backward-compatible usability improvements have been made.
"""
-import math
-import operator
+import random
from collections import deque
+from contextlib import suppress
from collections.abc import Sized
from functools import lru_cache, partial
from itertools import (
+ accumulate,
chain,
combinations,
compress,
@@ -28,6 +29,8 @@ from itertools import (
tee,
zip_longest,
)
+from math import prod, comb, isqrt, gcd
+from operator import mul, not_, itemgetter, getitem
from random import randrange, sample, choice
from sys import hexversion
@@ -47,6 +50,7 @@ __all__ = [
'iter_index',
'loops',
'matmul',
+ 'multinomial',
'ncycles',
'nth',
'nth_combination',
@@ -93,12 +97,16 @@ except TypeError:
else:
_zip_strict = partial(zip, strict=True)
+
# math.sumprod is available for Python 3.12+
-_sumprod = getattr(math, 'sumprod', lambda x, y: dotproduct(x, y))
+try:
+ from math import sumprod as _sumprod
+except ImportError:
+ _sumprod = lambda x, y: dotproduct(x, y)
def take(n, iterable):
- """Return first *n* items of the iterable as a list.
+ """Return first *n* items of the *iterable* as a list.
>>> take(3, range(10))
[0, 1, 2]
@@ -144,9 +152,9 @@ def tail(n, iterable):
# either islice or deque will throw a TypeError. This is why we don't
# check if it is Iterable.
if isinstance(iterable, Sized):
- yield from islice(iterable, max(0, len(iterable) - n), None)
+ return islice(iterable, max(0, len(iterable) - n), None)
else:
- yield from iter(deque(iterable, maxlen=n))
+ return iter(deque(iterable, maxlen=n))
def consume(iterator, n=None):
@@ -268,11 +276,14 @@ def ncycles(iterable, n):
def dotproduct(vec1, vec2):
"""Returns the dot product of the two iterables.
- >>> dotproduct([10, 10], [20, 20])
- 400
+ >>> dotproduct([10, 15, 12], [0.65, 0.80, 1.25])
+ 33.5
+ >>> 10 * 0.65 + 15 * 0.80 + 12 * 1.25
+ 33.5
+ In Python 3.12 and later, use ``math.sumprod()`` instead.
"""
- return sum(map(operator.mul, vec1, vec2))
+ return sum(map(mul, vec1, vec2))
def flatten(listOfLists):
@@ -397,26 +408,26 @@ def grouper(iterable, n, incomplete='fill', fillvalue=None):
When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised.
- >>> it = grouper('ABCDEFG', 3, incomplete='strict')
- >>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL
+ >>> iterator = grouper('ABCDEFG', 3, incomplete='strict')
+ >>> list(iterator) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnequalIterablesError
"""
- args = [iter(iterable)] * n
+ iterators = [iter(iterable)] * n
if incomplete == 'fill':
- return zip_longest(*args, fillvalue=fillvalue)
+ return zip_longest(*iterators, fillvalue=fillvalue)
if incomplete == 'strict':
- return _zip_equal(*args)
+ return _zip_equal(*iterators)
if incomplete == 'ignore':
- return zip(*args)
+ return zip(*iterators)
else:
raise ValueError('Expected fill, strict, or ignore')
def roundrobin(*iterables):
- """Yields an item from each iterable, alternating between them.
+ """Visit input iterables in a cycle until each is exhausted.
>>> list(roundrobin('ABC', 'D', 'EF'))
['A', 'D', 'E', 'B', 'F', 'C']
@@ -458,7 +469,7 @@ def partition(pred, iterable):
t1, t2, p = tee(iterable, 3)
p1, p2 = tee(map(pred, p))
- return (compress(t1, map(operator.not_, p1)), compress(t2, p2))
+ return (compress(t1, map(not_, p1)), compress(t2, p2))
def powerset(iterable):
@@ -537,9 +548,9 @@ def unique_justseen(iterable, key=None):
"""
if key is None:
- return map(operator.itemgetter(0), groupby(iterable))
+ return map(itemgetter(0), groupby(iterable))
- return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
+ return map(next, map(itemgetter(1), groupby(iterable, key)))
def unique(iterable, key=None, reverse=False):
@@ -558,7 +569,8 @@ def unique(iterable, key=None, reverse=False):
The elements in *iterable* need not be hashable, but they must be
comparable for sorting to work.
"""
- return unique_justseen(sorted(iterable, key=key, reverse=reverse), key=key)
+ sequenced = sorted(iterable, key=key, reverse=reverse)
+ return unique_justseen(sequenced, key=key)
def iter_except(func, exception, first=None):
@@ -583,13 +595,11 @@ def iter_except(func, exception, first=None):
[]
"""
- try:
+ with suppress(exception):
if first is not None:
yield first()
- while 1:
+ while True:
yield func()
- except exception:
- pass
def first_true(iterable, default=None, pred=None):
@@ -741,19 +751,40 @@ def prepend(value, iterator):
def convolve(signal, kernel):
- """Convolve the iterable *signal* with the iterable *kernel*.
+ """Discrete linear convolution of two iterables.
+ Equivalent to polynomial multiplication.
+
+ For example, multiplying ``(x² -x - 20)`` by ``(x - 3)``
+ gives ``(x³ -4x² -17x + 60)``.
+
+ >>> list(convolve([1, -1, -20], [1, -3]))
+ [1, -4, -17, 60]
+
+ Examples of popular kinds of kernels:
- >>> signal = (1, 2, 3, 4, 5)
- >>> kernel = [3, 2, 1]
- >>> list(convolve(signal, kernel))
- [3, 8, 14, 20, 26, 14, 5]
+ * The kernel ``[0.25, 0.25, 0.25, 0.25]`` computes a moving average.
+ For image data, this blurs the image and reduces noise.
+ * The kernel ``[1/2, 0, -1/2]`` estimates the first derivative of
+ a function evaluated at evenly spaced inputs.
+ * The kernel ``[1, -2, 1]`` estimates the second derivative of a
+ function evaluated at evenly spaced inputs.
- Note: the input arguments are not interchangeable, as the *kernel*
- is immediately consumed and stored.
+ Convolutions are mathematically commutative; however, the inputs are
+ evaluated differently. The signal is consumed lazily and can be
+ infinite. The kernel is fully consumed before the calculations begin.
+
+ Supports all numeric types: int, float, complex, Decimal, Fraction.
+
+ References:
+
+ * Article: https://betterexplained.com/articles/intuitive-convolution/
+ * Video by 3Blue1Brown: https://www.youtube.com/watch?v=KuXjwB4LzSA
"""
- # This implementation intentionally doesn't match the one in the itertools
- # documentation.
+ # This implementation comes from an older version of the itertools
+ # documentation. While the newer implementation is a bit clearer,
+ # this one was kept because the inlined window logic is faster
+ # and it avoids an unnecessary deque-to-tuple conversion.
kernel = tuple(kernel)[::-1]
n = len(kernel)
window = deque([0], maxlen=n) * n
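
Editor's note: the moving-average kernel mentioned in the new docstring is easy to try directly. A small illustrative sketch; the ends of the output only partially overlap the signal, so only the middle entries are true 4-point averages.

from more_itertools import convolve

signal = [10, 12, 40, 11, 9, 13]
kernel = [0.25, 0.25, 0.25, 0.25]  # 4-point moving average

smoothed = list(convolve(signal, kernel))
# Full convolution: len(smoothed) == len(signal) + len(kernel) - 1.
assert len(smoothed) == len(signal) + len(kernel) - 1
# First full-overlap window averages the first four samples.
assert smoothed[3] == (10 + 12 + 40 + 11) / 4
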
@@ -821,9 +852,9 @@ def _sliding_window_islice(iterable, n):
def _sliding_window_deque(iterable, n):
# Normal path for other values of n.
- it = iter(iterable)
- window = deque(islice(it, n - 1), maxlen=n)
- for x in it:
+ iterator = iter(iterable)
+ window = deque(islice(iterator, n - 1), maxlen=n)
+ for x in iterator:
window.append(x)
yield tuple(window)
@@ -864,16 +895,21 @@ def subslices(iterable):
"""
seq = list(iterable)
slices = starmap(slice, combinations(range(len(seq) + 1), 2))
- return map(operator.getitem, repeat(seq), slices)
+ return map(getitem, repeat(seq), slices)
def polynomial_from_roots(roots):
"""Compute a polynomial's coefficients from its roots.
- >>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3)
- >>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60
+ >>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3)
+ >>> polynomial_from_roots(roots) # x³ - 4 x² - 17 x + 60
[1, -4, -17, 60]
+
+ Supports all numeric types: int, float, complex, Decimal, Fraction.
"""
+ # This recipe differs from the one in itertools docs in that it
+ # applies list() after each call to convolve(). This avoids
+ # hitting stack limits with nested generators.
poly = [1]
for root in roots:
poly = list(convolve(poly, (1, -root)))
@@ -908,19 +944,17 @@ def iter_index(iterable, value, start=0, stop=None):
seq_index = getattr(iterable, 'index', None)
if seq_index is None:
# Slow path for general iterables
- it = islice(iterable, start, stop)
- for i, element in enumerate(it, start):
+ iterator = islice(iterable, start, stop)
+ for i, element in enumerate(iterator, start):
if element is value or element == value:
yield i
else:
# Fast path for sequences
stop = len(iterable) if stop is None else stop
i = start - 1
- try:
+ with suppress(ValueError):
while True:
yield (i := seq_index(value, i + 1, stop))
- except ValueError:
- pass
def sieve(n):
@@ -928,13 +962,16 @@ def sieve(n):
>>> list(sieve(30))
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
+
"""
+ # This implementation comes from an older version of the itertools
+ # documentation. The newer implementation is easier to read but is
+ # less lazy.
if n > 2:
yield 2
start = 3
data = bytearray((0, 1)) * (n // 2)
- limit = math.isqrt(n) + 1
- for p in iter_index(data, 1, start, limit):
+ for p in iter_index(data, 1, start, stop=isqrt(n) + 1):
yield from iter_index(data, 1, start, p * p)
data[p * p : n : p + p] = bytes(len(range(p * p, n, p + p)))
start = p * p
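
Editor's note: the laziness noted in the comment above is observable in practice: primes are yielded while the sieve is still being filled in, so pulling a few primes from a large bound returns immediately. A minimal sketch, assuming this version of more_itertools is importable:

from itertools import islice
from more_itertools import sieve

# Only a small prefix of the sieving work is done before these are produced.
assert list(islice(sieve(10**6), 5)) == [2, 3, 5, 7, 11]
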
@@ -954,14 +991,14 @@ def _batched(iterable, n, *, strict=False):
"""
if n < 1:
raise ValueError('n must be at least one')
- it = iter(iterable)
- while batch := tuple(islice(it, n)):
+ iterator = iter(iterable)
+ while batch := tuple(islice(iterator, n)):
if strict and len(batch) != n:
raise ValueError('batched(): incomplete batch')
yield batch
-if hexversion >= 0x30D00A2:
+if hexversion >= 0x30D00A2: # pragma: no cover
from itertools import batched as itertools_batched
def batched(iterable, n, *, strict=False):
@@ -1004,15 +1041,17 @@ def matmul(m1, m2):
The caller should ensure that the dimensions of the input matrices are
compatible with each other.
+
+ Supports all numeric types: int, float, complex, Decimal, Fraction.
"""
n = len(m2[0])
return batched(starmap(_sumprod, product(m1, transpose(m2))), n)
def _factor_pollard(n):
- # Return a factor of n using Pollard's rho algorithm
- gcd = math.gcd
- for b in range(1, n - 2):
+ # Return a factor of n using Pollard's rho algorithm.
+ # Efficient when n is odd and composite.
+ for b in range(1, n):
x = y = 2
d = 1
while d == 1:
@@ -1064,16 +1103,20 @@ def factor(n):
def polynomial_eval(coefficients, x):
"""Evaluate a polynomial at a specific value.
- Example: evaluating x^3 - 4 * x^2 - 17 * x + 60 at x = 2.5:
+ Computes with better numeric stability than Horner's method.
+
+ Evaluate ``x^3 - 4 * x^2 - 17 * x + 60`` at ``x = 2.5``:
>>> coefficients = [1, -4, -17, 60]
>>> x = 2.5
>>> polynomial_eval(coefficients, x)
8.125
+
+ Supports all numeric types: int, float, complex, Decimal, Fraction.
"""
n = len(coefficients)
if n == 0:
- return x * 0 # coerce zero to the type of x
+ return type(x)(0)
powers = map(pow, repeat(x), reversed(range(n)))
return _sumprod(coefficients, powers)
@@ -1083,6 +1126,8 @@ def sum_of_squares(it):
>>> sum_of_squares([10, 20, 30])
1400
+
+ Supports all numeric types: int, float, complex, Decimal, Fraction.
"""
return _sumprod(*tee(it))
@@ -1090,25 +1135,38 @@ def sum_of_squares(it):
def polynomial_derivative(coefficients):
"""Compute the first derivative of a polynomial.
- Example: evaluating the derivative of x^3 - 4 * x^2 - 17 * x + 60
+ Evaluate the derivative of ``x³ - 4 x² - 17 x + 60``:
>>> coefficients = [1, -4, -17, 60]
>>> derivative_coefficients = polynomial_derivative(coefficients)
>>> derivative_coefficients
[3, -8, -17]
+
+ Supports all numeric types: int, float, complex, Decimal, Fraction.
"""
n = len(coefficients)
powers = reversed(range(1, n))
- return list(map(operator.mul, coefficients, powers))
+ return list(map(mul, coefficients, powers))
def totient(n):
"""Return the count of natural numbers up to *n* that are coprime with *n*.
- >>> totient(9)
+ Euler's totient function φ(n) gives the number of totatives.
+ Totative are integers k in the range 1 ≤ k ≤ n such that gcd(n, k) = 1.
+
+ >>> n = 9
+ >>> totient(n)
+ 6
+
+ >>> totatives = [x for x in range(1, n) if gcd(n, x) == 1]
+ >>> totatives
+ [1, 2, 4, 5, 7, 8]
+ >>> len(totatives)
6
- >>> totient(12)
- 4
+
+ Reference: https://en.wikipedia.org/wiki/Euler%27s_totient_function
+
"""
for prime in set(factor(n)):
n -= n // prime
@@ -1157,31 +1215,56 @@ def _strong_probable_prime(n, base):
return False
+# Separate instance of Random() that doesn't share state
+# with the default user instance of Random().
+_private_randrange = random.Random().randrange
+
+
def is_prime(n):
"""Return ``True`` if *n* is prime and ``False`` otherwise.
- >>> is_prime(37)
- True
- >>> is_prime(3 * 13)
- False
- >>> is_prime(18_446_744_073_709_551_557)
- True
-
- This function uses the Miller-Rabin primality test, which can return false
- positives for very large inputs. For values of *n* below 10**24
- there are no false positives. For larger values, there is less than
- a 1 in 2**128 false positive rate. Multiple tests can further reduce the
+ Basic examples:
+
+ >>> is_prime(37)
+ True
+ >>> is_prime(3 * 13)
+ False
+ >>> is_prime(18_446_744_073_709_551_557)
+ True
+
+ Find the next prime over one billion:
+
+ >>> next(filter(is_prime, count(10**9)))
+ 1000000007
+
+ Generate random primes up to 200 bits and up to 60 decimal digits:
+
+ >>> from random import seed, randrange, getrandbits
+ >>> seed(18675309)
+
+ >>> next(filter(is_prime, map(getrandbits, repeat(200))))
+ 893303929355758292373272075469392561129886005037663238028407
+
+ >>> next(filter(is_prime, map(randrange, repeat(10**60))))
+ 269638077304026462407872868003560484232362454342414618963649
+
+ This function is exact for values of *n* below 10**24. For larger inputs,
+ the probabilistic Miller-Rabin primality test has a less than 1 in 2**128
chance of a false positive.
"""
+
if n < 17:
return n in {2, 3, 5, 7, 11, 13}
+
if not (n & 1 and n % 3 and n % 5 and n % 7 and n % 11 and n % 13):
return False
+
for limit, bases in _perfect_tests:
if n < limit:
break
else:
- bases = [randrange(2, n - 1) for i in range(64)]
+ bases = (_private_randrange(2, n - 1) for i in range(64))
+
return all(_strong_probable_prime(n, base) for base in bases)
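
Editor's note: one consequence of the private Random() instance introduced above is that probabilistic calls to is_prime() leave the user's seeded global random stream untouched. A hedged check, assuming this version of more_itertools is importable; the 521-bit Mersenne prime forces the random-bases branch:

import random
from more_itertools import is_prime

random.seed(123)
expected = [random.random() for _ in range(3)]

random.seed(123)
assert is_prime(2**521 - 1)   # above 10**24, so 64 random bases are drawn
observed = [random.random() for _ in range(3)]

assert observed == expected   # the global RNG state was not consumed
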
@@ -1197,3 +1280,48 @@ def loops(n):
"""
return repeat(None, n)
+
+
+def multinomial(*counts):
+ """Number of distinct arrangements of a multiset.
+
+ The expression ``multinomial(3, 4, 2)`` has several equivalent
+ interpretations:
+
+ * In the expansion of ``(a + b + c)⁹``, the coefficient of the
+ ``a³b⁴c²`` term is 1260.
+
+ * There are 1260 distinct ways to arrange 9 balls consisting of 3 reds, 4
+ greens, and 2 blues.
+
+ * There are 1260 unique ways to place 9 distinct objects into three bins
+ with sizes 3, 4, and 2.
+
+ The :func:`multinomial` function computes the length of
+ :func:`distinct_permutations`. For example, there are 83,160 distinct
+ anagrams of the word "abracadabra":
+
+ >>> from more_itertools import distinct_permutations, ilen
+ >>> ilen(distinct_permutations('abracadabra'))
+ 83160
+
+ This can be computed directly from the letter counts, 5a 2b 2r 1c 1d:
+
+ >>> from collections import Counter
+ >>> list(Counter('abracadabra').values())
+ [5, 2, 2, 1, 1]
+ >>> multinomial(5, 2, 1, 1, 2)
+ 83160
+
+ A binomial coefficient is a special case of multinomial where there are
+ only two categories. For example, the number of ways to arrange 12 balls
+ with 5 reds and 7 blues is ``multinomial(5, 7)`` or ``math.comb(12, 5)``.
+
+ When the multiplicities are all just 1, :func:`multinomial`
+ is a special case of ``math.factorial`` so that
+ ``multinomial(1, 1, 1, 1, 1, 1, 1) == math.factorial(7)``.
+
+ Reference: https://en.wikipedia.org/wiki/Multinomial_theorem
+
+ """
+ return prod(map(comb, accumulate(counts), counts))
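
Editor's note: the one-line implementation relies on the telescoping identity multinomial(k1, ..., km) = prod(C(k1 + ... + ki, ki)): merging the groups one at a time, after group i is merged the sequence holds k1 + ... + ki items and there are C(k1 + ... + ki, ki) ways to place that group within it. A small worked check of the identity against the factorial definition:

from itertools import accumulate
from math import comb, factorial, prod

counts = (3, 4, 2)  # 9 items: 3 red, 4 green, 2 blue

telescoped = prod(map(comb, accumulate(counts), counts))
# comb(3, 3) * comb(7, 4) * comb(9, 2) == 1 * 35 * 36 == 1260
direct = factorial(sum(counts)) // prod(map(factorial, counts))

assert telescoped == direct == 1260
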
diff --git a/contrib/python/more-itertools/py3/more_itertools/recipes.pyi b/contrib/python/more-itertools/py3/more_itertools/recipes.pyi
index e404f4d3df4..0d50ca63a39 100644
--- a/contrib/python/more-itertools/py3/more_itertools/recipes.pyi
+++ b/contrib/python/more-itertools/py3/more_itertools/recipes.pyi
@@ -26,6 +26,7 @@ __all__ = [
'iter_index',
'loops',
'matmul',
+ 'multinomial',
'ncycles',
'nth',
'nth_combination',
@@ -188,3 +189,4 @@ def _shift_to_odd(n: int) -> tuple[int, int]: ...
def _strong_probable_prime(n: int, base: int) -> bool: ...
def is_prime(n: int) -> bool: ...
def loops(n: int) -> Iterator[None]: ...
+def multinomial(*counts: int) -> int: ...
diff --git a/contrib/python/more-itertools/py3/tests/test_more.py b/contrib/python/more-itertools/py3/tests/test_more.py
index bfbf583f28f..f30e747d190 100644
--- a/contrib/python/more-itertools/py3/tests/test_more.py
+++ b/contrib/python/more-itertools/py3/tests/test_more.py
@@ -1,5 +1,4 @@
import cmath
-import warnings
from collections import Counter, abc
from collections.abc import Set
@@ -29,7 +28,6 @@ from statistics import mean
from string import ascii_letters
from sys import version_info
from time import sleep
-from traceback import format_exc
from unittest import skipIf, TestCase
import more_itertools as mi
@@ -615,24 +613,12 @@ class OneTests(TestCase):
it = iter(['item'])
self.assertEqual(mi.one(it), 'item')
- def test_too_short(self):
+ def test_too_short_new(self):
it = iter([])
- for too_short, exc_type in [
- (None, ValueError),
- (IndexError, IndexError),
- ]:
- with self.subTest(too_short=too_short):
- try:
- mi.one(it, too_short=too_short)
- except exc_type:
- formatted_exc = format_exc()
- self.assertIn('StopIteration', formatted_exc)
- self.assertIn(
- 'The above exception was the direct cause',
- formatted_exc,
- )
- else:
- self.fail()
+ self.assertRaises(ValueError, lambda: mi.one(it))
+ self.assertRaises(
+ OverflowError, lambda: mi.one(it, too_short=OverflowError)
+ )
def test_too_long(self):
it = count()
@@ -1908,15 +1894,11 @@ class StaggerTest(TestCase):
class ZipEqualTest(TestCase):
@skipIf(version_info[:2] < (3, 10), 'zip_equal deprecated for 3.10+')
def test_deprecation(self):
- with warnings.catch_warnings(record=True) as caught:
- warnings.simplefilter('always')
+ with self.assertWarns(DeprecationWarning):
self.assertEqual(
list(mi.zip_equal([1, 2], [3, 4])), [(1, 3), (2, 4)]
)
- (warning,) = caught
- assert warning.category == DeprecationWarning
-
def test_equal(self):
lists = [0, 1, 2], [2, 3, 4]
@@ -4137,7 +4119,7 @@ class FilterExceptTests(TestCase):
list(mi.filter_except(int, iterable))
def test_raise(self):
- iterable = ['0', '1' '2', 'three', None]
+ iterable = ['0', '12', 'three', None]
with self.assertRaises(TypeError):
list(mi.filter_except(int, iterable, ValueError))
@@ -4169,7 +4151,7 @@ class MapExceptTests(TestCase):
list(mi.map_except(int, iterable))
def test_raise(self):
- iterable = ['0', '1' '2', 'three', None]
+ iterable = ['0', '12', 'three', None]
with self.assertRaises(TypeError):
list(mi.map_except(int, iterable, ValueError))
@@ -4322,7 +4304,6 @@ class SampleTests(TestCase):
self.assertTrue(difference_in_means < 4.4)
def test_error_cases(self):
-
# weights and counts are mutally exclusive
with self.assertRaises(TypeError):
mi.sample(
diff --git a/contrib/python/more-itertools/py3/tests/test_recipes.py b/contrib/python/more-itertools/py3/tests/test_recipes.py
index a810b8de1e0..63be39426eb 100644
--- a/contrib/python/more-itertools/py3/tests/test_recipes.py
+++ b/contrib/python/more-itertools/py3/tests/test_recipes.py
@@ -1,10 +1,11 @@
+from collections import Counter
from decimal import Decimal
from doctest import DocTestSuite
from fractions import Fraction
from functools import reduce
from itertools import combinations, count, groupby, permutations
from operator import mul
-from math import comb, factorial
+from math import comb, prod, factorial
from sys import version_info
from unittest import TestCase, skipIf
from unittest.mock import patch
@@ -1385,3 +1386,55 @@ class LoopsTests(TestCase):
self.assertTrue(
all(list(mi.loops(n)) == [None] * n for n in range(-10, 10))
)
+
+
+class MultinomialTests(TestCase):
+ def test_basic(self):
+ multinomial = mi.multinomial
+
+ # Case M(11; 5, 2, 1, 1, 2) = 83160
+ # https://www.wolframalpha.com/input?i=Multinomia%285%2C+2%2C+1%2C+1%2C+2%29
+ self.assertEqual(multinomial(5, 2, 1, 1, 2), 83160)
+
+ # Commutative
+ self.assertEqual(multinomial(2, 1, 1, 2, 5), 83160)
+
+ # Unaffected by zero-sized bins
+ self.assertEqual(multinomial(2, 0, 1, 0, 1, 2, 5, 0), 83160)
+
+ # Matches definition
+ self.assertEqual(
+ multinomial(5, 2, 1, 1, 2),
+ (
+ factorial(sum([5, 2, 1, 1, 2]))
+ // prod(map(factorial, [5, 2, 1, 1, 2]))
+ ),
+ )
+
+ # Corner cases and identities
+ self.assertEqual(multinomial(), 1)
+ self.assertEqual(multinomial(5), 1)
+ self.assertEqual(multinomial(5, 7), comb(12, 5))
+ self.assertEqual(multinomial(1, 1, 1, 1, 1, 1, 1), factorial(7))
+
+ # Relationship to distinct_permuations() and permutations()
+ for word in ['plain', 'pizza', 'coffee', 'honolulu', 'assists']:
+ with self.subTest(word=word):
+ self.assertEqual(
+ multinomial(*Counter(word).values()),
+ mi.ilen(mi.distinct_permutations(word)),
+ )
+ self.assertEqual(
+ multinomial(*Counter(word).values()),
+ len(set(permutations(word))),
+ )
+
+ # Error cases
+ with self.assertRaises(ValueError):
+ multinomial(-5, 7) # No negative inputs
+ with self.assertRaises(TypeError):
+ multinomial(5, 7.25) # No float inputs
+ with self.assertRaises(TypeError):
+ multinomial(5, 'x') # No non-numeric inputs
+ with self.assertRaises(TypeError):
+ multinomial([5, 7]) # No sequence inputs
diff --git a/contrib/python/more-itertools/py3/ya.make b/contrib/python/more-itertools/py3/ya.make
index b604dff0158..d8f6243c812 100644
--- a/contrib/python/more-itertools/py3/ya.make
+++ b/contrib/python/more-itertools/py3/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(10.6.0)
+VERSION(10.7.0)
LICENSE(MIT)