path: root/contrib/python/hypothesis
author     robot-piglet <robot-piglet@yandex-team.com>  2024-03-10 19:22:41 +0300
committer  robot-piglet <robot-piglet@yandex-team.com>  2024-03-10 19:31:09 +0300
commit     13a34e8a2fe1c3498a9a3e1d56202bb29eb5d17b (patch)
tree       f188fb0f394d6b20e68951e88cf555610dad8cf1 /contrib/python/hypothesis
parent     e0439374e8770430b5a391cea94769059544e2a2 (diff)
download   ydb-13a34e8a2fe1c3498a9a3e1d56202bb29eb5d17b.tar.gz
Intermediate changes
Diffstat (limited to 'contrib/python/hypothesis')
-rw-r--r--  contrib/python/hypothesis/py3/.dist-info/METADATA                               2
-rw-r--r--  contrib/python/hypothesis/py3/hypothesis/internal/conjecture/data.py           66
-rw-r--r--  contrib/python/hypothesis/py3/hypothesis/internal/conjecture/datatree.py       21
-rw-r--r--  contrib/python/hypothesis/py3/hypothesis/stateful.py                           32
-rw-r--r--  contrib/python/hypothesis/py3/hypothesis/strategies/_internal/collections.py    5
-rw-r--r--  contrib/python/hypothesis/py3/hypothesis/strategies/_internal/featureflags.py  26
-rw-r--r--  contrib/python/hypothesis/py3/hypothesis/version.py                             2
-rw-r--r--  contrib/python/hypothesis/py3/ya.make                                           2
8 files changed, 92 insertions, 64 deletions
diff --git a/contrib/python/hypothesis/py3/.dist-info/METADATA b/contrib/python/hypothesis/py3/.dist-info/METADATA
index 171a09eebb..95fe6d3510 100644
--- a/contrib/python/hypothesis/py3/.dist-info/METADATA
+++ b/contrib/python/hypothesis/py3/.dist-info/METADATA
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: hypothesis
-Version: 6.98.10
+Version: 6.98.11
Summary: A library for property-based testing
Home-page: https://hypothesis.works
Author: David R. MacIver and Zac Hatfield-Dodds
diff --git a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/data.py b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/data.py
index cdce1afacf..b84db4df85 100644
--- a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/data.py
+++ b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/data.py
@@ -62,11 +62,14 @@ from hypothesis.internal.floats import (
from hypothesis.internal.intervalsets import IntervalSet
if TYPE_CHECKING:
+ from typing import TypeAlias
+
from typing_extensions import dataclass_transform
from hypothesis.strategies import SearchStrategy
from hypothesis.strategies._internal.strategies import Ex
else:
+ TypeAlias = object
def dataclass_transform():
def wrapper(tp):
@@ -94,6 +97,41 @@ TargetObservations = Dict[Optional[str], Union[int, float]]
T = TypeVar("T")
+class IntegerKWargs(TypedDict):
+ min_value: Optional[int]
+ max_value: Optional[int]
+ weights: Optional[Sequence[float]]
+ shrink_towards: int
+
+
+class FloatKWargs(TypedDict):
+ min_value: float
+ max_value: float
+ allow_nan: bool
+ smallest_nonzero_magnitude: float
+
+
+class StringKWargs(TypedDict):
+ intervals: IntervalSet
+ min_size: int
+ max_size: Optional[int]
+
+
+class BytesKWargs(TypedDict):
+ size: int
+
+
+class BooleanKWargs(TypedDict):
+ p: float
+
+
+IRType: TypeAlias = Union[int, str, bool, float, bytes]
+IRKWargsType: TypeAlias = Union[
+ IntegerKWargs, FloatKWargs, StringKWargs, BytesKWargs, BooleanKWargs
+]
+IRTypeName: TypeAlias = Literal["integer", "string", "boolean", "float", "bytes"]
+
+
class ExtraInformation:
"""A class for holding shared state on a ``ConjectureData`` that should
be added to the final ``ConjectureResult``."""
@@ -798,34 +836,6 @@ global_test_counter = 0
MAX_DEPTH = 100
-class IntegerKWargs(TypedDict):
- min_value: Optional[int]
- max_value: Optional[int]
- weights: Optional[Sequence[float]]
- shrink_towards: int
-
-
-class FloatKWargs(TypedDict):
- min_value: float
- max_value: float
- allow_nan: bool
- smallest_nonzero_magnitude: float
-
-
-class StringKWargs(TypedDict):
- intervals: IntervalSet
- min_size: int
- max_size: Optional[int]
-
-
-class BytesKWargs(TypedDict):
- size: int
-
-
-class BooleanKWargs(TypedDict):
- p: float
-
-
class DataObserver:
"""Observer class for recording the behaviour of a
ConjectureData object, primarily used for tracking
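
Note: the data.py hunks above hoist the per-ir-type kwargs TypedDicts ahead of their first use and add module-level IRType / IRKWargsType / IRTypeName aliases, so that datatree.py can import them instead of redefining them locally. A minimal standalone sketch of the TYPE_CHECKING/TypeAlias fallback pattern these hunks rely on (not taken from the diff; log_draw is a hypothetical helper):

    from typing import TYPE_CHECKING, Literal, TypedDict, Union

    if TYPE_CHECKING:
        from typing import TypeAlias  # resolved only by type checkers (Python 3.10+)
    else:
        TypeAlias = object  # runtime placeholder so the annotated assignments still evaluate

    IRType: TypeAlias = Union[int, str, bool, float, bytes]
    IRTypeName: TypeAlias = Literal["integer", "string", "boolean", "float", "bytes"]

    class BooleanKWargs(TypedDict):
        p: float

    def log_draw(ir_type: IRTypeName, value: IRType, kwargs: BooleanKWargs) -> str:
        # Hypothetical helper: format one recorded draw using the shared aliases.
        return f"{ir_type} draw -> {value!r} (p={kwargs['p']})"

    print(log_draw("boolean", True, {"p": 0.5}))
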
diff --git a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/datatree.py b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/datatree.py
index a9a6e5b196..c8e5d70aa7 100644
--- a/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/datatree.py
+++ b/contrib/python/hypothesis/py3/hypothesis/internal/conjecture/datatree.py
@@ -10,7 +10,7 @@
import itertools
import math
-from typing import TYPE_CHECKING, List, Literal, Optional, Union
+from typing import List, Optional, Union
import attr
@@ -24,23 +24,14 @@ from hypothesis.internal.conjecture.data import (
DataObserver,
FloatKWargs,
IntegerKWargs,
+ IRKWargsType,
+ IRType,
+ IRTypeName,
Status,
StringKWargs,
)
from hypothesis.internal.floats import count_between_floats, float_to_int, int_to_float
-if TYPE_CHECKING:
- from typing import TypeAlias
-else:
- TypeAlias = object
-
-IRType: TypeAlias = Union[int, str, bool, float, bytes]
-IRKWargsType: TypeAlias = Union[
- IntegerKWargs, FloatKWargs, StringKWargs, BytesKWargs, BooleanKWargs
-]
-# this would be "IRTypeType", but that's just confusing.
-IRLiteralType: TypeAlias = Literal["integer", "string", "boolean", "float", "bytes"]
-
class PreviouslyUnseenBehaviour(HypothesisException):
pass
@@ -336,7 +327,7 @@ class TreeNode:
# have the same length. The values at index i belong to node i.
kwargs: List[IRKWargsType] = attr.ib(factory=list)
values: List[IRType] = attr.ib(factory=list)
- ir_types: List[IRLiteralType] = attr.ib(factory=list)
+ ir_types: List[IRTypeName] = attr.ib(factory=list)
# The indices of nodes which had forced values.
#
@@ -885,7 +876,7 @@ class TreeRecordingObserver(DataObserver):
def draw_value(
self,
- ir_type: IRLiteralType,
+ ir_type: IRTypeName,
value: IRType,
*,
was_forced: bool,
diff --git a/contrib/python/hypothesis/py3/hypothesis/stateful.py b/contrib/python/hypothesis/py3/hypothesis/stateful.py
index 2ab7ef13d5..60cd92721c 100644
--- a/contrib/python/hypothesis/py3/hypothesis/stateful.py
+++ b/contrib/python/hypothesis/py3/hypothesis/stateful.py
@@ -358,7 +358,6 @@ class RuleBasedStateMachine(metaclass=StateMachineMeta):
return cls._invariants_per_class[cls]
def _repr_step(self, rule, data, result):
- self.step_count = getattr(self, "step_count", 0) + 1
output_assignment = ""
if rule.targets:
if isinstance(result, MultipleResults):
@@ -431,7 +430,7 @@ class RuleBasedStateMachine(metaclass=StateMachineMeta):
return StateMachineTestCase
-@attr.s()
+@attr.s(repr=False)
class Rule:
targets = attr.ib()
function = attr.ib(repr=get_pretty_function_description)
@@ -451,6 +450,11 @@ class Rule:
self.arguments_strategies[k] = v
self.bundles = tuple(bundles)
+ def __repr__(self) -> str:
+ rep = get_pretty_function_description
+ bits = [f"{k}={rep(v)}" for k, v in attr.asdict(self).items() if v]
+ return f"{self.__class__.__name__}({', '.join(bits)})"
+
self_strategy = st.runner()
@@ -937,7 +941,8 @@ class RuleStrategy(SearchStrategy):
self.rules = list(machine.rules())
self.enabled_rules_strategy = st.shared(
- FeatureStrategy(), key=("enabled rules", machine)
+ FeatureStrategy(at_least_one_of={r.function.__name__ for r in self.rules}),
+ key=("enabled rules", machine),
)
# The order is a bit arbitrary. Primarily we're trying to group rules
@@ -965,17 +970,16 @@ class RuleStrategy(SearchStrategy):
feature_flags = data.draw(self.enabled_rules_strategy)
- # Note: The order of the filters here is actually quite important,
- # because checking is_enabled makes choices, so increases the size of
- # the choice sequence. This means that if we are in a case where many
- # rules are invalid we will make a lot more choices if we ask if they
- # are enabled before we ask if they are valid, so our test cases will
- # be artificially large.
- rule = data.draw(
- st.sampled_from(self.rules)
- .filter(self.is_valid)
- .filter(lambda r: feature_flags.is_enabled(r.function.__name__))
- )
+ def rule_is_enabled(r):
+ # Note: The order of the filters here is actually quite important,
+ # because checking is_enabled makes choices, so increases the size of
+ # the choice sequence. This means that if we are in a case where many
+ # rules are invalid we would make a lot more choices if we ask if they
+ # are enabled before we ask if they are valid, so our test cases would
+ # be artificially large.
+ return self.is_valid(r) and feature_flags.is_enabled(r.function.__name__)
+
+ rule = data.draw(st.sampled_from(self.rules).filter(rule_is_enabled))
arguments = {}
for k, strat in rule.arguments_strategies.items():
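
Note: the do_draw rewrite above folds both checks into a single predicate so that is_enabled (which itself draws from the data stream) is only consulted for rules that are already valid, keeping the choice sequence short when many rules are invalid. A simplified, standalone illustration of that ordering (is_valid and is_enabled here are stand-ins for RuleStrategy.is_valid and FeatureFlags.is_enabled; no ConjectureData is involved):

    import random

    def choose_rule(rules, is_valid, is_enabled):
        # Sketch of the combined predicate: is_enabled() consumes choices, so it is
        # only consulted for rules that already passed the cheap is_valid() test.
        def rule_is_enabled(rule):
            return is_valid(rule) and is_enabled(rule)

        rng = random.Random(0)
        candidates = [rule for rule in rules if rule_is_enabled(rule)]
        return rng.choice(candidates) if candidates else None

    # Toy usage: only even "rules" are valid; enabling flips a seeded coin, and we
    # record which rules it was asked about to show invalid rules never reach it.
    coin = random.Random(0)
    calls = []
    picked = choose_rule(
        range(10),
        is_valid=lambda r: r % 2 == 0,
        is_enabled=lambda r: (calls.append(r), coin.random() < 0.5)[1],
    )
    print(picked, "is_enabled asked about:", calls)  # only the five valid rules appear
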
diff --git a/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/collections.py b/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/collections.py
index 1f86f37a42..e8f8f21ba4 100644
--- a/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/collections.py
+++ b/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/collections.py
@@ -13,6 +13,7 @@ from typing import Any, Iterable, Tuple, overload
from hypothesis.errors import InvalidArgument
from hypothesis.internal.conjecture import utils as cu
+from hypothesis.internal.conjecture.engine import BUFFER_SIZE
from hypothesis.internal.conjecture.junkdrawer import LazySequenceCopy
from hypothesis.internal.conjecture.utils import combine_labels
from hypothesis.internal.filtering import get_integer_predicate_bounds
@@ -142,6 +143,10 @@ class ListStrategy(SearchStrategy):
self.min_size = min_size or 0
self.max_size = max_size if max_size is not None else float("inf")
assert 0 <= self.min_size <= self.max_size
+ if min_size > BUFFER_SIZE:
+ raise InvalidArgument(
+ f"min_size={min_size:_d} is larger than Hypothesis is designed to handle"
+ )
self.average_size = min(
max(self.min_size * 2, self.min_size + 5),
0.5 * (self.min_size + self.max_size),
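
Note: the collections.py hunk adds an early sanity check: a min_size larger than the engine's BUFFER_SIZE (roughly, the per-test-case byte budget) can never be satisfied, so it is rejected with InvalidArgument up front instead of failing confusingly later. A minimal standalone sketch of the same guard (toy stand-ins; the real BUFFER_SIZE and InvalidArgument live in hypothesis.internal.conjecture.engine and hypothesis.errors):

    class InvalidArgument(ValueError):
        """Stand-in for hypothesis.errors.InvalidArgument."""

    BUFFER_SIZE = 8 * 1024  # assumption: the engine's per-test-case byte budget

    def check_list_bounds(min_size: int, max_size: float) -> None:
        # Mirrors the new guard: sizes beyond the byte budget can never be generated.
        assert 0 <= min_size <= max_size
        if min_size > BUFFER_SIZE:
            raise InvalidArgument(
                f"min_size={min_size:_d} is larger than Hypothesis is designed to handle"
            )

    check_list_bounds(5, 10)  # fine
    try:
        check_list_bounds(10**9, float("inf"))
    except InvalidArgument as err:
        print(err)
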
diff --git a/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/featureflags.py b/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/featureflags.py
index cf72b5c10b..98af8f087a 100644
--- a/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/featureflags.py
+++ b/contrib/python/hypothesis/py3/hypothesis/strategies/_internal/featureflags.py
@@ -31,7 +31,7 @@ class FeatureFlags:
required disabled features.
"""
- def __init__(self, data=None, enabled=(), disabled=()):
+ def __init__(self, data=None, enabled=(), disabled=(), at_least_one_of=()):
self.__data = data
self.__is_disabled = {}
@@ -52,13 +52,18 @@ class FeatureFlags:
# features will be enabled. This is so that we shrink in the direction
# of more features being enabled.
if self.__data is not None:
- self.__p_disabled = data.draw_integer(0, 255) / 255.0
+ self.__p_disabled = data.draw_integer(0, 254) / 255
else:
# If data is None we're in example mode so all that matters is the
# enabled/disabled lists above. We set this up so that everything
# else is enabled by default.
self.__p_disabled = 0.0
+ # The naive approach can lead to disabling e.g. every single rule on a
+ # RuleBasedStateMachine, which aborts the test as unable to make progress.
+ # Track the set of possible names, and ensure that at least one is enabled.
+ self.__at_least_one_of = set(at_least_one_of)
+
def is_enabled(self, name):
"""Tests whether the feature named ``name`` should be enabled on this
test run."""
@@ -81,10 +86,19 @@ class FeatureFlags:
# of the test case where we originally decided, the next point at
# which we make this decision just makes the decision it previously
# made.
+ oneof = self.__at_least_one_of
is_disabled = self.__data.draw_boolean(
- self.__p_disabled, forced=self.__is_disabled.get(name)
+ self.__p_disabled,
+ forced=(
+ False
+ if len(oneof) == 1 and name in oneof
+ else self.__is_disabled.get(name)
+ ),
)
self.__is_disabled[name] = is_disabled
+ if name in oneof and not is_disabled:
+ oneof.clear()
+ oneof.discard(name)
data.stop_example()
return not is_disabled
@@ -100,5 +114,9 @@ class FeatureFlags:
class FeatureStrategy(SearchStrategy):
+ def __init__(self, at_least_one_of=()):
+ super().__init__()
+ self._at_least_one_of = frozenset(at_least_one_of)
+
def do_draw(self, data):
- return FeatureFlags(data)
+ return FeatureFlags(data, at_least_one_of=self._at_least_one_of)
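
Note: the featureflags.py change makes FeatureFlags track a set of names of which at least one must end up enabled: any tracked name that comes back enabled clears the whole set, each queried name is dropped from it, and once only one candidate remains that name is forced on. A toy, ConjectureData-free model of that bookkeeping (class and parameter names here are made up for illustration; the real code re-draws cached decisions with forced=... to keep the choice sequence stable):

    import random

    class ToyFeatureFlags:
        """Toy model of the at_least_one_of bookkeeping added above."""

        def __init__(self, names, p_disabled=0.9, seed=0):
            self._oneof = set(names)
            self._p_disabled = p_disabled
            self._rng = random.Random(seed)
            self._is_disabled = {}

        def is_enabled(self, name):
            if name in self._is_disabled:
                return not self._is_disabled[name]
            if len(self._oneof) == 1 and name in self._oneof:
                is_disabled = False  # last remaining candidate is forced on
            else:
                is_disabled = self._rng.random() < self._p_disabled
            self._is_disabled[name] = is_disabled
            if name in self._oneof and not is_disabled:
                self._oneof.clear()  # constraint already satisfied, stop forcing
            self._oneof.discard(name)
            return not is_disabled

    # Even with disabling certain (p_disabled=1.0), at least one name stays enabled.
    flags = ToyFeatureFlags({"rule_a", "rule_b", "rule_c"}, p_disabled=1.0)
    assert any(flags.is_enabled(n) for n in ("rule_a", "rule_b", "rule_c"))
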
diff --git a/contrib/python/hypothesis/py3/hypothesis/version.py b/contrib/python/hypothesis/py3/hypothesis/version.py
index ce617c47e9..da7f74708c 100644
--- a/contrib/python/hypothesis/py3/hypothesis/version.py
+++ b/contrib/python/hypothesis/py3/hypothesis/version.py
@@ -8,5 +8,5 @@
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
-__version_info__ = (6, 98, 10)
+__version_info__ = (6, 98, 11)
__version__ = ".".join(map(str, __version_info__))
diff --git a/contrib/python/hypothesis/py3/ya.make b/contrib/python/hypothesis/py3/ya.make
index 4c7de0c00e..c71ce1c809 100644
--- a/contrib/python/hypothesis/py3/ya.make
+++ b/contrib/python/hypothesis/py3/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(6.98.10)
+VERSION(6.98.11)
LICENSE(MPL-2.0)