# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.

"""
The pytest plugin for Hypothesis.

We moved this from the old location at `hypothesis.extra.pytestplugin` so that it
can be loaded by pytest without importing Hypothesis.  In turn, this means that
Hypothesis will not load third-party Hypothesis plugins (with their associated
side effects) unless and until the user explicitly runs `import hypothesis`.

See https://github.com/HypothesisWorks/hypothesis/issues/3140 for details.
"""

import base64
import json
import os
import sys
import warnings
from inspect import signature

import _hypothesis_globals
import pytest

try:
    from _pytest.junitxml import xml_key
except ImportError:
    xml_key = "_xml"  # type: ignore

LOAD_PROFILE_OPTION = "--hypothesis-profile"
VERBOSITY_OPTION = "--hypothesis-verbosity"
PRINT_STATISTICS_OPTION = "--hypothesis-show-statistics"
SEED_OPTION = "--hypothesis-seed"
EXPLAIN_OPTION = "--hypothesis-explain"
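
# For reference, a typical command line combining the options above might look
# like this (the profile name "ci" is illustrative and would have to be
# registered with settings.register_profile elsewhere):
#
#     pytest --hypothesis-profile=ci --hypothesis-verbosity=verbose \
#            --hypothesis-seed=0 --hypothesis-show-statistics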

_VERBOSITY_NAMES = ["quiet", "normal", "verbose", "debug"]
_ALL_OPTIONS = [
    LOAD_PROFILE_OPTION,
    VERBOSITY_OPTION,
    PRINT_STATISTICS_OPTION,
    SEED_OPTION,
    EXPLAIN_OPTION,
]
_FIXTURE_MSG = """Function-scoped fixture {0!r} used by {1!r}

Function-scoped fixtures are not reset between examples generated by
`@given(...)`, which is often surprising and can cause subtle test bugs.

If you were expecting the fixture to run separately for each generated example,
then unfortunately you will need to find a different way to achieve your goal
(e.g. using a similar context manager instead of a fixture).

If you are confident that your test will work correctly even though the
fixture is not reset between generated examples, you can suppress this health
check to assure Hypothesis that you understand what you are doing.
"""

STATS_KEY = "_hypothesis_stats"
FAILING_EXAMPLES_KEY = "_hypothesis_failing_examples"


class StoringReporter:
    def __init__(self, config):
        assert "hypothesis" in sys.modules
        from hypothesis.reporting import default

        self.report = default
        self.config = config
        self.results = []

    def __call__(self, msg):
        if self.config.getoption("capture", "fd") == "no":
            self.report(msg)
        if not isinstance(msg, str):
            msg = repr(msg)
        self.results.append(msg)


# Avoiding distutils.version.LooseVersion due to
# https://github.com/HypothesisWorks/hypothesis/issues/2490
if tuple(map(int, pytest.__version__.split(".")[:2])) < (4, 6):  # pragma: no cover
    import warnings

    PYTEST_TOO_OLD_MESSAGE = """
        You are using pytest version %s. Hypothesis tests work with any test
        runner, but our pytest plugin requires pytest 4.6 or newer.
        Note that the pytest developers no longer support your version either!
        Disabling the Hypothesis pytest plugin...
    """
    warnings.warn(PYTEST_TOO_OLD_MESSAGE % (pytest.__version__,), stacklevel=1)

else:
    # Restart side-effect detection as early as possible, to maximize coverage. We
    # need balanced increment/decrement in configure/sessionstart to support nested
    # pytest (e.g. runpytest_inprocess), so this early increment in effect replaces
    # the first one in pytest_configure.
    _configured = False
    if not os.environ.get("HYPOTHESIS_EXTEND_INITIALIZATION"):
        _hypothesis_globals.in_initialization += 1
        if "hypothesis" in sys.modules:
            # Some other plugin has imported hypothesis, so we'll check if there
            # have been undetected side-effects and warn if so.
            from hypothesis.configuration import notice_initialization_restarted

            notice_initialization_restarted()

    def pytest_addoption(parser):
        group = parser.getgroup("hypothesis", "Hypothesis")
        group.addoption(
            LOAD_PROFILE_OPTION,
            action="store",
            help="Load in a registered hypothesis.settings profile",
        )
        group.addoption(
            VERBOSITY_OPTION,
            action="store",
            choices=_VERBOSITY_NAMES,
            help="Override profile with verbosity setting specified",
        )
        group.addoption(
            PRINT_STATISTICS_OPTION,
            action="store_true",
            help="Configure when statistics are printed",
            default=False,
        )
        group.addoption(
            SEED_OPTION,
            action="store",
            help="Set a seed to use for all Hypothesis tests",
        )
        group.addoption(
            EXPLAIN_OPTION,
            action="store_true",
            help="Enable the `explain` phase for failing Hypothesis tests",
            default=False,
        )

    def _any_hypothesis_option(config):
        return any(config.getoption(opt) for opt in _ALL_OPTIONS)

    def pytest_report_header(config):
        if not (
            config.option.verbose >= 1
            or "hypothesis" in sys.modules
            or _any_hypothesis_option(config)
        ):
            return None

        from hypothesis import Verbosity, settings

        if config.option.verbose < 1 and settings.default.verbosity < Verbosity.verbose:
            return None
        settings_str = settings.default.show_changed()
        if settings_str != "":
            settings_str = f" -> {settings_str}"
        return f"hypothesis profile {settings._current_profile!r}{settings_str}"

    def pytest_configure(config):
        global _configured
        # skip first increment because we pre-incremented at import time
        if _configured:
            _hypothesis_globals.in_initialization += 1
        _configured = True

        config.addinivalue_line("markers", "hypothesis: Tests which use hypothesis.")
        if not _any_hypothesis_option(config):
            return
        from hypothesis import Phase, Verbosity, core, settings

        profile = config.getoption(LOAD_PROFILE_OPTION)
        if profile:
            settings.load_profile(profile)
        verbosity_name = config.getoption(VERBOSITY_OPTION)
        if verbosity_name and verbosity_name != settings.default.verbosity.name:
            verbosity_value = Verbosity[verbosity_name]
            name = f"{settings._current_profile}-with-{verbosity_name}-verbosity"
            # register_profile creates a new profile, exactly like the current one,
            # with the extra values given (in this case 'verbosity')
            settings.register_profile(name, verbosity=verbosity_value)
            settings.load_profile(name)
        if (
            config.getoption(EXPLAIN_OPTION)
            and Phase.explain not in settings.default.phases
        ):
            name = f"{settings._current_profile}-with-explain-phase"
            phases = (*settings.default.phases, Phase.explain)
            settings.register_profile(name, phases=phases)
            settings.load_profile(name)

        seed = config.getoption(SEED_OPTION)
        if seed is not None:
            try:
                seed = int(seed)
            except ValueError:
                pass
            core.global_force_seed = seed
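
    # The profile derivation above is roughly what a user could do by hand
    # (profile and setting names are illustrative):
    #
    #     settings.register_profile(
    #         "default-with-verbose-verbosity", verbosity=Verbosity.verbose
    #     )
    #     settings.load_profile("default-with-verbose-verbosity")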

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_call(item):
        __tracebackhide__ = True
        if not (hasattr(item, "obj") and "hypothesis" in sys.modules):
            yield
            return

        from hypothesis import core
        from hypothesis.internal.detection import is_hypothesis_test

        # See https://github.com/pytest-dev/pytest/issues/9159
        core.pytest_shows_exceptiongroups = (
            getattr(pytest, "version_tuple", ())[:2] >= (7, 2)
            or item.config.getoption("tbstyle", "auto") == "native"
        )
        core.running_under_pytest = True

        if not is_hypothesis_test(item.obj):
            # If @given was not applied, check whether other hypothesis
            # decorators were applied, and raise an error if they were.
            # We add this frame of indirection to enable __tracebackhide__.
            def raise_hypothesis_usage_error(msg):
                raise InvalidArgument(msg)

            if getattr(item.obj, "is_hypothesis_strategy_function", False):
                from hypothesis.errors import InvalidArgument

                raise_hypothesis_usage_error(
                    f"{item.nodeid} is a function that returns a Hypothesis strategy, "
                    "but pytest has collected it as a test function.  This is useless "
                    "as the function body will never be executed.  To define a test "
                    "function, use @given instead of @composite."
                )
            message = "Using `@%s` on a test without `@given` is completely pointless."
            for name, attribute in [
                ("example", "hypothesis_explicit_examples"),
                ("seed", "_hypothesis_internal_use_seed"),
                ("settings", "_hypothesis_internal_settings_applied"),
                ("reproduce_example", "_hypothesis_internal_use_reproduce_failure"),
            ]:
                if hasattr(item.obj, attribute):
                    from hypothesis.errors import InvalidArgument

                    raise_hypothesis_usage_error(message % (name,))
            yield
        else:
            from hypothesis import HealthCheck, settings as Settings
            from hypothesis.internal.escalation import current_pytest_item
            from hypothesis.internal.healthcheck import fail_health_check
            from hypothesis.reporting import with_reporter
            from hypothesis.statistics import collector, describe_statistics

            # Retrieve the settings for this test from the test object, which
            # is normally a Hypothesis wrapped_test wrapper. If this doesn't
            # work, the test object is probably something weird
            # (e.g. a stateful test wrapper), so we skip the function-scoped
            # fixture check.
            settings = getattr(
                item.obj, "_hypothesis_internal_use_settings", Settings.default
            )

            # Check for suspicious use of function-scoped fixtures, but only
            # if the corresponding health check is not suppressed.
            fixture_params = False
            if not set(settings.suppress_health_check).issuperset(
                {HealthCheck.function_scoped_fixture, HealthCheck.differing_executors}
            ):
                # Warn about function-scoped fixtures, excluding autouse fixtures because
                # the advice is probably not actionable and the status quo seems OK...
                # See https://github.com/HypothesisWorks/hypothesis/issues/377 for details.
                argnames = None
                for fx_defs in item._request._fixturemanager.getfixtureinfo(
                    node=item, func=item.function, cls=None
                ).name2fixturedefs.values():
                    if argnames is None:
                        argnames = frozenset(signature(item.function).parameters)
                    for fx in fx_defs:
                        fixture_params |= bool(fx.params)
                        if fx.argname in argnames:
                            active_fx = item._request._get_active_fixturedef(fx.argname)
                            if active_fx.scope == "function":
                                fail_health_check(
                                    settings,
                                    _FIXTURE_MSG.format(fx.argname, item.nodeid),
                                    HealthCheck.function_scoped_fixture,
                                )

            if fixture_params or (item.get_closest_marker("parametrize") is not None):
                # Disable the differing_executors health check due to false alarms:
                # see https://github.com/HypothesisWorks/hypothesis/issues/3733
                from hypothesis import settings as Settings

                fn = getattr(item.obj, "__func__", item.obj)
                fn._hypothesis_internal_use_settings = Settings(
                    parent=settings,
                    suppress_health_check={HealthCheck.differing_executors}
                    | set(settings.suppress_health_check),
                )

                # Give every parametrized test invocation a unique database key
                key = item.nodeid.encode()
                item.obj.hypothesis.inner_test._hypothesis_internal_add_digest = key

            store = StoringReporter(item.config)

            def note_statistics(stats):
                stats["nodeid"] = item.nodeid
                item.hypothesis_statistics = describe_statistics(stats)

            with collector.with_value(note_statistics):
                with with_reporter(store):
                    with current_pytest_item.with_value(item):
                        yield
            if store.results:
                item.hypothesis_report_information = list(store.results)

    def _stash_get(config, key, default):
        if hasattr(config, "stash"):
            # pytest 7
            return config.stash.get(key, default)
        elif hasattr(config, "_store"):
            # pytest 5.4
            return config._store.get(key, default)
        else:
            return getattr(config, key, default)

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        report = (yield).get_result()
        if hasattr(item, "hypothesis_report_information"):
            report.sections.append(
                ("Hypothesis", "\n".join(item.hypothesis_report_information))
            )
        if report.when != "teardown":
            return

        terminalreporter = item.config.pluginmanager.getplugin("terminalreporter")

        if hasattr(item, "hypothesis_statistics"):
            stats = item.hypothesis_statistics
            stats_base64 = base64.b64encode(stats.encode()).decode()

            name = "hypothesis-statistics-" + item.nodeid

            # Include Hypothesis information in the junit XML report.
            #
            # Note that when `pytest-xdist` is enabled, `xml_key` is not present in the
            # stash, so we don't add anything to the junit XML report in that scenario.
            # https://github.com/pytest-dev/pytest/issues/7767#issuecomment-1082436256
            xml = _stash_get(item.config, xml_key, None)
            if xml:
                xml.add_global_property(name, stats_base64)
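            # A consumer of the junit XML can recover the readable statistics by
            # reversing the encoding above, e.g. base64.b64decode(value).decode()
            # where `value` is the global property read back from the report.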

            # If there's a terminal report, include our summary stats for each test
            if terminalreporter is not None:
                report.__dict__[STATS_KEY] = stats

            # If there's an HTML report, include our summary stats for each test
            pytest_html = item.config.pluginmanager.getplugin("html")
            if pytest_html is not None:  # pragma: no cover
                report.extra = [
                    *getattr(report, "extra", []),
                    pytest_html.extras.text(stats, name="Hypothesis stats"),
                ]

        # This doesn't intrinsically have anything to do with the terminalreporter;
        # we're just cargo-culting a way to get strings back to a single function
        # even if the test were distributed with pytest-xdist.
        failing_examples = getattr(item, FAILING_EXAMPLES_KEY, None)
        if failing_examples and terminalreporter is not None:
            try:
                from hypothesis.extra._patching import FAIL_MSG, get_patch_for
            except ImportError:
                return
            # We'll save this as a triple of [filename, hunk_before, hunk_after].
            triple = get_patch_for(item.obj, [(x, FAIL_MSG) for x in failing_examples])
            if triple is not None:
                report.__dict__[FAILING_EXAMPLES_KEY] = json.dumps(triple)

    def pytest_terminal_summary(terminalreporter):
        failing_examples = []
        print_stats = terminalreporter.config.getoption(PRINT_STATISTICS_OPTION)
        if print_stats:
            terminalreporter.section("Hypothesis Statistics")
        for reports in terminalreporter.stats.values():
            for report in reports:
                stats = report.__dict__.get(STATS_KEY)
                if stats and print_stats:
                    terminalreporter.write_line(stats + "\n\n")
                fex = report.__dict__.get(FAILING_EXAMPLES_KEY)
                if fex:
                    failing_examples.append(json.loads(fex))

        from hypothesis.internal.observability import _WROTE_TO

        if _WROTE_TO:
            terminalreporter.section("Hypothesis")
            for fname in sorted(_WROTE_TO):
                terminalreporter.write_line(f"observations written to {fname}")

        if failing_examples:
            # This must have been imported already to write the failing examples
            from hypothesis.extra._patching import gc_patches, make_patch, save_patch

            patch = make_patch(failing_examples)
            try:
                gc_patches()
                fname = save_patch(patch)
            except Exception:
                # fail gracefully if we hit any filesystem or permissions problems
                return
            if not _WROTE_TO:
                terminalreporter.section("Hypothesis")
            terminalreporter.write_line(
                f"`git apply {fname}` to add failing examples to your code."
            )

    def pytest_collection_modifyitems(items):
        if "hypothesis" not in sys.modules:
            return

        from hypothesis.internal.detection import is_hypothesis_test

        for item in items:
            if isinstance(item, pytest.Function) and is_hypothesis_test(item.obj):
                item.add_marker("hypothesis")

    def pytest_sessionstart(session):
        _hypothesis_globals.in_initialization -= 1

    # Monkeypatch some internals to prevent applying @pytest.fixture() to a
    # function which has already been decorated with @hypothesis.given().
    # (the reverse case is already an explicit error in Hypothesis)
    # We do this here so that it catches people on old Pytest versions too.
    from _pytest import fixtures

    def _ban_given_call(self, function):
        if "hypothesis" in sys.modules:
            from hypothesis.internal.detection import is_hypothesis_test

            if is_hypothesis_test(function):
                raise RuntimeError(
                    f"Can't apply @pytest.fixture() to {function.__name__} because "
                    "it is already decorated with @hypothesis.given()"
                )
        return _orig_call(self, function)

    _orig_call = fixtures.FixtureFunctionMarker.__call__
    fixtures.FixtureFunctionMarker.__call__ = _ban_given_call  # type: ignore
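
    # The mistake this monkeypatch catches looks like the following
    # (illustrative; note the decorator order):
    #
    #     @pytest.fixture()
    #     @given(st.integers())   # raises RuntimeError via _ban_given_call
    #     def broken_fixture(n):
    #         ...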


def load():
    """Required for `pluggy` to load a plugin from setuptools entrypoints."""