# tools/flake8_linter/bin/tests/test_report.py
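"""Tests for the flake8 linter report output: result statuses, message
formatting, and handling of unparseable flake8 output."""
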
import pytest

from . import util

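# Sample flake8 configuration: select E, W, F checks and ignore E122.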
FLAKE8_CONFIG_DATA = """
    [flake8]
    select = E, W, F
    ignore =
        E122,
"""


def test_no_errors():
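    """A run with no flake8 findings is reported as GOOD with an empty message."""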
    test_file = "project/test.py"
    runner = util.LinterRunner()
    runner.create_source_tree(util.DEFAULT_CONFIGS + [test_file])

    run_result = runner.run_test([test_file])

    abs_test_file = runner.abs_source_file_path(test_file)
    file_report = run_result.report_data["report"][abs_test_file]
    assert file_report["status"] == "GOOD"
    assert file_report["message"] == ""
    assert file_report["elapsed"] >= 0.0


def test_skip_markup():
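    """flake8 output containing only markup tokens ([[bad]]/[[rst]]) is treated as clean: the report stays GOOD."""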
    test_file = "project/test.py"
    flake8_result = """
        [[bad]]
        [[rst]]
    """

    runner = util.LinterRunner()
    runner.create_source_tree(util.DEFAULT_CONFIGS + [test_file])

    run_result = runner.run_test([test_file], flake8_result=flake8_result)

    abs_test_file = runner.abs_source_file_path(test_file)
    file_report = run_result.report_data["report"][abs_test_file]
    assert file_report["status"] == "GOOD"
    assert file_report["message"] == ""
    assert file_report["elapsed"] >= 0.0


@pytest.mark.parametrize(
    "errors",
    [
        [("10", "F401", "Error with row number only")],
        [("10:20", "F401", "Error with row and column numbers")],
        [
            ("10", "F401", "Multiple errors: the first error"),
            ("20", "F402", "Multiple errors: the second error"),
        ],
    ],
)
def test_error_formatting(errors):
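    """Each flake8 finding is rendered as one markup-formatted line in the FAIL report message."""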
    test_file = "project/test.py"
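    # Emulate flake8 output: one "<path>:<pos>: [<code>] <text>" line per error, wrapped in markup.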
    flake8_result = "[[bad]]\n"
    for file_pos, code, text in errors:
        flake8_result += f"{{test_dir}}/{test_file}:{file_pos}: [{code}] {text}\n"
    flake8_result += "[[rst]]\n"

    runner = util.LinterRunner()
    runner.create_source_tree(util.DEFAULT_CONFIGS + [test_file])

    run_result = runner.run_test([test_file], flake8_result=flake8_result)

    abs_test_file = runner.abs_source_file_path(test_file)
    file_report = run_result.report_data["report"][abs_test_file]
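    # Build the expected markup-formatted message line for every reported error.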
    expected_message_lines = []
    for file_pos, code, text in errors:
        if ":" in file_pos:
            row, col = file_pos.split(":")
            col_with_sep = col + ":"
        else:
            row = file_pos
            col_with_sep = ""
        line = f"[[unimp]]{abs_test_file}[[rst]]:[[alt2]]{row}[[rst]]:[[alt2]]{col_with_sep}[[rst]] [[[alt1]]{code}[[rst]]] [[bad]]{text}[[rst]]"
        expected_message_lines.append(line)

    assert file_report["status"] == "FAIL"
    assert file_report["message"] == "\n".join(expected_message_lines)
    assert file_report["elapsed"] >= 0.0


def test_fail_on_wrong_message():
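    """flake8 output that cannot be parsed makes the linter exit non-zero with a diagnostic on stderr."""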
    test_file = "project/test.py"
    flake8_result = """
        [[bad]]
        Unexpected error message
        [[rst]]
    """

    runner = util.LinterRunner()
    runner.create_source_tree(util.DEFAULT_CONFIGS + [test_file])

    run_result = runner.run_test([test_file], flake8_result=flake8_result)

    assert run_result.linter_run_result.returncode != 0
    assert "Cannot parse flake8 output line" in run_result.linter_run_result.stderr