author    | Nikita Kozlovskiy <nikitka@gmail.com> | 2023-06-02 16:01:55 +0000
committer | nkozlovskiy <nmk@ydb.tech>            | 2023-06-02 19:01:55 +0300
commit    | 3332cdd408c34bd067db2bde8bc0f322e392946b (patch)
tree      | 55b852d30a65cc02e030d13e8279da13551813a4
parent    | 7b0b68668aa4e020c44ab92b637620432c15ecd3 (diff)
download  | ydb-3332cdd408c34bd067db2bde8bc0f322e392946b.tar.gz
ci: test muting
Pull Request resolved: #240
-rw-r--r-- | .github/actions/test/action.yml            |  78
-rw-r--r-- | .github/config/muted_shard.txt             |   3
-rw-r--r-- | .github/config/muted_test.txt              |   6
-rwxr-xr-x | .github/scripts/tests/ctest-postprocess.py |  58
-rwxr-xr-x | .github/scripts/tests/extract-logs.py      | 164
-rwxr-xr-x | .github/scripts/tests/fail-checker.py      |  32
-rwxr-xr-x | .github/scripts/tests/junit-postprocess.py |  63
-rw-r--r-- | .github/scripts/tests/log_parser.py        | 102
-rw-r--r-- | .github/scripts/tests/mute_utils.py        | 114
-rw-r--r-- | .github/workflows/pr_check.yml             |   2
10 files changed, 613 insertions, 9 deletions
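
The two new config files use different formats: `.github/config/muted_test.txt` holds `-Suite::TestMethod` (or bare suite) entries read by `MutedTestCheck`, while `.github/config/muted_shard.txt` lists whole ctest shard names read by `MutedShardCheck`; both classes are added in `.github/scripts/tests/mute_utils.py` below. A minimal usage sketch, not part of the commit — the paths and sample entries are illustrative:

# Illustrative only (not part of the commit): how the checks added below are invoked.
from mute_utils import MutedTestCheck, MutedShardCheck

is_mute_test = MutedTestCheck(".github/config/muted_test.txt")      # entries like "-KqpScanSpilling::SelfJoin"
is_mute_shard = MutedShardCheck(".github/config/muted_shard.txt")   # entries like "ydb-core-blobstorage-dsproxy-ut_2"

print(is_mute_test("KqpScanSpilling", "SelfJoin"))         # True: this test case is muted
print(is_mute_shard("ydb-core-blobstorage-dsproxy-ut_2"))  # True: this ctest shard is muted
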
diff --git a/.github/actions/test/action.yml b/.github/actions/test/action.yml
index 80d6df2f8d..819c29d3ef 100644
--- a/.github/actions/test/action.yml
+++ b/.github/actions/test/action.yml
@@ -43,6 +43,10 @@ runs:
         echo "TESTMO_TOKEN=${{inputs.testman_token}}" >> $GITHUB_ENV
         echo "TESTMO_URL=${{inputs.testman_url}}" >> $GITHUB_ENV
         echo "logfilename=${{inputs.log_suffix}}-ctest-stdout.gz" >> $GITHUB_OUTPUT
+        echo "artifactsdir=$(pwd)/artifacts" >> $GITHUB_OUTPUT
+        echo "testfilterfile=$(pwd)/.github/config/muted_test.txt" >> $GITHUB_OUTPUT
+        echo "testshardfilterfile=$(pwd)/.github/config/muted_shard.txt" >> $GITHUB_OUTPUT
+        echo "logurlprefix=${{inputs.aws_endpoint}}/${{inputs.aws_bucket}}/${{ github.repository }}/${{github.workflow}}/${{github.run_id}}" >> $GITHUB_OUTPUT
     - name: Install Node required for Testmo CLI
       uses: actions/setup-node@v3
       with:
@@ -88,33 +92,86 @@ runs:
         echo "runid=$(cat)" >> $GITHUB_OUTPUT

     - name: Test
+      id: ctest
       shell: bash
       run: |
         cd $WORKDIR/../build/ydb

-        echo "Stdout log (gzip archive): ${{inputs.aws_endpoint}}/${{inputs.aws_bucket}}/${{ github.repository }}/${{github.workflow}}/${{ github.run_id }}/${{steps.init.outputs.logfilename}}" >> $GITHUB_STEP_SUMMARY
+        echo "[Stdout log (gzip archive)](${{steps.init.outputs.logurlprefix}}/${{steps.init.outputs.logfilename}})" >> $GITHUB_STEP_SUMMARY
+        echo "[Testmo](${TESTMO_URL}/automation/runs/view/${{steps.th.outputs.runid}})" >> $GITHUB_STEP_SUMMARY

         # Sed removes coloring from the output
         TMPDIR=$WORKDIR/tmp GTEST_OUTPUT="xml:$TESTREPDIR/unittests/" Y_UNITTEST_OUTPUT="xml:$TESTREPDIR/unittests/" \
         ctest -j28 --timeout 1200 --force-new-ctest-process --output-on-failure \
           --output-junit $TESTREPDIR/suites/ctest_report.xml \
-          -L '${{inputs.test_label_regexp}}' | \
-        sed -e 's/\x1b\[[0-9;]*m//g' | \
-        tee >(gzip --stdout > $WORKDIR/artifacts/${{steps.init.outputs.logfilename}}) | \
-        grep -E '(Test\s*#.*\*\*\*|\[FAIL\])|.*tests passed,.*tests failed out of' | \
-        tee $WORKDIR/short.log
+          -L '${{inputs.test_label_regexp}}' -E "${CTEST_SKIP_SHARDS:-}" | \
+        sed -u -e 's/\x1b\[[0-9;]*m//g' | \
+        tee >(gzip --stdout > ${{steps.init.outputs.artifactsdir}}/${{steps.init.outputs.logfilename}}) | \
+        grep --line-buffered -E '(Test\s*#.*\*\*\*|\[FAIL\])|.*tests passed,.*tests failed out of' | \
+        tee $WORKDIR/short.log || (
+          RC=$?
+          if [ $RC == 8 ]; then
+            echo "ctest returned TEST_ERRORS, recovering.."
+          else
+            exit $RC
+          fi
+        )
+    - name: archive unitest reports (orig)
+      shell: bash
+      run: |
+        tar -C $TESTREPDIR/ -czf ${{steps.init.outputs.artifactsdir}}/reports.tar.gz .
+        ls -la ${{steps.init.outputs.artifactsdir}}/reports.tar.gz
+        echo "[Unittest/CTest XML reports archive](${{steps.init.outputs.logurlprefix}}/reports.tar.gz)" >> $GITHUB_STEP_SUMMARY
+    - name: postprocess junit reports
+      shell: bash
+      run: |
+        echo "::group::junit-postprocess"
+
+        .github/scripts/tests/junit-postprocess.py \
+          --filter-file ${{steps.init.outputs.testfilterfile}} \
+          $TESTREPDIR/unittests/
+
+        echo "::endgroup::"
+
+        echo "::group::ctest-postprocess"
+
+        .github/scripts/tests/ctest-postprocess.py \
+          --filter-file ${{steps.init.outputs.testshardfilterfile}} \
+          --decompress \
+          ${{steps.init.outputs.artifactsdir}}/${{steps.init.outputs.logfilename}} \
+          $TESTREPDIR/suites/ctest_report.xml
+
+        echo "::endgroup::"
+    - name: extract log output
+      shell: bash
+      run: |
+        mkdir ${{steps.init.outputs.artifactsdir}}/logs/
+
+        .github/scripts/tests/extract-logs.py \
+          --write-summary \
+          --url-prefix ${{steps.init.outputs.logurlprefix}}/logs/ \
+          --filter-shard-file ${{steps.init.outputs.testshardfilterfile}} \
+          --filter-test-file ${{steps.init.outputs.testfilterfile}} \
+          --patch-jsuite \
+          --ctest-report $TESTREPDIR/suites/ctest_report.xml \
+          --junit-reports-path $TESTREPDIR/unittests/ \
+          --decompress \
+          ${{steps.init.outputs.artifactsdir}}/${{steps.init.outputs.logfilename}} \
+          ${{steps.init.outputs.artifactsdir}}/logs/
+
     - name: Test history upload results
       if: always() && inputs.testman_token
       shell: bash
       run: |
         testmo automation:run:submit-thread \
           --instance "$TESTMO_URL" --run-id ${{steps.th.outputs.runid}} \
-          --results $TESTREPDIR/unittests/*.xml
+          --results "$TESTREPDIR/unittests/*.xml"

         testmo automation:run:submit-thread \
           --exec-suppress \
           --instance "$TESTMO_URL" --run-id ${{steps.th.outputs.runid}} \
-          --results $TESTREPDIR/suites/*.xml \
+          --results "$TESTREPDIR/suites/*.xml" \
           -- cat $WORKDIR/short.log

         testmo automation:run:complete --instance "$TESTMO_URL" --run-id ${{steps.th.outputs.runid}}
     - name: Upload S3
@@ -124,6 +181,11 @@ runs:
         aws_key_id: ${{inputs.AWS_KEY_ID }}
         aws_secret_access_key: ${{inputs.AWS_KEY_VALUE}}
         aws_bucket: ${{inputs.aws_bucket}}
+        # FIXME: must be constant here
         source_dir: artifacts
         destination_dir: '${{ github.repository }}/${{github.workflow}}/${{ github.run_id }}'
         endpoint: ${{inputs.aws_endpoint}}
+    - name: finish
+      shell: bash
+      run: |
+        .github/scripts/tests/fail-checker.py $TESTREPDIR/unittests/ $TESTREPDIR/suites/
\ No newline at end of file
diff --git a/.github/config/muted_shard.txt b/.github/config/muted_shard.txt
new file mode 100644
index 0000000000..374888cc66
--- /dev/null
+++ b/.github/config/muted_shard.txt
@@ -0,0 +1,3 @@
+ydb-core-blobstorage-dsproxy-ut_2
+ydb-core-blobstorage-dsproxy-ut_3
+ydb-core-blobstorage-dsproxy-ut_4
\ No newline at end of file
diff --git a/.github/config/muted_test.txt b/.github/config/muted_test.txt
new file mode 100644
index 0000000000..c296956ac0
--- /dev/null
+++ b/.github/config/muted_test.txt
@@ -0,0 +1,6 @@
+-KqpFederatedQuery
+-KqpScanSpilling::SelfJoin
+-KqpSpillingFileTests::StartError
+-RetryPolicy::TWriteSession_SeqNoShift
+-TPgCodegen::PgFixedFuncBC
+-TPgCodegen::PgStrFuncBC
diff --git a/.github/scripts/tests/ctest-postprocess.py b/.github/scripts/tests/ctest-postprocess.py
new file mode 100755
index 0000000000..1abe43a7db
--- /dev/null
+++ b/.github/scripts/tests/ctest-postprocess.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+import argparse
+from typing import TextIO
+import xml.etree.ElementTree as ET
+
+from log_parser import ctest_log_parser, log_reader
+from mute_utils import mute_target, remove_failure, update_suite_info, MutedShardCheck
+
+
+def find_targets_to_remove(log_fp):
+    return {target for target, reason, _ in ctest_log_parser(log_fp) if reason == "Failed"}
+
+
+def postprocess_ctest(log_fp: TextIO, ctest_junit_report, is_mute_shard, dry_run):
+    to_remove = find_targets_to_remove(log_fp)
+    tree = ET.parse(ctest_junit_report)
+    root = tree.getroot()
+    n_remove_failures = n_skipped = 0
+
+    for testcase in root.findall("testcase"):
+        target = testcase.attrib["classname"]
+
+        if is_mute_shard(target):
+            if mute_target(testcase):
+                print(f"mute {target}")
+                testcase.set("status", "run")  # CTEST specific
+                n_remove_failures += 1
+                n_skipped += 1
+        elif target in to_remove:
+            print(f"set {target} as passed")
+            n_remove_failures += 1
+            remove_failure(testcase)
+
+    if n_remove_failures:
+        update_suite_info(root, n_remove_failures, n_skipped)
+        print(f"{'(dry-run) ' if dry_run else ''}update {ctest_junit_report}")
+        if not dry_run:
+            tree.write(ctest_junit_report, xml_declaration=True, encoding="UTF-8")
+    else:
+        print("nothing to remove")
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--dry-run", action="store_true", default=False)
+    parser.add_argument("--filter-file", required=False)
+    parser.add_argument("--decompress", action="store_true", default=False, help="decompress ctest log")
+    parser.add_argument("ctest_log", type=str)
+    parser.add_argument("ctest_junit_report")
+    args = parser.parse_args()
+
+    log = log_reader(args.ctest_log, args.decompress)
+    is_mute_shard = MutedShardCheck(args.filter_file)
+    postprocess_ctest(log, args.ctest_junit_report, is_mute_shard, args.dry_run)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.github/scripts/tests/extract-logs.py b/.github/scripts/tests/extract-logs.py
new file mode 100755
index 0000000000..f6b49743aa
--- /dev/null
+++ b/.github/scripts/tests/extract-logs.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python3
+import argparse
+import io
+import os
+import glob
+from xml.etree import ElementTree as ET
+from pathlib import Path
+from typing import List
+from log_parser import ctest_log_parser, parse_yunit_fails, parse_gtest_fails, log_reader
+from mute_utils import MutedTestCheck, MutedShardCheck
+
+
+def make_filename(*parts):
+    return f'{"-".join(parts)}.log'
+
+
+def save_log(err_lines: List[str], out_path: Path, *parts):
+    fn = make_filename(*parts)
+    print(f"write {fn} for {'::'.join(parts)}")
+    with open(out_path.joinpath(fn), "wt") as fp:
+        for line in err_lines:
+            fp.write(f"{line}\n")
+
+    return fn
+
+
+def extract_logs(log_fp: io.StringIO, out_path: Path, url_prefix):
+    # FIXME: memory inefficient because new buffer created every time
+
+    log_urls = []
+    for target, reason, ctest_buf in ctest_log_parser(log_fp):
+        suite_summary = []
+
+        fn = save_log(ctest_buf, out_path, target)
+        log_url = f"{url_prefix}{fn}"
+
+        log_urls.append((target, reason, log_url, suite_summary))
+
+        if not ctest_buf:
+            continue
+
+        first_line = ctest_buf[0]
+        if first_line.startswith("[==========]"):
+            for classname, method, err in parse_gtest_fails(ctest_buf):
+                fn = save_log(err, out_path, classname, method)
+                log_url = f"{url_prefix}{fn}"
+                suite_summary.append((classname, method, log_url))
+        elif first_line.startswith("<-----"):
+            for classname, method, err in parse_yunit_fails(ctest_buf):
+                fn = save_log(err, out_path, classname, method)
+                log_url = f"{url_prefix}{fn}"
+                suite_summary.append((classname, method, log_url))
+        else:
+            pass
+
+    return log_urls
+
+
+def generate_summary(summary, is_mute_shard, is_mute_test):
+    icon = ":floppy_disk:"
+    mute_icon = ":white_check_mark:"
+    text = [
+        "| Test | Status | Muted | Log |",
+        "| ----: | :----: | :---: | --: |",
+    ]
+
+    for target, reason, target_log_url, cases in summary:
+        mute_target = mute_icon if is_mute_shard(target) else ""
+        display_reason = reason if reason != "Failed" else ""
+        text.append(f"| **{target}** | {display_reason} | {mute_target} | [{icon}]({target_log_url}) |")
+        for classname, method, log_url in cases:
+            mute_class = mute_icon if is_mute_test(classname, method) else ""
+            text.append(f"| _{ classname }::{ method }_ | Failed | {mute_class} | [{icon}]({log_url}) |")
+    return text
+
+
+def write_summary(summary, is_mute_shard, is_mute_test):
+    fail_count = sum([len(s[3]) for s in summary])
+    text = generate_summary(summary, is_mute_shard, is_mute_test)
+    with open(os.environ["GITHUB_STEP_SUMMARY"], "at") as fp:
+        fp.write(f"Failed tests log files ({fail_count}):\n")
+        for line in text:
+            fp.write(f"{line}\n")
+
+
+def patch_jsuite(log_urls, ctest_path, unit_paths):
+    def add_link_property(tc, url):
+        props = tc.find("properties")
+        if props is None:
+            props = ET.Element("properties")
+            tc.append(props)
+        props.append(ET.Element("property", dict(name="url:Log", value=url)))
+
+    suite_logs = {}
+    test_logs = {}
+
+    for shard_name, _, log_url, cases in log_urls:
+        suite_logs[shard_name] = log_url
+        for classname, method, test_log_url in cases:
+            test_logs[(classname, method)] = test_log_url
+
+    if ctest_path:
+        tree = ET.parse(ctest_path)
+        root = tree.getroot()
+        changed = False
+        for testcase in root.findall("testcase"):
+            log_url = suite_logs.get(testcase.attrib["classname"])
+            if log_url:
+                add_link_property(testcase, log_url)
+                changed = True
+
+        if changed:
+            print(f"patch {ctest_path}")
+            tree.write(ctest_path, xml_declaration=True, encoding="UTF-8")
+
+    for path in unit_paths:
+        for fn in glob.glob(os.path.join(path, "*.xml")):
+            tree = ET.parse(fn)
+            root = tree.getroot()
+            changed = False
+            for testsuite in root.findall("testsuite"):
+                for testcase in testsuite.findall("testcase"):
+                    cls, method = testcase.attrib["classname"], testcase.attrib["name"]
+                    log_url = test_logs.get((cls, method))
+                    if log_url:
+                        add_link_property(testcase, log_url)
+                        changed = True
+            if changed:
+                print(f"patch {fn}")
+                tree.write(fn, xml_declaration=True, encoding="UTF-8")
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--url-prefix", default="./")
+    parser.add_argument("--decompress", action="store_true", default=False, help="decompress ctest log")
+    parser.add_argument("--write-summary", action="store_true", default=False, help="update github summary")
+    parser.add_argument("--filter-test-file", required=False)
+    parser.add_argument("--filter-shard-file", required=False)
+    parser.add_argument("--patch-jsuite", default=False, action="store_true")
+    parser.add_argument("--ctest-report")
+    parser.add_argument("--junit-reports-path", nargs="*")
+    parser.add_argument("ctest_log")
+    parser.add_argument("out_log_dir")
+
+    args = parser.parse_args()
+
+    log_urls = extract_logs(log_reader(args.ctest_log, args.decompress), Path(args.out_log_dir), args.url_prefix)
+
+    if args.patch_jsuite and log_urls:
+        patch_jsuite(log_urls, args.ctest_report, args.junit_reports_path)
+
+    is_mute_shard = MutedShardCheck(args.filter_shard_file)
+    is_mute_test = MutedTestCheck(args.filter_test_file)
+
+    if args.write_summary:
+        if log_urls:
+            write_summary(log_urls, is_mute_shard, is_mute_test)
+    else:
+        print("\n".join(generate_summary(log_urls, is_mute_shard, is_mute_test)))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.github/scripts/tests/fail-checker.py b/.github/scripts/tests/fail-checker.py
new file mode 100755
index 0000000000..f71f7f117b
--- /dev/null
+++ b/.github/scripts/tests/fail-checker.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+import argparse
+import glob
+import os
+from typing import List
+import xml.etree.ElementTree as ET
+
+
+def check_for_fail(paths: List[str]):
+    for path in paths:
+        for fn in glob.glob(os.path.join(path, "*.xml")):
+            root = ET.parse(fn).getroot()
+            if root.tag != "testsuite":
+                suites = root.findall("testsuite")
+            else:
+                suites = [root]
+
+            for suite in suites:
+                if int(suite.get("failures", 0)) > 0:
+                    print(f"::error::You have failed tests")
+                    raise SystemExit(-1)
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("path", nargs="+", help="jsuite xml reports directories")
+    args = parser.parse_args()
+    check_for_fail(args.path)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.github/scripts/tests/junit-postprocess.py b/.github/scripts/tests/junit-postprocess.py
new file mode 100755
index 0000000000..b704eb02e2
--- /dev/null
+++ b/.github/scripts/tests/junit-postprocess.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+import os
+import glob
+import argparse
+import xml.etree.ElementTree as ET
+from mute_utils import mute_target, update_suite_info, MutedTestCheck
+
+
+def case_iterator(root):
+    for case in root.findall("testcase"):
+        cls, method = case.attrib["classname"], case.attrib["name"]
+        yield case, cls, method
+
+
+def mute_junit(is_mute_test, folder, dry_run):
+    for fn in glob.glob(os.path.join(folder, "*.xml")):
+        tree = ET.parse(fn)
+        root = tree.getroot()
+        total_muted = 0
+        for suite in root.findall("testsuite"):
+            muted_cnt = 0
+            for case, cls, method in case_iterator(suite):
+                if is_mute_test(cls, method):
+                    if mute_target(case):
+                        print(f"mute {cls}::{method}")
+                        muted_cnt += 1
+
+            if muted_cnt:
+                update_suite_info(suite, n_skipped=muted_cnt, n_remove_failures=muted_cnt)
+                total_muted += muted_cnt
+
+        if total_muted:
+            update_suite_info(root, n_skipped=total_muted, n_remove_failures=total_muted)
+
+            print(f"{'(dry-run) ' if dry_run else ''}patch {fn}")
+
+            if not dry_run:
+                tree.write(fn, xml_declaration=True, encoding="UTF-8")
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--filter-file", required=True)
+    parser.add_argument("--dry-run", action="store_true", default=False)
+    parser.add_argument("yunit_path")
+    args = parser.parse_args()
+
+    if not os.path.isdir(args.yunit_path):
+        print(f"{args.yunit_path} is not a directory, exit")
+        raise SystemExit(-1)
+
+    # FIXME: add gtest filter file ?
+    is_mute_test = MutedTestCheck(args.filter_file)
+
+    if not is_mute_test.has_rules:
+        print("nothing to mute")
+        return
+
+    mute_junit(is_mute_test, args.yunit_path, args.dry_run)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.github/scripts/tests/log_parser.py b/.github/scripts/tests/log_parser.py
new file mode 100644
index 0000000000..0e4be2ee2d
--- /dev/null
+++ b/.github/scripts/tests/log_parser.py
@@ -0,0 +1,102 @@
+import gzip
+import re
+from typing import TextIO
+
+
+def log_reader(fn, decompress, errors="backslashreplace"):
+    if decompress:
+        return gzip.open(fn, "rt", errors=errors)
+
+    return open(fn, "rt", errors=errors)
+
+
+def parse_gtest_fails(log):
+    ilog = iter(log)
+    while 1:
+        try:
+            line = next(ilog)
+        except StopIteration:
+            break
+
+        if line.startswith("[ RUN      ]"):
+            buf = []
+            while 1:
+                try:
+                    line = next(ilog)
+                except StopIteration:
+                    break
+
+                if line.startswith("[  FAILED  ]"):
+                    plen = len("[  FAILED  ] ")
+                    classname, method = line[plen:].split(" ")[0].split(".", maxsplit=1)
+                    yield classname, method, buf
+                    break
+                elif line.startswith("[       OK ]"):
+                    break
+                else:
+                    buf.append(line)
+
+
+def parse_yunit_fails(log):
+    i = 0
+    class_method = found_fail = found_exec = buf_start = None
+    while i < len(log):
+        line = log[i]
+
+        if found_fail:
+            if line.startswith(("[exec] ", "-----> ")):
+                cls, method = class_method.split("::")
+                yield cls, method, log[buf_start:i]
+                class_method = found_fail = found_exec = buf_start = None
+        elif found_exec:
+            if line.startswith("[FAIL] "):
+                found_fail = True
+            elif line.startswith("[good] "):
+                found_exec = class_method = buf_start = None
+
+        if not found_exec and line.startswith("[exec] "):
+            class_method = line[7:].rstrip("...")
+            found_exec = True
+            buf_start = i
+        i += 1
+
+    if buf_start is not None:
+        cls, method = class_method.split("::")
+        yield cls, method, log[buf_start:]
+
+
+def ctest_log_parser(fp: TextIO):
+    start_re = re.compile(r"^\s+Start\s+\d+: ")
+    status_re = re.compile(r"^\s*\d+/\d+ Test\s+#\d+: ([^ ]+) [.]+(\D+)")
+    finish_re = re.compile(r"\d+% tests passed")
+
+    buf = []
+    target = reason = None
+
+    while 1:
+        line = fp.readline()
+        if not line:
+            break
+
+        if target:
+            if not (start_re.match(line) or status_re.match(line) or finish_re.match(line)):
+                buf.append(line.rstrip())
+            else:
+                yield target, reason, buf
+                target = reason = None
+                buf = []
+
+        if target is None:
+            if "***" not in line:
+                continue
+
+            m = status_re.match(line)
+
+            if not m:
+                continue
+
+            target = m.group(1)
+            reason = m.group(2).replace("*", "").strip()
+
+    if buf:
+        yield target, reason, buf
diff --git a/.github/scripts/tests/mute_utils.py b/.github/scripts/tests/mute_utils.py
new file mode 100644
index 0000000000..111682e96b
--- /dev/null
+++ b/.github/scripts/tests/mute_utils.py
@@ -0,0 +1,114 @@
+import operator
+import xml.etree.ElementTree as ET
+
+
+class MutedTestCheck:
+    def __init__(self, fn=None):
+        self.classes = set()
+        self.methods = set()
+
+        if fn:
+            self.populate(fn)
+
+    def populate(self, fn):
+        with open(fn, "r") as fp:
+            for line in fp:
+                if line.startswith("-"):
+                    line = line[1:].rstrip()
+                    if "::" in line:
+                        cls, method = line.split("::", maxsplit=1)
+                        self.methods.add((cls, method))
+                    else:
+                        self.classes.add(line)
+
+    def __call__(self, cls, method=None):
+        if cls in self.classes:
+            return True
+
+        if method and (cls, method) in self.methods:
+            return True
+
+        return False
+
+    @property
+    def has_rules(self):
+        return len(self.classes) or len(self.methods)
+
+
+class MutedShardCheck:
+    def __init__(self, fn=None):
+        self.muted = set()
+        if fn:
+            self.populate(fn)
+
+    def populate(self, fn):
+        with open(fn, "rt") as fp:
+            for line in fp:
+                target = line.strip()
+                if target:
+                    self.muted.add(target)
+
+    def __call__(self, target):
+        return target in self.muted
+
+
+def mute_target(node):
+    failure = node.find("failure")
+
+    if failure is None:
+        return False
+
+    skipped = ET.Element("skipped", {"message": failure.attrib["message"]})
+    node.remove(failure)
+    node.append(skipped)
+
+    return True
+
+
+def remove_failure(node):
+    failure = node.find("failure")
+
+    if failure is not None:
+        node.remove(failure)
+        return True
+
+    return False
+
+
+def op_attr(node, attr, op, value):
+    v = int(node.get(attr, 0))
+    node.set(attr, str(op(v, value)))
+
+
+def inc_attr(node, attr, value):
+    return op_attr(node, attr, operator.add, value)
+
+
+def dec_attr(node, attr, value):
+    return op_attr(node, attr, operator.sub, value)
+
+
+def update_suite_info(root, n_remove_failures=None, n_skipped=None):
+    if n_remove_failures:
+        dec_attr(root, "failures", n_remove_failures)
+
+    if n_skipped:
+        inc_attr(root, "skipped", n_skipped)
+
+
+def recalc_suite_info(suite):
+    tests = failures = skipped = 0
+    elapsed = 0.0
+
+    for case in suite.findall("testcase"):
+        tests += 1
+        elapsed += float(case.get("time"))
+        if case.find("skipped"):
+            skipped += 1
+        if case.find("failure"):
+            failures += 1
+
+    suite.set("tests", str(tests))
+    suite.set("failures", str(failures))
+    suite.set("skipped", str(skipped))
+    suite.set("time", str(elapsed))
diff --git a/.github/workflows/pr_check.yml b/.github/workflows/pr_check.yml
index 709e1245f7..f8e3c56870 100644
--- a/.github/workflows/pr_check.yml
+++ b/.github/workflows/pr_check.yml
@@ -1,4 +1,4 @@
-name: PR check
+name: PR-check
 on:
   pull_request_target:
     branches:
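
As a closing aside (not part of the commit), the log grouping done by `ctest_log_parser` above can be sanity-checked locally against a synthetic log; the sample lines below only mimic ctest's output format:

# Illustrative smoke test for log_parser.ctest_log_parser; the input lines are made up.
import io
from log_parser import ctest_log_parser

sample = io.StringIO(
    "        Start  1: ydb-core-foo\n"
    " 1/2 Test  #1: ydb-core-foo .................***Failed    1.00 sec\n"
    "some captured output line\n"
    "50% tests passed, 1 tests failed out of 2\n"
)

for target, reason, buf in ctest_log_parser(sample):
    print(target, reason, buf)  # ydb-core-foo Failed ['some captured output line']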