author     Nikita Kozlovskiy <nikitka@gmail.com>   2023-08-22 20:50:31 +0300
committer  nkozlovskiy <nmk@ydb.tech>              2023-08-22 21:55:34 +0300
commit     e4a985d19c86ab1131bbb93cc24a83132449653b (patch)
tree       2d75edf22232a2700ca430b24328e6c50f68d569
parent     89ce593f3ff3de624f9a0dd5cb352c3cb49c3679 (diff)
download   ydb-e4a985d19c86ab1131bbb93cc24a83132449653b.tar.gz
new test summary badge

Pull Request resolved: #343
-rw-r--r--   .github/actions/test/action.yml                |  61
-rwxr-xr-x   .github/scripts/tests/generate-summary.py      | 204
-rw-r--r--   .github/scripts/tests/junit_utils.py           |   3
-rw-r--r--   .github/scripts/tests/templates/summary.html   |  54
-rw-r--r--   .mapping.json                                  |   1

5 files changed, 228 insertions(+), 95 deletions(-)
diff --git a/.github/actions/test/action.yml b/.github/actions/test/action.yml
index 0c4f3039fcc..c0768a97385 100644
--- a/.github/actions/test/action.yml
+++ b/.github/actions/test/action.yml
@@ -51,6 +51,7 @@ runs:
echo "TESTMO_TOKEN=${{inputs.testman_token}}" >> $GITHUB_ENV
echo "TESTMO_URL=${{inputs.testman_url}}" >> $GITHUB_ENV
echo "ARTIFACTS_DIR=$(pwd)/artifacts" >> $GITHUB_ENV
+ echo "SUMMARY_LINKS=$(mktemp)" >> $GITHUB_ENV
echo "logfilename=${{inputs.log_suffix}}-ctest-stdout.gz" >> $GITHUB_OUTPUT
echo "testfilterfile=$(pwd)/.github/config/muted_test.txt" >> $GITHUB_OUTPUT
echo "testshardfilterfile=$(pwd)/.github/config/muted_shard.txt" >> $GITHUB_OUTPUT
@@ -69,8 +70,8 @@ runs:
host_bucket = %(bucket)s.storage.yandexcloud.net
EOF
echo "S3CMD_CONFIG=$TMPDIR/s3cfg" >> $GITHUB_ENV
- echo "S3_BUCKET_PATH=s3://${{ inputs.aws_bucket }}/${{ github.repository }}/${{github.workflow}}/${{ github.run_id }}/" >> $GITHUB_ENV
- echo "S3_URL_PREFIX=${{inputs.aws_endpoint}}/${{inputs.aws_bucket}}/${{ github.repository }}/${{github.workflow}}/${{github.run_id}}" >> $GITHUB_ENV
+ echo "S3_BUCKET_PATH=s3://${{ inputs.aws_bucket }}/${{ github.repository }}/${{github.workflow}}/${{ github.run_id }}/${{inputs.log_suffix}}/" >> $GITHUB_ENV
+ echo "S3_URL_PREFIX=${{inputs.aws_endpoint}}/${{inputs.aws_bucket}}/${{ github.repository }}/${{github.workflow}}/${{github.run_id}}/${{inputs.log_suffix}}" >> $GITHUB_ENV
env:
aws_key_id: ${{inputs.AWS_KEY_ID }}
aws_secret_access_key: ${{inputs.AWS_KEY_VALUE}}
@@ -122,7 +123,7 @@ runs:
- name: Print test history link
shell: bash
run: |
- echo "[Test history](${TESTMO_URL}/automation/runs/view/${{steps.th.outputs.runid}})" >> $GITHUB_STEP_SUMMARY
+ echo "10 [Test history](${TESTMO_URL}/automation/runs/view/${{steps.th.outputs.runid}})" >> $SUMMARY_LINKS
- name: Run unit tests
id: ctest
@@ -131,7 +132,7 @@ runs:
run: |
cd $WORKDIR/../build/ydb
- echo "[Stdout unittest/ctest log (gzip archive)]($S3_URL_PREFIX/${{steps.init.outputs.logfilename}})" >> $GITHUB_STEP_SUMMARY
+ echo "20 [Unittest log]($S3_URL_PREFIX/${{steps.init.outputs.logfilename}})" >> $SUMMARY_LINKS
# Sed removes coloring from the output
@@ -155,9 +156,7 @@ runs:
if: inputs.run_unit_tests == 'true'
shell: bash
run: |
- tar -C $TESTREPDIR/ -czf $ARTIFACTS_DIR/reports.tar.gz .
- ls -la $ARTIFACTS_DIR/reports.tar.gz
- echo "[Unittest/CTest XML reports archive]($S3_URL_PREFIX/reports.tar.gz)" >> $GITHUB_STEP_SUMMARY
+ tar -C $TESTREPDIR/ -czf $ARTIFACTS_DIR/xml_orig.tar.gz .
- name: postprocess xml reports
if: inputs.run_unit_tests == 'true'
@@ -193,15 +192,11 @@ runs:
$ARTIFACTS_DIR/${{steps.init.outputs.logfilename}} \
$TESTREPDIR/suites/ctest_report.xml
- echo "::endgroup::"
+ tar -C $TESTREPDIR/ -czf $ARTIFACTS_DIR/reports.tar.gz .
- - name: write unittests summary
- if: inputs.run_unit_tests == 'true'
- shell: bash
- run: |
- .github/scripts/tests/generate-summary.py -t "#### CTest run test shard failures" $TESTREPDIR/suites/ctest_report.xml
- .github/scripts/tests/generate-summary.py -t "#### Unittest failures" $TESTREPDIR/unittests/
+ echo "90 [XML reports archive]($S3_URL_PREFIX/reports.tar.gz)" >> $SUMMARY_LINKS
+ echo "::endgroup::"
- name: sync test results to s3
if: always() && inputs.run_unit_tests == 'true'
@@ -231,7 +226,7 @@ runs:
export source_root=$WORKDIR
export build_root=$WORKDIR/../build/
- echo "[Stdout pytest log (gzip archive)]($S3_URL_PREFIX/${{steps.init.outputs.pytest-logfilename}})" >> $GITHUB_STEP_SUMMARY
+ echo "30 [Functional test log]($S3_URL_PREFIX/${{steps.init.outputs.pytest-logfilename}})" >> $SUMMARY_LINKS
source $WORKDIR/ydb/tests/oss/launch/prepare.sh
rm -rf $ARTIFACTS_DIR/pytest/
@@ -265,19 +260,25 @@ runs:
shell: bash
run: |
echo "::group::junit-postprocess"
+
+ # append orig pytest reports
+ if [ -f "$ARTIFACTS_DIR/xml_orig.tar.gz" ]; then
+ echo "add pytest to xml_orig.tar.gz"
+ gzip -d $ARTIFACTS_DIR/xml_orig.tar.gz
+ tar -C $TESTREPDIR/ -rvf $ARTIFACTS_DIR/xml_orig.tar pytest
+ gzip -v $ARTIFACTS_DIR/xml_orig.tar
+ fi
.github/scripts/tests/pytest-postprocess.py \
--filter-file ${{ steps.init.outputs.functestfilterfile }} \
$PYTESTREPDIR/
- echo "::endgroup::"
+ # make archive again with pytest reports
+ tar -C $TESTREPDIR/ -czf $ARTIFACTS_DIR/reports.tar.gz .
+ ls -la $ARTIFACTS_DIR/reports.tar.gz
+ echo "::endgroup::"
- - name: write functional tests summary
- if: always() && inputs.run_functional_tests == 'true'
- shell: bash
- run: |
- .github/scripts/tests/generate-summary.py -t "#### Functional tests failures" $PYTESTREPDIR
- name: Functional tests history upload results
if: always() && inputs.run_functional_tests == 'true' && inputs.testman_token
@@ -294,8 +295,24 @@ runs:
run: |
testmo automation:run:complete --instance "$TESTMO_URL" --run-id ${{steps.th.outputs.runid}}
+ - name: write tests summary
+ if: always()
+ shell: bash
+ run: |
+
+ cat $SUMMARY_LINKS | python3 -c 'import sys; print(" | ".join([v for _, v in sorted([l.strip().split(" ", 1) for l in sys.stdin], key=lambda a: (int(a[0]), a))]))' >> $GITHUB_STEP_SUMMARY
+
+ mkdir $ARTIFACTS_DIR/summary/
+
+ .github/scripts/tests/generate-summary.py \
+ --summary-out-path $ARTIFACTS_DIR/summary/ \
+ --summary-url-prefix $S3_URL_PREFIX/summary/ \
+ "Unittest" unittest.html $TESTREPDIR/unittests \
+ "CTest shard" ctest.html $TESTREPDIR/suites \
+ "Functional" functional.html $PYTESTREPDIR
+
- name: sync test results to s3
- if: always() && inputs.run_functional_tests == 'true'
+ if: always()
shell: bash
run: |
echo "::group::s3-sync"
diff --git a/.github/scripts/tests/generate-summary.py b/.github/scripts/tests/generate-summary.py
index 33d61b01f1a..1dae6aba4b3 100755
--- a/.github/scripts/tests/generate-summary.py
+++ b/.github/scripts/tests/generate-summary.py
@@ -1,93 +1,91 @@
#!/usr/bin/env python3
import argparse
-import os
import dataclasses
-import sys
-from typing import Optional, List
+import os, sys
+from enum import Enum
+from itertools import groupby
+from operator import attrgetter
+from typing import List
+from jinja2 import Environment, FileSystemLoader
from junit_utils import get_property_value, iter_xml_files
+class TestStatus(Enum):
+ PASS = 0
+ FAIL = 1
+ ERROR = 2
+ SKIP = 3
+ MUTE = 4
+
+ def __lt__(self, other):
+ return self.value < other.value
+
+
@dataclasses.dataclass
-class SummaryEntry:
- target: str
- log_url: Optional[str]
- reason = ""
- is_failure: bool = False
- is_error: bool = False
- is_muted: bool = False
- is_skipped: bool = False
+class TestResult:
+ classname: str
+ name: str
+ status: TestStatus
@property
- def display_status(self):
- if self.is_error:
- return "Error"
- elif self.is_failure:
- return "Failure"
- elif self.is_muted:
- return "Muted"
- elif self.is_skipped:
- return "Skipped"
-
- return "?"
-
-
-def parse_junit(folder_or_file):
- result = []
- for fn, suite, case in iter_xml_files(folder_or_file):
- is_failure = case.find("failure") is not None
- is_error = case.find("error") is not None
- is_muted = get_property_value(case, "mute") is not None
- is_skipped = is_muted is False and case.find("skipped") is not None
-
- if any([is_failure, is_muted, is_skipped, is_error]):
- cls, method = case.attrib["classname"], case.attrib["name"]
- log_url = get_property_value(case, "url:Log")
- target = f"{ cls }::{ method }" if cls != method else cls
-
- result.append(
- SummaryEntry(
- target=target,
- log_url=log_url,
- is_skipped=is_skipped,
- is_muted=is_muted,
- is_failure=is_failure,
- is_error=is_error,
- )
- )
- return result
+ def status_display(self):
+ return {
+ TestStatus.PASS: "PASS",
+ TestStatus.FAIL: "FAIL",
+ TestStatus.ERROR: "ERROR",
+ TestStatus.SKIP: "SKIP",
+ TestStatus.MUTE: "MUTE",
+ }[self.status]
+
+ def __str__(self):
+ return f"{self.full_name:<138} {self.status_display}"
+ @property
+ def full_name(self):
+ return f"{self.classname}/{self.name}"
-def generate_summary(summary: List[SummaryEntry]):
- log_icon = ":floppy_disk:"
- mute_icon = ":white_check_mark:"
- text = [
- "| Test | Muted | Log |",
- "| ----: | :---: | --: |",
- ]
+def render_pm(value, url, diff=None):
+ if value:
+ text = f"[{value}]({url})"
+ else:
+ text = str(value)
- for entry in summary:
- if entry.log_url:
- log_url = f"[{log_icon}]({entry.log_url})"
+ if diff is not None and diff != 0:
+ if diff == 0:
+ sign = "±"
+ elif diff < 0:
+ sign = "-"
else:
- log_url = ""
-
- mute_target = mute_icon if entry.is_muted else ""
+ sign = "+"
- text.append(f"| {entry.target} | {mute_target} | {log_url} |")
+ text = f"{text} {sign}{abs(diff)}"
return text
-def write_summary(title, lines: List[str]):
+def render_testlist_html(rows, fn):
+ TEMPLATES_PATH = os.path.join(os.path.dirname(__file__), "templates")
+
+ env = Environment(loader=FileSystemLoader(TEMPLATES_PATH))
+
+ rows.sort(key=attrgetter('full_name'))
+ rows.sort(key=attrgetter('status'), reverse=True)
+
+ rows = groupby(rows, key=attrgetter('status'))
+ content = env.get_template("summary.html").render(test_results=rows)
+
+ with open(fn, 'w') as fp:
+ fp.write(content)
+
+
+def write_summary(lines: List[str]):
summary_fn = os.environ.get("GITHUB_STEP_SUMMARY")
if summary_fn:
fp = open(summary_fn, "at")
else:
fp = sys.stdout
- if title:
- fp.write(f"{title}\n")
for line in lines:
fp.write(f"{line}\n")
fp.write("\n")
@@ -96,18 +94,80 @@ def write_summary(title, lines: List[str]):
fp.close()
+def gen_summary(summary_url_prefix, summary_out_folder, paths, ):
+ summary = [
+ "| | TESTS | PASSED | ERRORS | FAILED | SKIPPED | MUTED[^1] |",
+ "| :--- | ---: | -----: | -----: | -----: | ------: | ----: |",
+ ]
+ for title, html_fn, path in paths:
+ tests = failed = errors = muted = skipped = passed = 0
+
+ test_results = []
+
+ for fn, suite, case in iter_xml_files(path):
+ tests += 1
+ classname, name = case.get("classname"), case.get("name")
+ if case.find("failure") is not None:
+ failed += 1
+ status = TestStatus.FAIL
+ elif case.find("error") is not None:
+ errors += 1
+ status = TestStatus.ERROR
+ elif get_property_value(case, "mute") is not None:
+ muted += 1
+ status = TestStatus.MUTE
+ elif case.find("skipped") is not None:
+ skipped += 1
+ status = TestStatus.SKIP
+ else:
+ passed += 1
+ status = TestStatus.PASS
+
+ test_result = TestResult(classname=classname, name=name, status=status)
+ test_results.append(test_result)
+
+ report_url = f'{summary_url_prefix}{html_fn}'
+
+ render_testlist_html(test_results, os.path.join(summary_out_folder, html_fn))
+
+ summary.append(
+ " | ".join(
+ [
+ title,
+ render_pm(tests, f'{report_url}', 0),
+ render_pm(passed, f'{report_url}#PASS', 0),
+ render_pm(errors, f'{report_url}#ERROR', 0),
+ render_pm(failed, f'{report_url}#FAIL', 0),
+ render_pm(skipped, f'{report_url}#SKIP', 0),
+ render_pm(muted, f'{report_url}#MUTE', 0),
+ ]
+ )
+ )
+
+ github_srv = os.environ.get('GITHUB_SERVER_URL', 'https://github.com')
+ repo = os.environ.get('GITHUB_REPOSITORY', 'ydb-platform/ydb')
+
+ summary.append("\n")
+ summary.append(f"[^1]: All mute rules are defined [here]({github_srv}/{repo}/tree/main/.github/config).")
+
+ write_summary(lines=summary)
+
+
def main():
parser = argparse.ArgumentParser()
- parser.add_argument("-t", "--title")
- parser.add_argument("folder_or_file")
-
+ parser.add_argument("--summary-out-path", required=True)
+ parser.add_argument("--summary-url-prefix", required=True)
+ parser.add_argument("args", nargs="+", metavar="TITLE html_out path")
args = parser.parse_args()
- summary = parse_junit(args.folder_or_file)
+ if len(args.args) % 3 != 0:
+ print("Invalid argument count")
+ raise SystemExit(-1)
+
+ paths = iter(args.args)
+ title_path = list(zip(paths, paths, paths))
- if summary:
- text = generate_summary(summary)
- write_summary(args.title, text)
+ gen_summary(args.summary_url_prefix, args.summary_out_path, title_path)
if __name__ == "__main__":
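
The reworked main() above takes a flat "TITLE html_out path" argument list and regroups it into triples via zip over a single iterator. A standalone sketch of that grouping idiom, with illustrative paths:

    # Zipping one iterator with itself three times yields consecutive triples,
    # which is how main() pairs each report title with its html file and path.
    def group_triples(flat_args):
        if len(flat_args) % 3 != 0:
            raise SystemExit("expected TITLE html_out path triples")
        it = iter(flat_args)
        return list(zip(it, it, it))


    print(group_triples([
        "Unittest", "unittest.html", "reports/unittests",   # illustrative paths
        "CTest shard", "ctest.html", "reports/suites",
    ]))
    # -> [('Unittest', 'unittest.html', 'reports/unittests'),
    #     ('CTest shard', 'ctest.html', 'reports/suites')]
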
diff --git a/.github/scripts/tests/junit_utils.py b/.github/scripts/tests/junit_utils.py
index fd636884cfb..b4ec72e406a 100644
--- a/.github/scripts/tests/junit_utils.py
+++ b/.github/scripts/tests/junit_utils.py
@@ -1,5 +1,6 @@
import os
import glob
+import sys
from xml.etree import ElementTree as ET
@@ -77,7 +78,7 @@ def iter_xml_files(folder_or_file):
try:
tree = ET.parse(fn)
except ET.ParseError as e:
- print(f"Unable to parse {fn}: {e}")
+ print(f"Unable to parse {fn}: {e}", file=sys.stderr)
continue
root = tree.getroot()
diff --git a/.github/scripts/tests/templates/summary.html b/.github/scripts/tests/templates/summary.html
new file mode 100644
index 00000000000..7098edaf131
--- /dev/null
+++ b/.github/scripts/tests/templates/summary.html
@@ -0,0 +1,54 @@
+<html>
+<head>
+ <style>
+ th {
+ text-transform: uppercase;
+ }
+
+ th, td {
+ padding: 5px;
+ }
+
+ table {
+ border-collapse: collapse;
+ }
+
+ span.test_status {
+ font-weight: bold;
+ }
+
+ span.test_fail {
+ color: red;
+ }
+
+ span.test_pass {
+ color: green;
+ }
+
+ span.test_mute {
+ color: blue;
+ }
+ </style>
+</head>
+<body>
+{% for status, rows in test_results %}
+<h1 id="{{ status.name}}">{{ status.name }}</h1>
+<table style="width:90%;" border="1">
+ <thead>
+ <tr>
+ <th>test name</th>
+ <th>status</th>
+ </tr>
+ </thead>
+ <tbody>
+ {% for t in rows %}
+ <tr>
+ <td>{{ t.full_name }}</td>
+ <td><span class="test_status test_{{ t.status_display }}">{{ t.status_display }}</span></td>
+ </tr>
+ {% endfor %}
+ </tbody>
+</table>
+{% endfor %}
+</body>
+</html>
\ No newline at end of file
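
The template above iterates (status, rows) pairs produced by itertools.groupby in render_testlist_html; since groupby only merges adjacent items, the rows are sorted by the grouping key first. A minimal sketch of that render path, using an inline stand-in template and string statuses instead of the real summary.html and TestStatus enum:

    from itertools import groupby
    from operator import itemgetter

    from jinja2 import DictLoader, Environment

    results = [
        {"full_name": "suite/test_a", "status": "PASS"},
        {"full_name": "suite/test_b", "status": "FAIL"},
        {"full_name": "suite/test_c", "status": "PASS"},
    ]

    # Sort by name, then stably by status, then group adjacent rows by status.
    results.sort(key=itemgetter("full_name"))
    results.sort(key=itemgetter("status"), reverse=True)
    grouped = groupby(results, key=itemgetter("status"))

    env = Environment(loader=DictLoader({
        "summary.html": (
            "{% for status, rows in test_results %}"
            "<h1 id=\"{{ status }}\">{{ status }}</h1>\n"
            "{% for t in rows %}<p>{{ t.full_name }} {{ t.status }}</p>\n{% endfor %}"
            "{% endfor %}"
        ),
    }))
    print(env.get_template("summary.html").render(test_results=grouped))
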
diff --git a/.mapping.json b/.mapping.json
index 81f6d1c2e56..a19507b8775 100644
--- a/.mapping.json
+++ b/.mapping.json
@@ -25,6 +25,7 @@
".github/scripts/tests/log_parser.py":"ydb/github_toplevel/.github/scripts/tests/log_parser.py",
".github/scripts/tests/mute_utils.py":"ydb/github_toplevel/.github/scripts/tests/mute_utils.py",
".github/scripts/tests/pytest-postprocess.py":"ydb/github_toplevel/.github/scripts/tests/pytest-postprocess.py",
+ ".github/scripts/tests/templates/summary.html":"ydb/github_toplevel/.github/scripts/tests/templates/summary.html",
".github/workflows/allowed_dirs.yml":"ydb/github_toplevel/.github/workflows/allowed_dirs.yml",
".github/workflows/build_and_test_ondemand.yml":"ydb/github_toplevel/.github/workflows/build_and_test_ondemand.yml",
".github/workflows/build_and_test_provisioned.yml":"ydb/github_toplevel/.github/workflows/build_and_test_provisioned.yml",