aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKirill Rysin <35688753+naspirato@users.noreply.github.com>2025-04-15 18:09:03 +0200
committerGitHub <noreply@github.com>2025-04-15 18:09:03 +0200
commit9e56c53c26fef73e9f19922912092c6d9eecfee8 (patch)
treea293ed710baae1f680ac8de23e1df76261ce9731
parent55776321fe124eff96e948c7a97c2dbc62765978 (diff)
downloadydb-9e56c53c26fef73e9f19922912092c6d9eecfee8.tar.gz
Mute and separate test analytics for main and stables (#16964)
-rw-r--r--.github/actions/test_ya/action.yml1
-rw-r--r--.github/scripts/analytics/data_mart_delete_table.py86
-rw-r--r--.github/scripts/analytics/data_mart_queries/muted_test_mart.sql (renamed from .github/scripts/analytics/data_mart_queries/test_results_mart.sql)0
-rw-r--r--.github/scripts/analytics/data_mart_queries/test_history_fast_mart.sql25
-rw-r--r--.github/scripts/analytics/data_mart_queries/test_monitor_mart.sql3
-rw-r--r--.github/scripts/analytics/data_mart_ttl_analog.py102
-rwxr-xr-x.github/scripts/analytics/flaky_tests_history.py7
-rwxr-xr-x.github/scripts/analytics/flaky_tests_history_n_runs.py6
-rwxr-xr-x.github/scripts/analytics/test_history_fast.py41
-rwxr-xr-x.github/scripts/analytics/tests_monitor.py8
-rwxr-xr-x.github/scripts/analytics/upload_testowners.py6
-rwxr-xr-x.github/scripts/tests/create_new_muted_ya.py24
-rwxr-xr-x.github/scripts/tests/generate-summary.py21
-rwxr-xr-x.github/scripts/tests/get_muted_tests.py2
-rw-r--r--.github/scripts/tests/get_test_history.py78
-rw-r--r--.github/scripts/tests/templates/summary.html231
-rwxr-xr-x.github/scripts/tests/update_mute_issues.py25
-rw-r--r--.github/workflows/collect_analytics.yml71
-rw-r--r--.github/workflows/collect_analytics_fast.yml9
-rw-r--r--.github/workflows/update_muted_ya.yml124
-rw-r--r--.github/workflows/validate_pr_description.yml2
-rw-r--r--.github/workflows/weekly_analytic.yml28
22 files changed, 720 insertions, 180 deletions
diff --git a/.github/actions/test_ya/action.yml b/.github/actions/test_ya/action.yml
index 9ccf0c29e49..c41fdbbc01b 100644
--- a/.github/actions/test_ya/action.yml
+++ b/.github/actions/test_ya/action.yml
@@ -468,6 +468,7 @@ runs:
--public_dir "$PUBLIC_DIR" \
--public_dir_url "$PUBLIC_DIR_URL" \
--build_preset "$BUILD_PRESET" \
+ --branch "$BRANCH_NAME" \
--status_report_file statusrep.txt \
--is_retry $IS_RETRY \
--is_last_retry $IS_LAST_RETRY \
diff --git a/.github/scripts/analytics/data_mart_delete_table.py b/.github/scripts/analytics/data_mart_delete_table.py
new file mode 100644
index 00000000000..76121b3fc52
--- /dev/null
+++ b/.github/scripts/analytics/data_mart_delete_table.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python3
+
+import argparse
+import ydb
+import configparser
+import os
+
+# Load configuration
+dir = os.path.dirname(__file__)
+config = configparser.ConfigParser()
+config_file_path = f"{dir}/../../config/ydb_qa_db.ini"
+config.read(config_file_path)
+
+DATABASE_ENDPOINT = config["QA_DB"]["DATABASE_ENDPOINT"]
+DATABASE_PATH = config["QA_DB"]["DATABASE_PATH"]
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Delete a YDB table")
+ parser.add_argument("--table_path", required=True, help="Table path and name to delete")
+
+ return parser.parse_args()
+
+def check_table_exists(session, table_path):
+ """Check if table exists"""
+ try:
+ session.describe_table(table_path)
+ return True
+ except ydb.SchemeError:
+ return False
+
+def delete_table(session, table_path):
+ """Delete the specified table."""
+ try:
+ session.drop_table(table_path)
+ print(f"Table '{table_path}' successfully deleted.")
+ return True
+ except ydb.Error as e:
+ print(f"Error deleting table: {e}")
+ return False
+
+def main():
+ args = parse_args()
+
+ if "CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS" not in os.environ:
+ print("Error: Env variable CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS is missing, skipping")
+ return 1
+ else:
+ os.environ["YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS"] = os.environ[
+ "CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS"
+ ]
+
+ table_path = args.table_path
+ full_table_path = f'{DATABASE_PATH}/{table_path}'
+
+ print(f"Connecting to YDB to delete table {full_table_path}")
+
+ with ydb.Driver(
+ endpoint=DATABASE_ENDPOINT,
+ database=DATABASE_PATH,
+ credentials=ydb.credentials_from_env_variables()
+ ) as driver:
+ # Wait until driver is ready
+ driver.wait(timeout=10, fail_fast=True)
+
+ with ydb.SessionPool(driver) as pool:
+ # Проверяем существование таблицы
+ def check_and_delete(session):
+ exists = check_table_exists(session, full_table_path)
+ if exists:
+ return delete_table(session, full_table_path)
+ else:
+ print(f"Table '{full_table_path}' does not exist.")
+ return False
+
+ result = pool.retry_operation_sync(check_and_delete)
+
+ if result:
+ print(f"Table {full_table_path} has been deleted successfully.")
+ return 0
+ else:
+ print(f"No table was deleted.")
+ return 1
+
+if __name__ == "__main__":
+ exit_code = main()
+ exit(exit_code)
diff --git a/.github/scripts/analytics/data_mart_queries/test_results_mart.sql b/.github/scripts/analytics/data_mart_queries/muted_test_mart.sql
index 3e7c3d31619..3e7c3d31619 100644
--- a/.github/scripts/analytics/data_mart_queries/test_results_mart.sql
+++ b/.github/scripts/analytics/data_mart_queries/muted_test_mart.sql
diff --git a/.github/scripts/analytics/data_mart_queries/test_history_fast_mart.sql b/.github/scripts/analytics/data_mart_queries/test_history_fast_mart.sql
new file mode 100644
index 00000000000..8245527f336
--- /dev/null
+++ b/.github/scripts/analytics/data_mart_queries/test_history_fast_mart.sql
@@ -0,0 +1,25 @@
+SELECT
+ build_type,
+ job_name,
+ job_id,
+ commit,
+ branch,
+ pull,
+ run_timestamp,
+ test_id,
+ suite_folder,
+ test_name,
+ cast(suite_folder || '/' || test_name as UTF8) as full_name,
+ duration,
+ status,
+ cast(String::ReplaceAll(status_description, ';;', '\n')as Utf8) as status_description ,
+ owners
+ FROM `test_results/test_runs_column` as all_data
+ WHERE
+ run_timestamp >= CurrentUtcDate() - Interval("P1D")
+ and String::Contains(test_name, '.flake8') = FALSE
+ and (CASE
+ WHEN String::Contains(test_name, 'chunk chunk') OR String::Contains(test_name, 'chunk+chunk') THEN TRUE
+ ELSE FALSE
+ END) = FALSE
+ and (branch = 'main' or branch like 'stable-%')
diff --git a/.github/scripts/analytics/data_mart_queries/test_monitor_mart.sql b/.github/scripts/analytics/data_mart_queries/test_monitor_mart.sql
index ebf1793f836..4383be24034 100644
--- a/.github/scripts/analytics/data_mart_queries/test_monitor_mart.sql
+++ b/.github/scripts/analytics/data_mart_queries/test_monitor_mart.sql
@@ -36,10 +36,9 @@ SELECT
END as is_muted_or_skipped
FROM `test_results/analytics/tests_monitor`
WHERE date_window >= CurrentUtcDate() - 30 * Interval("P1D")
-and branch = 'main'
+and ( branch = 'main' or branch like 'stable-%')
and is_test_chunk = 0
and (CASE
WHEN is_muted = 1 OR (state = 'Skipped' AND days_in_state > 14) THEN TRUE
ELSE FALSE
END ) = TRUE
-
diff --git a/.github/scripts/analytics/data_mart_ttl_analog.py b/.github/scripts/analytics/data_mart_ttl_analog.py
new file mode 100644
index 00000000000..9d80406ca0c
--- /dev/null
+++ b/.github/scripts/analytics/data_mart_ttl_analog.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python3
+
+import argparse
+import ydb
+import configparser
+import os
+import time
+
+# Load configuration
+dir = os.path.dirname(__file__)
+config = configparser.ConfigParser()
+config_file_path = f"{dir}/../../config/ydb_qa_db.ini"
+config.read(config_file_path)
+
+DATABASE_ENDPOINT = config["QA_DB"]["DATABASE_ENDPOINT"]
+DATABASE_PATH = config["QA_DB"]["DATABASE_PATH"]
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Delete old records from YDB table")
+ parser.add_argument("--table-path", required=True, help="Table path and name")
+ parser.add_argument("--timestamp-field", required=True, help="Name of the timestamp field")
+ parser.add_argument("--delete-interval", required=True, help="Interval to delete records older than, in ISO 8601 format (https://en.wikipedia.org/wiki/ISO_8601#Durations) without 'P'")
+
+ return parser.parse_args()
+
+def delete_old_records(session, full_table_path, timestamp_field, delete_interval):
+ """Delete records older than the specified interval."""
+ # First, count the number of records that will be deleted
+ count_query = f"""
+ SELECT COUNT(*) as count
+ FROM `{full_table_path}`
+ WHERE `{timestamp_field}` < CurrentUtcDate() - Interval("P{delete_interval}")
+ """
+
+ print(f"Counting records to delete...")
+ result_sets = session.transaction().execute(count_query)
+ row_count = result_sets[0].rows[0].count
+
+ if row_count == 0:
+ print("No records to delete.")
+ return 0
+
+ print(f"Found {row_count} records older than {delete_interval}.")
+
+ # Now perform the delete operation
+ delete_query = f"""
+ DELETE FROM `{full_table_path}`
+ WHERE `{timestamp_field}` < CurrentUtcDate() - Interval("P{delete_interval}")
+ """
+
+ print(f"Executing DELETE query: {delete_query}")
+ start_time = time.time()
+ session.transaction().execute(delete_query, commit_tx=True)
+ end_time = time.time()
+
+ print(f"Deleted {row_count} records in {end_time - start_time:.2f} seconds.")
+ return row_count
+
+def main():
+ args = parse_args()
+
+ if "CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS" not in os.environ:
+ print("Error: Env variable CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS is missing, skipping")
+ return 1
+ else:
+ os.environ["YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS"] = os.environ[
+ "CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS"
+ ]
+
+ table_path = args.table_path
+ full_table_path = f'{DATABASE_PATH}/{table_path}'
+ timestamp_field = args.timestamp_field
+ delete_interval = args.delete_interval
+
+ print(f"Connecting to YDB to delete records from {full_table_path}")
+ print(f"Will delete records where {timestamp_field} < CurrentUtcDate() - Interval(\"P{delete_interval}\")")
+
+ with ydb.Driver(
+ endpoint=DATABASE_ENDPOINT,
+ database=DATABASE_PATH,
+ credentials=ydb.credentials_from_env_variables()
+ ) as driver:
+ # Wait until driver is ready
+ driver.wait(timeout=10, fail_fast=True)
+
+ with ydb.SessionPool(driver) as pool:
+ try:
+ def transaction_delete(session):
+ return delete_old_records(session, full_table_path, timestamp_field, delete_interval)
+
+ deleted_count = pool.retry_operation_sync(transaction_delete)
+
+ print(f"Successfully deleted old records from {full_table_path}")
+ print(f"Total records deleted: {deleted_count}")
+ return 0
+ except ydb.Error as e:
+ print(f"Error deleting records: {e}")
+ return 1
+
+if __name__ == "__main__":
+ exit_code = main()
+ exit(exit_code)
diff --git a/.github/scripts/analytics/flaky_tests_history.py b/.github/scripts/analytics/flaky_tests_history.py
index d1c1c637af4..29b55d1dc11 100755
--- a/.github/scripts/analytics/flaky_tests_history.py
+++ b/.github/scripts/analytics/flaky_tests_history.py
@@ -78,8 +78,8 @@ def main():
parser = argparse.ArgumentParser()
parser.add_argument('--days-window', default=1, type=int, help='how many days back we collecting history')
- parser.add_argument('--build_type',choices=['relwithdebinfo', 'release-asan'], default='relwithdebinfo', type=str, help='build : relwithdebinfo or release-asan')
- parser.add_argument('--branch', default='main',choices=['main'], type=str, help='branch')
+ parser.add_argument('--build_type', default='relwithdebinfo', type=str, help='build types')
+ parser.add_argument('--branch', default='main', type=str, help='branch')
args, unknown = parser.parse_known_args()
history_for_n_day = args.days_window
@@ -203,6 +203,9 @@ def main():
and job_name in (
'Nightly-run',
'Regression-run',
+ 'Regression-run_Large',
+ 'Regression-run_Small_and_Medium',
+ 'Regression-run_compatibility',
'Regression-whitelist-run',
'Postcommit_relwithdebinfo',
'Postcommit_asan'
diff --git a/.github/scripts/analytics/flaky_tests_history_n_runs.py b/.github/scripts/analytics/flaky_tests_history_n_runs.py
index 1c4787a45c5..c35046374e2 100755
--- a/.github/scripts/analytics/flaky_tests_history_n_runs.py
+++ b/.github/scripts/analytics/flaky_tests_history_n_runs.py
@@ -203,6 +203,9 @@ def main():
and job_name in (
'Nightly-run',
'Regression-run',
+ 'Regression-run_Large',
+ 'Regression-run_Small_and_Medium',
+ 'Regression-run_compatibility',
'Regression-whitelist-run',
'Postcommit_relwithdebinfo',
'Postcommit_asan'
@@ -227,6 +230,9 @@ def main():
and job_name in (
'Nightly-run',
'Regression-run',
+ 'Regression-run_Large',
+ 'Regression-run_Small_and_Medium',
+ 'Regression-run_compatibility',
'Regression-whitelist-run',
'Postcommit_relwithdebinfo',
'Postcommit_asan'
diff --git a/.github/scripts/analytics/test_history_fast.py b/.github/scripts/analytics/test_history_fast.py
index cf14a383637..7dc78b484f1 100755
--- a/.github/scripts/analytics/test_history_fast.py
+++ b/.github/scripts/analytics/test_history_fast.py
@@ -19,6 +19,17 @@ def drop_table(session, table_path):
session.execute_scheme(f"DROP TABLE IF EXISTS `{table_path}`;")
+def check_table_exists(session, table_path):
+ """Check if table exists"""
+ try:
+ session.describe_table(table_path)
+ print(f"Table '{table_path}' already exists.")
+ return True
+ except ydb.SchemeError:
+ print(f"Table '{table_path}' does not exist.")
+ return False
+
+
def create_test_history_fast_table(session, table_path):
print(f"> Creating table: '{table_path}'")
session.execute_scheme(f"""
@@ -38,7 +49,7 @@ def create_test_history_fast_table(session, table_path):
`status` Utf8,
`status_description` Utf8,
`owners` Utf8,
- PRIMARY KEY (`full_name`, `run_timestamp`, `job_name`, `branch`, `build_type`, test_id)
+ PRIMARY KEY (`run_timestamp`, `full_name`, `job_name`, `branch`, `build_type`, test_id)
)
PARTITION BY HASH(run_timestamp)
WITH (
@@ -74,7 +85,7 @@ def bulk_upsert(table_client, table_path, rows):
def get_missed_data_for_upload(driver):
results = []
query = f"""
- SELECT
+ SELECT
build_type,
job_name,
job_id,
@@ -96,8 +107,14 @@ def get_missed_data_for_upload(driver):
) as fast_data_missed
ON all_data.run_timestamp = fast_data_missed.run_timestamp
WHERE
- all_data.run_timestamp >= CurrentUtcDate() - 6*Interval("P1D") AND
- fast_data_missed.run_timestamp is NULL
+ all_data.run_timestamp >= CurrentUtcDate() - 6*Interval("P1D")
+ and String::Contains(all_data.test_name, '.flake8') = FALSE
+ and (CASE
+ WHEN String::Contains(all_data.test_name, 'chunk chunk') OR String::Contains(all_data.test_name, 'chunk+chunk') THEN TRUE
+ ELSE FALSE
+ END) = FALSE
+ and (all_data.branch = 'main' or all_data.branch like 'stable-%')
+ and fast_data_missed.run_timestamp is NULL
"""
scan_query = ydb.ScanQuery(query, {})
@@ -127,7 +144,8 @@ def main():
]
table_path = "test_results/analytics/test_history_fast"
- batch_size = 50000
+ full_table_path = f'{DATABASE_PATH}/{table_path}'
+ batch_size = 1000
with ydb.Driver(
endpoint=DATABASE_ENDPOINT,
@@ -136,13 +154,24 @@ def main():
) as driver:
driver.wait(timeout=10, fail_fast=True)
with ydb.SessionPool(driver) as pool:
+ # Проверяем существование таблицы и создаем её если нужно
+ def check_and_create_table(session):
+ exists = check_table_exists(session, full_table_path)
+ if not exists:
+ create_test_history_fast_table(session, full_table_path)
+ return True
+ return exists
+
+ pool.retry_operation_sync(check_and_create_table)
+
+ # Продолжаем с основной логикой скрипта
prepared_for_upload_rows = get_missed_data_for_upload(driver)
print(f'Preparing to upsert: {len(prepared_for_upload_rows)} rows')
if prepared_for_upload_rows:
for start in range(0, len(prepared_for_upload_rows), batch_size):
batch_rows_for_upload = prepared_for_upload_rows[start:start + batch_size]
print(f'upserting: {start}-{start + len(batch_rows_for_upload)}/{len(prepared_for_upload_rows)} rows')
- bulk_upsert(driver.table_client, f'{DATABASE_PATH}/{table_path}', batch_rows_for_upload)
+ bulk_upsert(driver.table_client, full_table_path, batch_rows_for_upload)
print('Tests uploaded')
else:
print('Nothing to upload')
diff --git a/.github/scripts/analytics/tests_monitor.py b/.github/scripts/analytics/tests_monitor.py
index 89c2489ad3a..fc23a4c0ed6 100755
--- a/.github/scripts/analytics/tests_monitor.py
+++ b/.github/scripts/analytics/tests_monitor.py
@@ -283,12 +283,12 @@ def main():
parser.add_argument('--days-window', default=1, type=int, help='how many days back we collecting history')
parser.add_argument(
'--build_type',
- choices=['relwithdebinfo', 'release-asan'],
+ choices=['relwithdebinfo', 'release-asan', 'release-tsan', 'release-msan'],
default='relwithdebinfo',
type=str,
- help='build : relwithdebinfo or release-asan',
+ help='build type',
)
- parser.add_argument('--branch', default='main', choices=['main'], type=str, help='branch')
+ parser.add_argument('--branch', default='main', type=str, help='branch')
parser.add_argument(
'--concurent',
@@ -325,7 +325,7 @@ def main():
tc_settings = ydb.TableClientSettings().with_native_date_in_result_sets(enabled=False)
table_client = ydb.TableClient(driver, tc_settings)
base_date = datetime.datetime(1970, 1, 1)
- default_start_date = datetime.date(2024, 11, 1)
+ default_start_date = datetime.date(2025, 2, 1)
today = datetime.date.today()
table_path = f'test_results/analytics/tests_monitor'
diff --git a/.github/scripts/analytics/upload_testowners.py b/.github/scripts/analytics/upload_testowners.py
index 05cae9b04f0..ed20ffcbf03 100755
--- a/.github/scripts/analytics/upload_testowners.py
+++ b/.github/scripts/analytics/upload_testowners.py
@@ -1,11 +1,8 @@
#!/usr/bin/env python3
-import argparse
import configparser
-import datetime
import os
import posixpath
-import traceback
import time
import ydb
from collections import Counter
@@ -95,6 +92,9 @@ def main():
and job_name in (
'Nightly-run',
'Regression-run',
+ 'Regression-run_Large',
+ 'Regression-run_Small_and_Medium',
+ 'Regression-run_compatibility',
'Regression-whitelist-run',
'Postcommit_relwithdebinfo',
'Postcommit_asan'
diff --git a/.github/scripts/tests/create_new_muted_ya.py b/.github/scripts/tests/create_new_muted_ya.py
index 8642a93398d..e2aae4931d9 100755
--- a/.github/scripts/tests/create_new_muted_ya.py
+++ b/.github/scripts/tests/create_new_muted_ya.py
@@ -28,8 +28,8 @@ DATABASE_ENDPOINT = config["QA_DB"]["DATABASE_ENDPOINT"]
DATABASE_PATH = config["QA_DB"]["DATABASE_PATH"]
-def execute_query(driver):
- query_string = '''
+def execute_query(driver, branch='main', build_type='relwithdebinfo'):
+ query_string = f'''
SELECT * from (
SELECT data.*,
CASE WHEN new_flaky.full_name IS NOT NULL THEN True ELSE False END AS new_flaky_today,
@@ -101,7 +101,7 @@ def execute_query(driver):
and data.build_type = deleted.build_type
and data.branch = deleted.branch
)
- where date_window = CurrentUtcDate() and branch = 'main'
+ where date_window = CurrentUtcDate() and branch = '{branch}' and build_type = '{build_type}'
'''
@@ -178,8 +178,8 @@ def apply_and_add_mutes(all_tests, output_path, mute_check):
for test in all_tests
if test.get('days_in_state') >= 1
and test.get('flaky_today')
- and (test.get('pass_count') + test.get('fail_count')) >= 3
- and test.get('fail_count') > 2
+ and (test.get('pass_count') + test.get('fail_count')) >= 2
+ and test.get('fail_count') >= 2
and test.get('fail_count')/(test.get('pass_count') + test.get('fail_count')) > 0.2 # <=80% success rate
)
flaky_tests = sorted(flaky_tests)
@@ -191,8 +191,8 @@ def apply_and_add_mutes(all_tests, output_path, mute_check):
for test in all_tests
if test.get('days_in_state') >= 1
and test.get('flaky_today')
- and (test.get('pass_count') + test.get('fail_count')) >= 3
- and test.get('fail_count') > 2
+ and (test.get('pass_count') + test.get('fail_count')) >=2
+ and test.get('fail_count') >= 2
and test.get('fail_count')/(test.get('pass_count') + test.get('fail_count')) > 0.2 # <=80% success rate
)
## тесты может запускаться 1 раз в день. если за последние 7 дней набирается трешход то мьютим
@@ -356,9 +356,9 @@ def create_mute_issues(all_tests, file_path):
print(f"Writing results to {file_path}")
with open(file_path, 'w') as f:
- f.write("```\n")
+ f.write("\n")
f.write("\n".join(results))
- f.write("\n```")
+ f.write("\n")
with open(os.environ['GITHUB_OUTPUT'], 'a') as gh_out:
gh_out.write(f"created_issues_file={file_path}")
@@ -389,7 +389,7 @@ def mute_worker(args):
) as driver:
driver.wait(timeout=10, fail_fast=True)
- all_tests = execute_query(driver)
+ all_tests = execute_query(driver, args.branch)
if args.mode == 'update_muted_ya':
output_path = args.output_folder
os.makedirs(output_path, exist_ok=True)
@@ -407,6 +407,7 @@ if __name__ == "__main__":
update_muted_ya_parser = subparsers.add_parser('update_muted_ya', help='create new muted_ya')
update_muted_ya_parser.add_argument('--output_folder', default=repo_path, required=False, help='Output folder.')
+ update_muted_ya_parser.add_argument('--branch', default='main', help='Branch to get history')
create_issues_parser = subparsers.add_parser(
'create_issues',
@@ -415,7 +416,8 @@ if __name__ == "__main__":
create_issues_parser.add_argument(
'--file_path', default=f'{repo_path}/mute_update/flaky.txt', required=False, help='file path'
)
+ create_issues_parser.add_argument('--branch', default='main', help='Branch to get history')
args = parser.parse_args()
- mute_worker(args) \ No newline at end of file
+ mute_worker(args)
diff --git a/.github/scripts/tests/generate-summary.py b/.github/scripts/tests/generate-summary.py
index 5fd6f20588b..239f24e4e2e 100755
--- a/.github/scripts/tests/generate-summary.py
+++ b/.github/scripts/tests/generate-summary.py
@@ -230,7 +230,7 @@ def render_pm(value, url, diff=None):
return text
-def render_testlist_html(rows, fn, build_preset):
+def render_testlist_html(rows, fn, build_preset, branch):
TEMPLATES_PATH = os.path.join(os.path.dirname(__file__), "templates")
env = Environment(loader=FileSystemLoader(TEMPLATES_PATH), undefined=StrictUndefined)
@@ -273,7 +273,7 @@ def render_testlist_html(rows, fn, build_preset):
tests_names_for_history.append(test.full_name)
try:
- history = get_test_history(tests_names_for_history, last_n_runs, build_preset)
+ history = get_test_history(tests_names_for_history, last_n_runs, build_preset, branch)
except Exception:
print(traceback.format_exc())
@@ -308,7 +308,9 @@ def render_testlist_html(rows, fn, build_preset):
tests=status_test,
has_any_log=has_any_log,
history=history,
- build_preset=buid_preset_params
+ build_preset=build_preset,
+ buid_preset_params=buid_preset_params,
+ branch=branch
)
with open(fn, "w") as fp:
@@ -345,7 +347,7 @@ def get_codeowners_for_tests(codeowners_file_path, tests_data):
tests_data_with_owners.append(test)
-def gen_summary(public_dir, public_dir_url, paths, is_retry: bool, build_preset):
+def gen_summary(public_dir, public_dir_url, paths, is_retry: bool, build_preset, branch):
summary = TestSummary(is_retry=is_retry)
for title, html_fn, path in paths:
@@ -359,7 +361,7 @@ def gen_summary(public_dir, public_dir_url, paths, is_retry: bool, build_preset)
html_fn = os.path.relpath(html_fn, public_dir)
report_url = f"{public_dir_url}/{html_fn}"
- render_testlist_html(summary_line.tests, os.path.join(public_dir, html_fn),build_preset)
+ render_testlist_html(summary_line.tests, os.path.join(public_dir, html_fn),build_preset, branch)
summary_line.add_report(html_fn, report_url)
summary.add_line(summary_line)
@@ -418,6 +420,7 @@ def main():
parser.add_argument("--public_dir_url", required=True)
parser.add_argument("--summary_links", required=True)
parser.add_argument('--build_preset', default="default-linux-x86-64-relwithdebinfo", required=False)
+ parser.add_argument('--branch', default="main", required=False)
parser.add_argument('--status_report_file', required=False)
parser.add_argument('--is_retry', required=True, type=int)
parser.add_argument('--is_last_retry', required=True, type=int)
@@ -434,7 +437,13 @@ def main():
paths = iter(args.args)
title_path = list(zip(paths, paths, paths))
- summary = gen_summary(args.public_dir, args.public_dir_url, title_path, is_retry=bool(args.is_retry),build_preset=args.build_preset)
+ summary = gen_summary(args.public_dir,
+ args.public_dir_url,
+ title_path,
+ is_retry=bool(args.is_retry),
+ build_preset=args.build_preset,
+ branch=args.branch
+ )
write_summary(summary)
if summary.is_failed and not args.is_test_result_ignored:
diff --git a/.github/scripts/tests/get_muted_tests.py b/.github/scripts/tests/get_muted_tests.py
index ff28a7f831a..ce523706d99 100755
--- a/.github/scripts/tests/get_muted_tests.py
+++ b/.github/scripts/tests/get_muted_tests.py
@@ -187,7 +187,7 @@ def mute_applier(args):
for test in all_tests:
testsuite = to_str(test['suite_folder'])
testcase = to_str(test['test_name'])
- test['branch'] = 'main'
+ test['branch'] = args.branch
test['is_muted'] = int(mute_check(testsuite, testcase))
upload_muted_tests(all_tests)
diff --git a/.github/scripts/tests/get_test_history.py b/.github/scripts/tests/get_test_history.py
index 06d4c323725..a3b2e32146b 100644
--- a/.github/scripts/tests/get_test_history.py
+++ b/.github/scripts/tests/get_test_history.py
@@ -16,7 +16,7 @@ DATABASE_ENDPOINT = config["QA_DB"]["DATABASE_ENDPOINT"]
DATABASE_PATH = config["QA_DB"]["DATABASE_PATH"]
-def get_test_history(test_names_array, last_n_runs_of_test_amount, build_type):
+def get_test_history(test_names_array, last_n_runs_of_test_amount, build_type, branch):
if "CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS" not in os.environ:
print(
"Error: Env variable CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS is missing, skipping"
@@ -48,33 +48,64 @@ def get_test_history(test_names_array, last_n_runs_of_test_amount, build_type):
DECLARE $test_names AS List<Utf8>;
DECLARE $rn_max AS Int32;
DECLARE $build_type AS Utf8;
-
- $test_names=[{','.join("'{0}'".format(x) for x in test_names_batch)}];
+ DECLARE $branch AS Utf8;
+
+ $test_names = [{','.join("'{0}'".format(x) for x in test_names_batch)}];
$rn_max = {last_n_runs_of_test_amount};
$build_type = '{build_type}';
+ $branch = '{branch}';
- $tests=(
+ -- Оптимизированный запрос с учетом особенностей YDB
+ $filtered_tests = (
SELECT
- suite_folder ||'/' || test_name as full_name,test_name,build_type, commit, branch, run_timestamp, status, status_description,
+ suite_folder || '/' || test_name AS full_name,
+ test_name,
+ build_type,
+ commit,
+ branch,
+ run_timestamp,
+ status,
+ status_description,
+ job_id,
+ job_name,
ROW_NUMBER() OVER (PARTITION BY test_name ORDER BY run_timestamp DESC) AS rn
FROM
- `test_results/test_runs_column`
- where job_name in (
- 'Nightly-run',
- 'Regression-run',
- 'Regression-whitelist-run',
- 'Postcommit_relwithdebinfo',
- 'Postcommit_asan'
- )
- and build_type = $build_type
- and suite_folder ||'/' || test_name in $test_names
- and status != 'skipped'
+ `test_results/test_runs_column` AS t
+ WHERE
+ t.build_type = $build_type
+ AND t.branch = $branch
+ AND t.job_name IN (
+ 'Nightly-run',
+ 'Regression-run',
+ 'Regression-whitelist-run',
+ 'Postcommit_relwithdebinfo',
+ 'Postcommit_asan'
+ )
+ AND t.status != 'skipped'
+ AND suite_folder || '/' || test_name IN $test_names
);
- select full_name,test_name,build_type, commit, branch, run_timestamp, status, status_description,rn
- from $tests
- WHERE rn <= $rn_max
- ORDER BY test_name, run_timestamp;
+ -- Финальный запрос с ограничением по количеству запусков
+ SELECT
+ full_name,
+ test_name,
+ build_type,
+ commit,
+ branch,
+ run_timestamp,
+ status,
+ status_description,
+ job_id,
+ job_name,
+ rn
+ FROM
+ $filtered_tests
+ WHERE
+ rn <= $rn_max
+ ORDER BY
+ test_name,
+ run_timestamp;
+
"""
query = ydb.ScanQuery(history_query, {})
it = driver.table_client.scan_query(query)
@@ -92,10 +123,13 @@ def get_test_history(test_names_array, last_n_runs_of_test_amount, build_type):
results[row["full_name"].decode("utf-8")] = {}
results[row["full_name"].decode("utf-8")][row["run_timestamp"]] = {
+ "branch": row["branch"],
"status": row["status"],
"commit": row["commit"],
"datetime": datetime.datetime.fromtimestamp(int(row["run_timestamp"] / 1000000)).strftime("%H:%m %B %d %Y"),
- "status_description": row["status_description"],
+ "status_description": row["status_description"].replace(';;','\n'),
+ "job_id": row["job_id"],
+ "job_name": row["job_name"]
}
end_time = time.time()
print(
@@ -104,4 +138,4 @@ def get_test_history(test_names_array, last_n_runs_of_test_amount, build_type):
if __name__ == "__main__":
- get_test_history(test_names_array, last_n_runs_of_test_amount, build_type)
+ get_test_history(test_names_array, last_n_runs_of_test_amount, build_type, branch)
diff --git a/.github/scripts/tests/templates/summary.html b/.github/scripts/tests/templates/summary.html
index a595d1b7e23..e21a31e54b6 100644
--- a/.github/scripts/tests/templates/summary.html
+++ b/.github/scripts/tests/templates/summary.html
@@ -321,6 +321,60 @@
overflow: auto;
}
+ /* Стили для модального окна */
+ #errorModal {
+ display: none;
+ position: fixed;
+ z-index: 1000;
+ left: 0;
+ top: 0;
+ width: 100%;
+ height: 100%;
+ overflow: auto;
+ background-color: rgba(0,0,0,0.5);
+ }
+
+ .modal-content {
+ background-color: #fefefe;
+ margin: 5% auto;
+ padding: 20px;
+ border: 1px solid #888;
+ width: 80%;
+ border-radius: 5px;
+ max-height: 80vh;
+ overflow-y: auto;
+ }
+
+ .close-modal {
+ color: #aaa;
+ float: right;
+ font-size: 28px;
+ font-weight: bold;
+ cursor: pointer;
+ }
+
+ .modal-header {
+ margin-bottom: 15px;
+ padding-bottom: 10px;
+ border-bottom: 1px solid #eee;
+ font-size: smaller;
+ }
+
+ .modal-info {
+ font-size: small;
+ margin-bottom: 15px;
+ line-height: 1.5;
+ }
+
+ .modal-info-item {
+ margin-bottom: 5px;
+ }
+
+ .modal-info-label {
+ font-weight: bold;
+ display: inline-block;
+ width: 80px;
+ }
</style>
<script>
function findParentBySelector(elm, selector) {
@@ -364,7 +418,7 @@
testName = namePieces[0] + '.' + namePieces[1] + '::' + namePieces.slice(2).join('::');
}
- const cmdArg = `./ya make -ttt {{ build_preset }} -F '${testName}' ${path}`;
+ const cmdArg = `./ya make -ttt {{ buid_preset_params }} -F '${testName}' ${path}`;
console.log(cmdArg);
@@ -415,11 +469,11 @@
let [path, testName] = [pieces[1], pieces[2]];
if (success_count + fail_count != 0){
- let url = "https://github.com/ydb-platform/ydb/issues/new?title=Mute "+ encodeURIComponent(path)+"/"+ encodeURIComponent(testName) + "&body=" + encodeURIComponent(path)+"/"+ encodeURIComponent(testName) +"%0A%0A**Add%20line%20to%20[muted_ya.txt](https://github.com/ydb-platform/ydb/blob/main/.github/config/muted_ya.txt):**%0A%60" + encodeURIComponent(path)+" "+ encodeURIComponent(testName)+"%60%0A%0A%20Owner:%20[TEAM:@ydb-platform/"+owner+"](https://github.com/orgs/ydb-platform/teams/"+owner+")%0A%0A**Read%20more%20in%20[mute_rules.md](https://github.com/ydb-platform/ydb/blob/main/.github/config/mute_rules.md)**%20%20%0A%0A**Summary%20history:**%0A%20Success%20rate%20**"+(success_count/(success_count+fail_count)*100)+"%25**%0APass:"+success_count+"%20Fail:"+fail_count+"%20%0A%0A**Test%20run%20history:**%20[link](https://datalens.yandex/34xnbsom67hcq?full_name=" +path+ "/"+ testName+ ")%0A%0AMore%20info%20in%20[dashboard](https://datalens.yandex/4un3zdm0zcnyr)&labels=mute"
+ let url = "https://github.com/ydb-platform/ydb/issues/new?title=Mute "+ encodeURIComponent(path)+"/"+ encodeURIComponent(testName) + "&body=" + encodeURIComponent(path)+"/"+ encodeURIComponent(testName) +"%0A%0A**Add%20line%20to%20[muted_ya.txt](https://github.com/ydb-platform/ydb/blob/{{branch}}/.github/config/muted_ya.txt):**%0A%60" + encodeURIComponent(path)+" "+ encodeURIComponent(testName)+"%60%0A%0A%20Owner:%20[TEAM:@ydb-platform/"+owner+"](https://github.com/orgs/ydb-platform/teams/"+owner+")%0A%0A**Read%20more%20in%20[mute_rules.md](https://github.com/ydb-platform/ydb/blob/{{branch}}/.github/config/mute_rules.md)**%20%20%0A%0A**Summary%20history:**%0A%20Success%20rate%20**"+(success_count/(success_count+fail_count)*100)+"%25**%0APass:"+success_count+"%20Fail:"+fail_count+"%20%0A%0A**Test%20run%20history:**%20[link](https://datalens.yandex/34xnbsom67hcq?branch={{branch}}&full_name=" +path+ "/"+ testName+ ")%0A%0AMore%20info%20in%20[dashboard](https://datalens.yandex/4un3zdm0zcnyr)&labels=mute"
window.open(url, '_blank');
}
else {
- let url = "https://github.com/ydb-platform/ydb/issues/new?title=Mute "+ encodeURIComponent(path)+"/"+ encodeURIComponent(testName) + "&body=" + encodeURIComponent(path)+"/"+ encodeURIComponent(testName) +"%0A%0A**Add%20line%20to%20[muted_ya.txt](https://github.com/ydb-platform/ydb/blob/main/.github/config/muted_ya.txt):**%0A%60" + encodeURIComponent(path)+" "+ encodeURIComponent(testName)+"%60%0A%0A%20Owner:%20[TEAM:@ydb-platform/"+owner+"](https://github.com/orgs/ydb-platform/teams/"+owner+")%0A%0A**Read%20more%20in%20[mute_rules.md](https://github.com/ydb-platform/ydb/blob/main/.github/config/mute_rules.md)**%20%20%0A%0A**Summary%20history:**%0APass:"+success_count+"%20Fail:"+fail_count+"%20%0A%0A**Test%20run%20history:**%20[link](https://datalens.yandex/34xnbsom67hcq?full_name=" +path+ "/"+ testName+ ")%0A%0AMore%20info%20in%20[dashboard](https://datalens.yandex/4un3zdm0zcnyr)&labels=mute"
+ let url = "https://github.com/ydb-platform/ydb/issues/new?title=Mute "+ encodeURIComponent(path)+"/"+ encodeURIComponent(testName) + "&body=" + encodeURIComponent(path)+"/"+ encodeURIComponent(testName) +"%0A%0A**Add%20line%20to%20[muted_ya.txt](https://github.com/ydb-platform/ydb/blob/{{branch}}/.github/config/muted_ya.txt):**%0A%60" + encodeURIComponent(path)+" "+ encodeURIComponent(testName)+"%60%0A%0A%20Owner:%20[TEAM:@ydb-platform/"+owner+"](https://github.com/orgs/ydb-platform/teams/"+owner+")%0A%0A**Read%20more%20in%20[mute_rules.md](https://github.com/ydb-platform/ydb/blob/{{branch}}/.github/config/mute_rules.md)**%20%20%0A%0A**Summary%20history:**%0APass:"+success_count+"%20Fail:"+fail_count+"%20%0A%0A**Test%20run%20history:**%20[link](https://datalens.yandex/34xnbsom67hcq?branch={{branch}}&full_name=" +path+ "/"+ testName+ ")%0A%0AMore%20info%20in%20[dashboard](https://datalens.yandex/4un3zdm0zcnyr)&labels=mute"
window.open(url, '_blank');
}
@@ -427,35 +481,64 @@
function openHistory(test) {
const full_name = test.trim();
- let url = "https://datalens.yandex/34xnbsom67hcq?full_name="+full_name
+ let url = "https://datalens.yandex/34xnbsom67hcq?branch={{branch}}&full_name="+full_name
window.open(url, '_blank');
}
-
- let lastOpenedTooltip = null;
-
- function toggleTooltip(event) {
- event.stopPropagation();
- const tooltip = event.currentTarget.querySelector('.tooltip');
-
- if (tooltip.classList.contains('visible')) {
- tooltip.classList.remove('visible');
- lastOpenedTooltip = null;
- } else {
- hideTooltips();
- tooltip.classList.add('visible');
- lastOpenedTooltip = tooltip;
+ // Function to show test info in a modal window
+ function showTestInfo(testInfo) {
+ const modal = document.getElementById('errorModal');
+ const modalTitle = document.getElementById('modalTitle');
+ const modalStatus = document.getElementById('modalStatus');
+ const modalDate = document.getElementById('modalDate');
+ const modalSha = document.getElementById('modalSha');
+ const modalJob = document.getElementById('modalJob');
+ const modalErrorContent = document.getElementById('modalErrorContent');
+
+ // Fill in the info
+ modalTitle.textContent = 'Test details: ' + testInfo.testName;
+ modalStatus.textContent = testInfo.status;
+ modalDate.textContent = testInfo.date;
+
+ // Update the run link
+ const jobLink = document.getElementById('modalJobLink');
+ jobLink.href = `https://github.com/ydb-platform/ydb/actions/runs/${testInfo.job_id}`;
+ jobLink.textContent = testInfo.job_name;
+ // Update the commit link
+ const shaLink = document.getElementById('modalShaLink');
+ shaLink.href = `https://github.com/ydb-platform/ydb//commit/${testInfo.commit}`;
+ shaLink.textContent = testInfo.commit.substring(0, 8);
+
+ // CSS class for the status
+ modalStatus.className = '';
+ if (testInfo.status === 'passed') {
+ modalStatus.classList.add('test_pass');
+ } else if (testInfo.status === 'failure') {
+ modalStatus.classList.add('test_fail');
+ } else if (testInfo.status === 'mute') {
+ modalStatus.classList.add('test_mute');
}
- }
-
- function hideTooltips() {
- if (lastOpenedTooltip) {
- lastOpenedTooltip.classList.remove('visible');
- lastOpenedTooltip = null;
+
+ // Handle the error text
+ if (testInfo.errorText && testInfo.errorText.trim() !== "") {
+ document.getElementById('modalErrorSection').style.display = 'block';
+ modalErrorContent.textContent = testInfo.errorText;
+
+ // Determine the language for syntax highlighting
+ modalErrorContent.className = 'language-' + (testInfo.testName.includes('.py') ? 'python' : 'cpp');
+
+ // Activate syntax highlighting
+ if (typeof Prism !== 'undefined') {
+ Prism.highlightElement(modalErrorContent);
+ }
+ } else {
+ document.getElementById('modalErrorSection').style.display = 'none';
}
+
+ // Show the modal window
+ modal.style.display = 'block';
}
-
-
+
function toggleAllTables(action) {
const contents = document.querySelectorAll('.collapsible-content');
if (action === 'expand') {
@@ -481,6 +564,7 @@
});
}
+
document.addEventListener("DOMContentLoaded", function() {
let openDropdown = null; // Track the currently open dropdown
@@ -525,15 +609,33 @@
}
});
- document.addEventListener('click', function(event) {
- if (!event.target.closest('.svg-icon') && !event.target.classList.contains('copy')) {
- hideTooltips();
- }
- });
const svgIcons = document.querySelectorAll('.svg-icon');
svgIcons.forEach(icon => {
- icon.addEventListener('click', toggleTooltip);
+ icon.addEventListener('click', function(event) {
+ event.stopPropagation();
+
+ // Get the data for the modal window
+ const testName = icon.getAttribute('data-test-name');
+ const status = icon.getAttribute('data-status');
+ const date = icon.getAttribute('data-date');
+ const errorText = icon.getAttribute('data-error');
+ const commit = icon.getAttribute('data-commit');
+ const job_name = icon.getAttribute('data-job-name');
+ const job_id = icon.getAttribute('data-job-id');
+
+ // Open the modal window with the data
+ showTestInfo({
+ testName: testName,
+ status: status,
+ date: date,
+ errorText: errorText,
+ commit: commit,
+ job_name: job_name,
+ job_id: job_id
+ });
+ });
});
+
const copyButtons = document.querySelectorAll(".copy");
copyButtons.forEach(button => {
button.addEventListener('click', function(event) {
@@ -588,6 +690,19 @@
document.getElementById('collapse-all').addEventListener('click', function(event) {
toggleAllTables('collapse');
});
+
+ // Handler for closing the modal window
+ document.querySelector('.close-modal').onclick = function() {
+ document.getElementById('errorModal').style.display = 'none';
+ }
+
+ // Close when clicking outside the modal window
+ window.onclick = function(event) {
+ const modal = document.getElementById('errorModal');
+ if (event.target == modal) {
+ modal.style.display = 'none';
+ }
+ }
});
function toggleError(button) {
const errorContent = button.nextElementSibling;
@@ -724,7 +839,14 @@
{% if (status.is_error and t.full_name in history) %}
<td>
{% for h in history[t.full_name] %}
- <span class="svg-icon">
+ <span class="svg-icon"
+ data-test-name="{{ t.full_name }}"
+ data-status="{{ history[t.full_name][h].status }}"
+ data-date="{{ history[t.full_name][h].datetime }}"
+ data-error="{{ history[t.full_name][h].status_description }}"
+ data-commit="{{ history[t.full_name][h].commit }}"
+ data-job-name="{{ history[t.full_name][h].job_name }}[{{ history[t.full_name][h].branch}}:{{ build_preset }}]"
+ data-job-id="{{ history[t.full_name][h].job_id }}">
{% if history[t.full_name][h].status == 'failure' %}
<svg class="svg_failure" viewBox="0 0 16 16" >
<path fill-rule="evenodd" d="M13.5 8a5.5 5.5 0 1 1-11 0 5.5 5.5 0 0 1 11 0M15 8A7 7 0 1 1 1 8a7 7 0 0 1 14 0M6.53 5.47a.75.75 0 0 0-1.06 1.06L6.94 8 5.47 9.47a.75.75 0 1 0 1.06 1.06L8 9.06l1.47 1.47a.75.75 0 1 0 1.06-1.06L9.06 8l1.47-1.47a.75.75 0 1 0-1.06-1.06L8 6.94z" clip-rule="evenodd"></path>
@@ -738,15 +860,6 @@
<path fill-rule="evenodd" d="M5.06 9.94A1.5 1.5 0 0 0 4 9.5H2a.5.5 0 0 1-.5-.5V7a.5.5 0 0 1 .5-.5h2a1.5 1.5 0 0 0 1.06-.44l2.483-2.482a.268.268 0 0 1 .457.19v8.464a.268.268 0 0 1-.457.19zM2 5h2l2.482-2.482A1.768 1.768 0 0 1 9.5 3.768v8.464a1.768 1.768 0 0 1-3.018 1.25L4 11H2a2 2 0 0 1-2-2V7a2 2 0 0 1 2-2m10.28.72a.75.75 0 1 0-1.06 1.06L12.44 8l-1.22 1.22a.75.75 0 1 0 1.06 1.06l1.22-1.22 1.22 1.22a.75.75 0 1 0 1.06-1.06L14.56 8l1.22-1.22a.75.75 0 0 0-1.06-1.06L13.5 6.94z" clip-rule="evenodd"></path> </svg>
</svg>
{% endif %}
- <span class="tooltip">
- Status: {{history[t.full_name][h].status}}<br>
- Date: {{ history[t.full_name][h].datetime }}<br>
- {% if history[t.full_name][h].status_description != "" %}
- Info: {{ history[t.full_name][h].status_description.split(';')[0][0:100] }}<br>
- {% endif %}
- SHA: <a href="https://github.com/ydb-platform/ydb//commit/{{ history[t.full_name][h].commit }}" style="color: #00f;" target="_blank">{{history[t.full_name][h].commit[0:8]}}</a>
- </span>
-
</span>
{% endfor %}
</td>
@@ -775,5 +888,39 @@
</tbody>
</table>
{% endfor %}
+
+<!-- Modal window for detailed error display -->
+<div id="errorModal" class="modal">
+ <div class="modal-content">
+ <span class="close-modal">&times;</span>
+ <div class="modal-header">
+ <h3 id="modalTitle">Test Details</h3>
+ </div>
+ <div class="modal-info">
+ <div class="modal-info-item">
+ <span class="modal-info-label">Run:</span>
+ <a id="modalJobLink" href="#" target="_blank"></a>
+ </div>
+ <div class="modal-info-item">
+ <span class="modal-info-label">Status:</span>
+ <span id="modalStatus"></span>
+ </div>
+ <div class="modal-info-item">
+ <span class="modal-info-label">Date:</span>
+ <span id="modalDate"></span>
+ </div>
+ <div class="modal-info-item">
+ <span class="modal-info-label">Commit:</span>
+ <a id="modalShaLink" href="#" target="_blank"></a>
+ </div>
+ </div>
+ <div id="modalErrorSection">
+ <div class="console-frame">
+ <pre><code id="modalErrorContent"></code></pre>
+ </div>
+ </div>
+ </div>
+</div>
+
</body>
</html>
diff --git a/.github/scripts/tests/update_mute_issues.py b/.github/scripts/tests/update_mute_issues.py
index 8387f40a0f3..238387e5300 100755
--- a/.github/scripts/tests/update_mute_issues.py
+++ b/.github/scripts/tests/update_mute_issues.py
@@ -1,5 +1,4 @@
import os
-import re
import requests
from github import Github #pip3 install PyGithub
from urllib.parse import quote, urlencode
@@ -324,9 +323,9 @@ def generate_github_issue_title_and_body(test_data):
# Title
if len(test_full_names) > 1:
- title = f'Mute {test_data[0]["suite_folder"]} {len(test_full_names)} tests'
+ title = f'Mute {test_data[0]["suite_folder"]} {len(test_full_names)} tests in {branch}'
else:
- title = f'Mute {test_data[0]["full_name"]}'
+ title = f'Mute {test_data[0]["full_name"]} in {branch}'
# Преобразование списка тестов в строку и кодирование
test_string = "\n".join(test_full_names)
@@ -337,11 +336,12 @@ def generate_github_issue_title_and_body(test_data):
# Создаем ссылку на историю тестов, кодируя параметры
- test_run_history_params = "&".join(
+ test_name_params = "&".join(
urlencode({"full_name": f"__in_{test}"})
for test in test_full_names
)
- test_run_history_link = f"{CURRENT_TEST_HISTORY_DASHBOARD}{test_run_history_params}"
+ branch_param = urlencode({"&branch": branch})
+ test_run_history_link = f"{CURRENT_TEST_HISTORY_DASHBOARD}{test_name_params}{branch_param}"
# owner
# Тело сообщения и кодирование
@@ -412,12 +412,6 @@ def get_issues_and_tests_from_project(ORG_NAME, PROJECT_ID):
content = issue['content']
if content:
body = content['body']
-
- # for debug
- if content['id'] == 'I_kwDOGzZjoM6V3BoE':
- print(1)
- #
-
tests, branches = parse_body(body)
field_values = issue.get('fieldValues', {}).get('nodes', [])
@@ -438,6 +432,7 @@ def get_issues_and_tests_from_project(ORG_NAME, PROJECT_ID):
print(f"Status: {status}")
print(f"Status updated: {status_updated}")
print(f"Owner: {owner}")
+ print(f"Branch: {(',').join(branches) if branches else 'main'}")
print("Tests:")
all_issues_with_contet[content['id']] = {}
@@ -488,14 +483,6 @@ def main():
return 1
else:
github_token = os.environ["GITHUB_TOKEN"]
- # muted_tests = get_muted_tests_from_issues()
-
- # create_github_issues(tests)
-
-
-# create_and_add_issue_to_project('test issue','test_issue_body', state = 'Muted', owner = 'fq')
-# print(1)
-# update_issue_state(muted_tests, github_token, "closed")
if __name__ == "__main__":
main() \ No newline at end of file
diff --git a/.github/workflows/collect_analytics.yml b/.github/workflows/collect_analytics.yml
index 06a40ecba0e..772af966cdf 100644
--- a/.github/workflows/collect_analytics.yml
+++ b/.github/workflows/collect_analytics.yml
@@ -1,25 +1,23 @@
name: Collect-analytics-run
on:
schedule:
- - cron: "0 1-23/2 * * *" #каждые 2 часа в 0 минут, начиная с 1:00 и заканчивая 23:00.
+ - cron: "0 1-23/2 * * *" # Every 2 hours starting from 1:00 to 23:00.
workflow_dispatch:
- inputs:
- commit_sha:
- type: string
- default: ""
-
+
+
defaults:
run:
shell: bash
+
jobs:
main:
- name: Checkout and setup
- runs-on: [ self-hosted ]
+ name: Checkout, setup and collect testowners
+ runs-on: [ self-hosted, auto-provisioned, build-preset-analytic-node]
steps:
- name: Checkout
uses: actions/checkout@v4
with:
- ref: ${{ inputs.commit_sha }}
+ ref: ${{ github.ref }}
- name: Setup ydb access
uses: ./.github/actions/setup_ci_ydb_service_account_key_file_credentials
with:
@@ -27,23 +25,48 @@ jobs:
- name: Install dependencies
run: |
python3 -m pip install ydb ydb[yc] codeowners pandas
- - name: Collect testowners
+ - name: Collect testowners
run: python3 .github/scripts/analytics/upload_testowners.py
- - name: Collect test history data with window 1 days relwithdebinfo for main
- run: python3 .github/scripts/analytics/flaky_tests_history.py --days-window=1
- - name: Collect test history data with window 1 days release-asan for main
- run: python3 .github/scripts/analytics/flaky_tests_history.py --days-window=1 --build_type=release-asan
+
+ matrix-job:
+ name: Test collection on ${{ matrix.branch }} with ${{ matrix.build_type }}
+ runs-on: [ self-hosted, auto-provisioned, build-preset-analytic-node]
+ needs: main
+ strategy:
+ fail-fast: false
+ matrix:
+ branch: [ main, stable-25-1, stable-25-1-1 ]
+ build_type: [ relwithdebinfo, release-asan, release-msan, release-tsan ]
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.ref }}
+ - name: Setup ydb access
+ uses: ./.github/actions/setup_ci_ydb_service_account_key_file_credentials
+ with:
+ ci_ydb_service_account_key_file_credentials: ${{ secrets.CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS }}
+ - name: Install dependencies
+ run: |
+ python3 -m pip install ydb ydb[yc] codeowners pandas
+ - name: Collect test history data
+ run: |
+ python3 .github/scripts/analytics/flaky_tests_history.py --days-window=1 --branch ${{ matrix.branch }} --build_type ${{ matrix.build_type }}
- name: Collect all muted and not muted tests
- run: python3 .github/scripts/tests/get_muted_tests.py upload_muted_tests --branch main
- - name: Collect all test monitor (how long tests in state)
- run: python3 .github/scripts/analytics/tests_monitor.py --branch main
- - name: Collect test history data with window 10 run relwithdebinfo for main
- continue-on-error: true
- run: python3 .github/scripts/analytics/flaky_tests_history_n_runs.py --runs=10
- - name: Collect test history data with window 10 run release-asan for main
- continue-on-error: true
- run: python3 .github/scripts/analytics/flaky_tests_history_n_runs.py --runs=10 --build_type=release-asan
+ run: |
+ # Save the original muted_ya.txt file
+ cp .github/config/muted_ya.txt .github/config/muted_ya.txt.bak
-
+ # Download muted_ya.txt from the specified branch
+ BRANCH=${{ matrix.branch }}
+ URL="https://raw.githubusercontent.com/ydb-platform/ydb/${BRANCH}/.github/config/muted_ya.txt"
+ wget -O .github/config/muted_ya.txt $URL
+ # Run the script
+ python3 .github/scripts/tests/get_muted_tests.py upload_muted_tests --branch $BRANCH
+ # Restore the original muted_ya.txt file
+ mv .github/config/muted_ya.txt.bak .github/config/muted_ya.txt
+ - name: Collect all test monitor (how long tests in state)
+ run: python3 .github/scripts/analytics/tests_monitor.py --branch ${{ matrix.branch }} --build_type ${{ matrix.build_type }}
+
diff --git a/.github/workflows/collect_analytics_fast.yml b/.github/workflows/collect_analytics_fast.yml
index ff6e0694593..6996b49e519 100644
--- a/.github/workflows/collect_analytics_fast.yml
+++ b/.github/workflows/collect_analytics_fast.yml
@@ -14,7 +14,7 @@ defaults:
jobs:
main:
name: Checkout and setup
- runs-on: [ self-hosted ]
+ runs-on: [ self-hosted, auto-provisioned, build-preset-analytic-node]
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -32,10 +32,11 @@ jobs:
run: python3 .github/scripts/analytics/test_history_fast.py
- name: Upload olap perfomance data mart
continue-on-error: true
- run: python3 .github/scripts/analytics/data_mart_executor.py --query_path .github/scripts/analytics/data_mart_queries/perfomance_olap_mart.sql --table_path perfomance/olap/fast_results --store_type column --partition_keys Run_start_timestamp --primary_keys Db Suite Test Branch Run_start_timestamp --ttl_min 43200 --ttl_key Run_start_timestamp
+ run: python3 .github/scripts/analytics/data_mart_executor.py --query_path .github/scripts/analytics/data_mart_queries/perfomance_olap_mart.sql --table_path perfomance/olap/fast_results --store_type column --partition_keys Run_start_timestamp --primary_keys Run_start_timestamp Db Suite Test Branch --ttl_min 43200 --ttl_key Run_start_timestamp
- name: Upload test monitor data mart
continue-on-error: true
run: python3 .github/scripts/analytics/data_mart_executor.py --query_path .github/scripts/analytics/data_mart_queries/test_monitor_mart.sql --table_path test_results/analytics/test_monitor_mart --store_type column --partition_keys date_window branch build_type owner_team suite_folder --primary_keys date_window owner_team branch build_type suite_folder full_name --ttl_min 43200 --ttl_key date_window
- - name: Upload test history data mart
+ - name: Upload muted test data mart
continue-on-error: true
- run: python3 .github/scripts/analytics/data_mart_executor.py --query_path .github/scripts/analytics/data_mart_queries/test_results_mart.sql --table_path test_results/analytics/test_history_mart --store_type column --partition_keys run_timestamp job_id build_type branch --primary_keys full_name run_timestamp job_id job_name branch build_type test_id --ttl_min 20160 --ttl_key run_timestamp
+ run: python3 .github/scripts/analytics/data_mart_executor.py --query_path .github/scripts/analytics/data_mart_queries/muted_test_mart.sql --table_path test_results/analytics/test_history_mart --store_type column --partition_keys run_timestamp job_id build_type branch --primary_keys run_timestamp full_name job_id job_name branch build_type test_id --ttl_min 20160 --ttl_key run_timestamp
+
diff --git a/.github/workflows/update_muted_ya.yml b/.github/workflows/update_muted_ya.yml
index 99f40222b6c..da498b0bd05 100644
--- a/.github/workflows/update_muted_ya.yml
+++ b/.github/workflows/update_muted_ya.yml
@@ -4,19 +4,46 @@ on:
schedule:
- cron: "0 */2 * * *" # At the beginning of every 2nd hour
workflow_dispatch:
+ inputs:
+ branches:
+ description: 'Comma-separated list of branches to process'
+ required: false
+ default: 'main,stable-25-1,stable-25-1-1'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH_FOR_PR: update-muted-ya
TITLE: "Update muted_ya.txt"
- BASE_BRANCH: main
REVIEWERS: "['ci']"
LABEL: mute-unmute
+ BUILD_TYPE: relwithdebinfo # Use only one build type
jobs:
- create-or-update-muted-ya:
- runs-on: ubuntu-latest
+ setup:
+ runs-on: [ self-hosted, auto-provisioned, build-preset-analytic-node]
+ outputs:
+ matrix_branches: ${{ steps.set-matrix.outputs.branches }}
steps:
+ - id: set-matrix
+ run: |
+ if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
+ echo "branches=$(echo '${{ github.event.inputs.branches }}' | jq -R -s -c 'split(",")')" >> $GITHUB_OUTPUT
+ else
+ echo "branches=$(echo '["main","stable-25-1","stable-25-1-1"]' | jq -c .)" >> $GITHUB_OUTPUT
+ fi
+
+ update-muted-tests:
+ needs: setup
+ runs-on: [ self-hosted, auto-provisioned, build-preset-analytic-node]
+ strategy:
+ fail-fast: false
+ matrix:
+ BASE_BRANCH: ${{ fromJson(needs.setup.outputs.matrix_branches) }}
+ steps:
+ - name: Set environment variables
+ run: |
+ echo "BASE_BRANCH=${{ matrix.BASE_BRANCH }}" >> $GITHUB_ENV
+
- name: Checkout repository
uses: actions/checkout@v4
with:
@@ -32,48 +59,50 @@ jobs:
with:
ci_ydb_service_account_key_file_credentials: ${{ secrets.CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS }}
- - name: Collect test history data with window 1 days relwithdebinfo for ${{ env.BASE_BRANCH }}
- run: python3 .github/scripts/analytics/flaky_tests_history.py --days-window=1 --branch=${{ env.BASE_BRANCH }}
+ - name: Collect test history data
+ run: python3 .github/scripts/analytics/flaky_tests_history.py --days-window=1 --branch=${{ env.BASE_BRANCH }} --build_type=${{ env.BUILD_TYPE }}
- - name: Update muted and not muted tests in DB for ${{ env.BASE_BRANCH }}
- run: python3 .github/scripts/tests/get_muted_tests.py upload_muted_tests --branch=${{ env.BASE_BRANCH }}
+ - name: Update muted tests in DB
+ run: python3 .github/scripts/tests/get_muted_tests.py upload_muted_tests --branch=${{ env.BASE_BRANCH }}
- - name: Update test monitor (how long tests in state) for ${{ env.BASE_BRANCH }}
- run: python3 .github/scripts/analytics/tests_monitor.py --branch=${{ env.BASE_BRANCH }}
+ - name: Update test monitor
+ run: python3 .github/scripts/analytics/tests_monitor.py --branch=${{ env.BASE_BRANCH }} --build_type=${{ env.BUILD_TYPE }}
- - name: Update branch ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }} with branch ${{ env.BASE_BRANCH }}
+ - name: Update branch for PR
run: |
+ # Set the PR branch name without the build_type suffix
+ PR_BRANCH="${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }}"
+ echo "PR_BRANCH=${PR_BRANCH}" >> $GITHUB_ENV
+
git config user.name YDBot
git config user.email ydbot@ydb.tech
# Fetch the latest changes from remote
- git fetch origin ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }}
+ git fetch origin ${PR_BRANCH} || true
# Checkout BRANCH_FOR_PR, create if it doesn't exist based on BASE_BRANCH
- if git show-ref --quiet origin ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }}; then
- echo 'Branch ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }} exists.'
- git checkout ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }}
+ if git show-ref --quiet refs/remotes/origin/${PR_BRANCH}; then
+ echo "Branch ${PR_BRANCH} exists."
+ git checkout ${PR_BRANCH}
else
- echo 'Branch ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }} does not exist. Creating based on ${{ env.BASE_BRANCH }}'
- git checkout -b ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }} origin/${{ env.BASE_BRANCH }}
+ echo "Branch ${PR_BRANCH} does not exist. Creating based on ${{ env.BASE_BRANCH }}"
+ git checkout -b ${PR_BRANCH}
fi
- # Attempt to rebase BRANCH_FOR_PR onto BASE_BRANCH
+ # Attempt to rebase PR branch onto BASE_BRANCH
if ! git rebase origin/${{ env.BASE_BRANCH }} -X theirs; then
echo "Rebase failed, resetting branch to match ${{ env.BASE_BRANCH }}..."
-
# Abort the rebase process
git rebase --abort
-
- echo "Reset branch ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }} to origin/${{ env.BASE_BRANCH }}"
+ echo "Reset branch ${PR_BRANCH} to origin/${{ env.BASE_BRANCH }}"
git reset --hard origin/${{ env.BASE_BRANCH }}
-
fi
- git push origin ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }} --force
+
+ git push origin ${PR_BRANCH} --force
- - name: Run script create_new_muted_ya.py
+ - name: Run create_new_muted_ya.py
run: |
- .github/scripts/tests/create_new_muted_ya.py update_muted_ya
+ .github/scripts/tests/create_new_muted_ya.py update_muted_ya --build_type=${{ env.BUILD_TYPE }}
- name: Move new_muted_ya_with_flaky.txt to muted_ya.txt
run: |
@@ -96,6 +125,8 @@ jobs:
run: |
PR_BODY=''
PR_BODY_FILE="pr_body_content.txt"
+
+ PR_BODY+=$'# Muted tests update for ${{ env.BASE_BRANCH }}\n\n'
if [ -s mute_update/deleted_tests_in_mute_debug.txt ]; then
DELETED_COUNT=$(wc -l < mute_update/deleted_tests_in_mute_debug.txt)
@@ -110,6 +141,32 @@ jobs:
PR_BODY+=$'```\n'
PR_BODY+=$(cat mute_update/flaky_debug.txt)
PR_BODY+=$'\n```\n\n'
+
+ # Create a dashboard link with the tests from flaky.txt (not flaky_debug.txt)
+ if [ -s mute_update/flaky.txt ]; then
+ BASE_URL="https://datalens.yandex.cloud/34xnbsom67hcq-ydb-autotests-test-history-link?branch=${{ env.BASE_BRANCH }}"
+ MAX_TESTS=50
+ TEST_COUNT=0
+
+ # Create a temporary file for the URL
+ echo "${BASE_URL}" > dashboard_url.txt
+
+ # For each test from flaky.txt, append it to the URL
+ while read -r test_name && [ ${TEST_COUNT} -lt ${MAX_TESTS} ]; do
+ # Replace spaces with "/"
+ formatted_name=$(echo "${test_name}" | tr ' ' '/')
+ # Append the parameter to the URL
+ echo -n "&full_name=${formatted_name}" >> dashboard_url.txt
+ TEST_COUNT=$((TEST_COUNT+1))
+ done < mute_update/flaky.txt
+
+ # Get the full URL
+ DASHBOARD_URL=$(cat dashboard_url.txt)
+
+ # Add the dashboard link to the description
+ PR_BODY+=$'**Dashboard Link:**\n'
+ PR_BODY+=$"[View history of flaky tests on Dashboard](${DASHBOARD_URL})\n\n"
+ fi
fi
if [ -s mute_update/muted_stable_debug.txt ]; then
MUTED_STABLE_COUNT=$(wc -l < mute_update/muted_stable_debug.txt)
@@ -129,32 +186,35 @@ jobs:
run: |
git add .github/config/muted_ya.txt
- - name: Delete other files
+ - name: Delete temporary files
run: |
rm -rf mute_update
+ rm -f dashboard_url.txt
- name: Commit changes
if: env.changes == 'true'
run: |
- git commit -m "Update muted YA file"
+ git commit -m "Update muted YA file for ${{ env.BASE_BRANCH }}"
- name: Push changes
if: env.changes == 'true'
uses: ad-m/github-push-action@v0.8.0
with:
github_token: ${{ secrets.YDBOT_TOKEN }}
- branch: ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }}
+ branch: ${{ env.PR_BRANCH }}
force: true
-
- name: Create or update PR
if: env.changes == 'true'
id: create_or_update_pr
env:
GITHUB_TOKEN: ${{ secrets.YDBOT_TOKEN }}
run: |
- python .github/scripts/create_or_update_pr.py create_or_update --base_branch="${{ env.BASE_BRANCH }}" --branch_for_pr="${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }}" --title="${{ env.TITLE }} in ${{ env.BASE_BRANCH }}" --body="${{ env.PR_BODY_PATH }}"
-
+ python .github/scripts/create_or_update_pr.py create_or_update \
+ --base_branch="${{ env.BASE_BRANCH }}" \
+ --branch_for_pr="${{ env.PR_BRANCH }}" \
+ --title="${{ env.TITLE }} in ${{ env.BASE_BRANCH }}" \
+ --body="${{ env.PR_BODY_PATH }}"
- name: Comment PR
uses: actions/github-script@v7
@@ -168,7 +228,7 @@ jobs:
const workflowUrl = `https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}`;
const filePath = path.join(process.env.GITHUB_WORKSPACE, 'pr_body_content.txt');
const bodyText = fs.readFileSync(filePath, 'utf8');
- const completeBody = `Collected in workflow [#${{ github.run_number }}](${workflowUrl})\n\n${bodyText}`;
+ const completeBody = `Collected in workflow [#${{ github.run_number }}](${workflowUrl}) for ${{ env.BASE_BRANCH }}\n\n${bodyText}`;
github.rest.issues.createComment({
issue_number: ${{ steps.create_or_update_pr.outputs.pr_number }},
@@ -193,5 +253,3 @@ jobs:
pull_number: ${{ steps.create_or_update_pr.outputs.pr_number }}
team_reviewers: ${{ env.REVIEWERS }}
token: ${{ secrets.YDBOT_TOKEN }}
-
- \ No newline at end of file
diff --git a/.github/workflows/validate_pr_description.yml b/.github/workflows/validate_pr_description.yml
index caf3e76084a..6bd4877978c 100644
--- a/.github/workflows/validate_pr_description.yml
+++ b/.github/workflows/validate_pr_description.yml
@@ -12,7 +12,7 @@ on:
jobs:
validate-pr-description:
- runs-on: ubuntu-latest
+ runs-on: [ self-hosted, auto-provisioned, build-preset-analytic-node]
steps:
- name: Check out the repository
diff --git a/.github/workflows/weekly_analytic.yml b/.github/workflows/weekly_analytic.yml
new file mode 100644
index 00000000000..dd202becae8
--- /dev/null
+++ b/.github/workflows/weekly_analytic.yml
@@ -0,0 +1,28 @@
+name: Weekly-Analytic
+on:
+ schedule:
+ - cron: "0 0 * * 6" # Every Saturday at 00:00
+ workflow_dispatch:
+
+defaults:
+ run:
+ shell: bash
+jobs:
+ main:
+ name: Run Weekly Python Script
+ runs-on: [ self-hosted ]
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.ref }}
+ - name: Setup ydb access
+ uses: ./.github/actions/setup_ci_ydb_service_account_key_file_credentials
+ with:
+ ci_ydb_service_account_key_file_credentials: ${{ secrets.CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS }}
+ - name: Install dependencies
+ run: |
+ python3 -m pip install ydb ydb[yc] pandas # Add any other required packages
+ - name: Execute weekly Python script
+ continue-on-error: true
+ run: python3 .github/scripts/analytics/data_mart_delete_table.py --table_path test_results/analytics/test_history_fast