author    Semyon <yentsovsemyon@ydb.tech>  2025-04-07 11:13:08 +0300
committer GitHub <noreply@github.com>  2025-04-07 11:13:08 +0300
commit    c3301110258be632f5b73a6e348d5f9d98f3eacc (patch)
tree      3c077bdd52351f0cd70ab18934e0a9754677ec5b
parent    82aa45653da0f4a20cfeeb4ea228c9eff5abeef4 (diff)
test disk exhaustion by duplicates (#13794)
-rw-r--r--  ydb/tests/olap/data_quotas/test_quota_exhaustion.py (renamed from ydb/tests/olap/test_quota_exhaustion.py)  93
-rw-r--r--  ydb/tests/olap/data_quotas/ya.make  30
-rw-r--r--  ydb/tests/olap/ya.make  2
3 files changed, 113 insertions, 12 deletions
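In outline, the new test drives a tenant database into disk-quota exhaustion by upserting the same chunk of rows repeatedly (pure duplicates) while compaction is delayed, then polls until a fresh write succeeds again. A minimal sketch of that flow, assuming only an upsert_chunk(i) callable that returns False once the server reports Overloaded or Unavailable (the helper name is illustrative, not part of the commit):

import time

def overflow_then_recover(upsert_chunk, timeout_seconds=200):
    # Phase 1: rewrite chunk 0 until the data-size quota rejects the write.
    deadline = time.time() + timeout_seconds
    while upsert_chunk(0):
        assert time.time() <= deadline, 'overload not reached in time'
    # Phase 2: poll until compaction reclaims space and a new chunk goes through.
    deadline = time.time() + timeout_seconds
    while not upsert_chunk(1):
        assert time.time() <= deadline, 'writes did not recover after overload'
        time.sleep(1)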
diff --git a/ydb/tests/olap/test_quota_exhaustion.py b/ydb/tests/olap/data_quotas/test_quota_exhaustion.py
index c2153166cc..2a1dbaef46 100644
--- a/ydb/tests/olap/test_quota_exhaustion.py
+++ b/ydb/tests/olap/data_quotas/test_quota_exhaustion.py
@@ -1,6 +1,8 @@
import os
import subprocess
import sys
+import logging
+import time
import ydb
from ydb.tests.library.harness.kikimr_config import KikimrConfigGenerator
@@ -8,22 +10,32 @@ from ydb.tests.library.harness.kikimr_runner import KiKiMR
from ydb.tests.library.test_meta import link_test_case
ROWS_CHUNK_SIZE = 1000000
-ROWS_CHUNKS_COUNT = 50
+ROWS_CHUNKS_COUNT = 500
+
+logger = logging.getLogger(__name__)
class TestYdbWorkload(object):
@classmethod
- def setup_class(cls):
- cls.cluster = KiKiMR(KikimrConfigGenerator(
- column_shard_config={},
+ def setup_method(self):
+ self.cluster = KiKiMR(KikimrConfigGenerator(
+ column_shard_config={
+ "alter_object_enabled": True,
+ },
static_pdisk_size=10 * 1024 * 1024,
dynamic_pdisk_size=5 * 1024 * 1024
))
- cls.cluster.start()
+ self.cluster.start()
+
+ self.driver = ydb.Driver(endpoint=f'grpc://localhost:{self.cluster.nodes[1].grpc_port}', database='/Root')
+ self.session = ydb.QuerySessionPool(self.driver)
+ self.driver.wait(5, fail_fast=True)
@classmethod
- def teardown_class(cls):
- cls.cluster.stop()
+ def teardown_method(self):
+ self.session.stop()
+ self.driver.stop()
+ self.cluster.stop()
def make_session(self):
driver = ydb.Driver(endpoint=f'grpc://localhost:{self.cluster.nodes[1].grpc_port}', database=self.database_name)
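The hunk above replaces the class-level fixture with per-test setup and teardown: each test now gets its own KiKiMR cluster, ydb.Driver, and ydb.QuerySessionPool, and teardown_method releases them in reverse order. A sketch of that lifecycle as a context manager, assuming a started harness cluster object like the one the test builds (simplified; not the commit's actual code):

from contextlib import contextmanager

import ydb

@contextmanager
def per_test_lifecycle(cluster):
    # Setup order from the diff: driver pinned to node 1's gRPC endpoint,
    # then a query session pool, then wait for the endpoint to come up.
    driver = ydb.Driver(endpoint=f'grpc://localhost:{cluster.nodes[1].grpc_port}', database='/Root')
    session = ydb.QuerySessionPool(driver)
    driver.wait(5, fail_fast=True)
    try:
        yield session
    finally:
        # Teardown in reverse order: session pool, driver, cluster.
        session.stop()
        driver.stop()
        cluster.stop()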
@@ -50,13 +62,26 @@ class TestYdbWorkload(object):
SELECT * FROM AS_TABLE($rows_list);
""", None, ydb.retries.RetrySettings(max_retries=retries))
- def upsert_until_overload(self, session, table):
+ def try_upsert_test_chunk(self, session, table, chunk_id) -> bool:
+ try:
+ self.upsert_test_chunk(session, table, chunk_id, retries=0)
+ return True
+ except ydb.issues.Overloaded:
+ return False
+ except ydb.issues.Unavailable:
+ return False
+
+ def upsert_until_overload(self, do_upsert, timeout_seconds=0):
+ deadline = time.time() + timeout_seconds
try:
for i in range(ROWS_CHUNKS_COUNT):
- res = self.upsert_test_chunk(session, table, i, retries=0)
+ res = do_upsert(i)
print(f"upsert #{i} ok, result:", res, file=sys.stderr)
described = self.cluster.client.describe('/Root', '')
print('Quota exceeded {}'.format(described.PathDescription.DomainDescription.DomainState.DiskQuotaExceeded), file=sys.stderr)
+ if timeout_seconds:
+ assert time.time() <= deadline, "deadline exceeded"
+ assert False, "overload not reached"
except ydb.issues.Overloaded:
print('upsert: got overload issue', file=sys.stderr)
except ydb.issues.Unavailable:
print('upsert: got unavailable issue', file=sys.stderr)
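This hunk turns upsert_until_overload from a (session, table) helper into one that takes a do_upsert callback plus an optional deadline, so the same loop can drive both the unique-chunks scenario and the new duplicates scenario. A standalone sketch of the pattern, with local exception classes standing in for ydb.issues.Overloaded and ydb.issues.Unavailable:

import sys
import time

class Overloaded(Exception):   # stand-in for ydb.issues.Overloaded
    pass

class Unavailable(Exception):  # stand-in for ydb.issues.Unavailable
    pass

def upsert_until_overload(do_upsert, max_chunks=500, timeout_seconds=0):
    # Call do_upsert(i) until the server rejects a write; rejection is the
    # expected outcome here, so the function asserts if it never happens.
    deadline = time.time() + timeout_seconds
    try:
        for i in range(max_chunks):
            do_upsert(i)
            if timeout_seconds:
                assert time.time() <= deadline, 'deadline exceeded'
        assert False, 'overload not reached'
    except (Overloaded, Unavailable):
        print('upsert: got overload/unavailable issue', file=sys.stderr)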
@@ -70,7 +95,7 @@ class TestYdbWorkload(object):
# Overflow the database
self.create_test_table(session, 'huge')
- self.upsert_until_overload(session, 'huge')
+ self.upsert_until_overload(lambda i: self.upsert_test_chunk(session, 'huge', i, retries=0))
# Cleanup
session.execute_with_retries("""DROP TABLE huge""")
@@ -129,6 +154,7 @@ class TestYdbWorkload(object):
self.ydbcli_db_schema_exec(node, alter_proto)
+ @link_test_case("#13653")
def test_delete(self):
"""As per https://github.com/ydb-platform/ydb/issues/13653"""
self.database_name = os.path.join('/Root', 'test')
@@ -153,7 +179,7 @@ class TestYdbWorkload(object):
# Overflow the database
table_path = os.path.join(self.database_name, 'huge')
self.create_test_table(session, table_path)
- self.upsert_until_overload(session, table_path)
+ self.upsert_until_overload(lambda i: self.upsert_test_chunk(session, 'huge', i, retries=0))
# Check that deletion works at least first time
self.delete_test_chunk(session, table_path, 0)
@@ -165,3 +191,48 @@ class TestYdbWorkload(object):
assert i == ROWS_CHUNKS_COUNT
    # Enabling writes after data deletion will be checked in a separate PR
+
+ def wait_for(self, condition_func, timeout_seconds) -> bool:
+ t0 = time.time()
+ while time.time() - t0 < timeout_seconds:
+ if condition_func():
+ return True
+ time.sleep(1)
+ return False
+
+ @link_test_case("#13652")
+ def test_duplicates(self):
+ self.database_name = os.path.join('/Root', 'test')
+ print('Database name {}'.format(self.database_name), file=sys.stderr)
+ self.cluster.create_database(
+ self.database_name,
+ storage_pool_units_count={
+ 'hdd': 1
+ },
+ )
+ self.cluster.register_and_start_slots(self.database_name, count=1)
+ self.cluster.wait_tenant_up(self.database_name)
+
+ # Set soft and hard quotas to 40 Mb
+ self.alter_database_quotas(self.cluster.nodes[1], self.database_name, """
+ data_size_hard_quota: 40000000
+ data_size_soft_quota: 40000000
+ """)
+
+ session = self.make_session()
+ table_path = os.path.join(self.database_name, 'huge')
+ self.create_test_table(session, table_path)
+
+ # Delay compaction
+ session.execute_with_retries(
+ f"""
+ ALTER OBJECT `{table_path}` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `COMPACTION_PLANNER.CLASS_NAME`=`lc-buckets`, `COMPACTION_PLANNER.FEATURES`=`
+ {{"levels" : [{{"class_name" : "Zero", "portions_live_duration" : "200s", "expected_blobs_size" : 1000000000000, "portions_count_available" : 2}},
+ {{"class_name" : "Zero"}}]}}`);
+ """
+ )
+
+ # Overflow the database
+ self.upsert_until_overload(lambda i: self.upsert_test_chunk(session, table_path, 0, retries=0), timeout_seconds=200)
+
+ assert self.wait_for(lambda: self.try_upsert_test_chunk(session, table_path, 1), 200), "can't write after overload by duplicates"
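The ALTER OBJECT statement above packs the compaction-planner configuration into one line; pretty-printed as a Python literal (content identical to the JSON in the diff), the lc-buckets FEATURES value reads as follows. Per the '# Delay compaction' comment, the first zero level appears to hold freshly written portions for up to 200 s before compacting them, which is what lets duplicate rows pile up against the 40 MB quota:

features = {
    'levels': [
        {
            'class_name': 'Zero',
            'portions_live_duration': '200s',
            'expected_blobs_size': 1000000000000,
            'portions_count_available': 2,
        },
        {'class_name': 'Zero'},
    ]
}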
diff --git a/ydb/tests/olap/data_quotas/ya.make b/ydb/tests/olap/data_quotas/ya.make
new file mode 100644
index 0000000000..ea968fd75e
--- /dev/null
+++ b/ydb/tests/olap/data_quotas/ya.make
@@ -0,0 +1,30 @@
+PY3TEST()
+FORK_SUBTESTS()
+
+ENV(YDB_DRIVER_BINARY="ydb/apps/ydbd/ydbd")
+ENV(YDB_CLI_BINARY="ydb/apps/ydb/ydb")
+ENV(YDB_ENABLE_COLUMN_TABLES="true")
+
+TEST_SRCS(
+ test_quota_exhaustion.py
+)
+
+IF (SANITIZER_TYPE OR WITH_VALGRIND)
+ SIZE(LARGE)
+ TAG(ya:fat)
+ELSE()
+ SIZE(MEDIUM)
+ENDIF()
+
+DEPENDS(
+ ydb/apps/ydb
+ ydb/apps/ydbd
+)
+
+PEERDIR(
+ ydb/tests/library
+ ydb/tests/library/test_meta
+)
+
+END()
+
diff --git a/ydb/tests/olap/ya.make b/ydb/tests/olap/ya.make
index 147450c033..57b7cf7740 100644
--- a/ydb/tests/olap/ya.make
+++ b/ydb/tests/olap/ya.make
@@ -4,7 +4,6 @@ PY3TEST()
ENV(YDB_ENABLE_COLUMN_TABLES="true")
TEST_SRCS(
- test_quota_exhaustion.py
test_log_scenario.py
zip_bomb.py
)
@@ -40,4 +39,5 @@ RECURSE(
s3_import
scenario
ttl_tiering
+ data_quotas
)