author     ijon <ijon@yandex-team.com>   2022-12-06 15:50:08 +0300
committer  ijon <ijon@yandex-team.com>   2022-12-06 15:50:08 +0300
commit     a47304bb6087fabf3b7f45817ac0b25232afb5ac (patch)
tree       e8676787d5789abaa4b16a3a54b444a2728e553b
parent     9a4c3159305aa80064b7c4138f6a813a98f75b87 (diff)
schemeshard, alter-extsubdomain: create hive before all other system tablets

- make alter-extsubdomain a composite operation: hive creation plus extsubdomain construction/altering
- add feature flag `EnableAlterDatabaseCreateHiveFirst` (default: false) to switch between the old and the new behavior

  With the flag on, alter-extsubdomain creates the extsubdomain's Hive first, waits for it to come alive, and only then creates the other extsubdomain system tablets (SchemeShard, Coordinators, Mediators and SysViewProcessor). System tablets therefore emerge directly in the extsubdomain's Hive and not in the root hive (and do not require a separate migration pass).

- tests/functional/{rename,serverless,tenants}: refactor to use a common set of cluster/database fixtures
- schemeshard: move the states of subdomain operations into a separate header
- schemeshard: move CreateEvCreateTablet out of CreateParts
- schemeshard: make alter-extsubdomain input semantics more strict
- schemeshard (unit)tests: add the ability to check expected results by reason as well
- schemeshard (unit)tests: convert TSubDomainSettings into raw literals
- schemeshard: fix typos: CreateFroceDrop -> CreateForceDrop, ListSubThee -> ListSubTree, Droping -> Dropping, Trasaction -> Transaction, Dafault -> Default, Buket -> Bucket, etc.
- console: make the code for {Create,Alter}Subdomain more uniform
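For illustration, a minimal sketch of toggling the new behavior from C++ (a sketch under assumptions, not part of this commit): the optional bool added to TFeatureFlags in config.proto below yields generated Set/Get accessors, and the testlib setter added in feature_flags.h wraps the same field.

    // Minimal sketch, assuming only the generated proto accessors for the
    // new TFeatureFlags field; the surrounding setup is illustrative.
    NKikimrConfig::TFeatureFlags flags;
    flags.SetEnableAlterDatabaseCreateHiveFirst(true);   // default = false (old behavior)
    Y_VERIFY(flags.GetEnableAlterDatabaseCreateHiveFirst());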
-rw-r--r--  ydb/core/cms/console/console_tenants_manager.cpp | 21
-rw-r--r--  ydb/core/mind/hive/hive_impl.cpp | 14
-rw-r--r--  ydb/core/protos/config.proto | 3
-rw-r--r--  ydb/core/protos/counters_schemeshard.proto | 11
-rw-r--r--  ydb/core/protos/flat_scheme_op.proto | 3
-rw-r--r--  ydb/core/testlib/basics/feature_flags.h | 1
-rw-r--r--  ydb/core/testlib/tablet_helpers.cpp | 45
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__init.cpp | 15
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation.cpp | 50
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation.h | 2
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp | 1134
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp | 1
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_common.cpp | 84
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_common.h | 433
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_common_subdomain.h | 374
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp | 32
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp | 1
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp | 16
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_db_changes.h | 6
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp | 16
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp | 2
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp | 2
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp | 20
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp | 2
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_part.h | 23
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp | 4
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp | 20
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp | 4
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard_audit_log_fragment.cpp | 5
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard_impl.cpp | 71
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard_impl.h | 6
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard_info_types.h | 16
-rw-r--r--  ydb/core/tx/schemeshard/schemeshard_tx_infly.h | 4
-rw-r--r--  ydb/core/tx/schemeshard/ut_allocate_pq.cpp | 2
-rw-r--r--  ydb/core/tx/schemeshard/ut_base.cpp | 2
-rw-r--r--  ydb/core/tx/schemeshard/ut_extsubdomain.cpp | 1053
-rw-r--r--  ydb/core/tx/schemeshard/ut_extsubdomain_reboots.cpp | 221
-rw-r--r--  ydb/core/tx/schemeshard/ut_helpers/helpers.cpp | 102
-rw-r--r--  ydb/core/tx/schemeshard/ut_helpers/helpers.h | 74
-rw-r--r--  ydb/core/tx/schemeshard/ut_helpers/ls_checks.cpp | 35
-rw-r--r--  ydb/core/tx/schemeshard/ut_helpers/ls_checks.h | 8
-rw-r--r--  ydb/core/tx/schemeshard/ut_helpers/test_env.cpp | 6
-rw-r--r--  ydb/core/tx/schemeshard/ut_helpers/test_env.h | 1
-rw-r--r--  ydb/core/tx/schemeshard/ut_index_build.cpp | 2
-rw-r--r--  ydb/core/tx/schemeshard/ut_restore.cpp | 4
-rw-r--r--  ydb/core/tx/schemeshard/ut_subdomain.cpp | 4
-rw-r--r--  ydb/core/tx/tx_proxy/schemereq.cpp | 4
-rw-r--r--  ydb/tests/functional/rename/conftest.py | 113
-rw-r--r--  ydb/tests/functional/rename/test.py | 67
-rw-r--r--  ydb/tests/functional/serverless/conftest.py | 144
-rw-r--r--  ydb/tests/functional/serverless/test.py | 48
-rw-r--r--  ydb/tests/functional/tenants/common.py | 150
-rw-r--r--  ydb/tests/functional/tenants/conftest.py | 26
-rw-r--r--  ydb/tests/functional/tenants/test_dynamic_tenants.py | 665
-rw-r--r--  ydb/tests/functional/tenants/test_storage_config.py | 1
-rw-r--r--  ydb/tests/functional/tenants/test_tenants.py | 889
-rw-r--r--  ydb/tests/library/harness/kikimr_cluster_interface.py | 15
-rw-r--r--  ydb/tests/library/harness/kikimr_config.py | 3
-rw-r--r--  ydb/tests/library/harness/kikimr_runner.py | 23
-rw-r--r--  ydb/tests/library/harness/ydb_fixtures.py | 160
60 files changed, 3781 insertions, 2482 deletions
diff --git a/ydb/core/cms/console/console_tenants_manager.cpp b/ydb/core/cms/console/console_tenants_manager.cpp
index 5d84cd9dda8..ff1bbb8c508 100644
--- a/ydb/core/cms/console/console_tenants_manager.cpp
+++ b/ydb/core/cms/console/console_tenants_manager.cpp
@@ -472,12 +472,6 @@ public:
bool tablets)
{
subdomain.SetName(Subdomain.second);
- if (tablets) {
- subdomain.SetCoordinators(Tenant->Coordinators);
- subdomain.SetMediators(Tenant->Mediators);
- subdomain.SetPlanResolution(Tenant->PlanResolution);
- subdomain.SetTimeCastBucketsPerMediator(Tenant->TimeCastBucketsPerMediator);
- }
if (Tenant->IsExternalSubdomain) {
subdomain.SetExternalSchemeShard(true);
if (Tenant->IsExternalHive) {
@@ -487,6 +481,12 @@ public:
subdomain.SetExternalSysViewProcessor(true);
}
}
+ if (tablets) {
+ subdomain.SetCoordinators(Tenant->Coordinators);
+ subdomain.SetMediators(Tenant->Mediators);
+ subdomain.SetPlanResolution(Tenant->PlanResolution);
+ subdomain.SetTimeCastBucketsPerMediator(Tenant->TimeCastBucketsPerMediator);
+ }
for (auto &pr : (SharedTenant ? SharedTenant->StoragePools : Tenant->StoragePools)) {
// N.B. only provide schemeshard with pools that have at least one allocated group
@@ -572,16 +572,21 @@ public:
auto request = MakeHolder<TEvTxUserProxy::TEvProposeTransaction>();
request->Record.SetDatabaseName(TString(ExtractDomain(Subdomain.first)));
request->Record.SetExecTimeoutPeriod(Max<ui64>());
+
if (Tenant->UserToken.GetUserSID())
request->Record.SetUserToken(Tenant->UserToken.SerializeAsString());
+
auto &tx = *request->Record.MutableTransaction()->MutableModifyScheme();
+ tx.SetWorkingDir(Subdomain.first);
+
+ FillSubdomainCreationInfo(*tx.MutableSubDomain());
+
if (Tenant->IsExternalSubdomain) {
tx.SetOperationType(NKikimrSchemeOp::ESchemeOpCreateExtSubDomain);
} else {
tx.SetOperationType(NKikimrSchemeOp::ESchemeOpCreateSubDomain);
}
- tx.SetWorkingDir(Subdomain.first);
- FillSubdomainCreationInfo(*tx.MutableSubDomain());
+
if (Tenant->Attributes.UserAttributesSize())
tx.MutableAlterUserAttributes()->CopyFrom(Tenant->Attributes);
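Condensed, the propose-request construction above now runs in this order (calls exactly as in the hunk; error handling and attribute copying omitted):

    // Condensed from the hunk above: working dir and subdomain settings are
    // filled in before the operation type is chosen.
    auto request = MakeHolder<TEvTxUserProxy::TEvProposeTransaction>();
    auto& tx = *request->Record.MutableTransaction()->MutableModifyScheme();
    tx.SetWorkingDir(Subdomain.first);                  // 1. working dir
    FillSubdomainCreationInfo(*tx.MutableSubDomain());  // 2. subdomain settings
    tx.SetOperationType(Tenant->IsExternalSubdomain     // 3. operation type
        ? NKikimrSchemeOp::ESchemeOpCreateExtSubDomain
        : NKikimrSchemeOp::ESchemeOpCreateSubDomain);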
diff --git a/ydb/core/mind/hive/hive_impl.cpp b/ydb/core/mind/hive/hive_impl.cpp
index b93431e0530..8b9fed981b8 100644
--- a/ydb/core/mind/hive/hive_impl.cpp
+++ b/ydb/core/mind/hive/hive_impl.cpp
@@ -473,6 +473,18 @@ void THive::Handle(TEvPrivate::TEvBootTablets::TPtr&) {
} else {
BLOG_D("SubDomain Hive is ready");
+ if (!PrimaryDomainKey && Info()->TenantPathId) {
+ //NOTE: Primary(Sub)DomainKey isn't set after loading everything from the local db --
+ // -- this is a first-time boot or an incomplete configuration.
+ BLOG_I("Primary(Sub)DomainKey is not set, setting it from TTabletStorageInfo::TenantPathId to " << Info()->TenantPathId);
+
+ auto msg = MakeHolder<TEvHive::TEvConfigureHive>(TSubDomainKey(Info()->TenantPathId.OwnerId, Info()->TenantPathId.LocalPathId));
+ TEvHive::TEvConfigureHive::TPtr event((TEventHandle<TEvHive::TEvConfigureHive>*) new IEventHandle(
+ TActorId(), TActorId(), msg.Release()
+ ));
+ Execute(CreateConfigureSubdomain(event));
+ }
+
if (!TabletOwnersSynced) {
// this code should be removed later
THolder<TEvHive::TEvRequestTabletOwners> request(new TEvHive::TEvRequestTabletOwners());
@@ -1217,6 +1229,8 @@ THive::TBestNodeResult THive::FindBestNode(const TTabletInfo& tablet) {
break;
}
}
+ BLOG_TRACE("[FBN] Tablet " << tablet.ToString() << " selected nodes count " << selectedNodes.size());
+
TNodeInfo* selectedNode = nullptr;
if (!selectedNodes.empty()) {
switch (GetNodeSelectStrategy()) {
diff --git a/ydb/core/protos/config.proto b/ydb/core/protos/config.proto
index 3f933adc4ab..f425649349f 100644
--- a/ydb/core/protos/config.proto
+++ b/ydb/core/protos/config.proto
@@ -749,6 +749,9 @@ message TFeatureFlags {
optional bool EnableDynamicNodeAuthorization = 79 [default = false];
optional bool EnableKqpImmediateEffects = 80 [default = false];
optional bool EnableDataShardGenericReadSets = 81 [default = false];
+ // enable alter database operation to create subdomain's system tablets
+ // directly in subdomain's hive
+ optional bool EnableAlterDatabaseCreateHiveFirst = 82 [default = false];
}
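A hedged sketch of gating on the new flag: the Get accessor follows from the optional field above, while the AppData()->FeatureFlags access path is an assumption based on how other TFeatureFlags entries are consumed in the tree.

    // Sketch only: branch between the old and the new alter behavior.
    if (AppData()->FeatureFlags.GetEnableAlterDatabaseCreateHiveFirst()) {
        // new: composite alter, create the extsubdomain's hive first,
        // then the remaining system tablets directly inside it
    } else {
        // old: single alter, system tablets created via the root hive
        // and migrated to the extsubdomain's hive in a separate pass
    }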
diff --git a/ydb/core/protos/counters_schemeshard.proto b/ydb/core/protos/counters_schemeshard.proto
index 4be693507db..5a806e3db76 100644
--- a/ydb/core/protos/counters_schemeshard.proto
+++ b/ydb/core/protos/counters_schemeshard.proto
@@ -170,6 +170,8 @@ enum ESimpleCounters {
COUNTER_IN_FLIGHT_OPS_TxAllocatePQ = 138 [(CounterOpts) = {Name: "InFlightOps/AllocatePQ"}];
COUNTER_IN_FLIGHT_OPS_TxCreateCdcStreamAtTableWithSnapshot = 139 [(CounterOpts) = {Name: "InFlightOps/CreateCdcStreamAtTableWithSnapshot"}];
+
+ COUNTER_IN_FLIGHT_OPS_TxAlterExtSubDomainCreateHive = 140 [(CounterOpts) = {Name: "InFlightOps/AlterExtSubDomainCreateHive"}];
}
enum ECumulativeCounters {
@@ -207,7 +209,7 @@ enum ECumulativeCounters {
COUNTER_FINISHED_OPS_TxAlterUserAttributes = 29 [(CounterOpts) = {Name: "FinishedOps/AlterUserAttributes"}];
COUNTER_FINISHED_OPS_TxCreateTableIndex = 30 [(CounterOpts) = {Name: "FinishedOps/CreateTableIndex"}];
COUNTER_FINISHED_OPS_TxDropTableIndex = 31 [(CounterOpts) = {Name: "FinishedOps/DropTableIndex"}];
- COUNTER_FINISHED_OPS_TxCreateExtSubDomain = 32 [(CounterOpts) = {Name: "FinishedOps/DropExtSubDomain"}];
+ COUNTER_FINISHED_OPS_TxCreateExtSubDomain = 32 [(CounterOpts) = {Name: "FinishedOps/CreateExtSubDomain"}];
COUNTER_FINISHED_OPS_TxMergeTablePartition = 33 [(CounterOpts) = {Name: "FinishedOps/MergeTablePartition"}];
COUNTER_FINISHED_OPS_TxAlterExtSubDomain = 34 [(CounterOpts) = {Name: "FinishedOps/AlterExtSubDomain"}];
COUNTER_FINISHED_OPS_TxForceDropExtSubDomain = 35 [(CounterOpts) = {Name: "FinishedOps/ForceDropExtSubDomain"}];
@@ -261,8 +263,8 @@ enum ECumulativeCounters {
COUNTER_BACKGROUND_COMPACTION_NOT_NEEDED = 74 [(CounterOpts) = {Name: "BackgroundCompactionNotNeeded"}];
COUNTER_BACKGROUND_COMPACTION_LOANED = 75 [(CounterOpts) = {Name: "BackgroundCompactionLoaned"}];
- COUNTER_BORROWED_COMPACTION_OK = 76 [(CounterOpts) = {Name: "BorrowedCompactionOK"}];
- COUNTER_BORROWED_COMPACTION_TIMEOUT = 77 [(CounterOpts) = {Name: "BorrowedCompactionTimeout"}];
+ COUNTER_BORROWED_COMPACTION_OK = 76 [(CounterOpts) = {Name: "BorrowedCompactionOK"}];
+ COUNTER_BORROWED_COMPACTION_TIMEOUT = 77 [(CounterOpts) = {Name: "BorrowedCompactionTimeout"}];
COUNTER_FINISHED_OPS_TxCreateBlobDepot = 78 [(CounterOpts) = {Name: "FinishedOps/CreateBlobDepot"}];
COUNTER_FINISHED_OPS_TxAlterBlobDepot = 79 [(CounterOpts) = {Name: "FinishedOps/AlterBlobDepot"}];
@@ -273,6 +275,9 @@ enum ECumulativeCounters {
COUNTER_FINISHED_OPS_TxAllocatePQ = 83 [(CounterOpts) = {Name: "FinishedOps/AllocatePQ"}];
COUNTER_FINISHED_OPS_TxCreateCdcStreamAtTableWithSnapshot = 84 [(CounterOpts) = {Name: "FinishedOps/CreateCdcStreamAtTableWithSnapshot"}];
+
+ COUNTER_FINISHED_OPS_TxAlterExtSubDomainCreateHive = 85 [(CounterOpts) = {Name: "FinishedOps/AlterExtSubDomainCreateHive"}];
+
}
enum EPercentileCounters {
diff --git a/ydb/core/protos/flat_scheme_op.proto b/ydb/core/protos/flat_scheme_op.proto
index b7910d79ea3..0226e5eb363 100644
--- a/ydb/core/protos/flat_scheme_op.proto
+++ b/ydb/core/protos/flat_scheme_op.proto
@@ -1272,6 +1272,9 @@ enum EOperationType {
ESchemeOpAllocatePersQueueGroup = 83;
ESchemeOpDeallocatePersQueueGroup = 84;
+
+ // AlterExtSubDomain suboperations
+ ESchemeOpAlterExtSubDomainCreateHive = 85;
}
message TApplyIf {
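For context, an illustrative AlterExtSubDomain request that, with EnableAlterDatabaseCreateHiveFirst on, is now split into an ESchemeOpAlterExtSubDomainCreateHive suboperation plus the main alter (setters follow NKikimrSubDomains::TSubDomainSettings as used throughout this patch; the values are made up):

    // Illustrative values only; setters as used elsewhere in this diff.
    NKikimrSchemeOp::TModifyScheme tx;
    tx.SetWorkingDir("/Root");
    tx.SetOperationType(NKikimrSchemeOp::ESchemeOpAlterExtSubDomain);
    auto& settings = *tx.MutableSubDomain();
    settings.SetName("tenant-db");
    settings.SetExternalSchemeShard(true);
    settings.SetExternalHive(true);   // triggers the hive-first suboperation
    settings.SetCoordinators(1);
    settings.SetMediators(1);
    settings.SetPlanResolution(10);
    settings.SetTimeCastBucketsPerMediator(2);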
diff --git a/ydb/core/testlib/basics/feature_flags.h b/ydb/core/testlib/basics/feature_flags.h
index 9b7c6bd89e8..e8c17c91489 100644
--- a/ydb/core/testlib/basics/feature_flags.h
+++ b/ydb/core/testlib/basics/feature_flags.h
@@ -43,6 +43,7 @@ public:
FEATURE_FLAG_SETTER(EnableChangefeedInitialScan)
FEATURE_FLAG_SETTER(EnableKqpImmediateEffects)
FEATURE_FLAG_SETTER(EnableDataShardGenericReadSets)
+ FEATURE_FLAG_SETTER(EnableAlterDatabaseCreateHiveFirst)
TDerived& SetEnableMvcc(std::optional<bool> value) {
if (value) {
diff --git a/ydb/core/testlib/tablet_helpers.cpp b/ydb/core/testlib/tablet_helpers.cpp
index 6b0c84baa72..f671b629a3f 100644
--- a/ydb/core/testlib/tablet_helpers.cpp
+++ b/ydb/core/testlib/tablet_helpers.cpp
@@ -1107,6 +1107,8 @@ namespace NKikimr {
ctx.ExecutorThread.Send(ev.Release());
InitialEventsQueue.pop_front();
}
+
+ LOG_INFO_S(ctx, NKikimrServices::HIVE, "[" << TabletID() << "] started, primary subdomain " << PrimarySubDomainKey);
}
void OnDetach(const TActorContext &ctx) override {
@@ -1130,6 +1132,7 @@ namespace NKikimr {
void StateWork(STFUNC_SIG) {
switch (ev->GetTypeRewrite()) {
HFunc(TEvTablet::TEvTabletDead, HandleTabletDead);
+ HFunc(TEvHive::TEvConfigureHive, Handle);
HFunc(TEvHive::TEvCreateTablet, Handle);
HFunc(TEvHive::TEvAdoptTablet, Handle);
HFunc(TEvHive::TEvDeleteTablet, Handle);
@@ -1147,17 +1150,35 @@ namespace NKikimr {
}
}
+ void Handle(TEvHive::TEvConfigureHive::TPtr& ev, const TActorContext& ctx) {
+ LOG_INFO_S(ctx, NKikimrServices::HIVE, "[" << TabletID() << "] TEvConfigureHive, msg: " << ev->Get()->Record.ShortDebugString());
+
+ const auto& subdomainKey(ev->Get()->Record.GetDomain());
+ PrimarySubDomainKey = TSubDomainKey(subdomainKey);
+
+ LOG_INFO_S(ctx, NKikimrServices::HIVE, "[" << TabletID() << "] TEvConfigureHive, subdomain set to " << subdomainKey);
+ ctx.Send(ev->Sender, new TEvSubDomain::TEvConfigureStatus(NKikimrTx::TEvSubDomainConfigurationAck::SUCCESS, TabletID()));
+ }
+
void Handle(TEvHive::TEvCreateTablet::TPtr& ev, const TActorContext& ctx) {
+ LOG_INFO_S(ctx, NKikimrServices::HIVE, "[" << TabletID() << "] TEvCreateTablet, msg: " << ev->Get()->Record.ShortDebugString());
Cout << "FAKEHIVE " << TabletID() << " TEvCreateTablet " << ev->Get()->Record.ShortDebugString() << Endl;
NKikimrProto::EReplyStatus status = NKikimrProto::OK;
const std::pair<ui64, ui64> key(ev->Get()->Record.GetOwner(), ev->Get()->Record.GetOwnerIdx());
const auto type = ev->Get()->Record.GetTabletType();
const auto bootMode = ev->Get()->Record.GetTabletBootMode();
+
+ auto logPrefix = TStringBuilder() << "[" << TabletID() << "] TEvCreateTablet"
+ << ", Owner " << ev->Get()->Record.GetOwner() << ", OwnerIdx " << ev->Get()->Record.GetOwnerIdx()
+ << ", type " << type
+ << ", ";
+
auto it = State->Tablets.find(key);
TActorId bootstrapperActorId;
if (it == State->Tablets.end()) {
if (bootMode == NKikimrHive::TABLET_BOOT_MODE_EXTERNAL) {
// don't boot anything
+ LOG_INFO_S(ctx, NKikimrServices::HIVE, logPrefix << "external boot mode requested");
} else if (auto x = GetTabletCreationFunc(type)) {
bootstrapperActorId = Boot(ctx, type, x, DataGroupErasure);
} else if (type == TTabletTypes::DataShard) {
@@ -1197,6 +1218,10 @@ namespace NKikimr {
ui64 tabletId = State->AllocateTabletId();
it = State->Tablets.insert(std::make_pair(key, TTabletInfo(type, tabletId, bootstrapperActorId))).first;
State->TabletIdToOwner[tabletId] = key;
+
+ LOG_INFO_S(ctx, NKikimrServices::HIVE, logPrefix << "boot OK, tablet id " << tabletId);
+ } else {
+ LOG_ERROR_S(ctx, NKikimrServices::HIVE, logPrefix << "boot failed, status " << status);
}
} else {
if (it->second.Type != type) {
@@ -1224,18 +1249,18 @@ namespace NKikimr {
auto it = State->Tablets.find(newKey);
if (it != State->Tablets.end()) {
if (it->second.TabletId != tabletID) {
- explain = "there is another tablet assotiated with the (owner; ownerIdx)";
+ explain = "there is another tablet associated with the (owner; ownerIdx)";
status = NKikimrProto::EReplyStatus::RACE;
return;
}
if (it->second.Type != type) {
- explain = "there is the tablet with different type assotiated with the (owner; ownerIdx)";
+ explain = "there is the tablet with different type associated with the (owner; ownerIdx)";
status = NKikimrProto::EReplyStatus::RACE;
return;
}
- explain = "it seems like the tablet aleready adopted";
+ explain = "it seems like the tablet already adopted";
status = NKikimrProto::EReplyStatus::ALREADY;
return;
}
@@ -1248,13 +1273,13 @@ namespace NKikimr {
}
if (it->second.TabletId != tabletID) {
- explain = "there is another tablet assotiated with the (prevOwner; prevOwnerIdx)";
+ explain = "there is another tablet associated with the (prevOwner; prevOwnerIdx)";
status = NKikimrProto::EReplyStatus::ERROR;
return;
}
if (it->second.Type != type) { // tablet is the same
- explain = "there is the tablet with different type assotiated with the (preOwner; prevOwnerIdx)";
+ explain = "there is the tablet with different type associated with the (preOwner; prevOwnerIdx)";
status = NKikimrProto::EReplyStatus::ERROR;
return;
}
@@ -1269,6 +1294,7 @@ namespace NKikimr {
}
void Handle(TEvHive::TEvAdoptTablet::TPtr& ev, const TActorContext& ctx) {
+ LOG_INFO_S(ctx, NKikimrServices::HIVE, "[" << TabletID() << "] TEvAdoptTablet, msg: " << ev->Get()->Record.ShortDebugString());
const std::pair<ui64, ui64> prevKey(ev->Get()->Record.GetPrevOwner(), ev->Get()->Record.GetPrevOwnerIdx());
const std::pair<ui64, ui64> newKey(ev->Get()->Record.GetOwner(), ev->Get()->Record.GetOwnerIdx());
const TTabletTypes::EType type = ev->Get()->Record.GetTabletType();
@@ -1304,6 +1330,7 @@ namespace NKikimr {
}
void Handle(TEvHive::TEvDeleteTablet::TPtr &ev, const TActorContext &ctx) {
+ LOG_INFO_S(ctx, NKikimrServices::HIVE, "[" << TabletID() << "] TEvDeleteTablet, msg: " << ev->Get()->Record.ShortDebugString());
NKikimrHive::TEvDeleteTablet& rec = ev->Get()->Record;
Cout << "FAKEHIVE " << TabletID() << " TEvDeleteTablet " << rec.ShortDebugString() << Endl;
TVector<ui64> deletedIdx;
@@ -1316,6 +1343,7 @@ namespace NKikimr {
}
void Handle(TEvHive::TEvDeleteOwnerTablets::TPtr &ev, const TActorContext &ctx) {
+ LOG_INFO_S(ctx, NKikimrServices::HIVE, "[" << TabletID() << "] TEvDeleteOwnerTablets, msg: " << ev->Get()->Record);
NKikimrHive::TEvDeleteOwnerTablets& rec = ev->Get()->Record;
Cout << "FAKEHIVE " << TabletID() << " TEvDeleteOwnerTablets " << rec.ShortDebugString() << Endl;
auto ownerId = rec.GetOwner();
@@ -1350,6 +1378,7 @@ namespace NKikimr {
}
void Handle(TEvHive::TEvRequestHiveInfo::TPtr &ev, const TActorContext &ctx) {
+ LOG_INFO_S(ctx, NKikimrServices::HIVE, "[" << TabletID() << "] TEvRequestHiveInfo, msg: " << ev->Get()->Record.ShortDebugString());
const auto& record = ev->Get()->Record;
TAutoPtr<TEvHive::TEvResponseHiveInfo> response = new TEvHive::TEvResponseHiveInfo();
@@ -1369,6 +1398,8 @@ namespace NKikimr {
}
void Handle(TEvHive::TEvInitiateTabletExternalBoot::TPtr &ev, const TActorContext &ctx) {
+ LOG_INFO_S(ctx, NKikimrServices::HIVE, "[" << TabletID() << "] TEvInitiateTabletExternalBoot, msg: " << ev->Get()->Record.ShortDebugString());
+
ui64 tabletId = ev->Get()->Record.GetTabletID();
if (!State->TabletIdToOwner.contains(tabletId)) {
ctx.Send(ev->Sender, new TEvHive::TEvBootTabletReply(NKikimrProto::EReplyStatus::ERROR), 0, ev->Cookie);
@@ -1384,6 +1415,8 @@ namespace NKikimr {
}
void Handle(TEvFakeHive::TEvSubscribeToTabletDeletion::TPtr &ev, const TActorContext &ctx) {
+ LOG_INFO_S(ctx, NKikimrServices::HIVE, "[" << TabletID() << "] TEvSubscribeToTabletDeletion, " << ev->Get()->TabletId);
+
ui64 tabletId = ev->Get()->TabletId;
auto it = State->TabletIdToOwner.find(tabletId);
if (it == State->TabletIdToOwner.end()) {
@@ -1400,6 +1433,7 @@ namespace NKikimr {
}
void Handle(TEvents::TEvPoisonPill::TPtr &ev, const TActorContext &ctx) {
+ LOG_INFO_S(ctx, NKikimrServices::HIVE, "[" << TabletID() << "] TEvPoisonPill");
Y_UNUSED(ev);
Become(&TThis::BrokenState);
ctx.Send(Tablet(), new TEvents::TEvPoisonPill);
@@ -1429,6 +1463,7 @@ namespace NKikimr {
TState::TPtr State;
TGetTabletCreationFunc GetTabletCreationFunc;
TDeque<TAutoPtr<IEventHandle>> InitialEventsQueue;
+ TSubDomainKey PrimarySubDomainKey;
};
void BootFakeHive(TTestActorRuntime& runtime, ui64 tabletId, TFakeHiveState::TPtr state,
diff --git a/ydb/core/tx/schemeshard/schemeshard__init.cpp b/ydb/core/tx/schemeshard/schemeshard__init.cpp
index 49befbee14d..6a9225674be 100644
--- a/ydb/core/tx/schemeshard/schemeshard__init.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__init.cpp
@@ -1390,7 +1390,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
if (!Self->IsShemeShardConfigured()) {
LOG_NOTICE_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- "TTxInit, SS hasn't been configured jet"
+ "TTxInit, SS hasn't been configured yet"
<< ", state: " << (ui64)Self->InitState
<< ", at schemeshard: " << Self->TabletID());
return true;
@@ -1576,14 +1576,14 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
ui64 alterVersion = rowset.GetValue<Schema::SubDomains::AlterVersion>();
ui64 planResolution = rowset.GetValue<Schema::SubDomains::PlanResolution>();
- ui32 timeCastBukets = rowset.GetValue<Schema::SubDomains::TimeCastBuckets>();
+ ui32 timeCastBuckets = rowset.GetValue<Schema::SubDomains::TimeCastBuckets>();
TPathId resourcesDomainId = TPathId(
rowset.GetValue<Schema::SubDomains::ResourcesDomainOwnerPathId>(),
rowset.GetValue<Schema::SubDomains::ResourcesDomainLocalPathId>());
TSubDomainInfo::TPtr domainInfo = new TSubDomainInfo(
alterVersion,
planResolution,
- timeCastBukets,
+ timeCastBuckets,
resourcesDomainId);
Self->SubDomains[pathId] = domainInfo;
Self->IncrementPathDbRefCount(pathId);
@@ -1645,7 +1645,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
ui64 alterVersion = rowset.GetValue<Schema::SubDomainsAlterData::AlterVersion>();
ui64 planResolution = rowset.GetValue<Schema::SubDomainsAlterData::PlanResolution>();
- ui32 timeCastBukets = rowset.GetValue<Schema::SubDomainsAlterData::TimeCastBuckets>();
+ ui32 timeCastBuckets = rowset.GetValue<Schema::SubDomainsAlterData::TimeCastBuckets>();
TPathId resourcesDomainId = TPathId(
rowset.GetValue<Schema::SubDomainsAlterData::ResourcesDomainOwnerPathId>(),
@@ -1667,7 +1667,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
alter = new TSubDomainInfo(
alterVersion,
planResolution,
- timeCastBukets,
+ timeCastBuckets,
resourcesDomainId);
TTabletId sharedHiveId = rowset.GetValue<Schema::SubDomainsAlterData::SharedHiveId>();
@@ -2929,6 +2929,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
}
}
+ // Initialize SubDomains
{
for(auto item: Self->SubDomains) {
auto pathId = item.first;
@@ -3463,8 +3464,8 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
for (TOperationId opId: forceDropOpIds) {
TTxState* txState = Self->FindTx(opId);
Y_VERIFY(txState);
- auto pathes = Self->ListSubThee(txState->TargetPathId, ctx);
- Self->MarkAsDroping(pathes, opId.GetTxId(), ctx);
+ auto pathes = Self->ListSubTree(txState->TargetPathId, ctx);
+ Self->MarkAsDropping(pathes, opId.GetTxId(), ctx);
}
// Read txid dependencies
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation.cpp b/ydb/core/tx/schemeshard/schemeshard__operation.cpp
index 800582cdbde..856ef27f1b9 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation.cpp
@@ -71,7 +71,7 @@ NKikimrScheme::TEvModifySchemeTransaction GetRecordForPrint(const NKikimrScheme:
return recordForPrint;
}
-void MakeAuditLog(const TTxId& txId, const THolder<TProposeResponse>& response, TOperationContext& context) {
+void MakeAuditLog(const TTxId& txId, const THolder<TProposeResponse>& response, TOperationContext& context) {
auto fragPath = TPath::Resolve(context.AuditLogFragments.front().GetAnyPath(), context.SS);
if (!fragPath.IsResolved()) {
fragPath.RiseUntilFirstResolvedParent();
@@ -879,19 +879,13 @@ ISubOperationBase::TPtr TOperation::RestorePart(TTxState::ETxType txType, TTxSta
case TTxState::ETxType::TxDropSubDomain:
return CreateDropSubdomain(NextPartId(), txState);
case TTxState::ETxType::TxForceDropSubDomain:
- return CreateFroceDropSubDomain(NextPartId(), txState);
- case TTxState::ETxType::TxCreateExtSubDomain:
- return CreateExtSubDomain(NextPartId(), txState);
+ return CreateForceDropSubDomain(NextPartId(), txState);
case TTxState::ETxType::TxCreateKesus:
return CreateNewKesus(NextPartId(), txState);
case TTxState::ETxType::TxAlterKesus:
return CreateAlterKesus(NextPartId(), txState);
case TTxState::ETxType::TxDropKesus:
return CreateDropKesus(NextPartId(), txState);
- case TTxState::ETxType::TxAlterExtSubDomain:
- return CreateAlterExtSubDomain(NextPartId(), txState);
- case TTxState::ETxType::TxForceDropExtSubDomain:
- return CreateFroceDropExtSubDomain(NextPartId(), txState);
case TTxState::ETxType::TxInitializeBuildIndex:
return CreateInitializeBuildIndexMainTable(NextPartId(), txState);
case TTxState::ETxType::TxFinalizeBuildIndex:
@@ -909,6 +903,16 @@ ISubOperationBase::TPtr TOperation::RestorePart(TTxState::ETxType txType, TTxSta
case TTxState::ETxType::TxAlterSolomonVolume:
return CreateAlterSolomon(NextPartId(), txState);
+ // ExtSubDomain
+ case TTxState::ETxType::TxCreateExtSubDomain:
+ return CreateExtSubDomain(NextPartId(), txState);
+ case TTxState::ETxType::TxAlterExtSubDomain:
+ return CreateAlterExtSubDomain(NextPartId(), txState);
+ case TTxState::ETxType::TxAlterExtSubDomainCreateHive:
+ return CreateAlterExtSubDomainCreateHive(NextPartId(), txState);
+ case TTxState::ETxType::TxForceDropExtSubDomain:
+ return CreateForceDropExtSubDomain(NextPartId(), txState);
+
// BlockStore
case TTxState::ETxType::TxCreateBlockStoreVolume:
return CreateNewBSV(NextPartId(), txState);
@@ -993,7 +997,7 @@ ISubOperationBase::TPtr TOperation::ConstructPart(NKikimrSchemeOp::EOperationTyp
case NKikimrSchemeOp::EOperationType::ESchemeOpAlterUserAttributes:
return CreateAlterUserAttrs(NextPartId(), tx);
case NKikimrSchemeOp::EOperationType::ESchemeOpForceDropUnsafe:
- return CreateFroceDropUnsafe(NextPartId(), tx);
+ return CreateForceDropUnsafe(NextPartId(), tx);
case NKikimrSchemeOp::EOperationType::ESchemeOpCreateTable:
return CreateNewTable(NextPartId(), tx);
case NKikimrSchemeOp::EOperationType::ESchemeOpAlterTable:
@@ -1053,12 +1057,17 @@ ISubOperationBase::TPtr TOperation::ConstructPart(NKikimrSchemeOp::EOperationTyp
return CreateDropSubdomain(NextPartId(), tx);
case NKikimrSchemeOp::EOperationType::ESchemeOpForceDropSubDomain:
Y_FAIL("run in compatible");
+
+ // ExtSubDomain
case NKikimrSchemeOp::EOperationType::ESchemeOpCreateExtSubDomain:
return CreateExtSubDomain(NextPartId(), tx);
case NKikimrSchemeOp::EOperationType::ESchemeOpAlterExtSubDomain:
return CreateAlterExtSubDomain(NextPartId(), tx);
+ case NKikimrSchemeOp::EOperationType::ESchemeOpAlterExtSubDomainCreateHive:
+ Y_FAIL("multipart operations are handled before, also they require transaction details");
case NKikimrSchemeOp::EOperationType::ESchemeOpForceDropExtSubDomain:
- return CreateFroceDropExtSubDomain(NextPartId(), tx);
+ return CreateForceDropExtSubDomain(NextPartId(), tx);
+
case NKikimrSchemeOp::EOperationType::ESchemeOpCreateKesus:
return CreateNewKesus(NextPartId(), tx);
case NKikimrSchemeOp::EOperationType::ESchemeOpAlterKesus:
@@ -1214,7 +1223,8 @@ TVector<ISubOperationBase::TPtr> TOperation::ConstructParts(const TTxTransaction
return CreateConsistentAlterTable(NextPartId(), tx, context);
case NKikimrSchemeOp::EOperationType::ESchemeOpMoveIndex:
return CreateConsistentMoveIndex(NextPartId(), tx, context);
-
+ case NKikimrSchemeOp::EOperationType::ESchemeOpAlterExtSubDomain:
+ return CreateCompatibleAlterExtSubDomain(NextPartId(), tx, context);
default:
return {ConstructPart(opType, tx)};
}
@@ -1308,17 +1318,12 @@ void TOperation::ProposePart(TSubTxId partId, TTabletId tableId) {
void TOperation::DoPropose(TSchemeShard* ss, TSideEffects& sideEffects, const TActorContext& ctx) const {
Y_VERIFY(IsReadyToPropose());
- //agregate
+ //aggregate
TTabletId selfTabletId = ss->SelfTabletId();
- TTabletId coordinatorId = InvalidTabletId; //common for all part
+ TTabletId coordinatorId = InvalidTabletId; //common for all parts
TStepId effectiveMinStep = TStepId(0);
- for (auto& rec: Proposes) {
- TSubTxId partId = InvalidSubTxId;
- TPathId pathId = InvalidPathId;
- TStepId minStep = InvalidStepId;
- std::tie(partId, pathId, minStep) = rec;
-
+ for (auto [_, pathId, minStep]: Proposes) {
{
TTabletId curCoordinatorId = ss->SelectCoordinator(TxId, pathId);
if (coordinatorId == InvalidTabletId) {
@@ -1331,10 +1336,7 @@ void TOperation::DoPropose(TSchemeShard* ss, TSideEffects& sideEffects, const TA
}
TSet<TTabletId> shards;
- for (auto& rec: ShardsProposes) {
- TSubTxId partId = InvalidSubTxId;
- TTabletId shard = InvalidTabletId;
- std::tie(partId, shard) = rec;
+ for (auto [partId, shard]: ShardsProposes) {
shards.insert(shard);
sideEffects.RouteByTablet(TOperationId(TxId, partId), shard);
@@ -1492,7 +1494,7 @@ TSet<TOperationId> TOperation::ActivatePartsWaitPublication(TPathId pathId, ui64
activateParts.insert(TOperationId(TxId, partId)); // activate on every path
}
- it = WaitingPublicationsByPath.erase(it); // move iterator it forwart to the next element
+ it = WaitingPublicationsByPath.erase(it); // move iterator it forward to the next element
}
return activateParts;
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation.h b/ydb/core/tx/schemeshard/schemeshard__operation.h
index d98bdf23e7e..7bbd05c2c7b 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation.h
+++ b/ydb/core/tx/schemeshard/schemeshard__operation.h
@@ -102,7 +102,7 @@ struct TOperation: TSimpleRefCount<TOperation> {
void ProposePart(TSubTxId partId, TTabletId tableId);
void DoPropose(TSchemeShard* ss, TSideEffects& sideEffects, const TActorContext& ctx) const;
- // route incomming messages to the parts
+ // route incoming messages to suboperations (parts)
void RegisterRelationByTabletId(TSubTxId partId, TTabletId tablet, const TActorContext& ctx);
void RegisterRelationByShardIdx(TSubTxId partId, TShardIdx shardIdx, const TActorContext& ctx);
TSubTxId FindRelatedPartByTabletId(TTabletId tablet, const TActorContext& ctx) const;
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp
index e2ed28bafdb..30510a84eba 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp
@@ -1,68 +1,523 @@
#include "schemeshard__operation_part.h"
+#include "schemeshard__operation_common_subdomain.h"
#include "schemeshard__operation_common.h"
#include "schemeshard_impl.h"
#include <ydb/core/base/subdomain.h>
-#include <ydb/core/persqueue/config/config.h>
-namespace {
-using namespace NKikimr;
-using namespace NSchemeShard;
+#define LOG_D(stream) LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "[" << context.SS->TabletID() << "] " << stream)
+#define LOG_I(stream) LOG_INFO_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "[" << context.SS->TabletID() << "] " << stream)
+#define LOG_N(stream) LOG_NOTICE_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "[" << context.SS->TabletID() << "] " << stream)
+
+
+namespace NKikimr::NSchemeShard {
+
+namespace {
-void DeclareShards(TTxState& txState, TTxId txId, TPathId pathId,
- ui32 count, TTabletTypes::EType type,
- const TChannelsBindings& channelsBindings,
- TSchemeShard* ss)
+void AddShardsTo(TTxState& txState, TTxId txId, TPathId pathId,
+ ui32 count, TTabletTypes::EType type,
+ const TChannelsBindings& channelsBindings,
+ TSchemeShard* ss)
{
txState.Shards.reserve(count);
for (ui64 i = 0; i < count; ++i) {
auto shardId = ss->RegisterShardInfo(
- TShardInfo(txId, pathId, type)
- .WithBindedChannels(channelsBindings));
+ TShardInfo(txId, pathId, type).WithBindedChannels(channelsBindings)
+ );
txState.Shards.emplace_back(shardId, type, TTxState::CreateParts);
}
}
-void PersistShards(NIceDb::TNiceDb& db, TTxState& txState, ui64 shardsToCreate, TSchemeShard* ss) {
- for (const auto& shard : txState.Shards) {
- Y_VERIFY(shard.Operation == TTxState::ETxState::CreateParts);
- Y_VERIFY(ss->ShardInfos.contains(shard.Idx), "shard info is set before");
- auto& shardInfo = ss->ShardInfos[shard.Idx];
- ss->PersistShardMapping(db, shard.Idx, InvalidTabletId, shardInfo.PathId, shardInfo.CurrentTxId, shardInfo.TabletType);
- ss->PersistChannelsBinding(db, shard.Idx, shardInfo.BindedChannels);
+struct TParamsDelta {
+ uint64_t CoordinatorsAdded = 0;
+ uint64_t MediatorsAdded = 0;
+ uint64_t TimeCastBucketsPerMediatorAdded = 0;
+ uint8_t AddExternalSchemeShard = 0;
+ uint8_t AddExternalHive = 0;
+ uint8_t AddExternalSysViewProcessor = 0;
+ bool SharedTxSupportAdded = false;
+ TVector<TStoragePool> StoragePoolsAdded;
+};
+
+std::tuple<NKikimrScheme::EStatus, TString>
+VerifyParams(TParamsDelta* delta, const TSubDomainInfo::TPtr& current, const NKikimrSubDomains::TSubDomainSettings& input) {
+ auto paramError = [](const TStringBuf& msg) {
+ return std::make_tuple(NKikimrScheme::EStatus::StatusInvalidParameter,
+ TStringBuilder() << "Invalid ExtSubDomain request: " << msg
+ );
+ };
+
+ // Process input TSubDomainSettings using diff semantics:
+ // - a present diff.param indicates a change to the state
+ // - an unset diff.param does not matter
+ // - the state with the change applied should be valid and workable
+ //
+ // Currently this operation supports very few workable result states:
+ // 1. extsubdomain with full SharedTxSupport (ExternalSchemeShard, Coordinators, Mediators + required params),
+ // with or without ExternalHive and ExternalSysViewProcessor
+ //
+
+ // First params check: single values
+
+ // PlanResolution
+ uint64_t planResolutionAdded = 0;
+ if (input.HasPlanResolution()) {
+ const auto prev = current->GetPlanResolution();
+ const auto next = input.GetPlanResolution();
+ const bool changed = (prev != next);
+
+ if (changed) {
+ if (prev != 0) {
+ return paramError("PlanResolution could be set only once");
+ }
+ planResolutionAdded = next;
+ }
+ }
+
+ // Coordinators (also mediators) check:
+ // if state.param unset, then diff.param:
+ // - could be 0 -> state.param stays unset
+ // - could be non-zero value -> state.param become set
+ // if state.param set, then state.param:
+ // - couldn't be changed
+ // - couldn't be unset
+ //
+ uint64_t coordinatorsAdded = 0;
+ if (input.HasCoordinators()) {
+ const auto prev = current->GetProcessingParams().CoordinatorsSize();
+ const auto next = input.GetCoordinators();
+ const bool changed = (prev != next);
+
+ if (changed) {
+ if (prev != 0) {
+ return paramError("Coordinators could be set only once");
+ }
+ coordinatorsAdded = next;
+ }
+ }
+ // Mediators checks
+ uint64_t mediatorsAdded = 0;
+ if (input.HasMediators()) {
+ const auto prev = current->GetProcessingParams().MediatorsSize();
+ const auto next = input.GetMediators();
+ const bool changed = (prev != next);
+
+ if (changed) {
+ if (prev != 0) {
+ return paramError("Mediators could be set only once");
+ }
+ mediatorsAdded = next;
+ }
+ }
+
+ // TimeCastBucketsPerMediator
+ uint64_t timeCastBucketsPerMediatorAdded = 0;
+ if (input.HasTimeCastBucketsPerMediator()) {
+ const auto prev = current->GetProcessingParams().GetTimeCastBucketsPerMediator();
+ const auto next = input.GetTimeCastBucketsPerMediator();
+ const bool changed = (prev != next);
+
+ if (changed) {
+ if (prev != 0) {
+ return paramError("TimeCastBucketsPerMediator could be set only once");
+ }
+ timeCastBucketsPerMediatorAdded = next;
+ }
+ }
+
+ // ExternalSchemeShard checks
+ uint8_t addExternalSchemeShard = 0;
+ if (input.HasExternalSchemeShard()) {
+ const bool prev = bool(current->GetTenantSchemeShardID());
+ const bool next = input.GetExternalSchemeShard();
+ const bool changed = (prev != next);
+
+ if (changed) {
+ if (next == false) {
+ return paramError("ExternalSchemeShard could only be added, not removed");
+ }
+ addExternalSchemeShard = 1;
+ }
+ }
+
+ // ExternalHive checks
+ uint8_t addExternalHive = 0;
+ if (input.HasExternalHive()) {
+ const bool prev = bool(current->GetTenantHiveID());
+ const bool next = input.GetExternalHive();
+ const bool changed = (prev != next);
+
+ if (changed) {
+ if (next == false) {
+ return paramError("ExternalHive could only be added, not removed");
+ }
+ addExternalHive = 1;
+ }
+ }
+
+ // ExternalSysViewProcessor checks
+ uint8_t addExternalSysViewProcessor = 0;
+ if (input.HasExternalSysViewProcessor()) {
+ const bool prev = bool(current->GetTenantSysViewProcessorID());
+ const bool next = input.GetExternalSysViewProcessor();
+ const bool changed = (prev != next);
+
+ if (changed) {
+ if (next == false) {
+ return paramError("ExternalSysViewProcessor could only be added, not removed");
+ }
+ addExternalSysViewProcessor = 1;
+ }
+ }
+
+ // Second params check: combinations
+
+ bool sharedTxSupportAdded = (coordinatorsAdded + mediatorsAdded) > 0;
+
+ if (sharedTxSupportAdded) {
+ if (0 == coordinatorsAdded) {
+ return paramError("can not create ExtSubDomain with zero Coordinators");
+ }
+ if (0 == mediatorsAdded) {
+ return paramError("can not create ExtSubDomain with zero Mediators");
+ }
+
+ if (0 == timeCastBucketsPerMediatorAdded) {
+ return paramError("can not create ExtSubDomain with TimeCastBucketsPerMediator not set");
+ }
+ if (0 == planResolutionAdded) {
+ return paramError("can not create ExtSubDomain with PlanResolution not set");
+ }
+ }
+ if (!addExternalSchemeShard && sharedTxSupportAdded) {
+ return paramError("ExtSubDomain without ExternalSchemeShard is useless, use SubDomain");
+ }
+ if (addExternalSchemeShard && !sharedTxSupportAdded) {
+ return paramError("ExtSubDomain without coordinators/mediators is useful for NBS, but not supported yet, use SubDomain");
+ }
+
+ // Storage pools check
+ TVector<TStoragePool> storagePoolsAdded;
+ {
+ auto actualPools = TStoragePools(current->GetStoragePools());
+ std::sort(actualPools.begin(), actualPools.end());
+
+ auto requestedPools = TStoragePools(input.GetStoragePools().begin(), input.GetStoragePools().end());
+ std::sort(requestedPools.begin(), requestedPools.end());
+
+ auto uniqEnd = std::unique(requestedPools.begin(), requestedPools.end());
+ if (uniqEnd != requestedPools.end()) {
+ return paramError(TStringBuilder() << "requested storage pools are not unique, for example, the pool '" << uniqEnd->GetName() << "' repeats several times");
+ }
+
+ {
+ TStoragePools omittedPools;
+ std::set_difference(
+ actualPools.begin(), actualPools.end(),
+ requestedPools.begin(), requestedPools.end(),
+ std::back_inserter(omittedPools)
+ );
+
+ if (omittedPools && requestedPools) {
+ return paramError(TStringBuilder() << "deleting storage pools is not allowed, for example, deletion of '" << omittedPools.begin()->GetName() << "' requested");
+ }
+ }
+
+ std::set_difference(requestedPools.begin(), requestedPools.end(),
+ actualPools.begin(), actualPools.end(),
+ std::back_inserter(storagePoolsAdded));
+ }
+
+ delta->CoordinatorsAdded = coordinatorsAdded;
+ delta->MediatorsAdded = mediatorsAdded;
+ delta->TimeCastBucketsPerMediatorAdded = timeCastBucketsPerMediatorAdded;
+ delta->AddExternalSchemeShard = addExternalSchemeShard;
+ delta->AddExternalHive = addExternalHive;
+ delta->AddExternalSysViewProcessor = addExternalSysViewProcessor;
+ delta->SharedTxSupportAdded = sharedTxSupportAdded;
+ delta->StoragePoolsAdded = std::move(storagePoolsAdded);
+
+ return {NKikimrScheme::EStatus::StatusAccepted, {}};
+}
+
+void VerifyParams(TProposeResponse* result, TParamsDelta* delta, const TSubDomainInfo::TPtr& current, const NKikimrSubDomains::TSubDomainSettings& input) {
+ // TProposeResponse should come in assuming a positive outcome (status NKikimrScheme::StatusAccepted, no errors)
+ Y_VERIFY(result->IsAccepted());
+ auto [status, reason] = VerifyParams(delta, current, input);
+ result->SetStatus(status, reason);
+}
+
+void RegisterChanges(const TTxState& txState, const TTxId operationTxId, TOperationContext& context, TPath& path, TSubDomainInfo::TPtr& subdomainInfo, TSubDomainInfo::TPtr& alter) {
+ const auto& basenameId = path.Base()->PathId;
+
+ context.MemChanges.GrabPath(context.SS, basenameId);
+
+ // Registering shards is a bit complicated as every shard should be registered
+ // in many places:
+ // - in schemeshard.ShardInfo as a "wannabe tablet" (this is done at AddShardsTo())
+ // - in extsubdomain path as a "shard that lives inside" that path
+ // - in extsubdomain alter as a "private/system part of the subdomain entity"
+ for (auto& shard: txState.Shards) {
+ auto shardIdx = shard.Idx;
+
+ // Schemeshard local db
+ context.DbChanges.PersistShard(shardIdx);
+
+ // Path
+ path.DomainInfo()->AddInternalShard(shardIdx);
+ path.Base()->IncShardsInside(1);
+
+ // Extsubdomain data
+ alter->AddPrivateShard(shardIdx);
+ }
+
+ // Path state change
+ {
+ path.Base()->LastTxId = operationTxId;
+ path.Base()->PathState = TPathElement::EPathState::EPathStateAlter;
+ context.DbChanges.PersistPath(path.Base()->PathId);
+ }
+
+ // Subdomain alter state
+ {
+ subdomainInfo->SetAlter(alter);
}
- Y_VERIFY(shardsToCreate == txState.Shards.size());
}
-class TAlterExtSubDomain: public TSubOperation {
+class TCreateHive: public TSubOperationState {
+private:
+ TOperationId OperationId;
+
+ TString DebugHint() const override {
+ return TStringBuilder() << "TCreateHive, operationId# " << OperationId << ", ";
+ }
+
+public:
+ TCreateHive(TOperationId id)
+ : OperationId(id)
+ {
+ IgnoreMessages(DebugHint(), {});
+ }
+
+ void SendCreateTabletEvent(const TPathId& pathId, TShardIdx shardIdx, TOperationContext& context) {
+ auto path = context.SS->PathsById.at(pathId);
+
+ auto ev = CreateEvCreateTablet(path, shardIdx, context);
+ auto rootHiveId = context.SS->GetGlobalHive(context.Ctx);
+
+ LOG_D(DebugHint() << "Send CreateTablet event to Hive: " << rootHiveId << " msg: "<< ev->Record.DebugString());
+
+ context.OnComplete.BindMsgToPipe(OperationId, rootHiveId, shardIdx, ev.Release());
+
+ context.OnComplete.RouteByShardIdx(OperationId, shardIdx);
+ }
+
+ void SendPublishPathRequest(const TPathId& pathId, TOperationContext& context) {
+ context.OnComplete.PublishAndWaitPublication(OperationId, pathId);
+ }
+
+
+ bool ProgressState(TOperationContext& context) override {
+ LOG_I(DebugHint() << "ProgressState");
+
+ TTxState* txState = context.SS->FindTx(OperationId);
+ Y_VERIFY(txState);
+ Y_VERIFY(txState->TxType == TTxState::TxAlterExtSubDomainCreateHive);
+ Y_VERIFY(txState->Shards.size() == 1);
+ Y_VERIFY(txState->Shards.back().TabletType == ETabletType::Hive,
+ "expected tablet type HIVE, actual type %s", ETabletType::TypeToStr(txState->Shards.back().TabletType)
+ );
+
+ // In the case of schemeshard reboots the hive could have already been created.
+ // If so, the operation should skip the tablet creation step but still perform the publishing step.
+
+ auto shard = txState->Shards.back();
+
+ auto getSubdomainHiveTabletId = [](const TPathId& pathId, TShardIdx shardIdx, TOperationContext& context) {
+ auto subdomain = context.SS->SubDomains.at(pathId);
+ Y_VERIFY(context.SS->ShardInfos.contains(shardIdx));
+ auto& shardInfo = context.SS->ShardInfos.at(shardIdx);
+ Y_VERIFY(shardInfo.TabletType == ETabletType::Hive);
+ return shardInfo.TabletID;
+ };
+
+ auto subdomainHiveTabletId = getSubdomainHiveTabletId(txState->TargetPathId, shard.Idx, context);
+
+ if (subdomainHiveTabletId == InvalidTabletId) {
+ SendCreateTabletEvent(txState->TargetPathId, shard.Idx, context);
+
+ } else {
+ LOG_I(DebugHint() << "ProgressState, ExtSubDomain hive already exist, tabletId: " << subdomainHiveTabletId);
+ SendPublishPathRequest(txState->TargetPathId, context);
+ }
+
+ return false;
+ }
+
+ bool HandleReply(TEvHive::TEvCreateTabletReply::TPtr& ev, TOperationContext& context) override {
+ LOG_I(DebugHint() << "HandleReply TEvCreateTabletReply");
+ LOG_D(DebugHint() << "HandleReply TEvCreateTabletReply, msg: " << DebugReply(ev));
+
+ TTxState* txState = context.SS->FindTx(OperationId);
+ Y_VERIFY(txState);
+ Y_VERIFY(txState->TxType == TTxState::TxAlterExtSubDomainCreateHive);
+
+ const auto& record = ev->Get()->Record;
+ auto shardIdx = TShardIdx(record.GetOwner(), TLocalShardIdx(record.GetOwnerIdx()));
+ auto createdTabletId = TTabletId(record.GetTabletID()); // global id from hive
+ Y_VERIFY(createdTabletId != InvalidTabletId);
+
+ NKikimrProto::EReplyStatus status = record.GetStatus();
+ Y_VERIFY_S((
+ status == NKikimrProto::OK
+ || status == NKikimrProto::ALREADY
+ ),
+ "Unexpected status " << NKikimrProto::EReplyStatus_Name(status)
+ << " in CreateTabletReply shard idx " << shardIdx << " tabletId " << createdTabletId
+ );
+
+ auto rootHiveId = TTabletId(record.GetOrigin());
+ Y_VERIFY(rootHiveId == context.SS->GetGlobalHive(context.Ctx));
+
+ TShardInfo& shardInfo = context.SS->ShardInfos.at(shardIdx);
+
+ Y_VERIFY(shardInfo.TabletType == ETabletType::Hive);
+ Y_VERIFY(shardInfo.TabletID == InvalidTabletId || shardInfo.TabletID == createdTabletId);
+ Y_VERIFY(shardInfo.CurrentTxId == OperationId.GetTxId());
+
+ if (shardInfo.TabletID == InvalidTabletId) {
+ context.SS->TabletCounters->Simple()[COUNTER_SUB_DOMAIN_HIVE_COUNT].Add(1);
+ }
+
+ shardInfo.TabletID = createdTabletId;
+ context.SS->TabletIdToShardIdx[createdTabletId] = shardIdx;
+
+ NIceDb::TNiceDb db(context.GetDB());
+
+ // commit the new extsubdomain's hive data to the subdomain, both db and memory
+ // (and publish that change to the extsubdomain's path below),
+ // so that the next stages can get the extsubdomain's hive tablet id
+ // by querying the extsubdomain's path
+ {
+ auto subdomain = context.SS->SubDomains.at(txState->TargetPathId);
+ subdomain->AddPrivateShard(shardIdx);
+ subdomain->AddInternalShard(shardIdx);
+
+ subdomain->SetTenantHiveIDPrivate(createdTabletId);
+
+ Y_VERIFY(subdomain->GetVersion() + 2 == subdomain->GetAlter()->GetVersion());
+ subdomain->SetVersion(subdomain->GetVersion() + 1);
+
+ context.SS->PersistSubDomainVersion(db, txState->TargetPathId, *subdomain);
+ context.SS->PersistSubDomainPrivateShards(db, txState->TargetPathId, *subdomain);
+ }
+
+ context.SS->PersistShardMapping(db, shardIdx, createdTabletId, shardInfo.PathId, OperationId.GetTxId(), ETabletType::Hive);
+
+ context.OnComplete.UnbindMsgFromPipe(OperationId, rootHiveId, shardIdx);
+ context.OnComplete.ActivateShardCreated(shardIdx, OperationId.GetTxId());
+
+ LOG_I(DebugHint() << "ExtSubDomain hive created, tabletId " << createdTabletId);
+
+ // no need to configure the new hive with a separate EvConfigureHive:
+ // the new hive is already configured to serve the new subdomain at creation
+ // (by getting a proper ObjectDomain, see ydb/core/mind/hive/tx_create_tablet.cpp)
+
+ // publish the new path info to make this transient extsubdomain state
+ // (nothing + hive) visible to the entire system, or rather to the local services
+ // that should join the extsubdomain's resource pool and, in order to do so, must
+ // register with the newly created extsubdomain's hive
+ SendPublishPathRequest(txState->TargetPathId, context);
+
+ return false;
+ }
+
+ bool HandleReply(TEvPrivate::TEvCompletePublication::TPtr& ev, TOperationContext& context) override {
+ LOG_I(DebugHint() << "HandleReply TEvCompletePublication");
+ LOG_D(DebugHint() << "HandleReply TEvCompletePublication" << ", msg: " << DebugReply(ev));
+
+ TTxState* txState = context.SS->FindTx(OperationId);
+ Y_VERIFY(txState);
+ Y_VERIFY(txState->TxType == TTxState::TxAlterExtSubDomainCreateHive);
+
+ Y_VERIFY(txState->TargetPathId == ev->Get()->PathId);
+
+ NIceDb::TNiceDb db(context.GetDB());
+
+ // Register barrier to release fellow suboperation waiting on hive creation.
+ // This is a sync point with TAlterExtSubDomain.
+ context.OnComplete.Barrier(OperationId, "extsubdomain-hive-created");
+
+ return false;
+ }
+
+ bool HandleReply(TEvPrivate::TEvCompleteBarrier::TPtr& ev, TOperationContext& context) override {
+ LOG_I(DebugHint() << "HandleReply TEvPrivate:TEvCompleteBarrier, msg: " << ev->Get()->ToString());
+
+ TTxState* txState = context.SS->FindTx(OperationId);
+ Y_VERIFY(txState);
+
+ NIceDb::TNiceDb db(context.GetDB());
+
+ context.SS->ChangeTxState(db, OperationId, TTxState::Propose);
+
+ return true;
+ }
+};
+
+class TEmptyPropose: public TSubOperationState {
+private:
+ TOperationId OperationId;
+
+ TString DebugHint() const override {
+ return TStringBuilder() << "TEmptyPropose, operationId " << OperationId << ", ";
+ }
+
+public:
+ TEmptyPropose(TOperationId id)
+ : OperationId(id)
+ {
+ IgnoreMessages(DebugHint(), {});
+ }
+
+ bool ProgressState(TOperationContext& context) override {
+ TTxState* txState = context.SS->FindTx(OperationId);
+ Y_VERIFY(txState);
+
+ LOG_I(DebugHint() << "ProgressState, operation type " << TTxState::TypeName(txState->TxType));
+
+ context.OnComplete.ProposeToCoordinator(OperationId, txState->TargetPathId, TStepId(0));
+
+ return true;
+ }
+};
+
+class TAlterExtSubDomainCreateHive: public TSubOperation {
static TTxState::ETxState NextState() {
return TTxState::CreateParts;
}
TTxState::ETxState NextState(TTxState::ETxState state) const override {
- switch (state) {
+ switch(state) {
case TTxState::Waiting:
case TTxState::CreateParts:
- return TTxState::ConfigureParts;
- case TTxState::ConfigureParts:
- return TTxState::Propose; // DONE ???
+ return TTxState::Propose;
case TTxState::Propose:
return TTxState::Done;
default:
return TTxState::Invalid;
}
+ return TTxState::Invalid;
}
TSubOperationState::TPtr SelectStateFunc(TTxState::ETxState state) override {
- switch (state) {
+ switch(state) {
case TTxState::Waiting:
case TTxState::CreateParts:
- return THolder(new TCreateParts(OperationId));
- case TTxState::ConfigureParts:
- return THolder(new NSubDomainState::TConfigureParts(OperationId));
+ return THolder(new TCreateHive(OperationId));
case TTxState::Propose:
- return THolder(new NSubDomainState::TPropose(OperationId));
+ return THolder(new TEmptyPropose(OperationId));
case TTxState::Done:
return THolder(new TDone(OperationId));
default:
@@ -74,358 +529,355 @@ public:
using TSubOperation::TSubOperation;
THolder<TProposeResponse> Propose(const TString&, TOperationContext& context) override {
- const TTabletId ssId = context.SS->SelfTabletId();
+ const TTabletId schemeshardTabletId = context.SS->SelfTabletId();
+ const NKikimrSubDomains::TSubDomainSettings& inputSettings = Transaction.GetSubDomain();
- const auto& settings = Transaction.GetSubDomain();
+ TPath path = TPath::Resolve(Transaction.GetWorkingDir(), context.SS).Dive(inputSettings.GetName());
- const TString& parentPathStr = Transaction.GetWorkingDir();
- const TString& name = settings.GetName();
+ LOG_I("TAlterExtSubDomainCreateHive Propose"
+ << ", opId: " << OperationId
+ << ", path: " << path.PathString()
+ );
- LOG_NOTICE_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- "TAlterExtSubDomain Propose"
- << ", path: " << parentPathStr << "/" << name
- << ", opId: " << OperationId
- << ", at schemeshard: " << ssId);
+ // No need to check conditions on extsubdomain path: checked in CreateCompatibleAlterExtSubDomain() already
- TEvSchemeShard::EStatus status = NKikimrScheme::StatusAccepted;
- auto result = MakeHolder<TProposeResponse>(status, ui64(OperationId.GetTxId()), ui64(ssId));
+ const auto& basenameId = path.Base()->PathId;
- if (!parentPathStr) {
- result->SetError(NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: no working dir");
- return result;
- }
+ // Get existing extsubdomain
+ Y_VERIFY(context.SS->SubDomains.contains(basenameId));
+ auto subdomainInfo = context.SS->SubDomains.at(basenameId);
+ Y_VERIFY(subdomainInfo);
- if (!name) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: no name");
+ auto result = MakeHolder<TProposeResponse>(NKikimrScheme::StatusAccepted, ui64(OperationId.GetTxId()), ui64(schemeshardTabletId));
+ result->SetPathId(basenameId.LocalPathId);
+
+ // Check params and build change delta
+ TParamsDelta delta;
+ VerifyParams(result.Get(), &delta, subdomainInfo, inputSettings);
+ if (!result->IsAccepted()) {
return result;
}
- TPath path = TPath::Resolve(parentPathStr, context.SS).Dive(name);
+ // No need to check (shard) limits on path: hive goes above any limits
- {
- TPath::TChecker checks = path.Check();
- checks
- .NotEmpty()
- .NotUnderDomainUpgrade()
- .IsAtLocalSchemeShard()
- .IsResolved()
- .NotDeleted()
- .IsExternalSubDomain()
- .NotUnderOperation()
- .IsCommonSensePath();
+ // This suboperation can't be used as a no-op, so check that hive creation is required
+ Y_VERIFY(delta.AddExternalHive);
- if (!checks) {
- result->SetError(checks.GetStatus(), checks.GetError());
- return result;
- }
- }
+ // Generate changes in: operation object, path, schemeshard in-memory object and local db
- TPathElement::TPtr subDomain = path.Base();
+ // Create in-flight operation object
+ Y_VERIFY(!context.SS->FindTx(OperationId));
+ TTxState& txState = context.SS->CreateTx(OperationId, TTxState::TxAlterExtSubDomainCreateHive, basenameId);
- Y_VERIFY(context.SS->SubDomains.contains(subDomain->PathId));
- auto subDomainInfo = context.SS->SubDomains.at(subDomain->PathId);
- Y_VERIFY(subDomainInfo);
+ // Create subdomain alter
+ TSubDomainInfo::TPtr alter = new TSubDomainInfo(*subdomainInfo, 0, 0);
- if (subDomainInfo->GetAlter()) {
- result->SetError(NKikimrScheme::StatusPathDoesNotExist, "SubDomain is under another alter 2");
- return result;
- }
+ LOG_D("TAlterExtSubDomainCreateHive Propose"
+ << ", opId: " << OperationId
+ << ", subdomain ver " << subdomainInfo->GetVersion()
+ << ", alter ver " << alter->GetVersion()
+ );
- result->SetPathId(subDomain->PathId.LocalPathId);
+ auto guard = context.DbGuard();
- if (0 != settings.GetPlanResolution()) {
- if (subDomainInfo->GetPlanResolution() != 0 && subDomainInfo->GetPlanResolution() != settings.GetPlanResolution()) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: unable to change PlanResolution, only set it up");
- return result;
- }
- if (subDomain->IsRoot()) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: unable to change PlanResolution at root, only additiong storage pools is allowed");
+ // Create shard for the hive to-be.
+ {
+ TChannelsBindings channelsBinding;
+ if (!context.SS->ResolveSubdomainsChannels(delta.StoragePoolsAdded, channelsBinding)) {
+ result->SetError(NKikimrScheme::StatusInvalidParameter, "failed to construct channels binding");
return result;
}
- }
- if (0 != settings.GetTimeCastBucketsPerMediator()) {
- if (subDomainInfo->GetTCB() != 0 && subDomainInfo->GetTCB() != settings.GetTimeCastBucketsPerMediator()) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: unable to change TimeCastBucketsPerMediator, only set it up");
- return result;
- }
- if (subDomain->IsRoot()) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: unable to change TimeCastBucketsPerMediator at root, only additiong storage pools is allowed");
- return result;
- }
+ AddShardsTo(txState, OperationId.GetTxId(), basenameId, 1, TTabletTypes::Hive, channelsBinding, context.SS);
+ Y_VERIFY(txState.Shards.size() == 1);
}
- if (0 == settings.GetCoordinators() && 0 != settings.GetMediators()) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: cant create subdomain with mediators, but no coordinators");
- return result;
- }
+ // Register extsubdomain changes in shards, path, alter
+ RegisterChanges(txState, OperationId.GetTxId(), context, path, subdomainInfo, alter);
+ //NOTE: the alter does not get persisted here; this is intentional
- if (0 != settings.GetCoordinators() && 0 == settings.GetMediators()) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: cant create subdomain with coordinators, but no mediators");
- return result;
+ // Operation in-flight state change
+ {
+ txState.State = TTxState::CreateParts;
+ context.DbChanges.PersistTxState(OperationId);
}
- const bool wasSharedTxSupported = subDomainInfo->IsSupportTransactions();
- const bool setSupportSharedTx = bool(settings.GetCoordinators()) || bool(settings.GetMediators());
+ context.OnComplete.ActivateTx(OperationId);
- const bool wasExternalSchemeShard = bool(subDomainInfo->GetTenantSchemeShardID());
- const bool setExternalSchemeShard = settings.HasExternalSchemeShard();
- const bool addExternalSchemeShard = !wasExternalSchemeShard && setExternalSchemeShard && settings.GetExternalSchemeShard();
+ // Set initial operation state
+ SetState(NextState());
- const bool wasExternalHive = bool(subDomainInfo->GetTenantHiveID());
- const bool setExternalHive = settings.HasExternalHive();
- const bool addExternalHive = !wasExternalHive && setExternalHive && settings.GetExternalHive();
+ return result;
+ }
- const bool wasViewProcessors = bool(subDomainInfo->GetTenantSysViewProcessorID());
- const bool setViewProcessors = settings.HasExternalSysViewProcessor();
- const bool addViewProcessors = !wasViewProcessors && setViewProcessors && settings.GetExternalSysViewProcessor();
+ void AbortPropose(TOperationContext& context) override {
+ LOG_N("TAlterExtSubDomainCreateHive AbortPropose"
+ << ", opId " << OperationId
+ );
+ }
- ui64 shardsToCreate = 0;
- ui64 allowOverTheLimitShards = 0;
+ void AbortUnsafe(TTxId forceDropTxId, TOperationContext& context) override {
+ LOG_N("TAlterExtSubDomainCreateHive AbortUnsafe"
+ << ", opId: " << OperationId
+ << ", forceDropId: " << forceDropTxId
+ );
- if (wasExternalSchemeShard && setExternalSchemeShard) {
- if (bool(subDomainInfo->GetTenantSchemeShardID()) != settings.GetExternalSchemeShard()) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: unable to change ExternalSchemeShard, only set it up");
- return result;
- }
- }
+ context.OnComplete.DoneOperation(OperationId);
+ }
+};
- if (addExternalSchemeShard) {
- shardsToCreate += 1;
+class TWaitHiveCreated: public TSubOperationState {
+private:
+ TOperationId OperationId;
- if (!wasSharedTxSupported && !setSupportSharedTx) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: ExtSubdomain without coordinators/mediators is usefull for NBS, but not supported yet, use SubDomain");
- return result;
- }
- }
+ TString DebugHint() const override {
+ return TStringBuilder() << "TWaitHiveCreated, operationId " << OperationId << ", ";
+ }
- if (wasExternalHive && setExternalHive) {
- if (bool(subDomainInfo->GetTenantHiveID()) != settings.GetExternalHive()) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: unable to change ExternalHive, only set it up");
- return result;
- }
- }
+public:
+ TWaitHiveCreated(TOperationId id)
+ : OperationId(id)
+ {
+ IgnoreMessages(DebugHint(), {});
+ }
- if (addExternalHive) {
- shardsToCreate += 1;
- allowOverTheLimitShards += 1;
- }
+ bool ProgressState(TOperationContext& context) override {
+ TTxState* txState = context.SS->FindTx(OperationId);
+ Y_VERIFY(txState);
- if (wasViewProcessors && setViewProcessors) {
- if (bool(subDomainInfo->GetTenantSysViewProcessorID()) != settings.GetExternalSysViewProcessor()) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: unable to change ViewProcessors, only set it up");
- return result;
- }
- }
+ LOG_I(DebugHint() << "ProgressState, operation type " << TTxState::TypeName(txState->TxType));
- if (addViewProcessors) {
- shardsToCreate += 1;
- allowOverTheLimitShards += 1;
- }
+ // Register barrier which this suboperation will wait on.
+ // This is a sync point with TAlterExtSubDomainCreateHive suboperation.
+ context.OnComplete.Barrier(OperationId, "extsubdomain-hive-created");
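+ // Barrier completion arrives as TEvPrivate::TEvCompleteBarrier (handled
+ // below) once all parts of this operation have reached the barrier; only
+ // then does this suboperation advance to CreateParts.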
- if (wasSharedTxSupported && setSupportSharedTx) {
- if (subDomainInfo->GetProcessingParams().CoordinatorsSize() != settings.GetCoordinators()) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: unable to change Coordinators count, only set it up");
- return result;
- }
+ return false;
+ }
- if (subDomainInfo->GetProcessingParams().MediatorsSize() != settings.GetMediators()) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: unable to change Mediators count, only set it up");
- return result;
- }
- }
+ bool HandleReply(TEvPrivate::TEvCompleteBarrier::TPtr& ev, TOperationContext& context) override {
+ LOG_I(DebugHint() << "HandleReply TEvPrivate:TEvCompleteBarrier, msg: " << ev->Get()->ToString());
- if (!wasSharedTxSupported && setSupportSharedTx) {
- shardsToCreate += settings.GetCoordinators() + settings.GetMediators();
+ TTxState* txState = context.SS->FindTx(OperationId);
+ Y_VERIFY(txState);
- if (settings.GetTimeCastBucketsPerMediator() == 0) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: TimeCastBucketsPerMediator should be set when coordinators create");
- return result;
- }
+ NIceDb::TNiceDb db(context.GetDB());
- if (settings.GetPlanResolution() == 0) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: PlanResolution should be set when coordinators create");
- return result;
- }
+ context.SS->ChangeTxState(db, OperationId, TTxState::CreateParts);
- if (!wasExternalSchemeShard && !setExternalSchemeShard) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: ExtSubdomain without External SchemeShard is useless, use SubDomain");
- return result;
- }
- }
+ return true;
+ }
+};
- {
- TPath::TChecker checks = path.Check();
- checks
- .ShardsLimit(shardsToCreate - allowOverTheLimitShards)
- .PathShardsLimit(shardsToCreate - allowOverTheLimitShards);
+class TAlterExtSubDomain: public TSubOperation {
+ static TTxState::ETxState NextState() {
+ return TTxState::Waiting;
+ }
- if (!checks) {
- result->SetError(checks.GetStatus(), checks.GetError());
- return result;
- }
+ TTxState::ETxState NextState(TTxState::ETxState state) const override {
+ switch(state) {
+ case TTxState::Waiting:
+ return TTxState::CreateParts;
+ case TTxState::CreateParts:
+ return TTxState::ConfigureParts;
+ case TTxState::ConfigureParts:
+ return TTxState::Propose;
+ case TTxState::Propose:
+ return TTxState::Done;
+ default:
+ return TTxState::Invalid;
}
+ return TTxState::Invalid;
+ }
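+ // The extra Waiting state parks this suboperation on the
+ // "extsubdomain-hive-created" barrier (see TWaitHiveCreated) so that shard
+ // creation starts only after the hive suboperation, if any, has done its part.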
- auto actualPools = TStoragePools(subDomainInfo->GetStoragePools());
- std::sort(actualPools.begin(), actualPools.end());
+ TSubOperationState::TPtr SelectStateFunc(TTxState::ETxState state) override {
+ switch(state) {
+ case TTxState::Waiting:
+ return THolder(new TWaitHiveCreated(OperationId));
+ case TTxState::CreateParts:
+ return THolder(new TCreateParts(OperationId));
+ case TTxState::ConfigureParts:
+ return THolder(new NSubDomainState::TConfigureParts(OperationId));
+ case TTxState::Propose:
+ return THolder(new NSubDomainState::TPropose(OperationId));
+ case TTxState::Done:
+ return THolder(new TDone(OperationId));
+ default:
+ return nullptr;
+ }
+ }
- auto requestedPools = TVector<TStoragePool>(settings.GetStoragePools().begin(), settings.GetStoragePools().end());
- std::sort(requestedPools.begin(), requestedPools.end());
+public:
+ using TSubOperation::TSubOperation;
- auto uniqEnd = std::unique(requestedPools.begin(), requestedPools.end());
- if (uniqEnd != requestedPools.end()) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: requested storage pools is not qunique, for example, the pool '" + uniqEnd->GetName() +"' repeats several times");
- return result;
- }
+ THolder<TProposeResponse> Propose(const TString&, TOperationContext& context) override {
+ const TTabletId schemeshardTabletId = context.SS->SelfTabletId();
+ const NKikimrSubDomains::TSubDomainSettings& inputSettings = Transaction.GetSubDomain();
- {
- TVector<TStoragePool> omitedPools;
- std::set_difference(actualPools.begin(), actualPools.end(),
- requestedPools.begin(), requestedPools.end(),
- std::back_inserter(omitedPools));
-
- if (omitedPools && requestedPools) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: deleting storage pool is not allowed, for example, requested deletion '" + omitedPools.begin()->GetName() +"'");
- return result;
- }
- }
+ TPath path = TPath::Resolve(Transaction.GetWorkingDir(), context.SS).Dive(inputSettings.GetName());
- TVector<TStoragePool> addedPools;
- std::set_difference(requestedPools.begin(), requestedPools.end(),
- actualPools.begin(), actualPools.end(),
- std::back_inserter(addedPools));
+ LOG_I("TAlterExtSubDomain Propose"
+ << ", opId: " << OperationId
+ << ", path: " << path.PathString()
+ );
- TSubDomainInfo::TPtr alterData = new TSubDomainInfo(*subDomainInfo,
- settings.GetPlanResolution(),
- settings.GetTimeCastBucketsPerMediator(),
- addedPools);
+ // No need to check conditions on the extsubdomain path: they have already been checked in CreateCompatibleAlterExtSubDomain()
- TChannelsBindings channelBindings;
- if (setSupportSharedTx || setExternalSchemeShard || setExternalHive || setViewProcessors) {
- if (!context.SS->ResolveSubdomainsChannels(alterData->GetStoragePools(), channelBindings)) {
- result->SetError(NKikimrScheme::StatusInvalidParameter, "Unable construct channels binding");
- return result;
- }
- }
+ const auto& basenameId = path.Base()->PathId;
+
+ // Get existing extsubdomain
+ Y_VERIFY(context.SS->SubDomains.contains(basenameId));
+ auto subdomainInfo = context.SS->SubDomains.at(basenameId);
+ Y_VERIFY(subdomainInfo);
- TString errStr;
- if (!context.SS->CheckApplyIf(Transaction, errStr)) {
- result->SetError(NKikimrScheme::StatusPreconditionFailed, errStr);
+ auto result = MakeHolder<TProposeResponse>(NKikimrScheme::StatusAccepted, ui64(OperationId.GetTxId()), ui64(schemeshardTabletId));
+ result->SetPathId(basenameId.LocalPathId);
+
+ // Check params and build change delta
+ TParamsDelta delta;
+ VerifyParams(result.Get(), &delta, subdomainInfo, inputSettings);
+ if (!result->IsAccepted()) {
return result;
}
- if (settings.HasDeclaredSchemeQuotas()) {
- alterData->SetDeclaredSchemeQuotas(settings.GetDeclaredSchemeQuotas());
- }
+ // Count tablets to create
- if (settings.HasDatabaseQuotas()) {
- alterData->SetDatabaseQuotas(settings.GetDatabaseQuotas());
- }
+ //NOTE: ExternalHive and ExternalSysViewProcessor are _not_ counted against limits
+ ui64 tabletsToCreateUnderLimit = delta.AddExternalSchemeShard + delta.CoordinatorsAdded + delta.MediatorsAdded;
+ ui64 tabletsToCreateOverLimit = delta.AddExternalSysViewProcessor;
+ ui64 tabletsToCreateTotal = tabletsToCreateUnderLimit + tabletsToCreateOverLimit;
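+ //NOTE: delta.AddExternalHive is added to this total further below, and only
+ // in legacy mode (feature flag off); with EnableAlterDatabaseCreateHiveFirst
+ // the hive shard is declared by the TAlterExtSubDomainCreateHive suboperation.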
- NIceDb::TNiceDb db(context.GetDB());
+ // Check path limits
+
+ {
+ TPath::TChecker checks = path.Check();
+ checks
+ .ShardsLimit(tabletsToCreateUnderLimit)
+ .PathShardsLimit(tabletsToCreateUnderLimit);
- subDomain->LastTxId = OperationId.GetTxId();
- subDomain->PathState = TPathElement::EPathState::EPathStateAlter;
- context.SS->PersistPath(db, subDomain->PathId);
+ if (!checks) {
+ result->SetError(checks.GetStatus(), checks.GetError());
+ return result;
+ }
+ }
+
+ // Generate changes in: operation object, path, schemeshard in-memory object and local db
+ // Create in-flight operation object
Y_VERIFY(!context.SS->FindTx(OperationId));
- TTxState& txState = context.SS->CreateTx(OperationId, TTxState::TxAlterExtSubDomain, subDomain->PathId);
- txState.State = TTxState::CreateParts;
+ TTxState& txState = context.SS->CreateTx(OperationId, TTxState::TxAlterExtSubDomain, basenameId);
+
+ // Create or derive alter.
+ // (We could always create a new alter from the current subdomainInfo, but we
+ // need to take into account a possible version increase from the CreateHive suboperation.)
+ auto createAlterFrom = [&inputSettings, &delta](auto prototype) {
+ return MakeIntrusive<TSubDomainInfo>(
+ *prototype,
+ inputSettings.GetPlanResolution(),
+ inputSettings.GetTimeCastBucketsPerMediator(),
+ delta.StoragePoolsAdded
+ );
+ };
+ TSubDomainInfo::TPtr alter = [&delta, &subdomainInfo, &createAlterFrom, &context]() {
+ if (delta.AddExternalHive && context.SS->EnableAlterDatabaseCreateHiveFirst) {
+ Y_VERIFY(subdomainInfo->GetAlter());
+ return createAlterFrom(subdomainInfo->GetAlter());
+ } else {
+ Y_VERIFY(!subdomainInfo->GetAlter());
+ return createAlterFrom(subdomainInfo);
+ }
+ }();
- if (!wasSharedTxSupported && setSupportSharedTx) {
- DeclareShards(txState, OperationId.GetTxId(), subDomain->PathId, settings.GetCoordinators(), TTabletTypes::Coordinator, channelBindings, context.SS);
- DeclareShards(txState, OperationId.GetTxId(), subDomain->PathId, settings.GetMediators(), TTabletTypes::Mediator, channelBindings, context.SS);
+ if (inputSettings.HasDeclaredSchemeQuotas()) {
+ alter->SetDeclaredSchemeQuotas(inputSettings.GetDeclaredSchemeQuotas());
}
-
- if (addExternalSchemeShard) {
- DeclareShards(txState, OperationId.GetTxId(), subDomain->PathId, 1, TTabletTypes::SchemeShard, channelBindings, context.SS);
+ if (inputSettings.HasDatabaseQuotas()) {
+ alter->SetDatabaseQuotas(inputSettings.GetDatabaseQuotas());
}
- if (addExternalHive) {
- DeclareShards(txState, OperationId.GetTxId(), subDomain->PathId, 1, TTabletTypes::Hive, channelBindings, context.SS);
- } else if (!alterData->GetSharedHive()) {
- alterData->SetSharedHive(context.SS->GetGlobalHive(context.Ctx));
- }
+ LOG_D("TAlterExtSubDomain Propose"
+ << ", opId: " << OperationId
+ << ", subdomain ver " << subdomainInfo->GetVersion()
+ << ", alter ver " << alter->GetVersion()
+ );
- if (addViewProcessors) {
- DeclareShards(txState, OperationId.GetTxId(), subDomain->PathId, 1, TTabletTypes::SysViewProcessor, channelBindings, context.SS);
- }
+ auto guard = context.DbGuard();
- for (auto& shard: txState.Shards) {
- alterData->AddPrivateShard(shard.Idx);
+ // Create shards for the requested tablets (except hive)
+ {
+ TChannelsBindings channelsBinding;
+ if (delta.SharedTxSupportAdded || delta.AddExternalSchemeShard || delta.AddExternalSysViewProcessor || delta.AddExternalHive) {
+ if (!context.SS->ResolveSubdomainsChannels(alter->GetStoragePools(), channelsBinding)) {
+ result->SetError(NKikimrScheme::StatusInvalidParameter, "failed to construct channels binding");
+ return result;
+ }
+ }
+
+ // Declare shards.
+ // - hive always comes first (OwnerIdx 1)
+ // - schemeshard always comes second (OwnerIdx 2)
+ // - others follow
+ //
+ if (delta.AddExternalHive && !context.SS->EnableAlterDatabaseCreateHiveFirst) {
+ AddShardsTo(txState, OperationId.GetTxId(), basenameId, 1, TTabletTypes::Hive, channelsBinding, context.SS);
+ ++tabletsToCreateTotal;
+ }
+ if (delta.AddExternalSchemeShard) {
+ AddShardsTo(txState, OperationId.GetTxId(), basenameId, 1, TTabletTypes::SchemeShard, channelsBinding, context.SS);
+ }
+ if (delta.SharedTxSupportAdded) {
+ AddShardsTo(txState, OperationId.GetTxId(), basenameId, delta.CoordinatorsAdded, TTabletTypes::Coordinator, channelsBinding, context.SS);
+ AddShardsTo(txState, OperationId.GetTxId(), basenameId, delta.MediatorsAdded, TTabletTypes::Mediator, channelsBinding, context.SS);
+ }
+ if (delta.AddExternalSysViewProcessor) {
+ AddShardsTo(txState, OperationId.GetTxId(), basenameId, 1, TTabletTypes::SysViewProcessor, channelsBinding, context.SS);
+ }
+ Y_VERIFY(txState.Shards.size() == tabletsToCreateTotal);
}
- PersistShards(db, txState, shardsToCreate, context.SS);
- context.SS->PersistUpdateNextShardIdx(db);
+ // Register extsubdomain changes in shards, path, alter
+ RegisterChanges(txState, OperationId.GetTxId(), context, path, subdomainInfo, alter);
+ // Persist alter
+ context.DbChanges.PersistSubDomainAlter(basenameId);
- subDomainInfo->SetAlter(alterData);
- context.SS->PersistSubDomainAlter(db, subDomain->PathId, *alterData);
+ // Operation in-flight state change
+ {
+ txState.State = TTxState::Waiting;
+ context.DbChanges.PersistTxState(OperationId);
+ }
- context.SS->PersistTxState(db, OperationId);
context.OnComplete.ActivateTx(OperationId);
- path.DomainInfo()->AddInternalShards(txState);
- path.Base()->IncShardsInside(shardsToCreate);
-
+ // Set initial operation state
SetState(NextState());
+
return result;
}
void AbortPropose(TOperationContext&) override {
- Y_FAIL("no AbortPropose for TAlterSubDomain");
+ Y_FAIL("no AbortPropose for TAlterExtSubDomain");
}
void AbortUnsafe(TTxId forceDropTxId, TOperationContext& context) override {
- LOG_NOTICE_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- "TAlterSubDomain AbortUnsafe"
- << ", opId: " << OperationId
- << ", forceDropId: " << forceDropTxId
- << ", at schemeshard: " << context.SS->TabletID());
+ LOG_N("TAlterExtSubDomain AbortUnsafe"
+ << ", opId: " << OperationId
+ << ", forceDropId: " << forceDropTxId
+ );
context.OnComplete.DoneOperation(OperationId);
}
};
+} // anonymous namespace
+
+ISubOperationBase::TPtr CreateAlterExtSubDomainCreateHive(TOperationId id, const TTxTransaction& tx) {
+ return MakeSubOperation<TAlterExtSubDomainCreateHive>(id, tx);
}
-namespace NKikimr::NSchemeShard {
+ISubOperationBase::TPtr CreateAlterExtSubDomainCreateHive(TOperationId id, TTxState::ETxState state) {
+ Y_VERIFY(state != TTxState::Invalid);
+ return MakeSubOperation<TAlterExtSubDomainCreateHive>(id, state);
+}
ISubOperationBase::TPtr CreateAlterExtSubDomain(TOperationId id, const TTxTransaction& tx) {
return MakeSubOperation<TAlterExtSubDomain>(id, tx);
@@ -436,4 +888,100 @@ ISubOperationBase::TPtr CreateAlterExtSubDomain(TOperationId id, TTxState::ETxSt
return MakeSubOperation<TAlterExtSubDomain>(id, state);
}
+TVector<ISubOperationBase::TPtr> CreateCompatibleAlterExtSubDomain(TOperationId id, const TTxTransaction& tx, TOperationContext& context) {
+ Y_VERIFY(tx.GetOperationType() == NKikimrSchemeOp::ESchemeOpAlterExtSubDomain);
+
+ LOG_I("CreateCompatibleAlterExtSubDomain, opId " << id
+ << ", feature flag EnableAlterDatabaseCreateHiveFirst " << context.SS->EnableAlterDatabaseCreateHiveFirst
+ << ", tx " << tx.ShortDebugString()
+ );
+
+ const TString& parentPathStr = tx.GetWorkingDir();
+ const auto& inputSettings = tx.GetSubDomain();
+ const TString& name = inputSettings.GetName();
+
+ LOG_I("CreateCompatibleAlterExtSubDomain, opId " << id << ", path " << parentPathStr << "/" << name);
+
+ auto errorResult = [&id](NKikimrScheme::EStatus status, const TStringBuf& msg) -> TVector<ISubOperationBase::TPtr> {
+ return {CreateReject(id, status, TStringBuilder() << "Invalid AlterExtSubDomain request: " << msg)};
+ };
+
+ if (!parentPathStr) {
+ return errorResult(NKikimrScheme::StatusInvalidParameter, "no working dir");
+ }
+ if (!name) {
+ return errorResult(NKikimrScheme::StatusInvalidParameter, "no name");
+ }
+
+ TPath path = TPath::Resolve(parentPathStr, context.SS).Dive(name);
+
+ // check extsubdomain path and its condition
+ {
+ TPath::TChecker checks = path.Check();
+ checks
+ .NotEmpty()
+ .NotUnderDomainUpgrade()
+ .IsAtLocalSchemeShard()
+ .IsResolved()
+ .NotDeleted()
+ .IsExternalSubDomain()
+ .NotUnderOperation()
+ .IsCommonSensePath(); // dirname consists of directories and subdomain roots (and olapstores!)
+
+ if (!checks) {
+ return errorResult(checks.GetStatus(), checks.GetError());
+ }
+ }
+
+ // check if extsubdomain is already being altered
+ //NOTE: (didn't TChecker::NotUnderOperation() check that already?)
+ const auto& basenameId = path.Base()->PathId;
+
+ Y_VERIFY(context.SS->SubDomains.contains(basenameId));
+ auto subdomainInfo = context.SS->SubDomains.at(basenameId);
+ Y_VERIFY(subdomainInfo);
+
+ if (subdomainInfo->GetAlter()) {
+ return errorResult(NKikimrScheme::StatusMultipleModifications, "extsubdomain is under another alter operation");
+ }
+
+ // check operation condition and limits
+ {
+ TString explain;
+ if (!context.SS->CheckApplyIf(tx, explain)) {
+ return errorResult(NKikimrScheme::StatusPreconditionFailed, explain);
+ }
+ if (!context.SS->CheckInFlightLimit(TTxState::TxAlterExtSubDomain, explain)) {
+ return errorResult(NKikimrScheme::StatusResourceExhausted, explain);
+ }
+ }
+
+ // Check params and build change delta
+ TParamsDelta delta;
+ {
+ auto [status, reason] = VerifyParams(&delta, subdomainInfo, inputSettings);
+ if (status != NKikimrScheme::EStatus::StatusAccepted) {
+ return errorResult(status, reason);
+ }
+ }
+
+ // create suboperations
+ TVector<ISubOperationBase::TPtr> result;
+
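+ // Suboperation order matters: with EnableAlterDatabaseCreateHiveFirst on, the
+ // hive-creating part goes first, and the main alter part then derives its
+ // state from the alter left behind by CreateHive and waits on the
+ // "extsubdomain-hive-created" barrier.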
+ if (delta.AddExternalHive && context.SS->EnableAlterDatabaseCreateHiveFirst) {
+ auto msg = TransactionTemplate(parentPathStr, NKikimrSchemeOp::ESchemeOpAlterExtSubDomainCreateHive);
+ msg.MutableSubDomain()->CopyFrom(inputSettings);
+
+ result.push_back(CreateAlterExtSubDomainCreateHive(NextPartId(id, result), msg));
+ }
+ {
+ auto msg = TransactionTemplate(parentPathStr, NKikimrSchemeOp::ESchemeOpAlterExtSubDomain);
+ msg.MutableSubDomain()->CopyFrom(inputSettings);
+
+ result.push_back(CreateAlterExtSubDomain(NextPartId(id, result), msg));
+ }
+
+ return result;
}
+
+} // namespace NKikimr::NSchemeShard
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp
index 97c3eec56f9..24b278b2af8 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp
@@ -1,4 +1,5 @@
#include "schemeshard__operation_part.h"
+#include "schemeshard__operation_common_subdomain.h"
#include "schemeshard__operation_common.h"
#include "schemeshard_impl.h"
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp
index 0894d603ab6..446b70083e3 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp
@@ -3,6 +3,86 @@
namespace NKikimr {
namespace NSchemeShard {
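+//NOTE: this is the body of the former TCreateParts::CreateRequest(), moved to
+// namespace scope (see the matching removal further below) so it can be reused
+// outside TCreateParts.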
+THolder<TEvHive::TEvCreateTablet> CreateEvCreateTablet(TPathElement::TPtr targetPath, TShardIdx shardIdx, TOperationContext& context)
+{
+ auto tablePartitionConfig = context.SS->GetTablePartitionConfigWithAlterData(targetPath->PathId);
+ const auto& shard = context.SS->ShardInfos[shardIdx];
+
+ if (shard.TabletType == ETabletType::BlockStorePartition ||
+ shard.TabletType == ETabletType::BlockStorePartition2)
+ {
+ auto it = context.SS->BlockStoreVolumes.FindPtr(targetPath->PathId);
+ Y_VERIFY(it, "Missing BlockStoreVolume while creating BlockStorePartition tablet");
+ auto volume = *it;
+ /*const auto* volumeConfig = &volume->VolumeConfig;
+ if (volume->AlterData) {
+ volumeConfig = &volume->AlterData->VolumeConfig;
+ }*/
+ }
+
+ THolder<TEvHive::TEvCreateTablet> ev = MakeHolder<TEvHive::TEvCreateTablet>(ui64(shardIdx.GetOwnerId()), ui64(shardIdx.GetLocalId()), shard.TabletType, shard.BindedChannels);
+
+ TPathId domainId = context.SS->ResolvePathIdForDomain(targetPath);
+
+ TPathElement::TPtr domainEl = context.SS->PathsById.at(domainId);
+ auto objectDomain = ev->Record.MutableObjectDomain();
+ if (domainEl->IsRoot()) {
+ objectDomain->SetSchemeShard(context.SS->ParentDomainId.OwnerId);
+ objectDomain->SetPathId(context.SS->ParentDomainId.LocalPathId);
+ } else {
+ objectDomain->SetSchemeShard(domainId.OwnerId);
+ objectDomain->SetPathId(domainId.LocalPathId);
+ }
+
+ Y_VERIFY(context.SS->SubDomains.contains(domainId));
+ TSubDomainInfo::TPtr subDomain = context.SS->SubDomains.at(domainId);
+
+ TPathId resourcesDomainId;
+ if (subDomain->GetResourcesDomainId()) {
+ resourcesDomainId = subDomain->GetResourcesDomainId();
+ } else if (subDomain->GetAlter() && subDomain->GetAlter()->GetResourcesDomainId()) {
+ resourcesDomainId = subDomain->GetAlter()->GetResourcesDomainId();
+ } else {
+ Y_FAIL("Cannot retrieve resources domain id");
+ }
+
+ auto allowedDomain = ev->Record.AddAllowedDomains();
+ allowedDomain->SetSchemeShard(resourcesDomainId.OwnerId);
+ allowedDomain->SetPathId(resourcesDomainId.LocalPathId);
+
+ if (tablePartitionConfig) {
+ if (tablePartitionConfig->FollowerGroupsSize()) {
+ ev->Record.MutableFollowerGroups()->CopyFrom(tablePartitionConfig->GetFollowerGroups());
+ } else {
+ if (tablePartitionConfig->HasAllowFollowerPromotion()) {
+ ev->Record.SetAllowFollowerPromotion(tablePartitionConfig->GetAllowFollowerPromotion());
+ }
+
+ if (tablePartitionConfig->HasCrossDataCenterFollowerCount()) {
+ ev->Record.SetCrossDataCenterFollowerCount(tablePartitionConfig->GetCrossDataCenterFollowerCount());
+ } else if (tablePartitionConfig->HasFollowerCount()) {
+ ev->Record.SetFollowerCount(tablePartitionConfig->GetFollowerCount());
+ }
+ }
+ }
+
+ if (shard.TabletType == ETabletType::BlockStorePartition ||
+ shard.TabletType == ETabletType::BlockStorePartition2 ||
+ shard.TabletType == ETabletType::RTMRPartition) {
+ // Partitions should never be booted by local
+ ev->Record.SetTabletBootMode(NKikimrHive::TABLET_BOOT_MODE_EXTERNAL);
+ }
+
+ ev->Record.SetObjectId(targetPath->PathId.LocalPathId);
+
+ if (shard.TabletID) {
+ ev->Record.SetTabletID(ui64(shard.TabletID));
+ }
+
+ return ev;
+}
+
+
namespace
{
@@ -76,7 +156,7 @@ bool CollectProposeTxResults(
return false;
}
-}
+} // anonymous namespace
bool NTableState::CollectProposeTransactionResults(
const NKikimr::NSchemeShard::TOperationId &operationId,
@@ -534,7 +614,7 @@ void NForceDrop::CollectShards(const THashSet<TPathId>& pathes, TOperationId ope
context.SS->PersistTxState(db, operationId);
}
-void NForceDrop::ValidateNoTrasactionOnPathes(TOperationId operationId, const THashSet<TPathId>& pathes, TOperationContext &context) {
+void NForceDrop::ValidateNoTransactionOnPathes(TOperationId operationId, const THashSet<TPathId>& pathes, TOperationContext &context) {
// it is not supposed that someone transaction is able to materialise in dropping subdomain
// all transaction should check parent dir status
// however, it is better to check that all locks are ours
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_common.h b/ydb/core/tx/schemeshard/schemeshard__operation_common.h
index c6b93cba9c3..f0fd3c5bd36 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_common.h
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_common.h
@@ -16,6 +16,8 @@ void IncParentDirAlterVersionWithRepublish(const TOperationId& opId, const TPath
NKikimrSchemeOp::TModifyScheme MoveTableTask(NKikimr::NSchemeShard::TPath& src, NKikimr::NSchemeShard::TPath& dst);
NKikimrSchemeOp::TModifyScheme MoveTableIndexTask(NKikimr::NSchemeShard::TPath& src, NKikimr::NSchemeShard::TPath& dst);
+THolder<TEvHive::TEvCreateTablet> CreateEvCreateTablet(TPathElement::TPtr targetPath, TShardIdx shardIdx, TOperationContext& context);
+
namespace NTableState {
bool CollectProposeTransactionResults(const TOperationId& operationId, const TEvDataShard::TEvProposeTransactionResult::TPtr& ev, TOperationContext& context);
@@ -125,344 +127,7 @@ public:
}
};
-}
-
-namespace NSubDomainState {
-
-class TConfigureParts: public TSubOperationState {
-private:
- TOperationId OperationId;
-
- TString DebugHint() const override {
- return TStringBuilder()
- << "NSubDomainState::TConfigureParts"
- << " operationId#" << OperationId;
- }
-public:
- TConfigureParts(TOperationId id)
- : OperationId(id)
- {
- IgnoreMessages(DebugHint(), {TEvHive::TEvCreateTabletReply::EventType});
- }
-
- bool HandleReply(TEvSchemeShard::TEvInitTenantSchemeShardResult::TPtr& ev, TOperationContext& context) override {
- TTabletId ssId = context.SS->SelfTabletId();
- LOG_INFO_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- DebugHint()
- << " HandleReply TEvInitTenantSchemeShardResult"
- << " operationId: " << OperationId
- << " at schemeshard: " << ssId);
-
- TTxState* txState = context.SS->FindTx(OperationId);
- Y_VERIFY(txState);
- Y_VERIFY(txState->TxType == TTxState::TxCreateSubDomain
- || txState->TxType == TTxState::TxAlterSubDomain
- || txState->TxType == TTxState::TxAlterExtSubDomain);
-
- const auto& record = ev->Get()->Record;
-
- NIceDb::TNiceDb db(context.GetDB());
-
- TTabletId tabletId = TTabletId(record.GetTenantSchemeShard());
- auto status = record.GetStatus();
-
- auto shardIdx = context.SS->MustGetShardIdx(tabletId);
- Y_VERIFY(context.SS->ShardInfos.contains(shardIdx));
-
- if (status != NKikimrScheme::EStatus::StatusSuccess && status != NKikimrScheme::EStatus::StatusAlreadyExists) {
- LOG_CRIT_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- DebugHint()
- << " Got error status on SubDomain Configure"
- << "from tenant schemeshard tablet: " << tabletId
- << " shard: " << shardIdx
- << " status: " << NKikimrScheme::EStatus_Name(status)
- << " opId: " << OperationId
- << " schemeshard: " << ssId);
- return false;
- }
-
- LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- DebugHint()
- << " Got OK TEvInitTenantSchemeShardResult from schemeshard"
- << " tablet: " << tabletId
- << " shardIdx: " << shardIdx
- << " at schemeshard: " << ssId);
-
- txState->ShardsInProgress.erase(shardIdx);
- context.OnComplete.UnbindMsgFromPipe(OperationId, tabletId, shardIdx);
-
- if (txState->ShardsInProgress.empty()) {
- // All tablets have replied so we can done this transaction
- context.SS->ChangeTxState(db, OperationId, TTxState::Propose);
- context.OnComplete.ActivateTx(OperationId);
- return true;
- }
-
- return false;
- }
-
- bool HandleReply(TEvSubDomain::TEvConfigureStatus::TPtr& ev, TOperationContext& context) override {
- TTabletId ssId = context.SS->SelfTabletId();
- LOG_INFO_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- DebugHint()
- << " HandleReply TEvConfigureStatus"
- << " operationId:" << OperationId
- << " at schemeshard:" << ssId);
-
- TTxState* txState = context.SS->FindTx(OperationId);
- Y_VERIFY(txState);
- Y_VERIFY(txState->TxType == TTxState::TxCreateSubDomain
- || txState->TxType == TTxState::TxAlterSubDomain
- || txState->TxType == TTxState::TxAlterExtSubDomain);
-
- const auto& record = ev->Get()->Record;
-
- NIceDb::TNiceDb db(context.GetDB());
-
- TTabletId tabletId = TTabletId(record.GetOnTabletId());
- auto status = record.GetStatus();
-
- auto shardIdx = context.SS->MustGetShardIdx(tabletId);
- Y_VERIFY(context.SS->ShardInfos.contains(shardIdx));
-
- if (status == NKikimrTx::TEvSubDomainConfigurationAck::REJECT) {
- LOG_CRIT_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- DebugHint()
- << " Got REJECT on SubDomain Configure"
- << "from tablet: " << tabletId
- << " shard: " << shardIdx
- << " opId: " << OperationId
- << " schemeshard: " << ssId);
- return false;
- }
-
- LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- DebugHint() <<
- " Got OK TEvConfigureStatus from "
- << " tablet# " << tabletId
- << " shardIdx# " << shardIdx
- << " at schemeshard# " << ssId);
-
- txState->ShardsInProgress.erase(shardIdx);
- context.OnComplete.UnbindMsgFromPipe(OperationId, tabletId, shardIdx);
-
- if (txState->ShardsInProgress.empty()) {
- // All tablets have replied so we can done this transaction
- context.SS->ChangeTxState(db, OperationId, TTxState::Propose);
- context.OnComplete.ActivateTx(OperationId);
- return true;
- }
-
- return false;
- }
-
-
- bool ProgressState(TOperationContext& context) override {
- TTabletId ssId = context.SS->SelfTabletId();
- LOG_INFO_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- DebugHint()
- << " ProgressState"
- << ", at schemeshard: " << ssId);
-
- TTxState* txState = context.SS->FindTx(OperationId);
- Y_VERIFY(txState);
- Y_VERIFY(txState->TxType == TTxState::TxCreateSubDomain
- || txState->TxType == TTxState::TxAlterSubDomain
- || txState->TxType == TTxState::TxAlterExtSubDomain);
-
- txState->ClearShardsInProgress();
-
- if (txState->Shards.empty()) {
- NIceDb::TNiceDb db(context.GetDB());
- context.SS->ChangeTxState(db, OperationId, TTxState::Propose);
- context.OnComplete.ActivateTx(OperationId);
- return true;
- }
-
- auto pathId = txState->TargetPathId;
- Y_VERIFY(context.SS->PathsById.contains(pathId));
- TPath path = TPath::Init(pathId, context.SS);
-
- Y_VERIFY(context.SS->SubDomains.contains(pathId));
- auto subDomain = context.SS->SubDomains.at(pathId);
- auto alterData = subDomain->GetAlter();
- Y_VERIFY(alterData);
- alterData->Initialize(context.SS->ShardInfos);
- auto processing = alterData->GetProcessingParams();
- auto storagePools = alterData->GetStoragePools();
- auto& schemeLimits = subDomain->GetSchemeLimits();
-
- for (ui32 i = 0; i < txState->Shards.size(); ++i) {
- auto &shard = txState->Shards[i];
- TShardIdx idx = shard.Idx;
- Y_VERIFY(context.SS->ShardInfos.contains(idx));
- TTabletId tabletID = context.SS->ShardInfos[idx].TabletID;
- auto type = context.SS->ShardInfos[idx].TabletType;
-
- switch (type) {
- case ETabletType::Coordinator:
- case ETabletType::Mediator: {
- LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- "Send configure request to coordinator/mediator: " << tabletID <<
- " opId: " << OperationId <<
- " schemeshard: " << ssId);
- shard.Operation = TTxState::ConfigureParts;
- auto event = new TEvSubDomain::TEvConfigure(processing);
- context.OnComplete.BindMsgToPipe(OperationId, tabletID, idx, event);
- break;
- }
- case ETabletType::Hive: {
- LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- "Send configure request to hive: " << tabletID <<
- " opId: " << OperationId <<
- " schemeshard: " << ssId);
- shard.Operation = TTxState::ConfigureParts;
- auto event = new TEvHive::TEvConfigureHive(TSubDomainKey(pathId.OwnerId, pathId.LocalPathId));
- context.OnComplete.BindMsgToPipe(OperationId, tabletID, idx, event);
- break;
- }
- case ETabletType::SysViewProcessor: {
- LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- "Send configure request to sysview processor: " << tabletID <<
- " opId: " << OperationId <<
- " schemeshard: " << ssId);
- auto event = new NSysView::TEvSysView::TEvConfigureProcessor(path.PathString());
- shard.Operation = TTxState::ConfigureParts;
- context.OnComplete.BindMsgToPipe(OperationId, tabletID, idx, event);
- break;
- }
- case ETabletType::SchemeShard: {
- auto event = new TEvSchemeShard::TEvInitTenantSchemeShard(ui64(ssId),
- pathId.LocalPathId, path.PathString(),
- path.Base()->Owner, path.GetEffectiveACL(), path.GetEffectiveACLVersion(),
- processing, storagePools,
- path.Base()->UserAttrs->Attrs, path.Base()->UserAttrs->AlterVersion,
- schemeLimits, ui64(alterData->GetSharedHive()), alterData->GetResourcesDomainId()
- );
- if (alterData->GetDeclaredSchemeQuotas()) {
- event->Record.MutableDeclaredSchemeQuotas()->CopyFrom(*alterData->GetDeclaredSchemeQuotas());
- }
- if (alterData->GetDatabaseQuotas()) {
- event->Record.MutableDatabaseQuotas()->CopyFrom(*alterData->GetDatabaseQuotas());
- }
- LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- "Send configure request to schemeshard: " << tabletID <<
- " opId: " << OperationId <<
- " schemeshard: " << ssId <<
- " msg: " << event->Record.ShortDebugString());
-
- shard.Operation = TTxState::ConfigureParts;
- context.OnComplete.BindMsgToPipe(OperationId, tabletID, idx, event);
- break;
- }
- default:
- Y_FAIL_S("Unexpected type, we don't create tablets with type " << ETabletType::TypeToStr(type));
- }
- }
-
- txState->UpdateShardsInProgress(TTxState::ConfigureParts);
- return false;
- }
-};
-
-class TPropose: public TSubOperationState {
-private:
- TOperationId OperationId;
-
- TString DebugHint() const override {
- return TStringBuilder()
- << "NSubDomainState::TPropose"
- << " operationId#" << OperationId;
- }
-
-public:
- TPropose(TOperationId id)
- : OperationId(id)
- {
- IgnoreMessages(DebugHint(),
- {TEvHive::TEvCreateTabletReply::EventType, TEvSubDomain::TEvConfigureStatus::EventType});
- }
-
- bool HandleReply(TEvPrivate::TEvOperationPlan::TPtr& ev, TOperationContext& context) override {
- TStepId step = TStepId(ev->Get()->StepId);
- TTabletId ssId = context.SS->SelfTabletId();
-
- LOG_INFO_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- "NSubDomainState::TPropose HandleReply TEvOperationPlan"
- << " operationId#" << OperationId
- << " at tablet" << ssId);
-
- TTxState* txState = context.SS->FindTx(OperationId);
- if (!txState) {
- return false;
- }
- Y_VERIFY(txState->TxType == TTxState::TxCreateSubDomain
- || txState->TxType == TTxState::TxAlterSubDomain
- || txState->TxType == TTxState::TxCreateExtSubDomain
- || txState->TxType == TTxState::TxAlterExtSubDomain);
-
- TPathId pathId = txState->TargetPathId;
- Y_VERIFY(context.SS->PathsById.contains(pathId));
- TPathElement::TPtr path = context.SS->PathsById.at(pathId);
-
- NIceDb::TNiceDb db(context.GetDB());
-
- if (path->StepCreated == InvalidStepId) {
- path->StepCreated = step;
- context.SS->PersistCreateStep(db, pathId, step);
- }
-
- Y_VERIFY(context.SS->SubDomains.contains(pathId));
- auto subDomain = context.SS->SubDomains.at(pathId);
- auto alter = subDomain->GetAlter();
- Y_VERIFY(alter);
- Y_VERIFY(subDomain->GetVersion() < alter->GetVersion());
-
- subDomain->ActualizeAlterData(context.SS->ShardInfos, context.Ctx.Now(),
- /* isExternal */ path->PathType == TPathElement::EPathType::EPathTypeExtSubDomain,
- context.SS);
-
- context.SS->SubDomains[pathId] = alter;
- context.SS->PersistSubDomain(db, pathId, *alter);
- context.SS->PersistSubDomainSchemeQuotas(db, pathId, *alter);
-
- if (txState->TxType == TTxState::TxCreateSubDomain || txState->TxType == TTxState::TxCreateExtSubDomain) {
- auto parentDir = context.SS->PathsById.at(path->ParentPathId);
- ++parentDir->DirAlterVersion;
- context.SS->PersistPathDirAlterVersion(db, parentDir);
- context.SS->ClearDescribePathCaches(parentDir);
- context.OnComplete.PublishToSchemeBoard(OperationId, parentDir->PathId);
- }
-
- context.OnComplete.UpdateTenant(pathId);
- context.SS->ClearDescribePathCaches(path);
- context.OnComplete.PublishToSchemeBoard(OperationId, pathId);
-
- context.SS->ChangeTxState(db, OperationId, TTxState::Done);
- return true;
- }
-
- bool ProgressState(TOperationContext& context) override {
- TTabletId ssId = context.SS->SelfTabletId();
-
- LOG_INFO_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- "NSubDomainState::TPropose ProgressState"
- << ", operationId: " << OperationId
- << ", at schemeshard: " << ssId);
-
- TTxState* txState = context.SS->FindTx(OperationId);
- Y_VERIFY(txState);
- Y_VERIFY(txState->TxType == TTxState::TxCreateSubDomain
- || txState->TxType == TTxState::TxAlterSubDomain
- || txState->TxType == TTxState::TxCreateExtSubDomain
- || txState->TxType == TTxState::TxAlterExtSubDomain);
-
- context.OnComplete.ProposeToCoordinator(OperationId, txState->TargetPathId, TStepId(0));
- return false;
- }
-};
-
-}
+} // namespace NTableState
class TCreateParts: public TSubOperationState {
private:
@@ -601,7 +266,7 @@ public:
context.OnComplete.UnbindMsgFromPipe(OperationId, hive, shardIdx);
auto path = context.SS->PathsById.at(txState.TargetPathId);
- auto request = CreateRequest(path, shardIdx, context);
+ auto request = CreateEvCreateTablet(path, shardIdx, context);
LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
DebugHint() << " CreateRequest"
@@ -656,84 +321,6 @@ public:
return false;
}
- THolder<TEvHive::TEvCreateTablet> CreateRequest(TPathElement::TPtr targetPath, TShardIdx shardIdx, TOperationContext& context) {
- auto tablePartitionConfig = context.SS->GetTablePartitionConfigWithAlterData(targetPath->PathId);
- const auto& shard = context.SS->ShardInfos[shardIdx];
-
- if (shard.TabletType == ETabletType::BlockStorePartition ||
- shard.TabletType == ETabletType::BlockStorePartition2)
- {
- auto it = context.SS->BlockStoreVolumes.FindPtr(targetPath->PathId);
- Y_VERIFY(it, "Missing BlockStoreVolume while creating BlockStorePartition tablet");
- auto volume = *it;
- /*const auto* volumeConfig = &volume->VolumeConfig;
- if (volume->AlterData) {
- volumeConfig = &volume->AlterData->VolumeConfig;
- }*/
- }
-
- THolder<TEvHive::TEvCreateTablet> ev = MakeHolder<TEvHive::TEvCreateTablet>(ui64(shardIdx.GetOwnerId()), ui64(shardIdx.GetLocalId()), shard.TabletType, shard.BindedChannels);
-
- TPathId domainId = context.SS->ResolvePathIdForDomain(targetPath);
-
- TPathElement::TPtr domainEl = context.SS->PathsById.at(domainId);
- auto objectDomain = ev->Record.MutableObjectDomain();
- if (domainEl->IsRoot()) {
- objectDomain->SetSchemeShard(context.SS->ParentDomainId.OwnerId);
- objectDomain->SetPathId(context.SS->ParentDomainId.LocalPathId);
- } else {
- objectDomain->SetSchemeShard(domainId.OwnerId);
- objectDomain->SetPathId(domainId.LocalPathId);
- }
-
- Y_VERIFY(context.SS->SubDomains.contains(domainId));
- TSubDomainInfo::TPtr subDomain = context.SS->SubDomains.at(domainId);
-
- TPathId resourcesDomainId;
- if (subDomain->GetResourcesDomainId()) {
- resourcesDomainId = subDomain->GetResourcesDomainId();
- } else if (subDomain->GetAlter() && subDomain->GetAlter()->GetResourcesDomainId()) {
- resourcesDomainId = subDomain->GetAlter()->GetResourcesDomainId();
- } else {
- Y_FAIL("Cannot retrieve resources domain id");
- }
-
- auto allowedDomain = ev->Record.AddAllowedDomains();
- allowedDomain->SetSchemeShard(resourcesDomainId.OwnerId);
- allowedDomain->SetPathId(resourcesDomainId.LocalPathId);
-
- if (tablePartitionConfig) {
- if (tablePartitionConfig->FollowerGroupsSize()) {
- ev->Record.MutableFollowerGroups()->CopyFrom(tablePartitionConfig->GetFollowerGroups());
- } else {
- if (tablePartitionConfig->HasAllowFollowerPromotion()) {
- ev->Record.SetAllowFollowerPromotion(tablePartitionConfig->GetAllowFollowerPromotion());
- }
-
- if (tablePartitionConfig->HasCrossDataCenterFollowerCount()) {
- ev->Record.SetCrossDataCenterFollowerCount(tablePartitionConfig->GetCrossDataCenterFollowerCount());
- } else if (tablePartitionConfig->HasFollowerCount()) {
- ev->Record.SetFollowerCount(tablePartitionConfig->GetFollowerCount());
- }
- }
- }
-
- if (shard.TabletType == ETabletType::BlockStorePartition ||
- shard.TabletType == ETabletType::BlockStorePartition2 ||
- shard.TabletType == ETabletType::RTMRPartition) {
- // Partitions should never be booted by local
- ev->Record.SetTabletBootMode(NKikimrHive::TABLET_BOOT_MODE_EXTERNAL);
- }
-
- ev->Record.SetObjectId(targetPath->PathId.LocalPathId);
-
- if (shard.TabletID) {
- ev->Record.SetTabletID(ui64(shard.TabletID));
- }
-
- return ev;
- }
-
THolder<TEvHive::TEvAdoptTablet> AdoptRequest(TShardIdx shardIdx, TOperationContext& context) {
Y_VERIFY(context.SS->AdoptedShards.contains(shardIdx));
auto& adoptedShard = context.SS->AdoptedShards[shardIdx];
@@ -795,10 +382,10 @@ public:
if (context.SS->AdoptedShards.contains(shard.Idx)) {
auto ev = AdoptRequest(shard.Idx, context);
- context.OnComplete.BindMsgToPipe(OperationId, context.SS->GetGlobalHive(context.Ctx) , shard.Idx, ev.Release());
+ context.OnComplete.BindMsgToPipe(OperationId, context.SS->GetGlobalHive(context.Ctx), shard.Idx, ev.Release());
} else {
auto path = context.SS->PathsById.at(txState->TargetPathId);
- auto ev = CreateRequest(path, shard.Idx, context);
+ auto ev = CreateEvCreateTablet(path, shard.Idx, context);
auto hiveToRequest = context.SS->ResolveHive(shard.Idx, context.Ctx);
@@ -1630,10 +1217,10 @@ protected:
} // NCdcStreamState
namespace NForceDrop {
-void ValidateNoTrasactionOnPathes(TOperationId operationId, const THashSet<TPathId>& pathes, TOperationContext& context);
+void ValidateNoTransactionOnPathes(TOperationId operationId, const THashSet<TPathId>& pathes, TOperationContext& context);
void CollectShards(const THashSet<TPathId>& pathes, TOperationId operationId, TTxState* txState, TOperationContext& context);
-}
+} // namespace NForceDrop
-}
-}
+} // namespace NSchemeShard
+} // namespace NKikimr
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_common_subdomain.h b/ydb/core/tx/schemeshard/schemeshard__operation_common_subdomain.h
new file mode 100644
index 00000000000..3aadbb1516c
--- /dev/null
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_common_subdomain.h
@@ -0,0 +1,374 @@
+#pragma once
+
+#include "schemeshard_impl.h"
+#include "schemeshard__operation_part.h"
+
+namespace NKikimr {
+namespace NSchemeShard {
+
+namespace NSubDomainState {
+
+class TConfigureParts: public TSubOperationState {
+private:
+ TOperationId OperationId;
+
+ TString DebugHint() const override {
+ return TStringBuilder()
+ << "NSubDomainState::TConfigureParts"
+ << " operationId#" << OperationId;
+ }
+public:
+ TConfigureParts(TOperationId id)
+ : OperationId(id)
+ {
+ IgnoreMessages(DebugHint(), {TEvHive::TEvCreateTabletReply::EventType});
+ }
+
+ bool HandleReply(TEvSchemeShard::TEvInitTenantSchemeShardResult::TPtr& ev, TOperationContext& context) override {
+ TTabletId ssId = context.SS->SelfTabletId();
+ LOG_INFO_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ DebugHint()
+ << " HandleReply TEvInitTenantSchemeShardResult"
+ << " operationId: " << OperationId
+ << " at schemeshard: " << ssId);
+
+ TTxState* txState = context.SS->FindTx(OperationId);
+ Y_VERIFY(txState);
+ Y_VERIFY(txState->TxType == TTxState::TxCreateSubDomain
+ || txState->TxType == TTxState::TxAlterSubDomain
+ || txState->TxType == TTxState::TxAlterExtSubDomain
+ || txState->TxType == TTxState::TxAlterExtSubDomainCreateHive
+ );
+
+ const auto& record = ev->Get()->Record;
+
+ NIceDb::TNiceDb db(context.GetDB());
+
+ TTabletId tabletId = TTabletId(record.GetTenantSchemeShard());
+ auto status = record.GetStatus();
+
+ auto shardIdx = context.SS->MustGetShardIdx(tabletId);
+ Y_VERIFY(context.SS->ShardInfos.contains(shardIdx));
+
+ if (status != NKikimrScheme::EStatus::StatusSuccess && status != NKikimrScheme::EStatus::StatusAlreadyExists) {
+ LOG_CRIT_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ DebugHint()
+ << " Got error status on SubDomain Configure"
+ << "from tenant schemeshard tablet: " << tabletId
+ << " shard: " << shardIdx
+ << " status: " << NKikimrScheme::EStatus_Name(status)
+ << " opId: " << OperationId
+ << " schemeshard: " << ssId);
+ return false;
+ }
+
+ LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ DebugHint()
+ << " Got OK TEvInitTenantSchemeShardResult from schemeshard"
+ << " tablet: " << tabletId
+ << " shardIdx: " << shardIdx
+ << " at schemeshard: " << ssId);
+
+ txState->ShardsInProgress.erase(shardIdx);
+ context.OnComplete.UnbindMsgFromPipe(OperationId, tabletId, shardIdx);
+
+ if (txState->ShardsInProgress.empty()) {
+ // All tablets have replied, so we can complete this transaction
+ context.SS->ChangeTxState(db, OperationId, TTxState::Propose);
+ context.OnComplete.ActivateTx(OperationId);
+ return true;
+ }
+
+ return false;
+ }
+
+ bool HandleReply(TEvSubDomain::TEvConfigureStatus::TPtr& ev, TOperationContext& context) override {
+ TTabletId ssId = context.SS->SelfTabletId();
+ LOG_INFO_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ DebugHint()
+ << " HandleReply TEvConfigureStatus"
+ << " operationId:" << OperationId
+ << " at schemeshard:" << ssId);
+
+ TTxState* txState = context.SS->FindTx(OperationId);
+ Y_VERIFY(txState);
+ Y_VERIFY(txState->TxType == TTxState::TxCreateSubDomain
+ || txState->TxType == TTxState::TxAlterSubDomain
+ || txState->TxType == TTxState::TxAlterExtSubDomain
+ || txState->TxType == TTxState::TxAlterExtSubDomainCreateHive
+ );
+
+ const auto& record = ev->Get()->Record;
+
+ NIceDb::TNiceDb db(context.GetDB());
+
+ TTabletId tabletId = TTabletId(record.GetOnTabletId());
+ auto status = record.GetStatus();
+
+ auto shardIdx = context.SS->MustGetShardIdx(tabletId);
+ Y_VERIFY(context.SS->ShardInfos.contains(shardIdx));
+
+ if (status == NKikimrTx::TEvSubDomainConfigurationAck::REJECT) {
+ LOG_CRIT_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ DebugHint()
+ << " Got REJECT on SubDomain Configure"
+ << "from tablet: " << tabletId
+ << " shard: " << shardIdx
+ << " opId: " << OperationId
+ << " schemeshard: " << ssId);
+ return false;
+ }
+
+ LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ DebugHint() <<
+ " Got OK TEvConfigureStatus from "
+ << " tablet# " << tabletId
+ << " shardIdx# " << shardIdx
+ << " at schemeshard# " << ssId);
+
+ txState->ShardsInProgress.erase(shardIdx);
+ context.OnComplete.UnbindMsgFromPipe(OperationId, tabletId, shardIdx);
+
+ if (txState->ShardsInProgress.empty()) {
+ // All tablets have replied, so we can complete this transaction
+ context.SS->ChangeTxState(db, OperationId, TTxState::Propose);
+ context.OnComplete.ActivateTx(OperationId);
+ return true;
+ }
+
+ return false;
+ }
+
+
+ bool ProgressState(TOperationContext& context) override {
+ TTabletId ssId = context.SS->SelfTabletId();
+ LOG_INFO_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ DebugHint()
+ << " ProgressState"
+ << ", at schemeshard: " << ssId);
+
+ TTxState* txState = context.SS->FindTx(OperationId);
+ Y_VERIFY(txState);
+ Y_VERIFY(txState->TxType == TTxState::TxCreateSubDomain
+ || txState->TxType == TTxState::TxAlterSubDomain
+ || txState->TxType == TTxState::TxAlterExtSubDomain
+ || txState->TxType == TTxState::TxAlterExtSubDomainCreateHive
+ );
+
+ txState->ClearShardsInProgress();
+
+ if (txState->Shards.empty()) {
+ NIceDb::TNiceDb db(context.GetDB());
+ context.SS->ChangeTxState(db, OperationId, TTxState::Propose);
+ context.OnComplete.ActivateTx(OperationId);
+ return true;
+ }
+
+ auto pathId = txState->TargetPathId;
+ Y_VERIFY(context.SS->PathsById.contains(pathId));
+ TPath path = TPath::Init(pathId, context.SS);
+
+ Y_VERIFY(context.SS->SubDomains.contains(pathId));
+ auto subDomain = context.SS->SubDomains.at(pathId);
+ auto alterData = subDomain->GetAlter();
+ Y_VERIFY(alterData);
+ alterData->Initialize(context.SS->ShardInfos);
+ auto processing = alterData->GetProcessingParams();
+ auto storagePools = alterData->GetStoragePools();
+ auto& schemeLimits = subDomain->GetSchemeLimits();
+
+ for (auto& shard : txState->Shards) {
+ if (shard.Operation != TTxState::CreateParts) {
+ continue;
+ }
+ TShardIdx idx = shard.Idx;
+ Y_VERIFY(context.SS->ShardInfos.contains(idx));
+ TTabletId tabletID = context.SS->ShardInfos[idx].TabletID;
+ auto type = context.SS->ShardInfos[idx].TabletType;
+
+ switch (type) {
+ case ETabletType::Coordinator:
+ case ETabletType::Mediator: {
+ LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ "Send configure request to coordinator/mediator: " << tabletID <<
+ " opId: " << OperationId <<
+ " schemeshard: " << ssId);
+ shard.Operation = TTxState::ConfigureParts;
+ auto event = new TEvSubDomain::TEvConfigure(processing);
+ context.OnComplete.BindMsgToPipe(OperationId, tabletID, idx, event);
+ break;
+ }
+ case ETabletType::Hive: {
+ LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ "Send configure request to hive: " << tabletID <<
+ " opId: " << OperationId <<
+ " schemeshard: " << ssId);
+ shard.Operation = TTxState::ConfigureParts;
+ auto event = new TEvHive::TEvConfigureHive(TSubDomainKey(pathId.OwnerId, pathId.LocalPathId));
+ context.OnComplete.BindMsgToPipe(OperationId, tabletID, idx, event);
+ break;
+ }
+ case ETabletType::SysViewProcessor: {
+ LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ "Send configure request to sysview processor: " << tabletID <<
+ " opId: " << OperationId <<
+ " schemeshard: " << ssId);
+ auto event = new NSysView::TEvSysView::TEvConfigureProcessor(path.PathString());
+ shard.Operation = TTxState::ConfigureParts;
+ context.OnComplete.BindMsgToPipe(OperationId, tabletID, idx, event);
+ break;
+ }
+ case ETabletType::SchemeShard: {
+ auto event = new TEvSchemeShard::TEvInitTenantSchemeShard(ui64(ssId),
+ pathId.LocalPathId, path.PathString(),
+ path.Base()->Owner, path.GetEffectiveACL(), path.GetEffectiveACLVersion(),
+ processing, storagePools,
+ path.Base()->UserAttrs->Attrs, path.Base()->UserAttrs->AlterVersion,
+ schemeLimits, ui64(alterData->GetSharedHive()), alterData->GetResourcesDomainId()
+ );
+ if (alterData->GetDeclaredSchemeQuotas()) {
+ event->Record.MutableDeclaredSchemeQuotas()->CopyFrom(*alterData->GetDeclaredSchemeQuotas());
+ }
+ if (alterData->GetDatabaseQuotas()) {
+ event->Record.MutableDatabaseQuotas()->CopyFrom(*alterData->GetDatabaseQuotas());
+ }
+ LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ "Send configure request to schemeshard: " << tabletID <<
+ " opId: " << OperationId <<
+ " schemeshard: " << ssId <<
+ " msg: " << event->Record.ShortDebugString());
+
+ shard.Operation = TTxState::ConfigureParts;
+ context.OnComplete.BindMsgToPipe(OperationId, tabletID, idx, event);
+ break;
+ }
+ default:
+ Y_FAIL_S("Unexpected type, we don't create tablets with type " << ETabletType::TypeToStr(type));
+ }
+ }
+
+ txState->UpdateShardsInProgress(TTxState::ConfigureParts);
+ return false;
+ }
+};
+
+class TPropose: public TSubOperationState {
+private:
+ TOperationId OperationId;
+
+ TString DebugHint() const override {
+ return TStringBuilder()
+ << "NSubDomainState::TPropose"
+ << " operationId#" << OperationId;
+ }
+
+public:
+ TPropose(TOperationId id)
+ : OperationId(id)
+ {
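+ // TEvCompleteBarrier is newly ignored here: with the hive-first flow a
+ // barrier completion notification may still arrive after a suboperation
+ // has moved past TWaitHiveCreated (an assumption based on the
+ // "extsubdomain-hive-created" barrier added in this patch).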
+ IgnoreMessages(DebugHint(), {
+ TEvHive::TEvCreateTabletReply::EventType,
+ TEvSubDomain::TEvConfigureStatus::EventType,
+ TEvPrivate::TEvCompleteBarrier::EventType,
+ });
+ }
+
+ bool HandleReply(TEvPrivate::TEvOperationPlan::TPtr& ev, TOperationContext& context) override {
+ TStepId step = TStepId(ev->Get()->StepId);
+ TTabletId ssId = context.SS->SelfTabletId();
+
+ LOG_INFO_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ "NSubDomainState::TPropose HandleReply TEvOperationPlan"
+ << ", operationId " << OperationId
+ << ", at tablet " << ssId);
+
+ TTxState* txState = context.SS->FindTx(OperationId);
+ if (!txState) {
+ return false;
+ }
+ Y_VERIFY(txState->TxType == TTxState::TxCreateSubDomain
+ || txState->TxType == TTxState::TxAlterSubDomain
+ || txState->TxType == TTxState::TxCreateExtSubDomain
+ || txState->TxType == TTxState::TxAlterExtSubDomain
+ || txState->TxType == TTxState::TxAlterExtSubDomainCreateHive
+ );
+
+ TPathId pathId = txState->TargetPathId;
+ Y_VERIFY(context.SS->PathsById.contains(pathId));
+ TPathElement::TPtr path = context.SS->PathsById.at(pathId);
+
+ NIceDb::TNiceDb db(context.GetDB());
+
+ if (path->StepCreated == InvalidStepId) {
+ path->StepCreated = step;
+ context.SS->PersistCreateStep(db, pathId, step);
+ }
+
+ Y_VERIFY(context.SS->SubDomains.contains(pathId));
+ auto subDomain = context.SS->SubDomains.at(pathId);
+ auto alter = subDomain->GetAlter();
+ Y_VERIFY(alter);
+ Y_VERIFY_S(subDomain->GetVersion() < alter->GetVersion(), "" << subDomain->GetVersion() << " and " << alter->GetVersion());
+
+ subDomain->ActualizeAlterData(context.SS->ShardInfos, context.Ctx.Now(),
+ /* isExternal */ path->PathType == TPathElement::EPathType::EPathTypeExtSubDomain,
+ context.SS);
+
+ context.SS->SubDomains[pathId] = alter;
+ context.SS->PersistSubDomain(db, pathId, *alter);
+ context.SS->PersistSubDomainSchemeQuotas(db, pathId, *alter);
+
+ if (txState->TxType == TTxState::TxCreateSubDomain || txState->TxType == TTxState::TxCreateExtSubDomain) {
+ auto parentDir = context.SS->PathsById.at(path->ParentPathId);
+ ++parentDir->DirAlterVersion;
+ context.SS->PersistPathDirAlterVersion(db, parentDir);
+ context.SS->ClearDescribePathCaches(parentDir);
+ context.OnComplete.PublishToSchemeBoard(OperationId, parentDir->PathId);
+ }
+
+ context.OnComplete.UpdateTenant(pathId);
+ context.SS->ClearDescribePathCaches(path);
+ context.OnComplete.PublishToSchemeBoard(OperationId, pathId);
+
+ context.SS->ChangeTxState(db, OperationId, TTxState::Done);
+
+ LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ "NSubDomainState::TPropose HandleReply TEvOperationPlan"
+ << ", operationId " << OperationId
+ << ", at tablet " << ssId);
+
+ return true;
+ }
+
+ bool ProgressState(TOperationContext& context) override {
+ TTabletId ssId = context.SS->SelfTabletId();
+
+ LOG_INFO_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ "NSubDomainState::TPropose ProgressState"
+ << ", operationId: " << OperationId
+ << ", at schemeshard: " << ssId);
+
+ TTxState* txState = context.SS->FindTx(OperationId);
+ Y_VERIFY(txState);
+ Y_VERIFY(txState->TxType == TTxState::TxCreateSubDomain
+ || txState->TxType == TTxState::TxAlterSubDomain
+ || txState->TxType == TTxState::TxCreateExtSubDomain
+ || txState->TxType == TTxState::TxAlterExtSubDomain
+ || txState->TxType == TTxState::TxAlterExtSubDomainCreateHive
+ );
+
+ context.OnComplete.ProposeToCoordinator(OperationId, txState->TargetPathId, TStepId(0));
+
+ LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ "NSubDomainState::TPropose ProgressState leave"
+ << ", operationId " << OperationId
+ << ", at tablet " << ssId);
+
+ return false;
+ }
+};
+
+} // namespace NSubDomainState
+
+} // namespace NSchemeShard
+} // namespace NKikimr
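
NSubDomainState::TPropose above follows the standard two-phase planning shape: ProgressState asks the coordinator for a plan step, and the TEvOperationPlan reply commits the prepared alter data and finishes the transaction. A condensed, self-contained sketch of that shape, with stand-in types only (this is not the actual ISubOperationState interface):

#include <cstdint>
#include <optional>

struct TProposeSketch {
    std::optional<uint64_t> StepCreated;  // mirrors path->StepCreated

    // Phase 1: ask the coordinator to plan this operation (step 0 = "any step").
    bool ProgressState() {
        // would call context.OnComplete.ProposeToCoordinator(opId, pathId, step=0)
        return false;  // not done: wait for TEvOperationPlan
    }

    // Phase 2: the coordinator assigned a step -- commit and finish.
    bool HandleOperationPlan(uint64_t step) {
        if (!StepCreated) {
            StepCreated = step;  // persisted once, on the first plan
        }
        // swap the alter data in (SubDomains[pathId] = alter), publish the
        // path to the scheme board, move the tx state to Done
        return true;
    }
};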
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp
index e46d2f8effe..177faa4053c 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp
@@ -1,9 +1,9 @@
#include "schemeshard__operation_part.h"
+#include "schemeshard__operation_common_subdomain.h"
#include "schemeshard__operation_common.h"
#include "schemeshard_impl.h"
#include <ydb/core/base/subdomain.h>
-#include <ydb/core/persqueue/config/config.h>
namespace {
@@ -50,8 +50,6 @@ public:
const TString& parentPathStr = Transaction.GetWorkingDir();
const TString& name = settings.GetName();
- ui64 shardsToCreate = settings.GetCoordinators() + settings.GetMediators();
-
LOG_NOTICE_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
"TCreateExtSubDomain Propose"
<< ", path" << parentPathStr << "/" << name
@@ -61,17 +59,19 @@ public:
TEvSchemeShard::EStatus status = NKikimrScheme::StatusAccepted;
auto result = MakeHolder<TProposeResponse>(status, ui64(OperationId.GetTxId()), ui64(ssId));
- if (!parentPathStr) {
+ auto paramErrorResult = [&result](const char* const msg) {
result->SetError(NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: no working dir");
- return result;
+ TStringBuilder() << "Invalid ExtSubDomain request: " << msg
+ );
+ return std::move(result);
+ };
+
+ if (!parentPathStr) {
+ return paramErrorResult("no working dir");
}
if (!name) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: no name");
- return result;
+ return paramErrorResult("no name");
}
NSchemeShard::TPath parentPath = NSchemeShard::TPath::Resolve(parentPathStr, context.SS);
@@ -115,8 +115,6 @@ public:
.DepthLimit()
.PathsLimit() //check capacity on root Domain
.DirChildrenLimit()
- .PathShardsLimit(shardsToCreate)
- .ShardsLimit(shardsToCreate) //check capacity on root Domain
.IsValidACL(acl);
}
@@ -136,10 +134,7 @@ public:
settings.GetMediators() == 0;
if (!onlyDeclaration) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: only declaration at creation is allowed, do not set up tables");
- return result;
+ return paramErrorResult("only declaration at creation is allowed, do not set up tables");
}
TPathId resourcesDomainId;
@@ -164,10 +159,7 @@ public:
bool requestedStoragePools = !settings.GetStoragePools().empty();
if (requestedStoragePools) {
- result->SetError(
- NKikimrScheme::StatusInvalidParameter,
- "Malformed subdomain request: only declaration at creation is allowed, do not set up storage");
- return result;
+ return paramErrorResult("only declaration at creation is allowed, do not set up storage");
}
const auto& userAttrsDetails = Transaction.GetAlterUserAttributes();
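
The paramErrorResult lambda above replaces four copies of the SetError-and-return boilerplate with one local helper. A minimal, self-contained sketch of the same pattern, with std:: stand-ins for TProposeResponse and THolder (the real types live in schemeshard__operation_part.h):

#include <memory>
#include <string>
#include <utility>

struct TResponse {
    std::string Error;
    void SetError(std::string msg) { Error = std::move(msg); }
};

std::unique_ptr<TResponse> Propose(const std::string& workingDir,
                                   const std::string& name) {
    auto result = std::make_unique<TResponse>();
    // one local helper instead of repeating SetError(...); return result;
    // at every validation site
    auto paramErrorResult = [&result](const char* msg) {
        result->SetError(std::string("Invalid ExtSubDomain request: ") + msg);
        return std::move(result);
    };
    if (workingDir.empty()) {
        return paramErrorResult("no working dir");
    }
    if (name.empty()) {
        return paramErrorResult("no name");
    }
    return result;  // accepted
}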
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp
index df870d67db1..67f33d7f64c 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp
@@ -1,4 +1,5 @@
#include "schemeshard__operation_part.h"
+#include "schemeshard__operation_common_subdomain.h"
#include "schemeshard__operation_common.h"
#include "schemeshard_impl.h"
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp
index 78be4752cd4..d4915bd6a6b 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp
@@ -8,6 +8,8 @@ namespace NKikimr::NSchemeShard {
void TStorageChanges::Apply(TSchemeShard* ss, NTabletFlatExecutor::TTransactionContext& txc, const TActorContext&) {
NIceDb::TNiceDb db(txc.DB);
+ // TODO: paths and other changes could be queued multiple times here -- could that be a problem?
+
for (const auto& pId : Pathes) {
ss->PersistPath(db, pId);
}
@@ -51,13 +53,15 @@ void TStorageChanges::Apply(TSchemeShard* ss, NTabletFlatExecutor::TTransactionC
for (const auto& shardIdx : Shards) {
const TShardInfo& shardInfo = ss->ShardInfos.at(shardIdx);
const TPathId& pId = shardInfo.PathId;
- const TTableInfo::TPtr tableInfo = ss->Tables.at(pId);
ss->PersistShardMapping(db, shardIdx, shardInfo.TabletID, pId, shardInfo.CurrentTxId, shardInfo.TabletType);
ss->PersistChannelsBinding(db, shardIdx, shardInfo.BindedChannels);
- if (tableInfo->PerShardPartitionConfig.contains(shardIdx)) {
- ss->PersistAddTableShardPartitionConfig(db, shardIdx, tableInfo->PerShardPartitionConfig.at(shardIdx));
+ if (ss->Tables.contains(pId)) {
+ auto tableInfo = ss->Tables.at(pId);
+ if (tableInfo->PerShardPartitionConfig.contains(shardIdx)) {
+ ss->PersistAddTableShardPartitionConfig(db, shardIdx, tableInfo->PerShardPartitionConfig.at(shardIdx));
+ }
}
}
@@ -65,6 +69,12 @@ void TStorageChanges::Apply(TSchemeShard* ss, NTabletFlatExecutor::TTransactionC
ss->PersistTxState(db, opId);
}
+
+ for (const auto& pId : AlterSubDomains) {
+ auto subdomainInfo = ss->SubDomains.at(pId);
+ ss->PersistSubDomainAlter(db, pId, *subdomainInfo->GetAlter());
+ }
+
ss->PersistUpdateNextPathId(db);
ss->PersistUpdateNextShardIdx(db);
}
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.h b/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.h
index 5a10603dc3f..ae632bb18f3 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.h
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.h
@@ -32,6 +32,8 @@ class TStorageChanges: public TSimpleRefCount<TStorageChanges> {
TDeque<TOperationId> TxStates;
+ TDeque<TPathId> AlterSubDomains;
+
public:
~TStorageChanges() = default;
@@ -83,6 +85,10 @@ public:
Shards.push_back(shardIdx);
}
+ void PersistSubDomainAlter(const TPathId& pathId) {
+ AlterSubDomains.push_back(pathId);
+ }
+
void Apply(TSchemeShard* ss, NTabletFlatExecutor::TTransactionContext &txc, const TActorContext &ctx);
};
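
PersistSubDomainAlter here only records the path id; nothing touches the database until TStorageChanges::Apply runs inside the tablet transaction, matching the other queues in this class. A self-contained sketch of that collect-then-apply pattern (std::deque and a print statement stand in for TDeque and NIceDb):

#include <cstdint>
#include <deque>
#include <iostream>

using TPathId = uint64_t;

class TStorageChangesSketch {
    std::deque<TPathId> AlterSubDomains;  // intents queued during the operation
public:
    void PersistSubDomainAlter(TPathId pathId) {
        AlterSubDomains.push_back(pathId);  // record only; no DB work yet
    }
    void Apply() {
        // one pass inside the tablet transaction writes everything out
        for (TPathId id : AlterSubDomains) {
            std::cout << "persist subdomain alter for path " << id << "\n";
        }
    }
};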
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp
index adb0e8bfd4c..c06f992d5f0 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp
@@ -35,7 +35,7 @@ public:
TTxState* txState = context.SS->FindTx(OperationId);
- // Initiate asynchonous deletion of all shards
+ // Initiate asynchronous deletion of all shards
for (auto shard : txState->Shards) {
context.OnComplete.DeleteShard(shard.Idx);
}
@@ -169,7 +169,7 @@ public:
NIceDb::TNiceDb db(context.GetDB());
- auto pathes = context.SS->ListSubThee(pathId, context.Ctx);
+ auto pathes = context.SS->ListSubTree(pathId, context.Ctx);
context.SS->DropPathes(pathes, step, OperationId.GetTxId(), db, context.Ctx);
auto parentDir = context.SS->PathsById.at(path->ParentPathId);
@@ -197,9 +197,9 @@ public:
Y_VERIFY(txState);
Y_VERIFY(txState->TxType == TTxState::TxForceDropExtSubDomain);
- auto pathes = context.SS->ListSubThee(txState->TargetPathId, context.Ctx);
- NForceDrop::ValidateNoTrasactionOnPathes(OperationId, pathes, context);
- context.SS->MarkAsDroping({txState->TargetPathId}, OperationId.GetTxId(), context.Ctx);
+ auto pathes = context.SS->ListSubTree(txState->TargetPathId, context.Ctx);
+ NForceDrop::ValidateNoTransactionOnPathes(OperationId, pathes, context);
+ context.SS->MarkAsDropping({txState->TargetPathId}, OperationId.GetTxId(), context.Ctx);
NForceDrop::CollectShards(pathes, OperationId, txState, context);
context.OnComplete.ProposeToCoordinator(OperationId, txState->TargetPathId, TStepId(0));
@@ -328,7 +328,7 @@ public:
}
}
- context.SS->MarkAsDroping({path.Base()->PathId}, OperationId.GetTxId(), context.Ctx);
+ context.SS->MarkAsDropping({path.Base()->PathId}, OperationId.GetTxId(), context.Ctx);
txState.State = TTxState::Propose;
context.OnComplete.ActivateTx(OperationId);
@@ -381,11 +381,11 @@ public:
namespace NKikimr::NSchemeShard {
-ISubOperationBase::TPtr CreateFroceDropExtSubDomain(TOperationId id, const TTxTransaction& tx) {
+ISubOperationBase::TPtr CreateForceDropExtSubDomain(TOperationId id, const TTxTransaction& tx) {
return MakeSubOperation<TDropExtSubdomain>(id, tx);
}
-ISubOperationBase::TPtr CreateFroceDropExtSubDomain(TOperationId id, TTxState::ETxState state) {
+ISubOperationBase::TPtr CreateForceDropExtSubDomain(TOperationId id, TTxState::ETxState state) {
Y_VERIFY(state != TTxState::Invalid);
return MakeSubOperation<TDropExtSubdomain>(id, state);
}
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp
index 526e7171793..c6ec4888d38 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp
@@ -80,7 +80,7 @@ public:
NIceDb::TNiceDb db(context.GetDB());
- auto pathes = context.SS->ListSubThee(pathId, context.Ctx);
+ auto pathes = context.SS->ListSubTree(pathId, context.Ctx);
Y_VERIFY(pathes.size() == 1);
context.SS->DropPathes(pathes, step, OperationId.GetTxId(), db, context.Ctx);
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp
index 3bcc66a5acd..e1e1458a37c 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp
@@ -79,7 +79,7 @@ public:
NIceDb::TNiceDb db(context.GetDB());
- auto pathes = context.SS->ListSubThee(pathId, context.Ctx);
+ auto pathes = context.SS->ListSubTree(pathId, context.Ctx);
Y_VERIFY(pathes.size() == 1);
context.SS->DropPathes(pathes, step, OperationId.GetTxId(), db, context.Ctx);
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp
index 2a78e6d2f22..c45916a4c87 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp
@@ -78,7 +78,7 @@ public:
TPathId pathId = txState->TargetPathId;
TPathElement::TPtr path = context.SS->PathsById.at(pathId);
- auto pathes = context.SS->ListSubThee(pathId, context.Ctx);
+ auto pathes = context.SS->ListSubTree(pathId, context.Ctx);
NIceDb::TNiceDb db(context.GetDB());
@@ -111,9 +111,9 @@ public:
Y_VERIFY(txState);
Y_VERIFY(txState->TxType == TTxState::TxForceDropSubDomain);
- auto pathes = context.SS->ListSubThee(txState->TargetPathId, context.Ctx);
- NForceDrop::ValidateNoTrasactionOnPathes(OperationId, pathes, context);
- context.SS->MarkAsDroping(pathes, OperationId.GetTxId(), context.Ctx);
+ auto pathes = context.SS->ListSubTree(txState->TargetPathId, context.Ctx);
+ NForceDrop::ValidateNoTransactionOnPathes(OperationId, pathes, context);
+ context.SS->MarkAsDropping(pathes, OperationId.GetTxId(), context.Ctx);
NForceDrop::CollectShards(pathes, OperationId, txState, context);
context.OnComplete.ProposeToCoordinator(OperationId, txState->TargetPathId, TStepId(0));
@@ -246,7 +246,7 @@ public:
NIceDb::TNiceDb db(context.GetDB());
- auto pathes = context.SS->ListSubThee(path.Base()->PathId, context.Ctx);
+ auto pathes = context.SS->ListSubTree(path.Base()->PathId, context.Ctx);
auto relatedTx = context.SS->GetRelatedTransactions(pathes, context.Ctx);
for (auto otherTxId: relatedTx) {
@@ -271,7 +271,7 @@ public:
}
}
- context.SS->MarkAsDroping(pathes, OperationId.GetTxId(), context.Ctx);
+ context.SS->MarkAsDropping(pathes, OperationId.GetTxId(), context.Ctx);
txState.State = TTxState::Propose;
context.OnComplete.ActivateTx(OperationId);
@@ -326,20 +326,20 @@ public:
namespace NKikimr::NSchemeShard {
-ISubOperationBase::TPtr CreateFroceDropUnsafe(TOperationId id, const TTxTransaction& tx) {
+ISubOperationBase::TPtr CreateForceDropUnsafe(TOperationId id, const TTxTransaction& tx) {
return MakeSubOperation<TDropForceUnsafe>(id, tx, TPathElement::EPathType::EPathTypeInvalid);
}
-ISubOperationBase::TPtr CreateFroceDropUnsafe(TOperationId id, TTxState::ETxState state) {
+ISubOperationBase::TPtr CreateForceDropUnsafe(TOperationId id, TTxState::ETxState state) {
Y_VERIFY(state != TTxState::Invalid);
return MakeSubOperation<TDropForceUnsafe>(id, state);
}
-ISubOperationBase::TPtr CreateFroceDropSubDomain(TOperationId id, const TTxTransaction& tx) {
+ISubOperationBase::TPtr CreateForceDropSubDomain(TOperationId id, const TTxTransaction& tx) {
return MakeSubOperation<TDropForceUnsafe>(id, tx, TPathElement::EPathType::EPathTypeSubDomain);
}
-ISubOperationBase::TPtr CreateFroceDropSubDomain(TOperationId id, TTxState::ETxState state) {
+ISubOperationBase::TPtr CreateForceDropSubDomain(TOperationId id, TTxState::ETxState state) {
Y_VERIFY(state != TTxState::Invalid);
return MakeSubOperation<TDropForceUnsafe>(id, state);
}
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp
index da19c41a347..9fd70b27f81 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp
@@ -61,7 +61,7 @@ public:
path.Base()->ApplyACL(acl);
context.SS->PersistACL(db, path.Base());
- auto subtree = context.SS->ListSubThee(path.Base()->PathId, context.Ctx);
+ auto subtree = context.SS->ListSubTree(path.Base()->PathId, context.Ctx);
for (const TPathId pathId : subtree) {
if (context.SS->PathsById.at(pathId)->IsMigrated()) {
continue;
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_part.h b/ydb/core/tx/schemeshard/schemeshard__operation_part.h
index 5b5f373ea1d..e1a1890aa45 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_part.h
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_part.h
@@ -115,7 +115,7 @@ public:
void AddAuditLogFragment(TAuditLogFragment&& op) {
AuditLogFragments.push_back(std::move(op));
}
-
+
void ClearAuditLogFragments() {
AuditLogFragments.clear();
}
@@ -348,8 +348,8 @@ ISubOperationBase::TPtr CreateModifyACL(TOperationId id, TTxState::ETxState stat
ISubOperationBase::TPtr CreateAlterUserAttrs(TOperationId id, const TTxTransaction& tx);
ISubOperationBase::TPtr CreateAlterUserAttrs(TOperationId id, TTxState::ETxState state);
-ISubOperationBase::TPtr CreateFroceDropUnsafe(TOperationId id, const TTxTransaction& tx);
-ISubOperationBase::TPtr CreateFroceDropUnsafe(TOperationId id, TTxState::ETxState state);
+ISubOperationBase::TPtr CreateForceDropUnsafe(TOperationId id, const TTxTransaction& tx);
+ISubOperationBase::TPtr CreateForceDropUnsafe(TOperationId id, TTxState::ETxState state);
ISubOperationBase::TPtr CreateNewTable(TOperationId id, const TTxTransaction& tx, const THashSet<TString>& localSequences = { });
ISubOperationBase::TPtr CreateNewTable(TOperationId id, TTxState::ETxState state);
@@ -477,17 +477,26 @@ ISubOperationBase::TPtr CreateUpgradeSubDomainDecision(TOperationId id, TTxState
ISubOperationBase::TPtr CreateDropSubdomain(TOperationId id, const TTxTransaction& tx);
ISubOperationBase::TPtr CreateDropSubdomain(TOperationId id, TTxState::ETxState state);
-ISubOperationBase::TPtr CreateFroceDropSubDomain(TOperationId id, const TTxTransaction& tx);
-ISubOperationBase::TPtr CreateFroceDropSubDomain(TOperationId id, TTxState::ETxState state);
+ISubOperationBase::TPtr CreateForceDropSubDomain(TOperationId id, const TTxTransaction& tx);
+ISubOperationBase::TPtr CreateForceDropSubDomain(TOperationId id, TTxState::ETxState state);
+
+/// ExtSubDomain
+// Create
ISubOperationBase::TPtr CreateExtSubDomain(TOperationId id, const TTxTransaction& tx);
ISubOperationBase::TPtr CreateExtSubDomain(TOperationId id, TTxState::ETxState state);
+// Alter
+TVector<ISubOperationBase::TPtr> CreateCompatibleAlterExtSubDomain(TOperationId nextId, const TTxTransaction& tx, TOperationContext& context);
ISubOperationBase::TPtr CreateAlterExtSubDomain(TOperationId id, const TTxTransaction& tx);
ISubOperationBase::TPtr CreateAlterExtSubDomain(TOperationId id, TTxState::ETxState state);
+ISubOperationBase::TPtr CreateAlterExtSubDomainCreateHive(TOperationId id, const TTxTransaction& tx);
+ISubOperationBase::TPtr CreateAlterExtSubDomainCreateHive(TOperationId id, TTxState::ETxState state);
+
+// Drop
+ISubOperationBase::TPtr CreateForceDropExtSubDomain(TOperationId id, const TTxTransaction& tx);
+ISubOperationBase::TPtr CreateForceDropExtSubDomain(TOperationId id, TTxState::ETxState state);
-ISubOperationBase::TPtr CreateFroceDropExtSubDomain(TOperationId id, const TTxTransaction& tx);
-ISubOperationBase::TPtr CreateFroceDropExtSubDomain(TOperationId id, TTxState::ETxState state);
ISubOperationBase::TPtr CreateNewKesus(TOperationId id, const TTxTransaction& tx);
ISubOperationBase::TPtr CreateNewKesus(TOperationId id, TTxState::ETxState state);
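
CreateCompatibleAlterExtSubDomain is the only factory above that returns a vector of sub-operations, which is what makes alter-extsubdomain composite: with EnableAlterDatabaseCreateHiveFirst on, it can emit a create-hive part ahead of the main alter part. An illustrative, self-contained sketch of that dispatch, with stand-in types (the real function also validates the transaction and handles the flag-off path):

#include <memory>
#include <vector>

struct ISubOperation { virtual ~ISubOperation() = default; };
struct TCreateHivePart : ISubOperation {};   // cf. TxAlterExtSubDomainCreateHive
struct TAlterPart : ISubOperation {};        // cf. TxAlterExtSubDomain

std::vector<std::unique_ptr<ISubOperation>> MakeAlterParts(bool createHiveFirst) {
    std::vector<std::unique_ptr<ISubOperation>> parts;
    if (createHiveFirst) {
        // hive creation runs (and completes) before the rest of the alter
        parts.push_back(std::make_unique<TCreateHivePart>());
    }
    parts.push_back(std::make_unique<TAlterPart>());
    return parts;
}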
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp
index 8c16a6c03a9..c966b7d93e1 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp
@@ -135,8 +135,8 @@ void TSideEffects::ReadyToNotify(TOperationId opId) {
ReadyToNotifyOperations.insert(opId);
}
-void TSideEffects::Dependence(TTxId parend, TTxId child) {
- Dependencies.push_back(TDependence(parend, child));
+void TSideEffects::Dependence(TTxId parent, TTxId child) {
+ Dependencies.push_back(TDependence(parent, child));
}
void TSideEffects::ApplyOnExecute(TSchemeShard* ss, NTabletFlatExecutor::TTransactionContext& txc, const TActorContext& ctx) {
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp
index 57285b787ad..4d1b2182c84 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp
@@ -42,7 +42,7 @@ public:
bool isDone = true;
// wait for all transactions inside
- auto pathes = context.SS->ListSubThee(txState->TargetPathId, context.Ctx);
+ auto pathes = context.SS->ListSubTree(txState->TargetPathId, context.Ctx);
auto relatedTx = context.SS->GetRelatedTransactions(pathes, context.Ctx);
for (auto otherTxId: relatedTx) {
if (otherTxId == OperationId.GetTxId()) {
@@ -429,7 +429,7 @@ public:
shard.Operation = TTxState::ConfigureParts;
TenantSchemeShardId = TTabletId(processing.GetSchemeShard());
- PathesInside = context.SS->ListSubThee(path.Base()->PathId, context.Ctx);
+ PathesInside = context.SS->ListSubTree(path.Base()->PathId, context.Ctx);
LOG_DEBUG_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
DebugHint()
@@ -550,7 +550,7 @@ public:
auto path = context.SS->PathsById.at(pathId);
path->SwapChildren(HidenChildren); // return the children: we no longer pretend there are none, we mark them as Migrated
- auto pathsInside = context.SS->ListSubThee(pathId, context.Ctx);
+ auto pathsInside = context.SS->ListSubTree(pathId, context.Ctx);
pathsInside.erase(pathId);
for (auto pId: pathsInside) {
auto item = context.SS->PathsById.at(pId);
@@ -621,7 +621,7 @@ public:
context.SS->PersistACL(db, item);
context.SS->ClearDescribePathCaches(item);
- auto subtree = context.SS->ListSubThee(pathId, context.Ctx);
+ auto subtree = context.SS->ListSubTree(pathId, context.Ctx);
for (const TPathId pathId : subtree) {
context.OnComplete.RePublishToSchemeBoard(OperationId, pathId);
}
@@ -831,7 +831,7 @@ public:
TenantSchemeShardId = subDomain->GetTenantSchemeShardID();
Y_VERIFY(TenantSchemeShardId);
- auto pathesInside = context.SS->ListSubThee(targetPathId, context.Ctx);
+ auto pathesInside = context.SS->ListSubTree(targetPathId, context.Ctx);
pathesInside.erase(targetPathId);
for (auto pId: pathesInside) {
TPathElement::TPtr item = context.SS->PathsById.at(pId);
@@ -977,7 +977,7 @@ public:
}
IsInited = true;
- auto pathes = context.SS->ListSubThee(pathId, context.Ctx);
+ auto pathes = context.SS->ListSubTree(pathId, context.Ctx);
pathes.erase(pathId);
auto shards = context.SS->CollectAllShards(pathes);
@@ -1168,7 +1168,7 @@ public:
TPathElement::EPathType::EPathTypeKesus
};
- auto pathesInside = context.SS->ListSubThee(pathId, context.Ctx);
+ auto pathesInside = context.SS->ListSubTree(pathId, context.Ctx);
for (auto pId: pathesInside) {
if (pId == pathId) {
continue;
@@ -1514,15 +1514,15 @@ ISubOperationBase::TPtr CreateCompatibleSubdomainDrop(TSchemeShard* ss, TOperati
.NotDeleted();
if (!checks) {
- return CreateFroceDropSubDomain(id, tx);
+ return CreateForceDropSubDomain(id, tx);
}
}
if (path.Base()->IsExternalSubDomainRoot()) {
- return CreateFroceDropExtSubDomain(id, tx);
+ return CreateForceDropExtSubDomain(id, tx);
}
- return CreateFroceDropSubDomain(id, tx);
+ return CreateForceDropSubDomain(id, tx);
}
ISubOperationBase::TPtr CreateCompatibleSubdomainAlter(TSchemeShard* ss, TOperationId id, const TTxTransaction& tx) {
diff --git a/ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp b/ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp
index d04be460803..a74b28a6beb 100644
--- a/ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp
@@ -89,7 +89,7 @@ struct TSchemeShard::TTxUpdateTenant : public TSchemeShard::TRwTxBase {
Self->ParentDomainCachedEffectiveACL.Init(Self->ParentDomainEffectiveACL);
Self->PersistParentDomainEffectiveACL(db, record.GetOwner(), record.GetEffectiveACL(), record.GetEffectiveACLVersion());
- for (const TPathId pathId : Self->ListSubThee(Self->RootPathId(), ctx)) {
+ for (const TPathId pathId : Self->ListSubTree(Self->RootPathId(), ctx)) {
SideEffects.PublishToSchemeBoard(InvalidOperationId, pathId);
}
@@ -195,7 +195,7 @@ struct TSchemeShard::TTxUpdateTenant : public TSchemeShard::TRwTxBase {
++path->ACLVersion;
Self->PersistACL(db, path);
- for (const TPathId pathId : Self->ListSubThee(Self->RootPathId(), ctx)) {
+ for (const TPathId pathId : Self->ListSubTree(Self->RootPathId(), ctx)) {
SideEffects.PublishToSchemeBoard(InvalidOperationId, pathId);
}
diff --git a/ydb/core/tx/schemeshard/schemeshard_audit_log_fragment.cpp b/ydb/core/tx/schemeshard/schemeshard_audit_log_fragment.cpp
index bd1bfc1e051..0309d2ac437 100644
--- a/ydb/core/tx/schemeshard/schemeshard_audit_log_fragment.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_audit_log_fragment.cpp
@@ -62,6 +62,7 @@ TString DefineUserOperationName(NKikimrSchemeOp::EOperationType type) {
return "CREATE DATABASE";
case NKikimrSchemeOp::EOperationType::ESchemeOpAlterSubDomain:
case NKikimrSchemeOp::EOperationType::ESchemeOpAlterExtSubDomain:
+ case NKikimrSchemeOp::EOperationType::ESchemeOpAlterExtSubDomainCreateHive:
return "ALTER DATABASE";
case NKikimrSchemeOp::EOperationType::ESchemeOpDropSubDomain:
case NKikimrSchemeOp::EOperationType::ESchemeOpForceDropSubDomain:
@@ -186,6 +187,7 @@ TString DefineUserOperationName(NKikimrSchemeOp::EOperationType type) {
case NKikimrSchemeOp::EOperationType::ESchemeOpDropBlobDepot:
return "DROP BLOB DEPOT";
}
+ Y_FAIL("switch should cover all operation types");
}
TAuditLogFragment::TAuditLogFragment(const NKikimrSchemeOp::TModifyScheme& tx)
@@ -326,9 +328,8 @@ void TAuditLogFragment::FillPathes(const NKikimrSchemeOp::TModifyScheme& tx) {
Path = JoinPath({tx.GetWorkingDir(), tx.GetDrop().GetName()});
break;
case NKikimrSchemeOp::EOperationType::ESchemeOpCreateExtSubDomain:
- Path = JoinPath({tx.GetWorkingDir(), tx.GetSubDomain().GetName()});
- break;
case NKikimrSchemeOp::EOperationType::ESchemeOpAlterExtSubDomain:
+ case NKikimrSchemeOp::EOperationType::ESchemeOpAlterExtSubDomainCreateHive:
Path = JoinPath({tx.GetWorkingDir(), tx.GetSubDomain().GetName()});
break;
case NKikimrSchemeOp::EOperationType::ESchemeOpForceDropExtSubDomain:
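
Both audit-log switches above cover the full EOperationType enum, and the added Y_FAIL turns a future uncovered value into an immediate, attributable crash instead of a silent fall-through. A self-contained illustration of the idiom, with assert() standing in for Y_FAIL:

#include <cassert>

enum class EOp { Create, Alter, Drop };

const char* OperationName(EOp op) {
    switch (op) {
        case EOp::Create: return "CREATE DATABASE";
        case EOp::Alter:  return "ALTER DATABASE";
        case EOp::Drop:   return "DROP DATABASE";
    }
    // reached only if a new EOp value is added without extending the switch
    assert(false && "switch should cover all operation types");
    return nullptr;
}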
diff --git a/ydb/core/tx/schemeshard/schemeshard_impl.cpp b/ydb/core/tx/schemeshard/schemeshard_impl.cpp
index ace8f317e02..c75ca3ecc14 100644
--- a/ydb/core/tx/schemeshard/schemeshard_impl.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_impl.cpp
@@ -972,7 +972,7 @@ bool TSchemeShard::ResolveChannelsDetailsAsIs(
bool TSchemeShard::TabletResolveChannelsDetails(ui32 profileId, const TChannelProfiles::TProfile &profile, const TStoragePools &storagePools, TChannelsBindings &channelsBinding)
{
- const bool substituteMissedForDafaultProfile = 0 == profileId;
+ const bool isDefaultProfile = (0 == profileId);
TChannelsBindings result;
std::set<TString> uniqPoolsNames;
@@ -980,30 +980,31 @@ bool TSchemeShard::TabletResolveChannelsDetails(ui32 profileId, const TChannelPr
for (ui32 channelId = 0; channelId < profile.Channels.size(); ++channelId) {
const TChannelProfiles::TProfile::TChannel& channel = profile.Channels[channelId];
- auto poolIt = std::find_if(storagePools.begin(), storagePools.end(),
- [&channel] (const TStoragePools::value_type& pool)
- { return channel.PoolKind == pool.GetKind(); });
+ auto poolIt = std::find_if(storagePools.begin(), storagePools.end(), [&channel] (const TStoragePools::value_type& pool) {
+ return channel.PoolKind == pool.GetKind();
+ });
+ // substitute missing pool(s) for the default profile (but not for other profiles)
if (poolIt == storagePools.end()) {
- if (substituteMissedForDafaultProfile) {
+ if (isDefaultProfile) {
poolIt = storagePools.begin();
} else {
- //unable to construct channel binding with the storage pool
+ // unable to construct channel binding with the storage pool
return false;
}
}
- // sys log channel is 0
- // log channel is 1 always
+ // sys log channel is 0, log channel is 1 (always)
if (0 == channelId || 1 == channelId) {
result.emplace_back();
result.back().SetStoragePoolName(poolIt->GetName());
continue;
}
- // bytheway, channel 1 maight be shared with data and ext in new interafeca with StorageConfig
- // but we already already provide for clients variable like ColumnStorage1Ext2
- // so we do not want to break them and we should always make at least 3 channe until StorageConfig is mainstream
+ // by the way, channel 1 might be shared between data and ext in the new StorageConfig interface,
+ // but since we already provide variables like ColumnStorage1Ext2 to clients,
+ // we do not want to break compatibility and so, until StorageConfig is mainstream,
+ // we should always return at least 3 channels
if (uniqPoolsNames.insert(poolIt->GetName()).second) {
result.emplace_back();
result.back().SetStoragePoolName(poolIt->GetName());
@@ -1373,6 +1374,7 @@ TPathElement::EPathState TSchemeShard::CalcPathState(TTxState::ETxType txType, T
case TTxState::TxAlterKesus:
case TTxState::TxAlterSubDomain:
case TTxState::TxAlterExtSubDomain:
+ case TTxState::TxAlterExtSubDomainCreateHive:
case TTxState::TxAlterUserAttributes:
case TTxState::TxInitializeBuildIndex:
case TTxState::TxFinalizeBuildIndex:
@@ -1481,37 +1483,37 @@ void TSchemeShard::BumpIncompatibleChanges(NIceDb::TNiceDb& db, ui64 incompatibl
void TSchemeShard::PersistTableIndex(NIceDb::TNiceDb& db, const TPathId& pathId) {
Y_VERIFY(PathsById.contains(pathId));
- TPathElement::TPtr elemnt = PathsById.at(pathId);
+ TPathElement::TPtr element = PathsById.at(pathId);
Y_VERIFY(Indexes.contains(pathId));
TTableIndexInfo::TPtr index = Indexes.at(pathId);
- Y_VERIFY(IsLocalId(elemnt->PathId));
- Y_VERIFY(elemnt->IsTableIndex());
+ Y_VERIFY(IsLocalId(element->PathId));
+ Y_VERIFY(element->IsTableIndex());
TTableIndexInfo::TPtr alterData = index->AlterData;
Y_VERIFY(alterData);
Y_VERIFY(index->AlterVersion < alterData->AlterVersion);
- db.Table<Schema::TableIndex>().Key(elemnt->PathId.LocalPathId).Update(
+ db.Table<Schema::TableIndex>().Key(element->PathId.LocalPathId).Update(
NIceDb::TUpdate<Schema::TableIndex::AlterVersion>(alterData->AlterVersion),
NIceDb::TUpdate<Schema::TableIndex::IndexType>(alterData->Type),
NIceDb::TUpdate<Schema::TableIndex::State>(alterData->State));
- db.Table<Schema::TableIndexAlterData>().Key(elemnt->PathId.LocalPathId).Delete();
+ db.Table<Schema::TableIndexAlterData>().Key(element->PathId.LocalPathId).Delete();
for (ui32 keyIdx = 0; keyIdx < alterData->IndexKeys.size(); ++keyIdx) {
- db.Table<Schema::TableIndexKeys>().Key(elemnt->PathId.LocalPathId, keyIdx).Update(
+ db.Table<Schema::TableIndexKeys>().Key(element->PathId.LocalPathId, keyIdx).Update(
NIceDb::TUpdate<Schema::TableIndexKeys::KeyName>(alterData->IndexKeys[keyIdx]));
- db.Table<Schema::TableIndexKeysAlterData>().Key(elemnt->PathId.LocalPathId, keyIdx).Delete();
+ db.Table<Schema::TableIndexKeysAlterData>().Key(element->PathId.LocalPathId, keyIdx).Delete();
}
for (ui32 dataColIdx = 0; dataColIdx < alterData->IndexDataColumns.size(); ++dataColIdx) {
- db.Table<Schema::TableIndexDataColumns>().Key(elemnt->PathId.OwnerId, elemnt->PathId.LocalPathId, dataColIdx).Update(
+ db.Table<Schema::TableIndexDataColumns>().Key(element->PathId.OwnerId, element->PathId.LocalPathId, dataColIdx).Update(
NIceDb::TUpdate<Schema::TableIndexDataColumns::DataColumnName>(alterData->IndexDataColumns[dataColIdx]));
- db.Table<Schema::TableIndexDataColumnsAlterData>().Key(elemnt->PathId.OwnerId, elemnt->PathId.LocalPathId, dataColIdx).Delete();
+ db.Table<Schema::TableIndexDataColumnsAlterData>().Key(element->PathId.OwnerId, element->PathId.LocalPathId, dataColIdx).Delete();
}
}
@@ -1897,6 +1899,14 @@ void TSchemeShard::PersistSubDomainSecurityStateVersion(NIceDb::TNiceDb& db, con
.Update<Schema::SubDomains::SecurityStateVersion>(subDomain.GetSecurityStateVersion());
}
+void TSchemeShard::PersistSubDomainPrivateShards(NIceDb::TNiceDb& db, const TPathId& pathId, const TSubDomainInfo& subDomain) {
+ Y_VERIFY(IsLocalId(pathId));
+
+ for (auto shardIdx: subDomain.GetPrivateShards()) {
+ db.Table<Schema::SubDomainShards>().Key(pathId.LocalPathId, shardIdx.GetLocalId()).Update();
+ }
+}
+
void TSchemeShard::PersistSubDomain(NIceDb::TNiceDb& db, const TPathId& pathId, const TSubDomainInfo& subDomain) {
Y_VERIFY(IsLocalId(pathId));
@@ -3607,15 +3617,8 @@ TTabletId TSchemeShard::ResolveHive(TPathId pathId, const TActorContext& ctx) co
}
TSubDomainInfo::TPtr subdomain = ResolveDomainInfo(pathId);
- TPathElement::TPtr path = PathsById.at(pathId);
-
- if (path->IsExternalSubDomainRoot()) {
- // we use either shared or global hive for private subdomain's shards. Like TenantSS, CC, MM
- // and don't use tenant Hive for privat subdomain;s shards
- return subdomain->GetSharedHive() ? subdomain->GetSharedHive() : GetGlobalHive(ctx);
- }
- // for pathes inside subdomain and their shards we choise Hive according that order: tanant, shared, global
+ // for paths inside a subdomain and their shards we choose the Hive in this order: tenant, shared, global
if (subdomain->GetTenantHiveID()) {
return subdomain->GetTenantHiveID();
}
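
The rewritten ResolveHive drops the special case for extsubdomain roots and keeps a single fallback chain. A self-contained sketch of that order (0 stands in for InvalidTabletId):

#include <cstdint>

using TTabletId = uint64_t;

TTabletId ResolveHiveSketch(TTabletId tenantHive, TTabletId sharedHive,
                            TTabletId globalHive) {
    if (tenantHive) {
        return tenantHive;   // the extsubdomain's own hive wins
    }
    if (sharedHive) {
        return sharedHive;   // else the shared (serverless) hive
    }
    return globalHive;       // else fall back to the root hive
}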
@@ -3996,6 +3999,7 @@ void TSchemeShard::OnActivateExecutor(const TActorContext &ctx) {
EnableBackgroundCompactionServerless = appData->FeatureFlags.GetEnableBackgroundCompactionServerless();
EnableBorrowedSplitCompaction = appData->FeatureFlags.GetEnableBorrowedSplitCompaction();
EnableMoveIndex = appData->FeatureFlags.GetEnableMoveIndex();
+ EnableAlterDatabaseCreateHiveFirst = appData->FeatureFlags.GetEnableAlterDatabaseCreateHiveFirst();
ConfigureCompactionQueues(appData->CompactionConfig, ctx);
ConfigureStatsBatching(appData->SchemeShardConfig, ctx);
@@ -4411,7 +4415,7 @@ void TSchemeShard::ExamineTreeVFS(TPathId nodeId, std::function<void (TPathEleme
}
}
-THashSet<TPathId> TSchemeShard::ListSubThee(TPathId subdomain_root, const TActorContext &ctx) {
+THashSet<TPathId> TSchemeShard::ListSubTree(TPathId subdomain_root, const TActorContext &ctx) {
THashSet<TPathId> pathes;
auto savePath = [&] (TPathElement::TPtr node) {
@@ -4546,7 +4550,7 @@ void TSchemeShard::UncountNode(TPathElement::TPtr node) {
}
}
-void TSchemeShard::MarkAsDroping(const THashSet<TPathId> &pathes, TTxId txId, const TActorContext &ctx) {
+void TSchemeShard::MarkAsDropping(const THashSet<TPathId> &pathes, TTxId txId, const TActorContext &ctx) {
for (auto id: pathes) {
MarkAsDroping(PathsById.at(id), txId, ctx);
}
@@ -5432,8 +5436,6 @@ void TSchemeShard::Handle(TEvSchemeShard::TEvInitTenantSchemeShardResult::TPtr&
return;
}
- Y_VERIFY(opId.GetSubTxId() == FirstSubTxId);
-
Execute(CreateTxOperationReply(opId, ev), ctx);
}
@@ -5460,8 +5462,6 @@ void TSchemeShard::Handle(TEvSchemeShard::TEvPublishTenantAsReadOnlyResult::TPtr
return;
}
- Y_VERIFY(opId.GetSubTxId() == FirstSubTxId);
-
Execute(CreateTxOperationReply(opId, ev), ctx);
}
@@ -5488,8 +5488,6 @@ void TSchemeShard::Handle(TEvSchemeShard::TEvPublishTenantResult::TPtr& ev, cons
return;
}
- Y_VERIFY(opId.GetSubTxId() == FirstSubTxId);
-
Execute(CreateTxOperationReply(opId, ev), ctx);
}
@@ -6367,6 +6365,7 @@ void TSchemeShard::ApplyConsoleConfigs(const NKikimrConfig::TFeatureFlags& featu
EnableBackgroundCompactionServerless = featureFlags.GetEnableBackgroundCompactionServerless();
EnableBorrowedSplitCompaction = featureFlags.GetEnableBorrowedSplitCompaction();
EnableMoveIndex = featureFlags.GetEnableMoveIndex();
+ EnableAlterDatabaseCreateHiveFirst = featureFlags.GetEnableAlterDatabaseCreateHiveFirst();
}
void TSchemeShard::ConfigureStatsBatching(const NKikimrConfig::TSchemeShardConfig& config, const TActorContext& ctx) {
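
EnableAlterDatabaseCreateHiveFirst is read in two places -- once at tablet activation and again on console config updates -- so a running schemeshard picks up flag changes without a restart. A minimal sketch of that plumbing, with illustrative stand-in types:

struct TFeatureFlagsSketch {
    bool EnableAlterDatabaseCreateHiveFirst = false;
};

struct TSchemeShardSketch {
    bool EnableAlterDatabaseCreateHiveFirst = false;

    void OnActivateExecutor(const TFeatureFlagsSketch& flags) { Apply(flags); }
    void ApplyConsoleConfigs(const TFeatureFlagsSketch& flags) { Apply(flags); }

private:
    void Apply(const TFeatureFlagsSketch& flags) {
        EnableAlterDatabaseCreateHiveFirst = flags.EnableAlterDatabaseCreateHiveFirst;
    }
};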
diff --git a/ydb/core/tx/schemeshard/schemeshard_impl.h b/ydb/core/tx/schemeshard/schemeshard_impl.h
index 09c89915683..550e7159eac 100644
--- a/ydb/core/tx/schemeshard/schemeshard_impl.h
+++ b/ydb/core/tx/schemeshard/schemeshard_impl.h
@@ -256,6 +256,7 @@ public:
bool EnableBackgroundCompactionServerless = false;
bool EnableBorrowedSplitCompaction = false;
bool EnableMoveIndex = false;
+ bool EnableAlterDatabaseCreateHiveFirst = false;
TShardDeleter ShardDeleter;
@@ -529,11 +530,11 @@ public:
THashSet<TShardIdx> CollectAllShards(const THashSet<TPathId>& pathes) const;
void ExamineTreeVFS(TPathId nodeId, std::function<void(TPathElement::TPtr)> func, const TActorContext& ctx);
- THashSet<TPathId> ListSubThee(TPathId subdomain_root, const TActorContext& ctx);
+ THashSet<TPathId> ListSubTree(TPathId subdomain_root, const TActorContext& ctx);
THashSet<TTxId> GetRelatedTransactions(const THashSet<TPathId>& pathes, const TActorContext &ctx);
void MarkAsDroping(TPathElement::TPtr node, TTxId txId, const TActorContext& ctx);
- void MarkAsDroping(const THashSet<TPathId>& pathes, TTxId txId, const TActorContext& ctx);
+ void MarkAsDropping(const THashSet<TPathId>& pathes, TTxId txId, const TActorContext& ctx);
void UncountNode(TPathElement::TPtr node);
void MarkAsMigrated(TPathElement::TPtr node, const TActorContext& ctx);
@@ -655,6 +656,7 @@ public:
void PersistSubDomainState(NIceDb::TNiceDb& db, const TPathId& pathId, const TSubDomainInfo& subDomain);
void PersistSubDomainSchemeQuotas(NIceDb::TNiceDb& db, const TPathId& pathId, const TSubDomainInfo& subDomain);
void PersistSubDomainSecurityStateVersion(NIceDb::TNiceDb& db, const TPathId& pathId, const TSubDomainInfo& subDomain);
+ void PersistSubDomainPrivateShards(NIceDb::TNiceDb& db, const TPathId& pathId, const TSubDomainInfo& subDomain);
void PersistDeleteSubDomainAlter(NIceDb::TNiceDb& db, const TPathId& pathId, const TSubDomainInfo& subDomain);
void PersistKesusInfo(NIceDb::TNiceDb& db, TPathId pathId, const TKesusInfo::TPtr);
void PersistKesusVersion(NIceDb::TNiceDb& db, TPathId pathId, const TKesusInfo::TPtr);
diff --git a/ydb/core/tx/schemeshard/schemeshard_info_types.h b/ydb/core/tx/schemeshard/schemeshard_info_types.h
index 8756c924e93..bae7c638bb0 100644
--- a/ydb/core/tx/schemeshard/schemeshard_info_types.h
+++ b/ydb/core/tx/schemeshard/schemeshard_info_types.h
@@ -1392,6 +1392,10 @@ struct TSubDomainInfo: TSimpleRefCount<TSubDomainInfo> {
return TTabletId(ProcessingParams.GetHive());
}
+ void SetTenantHiveIDPrivate(const TTabletId& hiveId) {
+ ProcessingParams.SetHive(ui64(hiveId));
+ }
+
TTabletId GetTenantSysViewProcessorID() const {
if (!ProcessingParams.HasSysViewProcessor()) {
return InvalidTabletId;
@@ -1730,27 +1734,27 @@ struct TSubDomainInfo: TSimpleRefCount<TSubDomainInfo> {
}
ProcessingParams.ClearCoordinators();
- TVector<TTabletId> coordinators = FilterTablets(ETabletType::Coordinator, allShards);
+ TVector<TTabletId> coordinators = FilterPrivateTablets(ETabletType::Coordinator, allShards);
for (TTabletId coordinator: coordinators) {
ProcessingParams.AddCoordinators(ui64(coordinator));
}
CoordinatorSelector = new TCoordinators(ProcessingParams);
ProcessingParams.ClearMediators();
- TVector<TTabletId> mediators = FilterTablets(ETabletType::Mediator, allShards);
+ TVector<TTabletId> mediators = FilterPrivateTablets(ETabletType::Mediator, allShards);
for (TTabletId mediator: mediators) {
ProcessingParams.AddMediators(ui64(mediator));
}
ProcessingParams.ClearSchemeShard();
- TVector<TTabletId> schemeshards = FilterTablets(ETabletType::SchemeShard, allShards);
+ TVector<TTabletId> schemeshards = FilterPrivateTablets(ETabletType::SchemeShard, allShards);
Y_VERIFY_S(schemeshards.size() <= 1, "size was: " << schemeshards.size());
if (schemeshards.size()) {
ProcessingParams.SetSchemeShard(ui64(schemeshards.front()));
}
ProcessingParams.ClearHive();
- TVector<TTabletId> hives = FilterTablets(ETabletType::Hive, allShards);
+ TVector<TTabletId> hives = FilterPrivateTablets(ETabletType::Hive, allShards);
Y_VERIFY_S(hives.size() <= 1, "size was: " << hives.size());
if (hives.size()) {
ProcessingParams.SetHive(ui64(hives.front()));
@@ -1758,7 +1762,7 @@ struct TSubDomainInfo: TSimpleRefCount<TSubDomainInfo> {
}
ProcessingParams.ClearSysViewProcessor();
- TVector<TTabletId> sysViewProcessors = FilterTablets(ETabletType::SysViewProcessor, allShards);
+ TVector<TTabletId> sysViewProcessors = FilterPrivateTablets(ETabletType::SysViewProcessor, allShards);
Y_VERIFY_S(sysViewProcessors.size() <= 1, "size was: " << sysViewProcessors.size());
if (sysViewProcessors.size()) {
ProcessingParams.SetSysViewProcessor(ui64(sysViewProcessors.front()));
@@ -1960,7 +1964,7 @@ private:
NLoginProto::TSecurityState SecurityState;
ui64 SecurityStateVersion = 0;
- TVector<TTabletId> FilterTablets(TTabletTypes::EType type, const THashMap<TShardIdx, TShardInfo>& allShards) const {
+ TVector<TTabletId> FilterPrivateTablets(TTabletTypes::EType type, const THashMap<TShardIdx, TShardInfo>& allShards) const {
TVector<TTabletId> tablets;
for (auto shardId: PrivateShards) {
diff --git a/ydb/core/tx/schemeshard/schemeshard_tx_infly.h b/ydb/core/tx/schemeshard/schemeshard_tx_infly.h
index a5a81cb8e28..112595c65c6 100644
--- a/ydb/core/tx/schemeshard/schemeshard_tx_infly.h
+++ b/ydb/core/tx/schemeshard/schemeshard_tx_infly.h
@@ -117,6 +117,7 @@ struct TTxState {
item(TxUpdateMainTableOnIndexMove, 71) \
item(TxAllocatePQ, 72) \
item(TxCreateCdcStreamAtTableWithSnapshot, 73) \
+ item(TxAlterExtSubDomainCreateHive, 74) \
// TX_STATE_TYPE_ENUM
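
The item list above is an X-macro: adding TxAlterExtSubDomainCreateHive as item 74 makes the new tx type appear everywhere the list is expanded (enum values, names, counters). A minimal, self-contained illustration of the mechanism (values are illustrative, not the real numbering):

#define TX_TYPES_SKETCH(item) \
    item(TxAlterExtSubDomain, 1) \
    item(TxAlterExtSubDomainCreateHive, 2)

#define DECLARE_VALUE(name, value) name = value,
enum ETxTypeSketch { TX_TYPES_SKETCH(DECLARE_VALUE) };
#undef DECLARE_VALUE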
@@ -371,6 +372,7 @@ struct TTxState {
case TxUpgradeSubDomain:
case TxUpgradeSubDomainDecision:
case TxAlterExtSubDomain:
+ case TxAlterExtSubDomainCreateHive:
case TxAlterUserAttributes:
case TxAlterTableIndex:
case TxAlterSolomonVolume:
@@ -456,6 +458,7 @@ struct TTxState {
case TxUpgradeSubDomain:
case TxUpgradeSubDomainDecision:
case TxAlterExtSubDomain:
+ case TxAlterExtSubDomainCreateHive:
case TxAlterUserAttributes:
case TxAlterTableIndex:
case TxAlterSolomonVolume:
@@ -539,6 +542,7 @@ struct TTxState {
case TxAlterKesus:
case TxAlterSubDomain:
case TxAlterExtSubDomain:
+ case TxAlterExtSubDomainCreateHive:
case TxUpgradeSubDomain:
case TxUpgradeSubDomainDecision:
case TxAlterUserAttributes:
diff --git a/ydb/core/tx/schemeshard/ut_allocate_pq.cpp b/ydb/core/tx/schemeshard/ut_allocate_pq.cpp
index 0934cee0a58..4d7647a5b16 100644
--- a/ydb/core/tx/schemeshard/ut_allocate_pq.cpp
+++ b/ydb/core/tx/schemeshard/ut_allocate_pq.cpp
@@ -99,7 +99,7 @@ Y_UNIT_TEST_SUITE(TSchemeShardAllocatePQTest) {
{
auto balancerDescr = GetDescibeFromPQBalancer(runtime, 9437197);
- TString expected = R"(TopicName: "PQGroup" Version: 1 Config { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database" } PartitionPerTablet: 4 Partitions { Partition: 0 TabletId: 9437194 } Partitions { Partition: 1 TabletId: 9437194 } Partitions { Partition: 2 TabletId: 9437195 } Partitions { Partition: 3 TabletId: 9437194 } Partitions { Partition: 4 TabletId: 9437196 } Partitions { Partition: 5 TabletId: 9437195 } Partitions { Partition: 6 TabletId: 9437194 } Partitions { Partition: 7 TabletId: 9437196 } Partitions { Partition: 8 TabletId: 9437195 } Partitions { Partition: 9 TabletId: 9437195 } SchemeShardId: 9437200 BalancerTabletId: 9437197 SecurityObject: "\022\000")";
+ TString expected = R"(TopicName: "PQGroup" Version: 1 Config { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database" } PartitionPerTablet: 4 Partitions { Partition: 0 TabletId: 9437194 } Partitions { Partition: 1 TabletId: 9437194 } Partitions { Partition: 2 TabletId: 9437195 } Partitions { Partition: 3 TabletId: 9437194 } Partitions { Partition: 4 TabletId: 9437196 } Partitions { Partition: 5 TabletId: 9437195 } Partitions { Partition: 6 TabletId: 9437194 } Partitions { Partition: 7 TabletId: 9437196 } Partitions { Partition: 8 TabletId: 9437195 } Partitions { Partition: 9 TabletId: 9437195 } SchemeShardId: 9437198 BalancerTabletId: 9437197 SecurityObject: "\022\000")";
UNIT_ASSERT_NO_DIFF(expected, balancerDescr.ShortUtf8DebugString());
}
diff --git a/ydb/core/tx/schemeshard/ut_base.cpp b/ydb/core/tx/schemeshard/ut_base.cpp
index b7b7c8d2560..16fc52e7837 100644
--- a/ydb/core/tx/schemeshard/ut_base.cpp
+++ b/ydb/core/tx/schemeshard/ut_base.cpp
@@ -8065,7 +8065,7 @@ Y_UNIT_TEST_SUITE(TSchemeShardTest) {
auto event = runtime.GrabEdgeEvent<TEvSchemeShard::TEvModifySchemeTransactionResult>();
UNIT_ASSERT(event);
UNIT_ASSERT_VALUES_EQUAL(event->Record.GetTxId(), txId);
- CheckExpected(
+ CheckExpectedStatus(
{ NKikimrScheme::StatusMultipleModifications },
event->Record.GetStatus(), event->Record.GetReason());
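
The rename to CheckExpectedStatus reflects the commit's "check expected results also by reason" change: expectations are now {status, reason substring} pairs instead of bare status codes. A hedged, self-contained sketch of what such a matcher might look like (the real helper lives in the schemeshard test library):

#include <string>
#include <vector>

struct TExpectedResult {
    int Status;
    std::string ReasonFragment;  // empty => accept any reason text
};

bool MatchesExpected(const std::vector<TExpectedResult>& expected,
                     int status, const std::string& reason) {
    for (const auto& e : expected) {
        if (e.Status == status &&
            (e.ReasonFragment.empty() ||
             reason.find(e.ReasonFragment) != std::string::npos)) {
            return true;  // matched one of the allowed outcomes
        }
    }
    return false;
}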
diff --git a/ydb/core/tx/schemeshard/ut_extsubdomain.cpp b/ydb/core/tx/schemeshard/ut_extsubdomain.cpp
index a6b8eab0c43..78d148ab10e 100644
--- a/ydb/core/tx/schemeshard/ut_extsubdomain.cpp
+++ b/ydb/core/tx/schemeshard/ut_extsubdomain.cpp
@@ -14,27 +14,34 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
ui64 txId = 100;
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "PlanResolution: 50 "
- "Coordinators: 3 "
- "Mediators: 3 "
- "TimeCastBucketsPerMediator: 2 "
- "Name: \"USER_0\"",
- {NKikimrScheme::StatusInvalidParameter});
+ R"(
+ Name: "USER_0"
+ PlanResolution: 50
+ Coordinators: 3
+ Mediators: 3
+ TimeCastBucketsPerMediator: 2
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "only declaration at creation is allowed"}}
+ );
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "StoragePools { "
- " Name: \"/dc-1/users/tenant-1:hdd\" "
- " Kind: \"hdd\" "
- "} "
- "StoragePools { "
- " Name: \"/dc-1/users/tenant-1:hdd-1\" "
- " Kind: \"hdd-1\" "
- "} "
- "Name: \"USER_0\"",
- {NKikimrScheme::StatusInvalidParameter});
+ R"(
+ Name: "USER_0"
+ StoragePools {
+ Name: "/dc-1/users/tenant-1:hdd"
+ Kind: "hdd"
+ }
+ StoragePools {
+ Name: "/dc-1/users/tenant-1:hdd-1"
+ Kind: "hdd-1"
+ }
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "only declaration at creation is allowed"}}
+ );
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\"");
+ R"(Name: "USER_0")"
+ );
TestDescribeResult(DescribePath(runtime, "/MyRoot/USER_0"),
{NLs::PathExist});
@@ -48,7 +55,8 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
ui64 txId = 100;
AsyncMkDir(runtime, ++txId, "MyRoot", "dir");
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot/dir",
- "Name: \"USER_0\"");
+ R"(Name: "USER_0")"
+ );
env.TestWaitNotification(runtime, {txId, txId - 1});
@@ -70,25 +78,31 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
ui64 txId = 100;
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\"");
-
+ R"(Name: "USER_0")"
+ );
TestMkDir(runtime, ++txId, "/MyRoot/USER_0", "dir_0",
- {NKikimrScheme::StatusRedirectDomain});
+ {{NKikimrScheme::StatusRedirectDomain}});
TestCreateTable(runtime, ++txId, "/MyRoot/USER_0/dir_0",
- "Name: \"table_1\""
- "Columns { Name: \"RowId\" Type: \"Uint64\"}"
- "Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"RowId\"]",
- {NKikimrScheme::StatusRedirectDomain});
+ R"(
+ Name: "table_1"
+ Columns { Name: "RowId" Type: "Uint64"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["RowId"]
+ )",
+ {{NKikimrScheme::StatusRedirectDomain}}
+ );
TestCreateTable(runtime, ++txId, "/MyRoot/USER_0",
- "Name: \"table_0\""
- "Columns { Name: \"RowId\" Type: \"Uint64\"}"
- "Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"RowId\"]",
- {NKikimrScheme::StatusRedirectDomain});
+ R"(
+ Name: "table_0"
+ Columns { Name: "RowId" Type: "Uint64"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["RowId"]
+ )",
+ {{NKikimrScheme::StatusRedirectDomain}}
+ );
env.TestWaitNotification(runtime, {txId, txId - 1, txId - 2, txId - 3});
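
From here on the suite switches to Y_UNIT_TEST_FLAG, which effectively registers each test body twice -- once with the named flag false and once with it true -- so every scenario below runs under both the old and the new alter-extsubdomain behavior. The usage shape, taken from the tests that follow (the scenario name here is illustrative):

Y_UNIT_TEST_FLAG(SomeScenario, AlterDatabaseCreateHiveFirst) {
    TTestBasicRuntime runtime;
    TTestEnv env(runtime, TTestEnvOptions()
        .EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
    // ...the same steps are then exercised with the flag off and on
}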
@@ -105,14 +119,15 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
{NLs::InExternalSubdomain});
}
- Y_UNIT_TEST(CreateAndAlterWithoutTx) {
+ Y_UNIT_TEST_FLAG(CreateAndAlterWithoutEnablingTx, AlterDatabaseCreateHiveFirst) {
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
ui64 txId = 100;
TestMkDir(runtime, ++txId, "/MyRoot", "dir");
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot/dir",
- "Name: \"USER_0\"");
+ R"(Name: "USER_0")"
+ );
TestDescribeResult(DescribePath(runtime, "/MyRoot/dir/USER_0"),
{NLs::PathExist,
NLs::DomainKey(3, TTestTxConfig::SchemeShard),
@@ -122,51 +137,61 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
env.TestWaitNotification(runtime, {txId, txId - 1});
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot/dir",
- "ExternalSchemeShard: true "
- "Name: \"USER_0\"",
- {NKikimrScheme::StatusInvalidParameter});
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: true
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "ExtSubDomain without coordinators/mediators"}}
+ );
}
- Y_UNIT_TEST(CreateAndAlter) {
+ Y_UNIT_TEST_FLAG(CreateAndAlter, AlterDatabaseCreateHiveFirst) {
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
ui64 txId = 100;
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\"");
+ R"(Name: "USER_0")"
+ );
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "PlanResolution: 50 "
- "Coordinators: 3 "
- "Mediators: 3 "
- "TimeCastBucketsPerMediator: 2",
- {NKikimrScheme::StatusInvalidParameter});
+ R"(
+ Name: "USER_0"
+ PlanResolution: 50
+ Coordinators: 3
+ Mediators: 3
+ TimeCastBucketsPerMediator: 2
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "ExtSubDomain without ExternalSchemeShard"}}
+ );
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "StoragePools { "
- " Name: \"pool-1\" "
- " Kind: \"pool-kind-1\" "
- "} "
- "StoragePools { "
- " Name: \"pool-2\" "
- " Kind: \"pool-kind-2\" "
- "} "
- "StoragePools { "
- " Name: \"/dc-1/users/tenant-1:hdd\" "
- " Kind: \"hdd\" "
- "} "
- "StoragePools { "
- " Name: \"/dc-1/users/tenant-1:hdd-1\" "
- " Kind: \"hdd-1\" "
- "} "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2 "
- "ExternalSchemeShard: true "
- "Name: \"USER_0\"");
+ R"(
+ StoragePools {
+ Name: "pool-1"
+ Kind: "pool-kind-1"
+ }
+ StoragePools {
+ Name: "pool-2"
+ Kind: "pool-kind-2"
+ }
+ StoragePools {
+ Name: "/dc-1/users/tenant-1:hdd"
+ Kind: "hdd"
+ }
+ StoragePools {
+ Name: "/dc-1/users/tenant-1:hdd-1"
+ Kind: "hdd-1"
+ }
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ ExternalSchemeShard: true
+ Name: "USER_0"
+ )"
+ );
env.TestWaitNotification(runtime, {txId, txId - 1, txId - 2});
@@ -204,10 +229,13 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
NLs::Finished});
TestCreateTable(runtime, tenantSchemeShard, ++txId, "/MyRoot/USER_0/dir",
- "Name: \"table_1\""
- "Columns { Name: \"RowId\" Type: \"Uint64\"}"
- "Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"RowId\"]");
+ R"(
+ Name: "table_1"
+ Columns { Name: "RowId" Type: "Uint64"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["RowId"]
+ )"
+ );
env.TestWaitNotification(runtime, txId, tenantSchemeShard);
@@ -219,39 +247,46 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
NLs::Finished});
}
- Y_UNIT_TEST(CreateAndAlterAfter) {
+ Y_UNIT_TEST_FLAG(CreateAndAlterTwice, AlterDatabaseCreateHiveFirst) {
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
ui64 txId = 100;
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\"");
+ R"(Name: "USER_0")"
+ );
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "ExternalSchemeShard: true "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2 "
- "StoragePools { "
- " Name: \"pool-1\" "
- " Kind: \"hdd\" "
- "} ");
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ StoragePools {
+ Name: "pool-1"
+ Kind: "hdd"
+ }
+ )"
+ );
env.TestWaitNotification(runtime, {txId, txId - 1});
- TestAlterSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "StoragePools { "
- " Name: \"pool-1\" "
- " Kind: \"hdd\" "
- "} "
- "StoragePools { "
- " Name: \"pool-2\" "
- " Kind: \"hdd-1\" "
- "} ");
+ TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "USER_0"
+ StoragePools {
+ Name: "pool-1"
+ Kind: "hdd"
+ }
+ StoragePools {
+ Name: "pool-2"
+ Kind: "hdd-1"
+ }
+ )"
+ );
env.TestWaitNotification(runtime, txId);
@@ -302,15 +337,15 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
TestMkDir(runtime, ++txId, "/MyRoot", ".......a......");
TestMkDir(runtime, ++txId, "/MyRoot", ".............a");
- TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", "Name: \".\"", {NKikimrScheme::StatusSchemeError});
- TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", "Name: \"..\"", {NKikimrScheme::StatusSchemeError});
- TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", "Name: \"...\"", {NKikimrScheme::StatusSchemeError});
- TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", "Name: \"................\"", {NKikimrScheme::StatusSchemeError});
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", R"(Name: ".")", {NKikimrScheme::StatusSchemeError});
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", R"(Name: "..")", {NKikimrScheme::StatusSchemeError});
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", R"(Name: "...")", {NKikimrScheme::StatusSchemeError});
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", R"(Name: "................")", {NKikimrScheme::StatusSchemeError});
}
- Y_UNIT_TEST(CreateWithExtraPathSymbolsAllowed) {
+ Y_UNIT_TEST_FLAG(CreateWithExtraPathSymbolsAllowed, AlterDatabaseCreateHiveFirst) {
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
ui64 txId = 100;
TSchemeLimits lowLimits;
@@ -322,67 +357,81 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
RebootTablet(runtime, TTestTxConfig::SchemeShard, runtime.AllocateEdgeActor());
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER+0\"",
- {NKikimrScheme::StatusSchemeError});
+ R"(Name: "USER+0")",
+ {{NKikimrScheme::StatusSchemeError}}
+ );
env.TestWaitNotification(runtime, txId);
lowLimits.ExtraPathSymbolsAllowed = ".-+";
SetSchemeshardSchemaLimits(runtime, lowLimits);
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER+0\"");
+ R"(Name: "USER+0")"
+ );
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER+0\" "
- "ExternalSchemeShard: true "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2 "
- "StoragePools { "
- " Name: \"pool-1\" "
- " Kind: \"hdd\" "
- "} ");
+ R"(
+ Name: "USER+0"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ StoragePools {
+ Name: "pool-1"
+ Kind: "hdd"
+ }
+ )"
+ );
env.TestWaitNotification(runtime, {txId, txId - 1});
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER+0\" "
- "ExternalSchemeShard: true "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2 "
- "StoragePools { "
- " Name: \"pool-1\" "
- " Kind: \"hdd\" "
- "} ");
+ R"(
+ Name: "USER+0"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ StoragePools {
+ Name: "pool-1"
+ Kind: "hdd"
+ }
+ )"
+ );
env.TestWaitNotification(runtime, txId);
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER+0\" "
- "ExternalSchemeShard: false "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2 "
- "StoragePools { "
- " Name: \"pool-1\" "
- " Kind: \"hdd\" "
- "} ",
- {NKikimrScheme::StatusInvalidParameter});
+ R"(
+ Name: "USER+0"
+ ExternalSchemeShard: false
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ StoragePools {
+ Name: "pool-1"
+ Kind: "hdd"
+ }
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "ExternalSchemeShard could only be added, not removed"}}
+ );
env.TestWaitNotification(runtime, txId);
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER+0\" "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2 "
- "StoragePools { "
- " Name: \"pool-1\" "
- " Kind: \"hdd\" "
- "} ");
+ R"(
+ Name: "USER+0"
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ StoragePools {
+ Name: "pool-1"
+ Kind: "hdd"
+ }
+ )"
+ );
env.TestWaitNotification(runtime, txId);
ui64 tenantSchemeShard = 0;
@@ -391,7 +440,7 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
NLs::IsExternalSubDomain("USER+0"),
NLs::ExtractTenantSchemeshard(&tenantSchemeShard)});
- TestMkDir(runtime, tenantSchemeShard, ++txId, "/MyRoot/USER+0", "Dir__!", {NKikimrScheme::StatusSchemeError});
+ TestMkDir(runtime, tenantSchemeShard, ++txId, "/MyRoot/USER+0", "Dir__!", {{NKikimrScheme::StatusSchemeError}});
env.TestWaitNotification(runtime, txId, tenantSchemeShard);
lowLimits.ExtraPathSymbolsAllowed = ".-+!";
@@ -413,135 +462,516 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
NLs::Finished});
}
- Y_UNIT_TEST(AlterWithWrongParams) {
+ Y_UNIT_TEST_FLAG(CreateAndAlterWithExternalHive, AlterDatabaseCreateHiveFirst) {
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
ui64 txId = 100;
+ // create ext-subdomain point
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\"");
+ R"(Name: "USER_0")"
+ );
env.TestWaitNotification(runtime, txId);
+ ui64 rootHiveId = 0;
+ {
+ // no shards inside root domain
+ TestDescribeResult(DescribePath(runtime, "/MyRoot"), {
+ NLs::ShardsInsideDomain(0),
+ NLs::ExtractDomainHive(&rootHiveId)
+ });
+
+ // no tablets in root hive
+ UNIT_ASSERT_EQUAL(env.GetHiveState()->Tablets.size(), 0);
+ }
+
+ // construct ext-subdomain at its point, with domain hive
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "ExternalSchemeShard: true "
- "PlanResolution: 0 "
- "Coordinators: 1 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2",
- {NKikimrScheme::StatusInvalidParameter});
+ // (minimally correct ExtSubDomain settings + ExternalHive)
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ StoragePools {
+ Name: "pool-1"
+ Kind: "hdd"
+ }
+
+ ExternalHive: true
+ )"
+ );
+ env.TestWaitNotification(runtime, txId);
+ struct ExpectedValues {
+ ui64 SubdomainVersion;
+ ui64 TabletsInsideRootHive;
+ ui64 TabletsInsideExtSubdomainHive;
+ };
+
+ ExpectedValues expectedFlagSet;
+ expectedFlagSet.SubdomainVersion = 3;
+ expectedFlagSet.TabletsInsideRootHive = 1;
+ expectedFlagSet.TabletsInsideExtSubdomainHive = 3;
+
+ ExpectedValues expectedFlagUnset;
+ expectedFlagUnset.SubdomainVersion = 2;
+ expectedFlagUnset.TabletsInsideRootHive = 4;
+ expectedFlagUnset.TabletsInsideExtSubdomainHive = 0;
+
+ const auto& expected = AlterDatabaseCreateHiveFirst ? expectedFlagSet : expectedFlagUnset;
+
+ ui64 subhiveId = 0;
+
+ // check that all requested domain innards are created:
+ // 1 coordinator, 1 mediator, 1 schemeshard and 1 hive
+ TestDescribeResult(DescribePath(runtime, "/MyRoot/USER_0"), {
+ NLs::PathExist,
+ NLs::IsExternalSubDomain("USER_0"),
+ NLs::ShardsInsideDomain(4),
+ NLs::ExtractDomainHive(&subhiveId),
+ NLs::DomainSettings(50, 2),
+ NLs::SubDomainVersion(expected.SubdomainVersion)
+ });
+
+ // ...and there are no new shards inside the root domain
+ TestDescribeResult(DescribePath(runtime, "/MyRoot"), {
+ NLs::ShardsInsideDomain(0)
+ });
+
+ // ...but extsubdomain hive is created
+ UNIT_ASSERT(subhiveId != 0
+ && subhiveId != (ui64)-1
+ && subhiveId != TTestTxConfig::Hive
+ );
+ UNIT_ASSERT(subhiveId != rootHiveId);
+
+ // ...and the root hive holds the new subdomain hive (and, with the flag unset, the other subdomain tablets as well)
+ UNIT_ASSERT_VALUES_EQUAL_C(expected.TabletsInsideRootHive, env.GetHiveState()->Tablets.size(), "-- unexpected tablet count in global hive");
+ UNIT_ASSERT_VALUES_EQUAL(ETabletType::Hive, env.GetHiveState()->Tablets.begin()->second.Type);
+
+ // ...and, with the flag set, the extsubdomain hive holds all other tablets (1 coordinator, 1 mediator, 1 schemeshard -- 3 total)
+ {
+ TActorId senderA = runtime.AllocateEdgeActor();
+ runtime.SendToPipe(subhiveId, senderA, new TEvHive::TEvRequestHiveInfo());
+ TAutoPtr<IEventHandle> handle;
+ TEvHive::TEvResponseHiveInfo* response = runtime.GrabEdgeEventRethrow<TEvHive::TEvResponseHiveInfo>(handle);
+
+ UNIT_ASSERT_VALUES_EQUAL_C(expected.TabletsInsideExtSubdomainHive, response->Record.GetTablets().size(), "-- unexpected tablet count in extsubdomain hive");
+
+ if (AlterDatabaseCreateHiveFirst) {
+ std::array<ETabletType::EType, 3> expectedTypes = {
+ ETabletType::SchemeShard,
+ ETabletType::Coordinator,
+ ETabletType::Mediator
+ };
+
+ for (const auto& tablet : response->Record.GetTablets()) {
+ Cdbg << "extsubdomain hive tablet, type " << tablet.GetTabletType() << Endl;
+ auto found = std::find(expectedTypes.begin(), expectedTypes.end(), tablet.GetTabletType());
+ UNIT_ASSERT_C(found != expectedTypes.end(), "-- extsubdomain hive holds tablet of unexpected type " << tablet.GetTabletType());
+ }
+ }
+ }
+ }
+
+ Y_UNIT_TEST_FLAG(AlterCantChangeExternalSchemeShard, AlterDatabaseCreateHiveFirst) {
+ TTestBasicRuntime runtime;
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
+ ui64 txId = 100;
+
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", R"(Name: "USER_0")");
+ env.TestWaitNotification(runtime, txId);
+
+ // Minimally correct ExtSubDomain settings
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "ExternalSchemeShard: true "
- "PlanResolution: 50 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2",
- {NKikimrScheme::StatusInvalidParameter});
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ StoragePools {
+ Name: "pool-1"
+ Kind: "hdd"
+ }
+ )"
+ );
+ env.TestWaitNotification(runtime, txId);
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "ExternalSchemeShard: true "
- "PlanResolution: 50 "
- "Coordinators: 0 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2",
- {NKikimrScheme::StatusInvalidParameter});
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: false
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "ExternalSchemeShard could only be added, not removed"}}
+ );
+ }
+ Y_UNIT_TEST_FLAG(AlterCantChangeExternalHive, AlterDatabaseCreateHiveFirst) {
+ TTestBasicRuntime runtime;
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
+ ui64 txId = 100;
+
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", R"(Name: "USER_0")");
+ env.TestWaitNotification(runtime, txId);
+
+ // Minimally correct ExtSubDomain settings
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "ExternalSchemeShard: true "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 0 "
- "TimeCastBucketsPerMediator: 2",
- {NKikimrScheme::StatusInvalidParameter});
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ StoragePools {
+ Name: "pool-1"
+ Kind: "hdd"
+ }
+
+ ExternalHive: true
+ )"
+ );
+ env.TestWaitNotification(runtime, txId);
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "ExternalSchemeShard: true "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "TimeCastBucketsPerMediator: 2",
- {NKikimrScheme::StatusInvalidParameter});
+ R"(
+ Name: "USER_0"
+ ExternalHive: false
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "ExternalHive could only be added, not removed"}}
+ );
+ }
+
+ Y_UNIT_TEST_FLAG(AlterCantChangeExternalSysViewProcessor, AlterDatabaseCreateHiveFirst) {
+ TTestBasicRuntime runtime;
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
+ ui64 txId = 100;
+
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", R"(Name: "USER_0")");
+ env.TestWaitNotification(runtime, txId);
+ // Minimally correct ExtSubDomain settings
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "ExternalSchemeShard: false "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 1 ",
- {NKikimrScheme::StatusInvalidParameter});
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ StoragePools {
+ Name: "pool-1"
+ Kind: "hdd"
+ }
+
+ ExternalSysViewProcessor: true
+ )"
+ );
+ env.TestWaitNotification(runtime, txId);
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 1 ",
- {NKikimrScheme::StatusInvalidParameter});
+ R"(
+ Name: "USER_0"
+ ExternalSysViewProcessor: false
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "ExternalSysViewProcessor could only be added, not removed"}}
+ );
+ }
+ Y_UNIT_TEST_FLAG(AlterCantChangeSetParams, AlterDatabaseCreateHiveFirst) {
+ TTestBasicRuntime runtime;
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
+ ui64 txId = 100;
+
+ // create and set up the extsubdomain (by altering it for the first time),
+ // then try to change its parameters with a second alter
+
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", R"(Name: "USER_0")");
+ env.TestWaitNotification(runtime, txId);
+ // Minimally correct ExtSubDomain settings
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_1\" "
- "ExternalSchemeShard: true "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 1 ",
- {NKikimrScheme::StatusPathDoesNotExist});
-
- TestMkDir(runtime, ++txId, "/MyRoot", "Dir");
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ StoragePools {
+ Name: "pool-1"
+ Kind: "hdd"
+ }
+ )"
+ );
env.TestWaitNotification(runtime, txId);
+
+ // Alter treats the input TSubDomainSettings structure as a diff against the existing state
+ // (but most parameters can't be changed once set to non-default values)
+
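+ // Illustrative sketch of the diff semantics (values assumed, mirroring the
+ // cases exercised below): with PlanResolution already set to 50, an alter
+ // carrying only
+ //   Name: "USER_0"
+ //   PlanResolution: 50
+ // re-applies the current value and is a no-op, while
+ //   Name: "USER_0"
+ //   PlanResolution: 15
+ // is rejected with "could be set only once".
+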
+ std::vector<std::tuple<TString, TString, TString, TString>> params = {
+ // {param-name, default-value, current-value, next-value}
+ {"PlanResolution", "0", "50", "15"},
+ {"TimeCastBucketsPerMediator", "0", "2", "3"},
+ // {"Coordinators", 1, 2},
+ // {"Mediators", 1, 2},
+ };
+
+ constexpr char TextTemplateSubDomainSettingsSetSingleParameter[] = R"(
+ Name: "USER_0"
+ %s: %s
+ )";
+
+ auto MakeSubdomainSettings = [=](const TString& name, const TString& value) {
+ return Sprintf(TextTemplateSubDomainSettingsSetSingleParameter, name.c_str(), value.c_str());
+ };
+
+ for (const auto& i : params) {
+ const auto& [name, defaultVal, current, next] = i;
+
+ Cdbg << "==== parameter " << name << Endl;
+
+ {
+ // accept current value -- noop
+ Cdbg << "==== parameter " << name << ": set current value " << Endl;
+ TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
+ MakeSubdomainSettings(name, current)
+ );
+ // reject default value -- can't unset
+ Cdbg << "==== parameter " << name << ": set default value" << Endl;
+ TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
+ MakeSubdomainSettings(name, defaultVal),
+ {{NKikimrScheme::StatusInvalidParameter, "could be set only once"}}
+ );
+ // reject next value -- can't change
+ Cdbg << "==== parameter " << name << ": set next value " << Endl;
+ TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
+ MakeSubdomainSettings(name, next),
+ {{NKikimrScheme::StatusInvalidParameter, "could be set only once"}}
+ );
+ }
+ }
+ }
+
+ Y_UNIT_TEST_FLAG(AlterRequiresParamCombinations, AlterDatabaseCreateHiveFirst) {
+ TTestBasicRuntime runtime;
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
+ ui64 txId = 100;
+
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"Dir\"",
- {NKikimrScheme::StatusNameConflict});
-
- TestCreateTable(runtime, ++txId, "/MyRoot",
- "Name: \"table\""
- "Columns { Name: \"RowId\" Type: \"Uint64\"}"
- "Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"RowId\"]"
- );
+ R"(
+ Name: "USER_0"
+ )"
+ );
env.TestWaitNotification(runtime, txId);
- TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"table\"",
- {NKikimrScheme::StatusNameConflict});
- TestCreateSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"subdomain\"");
- env.TestWaitNotification(runtime, txId);
- TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"subdomain\"",
- {NKikimrScheme::StatusNameConflict});
+ // PlanResolution
+ {
+ TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: true
+ PlanResolution: 0
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "can not create ExtSubDomain with PlanResolution not set"}}
+ );
+ }
+ // Coordinators and Mediators
+ {
+ TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "can not create ExtSubDomain with zero Coordinators"}}
+ );
+
+ TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 0
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "can not create ExtSubDomain with zero Coordinators"}}
+ );
+
+ TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 0
+ TimeCastBucketsPerMediator: 2
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "can not create ExtSubDomain with zero Mediators"}}
+ );
+
+ TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 1
+ TimeCastBucketsPerMediator: 2
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "can not create ExtSubDomain with zero Mediators"}}
+ );
+ }
+
+ // TimeCastBucketsPerMediator (must be set whenever coordinators/mediators are requested)
+ {
+ TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: false
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "can not create ExtSubDomain with TimeCastBucketsPerMediator not set"}}
+ );
+
+ TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "USER_0"
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ )",
+ {{NKikimrScheme::StatusInvalidParameter, "can not create ExtSubDomain with TimeCastBucketsPerMediator not set"}}
+ );
+ }
+ }
+
+ Y_UNIT_TEST_FLAG(AlterNameConflicts, AlterDatabaseCreateHiveFirst) {
+ TTestBasicRuntime runtime;
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
+ ui64 txId = 100;
+
+ // alter a non-existent domain
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"subdomain\" "
- "ExternalSchemeShard: true "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2",
- {NKikimrScheme::StatusNameConflict});
+ R"(
+ Name: "USER_1"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ )",
+ {NKikimrScheme::StatusPathDoesNotExist}
+ );
+ }
- TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"extSubdomain\"");
- env.TestWaitNotification(runtime, txId);
- TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"extSubdomain\"",
- {NKikimrScheme::StatusAlreadyExists});
+ Y_UNIT_TEST_FLAG(CreateNameConflicts, AlterDatabaseCreateHiveFirst) {
+ TTestBasicRuntime runtime;
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
+ ui64 txId = 100;
+
+ // can't create an extsubdomain over an existing directory
+ {
+ TestMkDir(runtime, ++txId, "/MyRoot", "Dir");
+ env.TestWaitNotification(runtime, txId);
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "Dir"
+ )",
+ {NKikimrScheme::StatusNameConflict}
+ );
+ }
+
+ // can't create an extsubdomain over an existing table
+ {
+ TestCreateTable(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "table"
+ Columns { Name: "RowId" Type: "Uint64"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["RowId"]
+ )"
+ );
+ env.TestWaitNotification(runtime, txId);
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "table"
+ )",
+ {NKikimrScheme::StatusNameConflict}
+ );
+ }
+
+ // can't create an extsubdomain over an existing subdomain
+ // and can't alter a plain subdomain as an extsubdomain
+ {
+ TestCreateSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "subdomain"
+ )"
+ );
+ env.TestWaitNotification(runtime, txId);
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "subdomain"
+ )",
+ {NKikimrScheme::StatusNameConflict}
+ );
+
+ TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "subdomain"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ )",
+ {NKikimrScheme::StatusNameConflict}
+ );
+ }
+
+ // can't create an extsubdomain over an existing extsubdomain
+ {
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "extSubdomain"
+ )"
+ );
+ env.TestWaitNotification(runtime, txId);
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
+ R"(
+ Name: "extSubdomain"
+ )",
+ {NKikimrScheme::StatusAlreadyExists}
+ );
+ }
}
- Y_UNIT_TEST(NothingInsideGSS) {
+ Y_UNIT_TEST_FLAG(NothingInsideGSS, AlterDatabaseCreateHiveFirst) {
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
ui64 txId = 100;
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\"");
+ R"(Name: "USER_0")"
+ );
env.TestWaitNotification(runtime, txId);
auto testCreations = [&] () {
//dir
- TestMkDir(runtime, ++txId, "/MyRoot/USER_0", "DirA", {NKikimrScheme::StatusRedirectDomain});
+ TestMkDir(runtime, ++txId, "/MyRoot/USER_0", "DirA", {{NKikimrScheme::StatusRedirectDomain}});
//table
TestCreateTable(runtime, ++txId, "/MyRoot/USER_0", R"(
@@ -549,7 +979,7 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
Columns { Name: "key" Type: "Uint64" }
Columns { Name: "value" Type: "Utf8" }
KeyColumnNames: ["key"]
- )", {NKikimrScheme::StatusRedirectDomain});
+ )", {{NKikimrScheme::StatusRedirectDomain}});
//bsv
TestCreateBlockStoreVolume(runtime, ++txId, "/MyRoot/USER_0", R"(
@@ -560,34 +990,34 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
BlockSize: 4096
Partitions { BlockCount: 16 }
}
- )", {NKikimrScheme::StatusRedirectDomain});
+ )", {{NKikimrScheme::StatusRedirectDomain}});
//extSub
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot/USER_0", R"(
Name: "ExtSubDomain"
- )", {NKikimrScheme::StatusRedirectDomain});
+ )", {{NKikimrScheme::StatusRedirectDomain}});
//sub
TestCreateSubDomain(runtime, ++txId, "/MyRoot/USER_0", R"(
Name: "SubDomain"
- )", {NKikimrScheme::StatusRedirectDomain});
+ )", {{NKikimrScheme::StatusRedirectDomain}});
//kesus
TestCreateKesus(runtime, ++txId, "/MyRoot/USER_0", R"(
Name: "Kesus"
- )", {NKikimrScheme::StatusRedirectDomain});
+ )", {{NKikimrScheme::StatusRedirectDomain}});
//rtmr
TestCreateRtmrVolume(runtime, ++txId, "/MyRoot/USER_0", R"(
Name: "rtmr1"
PartitionsCount: 0
- )", {NKikimrScheme::StatusRedirectDomain});
+ )", {{NKikimrScheme::StatusRedirectDomain}});
//solomon
TestCreateSolomon(runtime, ++txId, "/MyRoot/USER_0", R"(
Name: "Solomon"
PartitionCount: 40
- )", {NKikimrScheme::StatusRedirectDomain});
+ )", {{NKikimrScheme::StatusRedirectDomain}});
//pq
TestCreatePQGroup(runtime, ++txId, "/MyRoot/USER_0", R"(
@@ -595,47 +1025,54 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
TotalGroupCount: 2
PartitionPerTablet: 1
PQTabletConfig: {PartitionConfig { LifetimeSeconds : 10 } }
- )", {NKikimrScheme::StatusRedirectDomain});
+ )", {{NKikimrScheme::StatusRedirectDomain}});
};
testCreations();
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "ExternalSchemeShard: true "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2 "
- "StoragePools { "
- " Name: \"pool-1\" "
- " Kind: \"hdd\" "
- "} ");
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ StoragePools {
+ Name: "pool-1"
+ Kind: "hdd"
+ }
+ )"
+ );
env.TestWaitNotification(runtime, txId);
testCreations();
}
- Y_UNIT_TEST(Drop) {
+ Y_UNIT_TEST_FLAG(Drop, AlterDatabaseCreateHiveFirst) {
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
ui64 txId = 100;
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\"");
+ R"(Name: "USER_0")"
+ );
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "ExternalSchemeShard: true "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2 "
- "StoragePools { "
- " Name: \"pool-1\" "
- " Kind: \"hdd\" "
- "} ");
+ R"(
+ Name: "USER_0"
+ ExternalSchemeShard: true
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ StoragePools {
+ Name: "pool-1"
+ Kind: "hdd"
+ }
+ )"
+ );
env.TestWaitNotification(runtime, {txId, txId - 1});
UNIT_ASSERT(CheckLocalRowExists(runtime, TTestTxConfig::SchemeShard, "Paths", "Id", 2));
@@ -653,11 +1090,14 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
TestMkDir(runtime, tenantSchemeShard, ++txId, "/MyRoot/USER_0", "dir");
TestCreateTable(runtime, tenantSchemeShard, ++txId, "/MyRoot/USER_0/dir",
- "Name: \"table_1\""
- "Columns { Name: \"RowId\" Type: \"Uint64\"}"
- "Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"RowId\"]"
- "UniformPartitionsCount: 2");
+ R"(
+ Name: "table_1"
+ Columns { Name: "RowId" Type: "Uint64"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["RowId"]
+ UniformPartitionsCount: 2
+ )"
+ );
env.TestWaitNotification(runtime, {txId, txId -1}, tenantSchemeShard);
@@ -696,9 +1136,9 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
UNIT_ASSERT(!CheckLocalRowExists(runtime, TTestTxConfig::SchemeShard, "Paths", "Id", 2));
}
- Y_UNIT_TEST(SysViewProcessorSync) {
+ Y_UNIT_TEST_FLAG(SysViewProcessorSync, AlterDatabaseCreateHiveFirst) {
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
ui64 txId = 100;
NSchemeShard::TSchemeLimits lowLimits;
@@ -706,30 +1146,37 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
SetSchemeshardSchemaLimits(runtime, lowLimits);
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\"");
+ R"(Name: "USER_0")"
+ );
// check that limits have a power, try create 4 shards
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "PlanResolution: 50 "
- "Coordinators: 2 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2 "
- "ExternalSchemeShard: true "
- , {NKikimrScheme::StatusResourceExhausted});
+ R"(
+ Name: "USER_0"
+ PlanResolution: 50
+ Coordinators: 2
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ ExternalSchemeShard: true
+ )",
+ {{NKikimrScheme::StatusResourceExhausted}}
+ );
// create 3 shards
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2 "
- "ExternalSchemeShard: true "
- "StoragePools { "
- " Name: \"/dc-1/users/tenant-1:hdd\" "
- " Kind: \"hdd\" "
- "} ");
+ R"(
+ Name: "USER_0"
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ ExternalSchemeShard: true
+ StoragePools {
+ Name: "/dc-1/users/tenant-1:hdd"
+ Kind: "hdd"
+ }
+ )"
+ );
env.TestWaitNotification(runtime, {txId, txId - 1});
lowLimits.MaxShardsInPath = 2;
@@ -737,8 +1184,11 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
// one more, but for free
TestAlterExtSubDomain(runtime, ++txId, "/MyRoot",
- "Name: \"USER_0\" "
- "ExternalSysViewProcessor: true ");
+ R"(
+ Name: "USER_0"
+ ExternalSysViewProcessor: true
+ )"
+ );
env.TestWaitNotification(runtime, txId);
@@ -766,10 +1216,13 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
RebootTablet(runtime, tenantSchemeShard, runtime.AllocateEdgeActor());
TestCreateTable(runtime, tenantSchemeShard, ++txId, "/MyRoot/USER_0",
- "Name: \"table\""
- "Columns { Name: \"RowId\" Type: \"Uint64\"}"
- "Columns { Name: \"Value\" Type: \"Utf8\"}"
- "KeyColumnNames: [\"RowId\"]");
+ R"(
+ Name: "table"
+ Columns { Name: "RowId" Type: "Uint64"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["RowId"]
+ )"
+ );
env.TestWaitNotification(runtime, txId, tenantSchemeShard);
@@ -785,9 +1238,9 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
UNIT_ASSERT_EQUAL(tenantSVP, tenantSVPOnTSS);
}
- Y_UNIT_TEST(SchemeQuotas) {
+ Y_UNIT_TEST_FLAG(SchemeQuotas, AlterDatabaseCreateHiveFirst) {
TTestBasicRuntime runtime;
- TTestEnv env(runtime);
+ TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst));
ui64 txId = 100;
TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", R"(
@@ -872,7 +1325,7 @@ Y_UNIT_TEST_SUITE(TSchemeShardExtSubDomainTest) {
KeyColumnNames: ["key"]
)", {NKikimrScheme::StatusAccepted});
- // Quotas consuption is persistent, on reboot they should stay consumed
+ // Quota consumption is persistent: consumed quotas should stay consumed across reboots
{
TActorId sender = runtime.AllocateEdgeActor();
RebootTablet(runtime, tenantSchemeShard, sender);
diff --git a/ydb/core/tx/schemeshard/ut_extsubdomain_reboots.cpp b/ydb/core/tx/schemeshard/ut_extsubdomain_reboots.cpp
index cf819403e8f..c579cef26f1 100644
--- a/ydb/core/tx/schemeshard/ut_extsubdomain_reboots.cpp
+++ b/ydb/core/tx/schemeshard/ut_extsubdomain_reboots.cpp
@@ -13,12 +13,14 @@ Y_UNIT_TEST_SUITE(TSchemeShardTestExtSubdomainReboots) {
Y_UNIT_TEST(Fake) {
}
- Y_UNIT_TEST(CreateExternalSubdomain) {
+ Y_UNIT_TEST_FLAG(CreateExternalSubdomain, AlterDatabaseCreateHiveFirst) {
TTestWithReboots t;
+ t.GetTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst);
t.Run([&](TTestActorRuntime& runtime, bool& activeZone) {
TestCreateExtSubDomain(runtime, ++t.TxId, "/MyRoot",
- "Name: \"USER_0\"");
+ R"(Name: "USER_0")"
+ );
t.TestEnv->TestWaitNotification(runtime, t.TxId);
{
@@ -29,7 +31,8 @@ Y_UNIT_TEST_SUITE(TSchemeShardTestExtSubdomainReboots) {
NLs::DomainKey(3, TTestTxConfig::SchemeShard),
NLs::DomainCoordinators({}),
NLs::DomainMediators({}),
- NLs::DomainSchemeshard(0)
+ NLs::DomainSchemeshard(0),
+ NLs::DomainHive(0)
});
TestDescribeResult(DescribePath(runtime, "/MyRoot"),
{NLs::ChildrenCount(2)});
@@ -38,36 +41,131 @@ Y_UNIT_TEST_SUITE(TSchemeShardTestExtSubdomainReboots) {
}
TestAlterExtSubDomain(runtime, ++t.TxId, "/MyRoot",
- "StoragePools { "
- " Name: \"tenant-1:hdd\" "
- " Kind: \"hdd\" "
- "} "
- "PlanResolution: 50 "
- "Coordinators: 3 "
- "Mediators: 2 "
- "TimeCastBucketsPerMediator: 2 "
- "ExternalSchemeShard: true "
- "Name: \"USER_0\"");
+ R"(
+ Name: "USER_0"
+
+ StoragePools {
+ Name: "tenant-1:hdd"
+ Kind: "hdd"
+ }
+ PlanResolution: 50
+ Coordinators: 3
+ Mediators: 2
+ TimeCastBucketsPerMediator: 2
+
+ ExternalHive: true
+ ExternalSchemeShard: true
+ )"
+ );
+ t.TestEnv->TestWaitNotification(runtime, t.TxId);
+
+ {
+ TInactiveZone inactive(activeZone);
+
+ // NOTE: AlterDatabaseCreateHiveFirst creates system tablets in a child hive, AlterDatabaseGen1 creates them in the root hive
+ ui64 subdomainHiveTablets = TTestTxConfig::FakeHiveTablets + (AlterDatabaseCreateHiveFirst ? TFakeHiveState::TABLETS_PER_CHILD_HIVE : 1);
+ ui64 subdomainSchemeshard = subdomainHiveTablets;
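+
+ // For illustration (symbolic -- the numeric constants live in TTestTxConfig and TFakeHiveState):
+ //   flag set:   subdomain schemeshard id = FakeHiveTablets + TABLETS_PER_CHILD_HIVE
+ //               (subdomain system tablets are booked in the child hive's id range)
+ //   flag unset: subdomain schemeshard id = FakeHiveTablets + 1
+ //               (the hive sits at FakeHiveTablets, the rest are booked in the root hive)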
+
+ // check scheme from root schemeshard
+ TestDescribeResult(DescribePath(runtime, "/MyRoot/USER_0"),
+ {NLs::PathExist,
+ NLs::IsExternalSubDomain("USER_0"),
+ NLs::DomainKey(3, TTestTxConfig::SchemeShard),
+ NLs::ShardsInsideDomain(7),
+ // internal knowledge of shard declaration sequence is used here
+ NLs::DomainHive(TTestTxConfig::FakeHiveTablets),
+ NLs::DomainSchemeshard(subdomainSchemeshard),
+ NLs::DomainCoordinators({subdomainHiveTablets+1, subdomainHiveTablets+2, subdomainHiveTablets+3}),
+ NLs::DomainMediators({subdomainHiveTablets+4, subdomainHiveTablets+5}),
+ });
+
+ // check scheme from extsubdomain schemeshard
+ TestDescribeResult(DescribePath(runtime, subdomainSchemeshard, "/MyRoot/USER_0"),
+ {NLs::PathExist,
+ NLs::IsSubDomain("MyRoot/USER_0"),
+ NLs::DomainKey(3, TTestTxConfig::SchemeShard),
+ NLs::ShardsInsideDomain(7),
+ // internal knowledge of shard declaration sequence is used here
+ NLs::DomainHive(TTestTxConfig::FakeHiveTablets),
+ NLs::DomainSchemeshard(subdomainSchemeshard),
+ NLs::DomainCoordinators({subdomainHiveTablets+1, subdomainHiveTablets+2, subdomainHiveTablets+3}),
+ NLs::DomainMediators({subdomainHiveTablets+4, subdomainHiveTablets+5}),
+ });
+ TestDescribeResult(DescribePath(runtime, "/MyRoot"),
+ {NLs::ChildrenCount(2)});
+ }
+
+ });
+ }
+
+ Y_UNIT_TEST_FLAG(CreateExternalSubdomainWithoutHive, AlterDatabaseCreateHiveFirst) {
+ TTestWithReboots t;
+ t.GetTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst);
+ t.Run([&](TTestActorRuntime& runtime, bool& activeZone) {
+
+ TestCreateExtSubDomain(runtime, ++t.TxId, "/MyRoot",
+ R"(Name: "USER_0")"
+ );
+ t.TestEnv->TestWaitNotification(runtime, t.TxId);
+
+ {
+ TInactiveZone inactive(activeZone);
+ TestDescribeResult(DescribePath(runtime, "/MyRoot/USER_0"),
+ {NLs::PathExist,
+ NLs::IsExternalSubDomain("USER_0"),
+ NLs::DomainKey(3, TTestTxConfig::SchemeShard),
+ NLs::DomainCoordinators({}),
+ NLs::DomainMediators({}),
+ NLs::DomainSchemeshard(0),
+ NLs::DomainHive(0)
+ });
+ TestDescribeResult(DescribePath(runtime, "/MyRoot"),
+ {NLs::ChildrenCount(2)});
+ UNIT_ASSERT(CheckLocalRowExists(runtime, TTestTxConfig::SchemeShard, "Paths", "Id", 3));
+ UNIT_ASSERT(CheckLocalRowExists(runtime, TTestTxConfig::SchemeShard, "SubDomains", "PathId", 3));
+ }
+
+ TestAlterExtSubDomain(runtime, ++t.TxId, "/MyRoot",
+ R"(
+ StoragePools {
+ Name: "tenant-1:hdd"
+ Kind: "hdd"
+ }
+ PlanResolution: 50
+ Coordinators: 3
+ Mediators: 2
+ TimeCastBucketsPerMediator: 2
+ ExternalSchemeShard: true
+ Name: "USER_0"
+ )"
+ );
t.TestEnv->TestWaitNotification(runtime, t.TxId);
{
TInactiveZone inactive(activeZone);
+
+ // check scheme from root schemeshard
TestDescribeResult(DescribePath(runtime, "/MyRoot/USER_0"),
{NLs::PathExist,
NLs::IsExternalSubDomain("USER_0"),
NLs::DomainKey(3, TTestTxConfig::SchemeShard),
- NLs::DomainCoordinators({TTestTxConfig::FakeHiveTablets, TTestTxConfig::FakeHiveTablets+1, TTestTxConfig::FakeHiveTablets+2}),
- NLs::DomainMediators({TTestTxConfig::FakeHiveTablets+3, TTestTxConfig::FakeHiveTablets+4}),
- NLs::DomainSchemeshard(TTestTxConfig::FakeHiveTablets+5)
+ NLs::ShardsInsideDomain(6),
+ // internal knowledge of shard declaration sequence is used here
+ NLs::DomainSchemeshard(TTestTxConfig::FakeHiveTablets),
+ NLs::DomainCoordinators({TTestTxConfig::FakeHiveTablets+1, TTestTxConfig::FakeHiveTablets+2, TTestTxConfig::FakeHiveTablets+3}),
+ NLs::DomainMediators({TTestTxConfig::FakeHiveTablets+4, TTestTxConfig::FakeHiveTablets+5}),
});
- TestDescribeResult(DescribePath(runtime, TTestTxConfig::FakeHiveTablets+5, "/MyRoot/USER_0"),
+ // check scheme from extsubdomain schemeshard
+ TestDescribeResult(DescribePath(runtime, TTestTxConfig::FakeHiveTablets, "/MyRoot/USER_0"),
{NLs::PathExist,
NLs::IsSubDomain("MyRoot/USER_0"),
NLs::DomainKey(3, TTestTxConfig::SchemeShard),
- NLs::DomainCoordinators({TTestTxConfig::FakeHiveTablets, TTestTxConfig::FakeHiveTablets+1, TTestTxConfig::FakeHiveTablets+2}),
- NLs::DomainMediators({TTestTxConfig::FakeHiveTablets+3, TTestTxConfig::FakeHiveTablets+4}),
- NLs::DomainSchemeshard(TTestTxConfig::FakeHiveTablets+5)
+ NLs::ShardsInsideDomain(6),
+ // internal knowledge of shard declaration sequence is used here
+ NLs::DomainSchemeshard(TTestTxConfig::FakeHiveTablets),
+ NLs::DomainCoordinators({TTestTxConfig::FakeHiveTablets+1, TTestTxConfig::FakeHiveTablets+2, TTestTxConfig::FakeHiveTablets+3}),
+ NLs::DomainMediators({TTestTxConfig::FakeHiveTablets+4, TTestTxConfig::FakeHiveTablets+5}),
});
TestDescribeResult(DescribePath(runtime, "/MyRoot"),
{NLs::ChildrenCount(2)});
@@ -76,12 +174,14 @@ Y_UNIT_TEST_SUITE(TSchemeShardTestExtSubdomainReboots) {
});
}
- Y_UNIT_TEST(CreateForceDrop) {
+ Y_UNIT_TEST_FLAG(CreateForceDrop, AlterDatabaseCreateHiveFirst) {
TTestWithReboots t;
+ t.GetTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst);
t.Run([&](TTestActorRuntime& runtime, bool& activeZone) {
AsyncCreateExtSubDomain(runtime, ++t.TxId, "/MyRoot",
- "Name: \"USER_0\"");
+ R"(Name: "USER_0")"
+ );
t.TestEnv->ReliablePropose(runtime, ForceDropExtSubDomainRequest(++t.TxId, "/MyRoot", "USER_0"),
{NKikimrScheme::StatusAccepted});
t.TestEnv->TestWaitNotification(runtime, {t.TxId, t.TxId-1});
@@ -98,27 +198,32 @@ Y_UNIT_TEST_SUITE(TSchemeShardTestExtSubdomainReboots) {
});
}
- Y_UNIT_TEST(AlterForceDrop) {
+ Y_UNIT_TEST_FLAG(AlterForceDrop, AlterDatabaseCreateHiveFirst) {
TTestWithReboots t;
+ t.GetTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst);
t.Run([&](TTestActorRuntime& runtime, bool& activeZone) {
{
TInactiveZone inactive(activeZone);
TestCreateExtSubDomain(runtime, ++t.TxId, "/MyRoot",
- "Name: \"USER_0\"");
+ R"(Name: "USER_0")"
+ );
t.TestEnv->TestWaitNotification(runtime, {t.TxId, t.TxId-1});
}
AsyncAlterExtSubDomain(runtime, ++t.TxId, "/MyRoot",
- "StoragePools { "
- " Name: \"tenant-1:hdd\" "
- " Kind: \"hdd\" "
- "} "
- "PlanResolution: 50 "
- "Coordinators: 3 "
- "Mediators: 2 "
- "TimeCastBucketsPerMediator: 2 "
- "ExternalSchemeShard: true "
- "Name: \"USER_0\"");
+ R"(
+ StoragePools {
+ Name: "tenant-1:hdd"
+ Kind: "hdd"
+ }
+ PlanResolution: 50
+ Coordinators: 3
+ Mediators: 2
+ TimeCastBucketsPerMediator: 2
+ ExternalSchemeShard: true
+ Name: "USER_0"
+ )"
+ );
t.TestEnv->ReliablePropose(runtime, ForceDropExtSubDomainRequest(++t.TxId, "/MyRoot", "USER_0"),
{NKikimrScheme::StatusAccepted});
@@ -139,8 +244,9 @@ Y_UNIT_TEST_SUITE(TSchemeShardTestExtSubdomainReboots) {
}
- Y_UNIT_TEST(SchemeLimits) {
+ Y_UNIT_TEST_FLAG(SchemeLimits, AlterDatabaseCreateHiveFirst) {
TTestWithReboots t;
+ t.GetTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(AlterDatabaseCreateHiveFirst);
t.Run([&](TTestActorRuntime& runtime, bool& activeZone) {
TSchemeLimits limits;
limits.MaxDepth = 2;
@@ -153,21 +259,25 @@ Y_UNIT_TEST_SUITE(TSchemeShardTestExtSubdomainReboots) {
SetSchemeshardSchemaLimits(runtime, limits);
TestCreateExtSubDomain(runtime, ++t.TxId, "/MyRoot",
- "Name: \"USER_0\"");
+ R"(Name: "USER_0")"
+ );
t.TestEnv->TestWaitNotification(runtime, t.TxId);
}
TestAlterExtSubDomain(runtime, ++t.TxId, "/MyRoot",
- "StoragePools { "
- " Name: \"tenant-1:hdd\" "
- " Kind: \"hdd\" "
- "} "
- "PlanResolution: 50 "
- "Coordinators: 1 "
- "Mediators: 1 "
- "TimeCastBucketsPerMediator: 2 "
- "ExternalSchemeShard: true "
- "Name: \"USER_0\"");
+ R"(
+ StoragePools {
+ Name: "tenant-1:hdd"
+ Kind: "hdd"
+ }
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ ExternalSchemeShard: true
+ Name: "USER_0"
+ )"
+ );
t.TestEnv->TestWaitNotification(runtime, t.TxId);
{
@@ -181,29 +291,30 @@ Y_UNIT_TEST_SUITE(TSchemeShardTestExtSubdomainReboots) {
{NLs::ChildrenCount(2),
NLs::DomainLimitsIs(limits.MaxPaths, limits.MaxShards)});
- ui64 extSchemeSahrd = TTestTxConfig::FakeHiveTablets+2;
+ ui64 subdomainSchemeshard = TTestTxConfig::FakeHiveTablets;
- TestDescribeResult(DescribePath(runtime, TTestTxConfig::FakeHiveTablets+2, "/MyRoot/USER_0"),
+ TestDescribeResult(DescribePath(runtime, subdomainSchemeshard, "/MyRoot/USER_0"),
{NLs::PathExist,
NLs::IsSubDomain("MyRoot/USER_0"),
NLs::DomainKey(3, TTestTxConfig::SchemeShard),
- NLs::DomainCoordinators({TTestTxConfig::FakeHiveTablets}),
- NLs::DomainMediators({TTestTxConfig::FakeHiveTablets+1}),
- NLs::DomainSchemeshard(extSchemeSahrd),
+ // internal knowledge of shard declaration sequence is used here
+ NLs::DomainSchemeshard(subdomainSchemeshard),
+ NLs::DomainCoordinators({TTestTxConfig::FakeHiveTablets+1}),
+ NLs::DomainMediators({TTestTxConfig::FakeHiveTablets+2}),
NLs::DomainLimitsIs(limits.MaxPaths, limits.MaxShards),
NLs::ShardsInsideDomain(3),
NLs::PathsInsideDomain(0)
});
- TestCreateTable(runtime, extSchemeSahrd, ++t.TxId, "/MyRoot/USER_0", R"(
+ TestCreateTable(runtime, subdomainSchemeshard, ++t.TxId, "/MyRoot/USER_0", R"(
Name: "Table"
Columns { Name: "Id" Type: "Uint32" }
KeyColumnNames: ["Id"]
)", {NKikimrScheme::StatusResourceExhausted});
- TestMkDir(runtime, extSchemeSahrd, ++t.TxId, "/MyRoot/USER_0", "A");
- TestMkDir(runtime, extSchemeSahrd, ++t.TxId, "/MyRoot/USER_0", "B");
- TestMkDir(runtime, extSchemeSahrd, ++t.TxId, "/MyRoot/USER_0", "C", {NKikimrScheme::StatusResourceExhausted});
+ TestMkDir(runtime, subdomainSchemeshard, ++t.TxId, "/MyRoot/USER_0", "A");
+ TestMkDir(runtime, subdomainSchemeshard, ++t.TxId, "/MyRoot/USER_0", "B");
+ TestMkDir(runtime, subdomainSchemeshard, ++t.TxId, "/MyRoot/USER_0", "C", {NKikimrScheme::StatusResourceExhausted});
}
});
}
diff --git a/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp b/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp
index 90df1694fcc..763ca1875be 100644
--- a/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp
+++ b/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp
@@ -54,13 +54,13 @@ namespace NSchemeShardUT_Private {
if constexpr (std::is_same_v<TEvSchemeShard::TEvModifySchemeTransactionResult, TEvResponse>) {
result = record.GetStatus();
- CheckExpected(expectedStatuses, record.GetStatus(), record.GetReason());
+ CheckExpectedStatus(expectedStatuses, record.GetStatus(), record.GetReason());
} else if constexpr (std::is_same_v<TEvSchemeShard::TEvCancelTxResult, TEvResponse>) {
result = record.GetStatus();
- CheckExpected(expectedStatuses, record.GetStatus(), record.GetResult());
+ CheckExpectedStatus(expectedStatuses, record.GetStatus(), record.GetResult());
} else {
result = record.GetResponse().GetStatus();
- CheckExpected(expectedStatuses, record.GetResponse().GetStatus(), "unexpected");
+ CheckExpectedStatusCode(expectedStatuses, record.GetResponse().GetStatus(), "unexpected");
}
return result;
@@ -120,15 +120,37 @@ namespace NSchemeShardUT_Private {
return result;
}
- void CheckExpected(const TVector<TEvSchemeShard::EStatus>& expected, TEvSchemeShard::EStatus result, const TString& reason)
+ //
+ // CheckExpectedResult checks the actual result (a status-reason pair) against a list of equally acceptable results.
+ // That is: the result must match at least one of the `expected` items to be accepted as good.
+ //
+ // Reasons are matched by checking whether the expected fragment is contained in the full actual reason.
+ // An empty expected fragment disables the reason check.
+ //
+ void CheckExpectedResult(const TVector<TExpectedResult>& expected, TEvSchemeShard::EStatus actualStatus, const TString& actualReason)
{
- for (TEvSchemeShard::EStatus exp : expected) {
- if (result == exp) {
+ for (const auto& i : expected) {
+ if (actualStatus == i.Status) {
+ if (i.ReasonFragment.empty() || actualReason.Contains(i.ReasonFragment)) {
+ return;
+ }
+ }
+ }
+ Cdbg << "Unexpected result: " << NKikimrScheme::EStatus_Name(actualStatus) << ": " << actualReason << Endl;
+ UNIT_FAIL("Unexpected result: " << NKikimrScheme::EStatus_Name(actualStatus) << ": " << actualReason);
+ }
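+
+ // Usage sketch (statuses and reason fragment chosen for illustration):
+ //   // accept on status alone
+ //   CheckExpectedResult({{NKikimrScheme::StatusAccepted}}, actualStatus, actualReason);
+ //   // accept only if the reason also contains the fragment
+ //   CheckExpectedResult(
+ //       {{NKikimrScheme::StatusInvalidParameter, "could be set only once"}},
+ //       actualStatus, actualReason);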
+
+ // CheckExpectedStatus is a deprecated version of CheckExpectedResult that can't check reasons.
+ // It is used by non-generic test helpers and should be replaced by CheckExpectedResult.
+ void CheckExpectedStatus(const TVector<NKikimrScheme::EStatus>& expected, TEvSchemeShard::EStatus actualStatus, const TString& actualReason)
+ {
+ for (auto expectedStatus : expected) {
+ if (actualStatus == expectedStatus) {
return;
}
}
- Cdbg << "Unexpected status: " << NKikimrScheme::EStatus_Name(result) << ": " << reason << Endl;
- UNIT_FAIL("Unexpected status: " << NKikimrScheme::EStatus_Name(result) << ": " << reason);
+ Cdbg << "Unexpected result: " << NKikimrScheme::EStatus_Name(actualStatus) << ": " << actualReason << Endl;
+ UNIT_FAIL("Unexpected result: " << NKikimrScheme::EStatus_Name(actualStatus) << ": " << actualReason);
}
void SkipModificationReply(TTestActorRuntime& runtime, ui32 num) {
@@ -137,13 +159,11 @@ namespace NSchemeShardUT_Private {
runtime.GrabEdgeEvent<TEvSchemeShard::TEvModifySchemeTransactionResult>(handle);
}
- void TestModificationResult(TTestActorRuntime& runtime, ui64 txId,
- TEvSchemeShard::EStatus expectedResult) {
- TestModificationResults(runtime, txId, {expectedResult});
+ void TestModificationResult(TTestActorRuntime& runtime, ui64 txId, TEvSchemeShard::EStatus expectedStatus) {
+ TestModificationResults(runtime, txId, {{expectedStatus, ""}});
}
- ui64 TestModificationResults(TTestActorRuntime& runtime, ui64 txId,
- const TVector<TEvSchemeShard::EStatus>& expectedResults) {
+ ui64 TestModificationResults(TTestActorRuntime& runtime, ui64 txId, const TVector<TExpectedResult>& expectedResults) {
TAutoPtr<IEventHandle> handle;
TEvSchemeShard::TEvModifySchemeTransactionResult* event;
do {
@@ -154,7 +174,7 @@ namespace NSchemeShardUT_Private {
} while(event->Record.GetTxId() < txId);
UNIT_ASSERT_VALUES_EQUAL(event->Record.GetTxId(), txId);
- CheckExpected(expectedResults, event->Record.GetStatus(), event->Record.GetReason());
+ CheckExpectedResult(expectedResults, event->Record.GetStatus(), event->Record.GetReason());
return event->Record.GetStatus();
}
@@ -263,7 +283,7 @@ namespace NSchemeShardUT_Private {
return DescribePath(runtime, TTestTxConfig::SchemeShard, path, returnPartitioning, returnBoundaries, showPrivate, returnBackups);
}
- TPathVersion ExtructPathVersion(const NKikimrScheme::TEvDescribeSchemeResult& describe) {
+ TPathVersion ExtractPathVersion(const NKikimrScheme::TEvDescribeSchemeResult& describe) {
TPathVersion result;
result.PathId = TPathId(describe.GetPathDescription().GetSelf().GetSchemeshardId(), describe.GetPathDescription().GetSelf().GetPathId());
result.Version = describe.GetPathDescription().GetSelf().GetPathVersion();
@@ -276,7 +296,7 @@ namespace NSchemeShardUT_Private {
check(describe);
}
}
- return ExtructPathVersion(describe);
+ return ExtractPathVersion(describe);
}
TString TestLs(TTestActorRuntime& runtime, const TString& path, bool returnPartitioningInfo,
@@ -396,11 +416,11 @@ namespace NSchemeShardUT_Private {
ForwardToTablet(runtime, schemeShard, sender, MoveTableRequest(txId, srcPath, dstPath, schemeShard));
}
- void TestMoveTable(TTestActorRuntime& runtime, ui64 txId, const TString& src, const TString& dst, const TVector<TEvSchemeShard::EStatus>& expectedResults) {
+ void TestMoveTable(TTestActorRuntime& runtime, ui64 txId, const TString& src, const TString& dst, const TVector<TExpectedResult>& expectedResults) {
TestMoveTable(runtime, TTestTxConfig::SchemeShard, txId, src, dst, expectedResults);
}
- void TestMoveTable(TTestActorRuntime& runtime, ui64 schemeShard, ui64 txId, const TString& src, const TString& dst, const TVector<TEvSchemeShard::EStatus>& expectedResults) {
+ void TestMoveTable(TTestActorRuntime& runtime, ui64 schemeShard, ui64 txId, const TString& src, const TString& dst, const TVector<TExpectedResult>& expectedResults) {
AsyncMoveTable(runtime, txId, src, dst, schemeShard);
TestModificationResults(runtime, txId, expectedResults);
}
@@ -425,11 +445,11 @@ namespace NSchemeShardUT_Private {
ForwardToTablet(runtime, schemeShard, sender, MoveIndexRequest(txId, tablePath, srcPath, dstPath, allowOverwrite, schemeShard));
}
- void TestMoveIndex(TTestActorRuntime& runtime, ui64 txId, const TString& tablePath, const TString& src, const TString& dst, bool allowOverwrite, const TVector<TEvSchemeShard::EStatus>& expectedResults) {
+ void TestMoveIndex(TTestActorRuntime& runtime, ui64 txId, const TString& tablePath, const TString& src, const TString& dst, bool allowOverwrite, const TVector<TExpectedResult>& expectedResults) {
TestMoveIndex(runtime, TTestTxConfig::SchemeShard, txId, tablePath, src, dst, allowOverwrite, expectedResults);
}
- void TestMoveIndex(TTestActorRuntime& runtime, ui64 schemeShard, ui64 txId, const TString& tablePath, const TString& src, const TString& dst, bool allowOverwrite, const TVector<TEvSchemeShard::EStatus>& expectedResults) {
+ void TestMoveIndex(TTestActorRuntime& runtime, ui64 schemeShard, ui64 txId, const TString& tablePath, const TString& src, const TString& dst, bool allowOverwrite, const TVector<TExpectedResult>& expectedResults) {
AsyncMoveIndex(runtime, txId, tablePath, src, dst, allowOverwrite, schemeShard);
TestModificationResults(runtime, txId, expectedResults);
}
@@ -454,13 +474,13 @@ namespace NSchemeShardUT_Private {
}
void TestLock(TTestActorRuntime& runtime, ui64 schemeShard, ui64 txId, const TString& parentPath, const TString& name,
- const TVector<TEvSchemeShard::EStatus> expectedResults) {
+ const TVector<TExpectedResult> expectedResults) {
AsyncLock(runtime, schemeShard, txId, parentPath, name);
TestModificationResults(runtime, txId, expectedResults);
}
void TestLock(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name,
- const TVector<TEvSchemeShard::EStatus> expectedResults) {
+ const TVector<TExpectedResult> expectedResults) {
TestLock(runtime, TTestTxConfig::SchemeShard, txId, parentPath, name, expectedResults);
}
@@ -486,13 +506,13 @@ namespace NSchemeShardUT_Private {
}
void TestUnlock(TTestActorRuntime& runtime, ui64 schemeShard, ui64 txId, ui64 lockId, const TString& parentPath, const TString& name,
- const TVector<TEvSchemeShard::EStatus> expectedResults) {
+ const TVector<TExpectedResult> expectedResults) {
AsyncUnlock(runtime, schemeShard, txId, lockId, parentPath, name);
TestModificationResults(runtime, txId, expectedResults);
}
void TestUnlock(TTestActorRuntime& runtime, ui64 txId, ui64 lockId, const TString& parentPath, const TString& name,
- const TVector<TEvSchemeShard::EStatus> expectedResults) {
+ const TVector<TExpectedResult> expectedResults) {
TestUnlock(runtime, TTestTxConfig::SchemeShard, txId, lockId, parentPath, name, expectedResults);
}
@@ -683,14 +703,14 @@ namespace NSchemeShardUT_Private {
} \
\
ui64 Test##name(TTestActorRuntime& runtime, ui64 schemeShardId, ui64 txId, const TString& parentPath, const TString& scheme, \
- const TVector<TEvSchemeShard::EStatus>& expectedResults, const TApplyIf& applyIf) \
+ const TVector<TExpectedResult>& expectedResults, const TApplyIf& applyIf) \
{ \
Async##name(runtime, schemeShardId, txId, parentPath, scheme, applyIf); \
return TestModificationResults(runtime, txId, expectedResults); \
} \
\
ui64 Test##name(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& scheme, \
- const TVector<TEvSchemeShard::EStatus>& expectedResults, const TApplyIf& applyIf) \
+ const TVector<TExpectedResult>& expectedResults, const TApplyIf& applyIf) \
{ \
return Test##name(runtime, TTestTxConfig::SchemeShard, txId, parentPath, scheme, expectedResults, applyIf); \
}
@@ -729,14 +749,14 @@ namespace NSchemeShardUT_Private {
} \
\
ui64 Test##name(TTestActorRuntime& runtime, ui64 schemeShardId, ui64 txId, const TString& parentPath, const TString& scheme, \
- const TVector<TEvSchemeShard::EStatus>& expectedResults, const NKikimrSchemeOp::TAlterUserAttributes& userAttrs, const TApplyIf& applyIf) \
+ const TVector<TExpectedResult>& expectedResults, const NKikimrSchemeOp::TAlterUserAttributes& userAttrs, const TApplyIf& applyIf) \
{ \
Async##name(runtime, schemeShardId, txId, parentPath, scheme, userAttrs, applyIf); \
return TestModificationResults(runtime, txId, expectedResults); \
} \
\
ui64 Test##name(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& scheme, \
- const TVector<TEvSchemeShard::EStatus>& expectedResults, const NKikimrSchemeOp::TAlterUserAttributes& userAttrs, const TApplyIf& applyIf) \
+ const TVector<TExpectedResult>& expectedResults, const NKikimrSchemeOp::TAlterUserAttributes& userAttrs, const TApplyIf& applyIf) \
{ \
return Test##name(runtime, TTestTxConfig::SchemeShard, txId, parentPath, scheme, expectedResults, userAttrs, applyIf); \
}
@@ -760,14 +780,14 @@ namespace NSchemeShardUT_Private {
} \
\
ui64 Test##name(TTestActorRuntime& runtime, ui64 schemeShardId, ui64 txId, ui64 pathId, \
- const TVector<TEvSchemeShard::EStatus>& expectedResults, const TApplyIf& applyIf) \
+ const TVector<TExpectedResult>& expectedResults, const TApplyIf& applyIf) \
{ \
Async##name(runtime, schemeShardId, txId, pathId, applyIf); \
return TestModificationResults(runtime, txId, expectedResults); \
} \
\
ui64 Test##name(TTestActorRuntime& runtime, ui64 txId, ui64 pathId, \
- const TVector<TEvSchemeShard::EStatus>& expectedResults, const TApplyIf& applyIf) \
+ const TVector<TExpectedResult>& expectedResults, const TApplyIf& applyIf) \
{ \
return Test##name(runtime, TTestTxConfig::SchemeShard, txId, pathId, expectedResults, applyIf); \
}
@@ -907,7 +927,7 @@ namespace NSchemeShardUT_Private {
}
void TestAssignBlockStoreVolume(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name,
- const TString& mountToken, ui64 tokenVersion, const TVector<TEvSchemeShard::EStatus>& expectedResults)
+ const TString& mountToken, ui64 tokenVersion, const TVector<TExpectedResult>& expectedResults)
{
AsyncAssignBlockStoreVolume(runtime, txId, parentPath, name, mountToken, tokenVersion);
TestModificationResults(runtime, txId, expectedResults);
@@ -926,7 +946,7 @@ namespace NSchemeShardUT_Private {
}
void TestCancelTxTable(TTestActorRuntime& runtime, ui64 txId, ui64 targetTxId,
- const TVector<TEvSchemeShard::EStatus>& expectedResults) {
+ const TVector<TExpectedResult>& expectedResults) {
AsyncCancelTxTable(runtime, txId, targetTxId);
TAutoPtr<IEventHandle> handle;
@@ -938,7 +958,7 @@ namespace NSchemeShardUT_Private {
} while(event->Record.GetTargetTxId() < targetTxId);
UNIT_ASSERT_VALUES_EQUAL(event->Record.GetTargetTxId(), targetTxId);
- CheckExpected(expectedResults, event->Record.GetStatus(), event->Record.GetResult());
+ CheckExpectedResult(expectedResults, event->Record.GetStatus(), event->Record.GetResult());
}
void AsyncExport(TTestActorRuntime& runtime, ui64 schemeshardId, ui64 id, const TString& dbName, const TString& requestStr, const TString& userSID) {
@@ -1543,14 +1563,14 @@ namespace NSchemeShardUT_Private {
ForwardToTablet(runtime, TTestTxConfig::SchemeShard, sender, evTx);
}
- void TestUpgradeSubDomain(TTestActorRuntime &runtime, ui64 txId, const TString &parentPath, const TString &name, const TVector<TEvSchemeShard::EStatus> &expectedResults) {
+ void TestUpgradeSubDomain(TTestActorRuntime &runtime, ui64 txId, const TString &parentPath, const TString &name, const TVector<TExpectedResult> &expectedResults) {
AsyncUpgradeSubDomain(runtime, txId, parentPath, name);
TestModificationResults(runtime, txId, expectedResults);
}
void TestUpgradeSubDomain(TTestActorRuntime &runtime, ui64 txId, const TString &parentPath, const TString &name) {
AsyncUpgradeSubDomain(runtime, txId, parentPath, name);
- TestModificationResults(runtime, txId, {TEvSchemeShard::EStatus::StatusAccepted});
+ TestModificationResults(runtime, txId, {{TEvSchemeShard::EStatus::StatusAccepted, ""}});
}
TEvSchemeShard::TEvModifySchemeTransaction *UpgradeSubDomainDecisionRequest(ui64 txId, const TString &parentPath, const TString &name, NKikimrSchemeOp::TUpgradeSubDomain::EDecision decision) {
@@ -1569,14 +1589,14 @@ namespace NSchemeShardUT_Private {
ForwardToTablet(runtime, TTestTxConfig::SchemeShard, sender, evTx);
}
- void TestUpgradeSubDomainDecision(TTestActorRuntime &runtime, ui64 txId, const TString &parentPath, const TString &name, const TVector<TEvSchemeShard::EStatus> &expectedResults, NKikimrSchemeOp::TUpgradeSubDomain::EDecision decision) {
+ void TestUpgradeSubDomainDecision(TTestActorRuntime &runtime, ui64 txId, const TString &parentPath, const TString &name, const TVector<TExpectedResult> &expectedResults, NKikimrSchemeOp::TUpgradeSubDomain::EDecision decision) {
AsyncUpgradeSubDomainDecision(runtime, txId, parentPath, name, decision);
TestModificationResults(runtime, txId, expectedResults);
}
void TestUpgradeSubDomainDecision(TTestActorRuntime &runtime, ui64 txId, const TString &parentPath, const TString &name, NKikimrSchemeOp::TUpgradeSubDomain::EDecision decision) {
AsyncUpgradeSubDomainDecision(runtime, txId, parentPath, name, decision);
- TestModificationResults(runtime, txId, {TEvSchemeShard::EStatus::StatusAccepted});
+ TestModificationResults(runtime, txId, {{TEvSchemeShard::EStatus::StatusAccepted, ""}});
}
TRowVersion CreateVolatileSnapshot(
@@ -1690,7 +1710,7 @@ namespace NSchemeShardUT_Private {
return new TEvIndexBuilder::TEvCancelRequest(id, dbName, buildIndexId);
}
- void CheckExpected(const TVector<Ydb::StatusIds::StatusCode>& expected, Ydb::StatusIds_StatusCode result, const TString& reason)
+ void CheckExpectedStatusCode(const TVector<Ydb::StatusIds::StatusCode>& expected, Ydb::StatusIds_StatusCode result, const TString& reason)
{
bool isExpectedStatus = false;
for (Ydb::StatusIds::StatusCode exp : expected) {
@@ -1700,8 +1720,8 @@ namespace NSchemeShardUT_Private {
}
}
if (!isExpectedStatus)
- Cdbg << "Unexpected status: " << Ydb::StatusIds::StatusCode_Name(result) << ": " << reason << Endl;
- UNIT_ASSERT_C(isExpectedStatus, "Unexpected status: " << Ydb::StatusIds::StatusCode_Name(result) << ": " << reason);
+ Cdbg << "Unexpected status code: " << Ydb::StatusIds::StatusCode_Name(result) << ": " << reason << Endl;
+ UNIT_ASSERT_C(isExpectedStatus, "Unexpected status code: " << Ydb::StatusIds::StatusCode_Name(result) << ": " << reason);
}
NKikimrIndexBuilder::TEvCancelResponse TestCancelBuildIndex(TTestActorRuntime& runtime, const ui64 id, const ui64 schemeShard, const TString &dbName,
@@ -1718,7 +1738,7 @@ namespace NSchemeShardUT_Private {
UNIT_ASSERT(event);
Cerr << "BUILDINDEX RESPONSE CANCEL: " << event->ToString() << Endl;
- CheckExpected(expectedStatuses, event->Record.GetStatus(), PrintIssues(event->Record.GetIssues()));
+ CheckExpectedStatusCode(expectedStatuses, event->Record.GetStatus(), PrintIssues(event->Record.GetIssues()));
return event->Record;
}
diff --git a/ydb/core/tx/schemeshard/ut_helpers/helpers.h b/ydb/core/tx/schemeshard/ut_helpers/helpers.h
index b9e7c852dc4..878df633764 100644
--- a/ydb/core/tx/schemeshard/ut_helpers/helpers.h
+++ b/ydb/core/tx/schemeshard/ut_helpers/helpers.h
@@ -30,6 +30,23 @@
#define Cdbg Cnull
#endif
+
+// ad-hoc test parametrization support: only for a single boolean flag
+// taken from ydb/core/ut/common/kqp_ut_common.h:Y_UNIT_TEST_TWIN
+// TODO: introduce general support for test parametrization?
+#define Y_UNIT_TEST_FLAG(N, OPT) \
+ template<bool OPT> void N(NUnitTest::TTestContext&); \
+ struct TTestRegistration##N { \
+ TTestRegistration##N() { \
+ TCurrentTest::AddTest(#N "-" #OPT "-false", static_cast<void (*)(NUnitTest::TTestContext&)>(&N<false>), false); \
+ TCurrentTest::AddTest(#N "-" #OPT "-true", static_cast<void (*)(NUnitTest::TTestContext&)>(&N<true>), false); \
+ } \
+ }; \
+ static TTestRegistration##N testRegistration##N; \
+ template<bool OPT> \
+ void N(NUnitTest::TTestContext&)
+
+
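+// Usage sketch (hypothetical test body): a single definition registers two
+// tests, "Example-MyFlag-false" and "Example-MyFlag-true", with the flag
+// available inside the body as a compile-time bool:
+//
+//   Y_UNIT_TEST_FLAG(Example, MyFlag) {
+//       TTestBasicRuntime runtime;
+//       TTestEnv env(runtime, TTestEnvOptions().EnableAlterDatabaseCreateHiveFirst(MyFlag));
+//       // ... assertions may branch on MyFlag ...
+//   }
+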
namespace NSchemeShardUT_Private {
using namespace NKikimr;
@@ -63,7 +80,7 @@ namespace NSchemeShardUT_Private {
NKikimrScheme::TEvDescribeSchemeResult DescribePrivatePath(TTestActorRuntime& runtime, const TString& path, bool returnPartitioning = false, bool returnBoundaries = false);
NKikimrScheme::TEvDescribeSchemeResult DescribePath(TTestActorRuntime& runtime, ui64 schemeShard, const TString& path, bool returnPartitioning = false, bool returnBoundaries = false, bool showPrivate = false, bool returnBackups = false);
NKikimrScheme::TEvDescribeSchemeResult DescribePath(TTestActorRuntime& runtime, const TString& path, bool returnPartitioning = false, bool returnBoundaries = false, bool showPrivate = false, bool returnBackups = false);
- TPathVersion ExtructPathVersion(const NKikimrScheme::TEvDescribeSchemeResult& describe);
+ TPathVersion ExtractPathVersion(const NKikimrScheme::TEvDescribeSchemeResult& describe);
TPathVersion TestDescribeResult(const NKikimrScheme::TEvDescribeSchemeResult& describe, TVector<NLs::TCheckFunc> checks = {});
TString TestDescribe(TTestActorRuntime& runtime, const TString& path);
@@ -74,11 +91,28 @@ namespace NSchemeShardUT_Private {
THolder<NSchemeCache::TSchemeCacheNavigate> Navigate(TTestActorRuntime& runtime, const TString& path,
NSchemeCache::TSchemeCacheNavigate::EOp op = NSchemeCache::TSchemeCacheNavigate::EOp::OpPath);
+ ////////// expected results
+ struct TExpectedResult {
+ TEvSchemeShard::EStatus Status;
+ TString ReasonFragment;
+
+ TExpectedResult(TEvSchemeShard::EStatus status)
+ : Status(status)
+ {}
+ TExpectedResult(TEvSchemeShard::EStatus status, TString reasonFragment)
+ : Status(status)
+ , ReasonFragment(reasonFragment)
+ {}
+ };
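+
+ // The two constructors allow brace-initialization as either {status} or
+ // {status, "reason fragment"}, e.g. (illustrative):
+ //   TVector<TExpectedResult> expected = {
+ //       {NKikimrScheme::StatusAccepted},
+ //       {NKikimrScheme::StatusInvalidParameter, "could be set only once"},
+ //   };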
+
////////// modification results
- void CheckExpected(const TVector<TEvSchemeShard::EStatus>& expected, TEvSchemeShard::EStatus result, const TString& reason);
- void CheckExpected(const TVector<Ydb::StatusIds::StatusCode>& expected, Ydb::StatusIds::StatusCode result, const TString& reason);
- void TestModificationResult(TTestActorRuntime& runtime, ui64 txId,TEvSchemeShard::EStatus expectedResult = NKikimrScheme::StatusAccepted);
- ui64 TestModificationResults(TTestActorRuntime& runtime, ui64 txId, const TVector<TEvSchemeShard::EStatus>& expectedResults);
+ void CheckExpectedResult(const TVector<TExpectedResult>& expected, TEvSchemeShard::EStatus actualStatus, const TString& actualReason);
+ // CheckExpectedStatus is a deprecated version of CheckExpectedResult that can't check reasons.
+ // It is used by non-generic test helpers and should be replaced by CheckExpectedResult.
+ void CheckExpectedStatus(const TVector<NKikimrScheme::EStatus>& expected, TEvSchemeShard::EStatus actualStatus, const TString& actualReason);
+ void CheckExpectedStatusCode(const TVector<Ydb::StatusIds::StatusCode>& expected, Ydb::StatusIds::StatusCode result, const TString& reason);
+ void TestModificationResult(TTestActorRuntime& runtime, ui64 txId, TEvSchemeShard::EStatus expectedStatus = NKikimrScheme::StatusAccepted);
+ ui64 TestModificationResults(TTestActorRuntime& runtime, ui64 txId, const TVector<TExpectedResult>& expectedResults);
void SkipModificationReply(TTestActorRuntime& runtime, ui32 num = 1);
TEvTx* CombineSchemeTransactions(const TVector<TEvTx*>& transactions);
@@ -97,9 +131,9 @@ namespace NSchemeShardUT_Private {
void Async##name(TTestActorRuntime& runtime, ui64 schemeShardId, ui64 txId, params, __VA_ARGS__); \
void Async##name(TTestActorRuntime& runtime, ui64 txId, params, __VA_ARGS__); \
ui64 Test##name(TTestActorRuntime& runtime, ui64 schemeShardId, ui64 txId, params, \
- const TVector<TEvSchemeShard::EStatus>& expectedResults = {NKikimrScheme::StatusAccepted}, __VA_ARGS__); \
+ const TVector<TExpectedResult>& expectedResults = {{NKikimrScheme::StatusAccepted}}, __VA_ARGS__); \
ui64 Test##name(TTestActorRuntime& runtime, ui64 txId, params, \
- const TVector<TEvSchemeShard::EStatus>& expectedResults = {NKikimrScheme::StatusAccepted}, __VA_ARGS__)
+ const TVector<TExpectedResult>& expectedResults = {{NKikimrScheme::StatusAccepted}}, __VA_ARGS__)
#define GENERIC_HELPERS(name) DEFINE_HELPERS(name, UT_GENERIC_PARAMS, const TApplyIf& applyIf = {})
#define GENERIC_WITH_ATTRS_HELPERS(name) DEFINE_HELPERS(name, UT_GENERIC_PARAMS, const NKikimrSchemeOp::TAlterUserAttributes& userAttrs = {}, const TApplyIf& applyIf = {})
@@ -209,7 +243,7 @@ namespace NSchemeShardUT_Private {
GENERIC_HELPERS(DropBlockStoreVolume);
DROP_BY_PATH_ID_HELPERS(DropBlockStoreVolume);
void AsyncAssignBlockStoreVolume(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name, const TString& mountToken, ui64 tokenVersion = 0);
- void TestAssignBlockStoreVolume(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name, const TString& mountToken, ui64 tokenVersion = 0, const TVector<TEvSchemeShard::EStatus>& expectedResults = {NKikimrScheme::StatusSuccess});
+ void TestAssignBlockStoreVolume(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name, const TString& mountToken, ui64 tokenVersion = 0, const TVector<TExpectedResult>& expectedResults = {NKikimrScheme::StatusSuccess});
#undef DROP_BY_PATH_ID_HELPERS
#undef GENERIC_WITH_ATTRS_HELPERS
@@ -224,7 +258,7 @@ namespace NSchemeShardUT_Private {
TEvSchemeShard::TEvCancelTx* CancelTxRequest(ui64 txId, ui64 targetTxId);
void AsyncCancelTxTable(TTestActorRuntime& runtime, ui64 txId, ui64 targetTxId);
void TestCancelTxTable(TTestActorRuntime& runtime, ui64 txId, ui64 targetTxId,
- const TVector<TEvSchemeShard::EStatus>& expectedResults = {NKikimrScheme::StatusAccepted});
+ const TVector<TExpectedResult>& expectedResults = {NKikimrScheme::StatusAccepted});
// modify acl
TEvTx* CreateModifyACLRequest(ui64 txId, TString parentPath, TString name, const TString& diffAcl, const TString& newOwner);
@@ -236,12 +270,12 @@ namespace NSchemeShardUT_Private {
// upgrade subdomain
TEvTx* UpgradeSubDomainRequest(ui64 txId, const TString& parentPath, const TString& name);
void AsyncUpgradeSubDomain(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name);
- void TestUpgradeSubDomain(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name, const TVector<TEvSchemeShard::EStatus>& expectedResults);
+ void TestUpgradeSubDomain(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name, const TVector<TExpectedResult>& expectedResults);
void TestUpgradeSubDomain(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name);
TEvTx* UpgradeSubDomainDecisionRequest(ui64 txId, const TString& parentPath, const TString& name, NKikimrSchemeOp::TUpgradeSubDomain::EDecision taskType);
void AsyncUpgradeSubDomainDecision(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name, NKikimrSchemeOp::TUpgradeSubDomain::EDecision taskType);
- void TestUpgradeSubDomainDecision(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name, const TVector<TEvSchemeShard::EStatus>& expectedResults, NKikimrSchemeOp::TUpgradeSubDomain::EDecision taskType);
+ void TestUpgradeSubDomainDecision(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name, const TVector<TExpectedResult>& expectedResults, NKikimrSchemeOp::TUpgradeSubDomain::EDecision taskType);
void TestUpgradeSubDomainDecision(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name, NKikimrSchemeOp::TUpgradeSubDomain::EDecision taskType);
// copy table
@@ -254,29 +288,27 @@ namespace NSchemeShardUT_Private {
// move table
TEvTx* MoveTableRequest(ui64 txId, const TString& srcPath, const TString& dstPath, ui64 schemeShard = TTestTxConfig::SchemeShard, const TApplyIf& applyIf = {});
void AsyncMoveTable(TTestActorRuntime& runtime, ui64 txId, const TString& srcPath, const TString& dstPath, ui64 schemeShard = TTestTxConfig::SchemeShard);
- void TestMoveTable(TTestActorRuntime& runtime, ui64 txId, const TString& srcMove, const TString& dstMove, const TVector<TEvSchemeShard::EStatus>& expectedResults = {NKikimrScheme::StatusAccepted});
- void TestMoveTable(TTestActorRuntime& runtime, ui64 schemeShard, ui64 txId, const TString& srcMove, const TString& dstMove, const TVector<TEvSchemeShard::EStatus>& expectedResults = {NKikimrScheme::StatusAccepted});
+ void TestMoveTable(TTestActorRuntime& runtime, ui64 txId, const TString& srcMove, const TString& dstMove, const TVector<TExpectedResult>& expectedResults = {NKikimrScheme::StatusAccepted});
+ void TestMoveTable(TTestActorRuntime& runtime, ui64 schemeShard, ui64 txId, const TString& srcMove, const TString& dstMove, const TVector<TExpectedResult>& expectedResults = {NKikimrScheme::StatusAccepted});
// move index
TEvTx* MoveIndexRequest(ui64 txId, const TString& tablePath, const TString& srcPath, const TString& dstPath, bool allowOverwrite, ui64 schemeShard = TTestTxConfig::SchemeShard, const TApplyIf& applyIf = {});
void AsyncMoveIndex(TTestActorRuntime& runtime, ui64 txId, const TString& tablePath, const TString& srcPath, const TString& dstPath, bool allowOverwrite, ui64 schemeShard = TTestTxConfig::SchemeShard);
- void TestMoveIndex(TTestActorRuntime& runtime, ui64 txId, const TString& tablePath, const TString& srcMove, const TString& dstMove, bool allowOverwrite, const TVector<TEvSchemeShard::EStatus>& expectedResults = {NKikimrScheme::StatusAccepted});
- void TestMoveIndex(TTestActorRuntime& runtime, ui64 schemeShard, ui64 txId, const TString& tablePath, const TString& srcMove, const TString& dstMove, bool allowOverwrite, const TVector<TEvSchemeShard::EStatus>& expectedResults = {NKikimrScheme::StatusAccepted});
+ void TestMoveIndex(TTestActorRuntime& runtime, ui64 txId, const TString& tablePath, const TString& srcMove, const TString& dstMove, bool allowOverwrite, const TVector<TExpectedResult>& expectedResults = {NKikimrScheme::StatusAccepted});
+ void TestMoveIndex(TTestActorRuntime& runtime, ui64 schemeShard, ui64 txId, const TString& tablePath, const TString& srcMove, const TString& dstMove, bool allowOverwrite, const TVector<TExpectedResult>& expectedResults = {NKikimrScheme::StatusAccepted});
// locks
TEvTx* LockRequest(ui64 txId, const TString &parentPath, const TString& name);
void AsyncLock(TTestActorRuntime& runtime, ui64 schemeShard, ui64 txId, const TString& parentPath, const TString& name);
void AsyncLock(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name);
void TestLock(TTestActorRuntime& runtime, ui64 schemeShard, ui64 txId, const TString& parentPath, const TString& name,
- const TVector<TEvSchemeShard::EStatus> expectedResults = {NKikimrScheme::StatusAccepted});
+ const TVector<TExpectedResult> expectedResults = {NKikimrScheme::StatusAccepted});
void TestLock(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name,
- const TVector<TEvSchemeShard::EStatus> expectedResults = {NKikimrScheme::StatusAccepted});
- void AsyncUnkock(TTestActorRuntime& runtime, ui64 schemeShard, ui64 txId, ui64 lockId, const TString& parentPath, const TString& name);
- void AsyncUnkock(TTestActorRuntime& runtime, ui64 txId, ui64 lockId, const TString& parentPath, const TString& name);
+ const TVector<TExpectedResult> expectedResults = {NKikimrScheme::StatusAccepted});
void TestUnlock(TTestActorRuntime& runtime, ui64 schemeShard, ui64 txId, ui64 lockId, const TString& parentPath, const TString& name,
- const TVector<TEvSchemeShard::EStatus> expectedResults = {NKikimrScheme::StatusAccepted});
+ const TVector<TExpectedResult> expectedResults = {NKikimrScheme::StatusAccepted});
void TestUnlock(TTestActorRuntime& runtime, ui64 txId, ui64 lockId, const TString& parentPath, const TString& name,
- const TVector<TEvSchemeShard::EStatus> expectedResults = {NKikimrScheme::StatusAccepted});
+ const TVector<TExpectedResult> expectedResults = {NKikimrScheme::StatusAccepted});
// index build
struct TBuildIndexConfig {
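NB (reviewer note, not part of the patch): since TExpectedResult is implicitly
constructible from a bare status, existing call sites keep working, while new
tests can additionally match a fragment of the error reason. A minimal sketch;
the paths and the reason fragment below are hypothetical:

    // Expect rejection and check that the error text mentions the source path:
    TestMoveTable(runtime, ++txId, "/MyRoot/Src", "/MyRoot/Dst",
                  {{NKikimrScheme::StatusPathDoesNotExist, "Src"}});

    // Plain statuses still compile via the single-argument constructor:
    TestMoveTable(runtime, ++txId, "/MyRoot/Table", "/MyRoot/Renamed",
                  {NKikimrScheme::StatusAccepted});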
diff --git a/ydb/core/tx/schemeshard/ut_helpers/ls_checks.cpp b/ydb/core/tx/schemeshard/ut_helpers/ls_checks.cpp
index 2cbc8f48a51..1a4dd601600 100644
--- a/ydb/core/tx/schemeshard/ut_helpers/ls_checks.cpp
+++ b/ydb/core/tx/schemeshard/ut_helpers/ls_checks.cpp
@@ -109,6 +109,18 @@ TCheckFunc ExtractTenantSysViewProcessor(ui64* tenantSVPId) {
};
}
+TCheckFunc ExtractDomainHive(ui64* domainHiveId) {
+ return [=] (const NKikimrScheme::TEvDescribeSchemeResult& record) {
+ UNIT_ASSERT_VALUES_EQUAL(record.GetStatus(), NKikimrScheme::StatusSuccess);
+ const auto& pathDescr = record.GetPathDescription();
+ UNIT_ASSERT(pathDescr.HasDomainDescription());
+ const auto& domainDesc = pathDescr.GetDomainDescription();
+ UNIT_ASSERT(domainDesc.HasProcessingParams());
+ const auto& procParams = domainDesc.GetProcessingParams();
+ *domainHiveId = procParams.GetHive();
+ };
+}
+
void InExternalSubdomain(const NKikimrScheme::TEvDescribeSchemeResult& record) {
PathRedirected(record);
@@ -179,7 +191,7 @@ TCheckFunc DomainCoordinators(TVector<ui64> coordinators) {
const auto& processingParams = pathDescr.GetDomainDescription().GetProcessingParams();
UNIT_ASSERT_VALUES_EQUAL(processingParams.CoordinatorsSize(), coordinators.size());
- TVector<ui64> actual(processingParams.GetCoordinators().begin(),processingParams.GetCoordinators().end());
+ TVector<ui64> actual(processingParams.GetCoordinators().begin(), processingParams.GetCoordinators().end());
UNIT_ASSERT_EQUAL(actual, coordinators);
};
}
@@ -197,21 +209,36 @@ TCheckFunc DomainMediators(TVector<ui64> mediators) {
};
}
-TCheckFunc DomainSchemeshard(ui64 schemeshard) {
+TCheckFunc DomainSchemeshard(ui64 domainSchemeshardId) {
return [=] (const NKikimrScheme::TEvDescribeSchemeResult& record) {
UNIT_ASSERT_C(IsGoodDomainStatus(record.GetStatus()), "Unexpected status: " << record.GetStatus());
const auto& pathDescr = record.GetPathDescription();
const auto& processingParams = pathDescr.GetDomainDescription().GetProcessingParams();
- if (schemeshard) {
- UNIT_ASSERT_VALUES_EQUAL(processingParams.GetSchemeShard(), schemeshard);
+ if (domainSchemeshardId) {
+ UNIT_ASSERT_VALUES_EQUAL(processingParams.GetSchemeShard(), domainSchemeshardId);
} else {
UNIT_ASSERT(!processingParams.HasSchemeShard());
}
};
}
+TCheckFunc DomainHive(ui64 domainHiveId) {
+ return [=] (const NKikimrScheme::TEvDescribeSchemeResult& record) {
+ UNIT_ASSERT_C(IsGoodDomainStatus(record.GetStatus()), "Unexpected status: " << record.GetStatus());
+
+ const auto& pathDescr = record.GetPathDescription();
+ const auto& processingParams = pathDescr.GetDomainDescription().GetProcessingParams();
+
+ if (domainHiveId) {
+ UNIT_ASSERT_VALUES_EQUAL(processingParams.GetHive(), domainHiveId);
+ } else {
+ UNIT_ASSERT(!processingParams.HasHive());
+ }
+ };
+}
+
TCheckFunc DomainSettings(ui32 planResolution, ui32 timeCastBucketsPerMediator) {
return [=] (const NKikimrScheme::TEvDescribeSchemeResult& record) {
UNIT_ASSERT_VALUES_EQUAL(record.GetStatus(), NKikimrScheme::StatusSuccess);
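NB (reviewer note, not part of the patch): a minimal sketch of how the new hive
checks compose with the existing describe helpers; the tenant path is hypothetical:

    ui64 tenantHive = 0;
    // Capture the hive tablet id published in the domain's processing params...
    TestDescribeResult(DescribePath(runtime, "/MyRoot/USER_0"),
                       {NLs::ExtractDomainHive(&tenantHive)});
    // ...and later assert that the domain still reports the same hive tablet:
    TestDescribeResult(DescribePath(runtime, "/MyRoot/USER_0"),
                       {NLs::DomainHive(tenantHive)});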
diff --git a/ydb/core/tx/schemeshard/ut_helpers/ls_checks.h b/ydb/core/tx/schemeshard/ut_helpers/ls_checks.h
index 72ed6120b6e..c183a8042ec 100644
--- a/ydb/core/tx/schemeshard/ut_helpers/ls_checks.h
+++ b/ydb/core/tx/schemeshard/ut_helpers/ls_checks.h
@@ -51,7 +51,8 @@ namespace NLs {
TCheckFunc StoragePoolsEqual(TSet<TString> poolNames);
TCheckFunc DomainCoordinators(TVector<ui64> coordinators);
TCheckFunc DomainMediators(TVector<ui64> mediators);
- TCheckFunc DomainSchemeshard(ui64 schemeshard);
+ TCheckFunc DomainSchemeshard(ui64 domainSchemeshardId);
+ TCheckFunc DomainHive(ui64 domainHiveId);
TCheckFunc DomainKey(ui64 pathId, ui64 schemeshardId);
TCheckFunc DomainKey(TPathId pathId);
TCheckFunc DomainSettings(ui32 planResolution, ui32 timeCastBucketsPerMediator);
@@ -59,8 +60,9 @@ namespace NLs {
TCheckFunc IsExternalSubDomain(const TString& name);
void InExternalSubdomain(const NKikimrScheme::TEvDescribeSchemeResult& record);
- TCheckFunc ExtractTenantSchemeshard(ui64* tenantSchemeShard);
- TCheckFunc ExtractTenantSysViewProcessor(ui64* tenantSVP);
+ TCheckFunc ExtractTenantSchemeshard(ui64* tenantSchemeShardId);
+ TCheckFunc ExtractTenantSysViewProcessor(ui64* tenantSVPId);
+ TCheckFunc ExtractDomainHive(ui64* domainHiveId);
void NotFinished(const NKikimrScheme::TEvDescribeSchemeResult& record);
void Finished(const NKikimrScheme::TEvDescribeSchemeResult& record);
diff --git a/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp b/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp
index cbf70ee116e..f14a2351c39 100644
--- a/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp
+++ b/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp
@@ -509,6 +509,7 @@ NSchemeShardUT_Private::TTestEnv::TTestEnv(TTestActorRuntime& runtime, const TTe
app.SetEnableMoveIndex(opts.EnableMoveIndex_);
app.SetEnableChangefeedInitialScan(opts.EnableChangefeedInitialScan_);
app.SetEnableNotNullDataColumns(opts.EnableNotNullDataColumns_);
+ app.SetEnableAlterDatabaseCreateHiveFirst(opts.EnableAlterDatabaseCreateHiveFirst_);
if (opts.DisableStatsBatching_.value_or(false)) {
app.SchemeShardConfig.SetStatsMaxBatchSize(0);
@@ -884,7 +885,9 @@ void NSchemeShardUT_Private::TTestWithReboots::Run(std::function<void (TTestActo
void NSchemeShardUT_Private::TTestWithReboots::Run(std::function<void (TTestActorRuntime &, bool &)> testScenario, bool allowLogBatching) {
TDatashardLogBatchingSwitch logBatchingSwitch(allowLogBatching);
+ Cerr << "==== RunWithTabletReboots" << Endl;
RunWithTabletReboots(testScenario);
+ Cerr << "==== RunWithPipeResets" << Endl;
RunWithPipeResets(testScenario);
//RunWithDelays(testScenario);
}
@@ -1017,5 +1020,6 @@ NSchemeShardUT_Private::TTestEnvOptions NSchemeShardUT_Private::TTestWithReboots
.EnableNotNullColumns(true)
.EnableProtoSourceIdInfo(true)
.DisableStatsBatching(true)
- .EnableMoveIndex(true);
+ .EnableMoveIndex(true)
+ ;
}
diff --git a/ydb/core/tx/schemeshard/ut_helpers/test_env.h b/ydb/core/tx/schemeshard/ut_helpers/test_env.h
index 638d3d40eb5..f40df8b2c29 100644
--- a/ydb/core/tx/schemeshard/ut_helpers/test_env.h
+++ b/ydb/core/tx/schemeshard/ut_helpers/test_env.h
@@ -47,6 +47,7 @@ namespace NSchemeShardUT_Private {
OPTION(std::optional<bool>, EnableMoveIndex, std::nullopt);
OPTION(std::optional<bool>, EnableChangefeedInitialScan, std::nullopt);
OPTION(std::optional<bool>, EnableNotNullDataColumns, std::nullopt);
+ OPTION(std::optional<bool>, EnableAlterDatabaseCreateHiveFirst, std::nullopt);
#undef OPTION
};
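NB (reviewer note, not part of the patch): a minimal sketch of opting a unit test
in to the new hive-first behavior; the option name comes from this patch, the rest
assumes the usual TTestEnv setup:

    TTestBasicRuntime runtime;
    TTestEnv env(runtime, TTestEnvOptions()
        .EnableAlterDatabaseCreateHiveFirst(true));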
diff --git a/ydb/core/tx/schemeshard/ut_index_build.cpp b/ydb/core/tx/schemeshard/ut_index_build.cpp
index ba4e5ef4577..7aaec7fe479 100644
--- a/ydb/core/tx/schemeshard/ut_index_build.cpp
+++ b/ydb/core/tx/schemeshard/ut_index_build.cpp
@@ -291,7 +291,7 @@ Y_UNIT_TEST_SUITE(IndexBuildTest) {
auto descr = TestGetBuilIndex(runtime, tenantSchemeShard, "/MyRoot/ServerLessDB", txId);
Y_ASSERT(descr.GetIndexBuild().GetState() == Ydb::Table::IndexBuildState::STATE_DONE);
- const TString meteringData = R"({"usage":{"start":0,"quantity":179,"finish":0,"unit":"request_unit","type":"delta"},"tags":{},"id":"106-9437199-2-101-1818-101-1818","cloud_id":"CLOUD_ID_VAL","source_wt":0,"source_id":"sless-docapi-ydb-ss","resource_id":"DATABASE_ID_VAL","schema":"ydb.serverless.requests.v1","folder_id":"FOLDER_ID_VAL","version":"1.0.0"})";
+ const TString meteringData = R"({"usage":{"start":0,"quantity":179,"finish":0,"unit":"request_unit","type":"delta"},"tags":{},"id":"106-9437197-2-101-1818-101-1818","cloud_id":"CLOUD_ID_VAL","source_wt":0,"source_id":"sless-docapi-ydb-ss","resource_id":"DATABASE_ID_VAL","schema":"ydb.serverless.requests.v1","folder_id":"FOLDER_ID_VAL","version":"1.0.0"})";
UNIT_ASSERT_NO_DIFF(meteringMessages, meteringData + "\n");
diff --git a/ydb/core/tx/schemeshard/ut_restore.cpp b/ydb/core/tx/schemeshard/ut_restore.cpp
index e454425f541..a93f3b43974 100644
--- a/ydb/core/tx/schemeshard/ut_restore.cpp
+++ b/ydb/core/tx/schemeshard/ut_restore.cpp
@@ -937,7 +937,7 @@ Y_UNIT_TEST_SUITE(TRestoreTests) {
}
void TestRestoreNegative(TTestActorRuntime& runtime, ui64 txId, const TString& parentPath, const TString& name,
- const TVector<TEvSchemeShard::EStatus>& expectedResults) {
+ const TVector<TExpectedResult>& expectedResults) {
TestRestore(runtime, ++txId, parentPath, Sprintf(R"(
TableName: "%s"
@@ -1802,7 +1802,7 @@ Y_UNIT_TEST_SUITE(TImportTests) {
runtime.DispatchEvents(opts);
}
- const TString expectedBillRecord = R"({"usage":{"start":0,"quantity":50,"finish":0,"unit":"request_unit","type":"delta"},"tags":{},"id":"281474976725758-9437199-2-9437199-4","cloud_id":"CLOUD_ID_VAL","source_wt":0,"source_id":"sless-docapi-ydb-ss","resource_id":"DATABASE_ID_VAL","schema":"ydb.serverless.requests.v1","folder_id":"FOLDER_ID_VAL","version":"1.0.0"})";
+ const TString expectedBillRecord = R"({"usage":{"start":0,"quantity":50,"finish":0,"unit":"request_unit","type":"delta"},"tags":{},"id":"281474976725758-9437197-2-9437197-4","cloud_id":"CLOUD_ID_VAL","source_wt":0,"source_id":"sless-docapi-ydb-ss","resource_id":"DATABASE_ID_VAL","schema":"ydb.serverless.requests.v1","folder_id":"FOLDER_ID_VAL","version":"1.0.0"})";
UNIT_ASSERT_VALUES_EQUAL(billRecords.size(), 1);
UNIT_ASSERT_NO_DIFF(billRecords[0], expectedBillRecord + "\n");
diff --git a/ydb/core/tx/schemeshard/ut_subdomain.cpp b/ydb/core/tx/schemeshard/ut_subdomain.cpp
index 006607097af..4a48cb69004 100644
--- a/ydb/core/tx/schemeshard/ut_subdomain.cpp
+++ b/ydb/core/tx/schemeshard/ut_subdomain.cpp
@@ -198,7 +198,7 @@ Y_UNIT_TEST_SUITE(TSchemeShardSubDomainTest) {
}
- Y_UNIT_TEST(CreataWithoutPlanResolution) {
+ Y_UNIT_TEST(CreateWithoutPlanResolution) {
TTestBasicRuntime runtime;
TTestEnv env(runtime);
ui64 txId = 100;
@@ -216,7 +216,7 @@ Y_UNIT_TEST_SUITE(TSchemeShardSubDomainTest) {
{NLs::PathNotExist});
}
- Y_UNIT_TEST(CreataWithoutTimeCastBuckets) {
+ Y_UNIT_TEST(CreateWithoutTimeCastBuckets) {
TTestBasicRuntime runtime;
TTestEnv env(runtime);
ui64 txId = 100;
diff --git a/ydb/core/tx/tx_proxy/schemereq.cpp b/ydb/core/tx/tx_proxy/schemereq.cpp
index 1ef0d210b69..ccd7e4ae469 100644
--- a/ydb/core/tx/tx_proxy/schemereq.cpp
+++ b/ydb/core/tx/tx_proxy/schemereq.cpp
@@ -173,6 +173,9 @@ struct TBaseSchemeReq: public TActorBootstrapped<TDerived> {
case NKikimrSchemeOp::ESchemeOpAlterExtSubDomain:
return *modifyScheme.MutableSubDomain()->MutableName();
+ case NKikimrSchemeOp::ESchemeOpAlterExtSubDomainCreateHive:
+ Y_FAIL("no implementation for ESchemeOpAlterExtSubDomainCreateHive");
+
case NKikimrSchemeOp::ESchemeOpCreateRtmrVolume:
return *modifyScheme.MutableCreateRtmrVolume()->MutableName();
@@ -737,6 +740,7 @@ struct TBaseSchemeReq: public TActorBootstrapped<TDerived> {
case NKikimrSchemeOp::ESchemeOpDropCdcStreamImpl:
case NKikimrSchemeOp::ESchemeOpDropCdcStreamAtTable:
case NKikimrSchemeOp::ESchemeOpMoveTableIndex:
+ case NKikimrSchemeOp::ESchemeOpAlterExtSubDomainCreateHive:
return false;
}
return true;
diff --git a/ydb/tests/functional/rename/conftest.py b/ydb/tests/functional/rename/conftest.py
index c8fb07b6aa4..b6c9a98e3f4 100644
--- a/ydb/tests/functional/rename/conftest.py
+++ b/ydb/tests/functional/rename/conftest.py
@@ -1,108 +1,5 @@
-# -*- coding: utf-8 -*-
-import os
-import logging
-import pytest
-import contextlib
-
-from ydb.tests.library.harness.kikimr_cluster import kikimr_cluster_factory
-from ydb.tests.library.harness.kikimr_config import KikimrConfigGenerator
-from ydb.tests.library.common.types import Erasure
-
-
-logger = logging.getLogger(__name__)
-
-
-@pytest.fixture(scope='module')
-def local_cluster_configuration():
- configurator = KikimrConfigGenerator(
- erasure=Erasure.NONE,
- nodes=3,
- n_to_select=1,
- additional_log_configs={
- 'FLAT_TX_SCHEMESHARD': 7,
- 'SCHEME_BOARD_POPULATOR': 4,
- 'SCHEME_BOARD_SUBSCRIBER': 4,
- 'TX_DATASHARD': 7,
- 'CHANGE_EXCHANGE': 7,
- }
- )
- return configurator
-
-
-@pytest.fixture(scope='module')
-def ydb_cluster(local_cluster_configuration, request):
- module_name = request.module.__name__
-
- logger.info("setup ydb_cluster for %s", module_name)
-
- logger.info("setup ydb_cluster as local")
- cluster = kikimr_cluster_factory(
- configurator=local_cluster_configuration
- )
- cluster.is_local_test = True
-
- cluster.start()
-
- yield cluster
-
- logger.info("destroy ydb_cluster for %s", module_name)
- cluster.stop()
-
-
-@pytest.fixture(scope='module')
-def ydb_root(ydb_cluster):
- return os.path.join("/", ydb_cluster.domain_name)
-
-
-@pytest.fixture(scope='module')
-def ydb_private_client(ydb_cluster):
- return ydb_cluster.client
-
-
-@pytest.fixture(scope='module')
-def ydb_endpoint(ydb_cluster):
- return "%s:%s" % (ydb_cluster.nodes[1].host, ydb_cluster.nodes[1].port)
-
-
-@pytest.fixture(scope='function')
-def extended_test_name(request):
- return request.node.name
-
-
-@contextlib.contextmanager
-def ydb_database_ctx(ydb_cluster, database, timeout_seconds=300):
- logger.info("setup ydb_database %s", database)
-
- ydb_cluster.remove_database(
- database,
- timeout_seconds=timeout_seconds
- )
-
- ydb_cluster.create_database(
- database,
- storage_pool_units_count={
- 'hdd': 1
- },
- timeout_seconds=timeout_seconds
- )
- slots = ydb_cluster.register_and_start_slots(database, count=1)
-
- try:
- yield database
- finally:
- logger.info("destroy ydb_database for %s", database)
- for slot in slots:
- slot.stop()
-
- ydb_cluster.remove_database(
- database,
- timeout_seconds=timeout_seconds
- )
-
-
-@pytest.fixture(scope='function')
-def ydb_database(ydb_cluster, ydb_root, extended_test_name):
- database = os.path.join(ydb_root, extended_test_name.replace("[", "_").replace("]", "_"))
-
- with ydb_database_ctx(ydb_cluster, database):
- yield database
+# XXX: setting pytest_plugins directly in test modules should work,
+# but somehow it does not
+#
+# for the ydb_{cluster, database, ...} fixture family
+pytest_plugins = 'ydb.tests.library.harness.ydb_fixtures'
diff --git a/ydb/tests/functional/rename/test.py b/ydb/tests/functional/rename/test.py
index 01f5e421b8a..00699a9cce2 100644
--- a/ydb/tests/functional/rename/test.py
+++ b/ydb/tests/functional/rename/test.py
@@ -1,60 +1,54 @@
# -*- coding: utf-8 -*-
-import os
import logging
-import pytest
-import ydb
+import os
+import pytest
from tornado import gen
from tornado.ioloop import IOLoop
-from ydb.tests.library.common.types import from_bytes
+import ydb
from ydb.tornado import as_tornado_future
+from ydb.tests.library.common.types import Erasure, from_bytes
+from ydb.tests.library.harness.util import LogLevels
+
from common import (
async_execute_serializable_job,
async_execute_stale_ro_job,
async_scheme_job,
- async_repeat_n_times
+ async_repeat_n_times,
)
logger = logging.getLogger(__name__)
-SIMPLE_TABLE_TEMPLATE = (
-"""
+SIMPLE_TABLE_TEMPLATE = (r"""
CREATE TABLE `{table}` (
`id` Uint64,
`value` Utf8,
PRIMARY KEY (`id`)
)
-"""
-)
+""")
-INDEXED_TABLE_TEMPLATE = (
-"""
+INDEXED_TABLE_TEMPLATE = (r"""
CREATE TABLE `{table}` (
`id` Uint64,
`value` Utf8,
PRIMARY KEY (`id`),
INDEX value_index GLOBAL ON (`value`)
)
-"""
-)
+""")
-INDEXED_ASYNC_TABLE_TEMPLATE = (
-"""
+INDEXED_ASYNC_TABLE_TEMPLATE = (r"""
CREATE TABLE `{table}` (
`id` Uint64,
`value` Utf8,
PRIMARY KEY (`id`),
INDEX value_index GLOBAL ASYNC ON (`value`)
)
-"""
-)
+""")
-DROP_TABLE_TEMPLATE = (
-"""
+DROP_TABLE_TEMPLATE = (r"""
DROP TABLE `{table}`
-"""
-)
+""")
def create_simple_table(pool, path):
@@ -140,7 +134,7 @@ class Simple:
self._select_from_index = select_from_index
upsert_table_template = (
- """
+ r"""
DECLARE $key AS Uint64;
DECLARE $value AS Utf8;
@@ -151,14 +145,14 @@ class Simple:
)
select_table_template = (
- """
+ r"""
DECLARE $key AS Uint64;
SELECT value FROM `{table}` WHERE id = $key;
"""
)
select_index_table_template = (
- """
+ r"""
DECLARE $value AS Utf8;
SELECT id FROM `{table}` VIEW `value_index` WHERE value = $value;
"""
@@ -237,9 +231,30 @@ class Simple:
IOLoop.current().run_sync(lambda: calle())
+# local configuration for the ydb cluster (fetched by ydb_cluster_configuration fixture)
+CLUSTER_CONFIG = dict(
+ erasure=Erasure.NONE,
+ nodes=3,
+ n_to_select=1,
+ additional_log_configs={
+ 'FLAT_TX_SCHEMESHARD': LogLevels.DEBUG,
+ 'SCHEME_BOARD_POPULATOR': LogLevels.WARN,
+ 'SCHEME_BOARD_SUBSCRIBER': LogLevels.WARN,
+ 'TX_DATASHARD': LogLevels.DEBUG,
+ 'CHANGE_EXCHANGE': LogLevels.DEBUG,
+ },
+)
+
+
@pytest.mark.parametrize("create_method,select_from_index", [
- (create_simple_table, False), (create_indexed_table, True), (create_indexed_async_table, True)])
-@pytest.mark.parametrize("replace_method", [replace_table, substitute_table])
+ (create_simple_table, False),
+ (create_indexed_table, True),
+ (create_indexed_async_table, True),
+])
+@pytest.mark.parametrize("replace_method", [
+ replace_table,
+ substitute_table,
+])
def test_client_gets_retriable_errors_when_rename(create_method, select_from_index, replace_method, ydb_database, ydb_endpoint):
database = ydb_database
logger.info(" database is %s", database)
diff --git a/ydb/tests/functional/serverless/conftest.py b/ydb/tests/functional/serverless/conftest.py
index 0edf2c4bb73..bc197561d2c 100644
--- a/ydb/tests/functional/serverless/conftest.py
+++ b/ydb/tests/functional/serverless/conftest.py
@@ -4,130 +4,20 @@ import logging
import pytest
import contextlib
-from ydb.tests.library.harness.kikimr_cluster import kikimr_cluster_factory
-from ydb.tests.library.harness.kikimr_config import KikimrConfigGenerator
-from ydb.tests.library.harness.util import LogLevels
-from ydb.tests.library.common.types import Erasure
+# XXX: setting pytest_plugins directly in test modules should work,
+# but somehow it does not
+#
+# for the ydb_{cluster, database, ...} fixture family
+pytest_plugins = 'ydb.tests.library.harness.ydb_fixtures'
-logger = logging.getLogger(__name__)
-
-
-@pytest.fixture(scope='module')
-def local_cluster_configuration():
- configurator = KikimrConfigGenerator(
- erasure=Erasure.NONE,
- nodes=1,
- enable_metering=True,
- disable_mvcc=True,
- additional_log_configs={
- 'TX_PROXY': LogLevels.DEBUG,
- 'KQP_PROXY': LogLevels.DEBUG,
- 'KQP_WORKER': LogLevels.DEBUG,
- 'KQP_GATEWAY': LogLevels.DEBUG,
- 'GRPC_PROXY': LogLevels.TRACE,
- 'KQP_YQL': LogLevels.DEBUG,
- 'TX_DATASHARD': LogLevels.DEBUG,
- 'FLAT_TX_SCHEMESHARD': LogLevels.DEBUG,
- 'SCHEMESHARD_DESCRIBE': LogLevels.DEBUG,
-
- 'SCHEME_BOARD_POPULATOR': LogLevels.DEBUG,
-
- 'SCHEME_BOARD_REPLICA': LogLevels.ERROR,
- 'SCHEME_BOARD_SUBSCRIBER': LogLevels.ERROR,
- 'TX_PROXY_SCHEME_CACHE': LogLevels.ERROR,
-
- 'CMS': LogLevels.DEBUG,
- 'CMS_TENANTS': LogLevels.DEBUG,
-
- }
- )
- return configurator
-
-
-@pytest.fixture(scope='module')
-def metering_file_path(local_cluster_configuration):
- return local_cluster_configuration.metering_file_path
-
-
-@pytest.fixture(scope='module')
-def ydb_cluster(local_cluster_configuration, request):
- module_name = request.module.__name__
-
- logger.info("setup ydb_cluster for %s", module_name)
-
- cluster = kikimr_cluster_factory(
- configurator=local_cluster_configuration
- )
- cluster.is_local_test = True
-
- cluster.start()
- yield cluster
-
- logger.info("destroy ydb_cluster for %s", module_name)
- cluster.stop()
-
-
-@pytest.fixture(scope='module')
-def ydb_root(ydb_cluster):
- return os.path.join("/", ydb_cluster.domain_name)
-
-
-@pytest.fixture(scope='module')
-def ydb_private_client(ydb_cluster):
- return ydb_cluster.client
+logger = logging.getLogger(__name__)
@pytest.fixture(scope='module')
-def ydb_endpoint(ydb_cluster):
- return "%s:%s" % (ydb_cluster.nodes[1].host, ydb_cluster.nodes[1].port)
-
-
-@pytest.fixture(scope='function')
-def extended_test_name(request):
- return request.node.name
-
-
-@contextlib.contextmanager
-def ydb_database_ctx(ydb_cluster, database, timeout_seconds=100):
- logger.info("setup ydb_database %s", database)
-
- ydb_cluster.remove_database(
- database,
- timeout_seconds=timeout_seconds
- )
-
- ydb_cluster.create_database(
- database,
- storage_pool_units_count={
- 'hdd': 1
- },
- timeout_seconds=timeout_seconds
- )
-
- slots = ydb_cluster.register_and_start_slots(database, count=1)
- ydb_cluster.wait_tenant_up(database)
-
- try:
- yield database
- finally:
- logger.info("destroy ydb_database for %s", database)
- for slot in slots:
- slot.stop()
-
- ydb_cluster.remove_database(
- database,
- timeout_seconds=timeout_seconds
- )
-
-
-@pytest.fixture(scope='function')
-def ydb_database(ydb_cluster, ydb_root, extended_test_name):
- database = os.path.join(ydb_root, extended_test_name.replace("[", "_").replace("]", "_"))
-
- with ydb_database_ctx(ydb_cluster, database):
- yield database
+def metering_file_path(ydb_cluster_configuration):
+ return ydb_cluster_configuration.metering_file_path
@contextlib.contextmanager
@@ -148,21 +38,21 @@ def ydb_hostel_db_ctx(ydb_cluster, ydb_root, timeout_seconds=100):
timeout_seconds=timeout_seconds
)
- slots = ydb_cluster.register_and_start_slots(database, count=3)
+ database_nodes = ydb_cluster.register_and_start_slots(database, count=3)
ydb_cluster.wait_tenant_up(database)
try:
yield database
finally:
logger.info("destroy ydb_hostel_db for %s", database)
- for slot in slots:
- slot.stop()
ydb_cluster.remove_database(
database,
timeout_seconds=timeout_seconds
)
+ ydb_cluster.unregister_and_stop_slots(database_nodes)
+
@pytest.fixture(scope='module')
def ydb_hostel_db(ydb_cluster, ydb_root):
@@ -203,16 +93,16 @@ def ydb_serverless_db_ctx(ydb_cluster, database, hostel_db, timeout_seconds=100,
@pytest.fixture(scope='function')
-def ydb_serverless_db(ydb_cluster, ydb_root, ydb_hostel_db, extended_test_name):
- database_name = os.path.join(ydb_root, "serverless", extended_test_name.replace("[", "_").replace("]", "_"))
+def ydb_serverless_db(ydb_cluster, ydb_root, ydb_hostel_db, ydb_safe_test_name):
+ database_name = os.path.join(ydb_root, "serverless", ydb_safe_test_name)
with ydb_serverless_db_ctx(ydb_cluster, database_name, ydb_hostel_db):
yield database_name
@pytest.fixture(scope='function')
-def ydb_quoted_serverless_db(ydb_cluster, ydb_root, ydb_hostel_db, extended_test_name):
- database_name = os.path.join(ydb_root, "quoted_serverless", extended_test_name.replace("[", "_").replace("]", "_"))
+def ydb_quoted_serverless_db(ydb_cluster, ydb_root, ydb_hostel_db, ydb_safe_test_name):
+ database_name = os.path.join(ydb_root, "quoted_serverless", ydb_safe_test_name)
schema_quotas = ((2, 60), (4, 600))
with ydb_serverless_db_ctx(ydb_cluster, database_name, ydb_hostel_db, schema_quotas=schema_quotas):
@@ -220,8 +110,8 @@ def ydb_quoted_serverless_db(ydb_cluster, ydb_root, ydb_hostel_db, extended_test
@pytest.fixture(scope='function')
-def ydb_disk_quoted_serverless_db(ydb_cluster, ydb_root, ydb_hostel_db, extended_test_name):
- database_name = os.path.join(ydb_root, "quoted_serverless", extended_test_name.replace("[", "_").replace("]", "_"))
+def ydb_disk_quoted_serverless_db(ydb_cluster, ydb_root, ydb_hostel_db, ydb_safe_test_name):
+ database_name = os.path.join(ydb_root, "quoted_serverless", ydb_safe_test_name)
disk_quotas = {'hard': 64 * 1024 * 1024, 'soft': 32 * 1024 * 1024}
with ydb_serverless_db_ctx(ydb_cluster, database_name, ydb_hostel_db, disk_quotas=disk_quotas):
diff --git a/ydb/tests/functional/serverless/test.py b/ydb/tests/functional/serverless/test.py
index 55fc623b1bd..c834e26b7f4 100644
--- a/ydb/tests/functional/serverless/test.py
+++ b/ydb/tests/functional/serverless/test.py
@@ -1,31 +1,57 @@
# -*- coding: utf-8 -*-
+import functools
import logging
import os
import time
-import functools
import pytest
-from hamcrest import (
- assert_that,
- contains_inanyorder,
- not_none,
-)
-
-import ydb
-
+from hamcrest import assert_that, contains_inanyorder, not_none
from tornado import gen
from tornado.ioloop import IOLoop
+import ydb
+from ydb.tests.library.common.types import Erasure
+from ydb.tests.library.harness.util import LogLevels
+
logger = logging.getLogger(__name__)
+# local configuration for the ydb cluster (fetched by ydb_cluster_configuration fixture)
+CLUSTER_CONFIG = dict(
+ erasure=Erasure.NONE,
+ nodes=1,
+ enable_metering=True,
+ disable_mvcc=True,
+ additional_log_configs={
+ 'TX_PROXY': LogLevels.DEBUG,
+ 'KQP_PROXY': LogLevels.DEBUG,
+ 'KQP_WORKER': LogLevels.DEBUG,
+ 'KQP_GATEWAY': LogLevels.DEBUG,
+ 'GRPC_PROXY': LogLevels.TRACE,
+ 'KQP_YQL': LogLevels.DEBUG,
+ 'TX_DATASHARD': LogLevels.DEBUG,
+ 'FLAT_TX_SCHEMESHARD': LogLevels.DEBUG,
+ 'SCHEMESHARD_DESCRIBE': LogLevels.DEBUG,
+
+ 'SCHEME_BOARD_POPULATOR': LogLevels.DEBUG,
+
+ 'SCHEME_BOARD_REPLICA': LogLevels.ERROR,
+ 'SCHEME_BOARD_SUBSCRIBER': LogLevels.ERROR,
+ 'TX_PROXY_SCHEME_CACHE': LogLevels.ERROR,
+
+ 'CMS': LogLevels.DEBUG,
+ 'CMS_TENANTS': LogLevels.DEBUG,
+ },
+)
+
+
def test_fixtures(ydb_hostel_db, ydb_serverless_db):
logger.debug(
"test for serverless db %s over hostel db %s", ydb_serverless_db, ydb_hostel_db
)
-def test_create_table(ydb_hostel_db, ydb_serverless_db, ydb_endpoint, metering_file_path, ydb_private_client):
+def test_create_table(ydb_hostel_db, ydb_serverless_db, ydb_endpoint):
logger.debug(
"test for serverless db %s over hostel db %s", ydb_serverless_db, ydb_hostel_db
)
@@ -145,7 +171,7 @@ def test_turn_on_serverless_storage_billing(ydb_hostel_db, ydb_serverless_db, yd
pool.retry_operation_sync(drop_table, None, os.path.join(database, "dirA1", "dirB1", "table"))
-def test_create_table_with_quotas(ydb_hostel_db, ydb_quoted_serverless_db, ydb_endpoint, ydb_cluster):
+def test_create_table_with_quotas(ydb_hostel_db, ydb_quoted_serverless_db, ydb_endpoint):
logger.debug(
"test for serverless db %s over hostel db %s", ydb_quoted_serverless_db, ydb_hostel_db
)
diff --git a/ydb/tests/functional/tenants/common.py b/ydb/tests/functional/tenants/common.py
deleted file mode 100644
index 827005e5aec..00000000000
--- a/ydb/tests/functional/tenants/common.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# -*- coding: utf-8 -*-
-import os
-import logging
-
-from ydb.tests.library.harness.kikimr_http_client import HiveClient
-from ydb.tests.library.harness.kikimr_cluster import kikimr_cluster_factory
-from ydb.tests.library.harness.kikimr_config import KikimrConfigGenerator
-
-from ydb.tests.library.harness.util import LogLevels
-import ydb
-
-
-logger = logging.getLogger(__name__)
-
-
-class Runtime(object):
- def __init__(self, cluster, tenant_affiliation=None, initial_count=1):
- self._cluster = cluster
- self._tenant_affiliation = tenant_affiliation
- self._initial_count = initial_count
-
- # use console features
- self._scheme_root_key = os.path.join(
- "/",
- cluster.domain_name,
- self._tenant_affiliation
- )
-
- self._allocated_slots = self._cluster.register_slots(self._scheme_root_key, self._initial_count)
- for _ in range(self._initial_count):
- self._allocated_slots.append(
- self._cluster.register_slot(
- self._scheme_root_key
- )
- )
-
- @property
- def root_dir(self):
- return self._scheme_root_key
-
- @property
- def slots(self):
- return self._allocated_slots
-
- def __enter__(self):
- for slot in self._allocated_slots:
- slot.start()
-
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- for slot in self._allocated_slots:
- try:
- slot.stop()
- except Exception:
- logger.exception(
- "failed to stop daemon...."
- )
-
-
-class DBForStaticSlots(object):
- @classmethod
- def setup_class(cls):
- cls.cluster = kikimr_cluster_factory(
- configurator=KikimrConfigGenerator(
- additional_log_configs={
- 'TX_PROXY': LogLevels.DEBUG,
- 'KQP_PROXY': LogLevels.DEBUG,
- 'KQP_WORKER': LogLevels.DEBUG,
- 'KQP_GATEWAY': LogLevels.DEBUG,
- 'GRPC_PROXY': LogLevels.TRACE,
- 'KQP_YQL': LogLevels.DEBUG,
- 'TX_DATASHARD': LogLevels.DEBUG,
- 'FLAT_TX_SCHEMESHARD': LogLevels.DEBUG,
- 'TX_PROXY_SCHEME_CACHE': LogLevels.DEBUG,
- 'GRPC_SERVER': LogLevels.DEBUG,
- }
- ),
- )
- cls.cluster.start()
- cls.client = cls.cluster.client
- cls.root_dir = os.path.join("/", cls.cluster.domain_name)
- first_node = cls.cluster.nodes[1]
- cls.boot_per_node = 1
- cls.boot_batch_size = 5
- hive_cli = HiveClient(first_node.host, first_node.mon_port)
- hive_cli.set_max_scheduled_tablets(cls.boot_per_node)
- hive_cli.set_max_boot_batch_size(cls.boot_batch_size)
- cls.database_name = None
-
- cls.robust_retries = ydb.RetrySettings().with_fast_backoff(
- ydb.BackoffSettings(ceiling=10, slot_duration=0.05, uncertain_ratio=0.1)
- )
-
- @classmethod
- def teardown_class(cls):
- logger.info("teardown class")
- if hasattr(cls, 'cluster'):
- cls.cluster.stop()
-
- def setup_method(self, method=None):
- self.database_name = "/Root/users/{class_name}_{method_name}".format(
- class_name=self.__class__.__name__,
- method_name=method.__name__,
- )
- self.cluster.create_database(
- self.database_name,
- storage_pool_units_count={
- 'hdd': 1
- }
- )
- self.driver_config = ydb.DriverConfig(
- "%s:%s" % (self.cluster.nodes[1].host, self.cluster.nodes[1].port),
- self.database_name
- )
-
- def teardown_method(self, method=None):
- self.cluster.remove_database(self.database_name)
- self.database_name = None
-
-
-class DBWithDynamicSlot(object):
- SLOT_COUNT = 1
-
- @classmethod
- def setup_class(cls):
- cls.cluster = kikimr_cluster_factory(
- KikimrConfigGenerator(
- additional_log_configs={
- 'CMS_TENANTS': LogLevels.TRACE,
- 'TENANT_SLOT_BROKER': LogLevels.DEBUG,
- 'TENANT_POOL': LogLevels.DEBUG,
- 'LOCAL': LogLevels.DEBUG,
- 'NODE_BROKER': LogLevels.DEBUG,
- 'TX_DATASHARD': LogLevels.DEBUG,
- 'TX_PROXY': LogLevels.DEBUG,
- 'GRPC_SERVER': LogLevels.DEBUG,
- }
- )
- )
- cls.cluster.start()
-
- cls.robust_retries = ydb.RetrySettings(max_retries=20).with_fast_backoff(
- ydb.BackoffSettings(ceiling=10, slot_duration=0.05, uncertain_ratio=0.1)
- )
-
- @classmethod
- def teardown_class(cls):
- if hasattr(cls, 'cluster'):
- cls.cluster.stop()
diff --git a/ydb/tests/functional/tenants/conftest.py b/ydb/tests/functional/tenants/conftest.py
new file mode 100644
index 00000000000..fa7acf71159
--- /dev/null
+++ b/ydb/tests/functional/tenants/conftest.py
@@ -0,0 +1,26 @@
+import pytest
+
+import ydb
+from ydb.tests.library.harness.kikimr_http_client import HiveClient
+
+# XXX: setting pytest_plugins directly in test modules should work,
+# but somehow it does not
+#
+# for the ydb_{cluster, database, ...} fixture family
+pytest_plugins = 'ydb.tests.library.harness.ydb_fixtures'
+
+
+@pytest.fixture(scope='module')
+def robust_retries():
+ return ydb.RetrySettings().with_fast_backoff(
+ ydb.BackoffSettings(ceiling=10, slot_duration=0.05, uncertain_ratio=0.1)
+ )
+
+
+@pytest.fixture(scope='module')
+def config_hive(ydb_cluster):
+ def _config_hive(boot_per_node=1, boot_batch_size=5):
+ hive_cli = HiveClient(ydb_cluster.nodes[1].host, ydb_cluster.nodes[1].mon_port)
+ hive_cli.set_max_scheduled_tablets(boot_per_node)
+ hive_cli.set_max_boot_batch_size(boot_batch_size)
+ return _config_hive
diff --git a/ydb/tests/functional/tenants/test_dynamic_tenants.py b/ydb/tests/functional/tenants/test_dynamic_tenants.py
index 1ea37838e85..e657b7ef937 100644
--- a/ydb/tests/functional/tenants/test_dynamic_tenants.py
+++ b/ydb/tests/functional/tenants/test_dynamic_tenants.py
@@ -12,74 +12,140 @@ from hamcrest import (
)
import ydb
+from ydb.tests.library.harness.util import LogLevels
-from common import DBWithDynamicSlot, DBForStaticSlots, Runtime
logger = logging.getLogger(__name__)
-class TestCreateTenantNoCPU(DBWithDynamicSlot):
- def test_case(self):
- database = '/Root/users/database'
- self.cluster.create_database(
- database,
- storage_pool_units_count={
- 'hdd': 1
- }
- )
- self.cluster.remove_database(database)
+# local configuration for the ydb cluster (fetched by ydb_cluster_configuration fixture)
+CLUSTER_CONFIG = dict(
+ additional_log_configs={
+ 'TX_PROXY': LogLevels.DEBUG,
+ 'KQP_PROXY': LogLevels.DEBUG,
+ 'KQP_WORKER': LogLevels.DEBUG,
+ 'KQP_GATEWAY': LogLevels.DEBUG,
+ 'GRPC_PROXY': LogLevels.TRACE,
+ 'TX_DATASHARD': LogLevels.DEBUG,
+ 'TX_PROXY_SCHEME_CACHE': LogLevels.DEBUG,
+ 'GRPC_SERVER': LogLevels.DEBUG,
+ # more logs
+ 'FLAT_TX_SCHEMESHARD': LogLevels.TRACE,
+ 'HIVE': LogLevels.TRACE,
+ 'CMS_TENANTS': LogLevels.TRACE,
+ # less logs
+ 'KQP_YQL': LogLevels.ERROR,
+ 'KQP_SESSION': LogLevels.CRIT,
+ 'KQP_COMPILE_ACTOR': LogLevels.CRIT,
+ 'PERSQUEUE_CLUSTER_TRACKER': LogLevels.CRIT,
+ },
+ enable_alter_database_create_hive_first=True,
+)
-class TestCreateTenantWithCPU(DBWithDynamicSlot):
- def test_case(self):
- database = '/Root/users/database'
- self.cluster.create_database(
- database,
- storage_pool_units_count={
- 'hdd': 1
- }
- )
- self.cluster.register_and_start_slots(database, count=1)
- self.cluster.wait_tenant_up(database)
- self.cluster.remove_database(database)
+def test_create_tenant_no_cpu(ydb_cluster):
+ database = '/Root/users/database'
+ ydb_cluster.create_database(
+ database,
+ storage_pool_units_count={
+ 'hdd': 1
+ }
+ )
+ ydb_cluster.remove_database(database)
+
+
+def test_create_tenant_with_cpu(ydb_cluster):
+ database = '/Root/users/database'
+ ydb_cluster.create_database(
+ database,
+ storage_pool_units_count={
+ 'hdd': 1
+ }
+ )
+ database_nodes = ydb_cluster.register_and_start_slots(database, count=1)
+ ydb_cluster.wait_tenant_up(database)
+ time.sleep(1)
+ ydb_cluster.remove_database(database)
+ ydb_cluster.unregister_and_stop_slots(database_nodes)
+
+
+def test_create_tenant_then_exec_yql_empty_database_header(ydb_cluster, ydb_endpoint):
+ database = '/Root/users/database'
+
+ driver_config = ydb.DriverConfig(ydb_endpoint, database)
+
+ ydb_cluster.create_database(
+ database,
+ storage_pool_units_count={
+ 'hdd': 1
+ }
+ )
+ database_nodes = ydb_cluster.register_and_start_slots(database, count=1)
+ ydb_cluster.wait_tenant_up(database)
+
+ def list_endpoints(database):
+ logger.debug("List endpoints of %s", database)
+ resolver = ydb.DiscoveryEndpointsResolver(driver_config)
+ result = resolver.resolve()
+ if result is not None:
+ return result.endpoints
+ return result
+
+ endpoints = list_endpoints(database)
+
+ driver_config2 = ydb.DriverConfig(
+ "%s" % endpoints[0].endpoint,
+ None,
+ credentials=ydb.AuthTokenCredentials("root@builtin")
+ )
+
+ table_path = '%s/table-1' % database
+ with ydb.Driver(driver_config2) as driver:
+ with ydb.SessionPool(driver, size=1) as pool:
+ with pool.checkout() as session:
+ session.execute_scheme(
+ "create table `{}` (key Int32, value String, primary key(key));".format(
+ table_path
+ )
+ )
+ session.transaction().execute(
+ "upsert into `{}` (key) values (101);".format(table_path),
+ commit_tx=True
+ )
-class TestCreateTenantThenExecYQLEmptyDatabaseHeader(DBWithDynamicSlot):
- def test_case(self):
- database = '/Root/users/database'
+ session.transaction().execute("select key from `{}`;".format(table_path), commit_tx=True)
- driver_config = ydb.DriverConfig(
- "%s:%s" % (self.cluster.nodes[1].host, self.cluster.nodes[1].port),
- database
- )
+ ydb_cluster.remove_database(database)
+ ydb_cluster.unregister_and_stop_slots(database_nodes)
- self.cluster.create_database(
- database,
- storage_pool_units_count={
- 'hdd': 1
- }
- )
- self.cluster.register_and_start_slots(database, count=1)
- self.cluster.wait_tenant_up(database)
-
- def list_endpoints(database):
- logger.debug("List endpoints of %s", database)
- resolver = ydb.DiscoveryEndpointsResolver(driver_config)
- result = resolver.resolve()
- if result is not None:
- return result.endpoints
- return result
-
- endpoints = list_endpoints(database)
-
- driver_config2 = ydb.DriverConfig(
- "%s" % endpoints[0].endpoint,
- None,
- credentials=ydb.AuthTokenCredentials("root@builtin")
- )
+def test_create_tenant_then_exec_yql(ydb_cluster):
+ database = '/Root/users/database'
+
+ driver_config = ydb.DriverConfig(
+ "%s:%s" % (ydb_cluster.nodes[1].host, ydb_cluster.nodes[1].port),
+ database
+ )
+
+ driver_config2 = ydb.DriverConfig(
+ "%s:%s" % (ydb_cluster.nodes[1].host, ydb_cluster.nodes[1].port),
+ database + "/"
+ )
+
+ ydb_cluster.create_database(
+ database,
+ storage_pool_units_count={
+ 'hdd': 1
+ }
+ )
+ database_nodes = ydb_cluster.register_and_start_slots(database, count=1)
+ ydb_cluster.wait_tenant_up(database)
+
+ d_configs = [driver_config, driver_config2]
+ for d_config in d_configs:
table_path = '%s/table-1' % database
- with ydb.Driver(driver_config2) as driver:
+ with ydb.Driver(d_config) as driver:
with ydb.SessionPool(driver, size=1) as pool:
with pool.checkout() as session:
session.execute_scheme(
@@ -95,315 +161,278 @@ class TestCreateTenantThenExecYQLEmptyDatabaseHeader(DBWithDynamicSlot):
session.transaction().execute("select key from `{}`;".format(table_path), commit_tx=True)
+ ydb_cluster.remove_database(database)
+ ydb_cluster.unregister_and_stop_slots(database_nodes)
-class TestCreateTenantThenExecYQL(DBWithDynamicSlot):
- def test_case(self):
- database = '/Root/users/database'
+
+def test_create_and_drop_tenants(ydb_cluster, robust_retries):
+ for iNo in range(10):
+ database = '/Root/users/database_%d' % iNo
driver_config = ydb.DriverConfig(
- "%s:%s" % (self.cluster.nodes[1].host, self.cluster.nodes[1].port),
+ "%s:%s" % (ydb_cluster.nodes[1].host, ydb_cluster.nodes[1].port),
database
)
- driver_config2 = ydb.DriverConfig(
- "%s:%s" % (self.cluster.nodes[1].host, self.cluster.nodes[1].port),
- database + "/"
- )
-
- self.cluster.create_database(
+ ydb_cluster.create_database(
database,
storage_pool_units_count={
'hdd': 1
}
)
- self.cluster.register_and_start_slots(database, count=1)
- self.cluster.wait_tenant_up(database)
-
- d_configs = [driver_config, driver_config2]
- for d_config in d_configs:
- table_path = '%s/table-1' % database
- with ydb.Driver(d_config) as driver:
- with ydb.SessionPool(driver, size=1) as pool:
- with pool.checkout() as session:
- session.execute_scheme(
- "create table `{}` (key Int32, value String, primary key(key));".format(
- table_path
- )
- )
+ database_nodes = ydb_cluster.register_and_start_slots(database, count=1)
+ ydb_cluster.wait_tenant_up(database)
- session.transaction().execute(
- "upsert into `{}` (key) values (101);".format(table_path),
- commit_tx=True
- )
+ with ydb.Driver(driver_config) as driver:
+ with ydb.SessionPool(driver) as pool:
+ def create_table(session, table):
+ session.create_table(
+ os.path.join(database, table),
+ ydb.TableDescription()
+ .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
+ .with_column(ydb.Column('value', ydb.OptionalType(ydb.DataType.Utf8)))
+ .with_primary_key('id')
+ )
- session.transaction().execute("select key from `{}`;".format(table_path), commit_tx=True)
+ pool.retry_operation_sync(create_table, robust_retries, "table")
+ pool.retry_operation_sync(create_table, robust_retries, "table_for_rm")
+ def write_some_data(session, table_one, table_two, value):
+ session.transaction().execute(
+ fr'''
+ upsert into {table_one} (id, value)
+ values (1u, "{value}");
+ upsert into {table_two} (id, value)
+ values (2u, "{value}");
+ ''',
+ commit_tx=True,
+ )
+ pool.retry_operation_sync(write_some_data, robust_retries, "table", "table_for_rm", database)
+
+ def read_some_data(session, table_one, table_two):
+ result = session.transaction().execute(
+ fr'''
+ select id, value FROM {table_one};
+ select id, value FROM {table_two};
+ ''',
+ commit_tx=True,
+ )
+ return result
-class TestCreateAndDropTenants(DBWithDynamicSlot):
- def test_case(self):
- for iNo in range(10):
- database = '/Root/users/database_%d' % iNo
+ result = pool.retry_operation_sync(read_some_data, robust_retries, "table", "table_for_rm")
- driver_config = ydb.DriverConfig(
- "%s:%s" % (self.cluster.nodes[1].host, self.cluster.nodes[1].port),
- database
- )
+ assert len(result) == 2
- self.cluster.create_database(
- database,
- storage_pool_units_count={
- 'hdd': 1
- }
- )
- self.cluster.register_and_start_slots(database, count=1)
- self.cluster.wait_tenant_up(database)
-
- with ydb.Driver(driver_config) as driver:
- with ydb.SessionPool(driver) as pool:
- def create_table(session, table):
- session.create_table(
- os.path.join(database, table),
- ydb.TableDescription()
- .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
- .with_column(ydb.Column('value', ydb.OptionalType(ydb.DataType.Utf8)))
- .with_primary_key('id')
+ for lineNo in range(2):
+ assert_that(
+ (1 + lineNo, database),
+ equal_to(
+ (result[lineNo].rows[0].id, result[lineNo].rows[0].value)
)
+ )
- pool.retry_operation_sync(create_table, self.robust_retries, "table")
- pool.retry_operation_sync(create_table, self.robust_retries, "table_for_rm")
-
- def write_some_data(session, table_one, table_two, value):
- session.transaction().execute(
- "upsert into {table_one} (id, value) "
- "values (1u, \"{val_one}\");"
- "upsert into {table_two} (id, value) "
- "values (2u, \"{val_two}\");"
- "".format(table_one=table_one, val_one=value,
- table_two=table_two, val_two=value),
- commit_tx=True,
- )
- pool.retry_operation_sync(write_some_data, self.robust_retries, "table", "table_for_rm", database)
-
- def read_some_data(session, table_one, table_two):
- result = session.transaction().execute(
- "select id, value FROM {table_one};"
- "select id, value FROM {table_two};"
- "".format(table_one=table_one, table_two=table_two),
- commit_tx=True,
- )
- return result
+ def drop_table(session, table):
+ session.drop_table(
+ os.path.join(database, table)
+ )
+ pool.retry_operation_sync(drop_table, robust_retries, "table_for_rm")
- result = pool.retry_operation_sync(read_some_data, self.robust_retries, "table", "table_for_rm")
+ ydb_cluster.remove_database(database)
+ ydb_cluster.unregister_and_stop_slots(database_nodes)
- assert len(result) == 2
- for lineNo in range(2):
- assert_that(
- (1 + lineNo, database),
- equal_to(
- (result[lineNo].rows[0].id, result[lineNo].rows[0].value)
- )
- )
+def test_create_and_drop_the_same_tenant2(ydb_cluster, ydb_endpoint, robust_retries):
+ for iNo in range(4):
+ database = '/Root/users/database'
+ value = database + "_" + str(iNo)
- def drop_table(session, table):
- session.drop_table(
- os.path.join(database, table)
- )
- pool.retry_operation_sync(drop_table, self.robust_retries, "table_for_rm")
+ logger.debug("create_database")
+ # without dynamic slots, allocate a node manually as a static slot
+ ydb_cluster.create_database(
+ database,
+ storage_pool_units_count={
+ 'hdd': 1
+ }
+ )
- self.cluster.remove_database(database)
+ driver_config = ydb.DriverConfig(ydb_endpoint, database)
+ database_nodes = ydb_cluster.register_and_start_slots(database, count=1)
-class TestCreateAndDropTheSameTenant2(DBForStaticSlots):
- def test_case(self):
- for iNo in range(4):
- database = '/Root/users/database'
- value = database + "_" + str(iNo)
+ with ydb.Driver(driver_config) as driver:
+ with ydb.SessionPool(driver, size=1) as pool:
+ def create_table(session, table):
+ session.create_table(
+ os.path.join(database, table),
+ ydb.TableDescription()
+ .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
+ .with_column(ydb.Column('value', ydb.OptionalType(ydb.DataType.Utf8)))
+ .with_primary_key('id')
+ )
- logger.debug("create_database")
- # without dynamic stots, allocate node manually as static slot
- self.cluster.create_database(
- database,
- storage_pool_units_count={
- 'hdd': 1
- }
- )
+ logger.debug("create table one")
+ pool.retry_operation_sync(create_table, robust_retries, "table")
+ logger.debug("create table two")
+ pool.retry_operation_sync(create_table, None, "table_for_rm")
- driver_config = ydb.DriverConfig(
- "%s:%s" % (self.cluster.nodes[1].host, self.cluster.nodes[1].port),
- database
- )
+ def write_some_data(session, table_one, table_two, value):
+ session.transaction().execute(
+ fr'''
+ upsert into {table_one} (id, value)
+ values (1u, "{value}");
+ upsert into {table_two} (id, value)
+ values (2u, "{value}");
+ ''',
+ commit_tx=True,
+ )
+ logger.debug("write_some_data")
+ pool.retry_operation_sync(write_some_data, None, "table", "table_for_rm", value)
+
+ def read_some_data(session, table_one, table_two):
+ result = session.transaction().execute(
+ fr'''
+ select id, value FROM {table_one};
+ select id, value FROM {table_two};
+ ''',
+ commit_tx=True,
+ )
+ return result
- with Runtime(self.cluster, database):
- with ydb.Driver(driver_config) as driver:
- with ydb.SessionPool(driver, size=1) as pool:
- def create_table(session, table):
- session.create_table(
- os.path.join(database, table),
- ydb.TableDescription()
- .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
- .with_column(ydb.Column('value', ydb.OptionalType(ydb.DataType.Utf8)))
- .with_primary_key('id')
- )
-
- logger.debug("create table one")
- pool.retry_operation_sync(create_table, self.robust_retries, "table")
- logger.debug("create table two")
- pool.retry_operation_sync(create_table, None, "table_for_rm")
-
- def write_some_data(session, table_one, table_two, value):
- session.transaction().execute(
- "upsert into {table_one} (id, value) "
- "values (1u, \"{val_one}\");"
- "upsert into {table_two} (id, value) "
- "values (2u, \"{val_two}\");"
- "".format(table_one=table_one, val_one=value,
- table_two=table_two, val_two=value),
- commit_tx=True,
- )
- logger.debug("write_some_data")
- pool.retry_operation_sync(write_some_data, None, "table", "table_for_rm", value)
-
- def read_some_data(session, table_one, table_two):
- result = session.transaction().execute(
- "select id, value FROM {table_one};"
- "select id, value FROM {table_two};"
- "".format(table_one=table_one, table_two=table_two),
- commit_tx=True,
- )
- return result
-
- logger.debug("read_some_data")
- result = pool.retry_operation_sync(read_some_data, None, "table", "table_for_rm")
-
- assert len(result) == 2
-
- for lineNo in range(2):
- assert_that(
- (1 + lineNo, value),
- equal_to(
- (result[lineNo].rows[0].id, result[lineNo].rows[0].value)
- )
- )
-
- def drop_table(session, table):
- session.drop_table(
- os.path.join(database, table)
- )
-
- logger.debug("drop table two")
- pool.retry_operation_sync(drop_table, None, "table_for_rm")
-
- logger.debug("remove_database")
- self.cluster.remove_database(database)
-
- logger.debug("done %d", iNo)
-
-
-class TestCheckAccess(DBWithDynamicSlot):
- SLOT_COUNT = 2
-
- def test_case(self):
- users = {}
- for user in ('user_1', 'user_2'):
- users[user] = {
- 'path': os.path.join('/Root/users', user),
- 'owner': '%s@builtin' % user,
- }
+ logger.debug("read_some_data")
+ result = pool.retry_operation_sync(read_some_data, None, "table", "table_for_rm")
- for user in users.values():
- self.cluster.create_database(
- user['path'],
- storage_pool_units_count={
- 'hdd': 1
- }
- )
+ assert len(result) == 2
- self.cluster.register_and_start_slots(user['path'], count=1)
- self.cluster.wait_tenant_up(user['path'])
+ for lineNo in range(2):
+ assert_that(
+ (1 + lineNo, value),
+ equal_to(
+ (result[lineNo].rows[0].id, result[lineNo].rows[0].value)
+ )
+ )
- driver_config = ydb.DriverConfig(
- "%s:%s" % (self.cluster.nodes[1].host, self.cluster.nodes[1].port),
- user['path']
- )
+ def drop_table(session, table):
+ session.drop_table(
+ os.path.join(database, table)
+ )
- with ydb.Driver(driver_config) as driver:
- driver.wait(timeout=10)
+ logger.debug("drop table two")
+ pool.retry_operation_sync(drop_table, None, "table_for_rm")
- client = ydb.SchemeClient(driver)
- client.modify_permissions(
- user['path'],
- ydb.ModifyPermissionsSettings().change_owner(user['owner'])
- )
+ logger.debug("remove_database")
+ ydb_cluster.remove_database(database)
+ ydb_cluster.unregister_and_stop_slots(database_nodes)
+
+ logger.debug("done %d", iNo)
+
+
+def test_check_access(ydb_cluster):
+ users = {}
+ for user in ('user_1', 'user_2'):
+ users[user] = {
+ 'path': os.path.join('/Root/users', user),
+ 'owner': '%s@builtin' % user,
+ }
+
+ database_nodes = {}
+
+ for user in users.values():
+ ydb_cluster.create_database(
+ user['path'],
+ storage_pool_units_count={
+ 'hdd': 1
+ }
+ )
- user_1 = users['user_1']
- user_2 = users['user_2']
+ database_nodes[user['path']] = ydb_cluster.register_and_start_slots(user['path'], count=1)
+ ydb_cluster.wait_tenant_up(user['path'])
driver_config = ydb.DriverConfig(
- "%s:%s" % (self.cluster.nodes[1].host, self.cluster.nodes[1].port),
- user_1['path'], auth_token=user_1['owner']
+ "%s:%s" % (ydb_cluster.nodes[1].host, ydb_cluster.nodes[1].port),
+ user['path']
)
with ydb.Driver(driver_config) as driver:
driver.wait(timeout=10)
- client = ydb.SchemeClient(driver)
- while True:
- try:
- client.list_directory(user_1['path'])
- except ydb.Unauthorized:
- time.sleep(5) # wait until caches are refreshed
- else:
- break
-
- assert_that(
- calling(client.list_directory).with_args(
- user_2['path']
- ),
- raises(ydb.Unauthorized)
+ client = ydb.SchemeClient(driver)
+ client.modify_permissions(
+ user['path'],
+ ydb.ModifyPermissionsSettings().change_owner(user['owner'])
)
- assert_that(
- calling(client.list_directory).with_args(
- os.path.join(user_1['path'], 'a')
- ),
- raises(ydb.SchemeError)
- )
- assert_that(
- calling(client.list_directory).with_args(
- os.path.join(user_2['path'], 'a')
- ),
- raises(ydb.SchemeError)
- )
+ user_1 = users['user_1']
+ user_2 = users['user_2']
+
+ driver_config = ydb.DriverConfig(
+ "%s:%s" % (ydb_cluster.nodes[1].host, ydb_cluster.nodes[1].port),
+ user_1['path'], auth_token=user_1['owner']
+ )
+
+ with ydb.Driver(driver_config) as driver:
+ driver.wait(timeout=10)
+ client = ydb.SchemeClient(driver)
+
+ while True:
+ try:
+ client.list_directory(user_1['path'])
+ except ydb.Unauthorized:
+ time.sleep(5) # wait until caches are refreshed
+ else:
+ break
+
+ assert_that(
+ calling(client.list_directory).with_args(
+ user_2['path']
+ ),
+ raises(ydb.Unauthorized)
+ )
- client.make_directory(os.path.join(user_1['path'], 'a'))
- assert_that(
- calling(client.make_directory).with_args(
- os.path.join(user_2['path'], 'a')
- ),
- raises(ydb.BadRequest)
- )
+ assert_that(
+ calling(client.list_directory).with_args(
+ os.path.join(user_1['path'], 'a')
+ ),
+ raises(ydb.SchemeError)
+ )
+ assert_that(
+ calling(client.list_directory).with_args(
+ os.path.join(user_2['path'], 'a')
+ ),
+ raises(ydb.SchemeError)
+ )
- with ydb.SessionPool(driver, size=1) as pool:
- with pool.checkout() as session:
- session.execute_scheme(
+ client.make_directory(os.path.join(user_1['path'], 'a'))
+ assert_that(
+ calling(client.make_directory).with_args(
+ os.path.join(user_2['path'], 'a')
+ ),
+ raises(ydb.BadRequest)
+ )
+
+ with ydb.SessionPool(driver, size=1) as pool:
+ with pool.checkout() as session:
+ session.execute_scheme(
+ "create table `{}` (id Int64, primary key(id));".format(
+ os.path.join(user_1['path'], 'q/w/table')
+ )
+ )
+ assert_that(
+ calling(session.execute_scheme).with_args(
"create table `{}` (id Int64, primary key(id));".format(
- os.path.join(user_1['path'], 'q/w/table')
+ os.path.join(user_2['path'], 'q/w/table')
)
- )
- assert_that(
- calling(session.execute_scheme).with_args(
- "create table `{}` (id Int64, primary key(id));".format(
- os.path.join(user_2['path'], 'q/w/table')
- )
- ),
- any_of(raises(ydb.GenericError), raises(ydb.Unauthorized))
- )
+ ),
+ any_of(raises(ydb.GenericError), raises(ydb.Unauthorized))
+ )
- assert_that(
- calling(client.list_directory).with_args(
- '/Root/'
- ),
- raises(ydb.SchemeError)
- )
- client.list_directory('/')
+ assert_that(
+ calling(client.list_directory).with_args(
+ '/Root/'
+ ),
+ raises(ydb.SchemeError)
+ )
+ client.list_directory('/')
+
+ for user in users.values():
+ ydb_cluster.remove_database(user['path'])
+ ydb_cluster.unregister_and_stop_slots(database_nodes[user['path']])
diff --git a/ydb/tests/functional/tenants/test_storage_config.py b/ydb/tests/functional/tenants/test_storage_config.py
index 94f1789ed58..e5915b1f1f0 100644
--- a/ydb/tests/functional/tenants/test_storage_config.py
+++ b/ydb/tests/functional/tenants/test_storage_config.py
@@ -532,6 +532,7 @@ def case_10():
)
return (creation_options, has_external, scheme)
+
TESTS = [
case_0,
case_1,
diff --git a/ydb/tests/functional/tenants/test_tenants.py b/ydb/tests/functional/tenants/test_tenants.py
index f295e8740d6..78fb720d959 100644
--- a/ydb/tests/functional/tenants/test_tenants.py
+++ b/ydb/tests/functional/tenants/test_tenants.py
@@ -2,494 +2,533 @@
import os
import logging
import random
+import time
import pytest
from hamcrest import assert_that, greater_than, is_, not_, none
import ydb
-
-from common import Runtime, DBForStaticSlots
+from ydb.tests.library.harness.util import LogLevels
+from ydb.tests.library.harness.ydb_fixtures import ydb_database_ctx
logger = logging.getLogger(__name__)
-class TestTenants(DBForStaticSlots):
- def test_when_deactivate_fat_tenant_creation_another_tenant_is_ok(self):
+# local configuration for the ydb cluster (fetched by ydb_cluster_configuration fixture)
+CLUSTER_CONFIG = dict(
+ additional_log_configs={
+ 'TX_PROXY': LogLevels.DEBUG,
+ 'KQP_PROXY': LogLevels.DEBUG,
+ 'KQP_WORKER': LogLevels.DEBUG,
+ 'KQP_GATEWAY': LogLevels.DEBUG,
+ 'GRPC_PROXY': LogLevels.TRACE,
+ 'TX_DATASHARD': LogLevels.DEBUG,
+ 'TX_PROXY_SCHEME_CACHE': LogLevels.DEBUG,
+ 'GRPC_SERVER': LogLevels.DEBUG,
+ # more logs
+ 'FLAT_TX_SCHEMESHARD': LogLevels.TRACE,
+ 'HIVE': LogLevels.TRACE,
+ 'CMS_TENANTS': LogLevels.TRACE,
+ # less logs
+ 'KQP_YQL': LogLevels.ERROR,
+ 'KQP_SESSION': LogLevels.CRIT,
+ 'KQP_COMPILE_ACTOR': LogLevels.CRIT,
+ 'PERSQUEUE_CLUSTER_TRACKER': LogLevels.CRIT,
+ },
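+    # enable the feature flag under test (defaults to False in KikimrConfigGenerator)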
+ enable_alter_database_create_hive_first=True,
+)
+
+
+class TestTenants:
+
+ def test_create_remove_database(self, ydb_database):
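+        # the ydb_database fixture creates the database and removes it on teardown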
+ ydb_database
+
+ def test_create_remove_database_wait(self, ydb_database):
+ ydb_database
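+        # keep the database alive briefly before the fixture tears it down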
+ time.sleep(5)
+
+ def test_when_deactivate_fat_tenant_creation_another_tenant_is_ok(self, ydb_cluster, ydb_root, ydb_client_session, config_hive, robust_retries):
logger.info("create fat tenant")
- databases = [os.path.join(self.root_dir, "database_%d") % idx for idx in range(3)]
- for database in databases:
- self.cluster.create_database(
- database,
- storage_pool_units_count={
- 'hdd': 1
- }
- )
- driver_config = ydb.DriverConfig(
- "%s:%s" % (self.cluster.nodes[1].host, self.cluster.nodes[1].port),
- database
- )
+ boot_batch_size = 5
+ config_hive(boot_batch_size=boot_batch_size)
- with Runtime(self.cluster, database):
- with ydb.Driver(driver_config) as driver:
- with ydb.SessionPool(driver) as pool:
- def create_table(session):
- session.create_table(
- os.path.join(database, 'table_0'),
- ydb.TableDescription()
- .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
- .with_primary_key('id')
- .with_profile(
- ydb.TableProfile()
- .with_partitioning_policy(
- ydb.PartitioningPolicy().with_uniform_partitions(self.boot_batch_size * 2)
- )
- )
- )
- pool.retry_operation_sync(create_table, self.robust_retries)
-
- def describe_table(session):
- result = session.describe_table(os.path.join(database, 'table_0'))
- logger.debug("> describe table: series, %s", str(result))
- return result
- pool.retry_operation_sync(describe_table)
-
- logger.info("remove tenants")
- for database in databases:
- self.cluster.remove_database(database)
-
- def test_register_tenant_and_force_drop_with_table(self):
- with Runtime(self.cluster, self.database_name, 1):
- with ydb.Driver(self.driver_config) as driver:
- with ydb.SessionPool(driver) as pool:
- def create_table(session):
- session.create_table(
- os.path.join(self.database_name, 'table_0'),
- ydb.TableDescription()
- .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
- .with_primary_key('id')
- )
+ databases = [os.path.join(ydb_root, "database_{}".format(i)) for i in range(3)]
- pool.retry_operation_sync(create_table, self.robust_retries)
-
- def test_force_delete_tenant_when_table_has_been_deleted(self):
- with Runtime(self.cluster, self.database_name):
- with ydb.Driver(self.driver_config) as driver:
- with ydb.SessionPool(driver) as pool:
- def create_table(session_):
- session_.create_table(
- os.path.join(self.database_name, 'table_0'),
- ydb.TableDescription()
- .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
- .with_primary_key('id')
- )
- pool.retry_operation_sync(create_table, self.robust_retries)
-
- def describe_table(session_):
- result = session_.describe_table(os.path.join(self.database_name, 'table_0'))
- logger.debug("> describe table: series, %s", str(result))
- return result
- pool.retry_operation_sync(describe_table)
-
- with pool.checkout() as session:
- session.drop_table(os.path.join(self.database_name, 'table_0'))
-
- def test_progress_when_tenant_tablets_run_on_dynamic_nodes(self):
- with Runtime(self.cluster, self.database_name):
- with ydb.Driver(self.driver_config) as driver:
- with ydb.SessionPool(driver) as pool:
- def create_table(session):
- session.create_table(
- os.path.join(self.database_name, 'table_0'),
- ydb.TableDescription()
- .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
- .with_primary_key('id')
- )
- pool.retry_operation_sync(create_table, self.robust_retries)
-
- def describe_table(session):
- result = session.describe_table(os.path.join(self.database_name, 'table_0'))
- logger.debug("> describe table: series, %s", str(result))
- return result
- pool.retry_operation_sync(describe_table)
-
- def drop_table(session):
- result = session.drop_table(os.path.join(self.database_name, 'table_0'))
- logger.debug("> drop table: series, %s", str(result))
- return result
- pool.retry_operation_sync(drop_table)
-
- def test_yql_operations_over_dynamic_nodes(self):
- with Runtime(self.cluster, self.database_name):
- table_path_1 = os.path.join(self.database_name, 'table_1')
- table_path_2 = os.path.join(self.database_name, 'table_2')
-
- with ydb.Driver(self.driver_config) as driver:
- with ydb.SessionPool(driver) as pool:
- def create_table(session):
- session.execute_scheme(
- "create table `{first}` (key Int32, value String, primary key(key));"
- "create table `{second}` (key Int32, value String, primary key(key));"
- "".format(
- first=table_path_1,
- second=table_path_2
+ for database_path in databases:
+ with ydb_database_ctx(ydb_cluster, database_path, timeout_seconds=20):
+
+ def create_table(session):
+ session.create_table(
+ os.path.join(database_path, 'table_0'),
+ ydb.TableDescription()
+ .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
+ .with_primary_key('id')
+ .with_profile(
+ ydb.TableProfile()
+ .with_partitioning_policy(
+ ydb.PartitioningPolicy().with_uniform_partitions(boot_batch_size * 2)
)
)
- pool.retry_operation_sync(create_table, self.robust_retries)
-
- def upsert(session):
- session.transaction().execute(
- "upsert into `{first}` (key) values (101);"
- "upsert into `{second}` (key) values (102);"
- "".format(
- first=table_path_1,
- second=table_path_2
- ),
- commit_tx=True
- )
- pool.retry_operation_sync(upsert)
-
- def select(session):
- session.transaction().execute(
- "select key from `{first}`;"
- "select key from `{second}`;"
- "".format(
- first=table_path_1,
- second=table_path_2
- ),
- commit_tx=True
- )
- pool.retry_operation_sync(select)
+ )
- def test_resolve_nodes(self):
- driver = ydb.Driver(self.driver_config)
+ def describe_table(session):
+ result = session.describe_table(os.path.join(database_path, 'table_0'))
+ logger.debug("> describe table: series, %s", str(result))
+ return result
- try:
- driver.wait(self.robust_retries.get_session_client_timeout)
- except Exception as e:
- logger.info("failed to find endpoints as expected: " + str(e))
- errors = driver.discovery_debug_details()
- logger.info("discovery details: " + str(errors))
- else:
- logger.exception("endpoints have found, not expected")
- assert False
+ pool = ydb_client_session(database_path)
- with Runtime(self.cluster, self.database_name):
- with ydb.Driver(self.driver_config) as driver:
- with ydb.SessionPool(driver, size=1) as pool:
- def callee(session):
- session.execute_scheme(
- "CREATE TABLE wormUp (id utf8, PRIMARY KEY (id));"
- )
- pool.retry_operation_sync(callee, self.robust_retries)
+ pool.retry_operation_sync(create_table, robust_retries)
+ pool.retry_operation_sync(describe_table)
- def test_create_tables(self):
- with Runtime(self.cluster, self.database_name):
- with ydb.Driver(self.driver_config) as driver:
- with ydb.SessionPool(driver) as pool:
+ def test_register_tenant_and_force_drop_with_table(self, ydb_database, ydb_client_session, robust_retries):
+ database_path = ydb_database
+ pool = ydb_client_session(database_path)
- def wormUp(session):
- session.execute_scheme(
- "CREATE TABLE wormUp (id utf8, PRIMARY KEY (id));"
- )
+ def create_table(session):
+ session.create_table(
+ os.path.join(database_path, 'table_0'),
+ ydb.TableDescription()
+ .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
+ .with_primary_key('id')
+ )
- pool.retry_operation_sync(wormUp, self.robust_retries)
+ pool.retry_operation_sync(create_table, robust_retries)
- create_futures = []
- table = os.path.join(self.database_name, "temp/hardware/default/compute_az", "allocations")
- sessions = [pool.acquire() for _ in range(10)]
+ def test_force_delete_tenant_when_table_has_been_deleted(self, ydb_database, ydb_client_session, robust_retries):
+ database_path = ydb_database
+ pool = ydb_client_session(database_path)
- for session in sessions:
- create_futures.append(
- session.async_execute_scheme(
- "CREATE TABLE `{table}` (id utf8, PRIMARY KEY (id));".format(
- table=table
- )
- )
- )
+ def create_table(session):
+ session.create_table(
+ os.path.join(database_path, 'table_0'),
+ ydb.TableDescription()
+ .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
+ .with_primary_key('id')
+ )
- for session in sessions:
- pool.release(session)
-
- success_responses_count = 0
- for create_future in create_futures:
- try:
- create_future.result()
- success_responses_count += 1
- except ydb.Overloaded:
- pass
- except ydb.Unavailable as e:
- logger.info("ydb.Unavailable: " + str(e))
-
- with pool.checkout() as session:
- assert_that(
- session.describe_table(table),
- is_(
- not_(
- none()
- )
- )
- )
+ def describe_table(session):
+ result = session.describe_table(os.path.join(database_path, 'table_0'))
+ logger.debug("> describe table: series, %s", str(result))
+ return result
- assert_that(
- success_responses_count,
- greater_than(0)
- )
+ pool.retry_operation_sync(create_table, robust_retries)
+ pool.retry_operation_sync(describe_table)
+ with pool.checkout() as session:
+ session.drop_table(os.path.join(database_path, 'table_0'))
+
+ def test_progress_when_tenant_tablets_run_on_dynamic_nodes(self, ydb_database, ydb_client_session, robust_retries):
+ database_path = ydb_database
+ pool = ydb_client_session(database_path)
+
+ def create_table(session):
+ session.create_table(
+ os.path.join(database_path, 'table_0'),
+ ydb.TableDescription()
+ .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
+ .with_primary_key('id')
+ )
- def test_stop_start(self):
- def create_table(session, table):
+ def describe_table(session):
+ result = session.describe_table(os.path.join(database_path, 'table_0'))
+ logger.debug("> describe table: series, %s", str(result))
+ return result
+
+ def drop_table(session):
+ result = session.drop_table(os.path.join(database_path, 'table_0'))
+ logger.debug("> drop table: series, %s", str(result))
+ return result
+
+ pool.retry_operation_sync(create_table, robust_retries)
+ pool.retry_operation_sync(describe_table)
+ pool.retry_operation_sync(drop_table)
+
+ def test_yql_operations_over_dynamic_nodes(self, ydb_database, ydb_client_session, robust_retries):
+ database_path = ydb_database
+ pool = ydb_client_session(database_path)
+
+ table_path_1 = os.path.join(database_path, 'table_1')
+ table_path_2 = os.path.join(database_path, 'table_2')
+
+ def create_table(session):
session.execute_scheme(
- "CREATE TABLE `{table}` (id utf8, PRIMARY KEY (id));".format(table=table)
+ fr'''
+ create table `{table_path_1}` (key Int32, value String, primary key(key));
+ create table `{table_path_2}` (key Int32, value String, primary key(key));
+ '''
)
- for iNo in range(5):
- with Runtime(self.cluster, self.database_name):
- with ydb.Driver(self.driver_config) as driver:
- with ydb.SessionPool(driver) as pool:
- pool.retry_operation_sync(
- create_table,
- self.robust_retries,
- "table_%d" % iNo)
-
- def test_create_drop_create_table(self):
- with Runtime(self.cluster, self.database_name, 1):
- with ydb.Driver(self.driver_config) as driver:
- with ydb.SessionPool(driver) as pool:
- def create_table(session):
- session.create_table(
- os.path.join(self.database_name, 'table_0'),
- ydb.TableDescription()
- .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
- .with_primary_key('id')
- )
+ def upsert(session):
+ session.transaction().execute(
+ fr'''
+ upsert into `{table_path_1}` (key) values (101);
+ upsert into `{table_path_2}` (key) values (102);
+ ''',
+ commit_tx=True
+ )
- def drop_table(session):
- session.drop_table(
- os.path.join(self.database_name, 'table_0'),
- )
+ def select(session):
+ session.transaction().execute(
+ fr'''
+ select key from `{table_path_1}`;
+ select key from `{table_path_2}`;
+ ''',
+ commit_tx=True
+ )
- pool.retry_operation_sync(create_table, self.robust_retries)
- pool.retry_operation_sync(drop_table, self.robust_retries)
- pool.retry_operation_sync(create_table, self.robust_retries)
-
- def test_create_drop_create_table2(self):
- with Runtime(self.cluster, self.database_name, 1):
- with ydb.Driver(self.driver_config) as driver:
- with ydb.SessionPool(driver) as pool:
- def create_drop_create_table(session):
- session.create_table(
- os.path.join(self.database_name, 'table_0'),
- ydb.TableDescription()
- .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
- .with_primary_key('id')
- )
- session.drop_table(
- os.path.join(self.database_name, 'table_0'),
- )
- session.create_table(
- os.path.join(self.database_name, 'table_0'),
- ydb.TableDescription()
- .with_column(ydb.Column('id_1', ydb.OptionalType(ydb.DataType.Uint64)))
- .with_primary_key('id_1')
- )
+ pool.retry_operation_sync(create_table, robust_retries)
+ pool.retry_operation_sync(upsert)
+ pool.retry_operation_sync(select)
+
+ def test_resolve_nodes(self, ydb_cluster, ydb_root, ydb_safe_test_name, ydb_client, robust_retries):
+ database_path = os.path.join(ydb_root, ydb_safe_test_name)
+ ydb_cluster.create_database(
+ database_path,
+ storage_pool_units_count={
+ 'hdd': 1
+ },
+ timeout_seconds=20
+ )
+ driver = ydb_client(database_path)
- pool.retry_operation_sync(create_drop_create_table, self.robust_retries)
+ try:
+ driver.wait(robust_retries.get_session_client_timeout)
+ except Exception as e:
+ logger.info("failed to find endpoints as expected: %s", e)
+ errors = driver.discovery_debug_details()
+ logger.info("discovery details: %s", errors)
+ else:
+            logger.exception("endpoints were found, which was not expected")
+ assert False
- @pytest.mark.xfail
- def test_create_drop_create_table3(self):
- with Runtime(self.cluster, self.database_name, 1):
- with ydb.Driver(self.driver_config) as driver:
- with ydb.SessionPool(driver) as pool:
- def create_table(session):
- session.execute_scheme('''
- CREATE TABLE `{table}`
- (
- id Int64,
- primary key (id)
- );
- '''.format(table=os.path.join(self.database_name, 'table_0')))
- pool.retry_operation_sync(create_table, self.robust_retries)
-
- def drop_create_table(session):
- session.execute_scheme('''
- DROP TABLE `{table}`;
- CREATE TABLE `{table}`
- (
- id_1 Int64,
- primary key (id_1)
- );
- '''.format(table=os.path.join(self.database_name, 'table_0')))
-
- pool.retry_operation_sync(drop_create_table, self.robust_retries)
-
- def test_create_create_table(self):
- with Runtime(self.cluster, self.database_name, 1):
- with ydb.Driver(self.driver_config) as driver:
- with ydb.SessionPool(driver) as pool:
- def create_tables(session, table_base):
- session.execute_scheme('''
- CREATE TABLE `{table}_0`
- (
- id Int64,
- primary key (id)
- );
- CREATE TABLE `{table}_1`
- (
- id Int64,
- primary key (id)
- );
- '''.format(table=os.path.join(self.database_name, table_base)))
-
- pool.retry_operation_sync(create_tables, self.robust_retries, "first_two")
- pool.retry_operation_sync(create_tables, self.robust_retries, "second_two")
-
- def test_list_database_above(self):
- database_path = self.database_name
- above_database, basename = os.path.split(database_path)
+ database_nodes = ydb_cluster.register_and_start_slots(database_path, count=1)
- def convert(item):
- def _get_entry_schema(entry):
- return ("name", os.path.basename(entry.name)), \
- ("type", str(entry.type)), \
- ("owner", entry.owner), \
- ("effective_permissions", [x.to_pb() for x in entry.effective_permissions]), \
- ("permissions", [x.to_pb() for x in entry.permissions])
+ with ydb.SessionPool(driver, size=1) as pool:
+ def callee(session):
+ session.execute_scheme(
+ "CREATE TABLE warmUp (id utf8, PRIMARY KEY (id));"
+ )
+ pool.retry_operation_sync(callee, robust_retries)
- if type(item) == ydb.scheme.Directory:
- return ("Directory",) + _get_entry_schema(item) + (("children", [convert(x) for x in item.children]),)
+ ydb_cluster.remove_database(
+ database_path,
+ timeout_seconds=20
+ )
+
+ ydb_cluster.unregister_and_stop_slots(database_nodes)
- if type(item) == ydb.scheme.SchemeEntry:
- return ("SchemeEntry",) + _get_entry_schema(item)
+ def test_create_tables(self, ydb_database, ydb_client_session, robust_retries):
+ database_path = ydb_database
+ pool = ydb_client_session(database_path)
- return ("UnknownEntry", type(item)) + _get_entry_schema(item)
+ def warmUp(session):
+ session.execute_scheme(
+ "CREATE TABLE warmUp (id utf8, PRIMARY KEY (id));"
+ )
- with Runtime(self.cluster, database_path, 1):
- with ydb.Driver(self.driver_config) as driver:
- driver.wait()
+ pool.retry_operation_sync(warmUp, robust_retries)
- result = driver.scheme_client.list_directory(database_path)
- logger.debug("From database: list database <%s> is %s", database_path, convert(result))
- assert len(result.children) > 0
- assert result.children[0].name == ".sys"
+ create_futures = []
+ table = os.path.join(database_path, "temp/hardware/default/compute_az", "allocations")
+ sessions = [pool.acquire() for _ in range(10)]
- driver_config_for_root = ydb.DriverConfig(
- "%s:%s" % (self.cluster.nodes[1].host, self.cluster.nodes[1].port),
- self.root_dir
+ for session in sessions:
+ create_futures.append(
+ session.async_execute_scheme(
+ "CREATE TABLE `{table}` (id utf8, PRIMARY KEY (id));".format(
+ table=table
+ )
+ )
)
- with ydb.Driver(driver_config_for_root) as driver:
- driver.wait()
+ for session in sessions:
+ pool.release(session)
- result = driver.scheme_client.list_directory(database_path)
- logger.debug("From root: list database <%s> is %s", database_path, convert(result))
- assert len(result.children) > 0
- assert result.children[0].name == ".sys"
+ success_responses_count = 0
+ for create_future in create_futures:
+ try:
+ create_future.result()
+ success_responses_count += 1
+ except ydb.Overloaded:
+ pass
+ except ydb.Unavailable as e:
+ logger.info("ydb.Unavailable: %s", e)
- result = driver.scheme_client.list_directory(above_database)
- logger.debug("From root: list above database <%s> is %s", above_database, convert(result))
- assert len(result.children) > 0
- assert result.children[0].name == basename
- assert result.children[0].type == ydb.scheme.SchemeEntryType.DATABASE
+ with pool.checkout() as session:
+ assert_that(
+ session.describe_table(table),
+ is_(
+ not_(
+ none()
+ )
+ )
+ )
+ assert_that(
+ success_responses_count,
+ greater_than(0)
+ )
-class TestYqlLocks(DBForStaticSlots):
- def _create_tables(self, pool):
- def callee(session):
- session.execute_scheme(
- "create table bills (account Uint64, deposit Int64, primary key(account)); "
- "create table transfers "
- "(tx Uint64, acc_from Uint64, acc_to Uint64, amount Int64, primary key(tx)); "
+ def test_stop_start(self, ydb_cluster, ydb_client_session, ydb_root, ydb_safe_test_name, robust_retries):
+ def create_table(session, table):
+ session.execute_scheme(fr'''
+ CREATE TABLE `{table}` (id utf8, PRIMARY KEY (id));
+ ''')
+
+ database_path = os.path.join(ydb_root, ydb_safe_test_name)
+
+ for i in range(5):
+ with ydb_database_ctx(ydb_cluster, database_path):
+ pool = ydb_client_session(database_path)
+ pool.retry_operation_sync(create_table, robust_retries, 'table_{}'.format(i))
+
+ def test_create_drop_create_table(self, ydb_database, ydb_client_session, robust_retries):
+ database_path = ydb_database
+ pool = ydb_client_session(database_path)
+
+ table = os.path.join(database_path, 'table_0')
+
+ def create_table(session):
+ session.create_table(
+ table,
+ ydb.TableDescription()
+ .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
+ .with_primary_key('id')
)
- pool.retry_operation_sync(callee, self.robust_retries)
- @staticmethod
- def _initial_credit(pool):
- with pool.checkout() as session:
- session.transaction().execute(
- "upsert into bills (account, deposit) "
- "values (1u, 1000), (2u, 1000); "
- "upsert into transfers (tx, acc_from, acc_to, amount) "
- "values (0u, 0u, 1u, 1000), (1u, 0u, 2u, 1000); ",
- commit_tx=True,
+ def drop_table(session):
+ session.drop_table(
+ table
)
- @staticmethod
- def _plan_transactions(pool):
- operations = []
- for x in range(1, 10):
- operations += [x, -x]
- random.shuffle(operations)
- return zip(range(2, 2 + len(operations)), operations)
+ pool.retry_operation_sync(create_table, robust_retries)
+ pool.retry_operation_sync(drop_table, robust_retries)
+ pool.retry_operation_sync(create_table, robust_retries)
- @staticmethod
- def _perform_transaction(pool, tx, operation):
- with pool.checkout() as session:
- session.transaction().execute(
- "$delta = ({delta}); "
- "$tx = ({tx}); "
- "$first = (select account, deposit from bills where account = 1u); "
- "$second = (select account, deposit from bills where account = 2u); "
- ""
- "upsert into bills (account, deposit) "
- "select account as account, deposit - $delta as deposit FROM $first; "
- ""
- "upsert into bills (account, deposit) "
- "select account as account, deposit + $delta as deposit FROM $second; "
- ""
- "upsert into transfers (tx, acc_from, acc_to, amount) "
- "values ($tx, 1u, 2u, $delta); ".format(delta=operation, tx=tx),
- commit_tx=True,
+ def test_create_drop_create_table2(self, ydb_database, ydb_client_session, robust_retries):
+ database_path = ydb_database
+ pool = ydb_client_session(database_path)
+
+ table = os.path.join(database_path, 'table_0')
+
+ def create_drop_create_table(session):
+ session.create_table(
+ table,
+ ydb.TableDescription()
+ .with_column(ydb.Column('id', ydb.OptionalType(ydb.DataType.Uint64)))
+ .with_primary_key('id')
+ )
+ session.drop_table(
+ table,
+ )
+ session.create_table(
+ table,
+ ydb.TableDescription()
+ .with_column(ydb.Column('id_1', ydb.OptionalType(ydb.DataType.Uint64)))
+ .with_primary_key('id_1')
)
- @staticmethod
- def _control_read(pool):
- account_first = "select account, deposit from bills where account = 1u;"
- account_second = "select account, deposit from bills where account = 2u;"
- transfers = "select tx, acc_from, acc_to, amount from transfers; "
+ pool.retry_operation_sync(create_drop_create_table, robust_retries)
- data = []
- with pool.checkout() as session:
- with session.transaction() as tx:
- for query in (account_first, account_second, transfers):
- data.append(
- tx.execute(
- query
- )
+ @pytest.mark.xfail
+ def test_create_drop_create_table3(self, ydb_database, ydb_client_session, robust_retries):
+ database_path = ydb_database
+ pool = ydb_client_session(database_path)
+
+ table = os.path.join(database_path, 'table_0')
+
+ def create_table(session):
+ session.execute_scheme(fr'''
+ CREATE TABLE `{table}`
+ (
+ id Int64,
+ primary key (id)
+ );
+ ''')
+
+ def drop_create_table(session):
+ session.execute_scheme(fr'''
+ DROP TABLE `{table}`;
+ CREATE TABLE `{table}`
+ (
+ id_1 Int64,
+ primary key (id_1)
+ );
+ ''')
+
+ pool.retry_operation_sync(create_table, robust_retries)
+ pool.retry_operation_sync(drop_create_table, robust_retries)
+
+ def test_create_create_table(self, ydb_database, ydb_client_session, robust_retries):
+ database_path = ydb_database
+ pool = ydb_client_session(database_path)
+
+ def create_tables(session, table_base):
+ session.execute_scheme(fr'''
+ CREATE TABLE `{table_base}_0`
+ (
+ id Int64,
+ primary key (id)
+ );
+ CREATE TABLE `{table_base}_1`
+ (
+ id Int64,
+ primary key (id)
+ );
+ ''')
+
+ pool.retry_operation_sync(create_tables, robust_retries, "first_two")
+ pool.retry_operation_sync(create_tables, robust_retries, "second_two")
+
+ def test_list_database_above(self, ydb_database, ydb_endpoint, ydb_root):
+ def convert(item):
+ def _get_entry_schema(entry):
+ return dict(
+ name=os.path.basename(entry.name),
+ type=str(entry.type),
+ owner=entry.owner,
+ effective_permissions=[x.to_pb() for x in entry.effective_permissions],
+ permissions=[x.to_pb() for x in entry.permissions],
+ )
+
+ if type(item) == ydb.scheme.Directory:
+ d = dict(scheme_type='Directory')
+ d.update(_get_entry_schema(item))
+ d.update(dict(
+ children=[convert(x) for x in item.children]
+ ))
+ elif type(item) == ydb.scheme.SchemeEntry:
+ d = dict(scheme_type='SchemeEntry')
+ d.update(_get_entry_schema(item))
+ else:
+ d = dict(scheme_type='UnknownEntry:{}'.format(type(item)))
+ d.update(_get_entry_schema(item))
+ return d
+
+ database_path = ydb_database
+
+ driver_config = ydb.DriverConfig(ydb_endpoint, database_path)
+ with ydb.Driver(driver_config) as driver:
+ driver.wait()
+
+ result = driver.scheme_client.list_directory(database_path)
+ logger.debug("From database: list database <%s> is %s", database_path, convert(result))
+ assert len(result.children) > 0
+ assert result.children[0].name == ".sys"
+
+ driver_config_for_root = ydb.DriverConfig(ydb_endpoint, ydb_root)
+ with ydb.Driver(driver_config_for_root) as driver:
+ driver.wait()
+
+ result = driver.scheme_client.list_directory(database_path)
+ logger.debug("From root: list database <%s> is %s", database_path, convert(result))
+ assert len(result.children) > 0
+ assert result.children[0].name == ".sys"
+
+ dirname, basename = os.path.split(database_path)
+ result = driver.scheme_client.list_directory(dirname)
+ logger.debug("From root: list above database <%s> is %s", dirname, convert(result))
+ assert len(result.children) > 0
+ assert result.children[0].name == basename
+ assert result.children[0].type == ydb.scheme.SchemeEntryType.DATABASE
+
+
+def _initial_credit(pool):
+ with pool.checkout() as session:
+ session.transaction().execute(
+ "upsert into bills (account, deposit) "
+ "values (1u, 1000), (2u, 1000); "
+ "upsert into transfers (tx, acc_from, acc_to, amount) "
+ "values (0u, 0u, 1u, 1000), (1u, 0u, 2u, 1000); ",
+ commit_tx=True,
+ )
+
+
+def _plan_transactions():
+ operations = []
+ for x in range(1, 10):
+ operations += [x, -x]
+ random.shuffle(operations)
+ return zip(range(2, 2 + len(operations)), operations)
+
+
+def _perform_transaction(pool, tx, operation):
+ with pool.checkout() as session:
+ session.transaction().execute(
+ "$delta = ({delta}); "
+ "$tx = ({tx}); "
+ "$first = (select account, deposit from bills where account = 1u); "
+ "$second = (select account, deposit from bills where account = 2u); "
+ ""
+ "upsert into bills (account, deposit) "
+ "select account as account, deposit - $delta as deposit FROM $first; "
+ ""
+ "upsert into bills (account, deposit) "
+ "select account as account, deposit + $delta as deposit FROM $second; "
+ ""
+ "upsert into transfers (tx, acc_from, acc_to, amount) "
+ "values ($tx, 1u, 2u, $delta); ".format(delta=operation, tx=tx),
+ commit_tx=True,
+ )
+
+
+def _control_read(pool):
+ account_first = "select account, deposit from bills where account = 1u;"
+ account_second = "select account, deposit from bills where account = 2u;"
+ transfers = "select tx, acc_from, acc_to, amount from transfers; "
+
+ data = []
+ with pool.checkout() as session:
+ with session.transaction() as tx:
+ for query in (account_first, account_second, transfers):
+ data.append(
+ tx.execute(
+ query
)
+ )
- tx.commit()
- assert len(data) > 0
- return data
+ tx.commit()
+ assert len(data) > 0
+ return data
- def test_operation_with_locks(self):
- with Runtime(self.cluster, self.database_name):
- with ydb.Driver(self.driver_config) as driver:
- with ydb.SessionPool(driver, size=1) as pool:
- self._create_tables(pool)
+def test_operation_with_locks(ydb_database, ydb_client, robust_retries):
+ def create_tables(session):
+ session.execute_scheme(r'''
+ create table bills (account Uint64, deposit Int64, primary key(account));
+ create table transfers (tx Uint64, acc_from Uint64, acc_to Uint64, amount Int64, primary key(tx));
+ ''')
- self._initial_credit(pool)
+ database_path = ydb_database
+ driver = ydb_client(database_path)
- transactions = self._plan_transactions(pool)
+ with ydb.SessionPool(driver, size=1) as pool:
+ pool.retry_operation_sync(create_tables, robust_retries)
- for tx, operation in transactions:
- logger.debug("transaction %s operation %s", tx, operation)
- self._perform_transaction(
- pool,
- tx,
- operation
- )
+ _initial_credit(pool)
+
+ transactions = _plan_transactions()
+
+ for tx, operation in transactions:
+ logger.debug("transaction %s operation %s", tx, operation)
+ _perform_transaction(
+ pool,
+ tx,
+ operation
+ )
- logger.debug("read with two hops")
- account_1, account_2, transfers = self._control_read(pool)
+ logger.debug("read with two hops")
+ account_1, account_2, transfers = _control_read(pool)
- assert account_1[0].rows[0].account == 1 and account_1[0].rows[0].deposit == 1000
- assert account_2[0].rows[0].account == 2 and account_1[0].rows[0].deposit == 1000
+ assert account_1[0].rows[0].account == 1 and account_1[0].rows[0].deposit == 1000
+    assert account_2[0].rows[0].account == 2 and account_2[0].rows[0].deposit == 1000
- sum = 0
- for tx, row in zip(range(len(transfers[0].rows)), transfers[0].rows):
- assert tx == row.tx and row.amount != 0
- sum += row.amount
+    total = 0
+    for tx, row in enumerate(transfers[0].rows):
+        assert tx == row.tx and row.amount != 0
+        total += row.amount
- assert sum == 2000
+    assert total == 2000
diff --git a/ydb/tests/library/harness/kikimr_cluster_interface.py b/ydb/tests/library/harness/kikimr_cluster_interface.py
index 96a891da11b..7717281d3ec 100644
--- a/ydb/tests/library/harness/kikimr_cluster_interface.py
+++ b/ydb/tests/library/harness/kikimr_cluster_interface.py
@@ -11,6 +11,7 @@ from ydb.tests.library.common.protobuf_console import (
RemoveTenantRequest, GetOperationRequest)
import ydb.public.api.protos.ydb_cms_pb2 as cms_tenants_pb
from ydb.public.api.protos.ydb_status_codes_pb2 import StatusIds
+from ydb import issues
logger = logging.getLogger(__name__)
@@ -170,7 +171,7 @@ class KiKiMRClusterInterface(object):
if not operation.ready:
operation = self.__wait_console_op(operation.id, timeout_seconds=timeout_seconds)
if operation.status != StatusIds.SUCCESS:
- raise RuntimeError('create_database failed: %s' % (operation.status,))
+ raise RuntimeError('create_database failed: %s, %s' % (operation.status, issues._format_issues(operation.issues)))
self.__wait_tenant_up(
database_name,
@@ -281,16 +282,21 @@ class KiKiMRClusterInterface(object):
def remove_database(
self,
database_name,
- timeout_seconds=120
+ timeout_seconds=20
):
+        logger.debug('remove_database %s', database_name)
+
req = RemoveTenantRequest(database_name)
response = self.client.send_request(req.protobuf, method='ConsoleRequest')
operation = response.RemoveTenantResponse.Response.operation
+ logger.debug('response from console: %s', response)
if not operation.ready and response.Status.Code != StatusIds.STATUS_CODE_UNSPECIFIED:
raise RuntimeError('remove_database failed: %s: %s' % (response.Status.Code, response.Status.Reason))
if not operation.ready:
+            logger.debug('waiting for operation to finish')
operation = self.__wait_console_op(operation.id, timeout_seconds=timeout_seconds)
+        logger.debug('operation finished')
if operation.status not in (StatusIds.SUCCESS, StatusIds.NOT_FOUND):
raise RuntimeError('remove_database failed: %s' % (operation.status,))
@@ -299,12 +305,17 @@ class KiKiMRClusterInterface(object):
GetTenantStatusRequest(database_name).protobuf, method='ConsoleRequest').GetTenantStatusResponse
return response.Response.operation.status == StatusIds.NOT_FOUND
+        logger.debug('waiting for tenant to be gone')
+
tenant_not_found = wait_for(
predicate=predicate,
timeout_seconds=timeout_seconds,
step_seconds=1
)
assert tenant_not_found
+
+ logger.debug('tenant gone')
+
return database_name
def __str__(self):
diff --git a/ydb/tests/library/harness/kikimr_config.py b/ydb/tests/library/harness/kikimr_config.py
index 2ff49601b6d..08c560a3c5d 100644
--- a/ydb/tests/library/harness/kikimr_config.py
+++ b/ydb/tests/library/harness/kikimr_config.py
@@ -139,6 +139,7 @@ class KikimrConfigGenerator(object):
yq_tenant=None,
use_legacy_pq=False,
dc_mapping={},
+ enable_alter_database_create_hive_first=False,
):
self._version = version
self.use_log_files = use_log_files
@@ -210,6 +211,8 @@ class KikimrConfigGenerator(object):
self.yaml_config = load_default_yaml(self.__node_ids, self.domain_name, self.static_erasure, self.__additional_log_configs)
self.yaml_config["feature_flags"]["enable_public_api_external_blobs"] = enable_public_api_external_blobs
self.yaml_config["feature_flags"]["enable_mvcc"] = "VALUE_FALSE" if disable_mvcc else "VALUE_TRUE"
+ if enable_alter_database_create_hive_first:
+ self.yaml_config["feature_flags"]["enable_alter_database_create_hive_first"] = enable_alter_database_create_hive_first
self.yaml_config['pqconfig']['enabled'] = enable_pq
self.yaml_config['pqconfig']['enable_proto_source_id_info'] = True
self.yaml_config['pqconfig']['max_storage_node_port'] = 65535
diff --git a/ydb/tests/library/harness/kikimr_runner.py b/ydb/tests/library/harness/kikimr_runner.py
index 2b52ac32148..73a708c9b12 100644
--- a/ydb/tests/library/harness/kikimr_runner.py
+++ b/ydb/tests/library/harness/kikimr_runner.py
@@ -32,6 +32,8 @@ def get_unique_path_for_current_test(output_path, sub_folder):
def ensure_path_exists(path):
+ # NOTE: can't switch to os.makedirs(path, exist_ok=True) as some tests
+ # are still running under python2 (exist_ok was added in py3.2)
if not os.path.isdir(path):
os.makedirs(path)
return path
@@ -46,12 +48,12 @@ def join(a, b):
class KiKiMRNode(daemon.Daemon, kikimr_node_interface.NodeInterface):
- def __init__(self, node_idx, config_path, port_allocator, cluster_name, configurator,
+ def __init__(self, node_id, config_path, port_allocator, cluster_name, configurator,
udfs_dir=None, role='node', node_broker_port=None, tenant_affiliation=None, encryption_key=None,
binary_path=None, data_center=None):
super(kikimr_node_interface.NodeInterface, self).__init__()
- self.node_id = node_idx
+ self.node_id = node_id
self.data_center = data_center
self.__cwd = None
self.__config_path = config_path
@@ -369,8 +371,8 @@ class KiKiMR(kikimr_cluster_interface.KiKiMRClusterInterface):
if node_index in self.__configurator.dc_mapping:
data_center = self.__configurator.dc_mapping[node_index]
self._nodes[node_index] = KiKiMRNode(
- node_index,
- self.config_path,
+ node_id=node_index,
+ config_path=self.config_path,
port_allocator=self.__port_allocator.get_node_port_allocator(node_index),
cluster_name=self.__cluster_name,
configurator=self.__configurator,
@@ -399,8 +401,8 @@ class KiKiMR(kikimr_cluster_interface.KiKiMRClusterInterface):
else self.nodes[1].grpc_port
)
self._slots[slot_index] = KiKiMRNode(
- slot_index,
- self.config_path,
+ node_id=slot_index,
+ config_path=self.config_path,
port_allocator=self.__port_allocator.get_slot_port_allocator(slot_index),
cluster_name=self.__cluster_name,
configurator=self.__configurator,
@@ -412,6 +414,15 @@ class KiKiMR(kikimr_cluster_interface.KiKiMRClusterInterface):
)
return self._slots[slot_index]
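+    # bookkeeping for dynamically registered slots: drop them from the
+    # registry first, then stop the underlying processes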
+ def unregister_slots(self, slots):
+ for i in slots:
+ del self._slots[i.node_id]
+
+ def unregister_and_stop_slots(self, slots):
+ self.unregister_slots(slots)
+ for i in slots:
+ i.stop()
+
def __stop_node(self, node):
ret = None
try:
diff --git a/ydb/tests/library/harness/ydb_fixtures.py b/ydb/tests/library/harness/ydb_fixtures.py
new file mode 100644
index 00000000000..3b4182fda29
--- /dev/null
+++ b/ydb/tests/library/harness/ydb_fixtures.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+import contextlib
+import logging
+import os
+
+import pytest
+
+from ydb import Driver, DriverConfig, SessionPool
+
+from ydb.tests.library.common.types import Erasure
+from ydb.tests.library.harness.kikimr_cluster import kikimr_cluster_factory
+from ydb.tests.library.harness.kikimr_config import KikimrConfigGenerator
+from ydb.tests.library.harness.util import LogLevels
+
+logger = logging.getLogger(__name__)
+
+
+DEFAULT_CLUSTER_CONFIG = dict(
+ erasure=Erasure.NONE,
+ nodes=1,
+ additional_log_configs={
+ 'FLAT_TX_SCHEMESHARD': LogLevels.DEBUG,
+ 'SCHEME_BOARD_POPULATOR': LogLevels.WARN,
+ 'SCHEME_BOARD_SUBSCRIBER': LogLevels.WARN,
+ 'TX_DATASHARD': LogLevels.DEBUG,
+ 'CHANGE_EXCHANGE': LogLevels.DEBUG,
+ }
+)
+
+
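+# A test module can override these defaults by defining a module-level
+# CLUSTER_CONFIG dict; ydb_cluster_configuration picks it up with getattr().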
+@pytest.fixture(scope='module')
+def ydb_cluster_configuration(request):
+ conf = getattr(request.module, 'CLUSTER_CONFIG', DEFAULT_CLUSTER_CONFIG)
+ return KikimrConfigGenerator(**conf)
+
+
+@pytest.fixture(scope='module')
+def ydb_cluster(ydb_cluster_configuration, request):
+ module_name = request.module.__name__
+
+ logger.info("setup ydb_cluster for %s", module_name)
+
+ logger.info("setup ydb_cluster as local")
+ cluster = kikimr_cluster_factory(
+ configurator=ydb_cluster_configuration
+ )
+ cluster.is_local_test = True
+
+ cluster.start()
+
+ yield cluster
+
+ logger.info("destroy ydb_cluster for %s", module_name)
+ cluster.stop()
+
+
+@pytest.fixture(scope='module')
+def ydb_root(ydb_cluster):
+ return os.path.join("/", ydb_cluster.domain_name)
+
+
+@pytest.fixture(scope='module')
+def ydb_private_client(ydb_cluster):
+ return ydb_cluster.client
+
+
+@pytest.fixture(scope='function')
+def ydb_safe_test_name(request):
+ return request.node.name.replace("[", "_").replace("]", "_")
+
+
+@contextlib.contextmanager
+def ydb_database_ctx(ydb_cluster, database_path, node_count=1, timeout_seconds=20):
+    '''Create a database at database_path, start node_count slots for it,
+    yield the path, then tear the database and its slots down on exit.
+    '''
+    assert os.path.isabs(database_path), 'database_path should be an (absolute) path, not a database name'
+
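+    # drop any leftover database at this path (e.g. from a previous run)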
+ ydb_cluster.remove_database(
+ database_path,
+ timeout_seconds=timeout_seconds
+ )
+
+ logger.debug("create database %s: create path and declare internals", database_path)
+
+ ydb_cluster.create_database(
+ database_path,
+ storage_pool_units_count={
+ 'hdd': 1
+ },
+ timeout_seconds=timeout_seconds
+ )
+
+ logger.debug("create database %s: start nodes and construct internals", database_path)
+    database_nodes = ydb_cluster.register_and_start_slots(database_path, count=node_count)
+
+ logger.debug("create database %s: wait construction done", database_path)
+ ydb_cluster.wait_tenant_up(database_path)
+
+ logger.debug("create database %s: database up", database_path)
+ yield database_path
+
+ logger.debug("destroy database %s: remove path and dismantle internals", database_path)
+ ydb_cluster.remove_database(
+ database_path,
+ timeout_seconds=timeout_seconds
+ )
+
+ logger.debug("destroy database %s: stop nodes", database_path)
+ ydb_cluster.unregister_and_stop_slots(database_nodes)
+
+ logger.debug("destroy database %s: database down", database_path)
+
+
+@pytest.fixture(scope='function')
+def ydb_database(ydb_cluster, ydb_root, ydb_safe_test_name):
+ database = os.path.join(ydb_root, ydb_safe_test_name)
+
+ with ydb_database_ctx(ydb_cluster, database):
+ yield database
+
+
+@pytest.fixture(scope='function')
+def ydb_endpoint(ydb_cluster):
+ return "%s:%s" % (ydb_cluster.nodes[1].host, ydb_cluster.nodes[1].port)
+
+
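+# Factory fixtures below return callables so that a single test can connect
+# to several databases; created drivers and pools are closed via finalizers.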
+@pytest.fixture(scope='function')
+def ydb_client(ydb_endpoint, request):
+ def _make_driver(database_path):
+ driver_config = DriverConfig(ydb_endpoint, database_path)
+ driver = Driver(driver_config)
+
+ def stop_driver():
+ driver.stop()
+
+ request.addfinalizer(stop_driver)
+ return driver
+ return _make_driver
+
+
+@pytest.fixture(scope='function')
+def ydb_client_session(ydb_client, request):
+ def _make_pool(database_path):
+ driver = ydb_client(database_path)
+ pool = SessionPool(driver)
+
+ def stop_pool():
+ pool.stop()
+
+ request.addfinalizer(stop_pool)
+ return pool
+ return _make_pool
+
+
+# possible replacement for both ydb_client and ydb_client_session
+# @pytest.fixture(scope='function')
+# def ydb_database_and_client(ydb_database, ydb_endpoint):
+# database_path = ydb_database
+# with Driver(DriverConfig(ydb_endpoint, database_path)) as driver:
+# with SessionPool(driver) as pool:
+# yield database_path, pool