aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorijon <ijon@yandex-team.com>2023-03-06 12:36:38 +0300
committerijon <ijon@yandex-team.com>2023-03-06 12:36:38 +0300
commit3c7bd95dcf90c78f62e5b6a2d53a49d88ad815cf (patch)
tree193008a3a461175bc3bbd7179755dc492af89a02
parentef1844705fb7c97205e18f8d635990225bb901a7 (diff)
downloadydb-3c7bd95dcf90c78f62e5b6a2d53a49d88ad815cf.tar.gz
schemeshard: set index table MaxPartitionsCount equal to main table PartitionsCount
Set the index table's MaxPartitionsCount equal to the main table's PartitionsCount — as a default value, and specifically for index tables, instead of the hardcoded universal defaults. An explicit setting on the index table will take precedence, of course. (But there is no easy way to change an index table's partitioning policy short of directly editing schemeshard's local DB by the wizardry of MiniKQL.)
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_info_types.cpp29
1 file changed, 16 insertions, 13 deletions
diff --git a/ydb/core/tx/schemeshard/schemeshard_info_types.cpp b/ydb/core/tx/schemeshard/schemeshard_info_types.cpp
index 578110f292..9eb71f22fc 100644
--- a/ydb/core/tx/schemeshard/schemeshard_info_types.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_info_types.cpp
@@ -1593,29 +1593,32 @@ bool TTableInfo::CheckSplitByLoad(
if (!Shard2PartitionIdx.contains(shardIdx))
return false;
+ if (!IsSplitByLoadEnabled(mainTableForIndex)) {
+ return false;
+ }
+
// A shard can be overloaded by heavy reads of non-existing keys.
// So we want to be able to split it even if it has no data.
const ui64 MIN_ROWS_FOR_SPLIT_BY_LOAD = 0;
const ui64 MIN_SIZE_FOR_SPLIT_BY_LOAD = 0;
- const auto& partitionConfig = PartitionConfig();
- const auto& policy = partitionConfig.GetPartitioningPolicy();
+ const auto& policy = PartitionConfig().GetPartitioningPolicy();
+
+ const auto settings = GetEffectiveSplitByLoadSettings(mainTableForIndex);
+ const i64 cpuPercentage = settings.GetCpuPercentageThreshold();
+ const float cpuUsageThreshold = 0.01 * (cpuPercentage ? cpuPercentage : (i64)splitSettings.FastSplitCpuPercentageThreshold);
ui64 maxShards = policy.GetMaxPartitionsCount();
if (maxShards == 0) {
- // Don't want to trigger "too many shards" or "too many readsets" errors
- maxShards = splitSettings.SplitByLoadMaxShardsDefault;
- }
-
- if (!IsSplitByLoadEnabled(mainTableForIndex)) {
- return false;
+ if (mainTableForIndex) {
+ // For index table maxShards defaults to a number of partitions of its main table
+ maxShards = mainTableForIndex->GetPartitions().size();
+ } else {
+ // Don't want to trigger "too many shards" or "too many readsets" errors
+ maxShards = splitSettings.SplitByLoadMaxShardsDefault;
+ }
}
- const auto settings = GetEffectiveSplitByLoadSettings(mainTableForIndex);
- i64 cpuPercentage = settings.GetCpuPercentageThreshold();
-
- float cpuUsageThreshold = 0.01 * (cpuPercentage ? cpuPercentage : (i64)splitSettings.FastSplitCpuPercentageThreshold);
-
const auto& stats = *Stats.PartitionStats.FindPtr(shardIdx);
if (rowCount < MIN_ROWS_FOR_SPLIT_BY_LOAD ||
dataSize < MIN_SIZE_FOR_SPLIT_BY_LOAD ||