commit    08f6d0d9bb84f493c5dfd3c0d1568445faa26d85
tree      307f3e4a5af6487719ada10454004556aeab57c0
parent    7825c9057d3fad670eadd60509802152127d6e49
author    alexvru <alexvru@ydb.tech>  2023-03-17 19:35:46 +0300
committer alexvru <alexvru@ydb.tech>  2023-03-17 19:35:46 +0300
Support BlobDepot through dstool
 ydb/apps/dstool/lib/commands.py                              |  11
 ydb/apps/dstool/lib/common.py                                |  28
 ydb/apps/dstool/lib/dstool_cmd_cluster_workload_run.py       |   3
 ydb/apps/dstool/lib/dstool_cmd_group_decommit.py             |  42
 ydb/apps/dstool/lib/dstool_cmd_group_list.py                 |  42
 ydb/apps/dstool/lib/dstool_cmd_group_virtual_create.py       |  77
 ydb/apps/dstool/lib/dstool_cmd_pool_create_virtual.py        |  23
 ydb/apps/dstool/lib/ya.make                                  |   3
 ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp        |   4
 ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp |   5
 ydb/core/mind/bscontroller/cmds_storage_pool.cpp             | 139
 ydb/core/mind/bscontroller/config.cpp                        |  21
 ydb/core/mind/bscontroller/impl.h                            |   2
 ydb/core/mind/bscontroller/register_node.cpp                 |   4
 ydb/core/protos/blobstorage.proto                            |  11
 ydb/core/protos/blobstorage_config.proto                     |  18
 16 files changed, 332 insertions(+), 101 deletions(-)
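
This commit wires BlobDepot management into dstool: a new "pool create virtual"
command, a "group virtual create" command that allocates BlobDepot-backed
virtual groups, and a "group decommit" command that moves a physical group's
data into BlobDepot. A hypothetical end-to-end session, inferred from the
argument parsers added below; the endpoint flag, hive tablet id, pool names and
group id are placeholders, and the --group-ids flag name is assumed from
common.add_group_ids_option:

    # Hypothetical dstool session; every value below is a placeholder.
    import subprocess

    dstool = ['ydb-dstool', '-e', 'grpc://localhost:2135']  # assumed endpoint flag

    # 1. Create a pool suitable for virtual groups (dstool_cmd_pool_create_virtual.py).
    subprocess.run([*dstool, 'pool', 'create', 'virtual',
                    '--box-id', '1', '--name', 'vg-pool'], check=True)

    # 2. Create a BlobDepot-backed group and poll until it is WORKING
    #    (dstool_cmd_group_virtual_create.py).
    subprocess.run([*dstool, 'group', 'virtual', 'create',
                    '--name', 'vg-1', '--hive-id', '72057594037968897',
                    '--storage-pool-name', 'vg-pool',
                    '--log-channel-sp', 'ssd', '--data-channel-sp', 'ssd*2',
                    '--wait'], check=True)

    # 3. Decommit a physical group into BlobDepot (dstool_cmd_group_decommit.py).
    subprocess.run([*dstool, 'group', 'decommit',
                    '--group-ids', '2181038080', '--hive-id', '72057594037968897',
                    '--log-channel-sp', 'ssd', '--data-channel-sp', 'ssd*2'], check=True)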
diff --git a/ydb/apps/dstool/lib/commands.py b/ydb/apps/dstool/lib/commands.py
index 2ceaa7adfe..3796dc291c 100644
--- a/ydb/apps/dstool/lib/commands.py
+++ b/ydb/apps/dstool/lib/commands.py
@@ -12,12 +12,15 @@ import ydb.apps.dstool.lib.dstool_cmd_vdisk_wipe as vdisk_wipe
import ydb.apps.dstool.lib.dstool_cmd_group_add as group_add
import ydb.apps.dstool.lib.dstool_cmd_group_check as group_check
+import ydb.apps.dstool.lib.dstool_cmd_group_decommit as group_decommit
import ydb.apps.dstool.lib.dstool_cmd_group_list as group_list
import ydb.apps.dstool.lib.dstool_cmd_group_show_blob_info as group_show_blob_info
import ydb.apps.dstool.lib.dstool_cmd_group_show_usage_by_tablets as group_show_usage_by_tablets
import ydb.apps.dstool.lib.dstool_cmd_group_state as group_state
import ydb.apps.dstool.lib.dstool_cmd_group_take_snapshot as group_take_snapshot
+import ydb.apps.dstool.lib.dstool_cmd_group_virtual_create as group_virtual_create
+import ydb.apps.dstool.lib.dstool_cmd_pool_create_virtual as pool_create_virtual
import ydb.apps.dstool.lib.dstool_cmd_pool_list as pool_list
import ydb.apps.dstool.lib.dstool_cmd_box_list as box_list
@@ -39,8 +42,8 @@ modules = [
     cluster_balance, cluster_get, cluster_set, cluster_list, cluster_workload_run,
     node_list,
     box_list,
-    pool_list,
-    group_check, group_show_blob_info, group_show_usage_by_tablets, group_state, group_take_snapshot, group_add, group_list,
+    pool_list, pool_create_virtual,
+    group_check, group_decommit, group_show_blob_info, group_show_usage_by_tablets, group_state, group_take_snapshot, group_add, group_list, group_virtual_create,
     pdisk_add_by_serial, pdisk_remove_by_serial, pdisk_set, pdisk_list,
     vdisk_remove_donor, vdisk_evict, vdisk_list, vdisk_wipe,
     device_list,
@@ -50,8 +53,8 @@ default_structure = [
     ('device', ['list']),
     ('pdisk', ['add-by-serial', 'remove-by-serial', 'set', 'list']),
     ('vdisk', ['evict', 'remove-donor', 'wipe', 'list']),
-    ('group', ['add', 'check', ('show', ['blob-info', 'usage-by-tablets']), 'state', 'take-snapshot', 'list']),
-    ('pool', ['list']),
+    ('group', ['add', 'check', 'decommit', ('show', ['blob-info', 'usage-by-tablets']), 'state', 'take-snapshot', 'list', ('virtual', ['create'])]),
+    ('pool', ['list', ('create', ['virtual'])]),
     ('box', ['list']),
     ('node', ['list']),
     ('cluster', ['balance', 'get', 'set', ('workload', ['run']), 'list']),
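
Each entry in modules is a command module that commands.py binds into the CLI
tree at the position given by default_structure. The three new modules follow
the same implicit contract as the existing ones; a minimal illustrative sketch
of that contract (names here are hypothetical, not part of the patch):

    # Sketch of the dstool command-module contract the new modules follow.
    import ydb.apps.dstool.lib.common as common

    description = 'One-line help text shown in the CLI'

    def add_options(p):
        # p is the argparse parser for this subcommand
        p.add_argument('--example-flag', type=str, required=True)

    def do(args):
        request = common.create_bsc_request(args)   # build a TConfigRequest
        # ... fill in request.Command entries here ...
        response = common.invoke_bsc_request(request)
        common.print_request_result(args, request, response)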
diff --git a/ydb/apps/dstool/lib/common.py b/ydb/apps/dstool/lib/common.py
index 6d983b6df8..637802b1e1 100644
--- a/ydb/apps/dstool/lib/common.py
+++ b/ydb/apps/dstool/lib/common.py
@@ -297,8 +297,8 @@ def fetch(path, params={}, explicit_host=None, fmt='json', host=None, cache=True
     assert False, 'ERROR: invalid stream fmt specified: %s' % fmt


-@query_random_host_with_retry()
-def invoke_grpc(func, *params, host=None):
+@query_random_host_with_retry(explicit_host_param='explicit_host')
+def invoke_grpc(func, *params, explicit_host=None, host=None):
     options = [
         ('grpc.max_receive_message_length', 256 << 20),  # 256 MiB
     ]
@@ -398,10 +398,10 @@ def invoke_wipe_request(request):
     return invoke_bsc_request(request)


-@inmemcache('base_config_and_storage_pools')
-def fetch_base_config_and_storage_pools(retrieveDevices=False):
+@inmemcache('base_config_and_storage_pools', cache_enable_param='cache')
+def fetch_base_config_and_storage_pools(retrieveDevices=False, virtualGroupsOnly=False, cache=True):
     request = kikimr_bsconfig.TConfigRequest(Rollback=True)
-    request.Command.add().QueryBaseConfig.CopyFrom(kikimr_bsconfig.TQueryBaseConfig(RetrieveDevices=retrieveDevices))
+    request.Command.add().QueryBaseConfig.CopyFrom(kikimr_bsconfig.TQueryBaseConfig(RetrieveDevices=retrieveDevices, VirtualGroupsOnly=virtualGroupsOnly))
     request.Command.add().ReadStoragePool.BoxId = (1 << 64) - 1
     response = invoke_bsc_request(request)
     assert not response.Success
@@ -411,8 +411,8 @@ def fetch_base_config_and_storage_pools(retrieveDevices=False):
     return dict(BaseConfig=response.Status[0].BaseConfig, StoragePools=response.Status[1].StoragePool)


-def fetch_base_config(retrieveDevices=False):
-    return fetch_base_config_and_storage_pools(retrieveDevices)['BaseConfig']
+def fetch_base_config(retrieveDevices=False, virtualGroupsOnly=False, cache=True):
+    return fetch_base_config_and_storage_pools(retrieveDevices, virtualGroupsOnly, cache)['BaseConfig']


 def fetch_storage_pools():
@@ -766,20 +766,18 @@ def fetch_json_info(entity, nodes=None, enums=1):
         def merge(x, y):
             return max([x, y], key=lambda x: x.get('GroupGeneration', 0))
-
     elif entity == 'tabletinfo':
         section, keycols = 'TabletStateInfo', ['TabletId']
+
+        def merge(x, y):
+            return max([x, y], key=lambda x: x.get('Generation', 0))
     elif entity == 'bsgroupinfo':
         section, keycols = 'BSGroupStateInfo', ['GroupID']

         def merge(x, y):
-            if x.get('GroupGeneration', 0) > y.get('GroupGeneration', 0):
-                return x
-            if y.get('GroupGeneration', 0) > x.get('GroupGeneration', 0):
-                return y
-            if x.get('VDiskIds', []):
-                return x
-            return y
+            return x if x.get('GroupGeneration', 0) > y.get('GroupGeneration', 0) else \
+                y if y.get('GroupGeneration', 0) > x.get('GroupGeneration', 0) else \
+                x if x.get('VDiskIds', []) else y
     else:
         assert False
     res = {}
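
The rewritten bsgroupinfo merge keeps whichever record has the higher
GroupGeneration and, on a tie, prefers the record that carries VDiskIds. A
standalone reproduction of the chained conditional, with a couple of checks
(test values invented for illustration):

    # Standalone copy of the rewritten 'bsgroupinfo' merge rule.
    def merge(x, y):
        return x if x.get('GroupGeneration', 0) > y.get('GroupGeneration', 0) else \
            y if y.get('GroupGeneration', 0) > x.get('GroupGeneration', 0) else \
            x if x.get('VDiskIds', []) else y

    newer = {'GroupGeneration': 3}
    older = {'GroupGeneration': 2, 'VDiskIds': ['[82000000:1:0:0:0]']}
    assert merge(newer, older) is newer             # higher generation always wins
    tie_full = {'GroupGeneration': 3, 'VDiskIds': ['[82000000:1:0:0:0]']}
    tie_empty = {'GroupGeneration': 3}
    assert merge(tie_full, tie_empty) is tie_full   # on a tie, VDiskIds wins
    assert merge(tie_empty, tie_full) is tie_full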
diff --git a/ydb/apps/dstool/lib/dstool_cmd_cluster_workload_run.py b/ydb/apps/dstool/lib/dstool_cmd_cluster_workload_run.py
index db5e255cb9..3256aafcc3 100644
--- a/ydb/apps/dstool/lib/dstool_cmd_cluster_workload_run.py
+++ b/ydb/apps/dstool/lib/dstool_cmd_cluster_workload_run.py
@@ -143,6 +143,7 @@ def do(args):
             cmd.FailRealmIdx = vslot.FailRealmIdx
             cmd.FailDomainIdx = vslot.FailDomainIdx
             cmd.VDiskIdx = vslot.VDiskIdx
+            cmd.SuppressDonorMode = random.choice([True, False])
         response = common.invoke_bsc_request(request)
         if not response.Success:
             if 'Error# failed to allocate group: no group options' in response.ErrorDescription:
@@ -180,7 +181,7 @@ def do(args):
         for vslot in base_config.VSlot:
             if common.is_dynamic_group(vslot.GroupId):
                 vslot_id = common.get_vslot_id(vslot.VSlotId)
-                if can_act_on_vslot(*vslot_id) and recent_restarts:
+                if can_act_on_vslot(*vslot_id) and (recent_restarts or args.disable_restarts):
                     vdisk_id = '[%08x:%d:%d:%d]' % (vslot.GroupId, vslot.FailRealmIdx, vslot.FailDomainIdx, vslot.VDiskIdx)
                     if not args.disable_evicts:
                         possible_actions.append(('evict vslot id: %s, vdisk id: %s' % (vslot_id, vdisk_id), (do_evict, vslot_id)))
diff --git a/ydb/apps/dstool/lib/dstool_cmd_group_decommit.py b/ydb/apps/dstool/lib/dstool_cmd_group_decommit.py
new file mode 100644
index 0000000000..c3f09787a9
--- /dev/null
+++ b/ydb/apps/dstool/lib/dstool_cmd_group_decommit.py
@@ -0,0 +1,42 @@
+import ydb.apps.dstool.lib.common as common
+import ydb.core.protos.blob_depot_config_pb2 as blob_depot_config
+import sys
+
+description = 'Decommit physical group'
+
+
+def add_options(p):
+    common.add_group_ids_option(p, required=True)
+    p.add_argument('--hive-id', type=int, required=True, help='tablet id of containing hive')
+    p.add_argument('--log-channel-sp', type=str, metavar='POOL_NAME', help='channel 0 specifier')
+    p.add_argument('--snapshot-channel-sp', type=str, metavar='POOL_NAME', help='channel 1 specifier (defaults to channel 0)')
+    p.add_argument('--data-channel-sp', type=str, metavar='POOL_NAME[*COUNT]', nargs='*', help='data channel specifier')
+
+
+def do(args):
+    request = common.create_bsc_request(args)
+    cmd = request.Command.add().DecommitGroups
+    cmd.GroupIds.extend(args.group_ids)
+    cmd.HiveId = args.hive_id
+
+    if args.log_channel_sp or args.snapshot_channel_sp or args.data_channel_sp:
+        if args.log_channel_sp is None:
+            print('--log-channel-sp must be specified', file=sys.stderr)
+            sys.exit(1)
+        elif args.data_channel_sp is None:
+            print('--data-channel-sp must be specified', file=sys.stderr)
+            sys.exit(1)
+
+        cmd.ChannelProfiles.add(StoragePoolName=args.log_channel_sp, ChannelKind=blob_depot_config.TChannelKind.System)
+        chan1 = args.snapshot_channel_sp if args.snapshot_channel_sp is not None else args.log_channel_sp
+        cmd.ChannelProfiles.add(StoragePoolName=chan1, ChannelKind=blob_depot_config.TChannelKind.System)
+        for data_sp in args.data_channel_sp:
+            pool_name, sep, count = data_sp.rpartition('*')
+            if sep == '*':
+                count = int(count)
+            else:
+                pool_name, count = count, 1
+            cmd.ChannelProfiles.add(StoragePoolName=pool_name, ChannelKind=blob_depot_config.TChannelKind.Data, Count=count)
+
+    response = common.invoke_bsc_request(request)
+    common.print_request_result(args, request, response)
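
Both group decommit and group virtual create accept data channels in the form
POOL_NAME[*COUNT] and split on the last '*' with rpartition, so a pool name
that itself contains '*' still parses. A standalone illustration of the
parsing rule (the function name is ours, not the patch's):

    # Standalone copy of the POOL_NAME[*COUNT] parsing used by both commands.
    def parse_data_channel_sp(data_sp):
        pool_name, sep, count = data_sp.rpartition('*')
        if sep == '*':
            count = int(count)
        else:
            # rpartition returns ('', '', s) when '*' is absent
            pool_name, count = count, 1
        return pool_name, count

    assert parse_data_channel_sp('ssd') == ('ssd', 1)
    assert parse_data_channel_sp('ssd*3') == ('ssd', 3)
    assert parse_data_channel_sp('ro*t8*2') == ('ro*t8', 2)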
diff --git a/ydb/apps/dstool/lib/dstool_cmd_group_list.py b/ydb/apps/dstool/lib/dstool_cmd_group_list.py
index 76dc00cf65..2d988e9924 100644
--- a/ydb/apps/dstool/lib/dstool_cmd_group_list.py
+++ b/ydb/apps/dstool/lib/dstool_cmd_group_list.py
@@ -10,11 +10,12 @@ description = 'List groups'
 def add_options(p):
     p.add_argument('--show-vdisk-status', action='store_true', help='Show columns with VDisk status')
     p.add_argument('--show-vdisk-usage', action='store_true', help='Show columns with VDisk usage')
+    p.add_argument('--virtual-groups-only', action='store_true', help='Show only virtual groups')
     table.TableOutput([], col_units=[]).add_options(p)


 def do(args):
-    base_config_and_storage_pools = common.fetch_base_config_and_storage_pools()
+    base_config_and_storage_pools = common.fetch_base_config_and_storage_pools(virtualGroupsOnly=args.virtual_groups_only)
     base_config = base_config_and_storage_pools['BaseConfig']
     group_map = common.build_group_map(base_config)
@@ -43,6 +44,11 @@ def do(args):
         'UsedSize',
         'AvailableSize',
         'TotalSize',
+        'VirtualGroupState',
+        'VirtualGroupName',
+        'BlobDepotId',
+        'ErrorReason',
+        'DecommitStatus',
     ]
     visible_columns = [
         'GroupId',
@@ -66,9 +72,35 @@ def do(args):
     if args.show_vdisk_usage or args.all_columns:
         visible_columns.extend(['Usage', 'UsedSize', 'AvailableSize', 'TotalSize'])

+    if args.virtual_groups_only:
+        visible_columns.extend(['VirtualGroupState', 'VirtualGroupName', 'BlobDepotId', 'ErrorReason', 'DecommitStatus'])
+
     table_output = table.TableOutput(all_columns, col_units=col_units, default_visible_columns=visible_columns)

     group_stat_map = defaultdict(lambda: defaultdict(int))
+
+    for group_id, group in group_map.items():
+        group_stat = group_stat_map[group_id]
+        group_stat['BoxId:PoolId'] = '[%d:%d]' % (group.BoxId, group.StoragePoolId)
+        group_stat['PoolName'] = sp_name[(group.BoxId, group.StoragePoolId)]
+        group_stat['GroupId'] = group.GroupId
+        group_stat['Generation'] = group.GroupGeneration
+        group_stat['ErasureSpecies'] = group.ErasureSpecies
+        group_stat['ExpectedStatus'] = kikimr_bsconfig.TGroupStatus.E.Name(group.ExpectedStatus)
+        group_stat['OperatingStatus'] = kikimr_bsconfig.TGroupStatus.E.Name(group.OperatingStatus)
+        group_stat['SeenOperational'] = group.SeenOperational
+
+        if group.HasField('VirtualGroupInfo'):
+            group_stat['VirtualGroupState'] = kikimr_bsconfig.EVirtualGroupState.Name(group.VirtualGroupInfo.State)
+            group_stat['VirtualGroupName'] = group.VirtualGroupInfo.Name
+            group_stat['BlobDepotId'] = group.VirtualGroupInfo.BlobDepotId
+            group_stat['ErrorReason'] = group.VirtualGroupInfo.ErrorReason
+            group_stat['DecommitStatus'] = kikimr_bsconfig.TGroupDecommitStatus.E.Name(group.VirtualGroupInfo.DecommitStatus)
+
+        group_stat['UsedSize'] = 0
+        group_stat['TotalSize'] = 0
+        group_stat['AvailableSize'] = 0
+
     for vslot_id, vslot in vslot_map.items():
         group_id = vslot.GroupId
         if not common.is_dynamic_group(group_id):
@@ -82,14 +114,6 @@ def do(args):
         group = group_map[group_id]
         group_stat = group_stat_map[group_id]
-        group_stat['BoxId:PoolId'] = '[%d:%d]' % (group.BoxId, group.StoragePoolId)
-        group_stat['PoolName'] = sp_name[(group.BoxId, group.StoragePoolId)]
-        group_stat['GroupId'] = group.GroupId
-        group_stat['Generation'] = group.GroupGeneration
-        group_stat['ErasureSpecies'] = group.ErasureSpecies
-        group_stat['ExpectedStatus'] = kikimr_bsconfig.TGroupStatus.E.Name(group.ExpectedStatus)
-        group_stat['OperatingStatus'] = kikimr_bsconfig.TGroupStatus.E.Name(group.OperatingStatus)
-        group_stat['SeenOperational'] = group.SeenOperational
         group_stat['UsedSize'] += vslot.VDiskMetrics.AllocatedSize
         group_stat['TotalSize'] += vslot.VDiskMetrics.AllocatedSize
         group_stat['AvailableSize'] += vslot.VDiskMetrics.AvailableSize
diff --git a/ydb/apps/dstool/lib/dstool_cmd_group_virtual_create.py b/ydb/apps/dstool/lib/dstool_cmd_group_virtual_create.py
new file mode 100644
index 0000000000..b805997208
--- /dev/null
+++ b/ydb/apps/dstool/lib/dstool_cmd_group_virtual_create.py
@@ -0,0 +1,77 @@
+import ydb.apps.dstool.lib.common as common
+import ydb.core.protos.blob_depot_config_pb2 as blob_depot_config
+import sys
+import time
+
+description = 'Create virtual group backed by BlobDepot'
+
+
+def add_options(p):
+    p.add_argument('--name', type=str, required=True, nargs='+', help='cluster-unique name(s) of newly created virtual groups')
+    p.add_argument('--hive-id', type=int, required=True, help='tablet id of containing hive')
+    g = p.add_mutually_exclusive_group(required=True)
+    g.add_argument('--storage-pool-name', type=str, metavar='POOL_NAME', help='name of the containing storage pool')
+    g.add_argument('--storage-pool-id', type=str, metavar='BOX:POOL', help='id of the containing storage pool')
+    p.add_argument('--log-channel-sp', type=str, metavar='POOL_NAME', required=True, help='channel 0 specifier')
+    p.add_argument('--snapshot-channel-sp', type=str, metavar='POOL_NAME', help='channel 1 specifier (defaults to channel 0)')
+    p.add_argument('--data-channel-sp', type=str, metavar='POOL_NAME[*COUNT]', nargs='+', required=True, help='data channel specifier')
+    p.add_argument('--wait', action='store_true', help='wait for operation to complete by polling')
+
+
+def do(args):
+    request = common.create_bsc_request(args)
+    for name in args.name:
+        cmd = request.Command.add().AllocateVirtualGroup
+
+        cmd.Name = name
+        cmd.HiveId = args.hive_id
+
+        if args.storage_pool_name is not None:
+            cmd.StoragePoolName = args.storage_pool_name
+        else:
+            id_ = cmd.StoragePoolId
+            try:
+                id_.BoxId, id_.StoragePoolId = map(int, args.storage_pool_id.split(':'))
+            except Exception:
+                print(f'Invalid --storage-pool-id={args.storage_pool_id} format, <number>:<number> expected', file=sys.stderr)
+                sys.exit(1)
+
+        cmd.ChannelProfiles.add(StoragePoolName=args.log_channel_sp, ChannelKind=blob_depot_config.TChannelKind.System)
+        chan1 = args.snapshot_channel_sp if args.snapshot_channel_sp is not None else args.log_channel_sp
+        cmd.ChannelProfiles.add(StoragePoolName=chan1, ChannelKind=blob_depot_config.TChannelKind.System)
+        for data_sp in args.data_channel_sp:
+            pool_name, sep, count = data_sp.rpartition('*')
+            if sep == '*':
+                count = int(count)
+            else:
+                pool_name, count = count, 1
+            cmd.ChannelProfiles.add(StoragePoolName=pool_name, ChannelKind=blob_depot_config.TChannelKind.Data, Count=count)
+
+    response = common.invoke_bsc_request(request)
+    common.print_request_result(args, request, response)
+
+    if args.wait and not args.dry_run:
+        while True:
+            base_config = common.fetch_base_config(virtualGroupsOnly=True, cache=False)
+            names_remaining = set(args.name)
+            errors = []
+
+            for group in base_config.Group:
+                if group.VirtualGroupInfo.Name in names_remaining:
+                    if group.VirtualGroupInfo.State == common.kikimr_bsconfig.EVirtualGroupState.WORKING:
+                        names_remaining.remove(group.VirtualGroupInfo.Name)
+                    elif group.VirtualGroupInfo.State == common.kikimr_bsconfig.EVirtualGroupState.CREATE_FAILED:
+                        names_remaining.remove(group.VirtualGroupInfo.Name)
+                        errors.append(f'{group.VirtualGroupInfo.Name}: {group.VirtualGroupInfo.ErrorReason}')
+
+            if names_remaining:
+                time.sleep(1)
+                continue
+
+            if errors:
+                print('Some groups were not created:', file=sys.stderr)
+                for line in errors:
+                    print(line, file=sys.stderr)
+                sys.exit(1)
+            else:
+                break
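
Note the poll loop calls fetch_base_config(..., cache=False): the
cache_enable_param hook added to inmemcache in common.py is what lets a caller
bypass the in-process cache per call, so each iteration sees fresh state. A
minimal sketch of how such a decorator can be built (an assumed reconstruction
for illustration, not the actual common.py implementation):

    # Assumed reconstruction of an inmemcache-style decorator with a
    # cache_enable_param knob; the real one lives in ydb/apps/dstool/lib/common.py.
    import functools

    _cache = {}

    def inmemcache(name, cache_enable_param=None):
        def wrapper(fn):
            @functools.wraps(fn)
            def wrapped(*args, **kwargs):
                enabled = kwargs.get(cache_enable_param, True) if cache_enable_param else True
                key = (name, args, tuple(sorted(kwargs.items())))
                if enabled and key in _cache:
                    return _cache[key]
                value = fn(*args, **kwargs)
                if enabled:
                    _cache[key] = value
                return value
            return wrapped
        return wrapper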
diff --git a/ydb/apps/dstool/lib/dstool_cmd_pool_create_virtual.py b/ydb/apps/dstool/lib/dstool_cmd_pool_create_virtual.py
new file mode 100644
index 0000000000..564e604e41
--- /dev/null
+++ b/ydb/apps/dstool/lib/dstool_cmd_pool_create_virtual.py
@@ -0,0 +1,23 @@
+import ydb.apps.dstool.lib.common as common
+
+description = 'Create pool suitable for virtual groups'
+
+
+def add_options(p):
+    p.add_argument('--box-id', type=int, required=True, help='Containing box id')
+    p.add_argument('--name', type=str, metavar='POOL_NAME', required=True, help='Virtual group pool name')
+    p.add_argument('--kind', type=str, help='Optional pool kind')
+
+
+def do(args):
+    request = common.create_bsc_request(args)
+    cmd = request.Command.add().DefineStoragePool
+    cmd.BoxId = args.box_id
+    cmd.ErasureSpecies = 'none'
+    cmd.VDiskKind = 'Default'
+    cmd.Name = args.name
+    if args.kind is not None:
+        cmd.Kind = args.kind
+
+    response = common.invoke_bsc_request(request)
+    common.print_request_result(args, request, response)
diff --git a/ydb/apps/dstool/lib/ya.make b/ydb/apps/dstool/lib/ya.make
index d83c23503b..5ea36b6682 100644
--- a/ydb/apps/dstool/lib/ya.make
+++ b/ydb/apps/dstool/lib/ya.make
@@ -23,12 +23,15 @@ PY_SRCS(
     dstool_cmd_group_add.py
     dstool_cmd_group_check.py
+    dstool_cmd_group_decommit.py
     dstool_cmd_group_list.py
     dstool_cmd_group_show_blob_info.py
     dstool_cmd_group_show_usage_by_tablets.py
     dstool_cmd_group_state.py
     dstool_cmd_group_take_snapshot.py
+    dstool_cmd_group_virtual_create.py
+    dstool_cmd_pool_create_virtual.py
     dstool_cmd_pool_list.py
     dstool_cmd_box_list.py
diff --git a/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp b/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp
index 230685a161..44bf2dba4d 100644
--- a/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp
+++ b/ydb/core/blobstorage/nodewarden/node_warden_proxy.cpp
@@ -43,6 +43,10 @@ void TNodeWarden::StartLocalProxy(ui32 groupId) {
                 proxy.reset(NBlobDepot::CreateBlobDepotAgent(groupId, info, proxyActorId));
                 group.AgentProxy = true;
                 break;
+
+            case NKikimrBlobStorage::TGroupDecommitStatus_E_TGroupDecommitStatus_E_INT_MIN_SENTINEL_DO_NOT_USE_:
+            case NKikimrBlobStorage::TGroupDecommitStatus_E_TGroupDecommitStatus_E_INT_MAX_SENTINEL_DO_NOT_USE_:
+                Y_UNREACHABLE();
         }
     } else {
         // create proxy with configuration
diff --git a/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp b/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp
index 801389cd7e..f0d586fc06 100644
--- a/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp
+++ b/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeleton.cpp
@@ -111,6 +111,11 @@ namespace NKikimr {
             case NKikimrBlobStorage::TGroupDecommitStatus::IN_PROGRESS:
             case NKikimrBlobStorage::TGroupDecommitStatus::DONE:
                 return true;
+
+            case NKikimrBlobStorage::TGroupDecommitStatus_E_TGroupDecommitStatus_E_INT_MIN_SENTINEL_DO_NOT_USE_:
+            case NKikimrBlobStorage::TGroupDecommitStatus_E_TGroupDecommitStatus_E_INT_MAX_SENTINEL_DO_NOT_USE_:
+                Y_VERIFY_DEBUG(false);
+                return true;
         }
     }
diff --git a/ydb/core/mind/bscontroller/cmds_storage_pool.cpp b/ydb/core/mind/bscontroller/cmds_storage_pool.cpp
index 22f098c8b9..0945e58a25 100644
--- a/ydb/core/mind/bscontroller/cmds_storage_pool.cpp
+++ b/ydb/core/mind/bscontroller/cmds_storage_pool.cpp
@@ -473,75 +473,100 @@ namespace NKikimr::NBsController {
            });
        }

+        const bool virtualGroupsOnly = cmd.GetVirtualGroupsOnly();
+
+        THashSet<TGroupId> groupFilter;
+        THashSet<TVSlotId> vslotFilter;
+        THashSet<TPDiskId> pdiskFilter;
+
+        if (virtualGroupsOnly) {
+            Groups.ForEach([&](TGroupId groupId, const TGroupInfo& groupInfo) {
+                if (groupInfo.VirtualGroupState || groupInfo.DecommitStatus != NKikimrBlobStorage::TGroupDecommitStatus::NONE) {
+                    groupFilter.insert(groupId);
+                }
+            });
+            VSlots.ForEach([&](TVSlotId vslotId, const TVSlotInfo& vslotInfo) {
+                if (vslotInfo.Group && groupFilter.contains(vslotInfo.GroupId)) {
+                    vslotFilter.insert(vslotId);
+                    pdiskFilter.insert(vslotId.ComprisingPDiskId());
+                }
+            });
+        }
+
         PDisks.ForEach([&](const TPDiskId& pdiskId, const TPDiskInfo& pdiskInfo) {
-            Serialize(pb->AddPDisk(), pdiskId, pdiskInfo);
+            if (!virtualGroupsOnly || pdiskFilter.contains(pdiskId)) {
+                Serialize(pb->AddPDisk(), pdiskId, pdiskInfo);
+            }
         });

-        auto vslotFinder = [this](const TVSlotId& vslotId, const std::function<void(const TVSlotInfo&)>& callback) {
+        const TVSlotFinder vslotFinder{[this](const TVSlotId& vslotId, auto&& callback) {
             if (const TVSlotInfo *vslot = VSlots.Find(vslotId)) {
                 callback(*vslot);
             }
-        };
+        }};

-        VSlots.ForEach([pb, &vslotFinder](const TVSlotId& /*vslotId*/, const TVSlotInfo& vslotInfo) {
-            if (vslotInfo.Group) {
+        VSlots.ForEach([&](TVSlotId vslotId, const TVSlotInfo& vslotInfo) {
+            if (vslotInfo.Group && (!virtualGroupsOnly || vslotFilter.contains(vslotId))) {
                 Serialize(pb->AddVSlot(), vslotInfo, vslotFinder);
             }
         });

-        Groups.ForEach([pb](TGroupId /*groupId*/, const TGroupInfo& groupInfo) {
-            Serialize(pb->AddGroup(), groupInfo);
+        Groups.ForEach([&](TGroupId groupId, const TGroupInfo& groupInfo) {
+            if (!virtualGroupsOnly || groupFilter.contains(groupId)) {
+                Serialize(pb->AddGroup(), groupInfo);
+            }
         });

-        // apply static group
-        for (const auto& [pdiskId, pdisk] : StaticPDisks) {
-            if (PDisks.Find(pdiskId)) {
-                continue; // this pdisk was already reported
-            }
-            auto *x = pb->AddPDisk();
-            x->SetNodeId(pdisk.NodeId);
-            x->SetPDiskId(pdisk.PDiskId);
-            x->SetPath(pdisk.Path);
-            x->SetType(PDiskTypeToPDiskType(pdisk.Category.Type()));
-            x->SetKind(pdisk.Category.Kind());
-            if (pdisk.PDiskConfig) {
-                bool success = x->MutablePDiskConfig()->ParseFromString(pdisk.PDiskConfig);
-                Y_VERIFY(success);
-            }
-            x->SetGuid(pdisk.Guid);
-            x->SetNumStaticSlots(pdisk.StaticSlotUsage);
-            x->SetDriveStatus(NKikimrBlobStorage::EDriveStatus::ACTIVE);
-            x->SetExpectedSlotCount(pdisk.ExpectedSlotCount);
-            x->SetDecommitStatus(NKikimrBlobStorage::EDecommitStatus::DECOMMIT_NONE);
-            if (pdisk.PDiskMetrics) {
-                x->MutablePDiskMetrics()->CopyFrom(*pdisk.PDiskMetrics);
-                x->MutablePDiskMetrics()->ClearPDiskId();
+        if (!virtualGroupsOnly) {
+            // apply static group
+            for (const auto& [pdiskId, pdisk] : StaticPDisks) {
+                if (PDisks.Find(pdiskId)) {
+                    continue; // this pdisk was already reported
+                }
+                auto *x = pb->AddPDisk();
+                x->SetNodeId(pdisk.NodeId);
+                x->SetPDiskId(pdisk.PDiskId);
+                x->SetPath(pdisk.Path);
+                x->SetType(PDiskTypeToPDiskType(pdisk.Category.Type()));
+                x->SetKind(pdisk.Category.Kind());
+                if (pdisk.PDiskConfig) {
+                    bool success = x->MutablePDiskConfig()->ParseFromString(pdisk.PDiskConfig);
+                    Y_VERIFY(success);
+                }
+                x->SetGuid(pdisk.Guid);
+                x->SetNumStaticSlots(pdisk.StaticSlotUsage);
+                x->SetDriveStatus(NKikimrBlobStorage::EDriveStatus::ACTIVE);
+                x->SetExpectedSlotCount(pdisk.ExpectedSlotCount);
+                x->SetDecommitStatus(NKikimrBlobStorage::EDecommitStatus::DECOMMIT_NONE);
+                if (pdisk.PDiskMetrics) {
+                    x->MutablePDiskMetrics()->CopyFrom(*pdisk.PDiskMetrics);
+                    x->MutablePDiskMetrics()->ClearPDiskId();
+                }
             }
-        }
-        for (const auto& [vslotId, vslot] : StaticVSlots) {
-            auto *x = pb->AddVSlot();
-            vslotId.Serialize(x->MutableVSlotId());
-            x->SetGroupId(vslot.VDiskId.GroupID);
-            x->SetGroupGeneration(vslot.VDiskId.GroupGeneration);
-            x->SetFailRealmIdx(vslot.VDiskId.FailRealm);
-            x->SetFailDomainIdx(vslot.VDiskId.FailDomain);
-            x->SetVDiskIdx(vslot.VDiskId.VDisk);
-            if (vslot.VDiskMetrics) {
-                x->SetAllocatedSize(vslot.VDiskMetrics->GetAllocatedSize());
-                x->MutableVDiskMetrics()->CopyFrom(*vslot.VDiskMetrics);
-                x->MutableVDiskMetrics()->ClearVDiskId();
+            for (const auto& [vslotId, vslot] : StaticVSlots) {
+                auto *x = pb->AddVSlot();
+                vslotId.Serialize(x->MutableVSlotId());
+                x->SetGroupId(vslot.VDiskId.GroupID);
+                x->SetGroupGeneration(vslot.VDiskId.GroupGeneration);
+                x->SetFailRealmIdx(vslot.VDiskId.FailRealm);
+                x->SetFailDomainIdx(vslot.VDiskId.FailDomain);
+                x->SetVDiskIdx(vslot.VDiskId.VDisk);
+                if (vslot.VDiskMetrics) {
+                    x->SetAllocatedSize(vslot.VDiskMetrics->GetAllocatedSize());
+                    x->MutableVDiskMetrics()->CopyFrom(*vslot.VDiskMetrics);
+                    x->MutableVDiskMetrics()->ClearVDiskId();
+                }
+                x->SetStatus(NKikimrBlobStorage::EVDiskStatus_Name(vslot.VDiskStatus));
             }
-            x->SetStatus(NKikimrBlobStorage::EVDiskStatus_Name(vslot.VDiskStatus));
-        }
-
-        if (const auto& ss = AppData()->StaticBlobStorageConfig) {
-            for (const auto& group : ss->GetGroups()) {
-                auto *x = pb->AddGroup();
-                x->SetGroupId(group.GetGroupID());
-                x->SetGroupGeneration(group.GetGroupGeneration());
-                x->SetErasureSpecies(TBlobStorageGroupType::ErasureSpeciesName(group.GetErasureSpecies()));
-                for (const auto& realm : group.GetRings()) {
-                    for (const auto& domain : realm.GetFailDomains()) {
-                        for (const auto& location : domain.GetVDiskLocations()) {
-                            const TVSlotId vslotId(location.GetNodeID(), location.GetPDiskID(), location.GetVDiskSlotID());
-                            vslotId.Serialize(x->AddVSlotId());
+            if (const auto& ss = AppData()->StaticBlobStorageConfig) {
+                for (const auto& group : ss->GetGroups()) {
+                    auto *x = pb->AddGroup();
+                    x->SetGroupId(group.GetGroupID());
+                    x->SetGroupGeneration(group.GetGroupGeneration());
+                    x->SetErasureSpecies(TBlobStorageGroupType::ErasureSpeciesName(group.GetErasureSpecies()));
+                    for (const auto& realm : group.GetRings()) {
+                        for (const auto& domain : realm.GetFailDomains()) {
+                            for (const auto& location : domain.GetVDiskLocations()) {
+                                const TVSlotId vslotId(location.GetNodeID(), location.GetPDiskID(), location.GetVDiskSlotID());
+                                vslotId.Serialize(x->AddVSlotId());
+                            }
                         }
                     }
                 }
diff --git a/ydb/core/mind/bscontroller/config.cpp b/ydb/core/mind/bscontroller/config.cpp
index 9f45b21245..665a096fcf 100644
--- a/ydb/core/mind/bscontroller/config.cpp
+++ b/ydb/core/mind/bscontroller/config.cpp
@@ -156,11 +156,11 @@ namespace NKikimr::NBsController {
             if (const TGroupInfo *group = State.Groups.Find(vslotInfo.GroupId); group && mood != TMood::Delete) {
                 item.SetStoragePoolName(State.StoragePools.Get().at(group->StoragePoolId).Name);

-                auto vslotFinder = [this](const TVSlotId& vslotId, auto&& callback) {
+                const TVSlotFinder vslotFinder{[this](TVSlotId vslotId, auto&& callback) {
                     if (const TVSlotInfo *vslot = State.VSlots.Find(vslotId)) {
                         callback(*vslot);
                     }
-                };
+                }};

                 SerializeDonors(&item, vslotInfo, *group, vslotFinder);
             } else {
@@ -912,6 +912,23 @@ namespace NKikimr::NBsController {
         const auto& status = group.Status;
         pb->SetOperatingStatus(status.OperatingStatus);
         pb->SetExpectedStatus(status.ExpectedStatus);
+
+        if (group.DecommitStatus != NKikimrBlobStorage::TGroupDecommitStatus::NONE || group.VirtualGroupState) {
+            auto *vgi = pb->MutableVirtualGroupInfo();
+            if (group.VirtualGroupState) {
+                vgi->SetState(*group.VirtualGroupState);
+            }
+            if (group.VirtualGroupName) {
+                vgi->SetName(*group.VirtualGroupName);
+            }
+            if (group.BlobDepotId) {
+                vgi->SetBlobDepotId(*group.BlobDepotId);
+            }
+            if (group.ErrorReason) {
+                vgi->SetErrorReason(*group.ErrorReason);
+            }
+            vgi->SetDecommitStatus(group.DecommitStatus);
+        }
     }

     void TBlobStorageController::SerializeDonors(NKikimrBlobStorage::TNodeWardenServiceSet::TVDisk *vdisk,
diff --git a/ydb/core/mind/bscontroller/impl.h b/ydb/core/mind/bscontroller/impl.h
index a4e84d0a80..909da0b960 100644
--- a/ydb/core/mind/bscontroller/impl.h
+++ b/ydb/core/mind/bscontroller/impl.h
@@ -2140,7 +2140,7 @@ public:
     void OnWardenDisconnected(TNodeId nodeId);
     void EraseKnownDrivesOnDisconnected(TNodeInfo *nodeInfo);

-    using TVSlotFinder = std::function<void(const TVSlotId&, const std::function<void(const TVSlotInfo&)>&)>;
+    using TVSlotFinder = std::function<void(TVSlotId, const std::function<void(const TVSlotInfo&)>&)>;

     static void Serialize(NKikimrBlobStorage::TDefineHostConfig *pb, const THostConfigId &id, const THostConfigInfo &hostConfig);
     static void Serialize(NKikimrBlobStorage::TDefineBox *pb, const TBoxId &id, const TBoxInfo &box);
diff --git a/ydb/core/mind/bscontroller/register_node.cpp b/ydb/core/mind/bscontroller/register_node.cpp
index 8f4aef31e7..626ab81bb7 100644
--- a/ydb/core/mind/bscontroller/register_node.cpp
+++ b/ydb/core/mind/bscontroller/register_node.cpp
@@ -428,11 +428,11 @@ void TBlobStorageController::ReadVSlot(const TVSlotInfo& vslot, TEvBlobStorage::
         const TStoragePoolInfo& info = StoragePools.at(group->StoragePoolId);
         vDisk->SetStoragePoolName(info.Name);

-        auto vslotFinder = [this](const TVSlotId& vslotId, auto&& callback) {
+        const TVSlotFinder vslotFinder{[this](TVSlotId vslotId, auto&& callback) {
             if (const TVSlotInfo *vslot = FindVSlot(vslotId)) {
                 callback(*vslot);
             }
-        };
+        }};

         SerializeDonors(vDisk, vslot, *group, vslotFinder);
     } else {
diff --git a/ydb/core/protos/blobstorage.proto b/ydb/core/protos/blobstorage.proto
index 1ab7de1894..78927019eb 100644
--- a/ydb/core/protos/blobstorage.proto
+++ b/ydb/core/protos/blobstorage.proto
@@ -165,15 +165,6 @@ enum EEntityStatus {
     RESTART = 4; // entity has changed config or changed environment and should be restarted by warden
 }

-message TGroupDecommitStatus {
-    enum E {
-        NONE = 0; // no decommission
-        PENDING = 1; // decommission machinery is starting
-        IN_PROGRESS = 2; // decommission underway
-        DONE = 3; // group decommission complete
-    }
-}
-
 message TGroupInfo {
     message TFailRealm {
         message TFailDomain {
@@ -197,7 +188,7 @@ message TGroupInfo {
     optional string StoragePoolName = 13;
     optional EPDiskType DeviceType = 14;
     optional uint64 BlobDepotId = 15; // if filled, then this is virtual group
-    optional TGroupDecommitStatus.E DecommitStatus = 16;
+    optional NKikimrBlobStorage.TGroupDecommitStatus.E DecommitStatus = 16;
 }

 message TEvVPatchStart {
diff --git a/ydb/core/protos/blobstorage_config.proto b/ydb/core/protos/blobstorage_config.proto
index 9793cf80a2..c4a939f8a6 100644
--- a/ydb/core/protos/blobstorage_config.proto
+++ b/ydb/core/protos/blobstorage_config.proto
@@ -23,6 +23,15 @@ enum EVirtualGroupState {
     WORKING = 2; // operational
 }

+message TGroupDecommitStatus {
+    enum E {
+        NONE = 0; // no decommission
+        PENDING = 1; // decommission machinery is starting
+        IN_PROGRESS = 2; // decommission underway
+        DONE = 3; // group decommission complete
+    }
+}
+
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 // TYPICAL HOST CONFIGURATIONS
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -253,6 +262,7 @@ message TProposeStoragePools {
 message TQueryBaseConfig {
     bool RetrieveDevices = 1;
+    bool VirtualGroupsOnly = 2;
 }
message TReadSettings {
@@ -617,6 +627,13 @@ message TBaseConfig {
         repeated TDonorDisk Donors = 11;
         bool Ready = 12; // is disk READY in terms of BSC (stable READY status for some period of time)
     }
+    message TVirtualGroupInfo {
+        EVirtualGroupState State = 1;
+        string Name = 2;
+        uint64 BlobDepotId = 3;
+        string ErrorReason = 4;
+        TGroupDecommitStatus.E DecommitStatus = 5;
+    }
     message TGroup {
         uint32 GroupId = 1;
         uint32 GroupGeneration = 2;
@@ -627,6 +644,7 @@ message TBaseConfig {
         bool SeenOperational = 7;
         TGroupStatus.E OperatingStatus = 8; // group status based on latest VDisk reports only
         TGroupStatus.E ExpectedStatus = 9; // status based not only on operational report, but on PDisk status and plans too
+        TVirtualGroupInfo VirtualGroupInfo = 10;
     }
     message TNode {
         uint32 NodeId = 1;
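
With TVirtualGroupInfo in TBaseConfig, a client can narrow the query to
virtual groups and read their state directly. A hedged sketch using the dstool
helpers from this patch; it assumes the dstool connection globals are already
initialized, e.g. by the CLI front end:

    # Sketch: list virtual groups via the helpers added in this commit.
    import ydb.apps.dstool.lib.common as common

    base_config = common.fetch_base_config(virtualGroupsOnly=True, cache=False)
    for group in base_config.Group:
        if not group.HasField('VirtualGroupInfo'):
            continue
        info = group.VirtualGroupInfo
        state = common.kikimr_bsconfig.EVirtualGroupState.Name(info.State)
        decommit = common.kikimr_bsconfig.TGroupDecommitStatus.E.Name(info.DecommitStatus)
        print(group.GroupId, info.Name, state, f'blob_depot={info.BlobDepotId}', decommit)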