author | mepershin <mepershin@yandex-team.ru> | 2022-02-10 16:52:26 +0300
committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:52:26 +0300
commit | b466fd5cbe5cde08908f4580271493ce67d974f1 (patch)
tree | ab7fbbf3253d4c0e2793218f09378908beb025fb
parent | d44eaca2d95ae331ff3c8ce74be2fd2a933d45d7 (diff)
download | ydb-b466fd5cbe5cde08908f4580271493ce67d974f1.tar.gz
Restoring authorship annotation for <mepershin@yandex-team.ru>. Commit 2 of 2.
-rw-r--r-- | ydb/core/base/blobstorage.h | 14
-rw-r--r-- | ydb/core/mind/bscontroller/cluster_fit_scripts/cluster.py | 714
-rw-r--r-- | ydb/core/mind/bscontroller/cluster_fit_scripts/parser.py | 212
-rw-r--r-- | ydb/core/mind/bscontroller/config_cmd.cpp | 2
-rw-r--r-- | ydb/core/mind/bscontroller/config_fit_groups.cpp | 2
-rw-r--r-- | ydb/core/mind/bscontroller/group_geometry_info.h | 44
-rw-r--r-- | ydb/core/mind/bscontroller/impl.h | 2
-rw-r--r-- | ydb/core/mind/bscontroller/monitoring.cpp | 54
-rw-r--r-- | ydb/core/mind/bscontroller/scheme.h | 70
-rw-r--r-- | ydb/core/mind/bscontroller/ut_bscontroller/main.cpp | 218
-rw-r--r-- | ydb/core/protos/blobstorage_config.proto | 80
11 files changed, 706 insertions, 706 deletions
diff --git a/ydb/core/base/blobstorage.h b/ydb/core/base/blobstorage.h index c08d8c2925..a2faee326e 100644 --- a/ydb/core/base/blobstorage.h +++ b/ydb/core/base/blobstorage.h @@ -792,13 +792,13 @@ struct TEvBlobStorage { EvControllerNodeReport, EvControllerScrubStartQuantum, - EvControllerMigrationPause, - EvControllerMigrationContinue, - EvControllerMigrationFinished, - EvControllerMigrationBatch, - EvControllerMigrationBatchRequest, - EvControllerMigrationDone, - + EvControllerMigrationPause, + EvControllerMigrationContinue, + EvControllerMigrationFinished, + EvControllerMigrationBatch, + EvControllerMigrationBatchRequest, + EvControllerMigrationDone, + EvControllerUpdateSystemViews, // proxy - node controller interface diff --git a/ydb/core/mind/bscontroller/cluster_fit_scripts/cluster.py b/ydb/core/mind/bscontroller/cluster_fit_scripts/cluster.py index 9e38a1d3e5..c005a26d8d 100644 --- a/ydb/core/mind/bscontroller/cluster_fit_scripts/cluster.py +++ b/ydb/core/mind/bscontroller/cluster_fit_scripts/cluster.py @@ -1,357 +1,357 @@ -#!/usr/bin/python3 -import argparse -import random -import string -import struct -import math -import sys - - -KB = 1024 -MB = 1024 * KB -GB = 1024 * MB - - -def padding(x = 0, t = None): - if t is None: - return ' ' * (x * 4) - else: - lines = t.strip().split('\n') - return '\n'.join(padding(1) + line for line in lines) - - -def human(value): - return value + 1 - - -class PrintLine: - def __init__(self, key, value, encode=False): - self.key = key - self.value = value - self.encode = encode - - def print(self, padding_size=0): - result = padding(padding_size) - result += str(self.key) - result += ": " - if self.encode: result += "\"" - result += str(self.value) - if self.encode: result += "\"" - return result - - def __str__(self): - return self.print() - - -class PrintNode: - def __init__(self, name, inline=False): - self.name = name - self.children = [] - - def add_line(self, key, value, encode=False): - self.children.append(PrintLine(key, value, encode)) - - def add_node(self, node): - self.children.append(node) - - def print(self, padding_size=0): - result = padding(padding_size) + self.name + " {\n" - for child in self.children: - result += child.print(padding_size + 1) + "\n" - result += padding(padding_size) + "}" - return result - - def __str__(self): - return self.print() - - -class PDisk: - def __init__(self, node_id, pdisk_id, box_id, disk_type='HDD'): - self.node_id = node_id - self.pdisk_id = pdisk_id - self.path = "/dev/node{}/pdisk{}".format(node_id, pdisk_id) - self.type = disk_type - self.box_id = box_id - self.num_static_slots = 0 - self.expected_slot_count = 16 - - def print(self): - printer = PrintNode("PDisk") - printer.add_line("NodeId", self.node_id) - printer.add_line("PDiskId", self.pdisk_id) - printer.add_line("Path", self.path, encode=True) - printer.add_line("Guid", ''.join(random.choice('123456789') for _ in range(9))) - printer.add_line("BoxId", self.box_id) - printer.add_line("NumStaticSlots", self.num_static_slots) - printer.add_line("DriveStatus", "ACTIVE") - printer.add_line("ExpectedSlotCount", self.expected_slot_count) - return printer - - -class VSlot: - def __init__(self, node_id, pdisk_id, vslot_id): - self.node_id = node_id - self.pdisk_id = pdisk_id - self.vslot_id = vslot_id - - def print(self): - printer = PrintNode("VSlotId") - printer.add_line("NodeId", self.node_id) - printer.add_line("PDiskId", self.pdisk_id) - printer.add_line("VSlotId", self.vslot_id) - return printer - - def __str__(self): - return "VSlot({}, {}, 
{})".format(self.node_id, self.pdisk_id, self.vslot_id) - - def __repr__(self): - return self.__str__() - - -class Group: - def __init__(self, box_id, storage_pool_id, erasure_species='block-4-2'): - self.erasure_species = erasure_species - self.box_id = box_id - self.storage_pool_id = storage_pool_id - self.vslots = [] - - def add_vslot(self, node_id, pdisk_id, vslot_id): - self.vslots.append(VSlot(node_id, pdisk_id, vslot_id)) - - def print(self): - printer = PrintNode("Group") - printer.add_line("GroupId", ''.join(random.choice('123456789') for _ in range(9))) - printer.add_line("ErasureSpecies", self.erasure_species, encode=True) - printer.add_line("BoxId", self.box_id) - printer.add_line("StoragePoolId", self.storage_pool_id) - for vslot in self.vslots: - printer.add_node(vslot.print()) - return printer - - -class FailDomain: - def __init__(self, dc=None, room=None, rack=None, body=None): - self.dc = dc - self.room = room - self.rack = rack - self.body = body - - def serialize(self): - result = bytes() - pack = lambda level, value: b'' if value is None else struct.pack('<BI', level, value) - result += pack(10, self.dc) - result += pack(20, self.room) - result += pack(30, self.rack) - result += pack(40, self.body) - result = ''.join('\\{0:03o}'.format(x) for x in result) - return result - - def deserialize(self, data): - value_count = len(data) // 5 - data = struct.unpack('<' + 'BI' * value_count, data) - for index in range(0, len(data), 2): - if data[index] == 10: - self.dc = data[index + 1] - elif data[index] == 20: - self.room = data[index + 1] - elif data[index] == 30: - self.rack = data[index + 1] - else: - self.body = data[index + 1] - - -class Node: - def __init__(self, node_id, dc=None, room=None, rack=None, body=None): - self.node_id = node_id - self.physical_location = FailDomain(dc, room, rack, body) - self.fqdn = "node{}.test.cluster.net".format(node_id) - self.icport = 1337 - - def print(self): - printer = PrintNode("Node") - printer.add_line("NodeId", self.node_id) - printer.add_line("PhysicalLocation", self.physical_location.serialize(), encode=True) - hostkey = PrintNode("HostKey") - hostkey.add_line("Fqdn", self.fqdn, encode=True) - hostkey.add_line("IcPort", self.icport) - printer.add_node(hostkey) - return printer - - -class Request: - def __init__(self, algorithm, iterations): - self.algorithm = algorithm - self.iterations = iterations - - def print(self): - printer = PrintNode("Request") - printer.add_line("Algorithm", self.algorithm) - printer.add_line("Iterations", self.iterations) - return printer - - @staticmethod - def choices(): - return ['ANNEALING', 'QUADRATIC', 'HUNGARIAN'] - - -class BaseConfig: - def __init__(self): - self.nodes = [] - self.pdisks = [] - self.groups = [] - - def print(self): - printer = PrintNode("BaseConfig") - for node in self.nodes: - printer.add_node(node.print()) - for pdisk in self.pdisks: - printer.add_node(pdisk.print()) - for group in self.groups: - printer.add_node(group.print()) - return printer - - -class StoragePool: - def __init__(self, groups, box_id, sp_id, erasure_species="block-4-2"): - self.box_id = box_id - self.storage_pool_id = sp_id - self.name = "StoragePool{}:{}".format(box_id, sp_id) - self.erasure_species = erasure_species - self.groups = groups - - def print(self): - printer = PrintNode("StoragePool") - printer.add_line("BoxId", self.box_id) - printer.add_line("StoragePoolId", self.storage_pool_id) - printer.add_line("Name", self.name, encode=True) - printer.add_line("ErasureSpecies", self.erasure_species, 
encode=True) - printer.add_line("VDiskKind", "Default", encode=True) - printer.add_line("Kind", "rot", encode=True) - printer.add_line("NumGroups", self.groups) - pdisk_filter = PrintNode("PDiskFilter") - prop = PrintNode("Property") - prop.add_line("Type", "ROT") - pdisk_filter.add_node(prop) - printer.add_node(pdisk_filter) - return printer - - -class Metric: - def __init__(self, vslot, size): - self.vslot = vslot - self.size = size - - def print(self): - printer = PrintNode("Metric") - printer.add_node(self.vslot.print()) - printer.add_line("Metric", self.size) - return printer - - def __str__(self): - return "Metric({}, {})".format(str(self.vslot), self.size) - - def __repr__(self): - return self.__str__() - - -class MetricStrategy: - def __init__(self, name): - if name not in MetricStrategy.choices(): - raise Exception("wrong strategy typename") - self.func = getattr(self, 'strategy_{}'.format(name)) - - @staticmethod - def choices(): - allowed_names = [name for name in dir(MetricStrategy) if name.startswith('strategy_')] - allowed_names = [name[name.find('_') + 1:] for name in allowed_names] - return allowed_names - - def execute(self, *args, **kwargs): - return self.func(*args, **kwargs) - - def strategy_random(self, minimum=10*MB, maximum=10*GB, *args, **kwargs): - return random.randint(minimum, maximum) - - def strategy_minmax(self, group_id, minimum=10*MB, maximum=10*GB, *args, **kwargs): - if group_id % 2 == 0: - return minimum - return maximum - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Test cluster crafting utility.') - parser.add_argument('--nodes', dest='total_nodes', type=int, default=0, help='count of nodes in cluster') - parser.add_argument('--groups', dest='groups', type=int, default=0, help='count of existing groups') - parser.add_argument('--nodes-in-rack', dest='nodes_in_rack', type=int, default=8, help='count of nodes in one rack') - parser.add_argument('--pdisks-per-node', dest='pdisks_per_node', type=int, default=1, help='count of pdisks on one node') - parser.add_argument('--cluster-output', dest='cluster_output', type=str, default='cluster', help='name of file with clusterfit config') - parser.add_argument('--metric-output', dest='metric_output', type=str, default='metric', help='name of file with vdisk metrics') - parser.add_argument('--strategy', dest='strategy', type=str, default='random', choices=MetricStrategy.choices(), help='vslot metric assignment mechanism') - parser.add_argument('--algorithm', dest='algorithm', type=str, choices=Request.choices(), help='clusterfit algorithm') - parser.add_argument('--iterations', dest='iterations', type=int, default=10**5, help='count of iteration in anneling algorithm') - args = parser.parse_args() - - racks = dict() - node_usage = dict() - strategy = MetricStrategy(args.strategy) - - nodes = [] - for id in range(args.total_nodes): - rack_id = id // args.nodes_in_rack - nodes.append(Node(human(id), 1, 1, human(id // args.nodes_in_rack), 1)) - node_usage[id] = [0 for _ in range(args.pdisks_per_node)] - if rack_id in racks: - racks[rack_id].append(id) - else: - racks[rack_id] = [id] - - pdisks = [] - for node in range(args.total_nodes): - for index in range(args.pdisks_per_node): - pdisk = PDisk(human(node), human(index), 1) - pdisks.append(pdisk) - - def make_vslots(): - def find_vslot(rack_id): - for node_id in racks[rack_id]: - for pdisk_id in range(args.pdisks_per_node): - if node_usage[node_id][pdisk_id] < 8: - node_usage[node_id][pdisk_id] += 1 - return VSlot(human(node_id), 
human(pdisk_id), node_usage[node_id][pdisk_id]) - return None - result = [] - for rack_id in racks: - if len(result) < 8: - vslot = find_vslot(rack_id) - if vslot is not None: - result.append(vslot) - return result - - groups = [] - metrics = [] - for group_id in range(args.groups): - group = Group(1, 1) - group.vslots = make_vslots() - for vslot in group.vslots: - metrics.append(Metric(vslot, strategy.execute(group_id=group_id, vslot=vslot))) - groups.append(group) - - storage_pools = [] - storage_pools.append(StoragePool(args.groups, 1, 1)) - - base_config = BaseConfig() - base_config.pdisks = pdisks - base_config.groups = groups - base_config.nodes = nodes - - with open(args.cluster_output, 'w') as cluster: - cluster.write(str(base_config.print()) + "\n") - for spool in storage_pools: - cluster.write(str(spool.print()) + "\n") - for metric in metrics: - cluster.write(str(metric.print()) + "\n") - cluster.write(str(Request(args.algorithm, args.iterations).print()) + "\n") - - with open(args.metric_output, 'w') as metrics_file: - metrics_file.write(str(metrics)) - +#!/usr/bin/python3 +import argparse +import random +import string +import struct +import math +import sys + + +KB = 1024 +MB = 1024 * KB +GB = 1024 * MB + + +def padding(x = 0, t = None): + if t is None: + return ' ' * (x * 4) + else: + lines = t.strip().split('\n') + return '\n'.join(padding(1) + line for line in lines) + + +def human(value): + return value + 1 + + +class PrintLine: + def __init__(self, key, value, encode=False): + self.key = key + self.value = value + self.encode = encode + + def print(self, padding_size=0): + result = padding(padding_size) + result += str(self.key) + result += ": " + if self.encode: result += "\"" + result += str(self.value) + if self.encode: result += "\"" + return result + + def __str__(self): + return self.print() + + +class PrintNode: + def __init__(self, name, inline=False): + self.name = name + self.children = [] + + def add_line(self, key, value, encode=False): + self.children.append(PrintLine(key, value, encode)) + + def add_node(self, node): + self.children.append(node) + + def print(self, padding_size=0): + result = padding(padding_size) + self.name + " {\n" + for child in self.children: + result += child.print(padding_size + 1) + "\n" + result += padding(padding_size) + "}" + return result + + def __str__(self): + return self.print() + + +class PDisk: + def __init__(self, node_id, pdisk_id, box_id, disk_type='HDD'): + self.node_id = node_id + self.pdisk_id = pdisk_id + self.path = "/dev/node{}/pdisk{}".format(node_id, pdisk_id) + self.type = disk_type + self.box_id = box_id + self.num_static_slots = 0 + self.expected_slot_count = 16 + + def print(self): + printer = PrintNode("PDisk") + printer.add_line("NodeId", self.node_id) + printer.add_line("PDiskId", self.pdisk_id) + printer.add_line("Path", self.path, encode=True) + printer.add_line("Guid", ''.join(random.choice('123456789') for _ in range(9))) + printer.add_line("BoxId", self.box_id) + printer.add_line("NumStaticSlots", self.num_static_slots) + printer.add_line("DriveStatus", "ACTIVE") + printer.add_line("ExpectedSlotCount", self.expected_slot_count) + return printer + + +class VSlot: + def __init__(self, node_id, pdisk_id, vslot_id): + self.node_id = node_id + self.pdisk_id = pdisk_id + self.vslot_id = vslot_id + + def print(self): + printer = PrintNode("VSlotId") + printer.add_line("NodeId", self.node_id) + printer.add_line("PDiskId", self.pdisk_id) + printer.add_line("VSlotId", self.vslot_id) + return printer + + def 
__str__(self): + return "VSlot({}, {}, {})".format(self.node_id, self.pdisk_id, self.vslot_id) + + def __repr__(self): + return self.__str__() + + +class Group: + def __init__(self, box_id, storage_pool_id, erasure_species='block-4-2'): + self.erasure_species = erasure_species + self.box_id = box_id + self.storage_pool_id = storage_pool_id + self.vslots = [] + + def add_vslot(self, node_id, pdisk_id, vslot_id): + self.vslots.append(VSlot(node_id, pdisk_id, vslot_id)) + + def print(self): + printer = PrintNode("Group") + printer.add_line("GroupId", ''.join(random.choice('123456789') for _ in range(9))) + printer.add_line("ErasureSpecies", self.erasure_species, encode=True) + printer.add_line("BoxId", self.box_id) + printer.add_line("StoragePoolId", self.storage_pool_id) + for vslot in self.vslots: + printer.add_node(vslot.print()) + return printer + + +class FailDomain: + def __init__(self, dc=None, room=None, rack=None, body=None): + self.dc = dc + self.room = room + self.rack = rack + self.body = body + + def serialize(self): + result = bytes() + pack = lambda level, value: b'' if value is None else struct.pack('<BI', level, value) + result += pack(10, self.dc) + result += pack(20, self.room) + result += pack(30, self.rack) + result += pack(40, self.body) + result = ''.join('\\{0:03o}'.format(x) for x in result) + return result + + def deserialize(self, data): + value_count = len(data) // 5 + data = struct.unpack('<' + 'BI' * value_count, data) + for index in range(0, len(data), 2): + if data[index] == 10: + self.dc = data[index + 1] + elif data[index] == 20: + self.room = data[index + 1] + elif data[index] == 30: + self.rack = data[index + 1] + else: + self.body = data[index + 1] + + +class Node: + def __init__(self, node_id, dc=None, room=None, rack=None, body=None): + self.node_id = node_id + self.physical_location = FailDomain(dc, room, rack, body) + self.fqdn = "node{}.test.cluster.net".format(node_id) + self.icport = 1337 + + def print(self): + printer = PrintNode("Node") + printer.add_line("NodeId", self.node_id) + printer.add_line("PhysicalLocation", self.physical_location.serialize(), encode=True) + hostkey = PrintNode("HostKey") + hostkey.add_line("Fqdn", self.fqdn, encode=True) + hostkey.add_line("IcPort", self.icport) + printer.add_node(hostkey) + return printer + + +class Request: + def __init__(self, algorithm, iterations): + self.algorithm = algorithm + self.iterations = iterations + + def print(self): + printer = PrintNode("Request") + printer.add_line("Algorithm", self.algorithm) + printer.add_line("Iterations", self.iterations) + return printer + + @staticmethod + def choices(): + return ['ANNEALING', 'QUADRATIC', 'HUNGARIAN'] + + +class BaseConfig: + def __init__(self): + self.nodes = [] + self.pdisks = [] + self.groups = [] + + def print(self): + printer = PrintNode("BaseConfig") + for node in self.nodes: + printer.add_node(node.print()) + for pdisk in self.pdisks: + printer.add_node(pdisk.print()) + for group in self.groups: + printer.add_node(group.print()) + return printer + + +class StoragePool: + def __init__(self, groups, box_id, sp_id, erasure_species="block-4-2"): + self.box_id = box_id + self.storage_pool_id = sp_id + self.name = "StoragePool{}:{}".format(box_id, sp_id) + self.erasure_species = erasure_species + self.groups = groups + + def print(self): + printer = PrintNode("StoragePool") + printer.add_line("BoxId", self.box_id) + printer.add_line("StoragePoolId", self.storage_pool_id) + printer.add_line("Name", self.name, encode=True) + 
printer.add_line("ErasureSpecies", self.erasure_species, encode=True) + printer.add_line("VDiskKind", "Default", encode=True) + printer.add_line("Kind", "rot", encode=True) + printer.add_line("NumGroups", self.groups) + pdisk_filter = PrintNode("PDiskFilter") + prop = PrintNode("Property") + prop.add_line("Type", "ROT") + pdisk_filter.add_node(prop) + printer.add_node(pdisk_filter) + return printer + + +class Metric: + def __init__(self, vslot, size): + self.vslot = vslot + self.size = size + + def print(self): + printer = PrintNode("Metric") + printer.add_node(self.vslot.print()) + printer.add_line("Metric", self.size) + return printer + + def __str__(self): + return "Metric({}, {})".format(str(self.vslot), self.size) + + def __repr__(self): + return self.__str__() + + +class MetricStrategy: + def __init__(self, name): + if name not in MetricStrategy.choices(): + raise Exception("wrong strategy typename") + self.func = getattr(self, 'strategy_{}'.format(name)) + + @staticmethod + def choices(): + allowed_names = [name for name in dir(MetricStrategy) if name.startswith('strategy_')] + allowed_names = [name[name.find('_') + 1:] for name in allowed_names] + return allowed_names + + def execute(self, *args, **kwargs): + return self.func(*args, **kwargs) + + def strategy_random(self, minimum=10*MB, maximum=10*GB, *args, **kwargs): + return random.randint(minimum, maximum) + + def strategy_minmax(self, group_id, minimum=10*MB, maximum=10*GB, *args, **kwargs): + if group_id % 2 == 0: + return minimum + return maximum + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Test cluster crafting utility.') + parser.add_argument('--nodes', dest='total_nodes', type=int, default=0, help='count of nodes in cluster') + parser.add_argument('--groups', dest='groups', type=int, default=0, help='count of existing groups') + parser.add_argument('--nodes-in-rack', dest='nodes_in_rack', type=int, default=8, help='count of nodes in one rack') + parser.add_argument('--pdisks-per-node', dest='pdisks_per_node', type=int, default=1, help='count of pdisks on one node') + parser.add_argument('--cluster-output', dest='cluster_output', type=str, default='cluster', help='name of file with clusterfit config') + parser.add_argument('--metric-output', dest='metric_output', type=str, default='metric', help='name of file with vdisk metrics') + parser.add_argument('--strategy', dest='strategy', type=str, default='random', choices=MetricStrategy.choices(), help='vslot metric assignment mechanism') + parser.add_argument('--algorithm', dest='algorithm', type=str, choices=Request.choices(), help='clusterfit algorithm') + parser.add_argument('--iterations', dest='iterations', type=int, default=10**5, help='count of iteration in anneling algorithm') + args = parser.parse_args() + + racks = dict() + node_usage = dict() + strategy = MetricStrategy(args.strategy) + + nodes = [] + for id in range(args.total_nodes): + rack_id = id // args.nodes_in_rack + nodes.append(Node(human(id), 1, 1, human(id // args.nodes_in_rack), 1)) + node_usage[id] = [0 for _ in range(args.pdisks_per_node)] + if rack_id in racks: + racks[rack_id].append(id) + else: + racks[rack_id] = [id] + + pdisks = [] + for node in range(args.total_nodes): + for index in range(args.pdisks_per_node): + pdisk = PDisk(human(node), human(index), 1) + pdisks.append(pdisk) + + def make_vslots(): + def find_vslot(rack_id): + for node_id in racks[rack_id]: + for pdisk_id in range(args.pdisks_per_node): + if node_usage[node_id][pdisk_id] < 8: + 
node_usage[node_id][pdisk_id] += 1 + return VSlot(human(node_id), human(pdisk_id), node_usage[node_id][pdisk_id]) + return None + result = [] + for rack_id in racks: + if len(result) < 8: + vslot = find_vslot(rack_id) + if vslot is not None: + result.append(vslot) + return result + + groups = [] + metrics = [] + for group_id in range(args.groups): + group = Group(1, 1) + group.vslots = make_vslots() + for vslot in group.vslots: + metrics.append(Metric(vslot, strategy.execute(group_id=group_id, vslot=vslot))) + groups.append(group) + + storage_pools = [] + storage_pools.append(StoragePool(args.groups, 1, 1)) + + base_config = BaseConfig() + base_config.pdisks = pdisks + base_config.groups = groups + base_config.nodes = nodes + + with open(args.cluster_output, 'w') as cluster: + cluster.write(str(base_config.print()) + "\n") + for spool in storage_pools: + cluster.write(str(spool.print()) + "\n") + for metric in metrics: + cluster.write(str(metric.print()) + "\n") + cluster.write(str(Request(args.algorithm, args.iterations).print()) + "\n") + + with open(args.metric_output, 'w') as metrics_file: + metrics_file.write(str(metrics)) + diff --git a/ydb/core/mind/bscontroller/cluster_fit_scripts/parser.py b/ydb/core/mind/bscontroller/cluster_fit_scripts/parser.py index f4efc263a6..fa478de894 100644 --- a/ydb/core/mind/bscontroller/cluster_fit_scripts/parser.py +++ b/ydb/core/mind/bscontroller/cluster_fit_scripts/parser.py @@ -1,106 +1,106 @@ -#!/usr/bin/python3 -import argparse - - -class VSlot: - def __init__(self, node_id, pdisk_id, vslot_id): - self.node_id = node_id - self.pdisk_id = pdisk_id - self.vslot_id = vslot_id - - def print(self): - printer = PrintNode("VSlotId") - printer.add_line("NodeId", self.node_id) - printer.add_line("PDiskId", self.pdisk_id) - printer.add_line("VSlotId", self.vslot_id) - return printer - - def key(self): - return (self.node_id, self.pdisk_id, self.vslot_id) - - def __str__(self): - return "VSlot({}, {}, {})".format(self.node_id, self.pdisk_id, self.vslot_id) - - def __repr__(self): - return self.__str__() - - -class Metric: - def __init__(self, vslot, size): - self.vslot = vslot - self.size = size - - def print(self): - printer = PrintNode("Metric") - printer.add_node(self.vslot.print()) - printer.add_line("Metric", self.size) - return printer - - def __str__(self): - return "Metric({}, {})".format(str(self.vslot), self.size) - - def __repr__(self): - return self.__str__() - - -class Statistics: - def __init__(self): - self.metrics = dict() - self.moved = list() - self.total_size = 0 - self.moved_size = 0 - - def parse_metrics(self, metrics): - for metric in metrics: - self.metrics[metric.vslot.key()] = metric.size - self.total_size += metric.size - - def move_vslot(self, vslot): - self.moved.append(vslot) - self.moved_size += self.metrics[vslot.key()] - - def __str__(self): - return "Statistics(moved={:3f}%)".format(100 * self.moved_size / self.total_size) - - def __repr__(self): - return self.__str__() - - -def parse_line(line): - return int(line.strip().split(':')[-1].strip()) - - -def read_file(result_filename, metrics_filename): - with open(result_filename, 'r') as result_desc: - result_content = result_desc.read().strip() - with open(metrics_filename, 'r') as metrics_desc: - metrics = eval(metrics_desc.read()) - - stats = Statistics() - stats.parse_metrics(metrics) - - result_lines = result_content.split('\n') - current_line = 0 - while current_line < len(result_lines): - if result_lines[current_line].strip().startswith("MoveCommand"): - group_id = 
parse_line(result_lines[current_line + 1]) - origin_node_id = parse_line(result_lines[current_line + 2]) - origin_pdisk_id = parse_line(result_lines[current_line + 3]) - origin_vslot_id = parse_line(result_lines[current_line + 4]) - target_node_id = parse_line(result_lines[current_line + 5]) - target_pdisk_id = parse_line(result_lines[current_line + 6]) - stats.move_vslot(VSlot(origin_node_id, origin_pdisk_id, origin_vslot_id)) - current_line += 7 - else: - current_line += 1 - - return stats - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="ClusterFit output statistics tool") - parser.add_argument("--src", dest="result", type=str, help="text file with move commands and pdisk statistics") - parser.add_argument("--metrics", dest="metrics", type=str, help="metrics for vslots") - args = parser.parse_args() - stats = read_file(args.result, args.metrics) - print(stats) +#!/usr/bin/python3 +import argparse + + +class VSlot: + def __init__(self, node_id, pdisk_id, vslot_id): + self.node_id = node_id + self.pdisk_id = pdisk_id + self.vslot_id = vslot_id + + def print(self): + printer = PrintNode("VSlotId") + printer.add_line("NodeId", self.node_id) + printer.add_line("PDiskId", self.pdisk_id) + printer.add_line("VSlotId", self.vslot_id) + return printer + + def key(self): + return (self.node_id, self.pdisk_id, self.vslot_id) + + def __str__(self): + return "VSlot({}, {}, {})".format(self.node_id, self.pdisk_id, self.vslot_id) + + def __repr__(self): + return self.__str__() + + +class Metric: + def __init__(self, vslot, size): + self.vslot = vslot + self.size = size + + def print(self): + printer = PrintNode("Metric") + printer.add_node(self.vslot.print()) + printer.add_line("Metric", self.size) + return printer + + def __str__(self): + return "Metric({}, {})".format(str(self.vslot), self.size) + + def __repr__(self): + return self.__str__() + + +class Statistics: + def __init__(self): + self.metrics = dict() + self.moved = list() + self.total_size = 0 + self.moved_size = 0 + + def parse_metrics(self, metrics): + for metric in metrics: + self.metrics[metric.vslot.key()] = metric.size + self.total_size += metric.size + + def move_vslot(self, vslot): + self.moved.append(vslot) + self.moved_size += self.metrics[vslot.key()] + + def __str__(self): + return "Statistics(moved={:3f}%)".format(100 * self.moved_size / self.total_size) + + def __repr__(self): + return self.__str__() + + +def parse_line(line): + return int(line.strip().split(':')[-1].strip()) + + +def read_file(result_filename, metrics_filename): + with open(result_filename, 'r') as result_desc: + result_content = result_desc.read().strip() + with open(metrics_filename, 'r') as metrics_desc: + metrics = eval(metrics_desc.read()) + + stats = Statistics() + stats.parse_metrics(metrics) + + result_lines = result_content.split('\n') + current_line = 0 + while current_line < len(result_lines): + if result_lines[current_line].strip().startswith("MoveCommand"): + group_id = parse_line(result_lines[current_line + 1]) + origin_node_id = parse_line(result_lines[current_line + 2]) + origin_pdisk_id = parse_line(result_lines[current_line + 3]) + origin_vslot_id = parse_line(result_lines[current_line + 4]) + target_node_id = parse_line(result_lines[current_line + 5]) + target_pdisk_id = parse_line(result_lines[current_line + 6]) + stats.move_vslot(VSlot(origin_node_id, origin_pdisk_id, origin_vslot_id)) + current_line += 7 + else: + current_line += 1 + + return stats + + +if __name__ == "__main__": + parser = 
argparse.ArgumentParser(description="ClusterFit output statistics tool") + parser.add_argument("--src", dest="result", type=str, help="text file with move commands and pdisk statistics") + parser.add_argument("--metrics", dest="metrics", type=str, help="metrics for vslots") + args = parser.parse_args() + stats = read_file(args.result, args.metrics) + print(stats) diff --git a/ydb/core/mind/bscontroller/config_cmd.cpp b/ydb/core/mind/bscontroller/config_cmd.cpp index fa329630a0..2fc925a9cd 100644 --- a/ydb/core/mind/bscontroller/config_cmd.cpp +++ b/ydb/core/mind/bscontroller/config_cmd.cpp @@ -155,7 +155,7 @@ namespace NKikimr::NBsController { State.emplace(*Self, Self->HostRecords, TActivationContext::Now()); State->CheckConsistency(); - + TString m; google::protobuf::TextFormat::Printer printer; printer.SetSingleLineMode(true); diff --git a/ydb/core/mind/bscontroller/config_fit_groups.cpp b/ydb/core/mind/bscontroller/config_fit_groups.cpp index cd42468e73..cb8801b0f5 100644 --- a/ydb/core/mind/bscontroller/config_fit_groups.cpp +++ b/ydb/core/mind/bscontroller/config_fit_groups.cpp @@ -1,7 +1,7 @@ #include "impl.h" #include "config.h" #include "group_mapper.h" -#include "group_geometry_info.h" +#include "group_geometry_info.h" namespace NKikimr { namespace NBsController { diff --git a/ydb/core/mind/bscontroller/group_geometry_info.h b/ydb/core/mind/bscontroller/group_geometry_info.h index 354c955221..10e5daedba 100644 --- a/ydb/core/mind/bscontroller/group_geometry_info.h +++ b/ydb/core/mind/bscontroller/group_geometry_info.h @@ -1,15 +1,15 @@ #pragma once - + #include "defs.h" -#include "impl.h" -#include "config.h" -#include "group_mapper.h" - +#include "impl.h" +#include "config.h" +#include "group_mapper.h" + namespace NKikimr::NBsController { - + struct TExFitGroupError : yexception {}; - + class TGroupGeometryInfo { const TBlobStorageGroupType Type; ui32 NumFailRealms; @@ -19,7 +19,7 @@ namespace NKikimr::NBsController { ui32 RealmLevelEnd; ui32 DomainLevelBegin; ui32 DomainLevelEnd; - + public: TGroupGeometryInfo(TBlobStorageGroupType type, NKikimrBlobStorage::TGroupGeometry g) : Type(type) @@ -36,7 +36,7 @@ namespace NKikimr::NBsController { const ui32 minNumFailRealms = isMirror3dc ? 3 : 1; const ui32 minNumFailDomainsPerFailRealm = isMirror3dc ? 
3 : Type.BlobSubgroupSize(); const ui32 minNumVDisksPerFailDomain = 1; - + if (!NumFailRealms && !NumFailDomainsPerFailRealm && !NumVDisksPerFailDomain) { // no values are set, this means we're going to use the default ones NumFailRealms = minNumFailRealms; @@ -46,8 +46,8 @@ namespace NKikimr::NBsController { NumFailDomainsPerFailRealm < minNumFailDomainsPerFailRealm || NumVDisksPerFailDomain < minNumVDisksPerFailDomain) { throw TExFitGroupError() << "not enough fail domains, fail realms, or vdisks for specified erasure"; - } - + } + if (RealmLevelBegin || RealmLevelEnd || DomainLevelBegin || DomainLevelEnd) { if (RealmLevelEnd < RealmLevelBegin || DomainLevelEnd < DomainLevelBegin) { throw TExFitGroupError() << "XxxLevelBegin must be less than or equal to XxxLevelEnd"; @@ -57,9 +57,9 @@ namespace NKikimr::NBsController { RealmLevelEnd = 20; DomainLevelBegin = 10; DomainLevelEnd = 40; - } + } } - + ui32 GetNumFailRealms() const { return NumFailRealms; } ui32 GetNumFailDomainsPerFailRealm() const { return NumFailDomainsPerFailRealm; } ui32 GetNumVDisksPerFailDomain() const { return NumVDisksPerFailDomain; } @@ -77,19 +77,19 @@ namespace NKikimr::NBsController { requiredSpace, requireOperational, error)) { return; } - } + } throw TExFitGroupError() << "failed to allocate group: " << error; } - + bool ResizeGroup(TGroupMapper::TGroupDefinition& group) const { if (!group) { group.resize(NumFailRealms); - for (auto &realm : group) { + for (auto &realm : group) { realm.resize(NumFailDomainsPerFailRealm); - for (auto &domain : realm) { + for (auto &domain : realm) { domain.resize(NumVDisksPerFailDomain); - } - } + } + } } else { bool ok = group.size() == NumFailRealms; if (ok) { @@ -111,13 +111,13 @@ namespace NKikimr::NBsController { if (!ok) { return false; } - } + } return true; } - + TBlobStorageGroupType::EErasureSpecies GetErasure() const { return Type.GetErasure(); } }; - + } // NKikimr::NBsController diff --git a/ydb/core/mind/bscontroller/impl.h b/ydb/core/mind/bscontroller/impl.h index 31144441ba..ba311a254c 100644 --- a/ydb/core/mind/bscontroller/impl.h +++ b/ydb/core/mind/bscontroller/impl.h @@ -45,7 +45,7 @@ public: using THostConfigId = Schema::HostConfig::TKey::Type; using TBoxId = Schema::Box::TKey::Type; using TBoxStoragePoolId = Schema::BoxStoragePool::TKey::Type; - using THostId = std::tuple<TString, i32>; // (Host, IcPort) identifier + using THostId = std::tuple<TString, i32>; // (Host, IcPort) identifier class TTxInitScheme; class TTxMigrate; diff --git a/ydb/core/mind/bscontroller/monitoring.cpp b/ydb/core/mind/bscontroller/monitoring.cpp index 54374100eb..245c782cea 100644 --- a/ydb/core/mind/bscontroller/monitoring.cpp +++ b/ydb/core/mind/bscontroller/monitoring.cpp @@ -29,19 +29,19 @@ class TBlobStorageController::TTxMonEvent_OperationLog : public TTransactionBase const TCgiParameters Params; private: - struct TOperationLogEntry { + struct TOperationLogEntry { using T = Schema::OperationLog; T::Index::Type Index; T::Timestamp::Type Timestamp; T::Request::Type Request; T::Response::Type Response; T::ExecutionTime::Type ExecutionTime; - }; - + }; + private: - TVector<TOperationLogEntry> Logs; + TVector<TOperationLogEntry> Logs; ui64 NumRows = 0; - + public: TTxMonEvent_OperationLog(const TActorId& sender, TCgiParameters params, TSelf *controller) : TBase(controller) @@ -70,7 +70,7 @@ public: {} bool LoadOperationLog(TTransactionContext& txc, ui64 count, ui64 offset) { - NIceDb::TNiceDb db(txc.DB); + NIceDb::TNiceDb db(txc.DB); using T = Schema::OperationLog; // obtain the 
very first record index @@ -88,8 +88,8 @@ public: // scan the table auto table = db.Table<T>().GreaterOrEqual(firstRecordIndex + offset).Select(); if (!table.IsReady()) { - return false; - } + return false; + } Logs.reserve(count); for (; !table.EndOfSet() && count; --count) { const auto& index = table.GetValue<Schema::OperationLog::Index>(); @@ -99,12 +99,12 @@ public: const auto& executionTime = table.GetValue<Schema::OperationLog::ExecutionTime>(); Logs.emplace_back(TOperationLogEntry{index, timestamp, request, response, executionTime}); if (!table.Next()) { - return false; - } - } - return true; - } - + return false; + } + } + return true; + } + void RenderOperationLog(IOutputStream& out, const ui64 count, const ui64 offset) { Self->RenderHeader(out); @@ -537,13 +537,13 @@ public: const TActorId Source; const TGroupId GroupId; TString Response; - + TTxMonEvent_GetDown(const TActorId& source, TGroupId groupId, TSelf* bsc) : TBase(bsc) , Source(source) , GroupId(groupId) {} - + TTxType GetTxType() const override { return NBlobStorageController::TXTYPE_MON_EVENT_GET_DOWN; } bool Execute(TTransactionContext&, const TActorContext&) override { @@ -562,27 +562,27 @@ public: json = reportGroup(*group); } else { json["Error"] = Sprintf("GroupId# %" PRIu32 " not found", GroupId); - } + } } else { for (const auto& kv : Self->GroupMap) { json.AppendValue(reportGroup(*kv.second)); } - } + } TStringStream stream; NJson::WriteJson(&stream, &json); Response = stream.Str(); return true; - } - + } + void Complete(const TActorContext&) override { STLOG(PRI_DEBUG, BS_CONTROLLER, BSCTXMO02, "TBlobStorageController::TTxMonEvent_GetDown", (GroupId, GroupId), (Response, Response)); TActivationContext::Send(new IEventHandle(Source, Self->SelfId(), new NMon::TEvRemoteJsonInfoRes(Response))); - } + } }; - + class TDisableSelfHealActor : public TActorBootstrapped<TDisableSelfHealActor> { const TActorId MonProxy; const TString Url; @@ -616,11 +616,11 @@ public: bool TBlobStorageController::OnRenderAppHtmlPage(NMon::TEvRemoteHttpInfo::TPtr ev, const TActorContext&) { if (!Executor() || !Executor()->GetStats().IsActive) { return false; - } + } if (!ev) { return true; } - + THolder<TTransactionBase<TBlobStorageController>> tx; TStringStream str; const TCgiParameters& cgi(ev->Get()->Cgi()); @@ -900,7 +900,7 @@ void TBlobStorageController::RenderInternalTables(IOutputStream& out, const TStr } } } - } + } } else if (table == "serials") { TABLE_CLASS("table") { TABLEHEAD() { @@ -928,9 +928,9 @@ void TBlobStorageController::RenderInternalTables(IOutputStream& out, const TStr } } } - } + } } - + RenderFooter(out); } diff --git a/ydb/core/mind/bscontroller/scheme.h b/ydb/core/mind/bscontroller/scheme.h index 2d09837ee1..3b7a425c95 100644 --- a/ydb/core/mind/bscontroller/scheme.h +++ b/ydb/core/mind/bscontroller/scheme.h @@ -308,39 +308,39 @@ struct Schema : NIceDb::Schema { using TColumns = TableColumns<Index, Timestamp, Request, Response, ExecutionTime>; }; - struct MigrationPlan : Table<126> { - enum EState : ui32 { - CREATED, - ACTIVE, - PAUSED, - FINISHED - }; - - struct Name : Column<1, NScheme::NTypeIds::String> {}; - struct State : Column<2, NScheme::NTypeIds::Uint32> { using Type = EState; }; - struct Size : Column<3, NScheme::NTypeIds::Uint64> {}; - struct Done : Column<4, NScheme::NTypeIds::Uint64> {}; - - using TKey = TableKey<Name>; - using TColumns = TableColumns<Name, State, Size, Done>; - }; - - struct MigrationEntry : Table<127> { - struct PlanName : Column<1, MigrationPlan::Name::ColumnType> {}; - struct 
EntryIndex : Column<2, NScheme::NTypeIds::Uint64> {}; - struct GroupId : Column<3, NScheme::NTypeIds::Uint32> {}; - struct OriginNodeId : Column<4, NScheme::NTypeIds::Uint32> {}; - struct OriginPDiskId : Column<5, NScheme::NTypeIds::Uint32> {}; - struct OriginVSlotId : Column<6, NScheme::NTypeIds::Uint32> {}; - struct TargetNodeId : Column<7, NScheme::NTypeIds::Uint32> {}; - struct TargetPDiskId : Column<8, NScheme::NTypeIds::Uint32> {}; - struct Done : Column<9, NScheme::NTypeIds::Bool> {}; - - using TKey = TableKey<PlanName, EntryIndex>; - using TColumns = TableColumns<PlanName, EntryIndex, GroupId, OriginNodeId, - OriginPDiskId, OriginVSlotId, TargetNodeId, TargetPDiskId, Done>; - }; - + struct MigrationPlan : Table<126> { + enum EState : ui32 { + CREATED, + ACTIVE, + PAUSED, + FINISHED + }; + + struct Name : Column<1, NScheme::NTypeIds::String> {}; + struct State : Column<2, NScheme::NTypeIds::Uint32> { using Type = EState; }; + struct Size : Column<3, NScheme::NTypeIds::Uint64> {}; + struct Done : Column<4, NScheme::NTypeIds::Uint64> {}; + + using TKey = TableKey<Name>; + using TColumns = TableColumns<Name, State, Size, Done>; + }; + + struct MigrationEntry : Table<127> { + struct PlanName : Column<1, MigrationPlan::Name::ColumnType> {}; + struct EntryIndex : Column<2, NScheme::NTypeIds::Uint64> {}; + struct GroupId : Column<3, NScheme::NTypeIds::Uint32> {}; + struct OriginNodeId : Column<4, NScheme::NTypeIds::Uint32> {}; + struct OriginPDiskId : Column<5, NScheme::NTypeIds::Uint32> {}; + struct OriginVSlotId : Column<6, NScheme::NTypeIds::Uint32> {}; + struct TargetNodeId : Column<7, NScheme::NTypeIds::Uint32> {}; + struct TargetPDiskId : Column<8, NScheme::NTypeIds::Uint32> {}; + struct Done : Column<9, NScheme::NTypeIds::Bool> {}; + + using TKey = TableKey<PlanName, EntryIndex>; + using TColumns = TableColumns<PlanName, EntryIndex, GroupId, OriginNodeId, + OriginPDiskId, OriginVSlotId, TargetNodeId, TargetPDiskId, Done>; + }; + struct ScrubState : Table<128> { struct NodeId : Column<1, VSlot::NodeID::ColumnType> {}; struct PDiskId : Column<2, VSlot::PDiskID::ColumnType> {}; @@ -387,8 +387,8 @@ struct Schema : NIceDb::Schema { BoxStoragePoolUser, BoxStoragePoolPDiskFilter, GroupStoragePool, - OperationLog, - MigrationPlan, + OperationLog, + MigrationPlan, MigrationEntry, ScrubState, DriveSerial diff --git a/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp b/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp index 5263966493..87f7b18fb3 100644 --- a/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp +++ b/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp @@ -15,7 +15,7 @@ #include <library/cpp/actors/core/interconnect.h> #include <library/cpp/actors/interconnect/interconnect.h> -#include <util/datetime/cputimer.h> +#include <util/datetime/cputimer.h> #include <util/random/random.h> #include <google/protobuf/text_format.h> @@ -54,13 +54,13 @@ struct TEnvironmentSetup { TPDiskRecord ParsePDiskRecord(const NKikimrBlobStorage::TBaseConfig::TPDisk& pdisk) { return std::make_tuple( - pdisk.GetNodeId(), - std::make_tuple( - pdisk.GetPath(), - pdisk.GetType(), - pdisk.GetSharedWithOs(), - pdisk.GetReadCentric(), - pdisk.GetKind())); + pdisk.GetNodeId(), + std::make_tuple( + pdisk.GetPath(), + pdisk.GetType(), + pdisk.GetSharedWithOs(), + pdisk.GetReadCentric(), + pdisk.GetKind())); } TSet<TPDiskRecord> ParsePDisks(const NKikimrBlobStorage::TBaseConfig& config) { @@ -74,8 +74,8 @@ struct TEnvironmentSetup { TEnvironmentSetup(ui32 nodeCount, ui32 dataCenterCount) : NodeCount(nodeCount) 
, DataCenterCount(dataCenterCount) - { - } + { + } void Prepare(const TString& /*dispatchName*/, std::function<void(TTestActorRuntime&)> setup, bool& outActiveZone) { outActiveZone = false; @@ -130,8 +130,8 @@ struct TEnvironmentSetup { } void DefineBox(ui64 boxId, const TString& name, const TVector<TPDiskDefinition>& pdisks, - const TVector<TNodeRecord>& nodes, NKikimrBlobStorage::TConfigRequest& request, - const ui64 generation = 0) { + const TVector<TNodeRecord>& nodes, NKikimrBlobStorage::TConfigRequest& request, + const ui64 generation = 0) { auto& hostcfg = *request.AddCommand()->MutableDefineHostConfig(); hostcfg.SetHostConfigId(NextHostConfigId++); for (const auto& pdisk : pdisks) { @@ -151,7 +151,7 @@ struct TEnvironmentSetup { auto& box = *request.AddCommand()->MutableDefineBox(); box.SetBoxId(boxId); box.SetName(name); - box.SetItemConfigGeneration(generation); + box.SetItemConfigGeneration(generation); for (const auto& node : nodes) { TString fqdn; i32 icPort; @@ -171,9 +171,9 @@ struct TEnvironmentSetup { } void DefineStoragePool(ui64 boxId, ui64 storagePoolId, const TString& name, ui32 numGroups, - TMaybe<NKikimrBlobStorage::EPDiskType> pdiskType, TMaybe<bool> sharedWithOs, - NKikimrBlobStorage::TConfigRequest& request, const TString& erasure = "block-4-2", - const ui64 generation = 0) { + TMaybe<NKikimrBlobStorage::EPDiskType> pdiskType, TMaybe<bool> sharedWithOs, + NKikimrBlobStorage::TConfigRequest& request, const TString& erasure = "block-4-2", + const ui64 generation = 0) { auto& cmd = *request.AddCommand()->MutableDefineStoragePool(); cmd.SetBoxId(boxId); cmd.SetStoragePoolId(storagePoolId); @@ -181,7 +181,7 @@ struct TEnvironmentSetup { cmd.SetErasureSpecies(erasure); cmd.SetVDiskKind("Default"); cmd.SetNumGroups(numGroups); - cmd.SetItemConfigGeneration(generation); + cmd.SetItemConfigGeneration(generation); if (pdiskType || sharedWithOs) { auto& filter = *cmd.AddPDiskFilter(); if (pdiskType) { @@ -231,7 +231,7 @@ struct TEnvironmentSetup { void SetupTablet() { const TActorId bootstrapper = CreateTestBootstrapper(*Runtime, CreateTestTabletInfo(TabletId, TTabletTypes::FLAT_BS_CONTROLLER, TErasureType::ErasureNone, GroupId), - &CreateFlatBsController, NodeId); + &CreateFlatBsController, NodeId); Runtime->EnableScheduleForActor(bootstrapper); { TDispatchOptions options; @@ -252,8 +252,8 @@ class TFinalizer { public: TFinalizer(TEnvironmentSetup& env) : Env(env) - { - } + { + } ~TFinalizer() { Env.Finalize(); @@ -263,18 +263,18 @@ public: Y_UNIT_TEST_SUITE(BsControllerConfig) { Y_UNIT_TEST(Basic) { TEnvironmentSetup env(10, 1); - RunTestWithReboots(env.TabletIds, [&] { return env.PrepareInitialEventsFilter(); }, [&](const TString& dispatchName, std::function<void(TTestActorRuntime&)> setup, bool& outActiveZone) { + RunTestWithReboots(env.TabletIds, [&] { return env.PrepareInitialEventsFilter(); }, [&](const TString& dispatchName, std::function<void(TTestActorRuntime&)> setup, bool& outActiveZone) { TFinalizer finalizer(env); env.Prepare(dispatchName, setup, outActiveZone); NKikimrBlobStorage::TConfigRequest request; NKikimrBlobStorage::TConfigResponse response = env.Invoke(request); - UNIT_ASSERT(response.GetSuccess()); }); + UNIT_ASSERT(response.GetSuccess()); }); } Y_UNIT_TEST(PDiskCreate) { TEnvironmentSetup env(10, 1); - RunTestWithReboots(env.TabletIds, [&] { return env.PrepareInitialEventsFilter(); }, [&](const TString& dispatchName, std::function<void(TTestActorRuntime&)> setup, bool& outActiveZone) { + RunTestWithReboots(env.TabletIds, [&] { return 
env.PrepareInitialEventsFilter(); }, [&](const TString& dispatchName, std::function<void(TTestActorRuntime&)> setup, bool& outActiveZone) { TFinalizer finalizer(env); env.Prepare(dispatchName, setup, outActiveZone); @@ -290,7 +290,7 @@ Y_UNIT_TEST_SUITE(BsControllerConfig) { NKikimrBlobStorage::TConfigResponse response = env.Invoke(request); UNIT_ASSERT(response.GetSuccess()); - UNIT_ASSERT(env.ParsePDisks(response.GetStatus(baseConfigIndex).GetBaseConfig()) == env.ExpectedPDisks); }); + UNIT_ASSERT(env.ParsePDisks(response.GetStatus(baseConfigIndex).GetBaseConfig()) == env.ExpectedPDisks); }); } Y_UNIT_TEST(ManyPDisksRestarts) { @@ -331,7 +331,7 @@ Y_UNIT_TEST_SUITE(BsControllerConfig) { const ui32 numGroups1 = numNodes1 * 3; const ui32 numGroups2 = numNodes2 * 4; TEnvironmentSetup env(numNodes, 1); - RunTestWithReboots(env.TabletIds, [&] { return env.PrepareInitialEventsFilter(); }, [&](const TString& dispatchName, std::function<void(TTestActorRuntime&)> setup, bool& outActiveZone) { + RunTestWithReboots(env.TabletIds, [&] { return env.PrepareInitialEventsFilter(); }, [&](const TString& dispatchName, std::function<void(TTestActorRuntime&)> setup, bool& outActiveZone) { TFinalizer finalizer(env); env.Prepare(dispatchName, setup, outActiveZone); @@ -408,81 +408,81 @@ Y_UNIT_TEST_SUITE(BsControllerConfig) { for (const auto& kv : groups2) { UNIT_ASSERT_EQUAL(kv.second, 8); } - } }); - } - - Y_UNIT_TEST(ExtendBoxAndStoragePool) { - const ui32 totalNumNodes = 60; - const ui32 originNumNodes = 50; - const ui32 originNumGroups = originNumNodes * 3; - const ui32 resultNumGroups = totalNumNodes * 3; - TEnvironmentSetup env(totalNumNodes, 1); - RunTestWithReboots(env.TabletIds, [&] { return env.PrepareInitialEventsFilter(); }, [&](const TString& dispatchName, std::function<void(TTestActorRuntime&)> setup, bool& outActiveZone) { - TFinalizer finalizer(env); - env.Prepare(dispatchName, setup, outActiveZone); - - TVector<TEnvironmentSetup::TNodeRecord> nodes = env.GetNodes(), part1, part2; - - for (auto &node : nodes) { - (part1.size() < originNumNodes ? part1 : part2).push_back(node); + } }); + } + + Y_UNIT_TEST(ExtendBoxAndStoragePool) { + const ui32 totalNumNodes = 60; + const ui32 originNumNodes = 50; + const ui32 originNumGroups = originNumNodes * 3; + const ui32 resultNumGroups = totalNumNodes * 3; + TEnvironmentSetup env(totalNumNodes, 1); + RunTestWithReboots(env.TabletIds, [&] { return env.PrepareInitialEventsFilter(); }, [&](const TString& dispatchName, std::function<void(TTestActorRuntime&)> setup, bool& outActiveZone) { + TFinalizer finalizer(env); + env.Prepare(dispatchName, setup, outActiveZone); + + TVector<TEnvironmentSetup::TNodeRecord> nodes = env.GetNodes(), part1, part2; + + for (auto &node : nodes) { + (part1.size() < originNumNodes ? 
part1 : part2).push_back(node); + } + + // creating box of originNumNodes nodes + NKikimrBlobStorage::TConfigRequest request; + env.DefineBox(1, "first box", { + {"/dev/disk1", NKikimrBlobStorage::ROT, false, false, 0}, + {"/dev/disk2", NKikimrBlobStorage::ROT, true, false, 0}, + {"/dev/disk3", NKikimrBlobStorage::SSD, false, false, 0}, + }, part1, request); + + // creating storage pool of originNumGroups groups + env.DefineStoragePool(1, 1, "first storage pool", originNumGroups, NKikimrBlobStorage::ROT, {}, request); + + // executing request + size_t baseConfigIndex = request.CommandSize(); + request.AddCommand()->MutableQueryBaseConfig(); + NKikimrBlobStorage::TConfigResponse response = env.Invoke(request); + UNIT_ASSERT(response.GetSuccess()); + + // extending count of nodes in box: from originNumNodes to totalNumNodes + request.Clear(); + env.DefineBox(1, "first box", { + {"/dev/disk1", NKikimrBlobStorage::ROT, false, false, 0}, + {"/dev/disk2", NKikimrBlobStorage::ROT, true, false, 0}, + {"/dev/disk3", NKikimrBlobStorage::SSD, false, false, 0}, + }, nodes, request, 1); + + // saving node ids + TSet<ui32> nodeIds; + for (const auto& item : nodes) { + nodeIds.insert(std::get<2>(item)); } - // creating box of originNumNodes nodes - NKikimrBlobStorage::TConfigRequest request; - env.DefineBox(1, "first box", { - {"/dev/disk1", NKikimrBlobStorage::ROT, false, false, 0}, - {"/dev/disk2", NKikimrBlobStorage::ROT, true, false, 0}, - {"/dev/disk3", NKikimrBlobStorage::SSD, false, false, 0}, - }, part1, request); - - // creating storage pool of originNumGroups groups - env.DefineStoragePool(1, 1, "first storage pool", originNumGroups, NKikimrBlobStorage::ROT, {}, request); - - // executing request - size_t baseConfigIndex = request.CommandSize(); - request.AddCommand()->MutableQueryBaseConfig(); - NKikimrBlobStorage::TConfigResponse response = env.Invoke(request); - UNIT_ASSERT(response.GetSuccess()); - - // extending count of nodes in box: from originNumNodes to totalNumNodes - request.Clear(); - env.DefineBox(1, "first box", { - {"/dev/disk1", NKikimrBlobStorage::ROT, false, false, 0}, - {"/dev/disk2", NKikimrBlobStorage::ROT, true, false, 0}, - {"/dev/disk3", NKikimrBlobStorage::SSD, false, false, 0}, - }, nodes, request, 1); - - // saving node ids - TSet<ui32> nodeIds; - for (const auto& item : nodes) { - nodeIds.insert(std::get<2>(item)); - } - - // extending count of groups in box: from originNumGroups to resultNumGroups - env.DefineStoragePool(1, 1, "first storage pool", resultNumGroups, NKikimrBlobStorage::ROT, {}, - request, "block-4-2", 1); - - // executing extention request - baseConfigIndex = request.CommandSize(); - request.AddCommand()->MutableQueryBaseConfig(); - response = env.Invoke(request); - UNIT_ASSERT(response.GetSuccess()); - - // checking consequence - TMap<ui32, ui32> groups; - { - const auto& baseConfig = response.GetStatus(baseConfigIndex).GetBaseConfig(); - UNIT_ASSERT(env.ParsePDisks(baseConfig) == env.ExpectedPDisks); - for (const auto& vslot : baseConfig.GetVSlot()) { - UNIT_ASSERT(vslot.HasVSlotId()); - UNIT_ASSERT(nodeIds.count(vslot.GetVSlotId().GetNodeId())); - ++groups[vslot.GetGroupId()]; - } - UNIT_ASSERT_EQUAL(groups.size(), resultNumGroups); - for (const auto& kv : groups) { - UNIT_ASSERT_EQUAL(kv.second, 8); - } - } }); + // extending count of groups in box: from originNumGroups to resultNumGroups + env.DefineStoragePool(1, 1, "first storage pool", resultNumGroups, NKikimrBlobStorage::ROT, {}, + request, "block-4-2", 1); + + // executing extention request 
+ baseConfigIndex = request.CommandSize(); + request.AddCommand()->MutableQueryBaseConfig(); + response = env.Invoke(request); + UNIT_ASSERT(response.GetSuccess()); + + // checking consequence + TMap<ui32, ui32> groups; + { + const auto& baseConfig = response.GetStatus(baseConfigIndex).GetBaseConfig(); + UNIT_ASSERT(env.ParsePDisks(baseConfig) == env.ExpectedPDisks); + for (const auto& vslot : baseConfig.GetVSlot()) { + UNIT_ASSERT(vslot.HasVSlotId()); + UNIT_ASSERT(nodeIds.count(vslot.GetVSlotId().GetNodeId())); + ++groups[vslot.GetGroupId()]; + } + UNIT_ASSERT_EQUAL(groups.size(), resultNumGroups); + for (const auto& kv : groups) { + UNIT_ASSERT_EQUAL(kv.second, 8); + } + } }); } Y_UNIT_TEST(DeleteStoragePool) { @@ -790,16 +790,16 @@ Y_UNIT_TEST_SUITE(BsControllerConfig) { NKikimrBlobStorage::TConfigRequest request; env.DefineBox(1, "test box", { - {"/dev/disk1", NKikimrBlobStorage::ROT, false, false, 0}, - {"/dev/disk2", NKikimrBlobStorage::ROT, false, false, 0}, - {"/dev/disk3", NKikimrBlobStorage::ROT, false, false, 0}, - {"/dev/disk4", NKikimrBlobStorage::ROT, false, false, 0}, - {"/dev/disk5", NKikimrBlobStorage::ROT, false, false, 0}, - {"/dev/disk6", NKikimrBlobStorage::ROT, false, false, 0}, - {"/dev/disk7", NKikimrBlobStorage::ROT, false, false, 0}, - {"/dev/disk8", NKikimrBlobStorage::ROT, false, false, 0}, - }, - env.GetNodes(), request); + {"/dev/disk1", NKikimrBlobStorage::ROT, false, false, 0}, + {"/dev/disk2", NKikimrBlobStorage::ROT, false, false, 0}, + {"/dev/disk3", NKikimrBlobStorage::ROT, false, false, 0}, + {"/dev/disk4", NKikimrBlobStorage::ROT, false, false, 0}, + {"/dev/disk5", NKikimrBlobStorage::ROT, false, false, 0}, + {"/dev/disk6", NKikimrBlobStorage::ROT, false, false, 0}, + {"/dev/disk7", NKikimrBlobStorage::ROT, false, false, 0}, + {"/dev/disk8", NKikimrBlobStorage::ROT, false, false, 0}, + }, + env.GetNodes(), request); env.DefineStoragePool(1, 1, "test pool", numGroups, NKikimrBlobStorage::ROT, false, request); @@ -810,7 +810,7 @@ Y_UNIT_TEST_SUITE(BsControllerConfig) { NKikimrBlobStorage::TEvControllerSelectGroups request; request.SetReturnAllMatchingGroups(true); - auto* p = request.AddGroupParameters(); + auto* p = request.AddGroupParameters(); p->SetErasureSpecies(TBlobStorageGroupType::Erasure4Plus2Block); p->SetDesiredPDiskCategory(0); p->SetDesiredVDiskCategory(0); diff --git a/ydb/core/protos/blobstorage_config.proto b/ydb/core/protos/blobstorage_config.proto index 4a8e69bbef..10d0a29e6c 100644 --- a/ydb/core/protos/blobstorage_config.proto +++ b/ydb/core/protos/blobstorage_config.proto @@ -254,29 +254,29 @@ message TReassignGroupDisk { bool SuppressDonorMode = 7; // when set, donor mode is not used even if it is enabled through BSC } -enum EClusterFitAlgorithm { - QUADRATIC = 0; - HUNGARIAN = 1; - ANNEALING = 2; -} - +enum EClusterFitAlgorithm { + QUADRATIC = 0; + HUNGARIAN = 1; + ANNEALING = 2; +} + message TClusterFit { - EClusterFitAlgorithm Algorithm = 1; - uint64 Iterations = 2; + EClusterFitAlgorithm Algorithm = 1; + uint64 Iterations = 2; } -message TVSlotMetric { +message TVSlotMetric { NKikimrBlobStorage.TVSlotId VSlotId = 1; - uint64 Metric = 2; -} - -message TClusterFitConfig { - TBaseConfig BaseConfig = 1; - repeated TDefineStoragePool StoragePool = 2; - repeated TVSlotMetric Metric = 3; - TClusterFit Request = 4; -} - + uint64 Metric = 2; +} + +message TClusterFitConfig { + TBaseConfig BaseConfig = 1; + repeated TDefineStoragePool StoragePool = 2; + repeated TVSlotMetric Metric = 3; + TClusterFit Request = 4; +} + message 
TMergeBoxes { message TStoragePoolIdMap { uint64 OriginStoragePoolId = 1; @@ -299,15 +299,15 @@ message TMoveGroups { repeated uint32 ExplicitGroupId = 6; // if no groups are provided, then all groups of origin storage pool are moved } -message TAddMigrationPlan { - string Name = 1; - repeated TMoveCommand MoveCommand = 2; -} - -message TDeleteMigrationPlan { - string Name = 1; -} - +message TAddMigrationPlan { + string Name = 1; + repeated TMoveCommand MoveCommand = 2; +} + +message TDeleteMigrationPlan { + string Name = 1; +} + message TEnableSelfHeal { bool Enable = 1; } @@ -450,8 +450,8 @@ message TConfigRequest { TQueryBaseConfig QueryBaseConfig = 16; // introspection TMergeBoxes MergeBoxes = 23; TMoveGroups MoveGroups = 24; // move groups between storage pools; no checks of SP constraints are performed - TAddMigrationPlan AddMigrationPlan = 25; - TDeleteMigrationPlan DeleteMigrationPlan = 26; + TAddMigrationPlan AddMigrationPlan = 25; + TDeleteMigrationPlan DeleteMigrationPlan = 26; TEnableSelfHeal EnableSelfHeal = 27; TDeclareIntent DeclareIntent = 28; TReadIntent ReadIntent = 29; @@ -580,15 +580,15 @@ message TMoveCommand { uint32 TargetPDiskId = 6; } -message TPDiskStat { - uint32 NodeId = 1; - uint32 PDiskId = 2; - string Fqdn = 3; - int32 IcPort = 4; - string Path = 5; - uint32 NumSlotsAfterMigration = 6; -} - +message TPDiskStat { + uint32 NodeId = 1; + uint32 PDiskId = 2; + string Fqdn = 3; + int32 IcPort = 4; + string Path = 5; + uint32 NumSlotsAfterMigration = 6; +} + message TReassignedItem { NKikimrBlobStorage.TVDiskID VDiskId = 1; NKikimrBlobStorage.TVSlotId From = 2; @@ -645,7 +645,7 @@ message TConfigResponse { repeated uint32 GroupId = 8; uint64 AssignedStoragePoolId = 9; repeated TMoveCommand MoveCommand = 10; - repeated TPDiskStat PDiskStat = 11; + repeated TPDiskStat PDiskStat = 11; repeated TReassignedItem ReassignedItem = 14; TDeclareIntent Intent = 15; } |
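The bulk of this change re-adds the cluster_fit_scripts helpers, which are plain argparse-driven command-line tools. As a rough sketch of how they fit together, the flag names below come from the argparse definitions in cluster.py and parser.py above; the concrete values, file names, and the external ClusterFit step are illustrative assumptions, not part of this commit:

```python
#!/usr/bin/env python3
# Illustrative end-to-end run of the cluster_fit_scripts helpers restored by
# this commit. Flag names match the argparse definitions in cluster.py and
# parser.py; the values and file names are assumptions for the example only.
import subprocess

# 1. Craft a synthetic cluster description plus per-vslot metrics.
subprocess.run([
    "python3", "cluster.py",
    "--nodes", "64",              # nodes in the test cluster
    "--groups", "48",             # existing groups to generate
    "--nodes-in-rack", "8",
    "--pdisks-per-node", "1",
    "--strategy", "random",       # vslot metric assignment mechanism
    "--algorithm", "ANNEALING",   # clusterfit algorithm written into the Request
    "--iterations", "100000",
    "--cluster-output", "cluster",
    "--metric-output", "metric",
], check=True)

# 2. (Outside these scripts) feed the "cluster" file to the ClusterFit tool,
#    which is assumed here to write its MoveCommand output to a file "result".

# 3. Summarize how much data the proposed move commands would relocate.
subprocess.run([
    "python3", "parser.py",
    "--src", "result",
    "--metrics", "metric",
], check=True)
```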