path: root/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse
author     iddqd <iddqd@yandex-team.com>  2024-06-11 10:12:13 +0300
committer  iddqd <iddqd@yandex-team.com>  2024-06-11 10:22:43 +0300
commit     07f57e35443ab7f09471caf2dbf1afbcced4d9f7 (patch)
tree       a4a7b66ead62e83fa988a2ec2ce6576311c1f4b1 /contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse
parent     6db3b8ca95e44179e48306a58656fb1f9317d9c3 (diff)
download   ydb-07f57e35443ab7f09471caf2dbf1afbcced4d9f7.tar.gz
add contrib/python/yandexcloud to import
03b7d3cad2237366b55b393e18d4dc5eb222798c
Diffstat (limited to 'contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse')
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/backup.proto                    47
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/backup_service.proto            69
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/cluster.proto                  363
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/cluster_service.proto         1478
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.proto        741
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/database.proto                  23
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/database_service.proto         117
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/format_schema.proto             31
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/format_schema_service.proto    158
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/maintenance.proto               55
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/ml_model.proto                  27
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/ml_model_service.proto         157
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/resource_preset.proto           21
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/resource_preset_service.proto   53
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/user.proto                    1020
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/user_service.proto             218
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/version.proto                   20
-rw-r--r--  contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/versions_service.proto          42
18 files changed, 4640 insertions, 0 deletions
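
Note: these protos are consumed through generated bindings; the commit message ties them to contrib/python/yandexcloud, whose SDK wraps the gRPC stubs produced by protoc. A minimal sketch, assuming protoc's standard Python layout (a <file>_pb2 / <file>_pb2_grpc module pair per proto file); the token and IDs are placeholders:

    # Sketch only: reaching these protos from Python via the yandexcloud SDK.
    # Module names follow protoc's standard *_pb2 layout for this proto package.
    import yandexcloud
    from yandex.cloud.mdb.clickhouse.v1.backup_service_pb2 import GetBackupRequest
    from yandex.cloud.mdb.clickhouse.v1.backup_service_pb2_grpc import BackupServiceStub

    sdk = yandexcloud.SDK(token="<OAUTH_OR_IAM_TOKEN>")  # placeholder credential
    backup_service = sdk.client(BackupServiceStub)       # the SDK handles channel wiring
    backup = backup_service.Get(GetBackupRequest(backup_id="<backup-id>"))
    print(backup.id, backup.size)
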
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/backup.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/backup.proto
new file mode 100644
index 0000000000..24dac5927c
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/backup.proto
@@ -0,0 +1,47 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+// A ClickHouse Backup resource. See the [Developer's Guide](/docs/managed-clickhouse/concepts)
+// for more information.
+message Backup {
+ enum BackupType {
+ BACKUP_TYPE_UNSPECIFIED = 0;
+
+ // Backup created by automated daily schedule.
+ AUTOMATED = 1;
+
+ // Backup created by user request.
+ MANUAL = 2;
+ }
+
+ // ID of the backup.
+ string id = 1;
+
+ // ID of the folder that the backup belongs to.
+ string folder_id = 2;
+
+ // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format
+ // (i.e. when the backup operation was completed).
+ google.protobuf.Timestamp created_at = 3;
+
+ // ID of the ClickHouse cluster that the backup was created for.
+ string source_cluster_id = 4;
+
+ // Names of the shards included in the backup.
+ repeated string source_shard_names = 6;
+
+ // Time when the backup operation was started.
+ google.protobuf.Timestamp started_at = 5;
+
+ // Size of backup in bytes.
+ int64 size = 7;
+
+ // How this backup was created (manual or automated).
+ BackupType type = 8;
+}
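
Since created_at is documented as the completion time and started_at as the start time, a backup's wall-clock duration can be derived client-side. A small sketch against the generated Python class (backup_duration_seconds is a hypothetical helper, not part of the API):

    # Sketch: derive a backup's duration from its two timestamps.
    from yandex.cloud.mdb.clickhouse.v1.backup_pb2 import Backup

    def backup_duration_seconds(backup: Backup) -> float:
        # Timestamp.ToDatetime() converts the well-known type to a naive UTC datetime.
        started = backup.started_at.ToDatetime()
        finished = backup.created_at.ToDatetime()  # created_at marks completion
        return (finished - started).total_seconds()
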
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/backup_service.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/backup_service.proto
new file mode 100644
index 0000000000..7bb00b44b9
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/backup_service.proto
@@ -0,0 +1,69 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+import "google/api/annotations.proto";
+import "yandex/cloud/mdb/clickhouse/v1/backup.proto";
+import "yandex/cloud/validation.proto";
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+// A set of methods for managing ClickHouse Backup resources.
+service BackupService {
+ // Returns the specified ClickHouse Backup resource.
+ //
+ // To get the list of available ClickHouse Backup resources, make a [List] request.
+ rpc Get (GetBackupRequest) returns (Backup) {
+ option (google.api.http) = { get: "/managed-clickhouse/v1/backups/{backup_id}" };
+ }
+
+ // Retrieves the list of Backup resources available for the specified folder.
+ rpc List (ListBackupsRequest) returns (ListBackupsResponse) {
+ option (google.api.http) = { get: "/managed-clickhouse/v1/backups" };
+ }
+}
+
+message GetBackupRequest {
+ // ID of the backup to return information about.
+ // To get the backup ID, use a [ClusterService.ListBackups] request.
+ string backup_id = 1 [(required) = true];
+}
+
+message ListBackupsRequest {
+ // ID of the folder to list backups in.
+ // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request.
+ string folder_id = 1 [(required) = true, (length) = "<=50"];
+
+ // The maximum number of results per page to return. If the number of available
+ // results is larger than [page_size], the service returns a [ListBackupsResponse.next_page_token]
+ // that can be used to get the next page of results in subsequent list requests.
+ int64 page_size = 2 [(value) = "<=1000"];
+
+ // Page token. To get the next page of results, set [page_token] to the [ListBackupsResponse.next_page_token]
+ // returned by the previous list request.
+ string page_token = 3 [(length) = "<=100"];
+}
+
+message ListBackupsResponse {
+ // List of Backup resources.
+ repeated Backup backups = 1;
+
+ // This token allows you to get the next page of results for list requests. If the number of results
+ // is larger than [ListBackupsRequest.page_size], use the [next_page_token] as the value
+ // for the [ListBackupsRequest.page_token] parameter in the next list request. Each subsequent
+ // list request will have its own [next_page_token] to continue paging through the results.
+ string next_page_token = 2;
+}
+
+message DeleteBackupRequest {
+ // Required. ID of the backup to delete.
+ string backup_id = 1 [(required) = true];
+}
+
+message DeleteBackupMetadata {
+ // Required. ID of the ClickHouse backup that is currently being deleted.
+ string backup_id = 1;
+ // ID of the ClickHouse cluster whose backup is being deleted.
+ string cluster_id = 2;
+}
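
ListBackupsRequest/ListBackupsResponse follow the standard Yandex Cloud pagination contract spelled out in the comments: request pages until next_page_token comes back empty. A sketch of draining all pages (iter_backups is a hypothetical helper; backup_service is a BackupServiceStub client as in the note after the diffstat):

    # Sketch: exhaustively page through BackupService.List.
    from yandex.cloud.mdb.clickhouse.v1.backup_service_pb2 import ListBackupsRequest

    def iter_backups(backup_service, folder_id):
        page_token = ""
        while True:
            resp = backup_service.List(ListBackupsRequest(
                folder_id=folder_id,
                page_size=1000,        # upper bound per (value) = "<=1000"
                page_token=page_token,
            ))
            yield from resp.backups
            page_token = resp.next_page_token
            if not page_token:         # empty token signals the last page
                break
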
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/cluster.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/cluster.proto
new file mode 100644
index 0000000000..4c30e1cdfb
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/cluster.proto
@@ -0,0 +1,363 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+import "google/type/timeofday.proto";
+import "yandex/cloud/mdb/clickhouse/v1/config/clickhouse.proto";
+import "yandex/cloud/mdb/clickhouse/v1/maintenance.proto";
+import "yandex/cloud/validation.proto";
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+// A ClickHouse Cluster resource. For more information, see the
+// [Cluster](/docs/managed-clickhouse/concepts) section in the Developer's Guide.
+message Cluster {
+ enum Environment {
+ ENVIRONMENT_UNSPECIFIED = 0;
+
+ // Stable environment with a conservative update policy:
+ // only hotfixes are applied during regular maintenance.
+ PRODUCTION = 1;
+
+ // Environment with more aggressive update policy: new versions
+ // are rolled out irrespective of backward compatibility.
+ PRESTABLE = 2;
+ }
+
+ enum Health {
+
+ // State of the cluster is unknown ([Host.health] for every host in the cluster is UNKNOWN).
+ HEALTH_UNKNOWN = 0;
+
+ // Cluster is alive and well ([Host.health] for every host in the cluster is ALIVE).
+ ALIVE = 1;
+
+ // Cluster is inoperable ([Host.health] for every host in the cluster is DEAD).
+ DEAD = 2;
+
+ // Cluster is working below capacity ([Host.health] for at least one host in the cluster is not ALIVE).
+ DEGRADED = 3;
+ }
+
+ enum Status {
+
+ // Cluster state is unknown.
+ STATUS_UNKNOWN = 0;
+
+ // Cluster is being created.
+ CREATING = 1;
+
+ // Cluster is running normally.
+ RUNNING = 2;
+
+ // Cluster encountered a problem and cannot operate.
+ ERROR = 3;
+
+ // Cluster is being updated.
+ UPDATING = 4;
+
+ // Cluster is stopping.
+ STOPPING = 5;
+
+ // Cluster stopped.
+ STOPPED = 6;
+
+ // Cluster is starting.
+ STARTING = 7;
+ }
+
+ // ID of the ClickHouse cluster.
+ // This ID is assigned by MDB at creation time.
+ string id = 1;
+
+ // ID of the folder that the ClickHouse cluster belongs to.
+ string folder_id = 2;
+
+ // Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+ google.protobuf.Timestamp created_at = 3;
+
+ // Name of the ClickHouse cluster.
+ // The name is unique within the folder. 1-63 characters long.
+ string name = 4;
+
+ // Description of the ClickHouse cluster. 0-256 characters long.
+ string description = 5;
+
+ // Custom labels for the ClickHouse cluster as `key:value` pairs. Maximum 64 per resource.
+ map<string, string> labels = 6;
+
+ // Deployment environment of the ClickHouse cluster.
+ Environment environment = 7;
+
+ // Description of monitoring systems relevant to the ClickHouse cluster.
+ repeated Monitoring monitoring = 8;
+
+ // Configuration of the ClickHouse cluster.
+ ClusterConfig config = 9;
+
+ // ID of the network that the cluster belongs to.
+ string network_id = 10;
+
+ // Aggregated cluster health.
+ Health health = 11;
+
+ // Current state of the cluster.
+ Status status = 12;
+
+ // ID of the service account used for access to Object Storage.
+ string service_account_id = 13;
+
+ // Maintenance window for the cluster.
+ MaintenanceWindow maintenance_window = 14;
+
+ // Planned maintenance operation to be started for the cluster within the nearest [maintenance_window].
+ MaintenanceOperation planned_operation = 15;
+
+ // User security groups.
+ repeated string security_group_ids = 16;
+
+ // Deletion protection inhibits deletion of the cluster.
+ bool deletion_protection = 17;
+}
+
+// Monitoring system metadata.
+message Monitoring {
+ // Name of the monitoring system.
+ string name = 1;
+
+ // Description of the monitoring system.
+ string description = 2;
+
+ // Link to the monitoring system charts for the ClickHouse cluster.
+ string link = 3;
+}
+
+message ClusterConfig {
+ message Clickhouse {
+ // Configuration settings of a ClickHouse server.
+ config.ClickhouseConfigSet config = 1;
+
+ // Resources allocated to ClickHouse hosts.
+ Resources resources = 2;
+ }
+
+ message Zookeeper {
+ // Resources allocated to ZooKeeper hosts.
+ Resources resources = 1;
+ }
+
+ // Version of the ClickHouse server software.
+ string version = 1;
+
+ // Configuration and resource allocation for ClickHouse hosts.
+ Clickhouse clickhouse = 2;
+
+ // Configuration and resource allocation for ZooKeeper hosts.
+ Zookeeper zookeeper = 3;
+
+ // Time to start the daily backup, in the UTC timezone.
+ google.type.TimeOfDay backup_window_start = 4;
+
+ // Access policy for external services.
+ Access access = 5;
+
+ // Cloud storage (Object Storage) settings for the cluster.
+ CloudStorage cloud_storage = 6;
+
+ // Whether database management through SQL commands is enabled.
+ google.protobuf.BoolValue sql_database_management = 7;
+
+ // Whether user management through SQL commands is enabled.
+ google.protobuf.BoolValue sql_user_management = 8;
+
+ // Whether the cluster should use embedded Keeper instead of ZooKeeper.
+ google.protobuf.BoolValue embedded_keeper = 9;
+}
+
+message Shard {
+ // Name of the shard.
+ string name = 1;
+
+ // ID of the cluster that the shard belongs to.
+ string cluster_id = 2;
+
+ // Configuration of the shard.
+ ShardConfig config = 3;
+}
+
+message ShardGroup {
+ // Name of the shard group.
+ string name = 1;
+
+ // ID of the ClickHouse cluster that the shard group belongs to.
+ string cluster_id = 2;
+
+ // Description of the shard group. 0-256 characters long.
+ string description = 3;
+
+ // List of shard names contained in the shard group.
+ repeated string shard_names = 4;
+}
+
+message ShardConfig {
+ message Clickhouse {
+ // ClickHouse settings for a shard.
+ config.ClickhouseConfigSet config = 1;
+
+ // Computational resources for a shard.
+ Resources resources = 2;
+
+ // Relative weight of a shard considered when writing data to the cluster.
+ // For details, see [ClickHouse documentation](https://clickhouse.com/docs/en/operations/table_engines/distributed/).
+ google.protobuf.Int64Value weight = 3;
+ }
+
+ // ClickHouse configuration for a shard.
+ Clickhouse clickhouse = 1;
+}
+
+message Host {
+ enum Type {
+ // Host type is unspecified. Default value.
+ TYPE_UNSPECIFIED = 0;
+
+ // ClickHouse host.
+ CLICKHOUSE = 1;
+
+ // ZooKeeper host.
+ ZOOKEEPER = 2;
+ }
+
+ enum Health {
+
+ // Health of the host is unknown.
+ UNKNOWN = 0;
+
+ // The host is performing all its functions normally.
+ ALIVE = 1;
+
+ // The host is inoperable, and cannot perform any of its essential functions.
+ DEAD = 2;
+
+ // The host is degraded, and can perform only some of its essential functions.
+ DEGRADED = 3;
+ }
+
+ // Name of the ClickHouse host. The host name is assigned by MDB at creation time, and cannot be changed.
+ // 1-63 characters long.
+ //
+ // The name is unique across all MDB hosts that exist on the platform, as it defines the FQDN of the host.
+ string name = 1;
+
+ // ID of the ClickHouse host. The ID is assigned by MDB at creation time.
+ string cluster_id = 2;
+
+ // ID of the availability zone where the ClickHouse host resides.
+ string zone_id = 3;
+
+ // Type of the host. If the field has the default value, it is not returned in the response.
+ Type type = 4;
+
+ // Resources allocated to the ClickHouse host.
+ Resources resources = 5;
+
+ // Aggregated health of the host. If the field has the default value, it is not returned in the response.
+ Health health = 6;
+
+ // Services provided by the host.
+ repeated Service services = 7;
+
+ // ID of the subnet that the host belongs to.
+ string subnet_id = 8;
+
+ // Flag indicating whether a public IP is assigned to this host.
+ bool assign_public_ip = 9;
+
+ // Name of the shard that the host belongs to.
+ string shard_name = 10;
+}
+
+message Service {
+ enum Type {
+ // Service type of the host is unspecified. Default value.
+ TYPE_UNSPECIFIED = 0;
+
+ // The host is a ClickHouse server.
+ CLICKHOUSE = 1;
+
+ // The host is a ZooKeeper server.
+ ZOOKEEPER = 2;
+ }
+
+ enum Health {
+
+ // Health of the server is unknown. Default value.
+ UNKNOWN = 0;
+
+ // The server is working normally.
+ ALIVE = 1;
+
+ // The server is dead or unresponsive.
+ DEAD = 2;
+ }
+
+ // Type of the service provided by the host. If the field has the default value, it is not returned in the response.
+ Type type = 1;
+
+ // Aggregated health of the service. If the field has the default value, it is not returned in the response.
+ Health health = 2;
+}
+
+message Resources {
+ // ID of the preset for computational resources available to a host (CPU, memory etc.).
+ // All available presets are listed in the [documentation](/docs/managed-clickhouse/concepts/instance-types)
+ string resource_preset_id = 1;
+
+ // Volume of the storage available to a host, in bytes.
+ int64 disk_size = 2;
+
+ // Type of the storage environment for the host.
+ // Possible values:
+ // * network-hdd - network HDD drive,
+ // * network-ssd - network SSD drive,
+ // * local-ssd - local SSD storage.
+ string disk_type_id = 3;
+}
+
+message Access {
+ // Allow exporting data from the cluster to DataLens.
+ bool data_lens = 1;
+
+ // Allow SQL queries to the cluster databases from the management console.
+ //
+ // See [SQL queries in the management console](/docs/managed-clickhouse/operations/web-sql-query) for more details.
+ bool web_sql = 2;
+
+ // Allow importing data from Yandex Metrica and AppMetrica to the cluster.
+ //
+ // See [AppMetrica documentation](https://appmetrica.yandex.com/docs/cloud/index.html) for more details.
+ bool metrika = 3;
+
+ // Allow access to the cluster for Serverless.
+ bool serverless = 4;
+
+ // Allow access for DataTransfer.
+ bool data_transfer = 5;
+
+ // Allow access for Yandex Query.
+ bool yandex_query = 6;
+}
+
+message CloudStorage {
+ // Whether to use Object Storage for storing ClickHouse data.
+ bool enabled = 1;
+
+ google.protobuf.DoubleValue move_factor = 2 [(value) = "0-1"];
+
+ google.protobuf.BoolValue data_cache_enabled = 3;
+
+ google.protobuf.Int64Value data_cache_max_size = 4;
+
+ google.protobuf.BoolValue prefer_not_to_merge = 5;
+}
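
Health and Status above are plain proto3 enums, so the generated bindings can render symbolic names for logging and monitoring. A sketch (describe is a hypothetical helper; cluster is a Cluster message fetched via ClusterService.Get from the next file):

    # Sketch: human-readable health/status for a fetched Cluster message.
    from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Cluster

    def describe(cluster: Cluster) -> str:
        health = Cluster.Health.Name(cluster.health)  # e.g. "ALIVE", "DEGRADED"
        status = Cluster.Status.Name(cluster.status)  # e.g. "RUNNING"
        return f"{cluster.name} ({cluster.id}): health={health}, status={status}"

Per the enum comments, DEGRADED means at least one host is not ALIVE; per-host detail comes from ClusterService.ListHosts in cluster_service.proto below.
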
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/cluster_service.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/cluster_service.proto
new file mode 100644
index 0000000000..b939b3d1ad
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/cluster_service.proto
@@ -0,0 +1,1478 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+import "google/type/timeofday.proto";
+import "yandex/cloud/api/operation.proto";
+import "yandex/cloud/mdb/clickhouse/v1/backup.proto";
+import "yandex/cloud/mdb/clickhouse/v1/cluster.proto";
+import "yandex/cloud/mdb/clickhouse/v1/config/clickhouse.proto";
+import "yandex/cloud/mdb/clickhouse/v1/database.proto";
+import "yandex/cloud/mdb/clickhouse/v1/maintenance.proto";
+import "yandex/cloud/mdb/clickhouse/v1/user.proto";
+import "yandex/cloud/operation/operation.proto";
+import "yandex/cloud/validation.proto";
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+// A set of methods for managing ClickHouse clusters.
+service ClusterService {
+ // Returns the specified ClickHouse cluster.
+ //
+ // To get the list of available ClickHouse clusters, make a [List] request.
+ rpc Get(GetClusterRequest) returns (Cluster) {
+ option (google.api.http) = {get: "/managed-clickhouse/v1/clusters/{cluster_id}"};
+ }
+
+ // Retrieves a list of ClickHouse clusters that belong
+ // to the specified folder.
+ rpc List(ListClustersRequest) returns (ListClustersResponse) {
+ option (google.api.http) = {get: "/managed-clickhouse/v1/clusters"};
+ }
+
+ // Creates a ClickHouse cluster in the specified folder.
+ rpc Create(CreateClusterRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ post: "/managed-clickhouse/v1/clusters"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "CreateClusterMetadata"
+ response: "Cluster"
+ };
+ }
+
+ // Updates the specified ClickHouse cluster.
+ rpc Update(UpdateClusterRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ patch: "/managed-clickhouse/v1/clusters/{cluster_id}"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "UpdateClusterMetadata"
+ response: "Cluster"
+ };
+ }
+
+ // Deletes the specified ClickHouse cluster.
+ rpc Delete(DeleteClusterRequest) returns (operation.Operation) {
+ option (google.api.http) = {delete: "/managed-clickhouse/v1/clusters/{cluster_id}"};
+ option (yandex.cloud.api.operation) = {
+ metadata: "DeleteClusterMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+
+ // Starts the specified ClickHouse cluster.
+ rpc Start(StartClusterRequest) returns (operation.Operation) {
+ option (google.api.http) = {post: "/managed-clickhouse/v1/clusters/{cluster_id}:start"};
+ option (yandex.cloud.api.operation) = {
+ metadata: "StartClusterMetadata"
+ response: "Cluster"
+ };
+ }
+
+ // Stops the specified ClickHouse cluster.
+ rpc Stop(StopClusterRequest) returns (operation.Operation) {
+ option (google.api.http) = {post: "/managed-clickhouse/v1/clusters/{cluster_id}:stop"};
+ option (yandex.cloud.api.operation) = {
+ metadata: "StopClusterMetadata"
+ response: "Cluster"
+ };
+ }
+
+ // Moves a ClickHouse cluster to the specified folder.
+ rpc Move(MoveClusterRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ post: "/managed-clickhouse/v1/clusters/{cluster_id}:move"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "MoveClusterMetadata"
+ response: "Cluster"
+ };
+ }
+
+ // Adds a ZooKeeper subcluster to the specified ClickHouse cluster.
+ rpc AddZookeeper(AddClusterZookeeperRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ post: "/managed-clickhouse/v1/clusters/{cluster_id}:addZookeeper"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "AddClusterZookeeperMetadata"
+ response: "Cluster"
+ };
+ }
+
+ // Creates a backup for the specified ClickHouse cluster.
+ rpc Backup(BackupClusterRequest) returns (operation.Operation) {
+ option (google.api.http) = {post: "/managed-clickhouse/v1/clusters/{cluster_id}:backup"};
+ option (yandex.cloud.api.operation) = {
+ metadata: "BackupClusterMetadata"
+ response: "Cluster"
+ };
+ }
+
+ // Creates a new ClickHouse cluster using the specified backup.
+ rpc Restore(RestoreClusterRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ post: "/managed-clickhouse/v1/clusters:restore"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "RestoreClusterMetadata"
+ response: "Cluster"
+ };
+ }
+
+ // Reschedules planned maintenance operation.
+ rpc RescheduleMaintenance(RescheduleMaintenanceRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ post: "/managed-clickhouse/v1/clusters/{cluster_id}:rescheduleMaintenance"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "RescheduleMaintenanceMetadata"
+ response: "Cluster"
+ };
+ }
+
+ // Retrieves logs for the specified ClickHouse cluster.
+ rpc ListLogs(ListClusterLogsRequest) returns (ListClusterLogsResponse) {
+ option (google.api.http) = {get: "/managed-clickhouse/v1/clusters/{cluster_id}:logs"};
+ }
+
+ // Same as ListLogs but using server-side streaming. Also allows for `tail -f` semantics.
+ rpc StreamLogs(StreamClusterLogsRequest) returns (stream StreamLogRecord) {
+ option (google.api.http) = {get: "/managed-clickhouse/v1/clusters/{cluster_id}:stream_logs"};
+ }
+
+ // Retrieves the list of Operation resources for the specified cluster.
+ rpc ListOperations(ListClusterOperationsRequest) returns (ListClusterOperationsResponse) {
+ option (google.api.http) = {get: "/managed-clickhouse/v1/clusters/{cluster_id}/operations"};
+ }
+
+ // Retrieves the list of available backups for the specified ClickHouse cluster.
+ rpc ListBackups(ListClusterBackupsRequest) returns (ListClusterBackupsResponse) {
+ option (google.api.http) = {get: "/managed-clickhouse/v1/clusters/{cluster_id}/backups"};
+ }
+
+ // Retrieves a list of hosts for the specified cluster.
+ rpc ListHosts(ListClusterHostsRequest) returns (ListClusterHostsResponse) {
+ option (google.api.http) = {get: "/managed-clickhouse/v1/clusters/{cluster_id}/hosts"};
+ }
+
+ // Creates new hosts for a cluster.
+ rpc AddHosts(AddClusterHostsRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ post: "/managed-clickhouse/v1/clusters/{cluster_id}/hosts:batchCreate"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "AddClusterHostsMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+
+ // Updates the specified hosts.
+ rpc UpdateHosts(UpdateClusterHostsRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ post: "/managed-clickhouse/v1/clusters/{cluster_id}/hosts:batchUpdate"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "UpdateClusterHostsMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+
+ // Deletes the specified hosts for a cluster.
+ rpc DeleteHosts(DeleteClusterHostsRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ post: "/managed-clickhouse/v1/clusters/{cluster_id}/hosts:batchDelete"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "DeleteClusterHostsMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+
+ rpc RestartHosts(RestartClusterHostsRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ post: "/managed-clickhouse/v1/clusters/{cluster_id}/hosts:restartHosts"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "RestartClusterHostsMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+
+ // Returns the specified shard.
+ rpc GetShard(GetClusterShardRequest) returns (Shard) {
+ option (google.api.http) = {get: "/managed-clickhouse/v1/clusters/{cluster_id}/shards/{shard_name}"};
+ }
+
+ // Retrieves a list of shards that belong to the specified cluster.
+ rpc ListShards(ListClusterShardsRequest) returns (ListClusterShardsResponse) {
+ option (google.api.http) = {get: "/managed-clickhouse/v1/clusters/{cluster_id}/shards"};
+ }
+
+ // Creates a new shard in the specified cluster.
+ rpc AddShard(AddClusterShardRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ post: "/managed-clickhouse/v1/clusters/{cluster_id}/shards"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "AddClusterShardMetadata"
+ response: "Shard"
+ };
+ }
+
+ // Modifies the specified shard.
+ rpc UpdateShard(UpdateClusterShardRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ patch: "/managed-clickhouse/v1/clusters/{cluster_id}/shards/{shard_name}"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "UpdateClusterShardMetadata"
+ response: "Shard"
+ };
+ }
+
+ // Deletes the specified shard.
+ rpc DeleteShard(DeleteClusterShardRequest) returns (operation.Operation) {
+ option (google.api.http) = {delete: "/managed-clickhouse/v1/clusters/{cluster_id}/shards/{shard_name}"};
+ option (yandex.cloud.api.operation) = {
+ metadata: "DeleteClusterShardMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+
+ // Returns the specified shard group.
+ rpc GetShardGroup(GetClusterShardGroupRequest) returns (ShardGroup) {
+ option (google.api.http) = {get: "/managed-clickhouse/v1/clusters/{cluster_id}/shardGroups/{shard_group_name}"};
+ }
+
+ // Retrieves a list of shard groups that belong to specified cluster.
+ rpc ListShardGroups(ListClusterShardGroupsRequest) returns (ListClusterShardGroupsResponse) {
+ option (google.api.http) = {get: "/managed-clickhouse/v1/clusters/{cluster_id}/shardGroups"};
+ }
+
+ // Creates a new shard group in the specified cluster.
+ rpc CreateShardGroup(CreateClusterShardGroupRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ post: "/managed-clickhouse/v1/clusters/{cluster_id}/shardGroups"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "CreateClusterShardGroupMetadata"
+ response: "ShardGroup"
+ };
+ }
+
+ // Updates the specified shard group.
+ rpc UpdateShardGroup(UpdateClusterShardGroupRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ patch: "/managed-clickhouse/v1/clusters/{cluster_id}/shardGroups/{shard_group_name}"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "UpdateClusterShardGroupMetadata"
+ response: "ShardGroup"
+ };
+ }
+
+ // Deletes the specified shard group.
+ rpc DeleteShardGroup(DeleteClusterShardGroupRequest) returns (operation.Operation) {
+ option (google.api.http) = {delete: "/managed-clickhouse/v1/clusters/{cluster_id}/shardGroups/{shard_group_name}"};
+ option (yandex.cloud.api.operation) = {
+ metadata: "DeleteClusterShardGroupMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+
+ // Retrieves a list of external dictionaries that belong to specified cluster.
+ rpc ListExternalDictionaries(ListClusterExternalDictionariesRequest) returns (ListClusterExternalDictionariesResponse) {
+ option (google.api.http) = {get: "/managed-clickhouse/v1/clusters/{cluster_id}/externalDictionaries"};
+ }
+
+ // Creates an external dictionary for the specified ClickHouse cluster.
+ rpc CreateExternalDictionary(CreateClusterExternalDictionaryRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ post: "/managed-clickhouse/v1/clusters/{cluster_id}:createExternalDictionary"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "CreateClusterExternalDictionaryMetadata"
+ response: "Cluster"
+ };
+ }
+
+ // Updates an external dictionary for the specified ClickHouse cluster.
+ rpc UpdateExternalDictionary(UpdateClusterExternalDictionaryRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ post: "/managed-clickhouse/v1/clusters/{cluster_id}:updateExternalDictionary"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "UpdateClusterExternalDictionaryMetadata"
+ response: "Cluster"
+ };
+ }
+
+ // Deletes the specified external dictionary.
+ rpc DeleteExternalDictionary(DeleteClusterExternalDictionaryRequest) returns (operation.Operation) {
+ option (google.api.http) = {
+ post: "/managed-clickhouse/v1/clusters/{cluster_id}:deleteExternalDictionary"
+ body: "*"
+ };
+ option (yandex.cloud.api.operation) = {
+ metadata: "DeleteClusterExternalDictionaryMetadata"
+ response: "Cluster"
+ };
+ }
+}
+
+message GetClusterRequest {
+ // ID of the ClickHouse Cluster resource to return.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+}
+
+message ListClustersRequest {
+ // ID of the folder to list ClickHouse clusters in.
+ // To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request.
+ string folder_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // The maximum number of results per page to return. If the number of available
+ // results is larger than [page_size], the service returns a [ListClustersResponse.next_page_token]
+ // that can be used to get the next page of results in subsequent list requests.
+ int64 page_size = 2 [(value) = "<=1000"];
+
+ // Page token. To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token]
+ // returned by the previous list request.
+ string page_token = 3 [(length) = "<=100"];
+
+ // A filter expression that filters resources listed in the response.
+ // The expression must specify:
+ // 1. The field name. Currently you can only use filtering with the [Cluster.name] field.
+ // 2. An `=` operator.
+ // 3. The value in double quotes (`"`). Must be 1-63 characters long and match the regular expression `[a-zA-Z0-9_-]+`.
+ string filter = 4 [(length) = "<=1000"];
+}
+
+message ListClustersResponse {
+ // List of ClickHouse Cluster resources.
+ repeated Cluster clusters = 1;
+
+ // This token allows you to get the next page of results for list requests. If the number of results
+ // is larger than [ListClustersRequest.page_size], use the [next_page_token] as the value
+ // for the [ListClustersRequest.page_token] parameter in the next list request. Each subsequent
+ // list request will have its own [next_page_token] to continue paging through the results.
+ string next_page_token = 2;
+}
+
+message CreateClusterRequest {
+ // ID of the folder to create the ClickHouse cluster in.
+ string folder_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Name of the ClickHouse cluster. The name must be unique within the folder.
+ string name = 2 [
+ (required) = true,
+ (length) = "<=63",
+ (pattern) = "[a-zA-Z0-9_-]*"
+ ];
+
+ // Description of the ClickHouse cluster.
+ string description = 3 [(length) = "<=256"];
+
+ // Custom labels for the ClickHouse cluster as `key:value` pairs. Maximum 64 per resource.
+ // For example, "project": "mvp" or "source": "dictionary".
+ map<string, string> labels = 4 [
+ (yandex.cloud.size) = "<=64",
+ (length) = "<=63",
+ (pattern) = "[-_0-9a-z]*",
+ (map_key).length = "<=63",
+ (map_key).pattern = "[a-z][-_0-9a-z]*"
+ ];
+
+ // Deployment environment of the ClickHouse cluster.
+ Cluster.Environment environment = 5 [(required) = true];
+
+ // Configuration and resources for hosts that should be created for the ClickHouse cluster.
+ ConfigSpec config_spec = 6 [(required) = true];
+
+ // Descriptions of databases to be created in the ClickHouse cluster.
+ repeated DatabaseSpec database_specs = 7 [(size) = ">0"];
+
+ // Descriptions of database users to be created in the ClickHouse cluster.
+ repeated UserSpec user_specs = 8 [(size) = ">0"];
+
+ // Individual configurations for hosts that should be created for the ClickHouse cluster.
+ repeated HostSpec host_specs = 9 [(size) = ">0"];
+
+ // ID of the network to create the cluster in.
+ string network_id = 10 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Name of the first shard in the cluster. If not set, defaults to 'shard1'.
+ string shard_name = 11 [
+ (length) = "<=63",
+ (pattern) = "[a-zA-Z0-9_-]*"
+ ];
+
+ // ID of the service account used for access to Object Storage.
+ string service_account_id = 12;
+
+ // User security groups.
+ repeated string security_group_ids = 13;
+
+ // Deletion protection inhibits deletion of the cluster.
+ bool deletion_protection = 14;
+
+ // Window of maintenance operations.
+ MaintenanceWindow maintenance_window = 15;
+}
+
+message CreateClusterMetadata {
+ // ID of the ClickHouse cluster that is being created.
+ string cluster_id = 1;
+}
+
+message UpdateClusterRequest {
+ // ID of the ClickHouse Cluster resource to update.
+ // To get the ClickHouse cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Field mask that specifies which fields of the ClickHouse Cluster resource should be updated.
+ google.protobuf.FieldMask update_mask = 2;
+
+ // New description of the ClickHouse cluster.
+ string description = 3 [(length) = "<=256"];
+
+ // Custom labels for the ClickHouse cluster as `key:value` pairs. Maximum 64 per resource.
+ // For example, "project": "mvp" or "source": "dictionary".
+ //
+ // The new set of labels will completely replace the old ones. To add a label, request the current
+ // set with the [ClusterService.Get] method, then send an [ClusterService.Update] request with the new label added to the set.
+ map<string, string> labels = 4 [
+ (yandex.cloud.size) = "<=64",
+ (length) = "<=63",
+ (pattern) = "[-_0-9a-z]*",
+ (map_key).length = "<=63",
+ (map_key).pattern = "[a-z][-_0-9a-z]*"
+ ];
+
+ // New configuration and resources for hosts in the cluster.
+ ConfigSpec config_spec = 5;
+
+ // New name for the cluster.
+ string name = 6 [
+ (length) = "<=63",
+ (pattern) = "[a-zA-Z0-9_-]*"
+ ];
+
+ // ID of the service account used for access to Object Storage.
+ string service_account_id = 7;
+
+ // New maintenance window settings for the cluster.
+ MaintenanceWindow maintenance_window = 8;
+
+ // User security groups.
+ repeated string security_group_ids = 9;
+
+ // Deletion protection inhibits deletion of the cluster.
+ bool deletion_protection = 10;
+}
+
+message UpdateClusterMetadata {
+ // ID of the ClickHouse Cluster resource that is being updated.
+ string cluster_id = 1;
+}
+
+message DeleteClusterRequest {
+ // ID of the ClickHouse cluster to delete.
+ // To get the ClickHouse cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+}
+
+message DeleteClusterMetadata {
+ // ID of the ClickHouse cluster that is being deleted.
+ string cluster_id = 1;
+}
+
+message StartClusterRequest {
+ // ID of the ClickHouse cluster to start.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+}
+
+message StartClusterMetadata {
+ // ID of the ClickHouse cluster being started.
+ string cluster_id = 1;
+}
+
+message StopClusterRequest {
+ // ID of the ClickHouse cluster to stop.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+}
+
+message StopClusterMetadata {
+ // ID of the ClickHouse cluster being stopped.
+ string cluster_id = 1;
+}
+
+message MoveClusterRequest {
+ // ID of the ClickHouse cluster to move.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // ID of the destination folder.
+ string destination_folder_id = 2 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+}
+
+message MoveClusterMetadata {
+ // ID of the ClickHouse cluster being moved.
+ string cluster_id = 1;
+
+ // ID of the source folder.
+ string source_folder_id = 2;
+
+ // ID of the destination folder.
+ string destination_folder_id = 3;
+}
+
+message AddClusterZookeeperRequest {
+ // ID of the ClickHouse cluster to modify.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Resources allocated to Zookeeper hosts.
+ Resources resources = 2;
+
+ // Configuration of ZooKeeper hosts.
+ repeated HostSpec host_specs = 3;
+
+ // Enable automatic conversion of non-replicated MergeTree tables to replicated ones.
+ google.protobuf.BoolValue convert_tables_to_replicated = 4;
+}
+
+message AddClusterZookeeperMetadata {
+ // ID of the ClickHouse cluster.
+ string cluster_id = 1;
+}
+
+message BackupClusterRequest {
+ // ID of the ClickHouse cluster to back up.
+ // To get the ClickHouse cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+}
+
+message BackupClusterMetadata {
+ // ID of the ClickHouse cluster that is being backed up.
+ string cluster_id = 1;
+}
+
+message RestoreClusterRequest {
+ reserved 12;
+ // ID of the backup to restore from. This backup will be used to create one cluster shard.
+ // To get the backup ID, use a [ClusterService.ListBackups] request.
+ string backup_id = 1 [(required) = true];
+
+ // Additional IDs of the backups to restore from.
+ // Each additional backup is responsible for restoring a separate shard.
+ // The restored cluster will have len(additional_backup_ids)+1 shards in total.
+ // To get the backup ID, use a [ClusterService.ListBackups] request.
+ repeated string additional_backup_ids = 13;
+
+ // Name of the new ClickHouse cluster. The name must be unique within the folder.
+ string name = 2 [
+ (required) = true,
+ (length) = "<=63",
+ (pattern) = "[a-zA-Z0-9_-]*"
+ ];
+
+ // Description of the new ClickHouse cluster.
+ string description = 3 [(length) = "<=256"];
+
+ // Custom labels for the ClickHouse cluster as `key:value` pairs. Maximum 64 per resource.
+ // For example, "project": "mvp" or "source": "dictionary".
+ map<string, string> labels = 4 [
+ (yandex.cloud.size) = "<=64",
+ (length) = "<=63",
+ (pattern) = "[-_0-9a-z]*",
+ (map_key).length = "<=63",
+ (map_key).pattern = "[a-z][-_0-9a-z]*"
+ ];
+
+ // Deployment environment of the new ClickHouse cluster.
+ Cluster.Environment environment = 5 [(required) = true];
+
+ // Configuration for the ClickHouse cluster to be created.
+ ConfigSpec config_spec = 6 [(required) = true];
+
+ // Configurations for ClickHouse hosts that should be created for
+ // the cluster that is being created from the backup.
+ repeated HostSpec host_specs = 7 [(size) = ">0"];
+
+ // ID of the network to create the ClickHouse cluster in.
+ string network_id = 8 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // ID of the folder to create the ClickHouse cluster in.
+ string folder_id = 9 [(length) = "<=50"];
+
+ // ID of the service account used for access to Object Storage.
+ string service_account_id = 10;
+
+ // User security groups.
+ repeated string security_group_ids = 11;
+
+ // Deletion protection inhibits deletion of the cluster.
+ bool deletion_protection = 14;
+}
+
+message RestoreClusterMetadata {
+ // ID of the new ClickHouse cluster that is being created from a backup.
+ string cluster_id = 1;
+
+ // ID of the backup that is being used for creating a cluster.
+ string backup_id = 2;
+}
+
+message RescheduleMaintenanceRequest {
+ // ID of the ClickHouse cluster to reschedule the maintenance operation for.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ enum RescheduleType {
+ RESCHEDULE_TYPE_UNSPECIFIED = 0;
+
+ // Start the maintenance operation immediately.
+ IMMEDIATE = 1;
+
+ // Start the maintenance operation within the next available maintenance window.
+ NEXT_AVAILABLE_WINDOW = 2;
+
+ // Start the maintenance operation at the specific time.
+ SPECIFIC_TIME = 3;
+ }
+ // The type of reschedule request.
+ RescheduleType reschedule_type = 2 [(required) = true];
+
+ // The time until which this maintenance operation should be delayed. The value must be no more than two weeks ahead of the time for which the maintenance operation was originally scheduled. The value can also point to a moment in the past if the [reschedule_type.IMMEDIATE] reschedule type is chosen.
+ google.protobuf.Timestamp delayed_until = 3;
+}
+
+// Rescheduled maintenance operation metadata.
+message RescheduleMaintenanceMetadata {
+ reserved 2 to 3;
+ // Required. ID of the ClickHouse cluster.
+ string cluster_id = 1;
+
+ // Required. The time until which this maintenance operation is to be delayed.
+ google.protobuf.Timestamp delayed_until = 4;
+}
+
+message LogRecord {
+ // Log record timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+ google.protobuf.Timestamp timestamp = 1;
+
+ // Contents of the log record.
+ map<string, string> message = 2;
+}
+
+message ListClusterLogsRequest {
+ // ID of the ClickHouse cluster to request logs for.
+ // To get the ClickHouse cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Columns from logs table to request.
+ // If no columns are specified, entire log records are returned.
+ repeated string column_filter = 2;
+
+ // Type of the service to request logs about.
+ ServiceType service_type = 3 [(required) = true];
+
+ enum ServiceType {
+ SERVICE_TYPE_UNSPECIFIED = 0;
+
+ // Logs of ClickHouse activity.
+ CLICKHOUSE = 1;
+ }
+
+ // Start timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+ google.protobuf.Timestamp from_time = 4;
+
+ // End timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+ google.protobuf.Timestamp to_time = 5;
+
+ // The maximum number of results per page to return. If the number of available
+ // results is larger than [page_size], the service returns a [ListClusterLogsResponse.next_page_token]
+ // that can be used to get the next page of results in subsequent list requests.
+ int64 page_size = 6 [(value) = "<=1000"];
+
+ // Page token. To get the next page of results, set [page_token] to the [ListClusterLogsResponse.next_page_token]
+ // returned by the previous list request.
+ string page_token = 7 [(length) = "<=100"];
+}
+
+message ListClusterLogsResponse {
+ // Requested log records.
+ repeated LogRecord logs = 1;
+
+ // This token allows you to get the next page of results for list requests. If the number of results
+ // is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value
+ // for the [ListClusterLogsRequest.page_token] query parameter in the next list request.
+ // Each subsequent list request will have its own [next_page_token] to continue paging through the results.
+ // This value is interchangeable with the [StreamLogRecord.next_record_token] from the StreamLogs method.
+ string next_page_token = 2;
+}
+
+message StreamLogRecord {
+ // One of the requested log records.
+ LogRecord record = 1;
+
+ // This token allows you to continue streaming logs starting from the exact
+ // same record. To continue streaming, specify the value of [next_record_token]
+ // as the value for the [StreamClusterLogsRequest.record_token] parameter in the next StreamLogs request.
+ // This value is interchangeable with the [ListClusterLogsResponse.next_page_token] from the ListLogs method.
+ string next_record_token = 2;
+}
+
+message StreamClusterLogsRequest {
+ // Required. ID of the ClickHouse cluster.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Columns from logs table to get in the response.
+ repeated string column_filter = 2;
+
+ ServiceType service_type = 3 [(required) = true];
+
+ enum ServiceType {
+ SERVICE_TYPE_UNSPECIFIED = 0;
+
+ // Logs of ClickHouse activity.
+ CLICKHOUSE = 1;
+ }
+
+ // Start timestamp for the logs request.
+ google.protobuf.Timestamp from_time = 4;
+
+ // End timestamp for the logs request.
+ // If this field is not set, all existing logs will be sent and then the new ones as
+ // they appear. In essence it has `tail -f` semantics.
+ google.protobuf.Timestamp to_time = 5;
+
+ // Record token. Set [record_token] to the [StreamLogRecord.next_record_token] returned by a previous StreamLogs
+ // request to start streaming from the next log record.
+ string record_token = 6 [(length) = "<=100"];
+
+ // A filter expression that filters resources listed in the response.
+ // The expression must specify:
+ // 1. The field name. Currently filtering can be applied to the [LogRecord.logs.message.hostname], [LogRecord.logs.message.severity] fields.
+ // 2. An `=` operator.
+ // 3. The value in double quotes (`"`). Must be 1-63 characters long and match the regular expression `[a-z0-9.-]{1,61}`.
+ // Examples of a filter:
+ // - `message.hostname='node1.db.cloud.yandex.net'`
+ // - `message.severity IN ('Error', 'Fatal') AND message.hostname != 'node2.db.cloud.yandex.net'`.
+ string filter = 7 [(length) = "<=1000"];
+}
+
+message ListClusterOperationsRequest {
+ // ID of the ClickHouse Cluster resource to list operations for.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // The maximum number of results per page to return. If the number of available
+ // results is larger than [page_size], the service returns a [ListClusterOperationsResponse.next_page_token]
+ // that can be used to get the next page of results in subsequent list requests.
+ int64 page_size = 2 [(value) = "<=1000"];
+
+ // Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token]
+ // returned by the previous list request.
+ string page_token = 3 [(length) = "<=100"];
+}
+
+message ListClusterOperationsResponse {
+ // List of Operation resources for the specified ClickHouse cluster.
+ repeated operation.Operation operations = 1;
+
+ // This token allows you to get the next page of results for list requests. If the number of results
+ // is larger than [ListClusterOperationsRequest.page_size], use the [next_page_token] as the value
+ // for the [ListClusterOperationsRequest.page_token] query parameter in the next list request.
+ // Each subsequent list request will have its own [next_page_token] to continue paging through the results.
+ string next_page_token = 2;
+}
+
+message ListClusterBackupsRequest {
+ // ID of the ClickHouse cluster.
+ // To get the ClickHouse cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // The maximum number of results per page to return. If the number of available
+ // results is larger than [page_size], the service returns a [ListClusterBackupsResponse.next_page_token]
+ // that can be used to get the next page of results in subsequent list requests.
+ int64 page_size = 2 [(value) = "<=1000"];
+
+ // Page token. To get the next page of results, set [page_token] to the
+ // [ListClusterBackupsResponse.next_page_token] returned by the previous list request.
+ string page_token = 3 [(length) = "<=100"];
+}
+
+message ListClusterBackupsResponse {
+ // List of ClickHouse Backup resources.
+ repeated Backup backups = 1;
+
+ // This token allows you to get the next page of results for list requests. If the number of results
+ // is larger than [ListClusterBackupsRequest.page_size], use the [next_page_token] as the value
+ // for the [ListClusterBackupsRequest.page_token] query parameter in the next list request.
+ // Each subsequent list request will have its own [next_page_token] to continue paging through the results.
+ string next_page_token = 2;
+}
+
+message ListClusterHostsRequest {
+ // ID of the ClickHouse cluster.
+ // To get the ClickHouse cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // The maximum number of results per page to return. If the number of available
+ // results is larger than [page_size], the service returns a [ListClusterHostsResponse.next_page_token]
+ // that can be used to get the next page of results in subsequent list requests.
+ int64 page_size = 2 [(value) = "<=1000"];
+
+ // Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token]
+ // returned by the previous list request.
+ string page_token = 3 [(length) = "<=100"];
+}
+
+message ListClusterHostsResponse {
+ // Requested list of hosts for the cluster.
+ repeated Host hosts = 1;
+
+ // This token allows you to get the next page of results for list requests. If the number of results
+ // is larger than [ListClusterHostsRequest.page_size], use the [next_page_token] as the value
+ // for the [ListClusterHostsRequest.page_token] query parameter in the next list request.
+ // Each subsequent list request will have its own [next_page_token] to continue paging through the results.
+ string next_page_token = 2;
+}
+
+message AddClusterHostsRequest {
+ // ID of the ClickHouse cluster to add hosts to.
+ // To get the ClickHouse cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Configurations for ClickHouse hosts that should be added to the cluster.
+ repeated HostSpec host_specs = 2 [(size) = ">0"];
+
+ // Whether to copy schema to new ClickHouse hosts from replicas.
+ google.protobuf.BoolValue copy_schema = 3;
+}
+
+message AddClusterHostsMetadata {
+ // ID of the ClickHouse cluster to which the hosts are being added.
+ string cluster_id = 1;
+
+ // Names of hosts that are being added to the cluster.
+ repeated string host_names = 2;
+}
+
+message UpdateHostSpec {
+ // Name of the host to update.
+ // To get the ClickHouse host name, use a [ClusterService.ListHosts] request.
+ string host_name = 1 [(required) = true];
+
+ // Field mask that specifies which fields of the ClickHouse host should be updated.
+ google.protobuf.FieldMask update_mask = 2;
+
+ // Whether the host should get a public IP address on creation.
+ google.protobuf.BoolValue assign_public_ip = 3;
+}
+
+message UpdateClusterHostsRequest {
+ // ID of the ClickHouse cluster to update hosts in.
+ // To get the ClickHouse cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // New configurations to apply to hosts.
+ repeated UpdateHostSpec update_host_specs = 2 [(size) = ">0"];
+}
+
+message UpdateClusterHostsMetadata {
+ // ID of the ClickHouse cluster to modify hosts in.
+ string cluster_id = 1;
+
+ // Names of hosts that are being modified.
+ repeated string host_names = 2;
+}
+
+message DeleteClusterHostsRequest {
+ // ID of the ClickHouse cluster to remove hosts from.
+ // To get the ClickHouse cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Names of hosts to delete.
+ repeated string host_names = 2 [
+ (size) = ">0",
+ (length) = "<=253"
+ ];
+}
+
+message DeleteClusterHostsMetadata {
+ // ID of the ClickHouse cluster to remove hosts from.
+ string cluster_id = 1;
+
+ // Names of hosts that are being deleted.
+ repeated string host_names = 2;
+}
+
+message RestartClusterHostsRequest {
+ // Required. ID of the ClickHouse cluster.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Required. Names of the hosts to restart.
+ repeated string host_names = 2 [
+ (size) = ">0",
+ (length) = "<=353"
+ ];
+}
+
+message RestartClusterHostsMetadata {
+ // Required. ID of the ClickHouse cluster.
+ string cluster_id = 1;
+
+ // Required. Names of the hosts being restarted.
+ repeated string host_names = 2;
+}
+
+message GetClusterShardRequest {
+ // ID of the cluster that the shard belongs to.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Name of the shard to request information about.
+ // To get the name of a shard, use a [ClusterService.ListShards] request.
+ string shard_name = 2 [
+ (required) = true,
+ (length) = "<=63",
+ (pattern) = "[a-zA-Z0-9_-]*"
+ ];
+}
+
+message ListClusterShardsRequest {
+ // ID of the ClickHouse cluster to list shards in.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // The maximum number of results per page to return. If the number of available
+ // results is larger than [page_size], the service returns a [ListClusterShardsResponse.next_page_token]
+ // that can be used to get the next page of results in subsequent list requests.
+ int64 page_size = 2 [(value) = "0-1000"];
+
+ // Page token. To get the next page of results, set [page_token] to the [ListClusterShardsResponse.next_page_token]
+ // returned by the previous list request.
+ string page_token = 3 [(length) = "<=100"];
+}
+
+message ListClusterShardsResponse {
+ // List of ClickHouse shards.
+ repeated Shard shards = 1;
+
+ // This token allows you to get the next page of results for list requests. If the number of results
+ // is larger than [ListClusterShardsRequest.page_size], use the [next_page_token] as the value
+ // for the [ListClusterShardsRequest.page_token] parameter in the next list request. Each subsequent
+ // list request will have its own [next_page_token] to continue paging through the results.
+ string next_page_token = 2;
+}
+
+message AddClusterShardRequest {
+ // ID of the ClickHouse cluster to add a shard to.
+ // To get the ClickHouse cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Name for the new shard.
+ string shard_name = 2 [
+ (required) = true,
+ (length) = "<=63",
+ (pattern) = "[a-zA-Z0-9_-]*"
+ ];
+
+ // Configuration of the new shard.
+ ShardConfigSpec config_spec = 3;
+
+ // Configurations for ClickHouse hosts that should be created with the shard.
+ repeated HostSpec host_specs = 4 [(size) = ">0"];
+
+ // Whether to copy schema to hosts of the shard to be created. The schema is copied from hosts of an existing shard.
+ google.protobuf.BoolValue copy_schema = 5;
+}
+
+message AddClusterShardMetadata {
+ // ID of the cluster that a shard is being added to.
+ string cluster_id = 1;
+
+ // Name of the shard being created.
+ string shard_name = 2;
+}
+
+message UpdateClusterShardRequest {
+ // ID of the ClickHouse cluster the shard belongs to.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Name of the shard to be updated.
+ // To get the name of a shard, use a [ClusterService.ListShards] request.
+ string shard_name = 2 [
+ (required) = true,
+ (length) = "<=63",
+ (pattern) = "[a-zA-Z0-9_-]*"
+ ];
+
+ // Field mask that specifies which attributes of the ClickHouse shard should be updated.
+ google.protobuf.FieldMask update_mask = 3;
+
+ // New configuration for the specified shard.
+ ShardConfigSpec config_spec = 4;
+}
+
+message UpdateClusterShardMetadata {
+ // ID of the cluster that contains the shard being updated.
+ string cluster_id = 1;
+
+ // Name of the shard being updated.
+ string shard_name = 2;
+}
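
[update_mask] follows the usual google.protobuf.FieldMask convention: only the fields whose paths are listed are applied, and everything else in [config_spec] is ignored. A sketch that changes only the shard weight, assuming snake_case paths relative to the request message:

    from google.protobuf.field_mask_pb2 import FieldMask
    from google.protobuf.wrappers_pb2 import Int64Value
    from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import (
        ShardConfigSpec,
        UpdateClusterShardRequest,
    )

    op = client.UpdateShard(UpdateClusterShardRequest(
        cluster_id="<cluster ID>",  # placeholder
        shard_name="shard2",
        update_mask=FieldMask(paths=["config_spec.clickhouse.weight"]),
        config_spec=ShardConfigSpec(
            clickhouse=ShardConfigSpec.Clickhouse(weight=Int64Value(value=200)),
        ),
    ))
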
+
+message DeleteClusterShardRequest {
+ // ID of the ClickHouse cluster the shard belongs to.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Name of the shard to be deleted.
+ // To get the name of a shard, use a [ClusterService.ListShards] request.
+ string shard_name = 2 [
+ (required) = true,
+ (length) = "<=63",
+ (pattern) = "[a-zA-Z0-9_-]*"
+ ];
+}
+
+message DeleteClusterShardMetadata {
+ // ID of the cluster that contains the shard being deleted.
+ string cluster_id = 1;
+
+ // Name of the shard being deleted.
+ string shard_name = 2;
+}
+
+message GetClusterShardGroupRequest {
+ // ID of the cluster that the shard group belongs to.
+ //
+ // To get the cluster ID, make a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Name of the shard group to request information about.
+ //
+ // To get the name of a shard group, make a [ClusterService.ListShardGroups] request.
+ string shard_group_name = 2 [
+ (required) = true,
+ (length) = "<=63",
+ (pattern) = "[a-zA-Z0-9_-]*"
+ ];
+}
+
+message ListClusterShardGroupsRequest {
+ // ID of the cluster that the shard group belongs to.
+ //
+ // To get the cluster ID, make a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // The maximum number of results per page to return.
+ //
+ // If the number of available results is larger than [page_size], the service returns a [ListClusterShardGroupsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests.
+ int64 page_size = 2 [(value) = "0-1000"];
+
+ // Page token.
+ //
+ // To get the next page of results, set [page_token] to the [ListClusterShardGroupsResponse.next_page_token] returned by the previous list request.
+ string page_token = 3 [(length) = "<=100"];
+}
+
+message ListClusterShardGroupsResponse {
+ // List of the ClickHouse cluster's shard groups.
+ repeated ShardGroup shard_groups = 1;
+
+ // This token allows you to get the next page of results for list requests.
+ //
+ // If the number of results is larger than [ListClusterShardGroupsRequest.page_size], use the [next_page_token] as the value for the [ListClusterShardGroupsRequest.page_token] parameter in the next list request.
+ // Each subsequent list request will have its own [next_page_token] to continue paging through the results.
+ string next_page_token = 2;
+}
+
+message CreateClusterShardGroupRequest {
+ // ID of the ClickHouse cluster to add a shard group to.
+ //
+ // To get the cluster ID, make a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Name for the new shard group.
+ string shard_group_name = 2 [
+ (required) = true,
+ (length) = "<=63",
+ (pattern) = "[a-zA-Z0-9_-]*"
+ ];
+
+ // Description of the new shard group. 0-256 characters long.
+ string description = 3;
+
+ // List of shard names that should be put into the new group.
+ //
+ // To get the list, make a [ClusterService.ListShards] request.
+ repeated string shard_names = 4;
+}
+
+message CreateClusterShardGroupMetadata {
+ // ID of the cluster to add a shard group to.
+ string cluster_id = 1;
+
+ // Name of the shard group that is being added.
+ string shard_group_name = 2;
+}
+
+message UpdateClusterShardGroupRequest {
+ // ID of the ClickHouse cluster that contains the shard group to update.
+ //
+ // To get the cluster ID, make a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Name of the shard group that should be updated.
+ //
+ // To get the name, make a [ClusterService.ListShardGroups] request.
+ string shard_group_name = 2 [
+ (required) = true,
+ (length) = "<=63",
+ (pattern) = "[a-zA-Z0-9_-]*"
+ ];
+
+ google.protobuf.FieldMask update_mask = 3;
+
+ // Updated description of the shard group. 0-256 characters long.
+ string description = 4;
+
+ // Updated list of shard names that belong to the shard group.
+ repeated string shard_names = 5;
+}
+
+message UpdateClusterShardGroupMetadata {
+ // ID of the cluster that contains the shard group being updated.
+ string cluster_id = 1;
+
+ // Name of the shard group that is being updated.
+ string shard_group_name = 2;
+}
+
+message DeleteClusterShardGroupRequest {
+ // ID of the ClickHouse cluster that contains the shard group to delete.
+ //
+ // To get the cluster ID, make a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Name of the shard group that should be deleted.
+ //
+ // To get the name, make a [ClusterService.ListShardGroups] request.
+ string shard_group_name = 2 [
+ (required) = true,
+ (length) = "<=63",
+ (pattern) = "[a-zA-Z0-9_-]*"
+ ];
+}
+
+message DeleteClusterShardGroupMetadata {
+ // ID of the cluster that contains the shard group being deleted.
+ string cluster_id = 1;
+
+ // Name of the shard group that is being deleted.
+ string shard_group_name = 2;
+}
+
+message ListClusterExternalDictionariesRequest {
+ // ID of the cluster that the external dictionaries belong to.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // The maximum number of results per page to return. If the number of available
+ // results is larger than [page_size], the service returns a [ListClusterExternalDictionariesResponse.next_page_token]
+ // that can be used to get the next page of results in subsequent list requests.
+ int64 page_size = 2 [(value) = "0-1000"];
+
+ // Page token. To get the next page of results, set [page_token] to the [ListClusterExternalDictionariesResponse.next_page_token]
+ // returned by a previous list request.
+ string page_token = 3 [(length) = "<=100"];
+}
+
+message ListClusterExternalDictionariesResponse {
+ // List of the ClickHouse cluster's external dictionaries.
+ repeated config.ClickhouseConfig.ExternalDictionary external_dictionaries = 1;
+
+ // This token allows you to get the next page of results for list requests. If the number of results
+ // is larger than [ListClusterExternalDictionariesRequest.page_size], use the [next_page_token] as the value
+ // for the [ListClusterExternalDictionariesRequest.page_token] parameter in the next list request. Each subsequent
+ // list request will have its own [next_page_token] to continue paging through the results.
+ string next_page_token = 2;
+}
+
+message CreateClusterExternalDictionaryRequest {
+ // ID of the ClickHouse cluster to create the external dictionary for.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Configuration of the external dictionary.
+ config.ClickhouseConfig.ExternalDictionary external_dictionary = 2;
+}
+
+message CreateClusterExternalDictionaryMetadata {
+ // ID of the cluster for which an external dictionary is being created.
+ string cluster_id = 1;
+}
+
+message UpdateClusterExternalDictionaryRequest {
+ // ID of the ClickHouse cluster to update the external dictionary for.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Configuration of the external dictionary.
+ config.ClickhouseConfig.ExternalDictionary external_dictionary = 2;
+
+ // Field mask that specifies which fields of the external dictionary should be updated.
+ google.protobuf.FieldMask update_mask = 3;
+}
+
+message UpdateClusterExternalDictionaryMetadata {
+ // ID of the cluster for which an external dictionary is being updated.
+ string cluster_id = 1;
+
+ // Name of the external dictionary.
+ string external_dictionary_name = 2;
+}
+
+message DeleteClusterExternalDictionaryRequest {
+ // ID of the ClickHouse cluster to delete the external dictionary from.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [
+ (required) = true,
+ (length) = "<=50"
+ ];
+
+ // Name of the external dictionary to delete.
+ string external_dictionary_name = 2;
+}
+
+message DeleteClusterExternalDictionaryMetadata {
+ // ID of the cluster where an external dictionary is being deleted.
+ string cluster_id = 1;
+}
+
+message HostSpec {
+ // ID of the availability zone where the host resides.
+ // To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request.
+ string zone_id = 1 [(length) = "<=50"];
+
+ // Type of the host to be deployed.
+ Host.Type type = 2 [(required) = true];
+
+ // ID of the subnet that the host should belong to. This subnet should be a part
+ // of the network that the cluster belongs to.
+ // The ID of the network is set in the [Cluster.network_id] field.
+ string subnet_id = 3 [(length) = "<=50"];
+
+ // Whether the host should get a public IP address on creation.
+ //
+ // After a host has been created, this setting cannot be changed. To remove an assigned public IP, or to assign
+ // a public IP to a host without one, recreate the host with [assign_public_ip] set as needed.
+ //
+ // Possible values:
+ // * false - don't assign a public IP to the host.
+ // * true - the host should have a public IP address.
+ bool assign_public_ip = 4;
+
+ // Name of the shard that the host is assigned to.
+ string shard_name = 5 [
+ (length) = "<=63",
+ (pattern) = "[a-zA-Z0-9_-]*"
+ ];
+}
+
+message ConfigSpec {
+ message Clickhouse {
+ // Configuration for a ClickHouse server.
+ config.ClickhouseConfig config = 1;
+
+ // Resources allocated to ClickHouse hosts.
+ Resources resources = 2;
+ }
+
+ message Zookeeper {
+ // Resources allocated to ZooKeeper hosts. If not set, minimal available resources will be used.
+ // All available resource presets can be retrieved with a [ResourcePresetService.List] request.
+ Resources resources = 1;
+ }
+
+ // Version of the ClickHouse server software.
+ string version = 3;
+
+ // Configuration and resources for a ClickHouse server.
+ Clickhouse clickhouse = 1;
+
+ // Configuration and resources for a ZooKeeper server.
+ Zookeeper zookeeper = 2;
+
+ // Time to start the daily backup, in the UTC timezone.
+ google.type.TimeOfDay backup_window_start = 4;
+
+ // Access policy for external services.
+ //
+ // If you want a specific service to access the ClickHouse cluster, set the necessary values in this policy.
+ Access access = 5;
+
+ CloudStorage cloud_storage = 6;
+
+ // Whether database management through SQL commands is enabled.
+ google.protobuf.BoolValue sql_database_management = 7;
+
+ // Whether user management through SQL commands is enabled.
+ google.protobuf.BoolValue sql_user_management = 8;
+
+ // Password for user 'admin' that has SQL user management access.
+ string admin_password = 9;
+
+ // Whether the cluster should use embedded ClickHouse Keeper instead of ZooKeeper.
+ google.protobuf.BoolValue embedded_keeper = 10;
+}
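
A sketch of filling ConfigSpec for a cluster create or update call. The version string, preset, and disk type IDs are placeholders, Resources is assumed to be the usual resource_preset_id/disk_size/disk_type_id message from cluster.proto, and admin_password is set because SQL user management is enabled:

    from google.protobuf.wrappers_pb2 import BoolValue
    from google.type.timeofday_pb2 import TimeOfDay
    from yandex.cloud.mdb.clickhouse.v1.cluster_pb2 import Resources
    from yandex.cloud.mdb.clickhouse.v1.cluster_service_pb2 import ConfigSpec

    config_spec = ConfigSpec(
        version="24.3",                          # placeholder version string
        clickhouse=ConfigSpec.Clickhouse(
            resources=Resources(
                resource_preset_id="s2.micro",   # placeholder preset
                disk_size=32 * 2**30,            # bytes; here 32 GiB
                disk_type_id="network-ssd",      # placeholder disk type
            ),
        ),
        backup_window_start=TimeOfDay(hours=3),  # daily backup starts at 03:00 UTC
        sql_user_management=BoolValue(value=True),
        admin_password="<strong password>",      # needed when SQL user management is on
    )
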
+
+message ShardConfigSpec {
+ message Clickhouse {
+ // ClickHouse settings for the shard.
+ config.ClickhouseConfig config = 1;
+
+ // Computational resources for the shard.
+ Resources resources = 2;
+
+ // Relative weight of the shard considered when writing data to the cluster.
+ // For details, see [ClickHouse documentation](https://clickhouse.com/docs/en/operations/table_engines/distributed/).
+ google.protobuf.Int64Value weight = 3;
+ }
+
+ // ClickHouse configuration for a shard.
+ Clickhouse clickhouse = 1;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.proto
new file mode 100644
index 0000000000..ede985e5d9
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.proto
@@ -0,0 +1,741 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1.config;
+
+import "google/protobuf/wrappers.proto";
+import "yandex/cloud/validation.proto";
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1/config;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1.config";
+
+// ClickHouse configuration options. Detailed description for each set of options
+// is available in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server_settings/settings/).
+//
+// Any options not listed here are not supported.
+message ClickhouseConfig {
+ enum LogLevel {
+ LOG_LEVEL_UNSPECIFIED = 0;
+
+ TRACE = 1;
+
+ DEBUG = 2;
+
+ INFORMATION = 3;
+
+ WARNING = 4;
+
+ ERROR = 5;
+ }
+
+ // Options specific to the MergeTree table engine.
+ message MergeTree {
+ // Number of blocks of hashes to keep in ZooKeeper.
+ google.protobuf.Int64Value replicated_deduplication_window = 1;
+
+ // Period of time to keep blocks of hashes for.
+ google.protobuf.Int64Value replicated_deduplication_window_seconds = 2;
+
+ // If a table contains at least this many active parts in a single partition, artificially slow down inserts into the table.
+ google.protobuf.Int64Value parts_to_delay_insert = 3;
+
+ // If a single partition contains more than this many active parts, throw a 'Too many parts ...' exception.
+ google.protobuf.Int64Value parts_to_throw_insert = 4;
+
+ google.protobuf.Int64Value inactive_parts_to_delay_insert = 9;
+
+ google.protobuf.Int64Value inactive_parts_to_throw_insert = 10;
+
+ // How many merge and mutation tasks are allowed simultaneously in the ReplicatedMergeTree queue.
+ google.protobuf.Int64Value max_replicated_merges_in_queue = 5;
+
+ // If there are fewer than the specified number of free entries in the background pool (or replicated queue), start to lower the
+ // maximum size of merges to process.
+ google.protobuf.Int64Value number_of_free_entries_in_pool_to_lower_max_size_of_merge = 6;
+
+ // Maximum total size of parts to merge when there is the minimum number of free threads in the background pool (or free entries
+ // in the replication queue).
+ google.protobuf.Int64Value max_bytes_to_merge_at_min_space_in_pool = 7;
+
+ google.protobuf.Int64Value max_bytes_to_merge_at_max_space_in_pool = 8;
+
+ // Minimum number of bytes in a data part that can be stored in **Wide** format.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#min_bytes_for_wide_part).
+ google.protobuf.Int64Value min_bytes_for_wide_part = 11;
+
+ // Minimum number of rows in a data part that can be stored in **Wide** format.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#min_rows_for_wide_part).
+ google.protobuf.Int64Value min_rows_for_wide_part = 12;
+
+ // Enables or disables complete dropping of data parts where all rows are expired in MergeTree tables.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#ttl_only_drop_parts).
+ google.protobuf.BoolValue ttl_only_drop_parts = 13;
+
+ google.protobuf.BoolValue allow_remote_fs_zero_copy_replication = 14;
+
+ google.protobuf.Int64Value merge_with_ttl_timeout = 15;
+
+ google.protobuf.Int64Value merge_with_recompression_ttl_timeout = 16;
+
+ google.protobuf.Int64Value max_parts_in_total = 17;
+
+ google.protobuf.Int64Value max_number_of_merges_with_ttl_in_pool = 18;
+
+ google.protobuf.Int64Value cleanup_delay_period = 19;
+
+ google.protobuf.Int64Value number_of_free_entries_in_pool_to_execute_mutation = 20;
+
+ // The 'too many parts' check according to 'parts_to_delay_insert' and 'parts_to_throw_insert' is active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger than the specified threshold, INSERTs are neither delayed nor rejected. This makes it possible to have hundreds of terabytes in a single table on a single server if the parts are successfully merged into larger parts. This does not affect the thresholds on inactive parts or total parts.
+ // Default: 1 GiB
+ // Min version: 22.10
+ // See in-depth description in [ClickHouse GitHub](https://github.com/ClickHouse/ClickHouse/blob/f9558345e886876b9132d9c018e357f7fa9b22a3/src/Storages/MergeTree/MergeTreeSettings.h#L80)
+ google.protobuf.Int64Value max_avg_part_size_for_too_many_parts = 21 [(value) = ">=0"];
+
+ // Merge parts if every part in the range is older than the value of min_age_to_force_merge_seconds.
+ // Default: 0 - disabled
+ // Min version: 22.10
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#min_age_to_force_merge_seconds)
+ google.protobuf.Int64Value min_age_to_force_merge_seconds = 22 [(value) = ">=0"];
+
+ // Whether min_age_to_force_merge_seconds should be applied only to the entire partition and not to a subset.
+ // Default: false
+ // Min version: 22.11
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#min_age_to_force_merge_seconds)
+ google.protobuf.BoolValue min_age_to_force_merge_on_partition_only = 23;
+
+ // Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in background_schedule_pool frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters.
+ // Default: 5000
+ // Min version: 21.10
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#merge_selecting_sleep_ms)
+ google.protobuf.Int64Value merge_selecting_sleep_ms = 24 [(value) = ">0"];
+ }
+
+ message Kafka {
+ enum SecurityProtocol {
+ SECURITY_PROTOCOL_UNSPECIFIED = 0;
+ SECURITY_PROTOCOL_PLAINTEXT = 1;
+ SECURITY_PROTOCOL_SSL = 2;
+ SECURITY_PROTOCOL_SASL_PLAINTEXT = 3;
+ SECURITY_PROTOCOL_SASL_SSL = 4;
+ }
+
+ enum SaslMechanism {
+ SASL_MECHANISM_UNSPECIFIED = 0;
+ SASL_MECHANISM_GSSAPI = 1;
+ SASL_MECHANISM_PLAIN = 2;
+ SASL_MECHANISM_SCRAM_SHA_256 = 3;
+ SASL_MECHANISM_SCRAM_SHA_512 = 4;
+ }
+
+ SecurityProtocol security_protocol = 1;
+ SaslMechanism sasl_mechanism = 2;
+ string sasl_username = 3;
+ string sasl_password = 4;
+ google.protobuf.BoolValue enable_ssl_certificate_verification = 5;
+ google.protobuf.Int64Value max_poll_interval_ms = 6 [(value) = ">=0"];
+ google.protobuf.Int64Value session_timeout_ms = 7 [(value) = ">=0"];
+ }
+
+ message KafkaTopic {
+ string name = 1 [(required) = true];
+ Kafka settings = 2 [(required) = true];
+ }
+
+ message Rabbitmq {
+ // [RabbitMQ](https://clickhouse.com/docs/en/engines/table-engines/integrations/rabbitmq/) username
+ string username = 1;
+
+ // [RabbitMQ](https://clickhouse.com/docs/en/engines/table-engines/integrations/rabbitmq/) password
+ string password = 2;
+
+ // [RabbitMQ](https://clickhouse.com/docs/en/engines/table-engines/integrations/rabbitmq/) virtual host
+ string vhost = 3;
+ }
+
+ message Compression {
+ enum Method {
+ METHOD_UNSPECIFIED = 0;
+
+ // [LZ4 compression algorithm](https://lz4.github.io/lz4/).
+ LZ4 = 1;
+
+ // [Zstandard compression algorithm](https://facebook.github.io/zstd/).
+ ZSTD = 2;
+ }
+
+ // Compression method to use for the specified combination of [min_part_size] and [min_part_size_ratio].
+ Method method = 1;
+
+ // Minimum size of a part of a table.
+ int64 min_part_size = 2 [(value) = ">=1"];
+
+ // Minimum ratio of a part relative to the size of all the data in the table.
+ double min_part_size_ratio = 3;
+
+ google.protobuf.Int64Value level = 4 [(value) = ">=0"];
+ }
+
+ message ExternalDictionary {
+ message HttpSource {
+ // URL of the source dictionary available over HTTP.
+ string url = 1 [(required) = true];
+
+ // The data format. Valid values are all formats supported by the ClickHouse SQL dialect.
+ string format = 2 [(required) = true];
+ }
+
+ message MysqlSource {
+ message Replica {
+ // MySQL host of the replica.
+ string host = 1 [(required) = true, (length) = "<=253"];
+
+ // The priority of the replica that ClickHouse takes into account when connecting.
+ // The replica with the highest priority should have this field set to the lowest number.
+ int64 priority = 2 [(required) = true, (value) = ">0"];
+
+ // Port to use when connecting to the replica.
+ // If a port is not specified for a replica, ClickHouse uses the port specified for the source.
+ int64 port = 3 [(value) = "0-65535"];
+
+ // Name of the MySQL database user.
+ string user = 4;
+
+ // Password of the MySQL database user.
+ string password = 5;
+ }
+
+ // Name of the MySQL database to connect to.
+ string db = 1 [(required) = true];
+
+ // Name of the database table to use as a ClickHouse dictionary.
+ string table = 2 [(required) = true];
+
+ // Default port to use when connecting to a replica of the dictionary source.
+ int64 port = 3 [(value) = "0-65535"];
+
+ // Name of the default user for replicas of the dictionary source.
+ string user = 4;
+
+ // Password of the default user for replicas of the dictionary source.
+ string password = 5;
+
+ // List of MySQL replicas of the database used as dictionary source.
+ repeated Replica replicas = 6 [(size) = ">0"];
+
+ // Selection criteria for the data in the specified MySQL table.
+ string where = 7;
+
+ // Query for checking the dictionary status, to pull only updated data.
+ // For more details, see [ClickHouse documentation on dictionaries](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_lifetime/).
+ string invalidate_query = 8;
+ }
+
+ message ClickhouseSource {
+ // Name of the ClickHouse database.
+ string db = 1 [(required) = true];
+
+ // Name of the table in the specified database to be used as the dictionary source.
+ string table = 2 [(required) = true];
+
+ // ClickHouse host of the specified database.
+ string host = 3 [(required) = true, (length) = "<=253"];
+
+ // Port to use when connecting to the host.
+ int64 port = 4 [(value) = "0-65535"];
+
+ // Name of the ClickHouse database user.
+ string user = 5 [(required) = true];
+
+ // Password of the ClickHouse database user.
+ string password = 6;
+
+ // Selection criteria for the data in the specified ClickHouse table.
+ string where = 7;
+ }
+
+ message MongodbSource {
+ // Name of the MongoDB database.
+ string db = 1 [(required) = true];
+
+ // Name of the collection in the specified database to be used as the dictionary source.
+ string collection = 2 [(required) = true];
+
+ // MongoDB host of the specified database.
+ string host = 3 [(required) = true, (length) = "<=253"];
+
+ // Port to use when connecting to the host.
+ int64 port = 4 [(value) = "0-65535"];
+
+ // Name of the MongoDB database user.
+ string user = 5 [(required) = true];
+
+ // Password of the MongoDB database user.
+ string password = 6;
+
+ string options = 7;
+ }
+
+ message PostgresqlSource {
+ enum SslMode {
+ SSL_MODE_UNSPECIFIED = 0;
+
+ // Only try a non-SSL connection.
+ DISABLE = 1;
+
+ // First try a non-SSL connection; if that fails, try an SSL connection.
+ ALLOW = 2;
+
+ // First try an SSL connection; if that fails, try a non-SSL connection.
+ PREFER = 3;
+
+ // Only try an SSL connection, and verify that the server certificate is issued by a trusted certificate authority (CA).
+ VERIFY_CA = 4;
+
+ // Only try an SSL connection, verify that the server certificate is issued by a trusted CA and that the requested server host name matches that in the certificate.
+ VERIFY_FULL = 5;
+ }
+
+ // Name of the PostgreSQL database.
+ string db = 1 [(required) = true];
+
+ // Name of the table in the specified database to be used as the dictionary source.
+ string table = 2 [(required) = true];
+
+ // Names of the PostgreSQL hosts.
+ repeated string hosts = 3 [(size) = ">0"];
+
+ // Port to use when connecting to the host.
+ int64 port = 4 [(value) = "0-65535"];
+
+ // Name of the PostgreSQL database user.
+ string user = 5 [(required) = true];
+
+ // Password of the PostgreSQL database user.
+ string password = 6;
+
+ // Query for checking the dictionary status, to pull only updated data.
+ // For more details, see [ClickHouse documentation on dictionaries](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_lifetime/).
+ string invalidate_query = 7;
+
+ // Mode of SSL TCP/IP connection to the PostgreSQL host.
+ // For more details, see [PostgreSQL documentation](https://www.postgresql.org/docs/current/libpq-ssl.html).
+ SslMode ssl_mode = 8;
+ }
+
+ message Structure {
+ message Attribute {
+ // Name of the column.
+ string name = 1 [(required) = true];
+
+ // Type of the column.
+ string type = 2 [(required) = true];
+
+ // Default value for an element without data (for example, an empty string).
+ string null_value = 3;
+
+ // Expression, describing the attribute, if applicable.
+ string expression = 4;
+
+ // Indication of hierarchy support.
+ // Default value: `false`.
+ bool hierarchical = 5;
+
+ // Indication of injective mapping "id -> attribute".
+ // Default value: `false`.
+ bool injective = 6;
+ }
+
+ // Numeric key.
+ message Id {
+ // Name of the numeric key.
+ string name = 1 [(required) = true];
+ }
+
+ // Complex key.
+ message Key {
+ // Attributes of a complex key.
+ repeated Attribute attributes = 1 [(size) = ">0"];
+ }
+
+ // Single numeric key column for the dictionary.
+ Id id = 1;
+
+ // Composite key for the dictionary, consisting of one or more key columns.
+ // For details, see [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_structure/#composite-key).
+ Key key = 3;
+
+ // Field holding the beginning of the range for dictionaries with `RANGE_HASHED` layout.
+ // For details, see [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_layout/#range-hashed).
+ Attribute range_min = 4;
+
+ // Field holding the end of the range for dictionaries with `RANGE_HASHED` layout.
+ // For details, see [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_layout/#range-hashed).
+ Attribute range_max = 5;
+
+ // Description of the fields available for database queries.
+ // For details, see [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_structure/#attributes).
+ repeated Attribute attributes = 2 [(size) = ">0"];
+ }
+
+ // Layout determining how to store the dictionary in memory.
+ message Layout {
+ enum Type {
+ TYPE_UNSPECIFIED = 0;
+
+ // The entire dictionary is stored in memory in the form of flat arrays.
+ // Available for all dictionary sources.
+ FLAT = 1;
+
+ // The entire dictionary is stored in memory in the form of a hash table.
+ // Available for all dictionary sources.
+ HASHED = 2;
+
+ // Similar to HASHED, to be used with composite keys.
+ // Available for all dictionary sources.
+ COMPLEX_KEY_HASHED = 3;
+
+ // The entire dictionary is stored in memory in the form of a hash table,
+ // with an ordered array of ranges and their corresponding values.
+ // Available for all dictionary sources.
+ RANGE_HASHED = 4;
+
+ // The dictionary is stored in a cache with a set number of cells.
+ // Available for MySQL, ClickHouse and HTTP dictionary sources.
+ CACHE = 5;
+
+ // Similar to CACHE, to be used with composite keys.
+ // Available for MySQL, ClickHouse and HTTP dictionary sources.
+ COMPLEX_KEY_CACHE = 6;
+ }
+
+ // Layout type for an external dictionary.
+ Type type = 1 [(required) = true];
+
+ // Number of cells in the cache. Rounded up to a power of two.
+ // Applicable only for CACHE and COMPLEX_KEY_CACHE layout types.
+ int64 size_in_cells = 2;
+ }
+
+ message Range {
+ // Minimum dictionary lifetime.
+ int64 min = 1;
+
+ // Maximum dictionary lifetime.
+ int64 max = 2;
+ }
+
+ // Name of the external dictionary.
+ string name = 1 [(required) = true];
+
+ // Set of attributes for the external dictionary.
+ // For in-depth description, see [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_structure/).
+ Structure structure = 2 [(required) = true];
+
+ // Layout for storing the dictionary in memory.
+ // For in-depth description, see [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_layout/).
+ Layout layout = 3 [(required) = true];
+
+ // Setting for the period of time between dictionary updates.
+ // For details, see [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_lifetime/).
+ oneof lifetime {
+ option (exactly_one) = true;
+
+ // Fixed interval between dictionary updates.
+ int64 fixed_lifetime = 4;
+
+ // Range of intervals between dictionary updates for ClickHouse to choose from.
+ Range lifetime_range = 5;
+ }
+
+ // Description of the source for the external dictionary.
+ oneof source {
+ option (exactly_one) = true;
+
+ // HTTP source for the dictionary.
+ HttpSource http_source = 6;
+
+ // MySQL source for the dictionary.
+ MysqlSource mysql_source = 7;
+
+ // ClickHouse source for the dictionary.
+ ClickhouseSource clickhouse_source = 8;
+
+ // MongoDB source for the dictionary.
+ MongodbSource mongodb_source = 9;
+
+ // PostgreSQL source for the dictionary.
+ PostgresqlSource postgresql_source = 10;
+ }
+ }
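
Putting the ExternalDictionary pieces together: [structure] names the key and the attributes, [layout] picks the in-memory representation, and exactly one lifetime and one source must be set (both oneofs are exactly_one). A sketch of a FLAT dictionary fed over HTTP, assuming the generated clickhouse_pb2 module; the source URL is a placeholder:

    from yandex.cloud.mdb.clickhouse.v1.config.clickhouse_pb2 import ClickhouseConfig

    ED = ClickhouseConfig.ExternalDictionary
    dictionary = ED(
        name="geo_names",
        structure=ED.Structure(
            id=ED.Structure.Id(name="id"),  # single numeric key column
            attributes=[ED.Structure.Attribute(
                name="name",
                type="String",
                null_value="",              # default for elements without data
            )],
        ),
        layout=ED.Layout(type=ED.Layout.Type.FLAT),
        fixed_lifetime=300,                 # refresh every 300 seconds
        http_source=ED.HttpSource(
            url="https://example.com/geo_names.tsv",  # placeholder source
            format="TabSeparated",
        ),
    )
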
+
+ // Rollup settings for the GraphiteMergeTree table engine.
+ message GraphiteRollup {
+ message Pattern {
+ message Retention {
+ // Minimum age of the data in seconds.
+ int64 age = 1 [(value) = ">=0"];
+
+ // Precision of determining the age of the data, in seconds.
+ int64 precision = 2 [(value) = ">0"];
+ }
+
+ // Pattern for metric names.
+ string regexp = 1;
+
+ // Name of the aggregating function to apply to data of the age specified in [retention].
+ string function = 2 [(required) = true];
+
+ // Age of data to use for thinning.
+ repeated Retention retention = 3 [(size) = ">0"];
+ }
+
+ // Name for the specified combination of settings for Graphite rollup.
+ string name = 1 [(required) = true];
+
+ // Patterns to use for the rollup.
+ repeated Pattern patterns = 2 [(size) = ">0"];
+ }
+
+ // Logging level for the ClickHouse cluster. Possible values: TRACE, DEBUG, INFORMATION, WARNING, ERROR.
+ LogLevel log_level = 1;
+
+ // Settings for the MergeTree engine.
+ // See description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server_settings/settings/#merge_tree).
+ MergeTree merge_tree = 2;
+
+ // Compression settings for the ClickHouse cluster.
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server_settings/settings/#compression).
+ repeated Compression compression = 3;
+
+ // Configuration of external dictionaries to be used by the ClickHouse cluster.
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts/).
+ repeated ExternalDictionary dictionaries = 4;
+
+ // Settings for thinning Graphite data.
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server_settings/settings/#server_settings-graphite_rollup).
+ repeated GraphiteRollup graphite_rollup = 5;
+
+ Kafka kafka = 35;
+
+ repeated KafkaTopic kafka_topics = 36;
+
+ Rabbitmq rabbitmq = 37;
+
+ // Maximum number of inbound connections.
+ google.protobuf.Int64Value max_connections = 6 [(value) = ">=10"];
+
+ // Maximum number of simultaneously processed requests.
+ google.protobuf.Int64Value max_concurrent_queries = 7 [(value) = ">=10"];
+
+ // Number of milliseconds that ClickHouse waits for incoming requests before closing the connection.
+ google.protobuf.Int64Value keep_alive_timeout = 8;
+
+ // Cache size (in bytes) for uncompressed data used by MergeTree tables.
+ google.protobuf.Int64Value uncompressed_cache_size = 9;
+
+ // Approximate size (in bytes) of the cache of "marks" used by MergeTree tables.
+ google.protobuf.Int64Value mark_cache_size = 10 [(value) = ">0"];
+
+ // Maximum size of the table that can be deleted using a DROP query.
+ google.protobuf.Int64Value max_table_size_to_drop = 11;
+
+ // Maximum size of the partition that can be deleted using a DROP query.
+ google.protobuf.Int64Value max_partition_size_to_drop = 13;
+
+ // The setting is deprecated and has no effect.
+ google.protobuf.Int64Value builtin_dictionaries_reload_interval = 12 [deprecated = true];
+
+ // The server's time zone to be used in DateTime field conversions. Specified as an IANA identifier.
+ string timezone = 14;
+
+ // Enable or disable geobase.
+ google.protobuf.BoolValue geobase_enabled = 66;
+
+ // Address of the archive with the user geobase in Object Storage.
+ string geobase_uri = 15;
+
+ // The maximum size that query_log can grow to before old data will be removed. If set to 0, automatic removal of
+ // query_log data based on size is disabled.
+ google.protobuf.Int64Value query_log_retention_size = 16;
+
+ // The maximum time that query_log records will be retained before removal. If set to 0, automatic removal of
+ // query_log data based on time is disabled.
+ google.protobuf.Int64Value query_log_retention_time = 17;
+
+ // Whether query_thread_log system table is enabled.
+ google.protobuf.BoolValue query_thread_log_enabled = 18;
+
+ // The maximum size that query_thread_log can grow to before old data will be removed. If set to 0, automatic removal of
+ // query_thread_log data based on size is disabled.
+ google.protobuf.Int64Value query_thread_log_retention_size = 19;
+
+ // The maximum time that query_thread_log records will be retained before removal. If set to 0, automatic removal of
+ // query_thread_log data based on time is disabled.
+ google.protobuf.Int64Value query_thread_log_retention_time = 20;
+
+ // The maximum size that part_log can grow to before old data will be removed. If set to 0, automatic removal of
+ // part_log data based on size is disabled.
+ google.protobuf.Int64Value part_log_retention_size = 21;
+
+ // The maximum time that part_log records will be retained before removal. If set to 0, automatic removal of
+ // part_log data based on time is disabled.
+ google.protobuf.Int64Value part_log_retention_time = 22;
+
+ // Whether metric_log system table is enabled.
+ google.protobuf.BoolValue metric_log_enabled = 23;
+
+ // The maximum size that metric_log can grow to before old data will be removed. If set to 0, automatic removal of
+ // metric_log data based on size is disabled.
+ google.protobuf.Int64Value metric_log_retention_size = 24;
+
+ // The maximum time that metric_log records will be retained before removal. If set to 0, automatic removal of
+ // metric_log data based on time is disabled.
+ google.protobuf.Int64Value metric_log_retention_time = 25;
+
+ // Whether trace_log system table is enabled.
+ google.protobuf.BoolValue trace_log_enabled = 26;
+
+ // The maximum size that trace_log can grow to before old data will be removed. If set to 0, automatic removal of
+ // trace_log data based on size is disabled.
+ google.protobuf.Int64Value trace_log_retention_size = 27;
+
+ // The maximum time that trace_log records will be retained before removal. If set to 0, automatic removal of
+ // trace_log data based on time is disabled.
+ google.protobuf.Int64Value trace_log_retention_time = 28;
+
+ // Whether text_log system table is enabled.
+ google.protobuf.BoolValue text_log_enabled = 29;
+
+ // The maximum size that text_log can grow to before old data will be removed. If set to 0, automatic removal of
+ // text_log data based on size is disabled.
+ google.protobuf.Int64Value text_log_retention_size = 30;
+
+ // The maximum time that text_log records will be retained before removal. If set to 0, automatic removal of
+ // text_log data based on time is disabled.
+ google.protobuf.Int64Value text_log_retention_time = 31;
+
+ // Logging level for text_log system table. Possible values: TRACE, DEBUG, INFORMATION, WARNING, ERROR.
+ LogLevel text_log_level = 32;
+
+ // Enable or disable opentelemetry_span_log system table. Default value: false.
+ google.protobuf.BoolValue opentelemetry_span_log_enabled = 42;
+
+ // The maximum size that opentelemetry_span_log can grow to before old data will be removed. If set to 0 (default),
+ // automatic removal of opentelemetry_span_log data based on size is disabled.
+ google.protobuf.Int64Value opentelemetry_span_log_retention_size = 55 [(value) = ">=0"];
+
+ // The maximum time that opentelemetry_span_log records will be retained before removal. If set to 0,
+ // automatic removal of opentelemetry_span_log data based on time is disabled.
+ google.protobuf.Int64Value opentelemetry_span_log_retention_time = 56 [(value) = ">=0"];
+
+ // Enable or disable query_views_log system table. Default value: false.
+ google.protobuf.BoolValue query_views_log_enabled = 49;
+
+ // The maximum size that query_views_log can grow to before old data will be removed. If set to 0 (default),
+ // automatic removal of query_views_log data based on size is disabled.
+ google.protobuf.Int64Value query_views_log_retention_size = 50 [(value) = ">=0"];
+
+ // The maximum time that query_views_log records will be retained before removal. If set to 0,
+ // automatic removal of query_views_log data based on time is disabled.
+ google.protobuf.Int64Value query_views_log_retention_time = 51 [(value) = ">=0"];
+
+ // Enable or disable asynchronous_metric_log system table. Default value: false.
+ google.protobuf.BoolValue asynchronous_metric_log_enabled = 52;
+
+ // The maximum size that asynchronous_metric_log can grow to before old data will be removed. If set to 0 (default),
+ // automatic removal of asynchronous_metric_log data based on size is disabled.
+ google.protobuf.Int64Value asynchronous_metric_log_retention_size = 53 [(value) = ">=0"];
+
+ // The maximum time that asynchronous_metric_log records will be retained before removal. If set to 0,
+ // automatic removal of asynchronous_metric_log data based on time is disabled.
+ google.protobuf.Int64Value asynchronous_metric_log_retention_time = 54 [(value) = ">=0"];
+
+ // Enable or disable session_log system table. Default value: false.
+ google.protobuf.BoolValue session_log_enabled = 57;
+
+ // The maximum size that session_log can grow to before old data will be removed. If set to 0 (default),
+ // automatic removal of session_log data based on size is disabled.
+ google.protobuf.Int64Value session_log_retention_size = 58 [(value) = ">=0"];
+
+ // The maximum time that session_log records will be retained before removal. If set to 0,
+ // automatic removal of session_log data based on time is disabled.
+ google.protobuf.Int64Value session_log_retention_time = 59 [(value) = ">=0"];
+
+ // Enable or disable zookeeper_log system table. Default value: false.
+ google.protobuf.BoolValue zookeeper_log_enabled = 60;
+
+ // The maximum size that zookeeper_log can grow to before old data will be removed. If set to 0 (default),
+ // automatic removal of zookeeper_log data based on size is disabled.
+ google.protobuf.Int64Value zookeeper_log_retention_size = 61 [(value) = ">=0"];
+
+ // The maximum time that zookeeper_log records will be retained before removal. If set to 0,
+ // automatic removal of zookeeper_log data based on time is disabled.
+ google.protobuf.Int64Value zookeeper_log_retention_time = 62 [(value) = ">=0"];
+
+ // Enable or disable asynchronous_insert_log system table. Default value: false.
+ // Minimal required ClickHouse version: 22.10.
+ google.protobuf.BoolValue asynchronous_insert_log_enabled = 63;
+
+ // The maximum size that asynchronous_insert_log can grow to before old data will be removed. If set to 0 (default),
+ // automatic removal of asynchronous_insert_log data based on size is disabled.
+ google.protobuf.Int64Value asynchronous_insert_log_retention_size = 64 [(value) = ">=0"];
+
+ // The maximum time that asynchronous_insert_log records will be retained before removal. If set to 0,
+ // automatic removal of asynchronous_insert_log data based on time is disabled.
+ google.protobuf.Int64Value asynchronous_insert_log_retention_time = 65 [(value) = ">=0"];
+
+ google.protobuf.Int64Value background_pool_size = 33 [(value) = ">0"];
+
+ // Sets a ratio between the number of threads and the number of background merges and mutations that can be executed concurrently. For example, if the ratio equals 2 and background_pool_size is set to 16, then ClickHouse can execute 32 background merges concurrently. This is possible because background operations can be suspended and postponed, which is needed to give small merges more execution priority. You can only increase this ratio at runtime; to lower it, you have to restart the server. As with the background_pool_size setting, background_merges_mutations_concurrency_ratio can be applied from the default profile for backward compatibility.
+ // Default: 2
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#background_merges_mutations_concurrency_ratio)
+ google.protobuf.Int64Value background_merges_mutations_concurrency_ratio = 48 [(value) = ">0"];
+
+ google.protobuf.Int64Value background_schedule_pool_size = 34 [(value) = ">0"];
+
+ // Sets the number of threads performing background fetches for tables with **ReplicatedMergeTree** engines. Default value: 8.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#background_fetches_pool_size).
+ google.protobuf.Int64Value background_fetches_pool_size = 38 [(value) = ">0"];
+
+ google.protobuf.Int64Value background_move_pool_size = 39 [(value) = ">0"];
+
+ google.protobuf.Int64Value background_distributed_schedule_pool_size = 40 [(value) = ">0"];
+
+ google.protobuf.Int64Value background_buffer_flush_schedule_pool_size = 41 [(value) = ">0"];
+
+ google.protobuf.Int64Value background_message_broker_schedule_pool_size = 46 [(value) = ">0"];
+
+ // The maximum number of threads that will be used for performing a variety of operations (mostly garbage collection) for *MergeTree-engine tables in the background.
+ // Default: 8
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#background_common_pool_size)
+ google.protobuf.Int64Value background_common_pool_size = 47 [(value) = ">0"];
+
+ // The default database.
+ //
+ // To get a list of cluster databases, see [Yandex Managed ClickHouse documentation](/docs/managed-clickhouse/operations/databases#list-db).
+ google.protobuf.StringValue default_database = 43;
+
+ // Sets the memory size (in bytes) for a stack trace at every peak allocation step. Default value: **4194304**.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#total-memory-profiler-step).
+ google.protobuf.Int64Value total_memory_profiler_step = 44;
+
+ google.protobuf.DoubleValue total_memory_tracker_sample_probability = 45;
+}
+
+message ClickhouseConfigSet {
+ // Effective settings for a ClickHouse cluster (a combination of settings defined
+ // in [user_config] and [default_config]).
+ ClickhouseConfig effective_config = 1 [(required) = true];
+
+ // User-defined settings for a ClickHouse cluster.
+ ClickhouseConfig user_config = 2;
+
+ // Default configuration for a ClickHouse cluster.
+ ClickhouseConfig default_config = 3;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/database.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/database.proto
new file mode 100644
index 0000000000..39cbd586c8
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/database.proto
@@ -0,0 +1,23 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+import "yandex/cloud/validation.proto";
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+// A ClickHouse Database resource. For more information, see the
+// [Developer's Guide](/docs/managed-clickhouse/concepts).
+message Database {
+ // Name of the database.
+ string name = 1;
+
+ // ID of the ClickHouse cluster that the database belongs to.
+ string cluster_id = 2;
+}
+
+message DatabaseSpec {
+ // Name of the ClickHouse database. 1-63 characters long.
+ string name = 1 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_-]*"];
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/database_service.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/database_service.proto
new file mode 100644
index 0000000000..83a27b5e24
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/database_service.proto
@@ -0,0 +1,117 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+import "google/api/annotations.proto";
+import "yandex/cloud/api/operation.proto";
+import "yandex/cloud/operation/operation.proto";
+import "yandex/cloud/validation.proto";
+import "yandex/cloud/mdb/clickhouse/v1/database.proto";
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+// A set of methods for managing ClickHouse Database resources.
+// NOTE: these methods are available only if database management through SQL is disabled.
+service DatabaseService {
+ // Returns the specified ClickHouse Database resource.
+ //
+ // To get the list of available ClickHouse Database resources, make a [List] request.
+ rpc Get (GetDatabaseRequest) returns (Database) {
+ option (google.api.http) = { get: "/managed-clickhouse/v1/clusters/{cluster_id}/databases/{database_name}" };
+ }
+
+ // Retrieves the list of ClickHouse Database resources in the specified cluster.
+ rpc List (ListDatabasesRequest) returns (ListDatabasesResponse) {
+ option (google.api.http) = { get: "/managed-clickhouse/v1/clusters/{cluster_id}/databases" };
+ }
+
+ // Creates a new ClickHouse database in the specified cluster.
+ rpc Create (CreateDatabaseRequest) returns (operation.Operation) {
+ option (google.api.http) = { post: "/managed-clickhouse/v1/clusters/{cluster_id}/databases" body: "*" };
+ option (yandex.cloud.api.operation) = {
+ metadata: "CreateDatabaseMetadata"
+ response: "Database"
+ };
+ }
+
+ // Deletes the specified ClickHouse database.
+ rpc Delete (DeleteDatabaseRequest) returns (operation.Operation) {
+ option (google.api.http) = { delete: "/managed-clickhouse/v1/clusters/{cluster_id}/databases/{database_name}" };
+ option (yandex.cloud.api.operation) = {
+ metadata: "DeleteDatabaseMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+}
+
+message GetDatabaseRequest {
+ // ID of the ClickHouse cluster that the database belongs to.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Name of the ClickHouse Database resource to return.
+ // To get the name of the database, use a [DatabaseService.List] request.
+ string database_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_-]*"];
+}
+
+message ListDatabasesRequest {
+ // ID of the ClickHouse cluster to list databases in.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // The maximum number of results per page to return. If the number of available
+ // results is larger than [page_size], the service returns a [ListDatabasesResponse.next_page_token]
+ // that can be used to get the next page of results in subsequent list requests.
+ int64 page_size = 2 [(value) = "<=1000"];
+
+ // Page token. To get the next page of results, set [page_token] to the [ListDatabasesResponse.next_page_token]
+ // returned by the previous list request.
+ string page_token = 3 [(length) = "<=100"];
+}
+
+message ListDatabasesResponse {
+ // List of ClickHouse databases.
+ repeated Database databases = 1;
+
+ // This token allows you to get the next page of results for list requests. If the number of results
+ // is larger than [ListDatabasesRequest.page_size], use the [next_page_token] as the value
+ // for the [ListDatabasesRequest.page_token] parameter in the next list request. Each subsequent
+ // list request will have its own [next_page_token] to continue paging through the results.
+ string next_page_token = 2;
+}
+
+message CreateDatabaseRequest {
+ // ID of the ClickHouse cluster to create a database in.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Configuration of the database to create.
+ DatabaseSpec database_spec = 2 [(required) = true];
+}
+
+message CreateDatabaseMetadata {
+ // ID of the ClickHouse cluster where a database is being created.
+ string cluster_id = 1;
+
+ // Name of the ClickHouse database that is being created.
+ string database_name = 2;
+}
+
+message DeleteDatabaseRequest {
+ // ID of the ClickHouse cluster to delete a database in.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Name of the database to delete.
+ // To get the name of the database, use a [DatabaseService.List] request.
+ string database_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_-]*"];
+}
+
+message DeleteDatabaseMetadata {
+ // ID of the ClickHouse cluster where a database is being deleted.
+ string cluster_id = 1;
+
+ // Name of the ClickHouse database that is being deleted.
+ string database_name = 2;
+}
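
A sketch of creating a database through this service, assuming the generated database_service_pb2 modules; Create returns a long-running operation that can be awaited the same way as the cluster operations above. The token and cluster ID are placeholders:

    import yandexcloud
    from yandex.cloud.mdb.clickhouse.v1.database_pb2 import DatabaseSpec
    from yandex.cloud.mdb.clickhouse.v1.database_service_pb2 import CreateDatabaseRequest
    from yandex.cloud.mdb.clickhouse.v1.database_service_pb2_grpc import DatabaseServiceStub

    sdk = yandexcloud.SDK(token="<OAuth token>")  # placeholder credentials
    db_client = sdk.client(DatabaseServiceStub)
    op = db_client.Create(CreateDatabaseRequest(
        cluster_id="<cluster ID>",                # placeholder
        database_spec=DatabaseSpec(name="analytics"),
    ))
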
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/format_schema.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/format_schema.proto
new file mode 100644
index 0000000000..ba50556430
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/format_schema.proto
@@ -0,0 +1,31 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+message FormatSchema {
+ // Format schema name.
+ string name = 1;
+
+ // ClickHouse cluster ID.
+ string cluster_id = 2;
+
+ // Schema type. Possible values are the following:
+ //
+ // * FORMAT_SCHEMA_TYPE_PROTOBUF - [Protobuf](https://protobuf.dev/) data format (including [ProtobufSingle](https://clickhouse.com/docs/en/interfaces/formats#protobufsingle)).
+ // * FORMAT_SCHEMA_TYPE_CAPNPROTO - [Cap'n Proto](https://capnproto.org/) data format.
+ FormatSchemaType type = 3;
+
+ // Link to the file of a format schema in Yandex Object Storage. Managed Service for ClickHouse works only with format schemas imported to Object Storage.
+ string uri = 4;
+}
+
+enum FormatSchemaType {
+ FORMAT_SCHEMA_TYPE_UNSPECIFIED = 0;
+
+ FORMAT_SCHEMA_TYPE_PROTOBUF = 1;
+
+ FORMAT_SCHEMA_TYPE_CAPNPROTO = 2;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/format_schema_service.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/format_schema_service.proto
new file mode 100644
index 0000000000..1faa490079
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/format_schema_service.proto
@@ -0,0 +1,158 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/field_mask.proto";
+import "yandex/cloud/api/operation.proto";
+import "yandex/cloud/operation/operation.proto";
+import "yandex/cloud/validation.proto";
+import "yandex/cloud/mdb/clickhouse/v1/format_schema.proto";
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+// A set of methods for managing [format schemas](https://clickhouse.com/docs/en/interfaces/formats) for input and output data.
+service FormatSchemaService {
+ // Returns detailed information about a given format schema.
+ rpc Get (GetFormatSchemaRequest) returns (FormatSchema) {
+ option (google.api.http) = { get: "/managed-clickhouse/v1/clusters/{cluster_id}/formatSchemas/{format_schema_name}" };
+ }
+
+ // Returns a list of format schemas in a cluster.
+ rpc List (ListFormatSchemasRequest) returns (ListFormatSchemasResponse) {
+ option (google.api.http) = { get: "/managed-clickhouse/v1/clusters/{cluster_id}/formatSchemas" };
+ }
+
+ // Adds a format schema to a cluster.
+ rpc Create (CreateFormatSchemaRequest) returns (operation.Operation) {
+ option (google.api.http) = { post: "/managed-clickhouse/v1/clusters/{cluster_id}/formatSchemas" body: "*" };
+ option (yandex.cloud.api.operation) = {
+ metadata: "CreateFormatSchemaMetadata"
+ response: "FormatSchema"
+ };
+ }
+
+ // Changes a format schema.
+ rpc Update (UpdateFormatSchemaRequest) returns (operation.Operation) {
+ option (google.api.http) = { patch: "/managed-clickhouse/v1/clusters/{cluster_id}/formatSchemas/{format_schema_name}" body: "*" };
+ option (yandex.cloud.api.operation) = {
+ metadata: "UpdateFormatSchemaMetadata"
+ response: "FormatSchema"
+ };
+ }
+
+ // Deletes a format schema from a cluster.
+ rpc Delete (DeleteFormatSchemaRequest) returns (operation.Operation) {
+ option (google.api.http) = { delete: "/managed-clickhouse/v1/clusters/{cluster_id}/formatSchemas/{format_schema_name}" };
+ option (yandex.cloud.api.operation) = {
+ metadata: "DeleteFormatSchemaMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+}
+
+message GetFormatSchemaRequest {
+ // ClickHouse cluster ID.
+ //
+ // To get a ClickHouse cluster ID, use the [ClusterService.List] method.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Format schema name.
+ //
+ // To get a format schema name, use the [FormatSchemaService.List] method.
+ string format_schema_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_-]*"];
+}
+
+message ListFormatSchemasRequest {
+ // ClickHouse cluster ID.
+ //
+ // To get a ClickHouse cluster ID, use the [ClusterService.List] method.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // The maximum number of results per page to return. If the number of the results is larger than [page_size], the service returns [ListFormatSchemasResponse.next_page_token]. You can use it to get the next page of the results in subsequent requests of a format schema list.
+ int64 page_size = 2 [(value) = "<=1000"];
+
+ // Page token. To get the next page of results, set [page_token] to the [ListFormatSchemasResponse.next_page_token] returned by the previous format schema list request.
+ string page_token = 3 [(length) = "<=100"];
+}
+
+message ListFormatSchemasResponse {
+ // List of format schemas.
+ repeated FormatSchema format_schemas = 1;
+
+ // This token allows you to get the next page of results when requesting the format schema list. If the number of the results is larger than [ListFormatSchemasRequest.page_size], use the [next_page_token] as the value for the [ListFormatSchemasRequest.page_token] parameter in the next request. Each subsequent request will have its own [next_page_token] to continue paging through the results.
+ string next_page_token = 2;
+}
+
+message CreateFormatSchemaRequest {
+ // ClickHouse cluster ID.
+ //
+ // To get a ClickHouse cluster ID, use the [ClusterService.List] method.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Format schema name.
+ string format_schema_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_-]*"];
+
+ // Schema type. Possible values are the following:
+ //
+ // * FORMAT_SCHEMA_TYPE_PROTOBUF - [Protobuf](https://protobuf.dev/) data format (including [ProtobufSingle](https://clickhouse.com/docs/en/interfaces/formats#protobufsingle)).
+ // * FORMAT_SCHEMA_TYPE_CAPNPROTO - [Cap'n Proto](https://capnproto.org/) data format.
+ FormatSchemaType type = 3 [(required) = true];
+
+ // [Link to the file](/docs/managed-clickhouse/operations/s3-access#get-link-to-object) of a format schema in Yandex Object Storage. Managed Service for ClickHouse works only with format schemas imported to Object Storage.
+ string uri = 4 [(required) = true];
+}
+
+message CreateFormatSchemaMetadata {
+ // ClickHouse cluster ID.
+ string cluster_id = 1;
+
+ // Format schema name.
+ string format_schema_name = 2;
+}
+
+message UpdateFormatSchemaRequest {
+ // ClickHouse cluster ID.
+ //
+ // To get a ClickHouse cluster ID, use the [ClusterService.List] method.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Format schema name.
+ //
+ // To get a format schema name, use the [FormatSchemaService.List] method.
+ string format_schema_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_-]*"];
+
+ google.protobuf.FieldMask update_mask = 3;
+
+ // [Link to the file](/docs/managed-clickhouse/operations/s3-access#get-link-to-object) of a format schema in Yandex Object Storage. Managed Service for ClickHouse works only with format schemas imported to Object Storage.
+ string uri = 4;
+}
+
+message UpdateFormatSchemaMetadata {
+ // ClickHouse cluster ID.
+ string cluster_id = 1;
+
+ // Format schema name.
+ string format_schema_name = 2;
+}
+
+message DeleteFormatSchemaRequest {
+ // ClickHouse cluster ID.
+ //
+ // To get a ClickHouse cluster ID, use the [ClusterService.List] method.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Format schema name.
+ //
+ // To get a format schema name, use the [FormatSchemaService.List] method.
+ string format_schema_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_-]*"];
+}
+
+message DeleteFormatSchemaMetadata {
+ // ClickHouse cluster ID.
+ string cluster_id = 1;
+
+ // Format schema name.
+ string format_schema_name = 2;
+}
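
A minimal usage sketch for the service above, assuming the standard protoc-generated Python stubs for these protos and the yandexcloud SDK that this commit imports; the token, cluster ID, and bucket are placeholders, and FORMAT_SCHEMA_TYPE_PROTOBUF is assumed to be a file-level enum value in format_schema.proto:

    import yandexcloud
    from yandex.cloud.mdb.clickhouse.v1.format_schema_pb2 import FORMAT_SCHEMA_TYPE_PROTOBUF
    from yandex.cloud.mdb.clickhouse.v1.format_schema_service_pb2 import CreateFormatSchemaRequest
    from yandex.cloud.mdb.clickhouse.v1.format_schema_service_pb2_grpc import FormatSchemaServiceStub

    sdk = yandexcloud.SDK(token="<OAuth token>")          # placeholder credentials
    format_schemas = sdk.client(FormatSchemaServiceStub)

    # Create() returns an operation.Operation; per the yandex.cloud.api.operation
    # option above, its response unpacks to a FormatSchema once the operation is done.
    op = format_schemas.Create(CreateFormatSchemaRequest(
        cluster_id="<cluster ID>",                        # required, <=50 characters
        format_schema_name="event_schema",                # <=63 characters, [a-zA-Z0-9_-]*
        type=FORMAT_SCHEMA_TYPE_PROTOBUF,
        uri="https://storage.yandexcloud.net/<bucket>/schema.proto",
    ))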
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/maintenance.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/maintenance.proto
new file mode 100644
index 0000000000..547286f8f7
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/maintenance.proto
@@ -0,0 +1,55 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+import "google/protobuf/timestamp.proto";
+
+import "yandex/cloud/validation.proto";
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+// Maintenance window settings.
+message MaintenanceWindow {
+ // The maintenance policy in effect.
+ oneof policy {
+ option (exactly_one) = true;
+
+ // Maintenance operation can be scheduled anytime.
+ AnytimeMaintenanceWindow anytime = 1;
+
+ // Maintenance operation can be scheduled on a weekly basis.
+ WeeklyMaintenanceWindow weekly_maintenance_window = 2;
+ }
+}
+
+message AnytimeMaintenanceWindow {}
+
+// Weekly maintenance window settings.
+message WeeklyMaintenanceWindow {
+ enum WeekDay {
+ WEEK_DAY_UNSPECIFIED = 0;
+ MON = 1;
+ TUE = 2;
+ WED = 3;
+ THU = 4;
+ FRI = 5;
+ SAT = 6;
+ SUN = 7;
+ }
+
+ // Day of the week (in `DDD` format).
+ WeekDay day = 1;
+
+ // Hour of the day in UTC (in `HH` format).
+ int64 hour = 2 [(value) = "1-24"];
+}
+
+// A planned maintenance operation.
+message MaintenanceOperation {
+ // Information about this maintenance operation.
+ string info = 1 [(length) = "<=256"];
+
+ // Time until which this maintenance operation is delayed.
+ google.protobuf.Timestamp delayed_until = 2;
+}
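
For illustration, a sketch of constructing these messages from Python (standard protoc-generated module name assumed); because of the (exactly_one) option, exactly one policy field of the oneof must be set:

    from yandex.cloud.mdb.clickhouse.v1.maintenance_pb2 import (
        AnytimeMaintenanceWindow,
        MaintenanceWindow,
        WeeklyMaintenanceWindow,
    )

    # Weekly policy: maintenance on Mondays, in the 01:00 UTC window.
    weekly = MaintenanceWindow(
        weekly_maintenance_window=WeeklyMaintenanceWindow(
            day=WeeklyMaintenanceWindow.MON,
            hour=1,  # 1-24, hour of the day in UTC
        )
    )

    # Anytime policy: assigning one oneof field clears the other.
    anytime = MaintenanceWindow(anytime=AnytimeMaintenanceWindow())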
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/ml_model.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/ml_model.proto
new file mode 100644
index 0000000000..42360f335b
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/ml_model.proto
@@ -0,0 +1,27 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+message MlModel {
+ // Name of the model.
+ string name = 1;
+
+ // ID of the ClickHouse cluster that the model belongs to.
+ string cluster_id = 2;
+
+ // Type of the model.
+ MlModelType type = 3;
+
+ // Model file URL. You can only use models stored in Object Storage.
+ string uri = 4;
+}
+
+enum MlModelType {
+ ML_MODEL_TYPE_UNSPECIFIED = 0;
+
+ // CatBoost model.
+ ML_MODEL_TYPE_CATBOOST = 1;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/ml_model_service.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/ml_model_service.proto
new file mode 100644
index 0000000000..8b2c9e22e4
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/ml_model_service.proto
@@ -0,0 +1,157 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/field_mask.proto";
+import "yandex/cloud/api/operation.proto";
+import "yandex/cloud/operation/operation.proto";
+import "yandex/cloud/validation.proto";
+import "yandex/cloud/mdb/clickhouse/v1/ml_model.proto";
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+// A set of methods for managing machine learning models.
+service MlModelService {
+ // Returns the specified machine learning model.
+ //
+ // To get the list of all available models, make a [List] request.
+ rpc Get (GetMlModelRequest) returns (MlModel) {
+ option (google.api.http) = { get: "/managed-clickhouse/v1/clusters/{cluster_id}/mlModels/{ml_model_name}" };
+ }
+
+ // Retrieves the list of machine learning models in the specified cluster.
+ rpc List (ListMlModelsRequest) returns (ListMlModelsResponse) {
+ option (google.api.http) = { get: "/managed-clickhouse/v1/clusters/{cluster_id}/mlModels" };
+ }
+
+ // Creates a machine learning model in the specified cluster.
+ rpc Create (CreateMlModelRequest) returns (operation.Operation) {
+ option (google.api.http) = { post: "/managed-clickhouse/v1/clusters/{cluster_id}/mlModels" body: "*" };
+ option (yandex.cloud.api.operation) = {
+ metadata: "CreateMlModelMetadata"
+ response: "MlModel"
+ };
+ }
+
+ // Updates the specified machine learning model.
+ rpc Update (UpdateMlModelRequest) returns (operation.Operation) {
+ option (google.api.http) = { patch: "/managed-clickhouse/v1/clusters/{cluster_id}/mlModels/{ml_model_name}" body: "*" };
+ option (yandex.cloud.api.operation) = {
+ metadata: "UpdateMlModelMetadata"
+ response: "MlModel"
+ };
+ }
+
+ // Deletes the specified machine learning model.
+ rpc Delete (DeleteMlModelRequest) returns (operation.Operation) {
+ option (google.api.http) = { delete: "/managed-clickhouse/v1/clusters/{cluster_id}/mlModels/{ml_model_name}" };
+ option (yandex.cloud.api.operation) = {
+ metadata: "DeleteMlModelMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+}
+
+message GetMlModelRequest {
+ // ID of the cluster that the model belongs to.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Name of the model to return.
+ //
+ // To get a model name, make a [MlModelService.List] request.
+ string ml_model_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_-]*"];
+}
+
+message ListMlModelsRequest {
+ // ID of the cluster that the models belong to.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // The maximum number of results per page to return. If the number of available
+ // results is larger than `page_size`, the service returns a [ListMlModelsResponse.next_page_token]
+ // that can be used to get the next page of results in subsequent list requests.
+ // Default value: 100.
+ int64 page_size = 2 [(value) = "<=1000"];
+
+ // Page token. To get the next page of results, set `page_token` to the
+ // [ListMlModelsResponse.next_page_token] returned by the previous list request.
+ string page_token = 3 [(length) = "<=100"];
+}
+
+message ListMlModelsResponse {
+ // List of models in the specified cluster.
+ repeated MlModel ml_models = 1;
+
+ // Token for getting the next page of the list. If the number of results is greater than
+ // the specified [ListMlModelsRequest.page_size], use `next_page_token` as the value
+ // for the [ListMlModelsRequest.page_token] parameter in the next list request.
+ //
+ // Each subsequent page will have its own `next_page_token` to continue paging through the results.
+ string next_page_token = 2;
+}
+
+message CreateMlModelRequest {
+ // ID of the cluster to create a model in.
+ //
+ // To get a cluster ID, make a [ClusterService.List] request.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Model name. The model name is one of the arguments of the modelEvaluate() function, which is used to call the model in ClickHouse.
+ string ml_model_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_-]*"];
+
+ // Type of the model.
+ MlModelType type = 3 [(required) = true];
+
+ // Model file URL. You can only use models stored in Object Storage.
+ string uri = 4 [(required) = true];
+}
+
+message CreateMlModelMetadata {
+ // ID of the cluster that a model is being added to.
+ string cluster_id = 1;
+
+ // Name of the model that is being created.
+ string ml_model_name = 2;
+}
+
+message UpdateMlModelRequest {
+ // ID of the cluster to update the model in.
+ //
+ // To get a cluster ID, make a [ClusterService.List] request.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Name of the model to update.
+ string ml_model_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_-]*"];
+
+ google.protobuf.FieldMask update_mask = 3;
+
+ // The new model file URL. You can only use models stored in Object Storage.
+ string uri = 4;
+}
+
+message UpdateMlModelMetadata {
+ // ID of the cluster that contains the model being updated.
+ string cluster_id = 1;
+
+ // Name of the model that is being updated.
+ string ml_model_name = 2;
+}
+
+message DeleteMlModelRequest {
+ // ID of the cluster to delete the model from.
+ //
+ // To get a cluster ID, make a [ClusterService.List] request.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Name of the model to delete.
+ string ml_model_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_-]*"];
+}
+
+message DeleteMlModelMetadata {
+ // ID of the cluster that contains the model being deleted.
+ string cluster_id = 1;
+
+ // Name of the model that is being deleted.
+ string ml_model_name = 2;
+}
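
A sketch of creating a CatBoost model through this service, under the same assumptions as the format-schema example earlier (generated Python stubs, an `sdk` built with yandexcloud.SDK, placeholder IDs and URIs); the model name becomes the first argument of modelEvaluate() in SQL:

    from yandex.cloud.mdb.clickhouse.v1.ml_model_pb2 import ML_MODEL_TYPE_CATBOOST
    from yandex.cloud.mdb.clickhouse.v1.ml_model_service_pb2 import CreateMlModelRequest
    from yandex.cloud.mdb.clickhouse.v1.ml_model_service_pb2_grpc import MlModelServiceStub

    ml_models = sdk.client(MlModelServiceStub)
    op = ml_models.Create(CreateMlModelRequest(
        cluster_id="<cluster ID>",
        ml_model_name="churn_model",  # callable in queries as modelEvaluate('churn_model', ...)
        type=ML_MODEL_TYPE_CATBOOST,
        uri="https://storage.yandexcloud.net/<bucket>/churn.cbm",
    ))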
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/resource_preset.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/resource_preset.proto
new file mode 100644
index 0000000000..0db9bc3d75
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/resource_preset.proto
@@ -0,0 +1,21 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+// A ResourcePreset resource for describing hardware configuration presets.
+message ResourcePreset {
+ // ID of the ResourcePreset resource.
+ string id = 1;
+
+ // IDs of availability zones where the resource preset is available.
+ repeated string zone_ids = 2;
+
+ // Number of CPU cores for a ClickHouse host created with the preset.
+ int64 cores = 3;
+
+ // RAM volume for a ClickHouse host created with the preset, in bytes.
+ int64 memory = 4;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/resource_preset_service.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/resource_preset_service.proto
new file mode 100644
index 0000000000..cdce2359cd
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/resource_preset_service.proto
@@ -0,0 +1,53 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+import "google/api/annotations.proto";
+import "yandex/cloud/validation.proto";
+import "yandex/cloud/mdb/clickhouse/v1/resource_preset.proto";
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+// A set of methods for managing ResourcePreset resources.
+service ResourcePresetService {
+ // Returns the specified ResourcePreset resource.
+ //
+ // To get the list of available ResourcePreset resources, make a [List] request.
+ rpc Get (GetResourcePresetRequest) returns (ResourcePreset) {
+ option (google.api.http) = { get: "/managed-clickhouse/v1/resourcePresets/{resource_preset_id}" };
+ }
+
+ // Retrieves the list of available ResourcePreset resources.
+ rpc List (ListResourcePresetsRequest) returns (ListResourcePresetsResponse) {
+ option (google.api.http) = { get: "/managed-clickhouse/v1/resourcePresets" };
+ }
+}
+
+message GetResourcePresetRequest {
+ // ID of the resource preset to return.
+ // To get the resource preset ID, use a [ResourcePresetService.List] request.
+ string resource_preset_id = 1 [(required) = true, (length) = "<=50"];
+}
+
+message ListResourcePresetsRequest {
+ // The maximum number of results per page to return. If the number of available
+ // results is larger than [page_size], the service returns a [ListResourcePresetsResponse.next_page_token]
+ // that can be used to get the next page of results in subsequent list requests.
+ int64 page_size = 2 [(value) = "<=1000"];
+
+ // Page token. To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token]
+ // returned by the previous list request.
+ string page_token = 3 [(length) = "<=100"];
+}
+
+message ListResourcePresetsResponse {
+ // List of ResourcePreset resources.
+ repeated ResourcePreset resource_presets = 1;
+
+ // This token allows you to get the next page of results for list requests. If the number of results
+ // is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value
+ // for the [ListResourcePresetsRequest.page_token] parameter in the next list request. Each subsequent
+ // list request will have its own [next_page_token] to continue paging through the results.
+ string next_page_token = 2;
+}
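
The page_size/page_token pair follows the usual list-pagination contract: request a page, then feed next_page_token back until it comes back empty. A sketch of draining all pages (generated Python stubs assumed, `sdk` as in the earlier sketches):

    from yandex.cloud.mdb.clickhouse.v1.resource_preset_service_pb2 import ListResourcePresetsRequest
    from yandex.cloud.mdb.clickhouse.v1.resource_preset_service_pb2_grpc import ResourcePresetServiceStub

    presets = sdk.client(ResourcePresetServiceStub)
    page_token = ""
    while True:
        resp = presets.List(ListResourcePresetsRequest(page_size=1000, page_token=page_token))
        for preset in resp.resource_presets:
            print(preset.id, preset.cores, preset.memory)  # memory is in bytes
        if not resp.next_page_token:  # an empty token marks the last page
            break
        page_token = resp.next_page_token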
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/user.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/user.proto
new file mode 100644
index 0000000000..935168ee2a
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/user.proto
@@ -0,0 +1,1020 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+import "google/protobuf/wrappers.proto";
+import "yandex/cloud/validation.proto";
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+// A ClickHouse User resource. For more information, see
+// the [Developer's guide](/docs/managed-clickhouse/concepts).
+message User {
+ // Name of the ClickHouse user.
+ string name = 1;
+
+ // ID of the ClickHouse cluster the user belongs to.
+ string cluster_id = 2;
+
+ // Set of permissions granted to the user.
+ repeated Permission permissions = 3;
+
+ UserSettings settings = 4;
+
+ // Set of quotas assigned to the user.
+ repeated UserQuota quotas = 5;
+}
+
+message Permission {
+ reserved 2;
+
+ // Name of the database that the permission grants access to.
+ string database_name = 1;
+}
+
+message UserSpec {
+ // Name of the ClickHouse user.
+ string name = 1 [
+ (required) = true,
+ (length) = "<=63",
+ (pattern) = "[a-zA-Z0-9_][a-zA-Z0-9_-]*"
+ ];
+
+ // Password of the ClickHouse user.
+ string password = 2 [
+ (required) = true,
+ (length) = "8-128"
+ ];
+
+ // Set of permissions to grant to the user. If not set, the user is granted access to all databases.
+ repeated Permission permissions = 3;
+
+ UserSettings settings = 4;
+
+ // Set of quotas assigned to the user.
+ repeated UserQuota quotas = 5;
+}
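
A sketch of filling in a UserSpec that satisfies the validation rules above (the user name, password, and database are illustrative):

    from yandex.cloud.mdb.clickhouse.v1.user_pb2 import Permission, UserSpec

    spec = UserSpec(
        name="analytics",               # [a-zA-Z0-9_][a-zA-Z0-9_-]*, <=63 characters
        password="<8-128 characters>",  # required, length 8-128
        permissions=[Permission(database_name="events")],
        # leaving `permissions` unset grants access to all databases
    )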
+
+// ClickHouse user settings. Supported settings are a limited subset of all settings
+// described in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/).
+message UserSettings {
+ reserved 82 to 83;
+ enum OverflowMode {
+ OVERFLOW_MODE_UNSPECIFIED = 0;
+
+ OVERFLOW_MODE_THROW = 1;
+
+ OVERFLOW_MODE_BREAK = 2;
+ }
+
+ enum GroupByOverflowMode {
+ GROUP_BY_OVERFLOW_MODE_UNSPECIFIED = 0;
+
+ GROUP_BY_OVERFLOW_MODE_THROW = 1;
+
+ GROUP_BY_OVERFLOW_MODE_BREAK = 2;
+
+ GROUP_BY_OVERFLOW_MODE_ANY = 3;
+ }
+
+ enum DistributedProductMode {
+ DISTRIBUTED_PRODUCT_MODE_UNSPECIFIED = 0;
+
+ // Default value. Prohibits using these types of subqueries (returns the "Double-distributed in/JOIN subqueries is denied" exception).
+ DISTRIBUTED_PRODUCT_MODE_DENY = 1;
+
+ // Replaces the database and table in the subquery with local ones for the destination server (shard), leaving the normal IN/JOIN.
+ DISTRIBUTED_PRODUCT_MODE_LOCAL = 2;
+
+ // Replaces the IN/JOIN query with GLOBAL IN/GLOBAL JOIN.
+ DISTRIBUTED_PRODUCT_MODE_GLOBAL = 3;
+
+ // Allows the use of these types of subqueries.
+ DISTRIBUTED_PRODUCT_MODE_ALLOW = 4;
+ }
+
+ enum QuotaMode {
+ QUOTA_MODE_UNSPECIFIED = 0;
+ QUOTA_MODE_DEFAULT = 1;
+ QUOTA_MODE_KEYED = 2;
+ QUOTA_MODE_KEYED_BY_IP = 3;
+ }
+
+ enum CountDistinctImplementation {
+ COUNT_DISTINCT_IMPLEMENTATION_UNSPECIFIED = 0;
+ COUNT_DISTINCT_IMPLEMENTATION_UNIQ = 1;
+ COUNT_DISTINCT_IMPLEMENTATION_UNIQ_COMBINED = 2;
+ COUNT_DISTINCT_IMPLEMENTATION_UNIQ_COMBINED_64 = 3;
+ COUNT_DISTINCT_IMPLEMENTATION_UNIQ_HLL_12 = 4;
+ COUNT_DISTINCT_IMPLEMENTATION_UNIQ_EXACT = 5;
+ }
+
+ enum JoinAlgorithm {
+ JOIN_ALGORITHM_UNSPECIFIED = 0;
+ JOIN_ALGORITHM_HASH = 1;
+ JOIN_ALGORITHM_PARALLEL_HASH = 2;
+ JOIN_ALGORITHM_PARTIAL_MERGE = 3;
+ JOIN_ALGORITHM_DIRECT = 4;
+ JOIN_ALGORITHM_AUTO = 5;
+ JOIN_ALGORITHM_FULL_SORTING_MERGE = 6;
+ JOIN_ALGORITHM_PREFER_PARTIAL_MERGE = 7;
+ }
+
+ enum FormatRegexpEscapingRule {
+ FORMAT_REGEXP_ESCAPING_RULE_UNSPECIFIED = 0;
+ FORMAT_REGEXP_ESCAPING_RULE_ESCAPED = 1;
+ FORMAT_REGEXP_ESCAPING_RULE_QUOTED = 2;
+ FORMAT_REGEXP_ESCAPING_RULE_CSV = 3;
+ FORMAT_REGEXP_ESCAPING_RULE_JSON = 4;
+ FORMAT_REGEXP_ESCAPING_RULE_XML = 5;
+ FORMAT_REGEXP_ESCAPING_RULE_RAW = 6;
+ }
+
+ enum DateTimeInputFormat {
+ DATE_TIME_INPUT_FORMAT_UNSPECIFIED = 0;
+ DATE_TIME_INPUT_FORMAT_BEST_EFFORT = 1;
+ DATE_TIME_INPUT_FORMAT_BASIC = 2;
+ DATE_TIME_INPUT_FORMAT_BEST_EFFORT_US = 3;
+ }
+
+ enum DateTimeOutputFormat {
+ DATE_TIME_OUTPUT_FORMAT_UNSPECIFIED = 0;
+ DATE_TIME_OUTPUT_FORMAT_SIMPLE = 1;
+ DATE_TIME_OUTPUT_FORMAT_ISO = 2;
+ DATE_TIME_OUTPUT_FORMAT_UNIX_TIMESTAMP = 3;
+ }
+
+ enum LocalFilesystemReadMethod {
+ LOCAL_FILESYSTEM_READ_METHOD_UNSPECIFIED = 0;
+ LOCAL_FILESYSTEM_READ_METHOD_READ = 1;
+ LOCAL_FILESYSTEM_READ_METHOD_PREAD_THREADPOOL = 2;
+ LOCAL_FILESYSTEM_READ_METHOD_PREAD = 3;
+ LOCAL_FILESYSTEM_READ_METHOD_NMAP = 4;
+ }
+
+ enum RemoteFilesystemReadMethod {
+ REMOTE_FILESYSTEM_READ_METHOD_UNSPECIFIED = 0;
+ REMOTE_FILESYSTEM_READ_METHOD_READ = 1;
+ REMOTE_FILESYSTEM_READ_METHOD_THREADPOOL = 2;
+ }
+
+ // Restricts permissions for non-DDL queries. To restrict permissions for DDL queries, use [allow_ddl] instead.
+ // * **0** (default)-no restrictions.
+ // * **1**-only read data queries are allowed.
+ // * **2**-read data and change settings queries are allowed.
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/permissions-for-queries/#settings_readonly).
+ google.protobuf.Int64Value readonly = 1 [(value) = "0-2"];
+
+ // Determines whether DDL queries are allowed (e.g., **CREATE**, **ALTER**, **RENAME**, etc.).
+ //
+ // Default value: **true**.
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/permissions-for-queries/#settings_allow_ddl).
+ google.protobuf.BoolValue allow_ddl = 2;
+
+ // Enables [introspection functions](https://clickhouse.com/docs/en/sql-reference/functions/introspection) for query profiling.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-allow_introspection_functions).
+ google.protobuf.BoolValue allow_introspection_functions = 96;
+
+ // Connection timeout in milliseconds.
+ //
+ // Value must be greater than **0** (default: **10000**, 10 seconds).
+ google.protobuf.Int64Value connect_timeout = 39 [(value) = ">0"];
+
+ // The timeout in milliseconds for connecting to a remote server for a Distributed table engine. Applies only if the cluster uses sharding and replication. If unsuccessful, several attempts are made to connect to various replicas.
+ //
+ // Default value: **50**.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#connect-timeout-with-failover-ms).
+ google.protobuf.Int64Value connect_timeout_with_failover = 97 [(value) = ">0"];
+
+ // Receive timeout in milliseconds.
+ //
+ // Value must be greater than **0** (default: **300000**, 300 seconds or 5 minutes).
+ google.protobuf.Int64Value receive_timeout = 40 [(value) = ">0"];
+
+ // Send timeout in milliseconds.
+ //
+ // Value must be greater than **0** (default: **300000**, 300 seconds or 5 minutes).
+ google.protobuf.Int64Value send_timeout = 41 [(value) = ">0"];
+
+ // Timeout (in seconds) between checks of execution speed. Checks that the execution speed is not lower than the value specified in the [min_execution_speed] parameter.
+ //
+ // Default value: **10**.
+ google.protobuf.Int64Value timeout_before_checking_execution_speed = 98;
+
+ // Enables or disables write quorum for ClickHouse cluster.
+ // If the value is less than **2**, then write quorum is disabled, otherwise it is enabled.
+ //
+ // When used, write quorum guarantees that ClickHouse has written data to the quorum of **insert_quorum** replicas with no errors until the [insert_quorum_timeout] expires.
+ // All replicas in the quorum are in a consistent state, meaning that they contain linearized data from the previous **INSERT** queries.
+ // Use write quorum if you need a guarantee that the written data is not lost when one or more replicas fail.
+ //
+ // You can use [select_sequential_consistency] setting to read the data written with write quorum.
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-insert_quorum).
+ google.protobuf.Int64Value insert_quorum = 3 [(value) = ">=0"];
+
+ // Quorum write timeout in milliseconds.
+ //
+ // If write quorum is enabled in the cluster and this timeout expires before the data is written to the [insert_quorum] replicas, ClickHouse aborts the execution of the **INSERT** query and returns an error.
+ // In this case, the client must send the query again to write the data block into the same or another replica.
+ //
+ // Minimum value: **1000**, 1 second (default: **60000**, 1 minute).
+ google.protobuf.Int64Value insert_quorum_timeout = 4 [(value) = ">=1000"];
+
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-insert_quorum_parallel).
+ google.protobuf.BoolValue insert_quorum_parallel = 99;
+
+ // Enables the insertion of default values instead of NULL into columns with a non-nullable data type.
+ //
+ // Default value: **true**.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#insert_null_as_default).
+ google.protobuf.BoolValue insert_null_as_default = 100;
+
+ // Determines the behavior of **SELECT** queries from the replicated table: if enabled, ClickHouse terminates a query with an error message if the replica does not have a chunk written with the quorum, and does not read parts that have not yet been written with the quorum.
+ //
+ // Default value: **false** (sequential consistency is disabled).
+ google.protobuf.BoolValue select_sequential_consistency = 5;
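
Because these fields use google.protobuf wrapper types, an unset setting is distinguishable from an explicit zero. A sketch of enabling quorum writes together with sequentially consistent reads (values are illustrative):

    from google.protobuf.wrappers_pb2 import BoolValue, Int64Value
    from yandex.cloud.mdb.clickhouse.v1.user_pb2 import UserSettings

    settings = UserSettings(
        insert_quorum=Int64Value(value=2),              # write to a quorum of 2 replicas
        insert_quorum_timeout=Int64Value(value=60000),  # milliseconds; minimum 1000
        select_sequential_consistency=BoolValue(value=True),
    )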
+
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-deduplicate-blocks-in-dependent-materialized-views).
+ google.protobuf.BoolValue deduplicate_blocks_in_dependent_materialized_views = 101;
+
+ // Wait mode for asynchronous actions in **ALTER** queries on replicated tables:
+ //
+ // * **0**-do not wait for replicas.
+ // * **1**-only wait for own execution (default).
+ // * **2**-wait for all replicas.
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/sql-reference/statements/alter/#synchronicity-of-alter-queries).
+ google.protobuf.Int64Value replication_alter_partitions_sync = 42 [(value) = "0-2"];
+
+ // Max replica delay in milliseconds. If a replica lags more than the set value, this replica is not used and becomes a stale one.
+ //
+ // Minimum value: **1000**, 1 second (default: **300000**, 300 seconds or 5 minutes).
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-max_replica_delay_for_distributed_queries).
+ google.protobuf.Int64Value max_replica_delay_for_distributed_queries = 6 [(value) = ">=1000"];
+
+ // Enables or disables forcing a query to a stale replica when up-to-date data is unavailable.
+ // If enabled, ClickHouse chooses the most up-to-date replica and forces the query to use the data from this replica.
+ // This setting can be used when running a **SELECT** query from a distributed table that points to replicated tables.
+ //
+ // Default value: **true** (query forcing is enabled).
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-fallback_to_stale_replicas_for_distributed_queries).
+ google.protobuf.BoolValue fallback_to_stale_replicas_for_distributed_queries = 7;
+
+ // Determines the behavior of distributed subqueries.
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#distributed-product-mode).
+ DistributedProductMode distributed_product_mode = 43;
+
+ // Enables or disables the memory-saving mode for distributed aggregation.
+ //
+ // When ClickHouse works with a distributed query, external aggregation is done on remote servers.
+ // Enable this setting to achieve a smaller memory footprint on the server that sourced such a distributed query.
+ //
+ // Default value: **false** (memory saving mode is disabled).
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/sql-reference/statements/select/group-by/#select-group-by-in-external-memory).
+ google.protobuf.BoolValue distributed_aggregation_memory_efficient = 72;
+
+ // Timeout for DDL queries, in milliseconds.
+ google.protobuf.Int64Value distributed_ddl_task_timeout = 73;
+
+ // Enables or disables silent skipping of unavailable shards.
+ //
+ // A shard is considered unavailable if all its replicas are also unavailable.
+ //
+ // Default value: **false** (silent skipping is disabled).
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-skip_unavailable_shards).
+ google.protobuf.BoolValue skip_unavailable_shards = 81;
+
+ // Enables or disables expression compilation.
+ // If you execute a lot of queries that contain identical expressions, then enable this setting.
+ // As a result, such queries may be executed faster due to the use of compiled expressions.
+ //
+ // Use this setting in combination with [min_count_to_compile_expression] setting.
+ //
+ // Default value: **false** (expression compilation is disabled).
+ google.protobuf.BoolValue compile_expressions = 46;
+
+ // How many identical expressions ClickHouse has to encounter before they are compiled.
+ //
+ // Minimum value: **0** (default: **3**).
+ //
+ // For the **0** value, compilation is synchronous: a query waits for the expression compilation process to complete prior to continuing execution.
+ // It is recommended to set this value only for testing purposes.
+ //
+ // For all other values, compilation is asynchronous: the compilation process executes in a separate thread.
+ // When a compiled expression is ready, it will be used by ClickHouse for eligible queries, including the ones that are currently running.
+ google.protobuf.Int64Value min_count_to_compile_expression = 47 [(value) = ">=0"];
+
+ // The maximum block size for reading.
+ //
+ // Data in ClickHouse is organized and processed by blocks (a block is a set of column parts).
+ // The internal processing cycles for a single block are efficient enough, but there are noticeable expenditures on each block.
+ //
+ // This setting is a recommendation for the size of a block (in number of rows) that should be loaded from tables.
+ //
+ // Value must be greater than **0** (default: **65536**).
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#setting-max_block_size).
+ google.protobuf.Int64Value max_block_size = 9 [(value) = ">0"];
+
+ // Limits the minimum number of rows in a block to be inserted into a table by an **INSERT** query.
+ // Blocks that are smaller than the specified value will be squashed together into bigger blocks.
+ //
+ // Minimal value: **0**, block squashing is disabled (default: **1048576**).
+ google.protobuf.Int64Value min_insert_block_size_rows = 48 [(value) = ">=0"];
+
+ // Limits the minimum number of bytes in a block to be inserted into a table by an **INSERT** query.
+ // Blocks that are smaller than the specified value will be squashed together into bigger blocks.
+ //
+ // Minimal value: **0**, block squashing is disabled (default: **268435456**, 256 MB).
+ google.protobuf.Int64Value min_insert_block_size_bytes = 49 [(value) = ">=0"];
+
+ // Allows forming blocks of the specified size (in bytes) when inserting data into a table.
+ // This setting has effect only if the server creates such blocks by itself.
+ //
+ // Value must be greater than **0** (default: **1048576**).
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-max_insert_block_size).
+ google.protobuf.Int64Value max_insert_block_size = 10 [(value) = ">0"];
+
+ // Limits the minimum number of bytes to enable unbuffered direct reads from disk (Direct I/O).
+ //
+ // By default, ClickHouse does not read data directly from disk, but relies on the filesystem and its cache instead.
+ // Such a reading strategy is effective when the data volume is small.
+ // If the amount of the data to read is huge, it is more effective to read directly from the disk, bypassing the filesystem cache.
+ //
+ // If the total amount of the data to read is greater than the value of this setting, then ClickHouse will fetch this data directly from the disk.
+ //
+ // Minimal value and default value: **0**, Direct I/O is disabled.
+ google.protobuf.Int64Value min_bytes_to_use_direct_io = 50 [(value) = ">=0"];
+
+ // Determines whether to use the cache of uncompressed blocks.
+ // Using this cache can significantly reduce latency and increase throughput when a huge number of small queries is processed.
+ // Enable this setting for users who run small queries frequently.
+ //
+ // This setting has effect only for tables of the MergeTree family.
+ //
+ // Default value: **false** (uncompressed cache is disabled).
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#setting-use_uncompressed_cache).
+ google.protobuf.BoolValue use_uncompressed_cache = 51;
+
+ // Limits the maximum size in rows of the request that can use the cache of uncompressed data. The cache is not used for requests larger
+ // than the specified value.
+ //
+ // Use this setting in combination with [use_uncompressed_cache] setting.
+ //
+ // Value must be greater than **0** (default: **128x8192**).
+ google.protobuf.Int64Value merge_tree_max_rows_to_use_cache = 52 [(value) = ">0"];
+
+ // Limits the maximum size in bytes of the request that can use the cache of uncompressed data. The cache is not used for requests larger
+ // than the specified value.
+ //
+ // Use this setting in combination with [use_uncompressed_cache] setting.
+ //
+ // Value must be greater than **0** (default: **192x10x1024x1024**).
+ google.protobuf.Int64Value merge_tree_max_bytes_to_use_cache = 53 [(value) = ">0"];
+
+ // Limits the minimum number of rows to be read from a file to enable concurrent read.
+ // If the number of rows to be read exceeds this value, then ClickHouse will try to use a few threads to read from a file concurrently.
+ //
+ // This setting has effect only for tables of the MergeTree family.
+ //
+ // Value must be greater than **0** (default: **20x8192**).
+ google.protobuf.Int64Value merge_tree_min_rows_for_concurrent_read = 54 [(value) = ">0"];
+
+ // Limits the minimum number of bytes to be read from a file to enable concurrent read.
+ // If the number of bytes to be read exceeds this value, then ClickHouse will try to use a few threads to read from a file concurrently.
+ //
+ // This setting has effect only for tables of the MergeTree family.
+ //
+ // Value must be greater than **0** (default: **24x10x1024x1024**).
+ google.protobuf.Int64Value merge_tree_min_bytes_for_concurrent_read = 55 [(value) = ">0"];
+
+ // Sets the threshold of RAM consumption (in bytes) after which the temporary data collected during the **GROUP BY** operation is flushed to disk to limit RAM consumption.
+ //
+ // By default, aggregation is done by employing a hash table that resides in RAM.
+ // A query can result in aggregation of huge data volumes that can lead to memory exhaustion and the query being aborted (see the [max_memory_usage] setting).
+ // For such queries, you can use this setting to force ClickHouse to do flushing and complete aggregation successfully.
+ //
+ // Minimal value and default value: **0**, **GROUP BY** in the external memory is disabled.
+ //
+ // When using aggregation in external memory, it is recommended to set this setting to half of the [max_memory_usage] setting value (by default, the maximum memory usage is limited to ten gigabytes).
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/sql-reference/statements/select/group-by/#select-group-by-in-external-memory).
+ //
+ // See also: the [distributed_aggregation_memory_efficient] setting.
+ google.protobuf.Int64Value max_bytes_before_external_group_by = 74;
+
+ // This setting is the equivalent of the [max_bytes_before_external_group_by] setting, except that it applies to the sort operation (**ORDER BY**) rather than to aggregation.
+ google.protobuf.Int64Value max_bytes_before_external_sort = 75;
+
+ // Sets the threshold on the number of keys after which two-level aggregation is used.
+ //
+ // Minimal value: **0**, threshold is not set (default: **10000**).
+ google.protobuf.Int64Value group_by_two_level_threshold = 76;
+
+ // Sets the threshold on the number of bytes after which two-level aggregation is used.
+ //
+ // Minimal value: **0**, threshold is not set (default: **100000000**).
+ google.protobuf.Int64Value group_by_two_level_threshold_bytes = 77;
+
+ // Sets the priority of a query.
+ //
+ // * **0**-priority is not used.
+ // * **1**-the highest priority.
+ // * and so on. The higher the number, the lower a query's priority.
+ //
+ // This setting should be set up for each query individually.
+ //
+ // If ClickHouse is processing high-priority queries and a low-priority query arrives, the low-priority query is paused until the higher-priority queries are completed.
+ //
+ // Minimal value and default value: **0**, priority is not used.
+ google.protobuf.Int64Value priority = 56 [(value) = ">=0"];
+
+ // Limits the maximum number of threads to process the request (the setting does not take into account threads that read data from remote servers).
+ //
+ // This setting applies to threads that perform the same stages of the query processing pipeline in parallel.
+ //
+ // Minimal value and default value: **0** (the thread number is calculated automatically based on the number of physical CPU cores, no HyperThreading cores are taken into account).
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-max_threads).
+ google.protobuf.Int64Value max_threads = 8 [(value) = ">0"];
+
+ // Limits the maximum memory usage (in bytes) for processing of a single user's query on a single server.
+ // This setting does not take the server's free RAM or total RAM into account.
+ //
+ // This limitation is enforced for any user's single query on a single server.
+ //
+ // Minimal value: **0**, no limitation is set.
+ // Value that is set in the ClickHouse default config file: **10737418240** (10 GB).
+ //
+ // If you use the [max_bytes_before_external_group_by] or [max_bytes_before_external_sort] settings, it is recommended to set their values to half of the [max_memory_usage] setting value.
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/query-complexity/#settings_max_memory_usage).
+ google.protobuf.Int64Value max_memory_usage = 11 [(value) = ">=0"];
+
+ // Limits the maximum memory usage (in bytes) for processing of a user's queries on a single server.
+ // This setting does not take the server's free RAM or total RAM into account.
+ //
+ // This limitation is enforced for all queries that belong to one user and run simultaneously on a single server.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_memory_usage_for_user = 12 [(value) = ">=0"];
+
+ // The maximum speed of data exchange over the network in bytes per second for a query.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_network_bandwidth = 57;
+
+ // The maximum speed of data exchange over the network in bytes per second for all concurrently running user queries.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_network_bandwidth_for_user = 58;
+
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/query-complexity/#max-partitions-per-insert-block).
+ google.protobuf.Int64Value max_partitions_per_insert_block = 102;
+
+ // The maximum number of concurrent requests per user.
+ // Default value: 0 (no limit).
+ google.protobuf.Int64Value max_concurrent_queries_for_user = 103;
+
+ // If enabled, a query is not executed if ClickHouse can't use an index by date.
+ // This setting has effect only for tables of the MergeTree family.
+ //
+ // Default value: **false** (the setting is disabled, the query executes even if ClickHouse can't use an index by date).
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-force_index_by_date).
+ google.protobuf.BoolValue force_index_by_date = 59;
+
+ // If enabled, a query is not executed if ClickHouse can't use an index by primary key.
+ // This setting has effect only for tables of the MergeTree family.
+ //
+ // Default value: **false** (the setting is disabled, the query executes even if ClickHouse can't use an index by primary key).
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#force-primary-key).
+ google.protobuf.BoolValue force_primary_key = 60;
+
+ // Limits the maximum number of rows that can be read from a table when running a query.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/query-complexity/#max-rows-to-read).
+ google.protobuf.Int64Value max_rows_to_read = 13 [(value) = ">=0"];
+
+ // Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_bytes_to_read = 14 [(value) = ">=0"];
+
+ // Determines the behavior on exceeding [limits](https://clickhouse.com/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while reading the data.
+ //
+ // * **throw**-abort query execution, return an error.
+ // * **break**-stop query execution, return partial result.
+ OverflowMode read_overflow_mode = 15;
+
+ // Limits the maximum number of unique keys received from aggregation function.
+ // This setting helps to reduce RAM consumption while doing aggregation.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_rows_to_group_by = 16 [(value) = ">=0"];
+
+ // Determines the behavior on exceeding [limits](https://clickhouse.com/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while doing aggregation.
+ //
+ // * **throw**-abort query execution, return an error.
+ // * **break**-stop query execution, return partial result.
+ // * **any**-perform approximate **GROUP BY** operation by continuing aggregation for the keys that got into the set, but don't add new keys to the set.
+ GroupByOverflowMode group_by_overflow_mode = 17;
+
+ // Limits the maximum number of rows that can be read from a table for sorting.
+ // This setting helps to reduce RAM consumption.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_rows_to_sort = 18 [(value) = ">=0"];
+
+ // Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
+ // This setting helps to reduce RAM consumption.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_bytes_to_sort = 19 [(value) = ">=0"];
+
+ // Determines the behavior on exceeding [limits](https://clickhouse.com/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while sorting.
+ //
+ // * **throw**-abort query execution, return an error.
+ // * **break**-stop query execution, return partial result.
+ OverflowMode sort_overflow_mode = 20;
+
+ // Limits the number of rows in the result.
+ // This limitation is also checked for subqueries and parts of distributed queries that run on remote servers.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_result_rows = 21 [(value) = ">=0"];
+
+ // Limits the number of bytes in the result.
+ // This limitation is also checked for subqueries and parts of distributed queries that run on remote servers.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_result_bytes = 22 [(value) = ">=0"];
+
+ // Determines the behavior on exceeding [limits](https://clickhouse.com/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while forming result.
+ //
+ // * **throw**-abort query execution, return an error.
+ // * **break**-stop query execution, return partial result.
+ OverflowMode result_overflow_mode = 23;
+
+ // Limits the maximum number of different rows when using **DISTINCT**.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_rows_in_distinct = 24 [(value) = ">=0"];
+
+ // Limits the maximum size of a hash table in bytes (uncompressed data) when using **DISTINCT**.
+ google.protobuf.Int64Value max_bytes_in_distinct = 25 [(value) = ">=0"];
+
+ // Determines the behavior on exceeding [limits](https://clickhouse.com/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while doing **DISTINCT**.
+ //
+ // * **throw**-abort query execution, return an error.
+ // * **break**-stop query execution, return partial result.
+ OverflowMode distinct_overflow_mode = 26;
+
+ // Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using **GLOBAL IN**.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_rows_to_transfer = 27 [(value) = ">=0"];
+
+ // Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary
+ // table when using **GLOBAL IN**.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_bytes_to_transfer = 28 [(value) = ">=0"];
+
+ // Determines the behavior on exceeding [limits](https://clickhouse.com/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while doing transfers.
+ //
+ // * **throw**-abort query execution, return an error.
+ // * **break**-stop query execution, return partial result.
+ OverflowMode transfer_overflow_mode = 29;
+
+ // Limits the maximum query execution time in milliseconds.
+ // At the moment, this limitation is not checked when passing one of the sorting stages, or when merging and finalizing aggregation functions.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_execution_time = 30 [(value) = ">=0"];
+
+ // Determines the behavior on exceeding [limits](https://clickhouse.com/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) of execution time.
+ //
+ // * **throw**-abort query execution, return an error.
+ // * **break**-stop query execution, return partial result.
+ OverflowMode timeout_overflow_mode = 31;
+
+ // Limit on the number of rows in the set resulting from the execution of the IN section.
+ google.protobuf.Int64Value max_rows_in_set = 87 [(value) = ">=0"];
+
+ // Limit on the number of bytes in the set resulting from the execution of the IN section.
+ google.protobuf.Int64Value max_bytes_in_set = 88 [(value) = ">=0"];
+
+ // Determines the behavior on exceeding the max_rows_in_set or max_bytes_in_set limit.
+ // Possible values: OVERFLOW_MODE_THROW, OVERFLOW_MODE_BREAK.
+ OverflowMode set_overflow_mode = 89;
+
+ // Limit on maximum size of the hash table for JOIN, in rows.
+ google.protobuf.Int64Value max_rows_in_join = 90 [(value) = ">=0"];
+
+ // Limit on maximum size of the hash table for JOIN, in bytes.
+ google.protobuf.Int64Value max_bytes_in_join = 91 [(value) = ">=0"];
+
+ // Determines the behavior on exceeding the max_rows_in_join or max_bytes_in_join limit.
+ // Possible values: OVERFLOW_MODE_THROW, OVERFLOW_MODE_BREAK.
+ OverflowMode join_overflow_mode = 92;
+
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-join_algorithm).
+ repeated JoinAlgorithm join_algorithm = 104;
+
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#any_join_distinct_right_table_keys).
+ google.protobuf.BoolValue any_join_distinct_right_table_keys = 105;
+
+ // Limits the maximum number of columns that can be read from a table in a single query.
+ // If the query needs to read more columns to complete, it is aborted.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_columns_to_read = 32 [(value) = ">=0"];
+
+ // Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_temporary_columns = 33 [(value) = ">=0"];
+
+ // Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
+ //
+ // Minimal value and default value: **0**, no limitation is set.
+ google.protobuf.Int64Value max_temporary_non_const_columns = 34 [(value) = ">=0"];
+
+ // Limits the size of the part of a query that can be transferred to RAM for parsing with the SQL parser, in bytes.
+ //
+ // Value must be greater than **0** (default: **262144**).
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-max_query_size).
+ google.protobuf.Int64Value max_query_size = 35 [(value) = ">0"];
+
+ // Limits the maximum depth of query syntax tree.
+ //
+ // Executing a big and complex query may result in building a syntax tree of enormous depth.
+ // By using this setting, you can prohibit execution of over-sized or non-optimized queries for huge tables.
+ //
+ // For example, in most cases a **SELECT *** query results in a more complex and deeper syntax tree than a **SELECT ... WHERE ...** query that contains constraints and conditions.
+ // With this setting, a user can be forced to construct more optimized queries.
+ //
+ // Value must be greater than **0** (default: **1000**).
+ // If the value is set too low, it may render ClickHouse unable to execute even simple queries.
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/query-complexity/#max-ast-depth).
+ google.protobuf.Int64Value max_ast_depth = 36 [(value) = ">0"];
+
+ // Limits the maximum size of query syntax tree in number of nodes.
+ //
+ // Executing a big and complex query may result in building a syntax tree of enormous size.
+ // By using this setting, you can prohibit execution of over-sized or non-optimized queries for huge tables.
+ //
+ // Value must be greater than **0** (default: **50000**).
+ // If the value is set too low, it may render ClickHouse unable to execute even simple queries.
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/query-complexity/#max-ast-elements).
+ google.protobuf.Int64Value max_ast_elements = 37 [(value) = ">0"];
+
+ // Limits the maximum size of query syntax tree in number of nodes after expansion of aliases and the asterisk values.
+ //
+ // Executing a big and complex query may result in building a syntax tree of enormous size.
+ // By using this setting, you can prohibit execution of over-sized or non-optimized queries for huge tables.
+ //
+ // Value must be greater than **0** (default: **500000**).
+ // If the value is set too low, it may render ClickHouse unable to execute even simple queries.
+ google.protobuf.Int64Value max_expanded_ast_elements = 38 [(value) = ">0"];
+
+ // Minimal execution speed in rows per second.
+ google.protobuf.Int64Value min_execution_speed = 84 [(value) = ">=0"];
+
+ // Minimal execution speed in bytes per second.
+ google.protobuf.Int64Value min_execution_speed_bytes = 85 [(value) = ">=0"];
+
+ // Aggregate function to use for implementation of count(DISTINCT ...).
+ CountDistinctImplementation count_distinct_implementation = 86;
+
+ // Enables or disables the SQL parser if the fast stream parser can't parse the data.
+ //
+ // Enable this setting if the data that you want to insert into a table contains SQL expressions.
+ //
+ // For example, the stream parser is unable to parse a value that contains the **now()** expression; therefore, an **INSERT** query for this value will fail and no data will be inserted into a table.
+ // With the SQL parser enabled, this expression is parsed correctly: the **now()** expression is parsed as an SQL function and interpreted, and the current date and time are inserted into the table as a result.
+ //
+ // This setting has effect only if you use [Values](https://clickhouse.com/docs/en/interfaces/formats/#data-format-values) format when inserting data.
+ //
+ // Default value: **true** (SQL parser is enabled).
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-input_format_values_interpret_expressions).
+ google.protobuf.BoolValue input_format_values_interpret_expressions = 61;
+
+ // Enables or disables replacing omitted input values with default values of the respective columns when performing **INSERT** queries.
+ //
+ // Default value: **true** (replacing is enabled).
+ google.protobuf.BoolValue input_format_defaults_for_omitted_fields = 62;
+
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#input_format_null_as_default).
+ google.protobuf.BoolValue input_format_null_as_default = 106;
+
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#date_time_input_format).
+ DateTimeInputFormat date_time_input_format = 107;
+
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#input_format_with_names_use_header).
+ google.protobuf.BoolValue input_format_with_names_use_header = 108;
+
+ // Enables quoting of 64-bit integers in JSON output format.
+ //
+ // If this setting is enabled, then 64-bit integers (**UInt64** and **Int64**) will be quoted when written to JSON output in order to maintain compatibility with most JavaScript engines.
+ // Otherwise, such integers will not be quoted.
+ //
+ // Default value: **false** (quoting 64-bit integers is disabled).
+ google.protobuf.BoolValue output_format_json_quote_64bit_integers = 63 [json_name = "outputFormatJsonQuote_64bitIntegers"];
+
+ // Enables special floating-point values (**+nan**, **-nan**, **+inf** and **-inf**) in JSON output format.
+ //
+ // Default value: **false** (special values are not present in the output).
+ google.protobuf.BoolValue output_format_json_quote_denormals = 64;
+
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#date_time_output_format).
+ DateTimeOutputFormat date_time_output_format = 109;
+
+ // Determines whether to use LowCardinality type in Native format.
+ //
+ // * **true** (default)-yes, use.
+ // * **false**-convert LowCardinality columns to regular columns when doing **SELECT**, and convert regular columns to LowCardinality when doing **INSERT**.
+ //
+ // LowCardinality columns (aka sparse columns) store data in a more efficient way than regular columns by using hash tables.
+ // If the data to insert suits this storage format, ClickHouse places it into a LowCardinality column.
+ //
+ // If you use a third-party ClickHouse client that can't work with LowCardinality columns, then this client will not be able to correctly interpret the result of a query that requests data stored in a LowCardinality column.
+ // Disable this setting to convert LowCardinality columns to regular columns when creating the result, so that such clients can process it.
+ //
+ // The official ClickHouse client works with LowCardinality columns out of the box.
+ //
+ // Default value: **true** (LowCardinality columns are used in Native format).
+ google.protobuf.BoolValue low_cardinality_allow_in_native_format = 78;
+
+ // Allows specifying the **LowCardinality** modifier for types of small fixed size (8 or less) in CREATE TABLE statements. Enabling this may increase merge times and memory consumption.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#allow_suspicious_low_cardinality_types).
+ google.protobuf.BoolValue allow_suspicious_low_cardinality_types = 110;
+
+ // Enables returning an empty result when aggregating without keys (with the **GROUP BY** operation absent) over an empty set (e.g., **SELECT count(*) FROM table WHERE 0**).
+ //
+ // * **true**-ClickHouse will return an empty result for such queries.
+ // * **false** (default)-ClickHouse will return a single-line result consisting of **NULL** values for aggregation functions, in accordance with SQL standard.
+ google.protobuf.BoolValue empty_result_for_aggregation_by_empty_set = 79;
+
+ // HTTP connection timeout, in milliseconds.
+ //
+ // Value must be greater than **0** (default: **1000**, 1 second).
+ google.protobuf.Int64Value http_connection_timeout = 65;
+
+ // HTTP receive timeout, in milliseconds.
+ //
+ // Value must be greater than **0** (default: **1800000**, 1800 seconds, 30 minutes).
+ google.protobuf.Int64Value http_receive_timeout = 66;
+
+ // HTTP send timeout, in milliseconds.
+ //
+ // Value must be greater than **0** (default: **1800000**, 1800 seconds, 30 minutes).
+ google.protobuf.Int64Value http_send_timeout = 67;
+
+ // Enables or disables data compression in HTTP responses.
+ //
+ // By default, ClickHouse stores data compressed. When executing a query, its result is uncompressed.
+ // Use this setting to make ClickHouse compress the result when sending it via HTTP.
+ //
+ // Enable this setting and add the **Accept-Encoding: <compression method>** HTTP header in an HTTP request to force compression of the HTTP response from ClickHouse.
+ //
+ // ClickHouse supports the following compression methods: **gzip**, **br**, and **deflate**.
+ //
+ // Default value: **false** (compression is disabled).
+ //
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/interfaces/http/).
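+ //
+ // A minimal sketch of requesting a compressed response over the ClickHouse HTTP interface with curl (the host is an illustrative assumption; 8123 is the default HTTP port):
+ //
+ //   curl -H 'Accept-Encoding: gzip' \
+ //     'http://<host>:8123/?enable_http_compression=1' \
+ //     --data-binary 'SELECT 1'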
+ google.protobuf.BoolValue enable_http_compression = 68;
+
+ // Enables progress notifications using **X-ClickHouse-Progress** HTTP header.
+ //
+ // Default value: **false** (notifications disabled).
+ google.protobuf.BoolValue send_progress_in_http_headers = 69;
+
+ // Minimum interval between progress notifications with **X-ClickHouse-Progress** HTTP header, in milliseconds.
+ //
+ // Value must be greater than **0** (default: **100**).
+ google.protobuf.Int64Value http_headers_progress_interval = 70;
+
+ // Adds a CORS header to HTTP responses.
+ //
+ // Default value: **false** (header is not added).
+ google.protobuf.BoolValue add_http_cors_header = 71;
+
+ // Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response.
+ //
+ // Default value: **false**.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#cancel-http-readonly-queries-on-client-close).
+ google.protobuf.BoolValue cancel_http_readonly_queries_on_client_close = 111;
+
+ // Limits the maximum number of HTTP GET redirect hops for [URL-engine](https://clickhouse.com/docs/en/engines/table-engines/special/url) tables.
+ //
+ // If the parameter is set to **0** (default), no hops are allowed.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#setting-max_http_get_redirects).
+ google.protobuf.Int64Value max_http_get_redirects = 112;
+
+ google.protobuf.BoolValue joined_subquery_requires_alias = 93;
+
+ google.protobuf.BoolValue join_use_nulls = 94;
+
+ google.protobuf.BoolValue transform_null_in = 95;
+
+ // Quota accounting mode. Possible values: QUOTA_MODE_DEFAULT, QUOTA_MODE_KEYED and QUOTA_MODE_KEYED_BY_IP.
+ QuotaMode quota_mode = 80;
+
+ // Sets the data format of [nested](https://clickhouse.com/docs/en/sql-reference/data-types/nested-data-structures/nested) columns.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#flatten-nested).
+ google.protobuf.BoolValue flatten_nested = 113;
+
+ // Regular expression (for the Regexp format).
+ string format_regexp = 114;
+
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#format_regexp_escaping_rule).
+ FormatRegexpEscapingRule format_regexp_escaping_rule = 115;
+
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#format_regexp_skip_unmatched).
+ google.protobuf.BoolValue format_regexp_skip_unmatched = 116;
+
+ // Enables asynchronous inserts.
+ //
+ // Disabled by default.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#async-insert).
+ google.protobuf.BoolValue async_insert = 117;
+
+ // The maximum number of threads for background data parsing and insertion.
+ //
+ // If the parameter is set to **0**, asynchronous insertions are disabled. Default value: **16**.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#async-insert-threads).
+ google.protobuf.Int64Value async_insert_threads = 118;
+
+ // Enables waiting for processing of asynchronous insertion. If enabled, the server returns **OK** only after the data is inserted.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#wait-for-async-insert).
+ google.protobuf.BoolValue wait_for_async_insert = 119;
+
+ // The timeout (in seconds) for waiting for processing of asynchronous insertion.
+ //
+ // Default value: **120**.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#wait-for-async-insert-timeout).
+ google.protobuf.Int64Value wait_for_async_insert_timeout = 120;
+
+ // The maximum size of the unparsed data in bytes collected per query before being inserted.
+ //
+ // If the parameter is set to **0**, asynchronous insertions are disabled. Default value: **100000**.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#async-insert-max-data-size).
+ google.protobuf.Int64Value async_insert_max_data_size = 121;
+
+ // The maximum timeout in milliseconds since the first INSERT query before inserting collected data.
+ //
+ // If the parameter is set to **0**, the timeout is disabled. Default value: **200**.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#async-insert-busy-timeout-ms).
+ google.protobuf.Int64Value async_insert_busy_timeout = 122;
+
+ // The maximum timeout in milliseconds since the last INSERT query before dumping collected data. If enabled, this setting prolongs [async_insert_busy_timeout] with every INSERT query, as long as [async_insert_max_data_size] is not exceeded.
+ //
+ // For more information, see the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#async-insert-stale-timeout-ms).
+ google.protobuf.Int64Value async_insert_stale_timeout = 123;
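+
+ // Putting the asynchronous insert settings above together: a minimal sketch of a user settings fragment in proto3 JSON form (the values are illustrative assumptions, not recommendations):
+ //
+ //   {
+ //     "asyncInsert": true,
+ //     "waitForAsyncInsert": true,
+ //     "asyncInsertMaxDataSize": "100000",
+ //     "asyncInsertBusyTimeout": "200"
+ //   }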
+
+ // Memory profiler step (in bytes).
+ //
+ // If the next query step requires more memory than this parameter specifies, the memory profiler collects the allocating stack trace. Values lower than a few megabytes slow down query processing.
+ //
+ // Default value: **4194304** (4 MB). Zero disables the memory profiler.
+ google.protobuf.Int64Value memory_profiler_step = 124;
+
+ // Collects random allocations and deallocations and writes them to **system.trace_log** with the **MemorySample** trace_type. The probability applies to every alloc/free regardless of the size of the allocation.
+ //
+ // Possible values: from **0** to **1**. Default: **0**.
+ google.protobuf.DoubleValue memory_profiler_sample_probability = 125;
+
+ // Sets the maximum number of parallel threads for the SELECT query data read phase with the FINAL modifier.
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#max-final-threads).
+ google.protobuf.Int64Value max_final_threads = 126 [(value) = ">=0"];
+
+ // Enables or disables order-preserving parallel parsing of data formats. Supported only for [TSV](https://clickhouse.com/docs/en/interfaces/formats#tabseparated), [TSKV](https://clickhouse.com/docs/en/interfaces/formats#tskv), [CSV](https://clickhouse.com/docs/en/interfaces/formats#csv) and [JSONEachRow](https://clickhouse.com/docs/en/interfaces/formats#jsoneachrow) formats.
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#input-format-parallel-parsing).
+ google.protobuf.BoolValue input_format_parallel_parsing = 127;
+
+ // Enables or disables the insertion of JSON data with nested objects.
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#input_format_import_nested_json).
+ google.protobuf.BoolValue input_format_import_nested_json = 128;
+
+ // Method of reading data from the local filesystem, one of: read, pread, mmap, io_uring, pread_threadpool. The 'io_uring' method is experimental and does not work for Log, TinyLog, StripeLog, File, Set, Join, and other tables with appendable files in the presence of concurrent reads and writes.
+ LocalFilesystemReadMethod local_filesystem_read_method = 129;
+
+ // The maximum size of the buffer to read from the filesystem.
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/codebrowser/ClickHouse/src/Core/Settings.h.html#DB::SettingsTraits::Data::max_read_buffer_size)
+ google.protobuf.Int64Value max_read_buffer_size = 130 [(value) = ">0"];
+
+ // Sets the maximum number of retries for ClickHouse Keeper (or ZooKeeper) requests during INSERT into replicated MergeTree tables. Only Keeper requests that failed due to a network error, Keeper session timeout, or request timeout are considered for retries.
+ // Default: 20 since version 23.2, 0 (disabled) before
+ // Min_version: 22.11
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#insert_keeper_max_retries)
+ google.protobuf.Int64Value insert_keeper_max_retries = 131 [(value) = ">=0"];
+
+ // The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running user queries. Zero means unlimited.
+ // Default: 0 - unlimited
+ // Min_version: 22.10
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/query-complexity#settings_max_temporary_data_on_disk_size_for_user)
+ google.protobuf.Int64Value max_temporary_data_on_disk_size_for_user = 132 [(value) = ">=0"];
+
+ // The maximum amount of data consumed by temporary files on disk in bytes for all concurrently running queries. Zero means unlimited.
+ // Default: 0 - unlimited
+ // Min_version: 22.10
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/query-complexity#settings_max_temporary_data_on_disk_size_for_query)
+ google.protobuf.Int64Value max_temporary_data_on_disk_size_for_query = 133 [(value) = ">=0"];
+
+ // Limits the maximum recursion depth in the recursive descent parser. Allows controlling the stack size.
+ // Default: 1000
+ // Special: 0 - unlimited
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#max_parser_depth)
+ google.protobuf.Int64Value max_parser_depth = 134 [(value) = ">=0"];
+
+ // Method of reading data from remote filesystem, one of: read, threadpool.
+ // Default: read
+ // Min_version: 21.11
+ // See in-depth description in [ClickHouse GitHub](https://github.com/ClickHouse/ClickHouse/blob/f9558345e886876b9132d9c018e357f7fa9b22a3/src/Core/Settings.h#L660)
+ RemoteFilesystemReadMethod remote_filesystem_read_method = 135;
+
+ // Soft memory limit applied when the hard memory limit is reached at the user level. This value is used to compute the overcommit ratio for the query. Zero means skip the query.
+ // Default: 1GiB
+ // Min_version: 22.5
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#memory_overcommit_ratio_denominator)
+ google.protobuf.Int64Value memory_overcommit_ratio_denominator = 136 [(value) = ">=0"];
+
+ // Soft memory limit applied when the hard memory limit is reached at the global level. This value is used to compute the overcommit ratio for the query. Zero means skip the query.
+ // Default: 1GiB
+ // Min_version: 22.5
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#memory_overcommit_ratio_denominator_for_user)
+ google.protobuf.Int64Value memory_overcommit_ratio_denominator_for_user = 137 [(value) = ">=0"];
+
+ // Maximum time a thread will wait for memory to be freed in the case of memory overcommit at the user level. If the timeout is reached and memory is not freed, an exception is thrown.
+ // Default: 5000000 (5 seconds)
+ // Min_version: 22.5
+ // See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings#memory_usage_overcommit_max_wait_microseconds)
+ google.protobuf.Int64Value memory_usage_overcommit_max_wait_microseconds = 138 [(value) = ">=0"];
+
+ // The setting is deprecated and has no effect.
+ google.protobuf.BoolValue compile = 44 [deprecated = true];
+
+ // The setting is deprecated and has no effect.
+ google.protobuf.Int64Value min_count_to_compile = 45 [deprecated = true];
+}
+
+// ClickHouse quota representation. Each quota is associated with a user and limits their resource usage over an interval.
+// See the in-depth description in the [ClickHouse documentation](https://clickhouse.com/docs/en/operations/quotas/).
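+//
+// A minimal sketch of a quota in proto3 JSON form that limits a user to 1000 queries per hour (the values are illustrative assumptions, not recommendations):
+//
+//   {
+//     "intervalDuration": "3600000",
+//     "queries": "1000"
+//   }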
+message UserQuota {
+ // Duration of the quota interval, in milliseconds.
+ // Minimum value is 1000 (1 second).
+ google.protobuf.Int64Value interval_duration = 1 [(value) = ">=1000"];
+
+ // The total number of queries.
+ // 0 - unlimited.
+ google.protobuf.Int64Value queries = 2 [(value) = ">=0"];
+
+ // The number of queries that threw an exception.
+ // 0 - unlimited.
+ google.protobuf.Int64Value errors = 3 [(value) = ">=0"];
+
+ // The total number of rows given as the result.
+ // 0 - unlimited.
+ google.protobuf.Int64Value result_rows = 4 [(value) = ">=0"];
+
+ // The total number of source rows read from tables for running the query, on all remote servers.
+ // 0 - unlimited.
+ google.protobuf.Int64Value read_rows = 5 [(value) = ">=0"];
+
+ // The total query execution time, in milliseconds (wall time).
+ // 0 - unlimited.
+ google.protobuf.Int64Value execution_time = 6 [(value) = ">=0"];
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/user_service.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/user_service.proto
new file mode 100644
index 0000000000..894e0c7262
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/user_service.proto
@@ -0,0 +1,218 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+import "google/api/annotations.proto";
+import "google/protobuf/field_mask.proto";
+import "yandex/cloud/operation/operation.proto";
+import "yandex/cloud/mdb/clickhouse/v1/user.proto";
+import "yandex/cloud/api/operation.proto";
+import "yandex/cloud/validation.proto";
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+// A set of methods for managing ClickHouse User resources.
+// NOTE: these methods are available only if user management through SQL is disabled.
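+//
+// A minimal sketch of calling this service with grpcurl (the endpoint, IAM token and identifiers are illustrative assumptions, not part of this definition):
+//
+//   grpcurl -H "Authorization: Bearer $IAM_TOKEN" \
+//     -d '{"cluster_id": "<cluster_id>", "user_name": "alice"}' \
+//     mdb.api.cloud.yandex.net:443 \
+//     yandex.cloud.mdb.clickhouse.v1.UserService/Get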
+service UserService {
+ // Returns the specified ClickHouse User resource.
+ //
+ // To get the list of available ClickHouse User resources, make a [List] request.
+ rpc Get (GetUserRequest) returns (User) {
+ option (google.api.http) = { get: "/managed-clickhouse/v1/clusters/{cluster_id}/users/{user_name}" };
+ }
+
+ // Retrieves the list of ClickHouse User resources in the specified cluster.
+ rpc List (ListUsersRequest) returns (ListUsersResponse) {
+ option (google.api.http) = { get: "/managed-clickhouse/v1/clusters/{cluster_id}/users" };
+ }
+
+ // Creates a ClickHouse user in the specified cluster.
+ rpc Create (CreateUserRequest) returns (operation.Operation) {
+ option (google.api.http) = { post: "/managed-clickhouse/v1/clusters/{cluster_id}/users" body: "*" };
+ option (yandex.cloud.api.operation) = {
+ metadata: "CreateUserMetadata"
+ response: "User"
+ };
+ }
+
+ // Updates the specified ClickHouse user.
+ rpc Update (UpdateUserRequest) returns (operation.Operation) {
+ option (google.api.http) = { patch: "/managed-clickhouse/v1/clusters/{cluster_id}/users/{user_name}" body: "*" };
+ option (yandex.cloud.api.operation) = {
+ metadata: "UpdateUserMetadata"
+ response: "User"
+ };
+ }
+
+ // Deletes the specified ClickHouse user.
+ rpc Delete (DeleteUserRequest) returns (operation.Operation) {
+ option (google.api.http) = { delete: "/managed-clickhouse/v1/clusters/{cluster_id}/users/{user_name}" };
+ option (yandex.cloud.api.operation) = {
+ metadata: "DeleteUserMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+
+ // Grants a permission to the specified ClickHouse user.
+ rpc GrantPermission (GrantUserPermissionRequest) returns (operation.Operation) {
+ option (google.api.http) = { post: "/managed-clickhouse/v1/clusters/{cluster_id}/users/{user_name}:grantPermission" body: "*" };
+ option (yandex.cloud.api.operation) = {
+ metadata: "GrantUserPermissionMetadata"
+ response: "User"
+ };
+ }
+
+ // Revokes a permission from the specified ClickHouse user.
+ rpc RevokePermission (RevokeUserPermissionRequest) returns (operation.Operation) {
+ option (google.api.http) = { post: "/managed-clickhouse/v1/clusters/{cluster_id}/users/{user_name}:revokePermission" body: "*" };
+ option (yandex.cloud.api.operation) = {
+ metadata: "RevokeUserPermissionMetadata"
+ response: "User"
+ };
+ }
+}
+
+message GetUserRequest {
+ // ID of the ClickHouse cluster the user belongs to.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Name of the ClickHouse User resource to return.
+ // To get the name of the user, use a [UserService.List] request.
+ string user_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_]*"];
+}
+
+message ListUsersRequest {
+ // ID of the cluster to list ClickHouse users in.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // The maximum number of results per page to return. If the number of available
+ // results is larger than [page_size], the service returns a [ListUsersResponse.next_page_token]
+ // that can be used to get the next page of results in subsequent list requests.
+ int64 page_size = 2 [(value) = "<=1000"];
+
+ // Page token. To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token]
+ // returned by the previous list request.
+ string page_token = 3 [(length) = "<=100"];
+}
+
+message ListUsersResponse {
+ // List of ClickHouse User resources.
+ repeated User users = 1;
+
+ // This token allows you to get the next page of results for list requests. If the number of results
+ // is larger than [ListUsersRequest.page_size], use the [next_page_token] as the value
+ // for the [ListUsersRequest.page_token] parameter in the next list request. Each subsequent
+ // list request will have its own [next_page_token] to continue paging through the results.
+ string next_page_token = 2;
+}
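+
+// A minimal sketch of paging through users via the REST mapping with curl (the host, token and cluster ID are illustrative assumptions):
+//
+//   curl -H "Authorization: Bearer $IAM_TOKEN" \
+//     'https://mdb.api.cloud.yandex.net/managed-clickhouse/v1/clusters/<cluster_id>/users?pageSize=100'
+//
+// Pass the returned [next_page_token] as **pageToken** in the next request, and repeat until the response no longer contains a token.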
+
+message CreateUserRequest {
+ // ID of the ClickHouse cluster to create a user in.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Properties of the user to be created.
+ UserSpec user_spec = 2 [(required) = true];
+}
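+
+// A minimal sketch of the corresponding REST call with curl (the host, token, cluster ID and user properties are illustrative assumptions; [UserSpec] is defined in user.proto):
+//
+//   curl -X POST \
+//     -H "Authorization: Bearer $IAM_TOKEN" \
+//     -d '{"userSpec": {"name": "alice", "password": "<password>"}}' \
+//     https://mdb.api.cloud.yandex.net/managed-clickhouse/v1/clusters/<cluster_id>/users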
+
+message CreateUserMetadata {
+ // ID of the ClickHouse cluster the user is being created in.
+ string cluster_id = 1;
+
+ // Name of the user that is being created.
+ string user_name = 2;
+}
+
+message UpdateUserRequest {
+ // ID of the ClickHouse cluster the user belongs to.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Name of the user to be updated.
+ // To get the name of the user, use a [UserService.List] request.
+ string user_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_]*"];
+
+ // Field mask that specifies which attributes of the ClickHouse user should be updated.
+ google.protobuf.FieldMask update_mask = 3;
+
+ // New password for the user.
+ string password = 4 [(length) = "8-128"];
+
+ // New set of permissions for the user.
+ repeated Permission permissions = 5;
+
+ UserSettings settings = 6;
+
+ repeated UserQuota quotas = 7;
+}
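+
+// A minimal sketch of a partial update via the REST mapping, changing only the password (the host, token and identifiers are illustrative assumptions; in proto3 JSON a field mask is a comma-separated string of paths):
+//
+//   curl -X PATCH \
+//     -H "Authorization: Bearer $IAM_TOKEN" \
+//     -d '{"updateMask": "password", "password": "<new password>"}' \
+//     https://mdb.api.cloud.yandex.net/managed-clickhouse/v1/clusters/<cluster_id>/users/alice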
+
+message UpdateUserMetadata {
+ // ID of the ClickHouse cluster the user belongs to.
+ string cluster_id = 1;
+
+ // Name of the user that is being updated.
+ string user_name = 2;
+}
+
+message DeleteUserRequest {
+ // ID of the ClickHouse cluster the user belongs to.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Name of the user to delete.
+ // To get the name of the user, use a [UserService.List] request.
+ string user_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_]*"];
+}
+
+message DeleteUserMetadata {
+ // ID of the ClickHouse cluster the user belongs to.
+ string cluster_id = 1;
+
+ // Name of the user that is being deleted.
+ string user_name = 2;
+}
+
+message GrantUserPermissionRequest {
+ // ID of the ClickHouse cluster the user belongs to.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Name of the user to grant the permission to.
+ // To get the name of the user, use a [UserService.List] request.
+ string user_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_]*"];
+
+ // Permission that should be granted to the specified user.
+ Permission permission = 3;
+}
+
+message GrantUserPermissionMetadata {
+ // ID of the ClickHouse cluster the user belongs to.
+ string cluster_id = 1;
+
+ // Name of the user that is being granted a permission.
+ string user_name = 2;
+}
+
+message RevokeUserPermissionRequest {
+ // ID of the ClickHouse cluster the user belongs to.
+ // To get the cluster ID, use a [ClusterService.List] request.
+ string cluster_id = 1 [(required) = true, (length) = "<=50"];
+
+ // Name of the user to revoke a permission from.
+ // To get the name of the user, use a [UserService.List] request.
+ string user_name = 2 [(required) = true, (length) = "<=63", (pattern) = "[a-zA-Z0-9_]*"];
+
+ // Name of the database that the user should lose access to.
+ string database_name = 3 [(length) = "<=63", (pattern) = "[a-zA-Z0-9_-]*"];
+}
+
+message RevokeUserPermissionMetadata {
+ // ID of the ClickHouse cluster the user belongs to.
+ string cluster_id = 1;
+
+ // Name of the user whose permission is being revoked.
+ string user_name = 2;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/version.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/version.proto
new file mode 100644
index 0000000000..d82f86e214
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/version.proto
@@ -0,0 +1,20 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+message Version {
+ // ID of the version.
+ string id = 1;
+
+ // Name of the version.
+ string name = 2;
+
+ // Whether the version is deprecated.
+ bool deprecated = 3;
+
+ // List of versions that the current version can be updated to.
+ repeated string updatable_to = 4;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/versions_service.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/versions_service.proto
new file mode 100644
index 0000000000..c3552cef09
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/mdb/clickhouse/v1/versions_service.proto
@@ -0,0 +1,42 @@
+syntax = "proto3";
+
+package yandex.cloud.mdb.clickhouse.v1;
+
+import "google/api/annotations.proto";
+import "yandex/cloud/validation.proto";
+import "yandex/cloud/mdb/clickhouse/v1/version.proto";
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/clickhouse/v1;clickhouse";
+option java_package = "yandex.cloud.api.mdb.clickhouse.v1";
+
+// A set of methods for managing ClickHouse versions.
+service VersionsService {
+ // Returns list of available ClickHouse versions.
+ rpc List (ListVersionsRequest) returns (ListVersionsResponse) {
+ option (google.api.http) = { get: "/managed-clickhouse/v1/versions" };
+ }
+}
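+
+// A minimal sketch of the corresponding REST call with curl (the host and token are illustrative assumptions):
+//
+//   curl -H "Authorization: Bearer $IAM_TOKEN" \
+//     https://mdb.api.cloud.yandex.net/managed-clickhouse/v1/versions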
+
+message ListVersionsRequest {
+ // The maximum number of results per page that should be returned. If the number of available
+ // results is larger than [page_size], the service returns a [ListVersionsResponse.next_page_token] that can be used
+ // to get the next page of results in subsequent ListVersions requests.
+ // Default value: 100.
+ int64 page_size = 1 [(value) = "0-1000"];
+
+ // Page token. Set [page_token] to the [ListVersionsResponse.next_page_token] returned by a previous ListVersions
+ // request to get the next page of results.
+ string page_token = 2 [(length) = "<=100"];
+}
+
+message ListVersionsResponse {
+ // Requested list of available versions.
+ repeated Version version = 1;
+
+ // This token allows you to get the next page of results for ListVersions requests,
+ // if the number of results is larger than [ListVersionsRequest.page_size] specified in the request.
+ // To get the next page, specify the value of [next_page_token] as a value for
+ // the [ListVersionsRequest.page_token] parameter in the next ListVersions request. Subsequent ListVersions
+ // requests will have their own [next_page_token] to continue paging through the results.
+ string next_page_token = 2;
+}