aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1
diff options
context:
space:
mode:
authoriddqd <iddqd@yandex-team.com>2024-06-11 10:12:13 +0300
committeriddqd <iddqd@yandex-team.com>2024-06-11 10:22:43 +0300
commit07f57e35443ab7f09471caf2dbf1afbcced4d9f7 (patch)
treea4a7b66ead62e83fa988a2ec2ce6576311c1f4b1 /contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1
parent6db3b8ca95e44179e48306a58656fb1f9317d9c3 (diff)
downloadydb-07f57e35443ab7f09471caf2dbf1afbcced4d9f7.tar.gz
add contrib/python/yandexcloud to import
03b7d3cad2237366b55b393e18d4dc5eb222798c
Diffstat (limited to 'contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1')
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint.proto46
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/clickhouse.proto86
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/common.proto112
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/kafka.proto100
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/metrika.proto25
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/mongo.proto64
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/mysql.proto114
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/parsers.proto33
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/postgres.proto147
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/serializers.proto33
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/ydb.proto58
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/yds.proto63
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint_service.proto111
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/transfer.proto215
-rw-r--r--contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/transfer_service.proto143
15 files changed, 1350 insertions, 0 deletions
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint.proto
new file mode 100644
index 0000000000..b34ba1c1d8
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint.proto
@@ -0,0 +1,46 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1;datatransfer";
+option java_package = "yandex.cloud.api.datatransfer.v1";
+
+import "yandex/cloud/datatransfer/v1/endpoint/clickhouse.proto";
+import "yandex/cloud/datatransfer/v1/endpoint/common.proto";
+import "yandex/cloud/datatransfer/v1/endpoint/kafka.proto";
+import "yandex/cloud/datatransfer/v1/endpoint/metrika.proto";
+import "yandex/cloud/datatransfer/v1/endpoint/mongo.proto";
+import "yandex/cloud/datatransfer/v1/endpoint/mysql.proto";
+import "yandex/cloud/datatransfer/v1/endpoint/postgres.proto";
+import "yandex/cloud/datatransfer/v1/endpoint/ydb.proto";
+import "yandex/cloud/datatransfer/v1/endpoint/yds.proto";
+
+message Endpoint {
+ reserved 3, 7 to 51;
+ string id = 1;
+ string folder_id = 2;
+ string name = 4;
+ string description = 5;
+ map<string,string> labels = 6;
+ EndpointSettings settings = 52;
+}
+message EndpointSettings {
+ reserved 4 to 6, 10 to 15, 17 to 100, 103, 106 to 109, 112 to 145, 147 to 149;
+ oneof settings {
+ endpoint.MysqlSource mysql_source = 1;
+ endpoint.PostgresSource postgres_source = 2;
+ endpoint.YdbSource ydb_source = 3;
+ endpoint.YDSSource yds_source = 7;
+ endpoint.KafkaSource kafka_source = 8;
+ endpoint.MongoSource mongo_source = 9;
+ endpoint.ClickhouseSource clickhouse_source = 16;
+ endpoint.MysqlTarget mysql_target = 101;
+ endpoint.PostgresTarget postgres_target = 102;
+ endpoint.ClickhouseTarget clickhouse_target = 104;
+ endpoint.YdbTarget ydb_target = 105;
+ endpoint.KafkaTarget kafka_target = 110;
+ endpoint.MongoTarget mongo_target = 111;
+ endpoint.MetrikaSource metrika_source = 146;
+ endpoint.YDSTarget yds_target = 150;
+ }
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/clickhouse.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/clickhouse.proto
new file mode 100644
index 0000000000..ca61659646
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/clickhouse.proto
@@ -0,0 +1,86 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1.endpoint;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1/endpoint;endpoint";
+option java_package = "yandex.cloud.api.datatransfer.v1.endpoint";
+option csharp_namespace = "Yandex.Cloud.Datatransfer.V1.EndPoint"; // there is a clash with class Endpoint in namespace Yandex.Cloud.Datatransfer.V1
+
+import "google/protobuf/empty.proto";
+import "yandex/cloud/datatransfer/v1/endpoint/common.proto";
+
+enum ClickhouseCleanupPolicy {
+ CLICKHOUSE_CLEANUP_POLICY_UNSPECIFIED = 0;
+ CLICKHOUSE_CLEANUP_POLICY_DISABLED = 1;
+ CLICKHOUSE_CLEANUP_POLICY_DROP = 2;
+ CLICKHOUSE_CLEANUP_POLICY_TRUNCATE = 3;
+}
+message ClickhouseShard {
+ string name = 1;
+ repeated string hosts = 2;
+}
+message OnPremiseClickhouse {
+ reserved 2, 5 to 7;
+ repeated ClickhouseShard shards = 1;
+ int64 http_port = 3;
+ int64 native_port = 4;
+ TLSMode tls_mode = 8;
+}
+message ClickhouseConnectionOptions {
+ reserved 1, 3 to 4;
+ oneof address {
+ string mdb_cluster_id = 5;
+ OnPremiseClickhouse on_premise = 2;
+ }
+ string user = 6;
+ Secret password = 7;
+ // Database
+ string database = 8;
+}
+message ClickhouseConnection {
+ oneof connection {
+ ClickhouseConnectionOptions connection_options = 1;
+ }
+}
+message ClickhouseSharding {
+ message ColumnValueHash {
+ string column_name = 1;
+ }
+ message ColumnValueMapping {
+ message ValueToShard {
+ ColumnValue column_value = 1;
+ string shard_name = 2;
+ }
+ string column_name = 1;
+ repeated ValueToShard mapping = 2;
+ }
+ oneof sharding {
+ ColumnValueHash column_value_hash = 1;
+ ColumnValueMapping custom_mapping = 2;
+ google.protobuf.Empty transfer_id = 3;
+ google.protobuf.Empty round_robin = 4;
+ }
+}
+message ClickhouseSource {
+ reserved 2 to 6;
+ ClickhouseConnection connection = 1;
+ // While list of tables for replication. If none or empty list is presented - will
+ // replicate all tables. Can contain * patterns.
+ repeated string include_tables = 7;
+ // Exclude list of tables for replication. If none or empty list is presented -
+ // will replicate all tables. Can contain * patterns.
+ repeated string exclude_tables = 8;
+ string subnet_id = 9;
+ repeated string security_groups = 10;
+}
+message ClickhouseTarget {
+ reserved 1, 3 to 11, 13 to 16, 18 to 20, 23 to 49;
+ ClickhouseConnection connection = 2;
+ string subnet_id = 12;
+ // Alternative table names in target
+ repeated AltName alt_names = 17;
+ ClickhouseCleanupPolicy cleanup_policy = 21;
+ ClickhouseSharding sharding = 22;
+ string clickhouse_cluster_name = 50;
+ repeated string security_groups = 51;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/common.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/common.proto
new file mode 100644
index 0000000000..45a4012fc6
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/common.proto
@@ -0,0 +1,112 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1.endpoint;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1/endpoint;endpoint";
+option java_package = "yandex.cloud.api.datatransfer.v1.endpoint";
+option csharp_namespace = "Yandex.Cloud.Datatransfer.V1.EndPoint"; // there is a clash with class Endpoint in namespace Yandex.Cloud.Datatransfer.V1
+
+import "google/protobuf/empty.proto";
+
+enum ObjectTransferStage {
+ OBJECT_TRANSFER_STAGE_UNSPECIFIED = 0;
+ // Before data transfer
+ BEFORE_DATA = 1;
+ // After data transfer
+ AFTER_DATA = 2;
+ // Don't copy
+ NEVER = 3;
+}
+enum CleanupPolicy {
+ CLEANUP_POLICY_UNSPECIFIED = 0;
+ // Don't cleanup
+ DISABLED = 1;
+ // Drop
+ DROP = 2;
+ // Truncate
+ TRUNCATE = 3;
+}
+enum ColumnType {
+ COLUMN_TYPE_UNSPECIFIED = 0;
+ INT32 = 1;
+ INT16 = 2;
+ INT8 = 3;
+ UINT64 = 4;
+ UINT32 = 5;
+ UINT16 = 6;
+ UINT8 = 7;
+ DOUBLE = 8;
+ BOOLEAN = 9;
+ STRING = 10;
+ UTF8 = 11;
+ ANY = 12;
+ DATETIME = 13;
+ INT64 = 14;
+}
+message AltName {
+ // Source table name
+ string from_name = 1;
+ // Target table name
+ string to_name = 2;
+}
+message Secret {
+ oneof value {
+ // Raw secret value
+ string raw = 1;
+ }
+}
+message ColSchema {
+ string name = 1;
+ ColumnType type = 2;
+ bool key = 3;
+ bool required = 4;
+ string path = 5;
+}
+message TLSMode {
+ oneof tls_mode {
+ google.protobuf.Empty disabled = 1;
+ TLSConfig enabled = 2;
+ }
+}
+message TLSConfig {
+ // CA certificate
+ //
+ // X.509 certificate of the certificate authority which issued the server's
+ // certificate, in PEM format. When CA certificate is specified TLS is used to
+ // connect to the server.
+ string ca_certificate = 1;
+}
+message ColumnValue {
+ oneof value {
+ string string_value = 1;
+ }
+}
+message DataTransformationOptions {
+ reserved 6 to 7;
+ // Cloud function
+ string cloud_function = 1;
+ // Number of retries
+ int64 number_of_retries = 2;
+ // Buffer size for function
+ string buffer_size = 3;
+ // Flush interval
+ string buffer_flush_interval = 4;
+ // Invocation timeout
+ string invocation_timeout = 5;
+ // Service account
+ string service_account_id = 8;
+}
+message FieldList {
+ reserved 1;
+ // Column schema
+ repeated ColSchema fields = 2;
+}
+message DataSchema {
+ oneof schema {
+ FieldList fields = 2;
+ string json_fields = 1;
+ }
+}
+// No authentication
+message NoAuth {
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/kafka.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/kafka.proto
new file mode 100644
index 0000000000..dc223bd078
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/kafka.proto
@@ -0,0 +1,100 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1.endpoint;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1/endpoint;endpoint";
+option java_package = "yandex.cloud.api.datatransfer.v1.endpoint";
+option csharp_namespace = "Yandex.Cloud.Datatransfer.V1.EndPoint"; // there is a clash with class Endpoint in namespace Yandex.Cloud.Datatransfer.V1
+
+import "yandex/cloud/datatransfer/v1/endpoint/common.proto";
+import "yandex/cloud/datatransfer/v1/endpoint/parsers.proto";
+import "yandex/cloud/datatransfer/v1/endpoint/serializers.proto";
+
+enum KafkaMechanism {
+ KAFKA_MECHANISM_UNSPECIFIED = 0;
+ KAFKA_MECHANISM_SHA256 = 1;
+ KAFKA_MECHANISM_SHA512 = 2;
+}
+message KafkaConnectionOptions {
+ oneof connection {
+ // Managed Service for Kafka cluster ID
+ string cluster_id = 1;
+ // Connection options for on-premise Kafka
+ OnPremiseKafka on_premise = 2;
+ }
+}
+message OnPremiseKafka {
+ reserved 2 to 3;
+ // Kafka broker URLs
+ repeated string broker_urls = 1;
+ // Network interface for endpoint. If none will assume public ipv4
+ string subnet_id = 4;
+ // TLS settings for broker connection. Disabled by default.
+ TLSMode tls_mode = 5;
+}
+message KafkaAuth {
+ oneof security {
+ // Authentication with SASL
+ KafkaSaslSecurity sasl = 1;
+ // No authentication
+ NoAuth no_auth = 2;
+ }
+}
+message KafkaSaslSecurity {
+ reserved 2;
+ // User name
+ string user = 1;
+ // SASL mechanism for authentication
+ KafkaMechanism mechanism = 3;
+ // Password for user
+ Secret password = 4;
+}
+message KafkaSource {
+ reserved 6;
+ // Connection settings
+ KafkaConnectionOptions connection = 1;
+ // Authentication settings
+ KafkaAuth auth = 2;
+ // Security groups
+ repeated string security_groups = 3;
+ // Full source topic name
+ // Deprecated in favor of topic names
+ string topic_name = 4 [deprecated = true];
+ // Data transformation rules
+ DataTransformationOptions transformer = 5;
+ // Data parsing rules
+ Parser parser = 7;
+ // List of topic names to read
+ repeated string topic_names = 8;
+}
+message KafkaTarget {
+ reserved 4 to 6;
+ // Connection settings
+ KafkaConnectionOptions connection = 1;
+ // Authentication settings
+ KafkaAuth auth = 2;
+ // Security groups
+ repeated string security_groups = 3;
+ // Target topic settings
+ KafkaTargetTopicSettings topic_settings = 7;
+ // Data serialization format settings
+ Serializer serializer = 8;
+}
+message KafkaTargetTopicSettings {
+ oneof topic_settings {
+ // Full topic name
+ KafkaTargetTopic topic = 1;
+ // Topic prefix
+ //
+ // Analogue of the Debezium setting database.server.name.
+ // Messages will be sent to topic with name <topic_prefix>.<schema>.<table_name>.
+ string topic_prefix = 2;
+ }
+}
+message KafkaTargetTopic {
+ // Topic name
+ string topic_name = 1;
+ // Save transactions order
+ // Not to split events queue into separate per-table queues.
+ bool save_tx_order = 2;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/metrika.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/metrika.proto
new file mode 100644
index 0000000000..1974ca6cbd
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/metrika.proto
@@ -0,0 +1,25 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1.endpoint;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1/endpoint;endpoint";
+option java_package = "yandex.cloud.api.datatransfer.v1.endpoint";
+option csharp_namespace = "Yandex.Cloud.Datatransfer.V1.EndPoint"; // there is a clash with class Endpoint in namespace Yandex.Cloud.Datatransfer.V1
+
+import "yandex/cloud/datatransfer/v1/endpoint/common.proto";
+
+enum MetrikaStreamType {
+ METRIKA_STREAM_TYPE_UNSPECIFIED = 0;
+ METRIKA_STREAM_TYPE_HITS = 1;
+ METRIKA_STREAM_TYPE_VISITS = 2;
+ METRIKA_STREAM_TYPE_HITS_V2 = 3;
+}
+message MetrikaStream {
+ MetrikaStreamType type = 1;
+ repeated string columns = 2;
+}
+message MetrikaSource {
+ repeated int64 counter_ids = 1;
+ Secret token = 2;
+ repeated MetrikaStream streams = 3;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/mongo.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/mongo.proto
new file mode 100644
index 0000000000..6e58a0f1f6
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/mongo.proto
@@ -0,0 +1,64 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1.endpoint;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1/endpoint;endpoint";
+option java_package = "yandex.cloud.api.datatransfer.v1.endpoint";
+option csharp_namespace = "Yandex.Cloud.Datatransfer.V1.EndPoint"; // there is a clash with class Endpoint in namespace Yandex.Cloud.Datatransfer.V1
+
+import "yandex/cloud/datatransfer/v1/endpoint/common.proto";
+
+message OnPremiseMongo {
+ reserved 3 to 4;
+ repeated string hosts = 1;
+ int64 port = 2;
+ string replica_set = 5;
+ TLSMode tls_mode = 6;
+}
+message MongoConnectionOptions {
+ oneof address {
+ string mdb_cluster_id = 1;
+ OnPremiseMongo on_premise = 2;
+ }
+ // User name
+ string user = 3;
+ // Password for user
+ Secret password = 4;
+ // Database name associated with the credentials
+ string auth_source = 5;
+}
+message MongoConnection {
+ reserved 1 to 2;
+ oneof connection {
+ MongoConnectionOptions connection_options = 3;
+ }
+}
+message MongoCollection {
+ string database_name = 1;
+ string collection_name = 2;
+}
+message MongoSource {
+ reserved 3 to 5, 9 to 10;
+ MongoConnection connection = 1;
+ string subnet_id = 2;
+ // List of collections for replication. Empty list implies replication of all
+ // tables on the deployment. Allowed to use * as collection name.
+ repeated MongoCollection collections = 6;
+ // List of forbidden collections for replication. Allowed to use * as collection
+ // name for forbid all collections of concrete schema.
+ repeated MongoCollection excluded_collections = 7;
+ // Read mode for mongo client
+ bool secondary_preferred_mode = 8;
+ // Security groups
+ repeated string security_groups = 11;
+}
+message MongoTarget {
+ reserved 3 to 5;
+ MongoConnection connection = 1;
+ // Database name
+ string database = 2;
+ CleanupPolicy cleanup_policy = 6;
+ string subnet_id = 7;
+ // Security groups
+ repeated string security_groups = 8;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/mysql.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/mysql.proto
new file mode 100644
index 0000000000..86627d4357
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/mysql.proto
@@ -0,0 +1,114 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1.endpoint;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1/endpoint;endpoint";
+option java_package = "yandex.cloud.api.datatransfer.v1.endpoint";
+option csharp_namespace = "Yandex.Cloud.Datatransfer.V1.EndPoint"; // there is a clash with class Endpoint in namespace Yandex.Cloud.Datatransfer.V1
+
+import "yandex/cloud/datatransfer/v1/endpoint/common.proto";
+
+message OnPremiseMysql {
+ reserved 1, 3;
+ // Database port
+ int64 port = 2;
+ // Network interface for endpoint. If none will assume public ipv4
+ string subnet_id = 4;
+ repeated string hosts = 5;
+ // TLS settings for server connection. Disabled by default.
+ TLSMode tls_mode = 6;
+}
+message MysqlConnection {
+ oneof connection {
+ // Managed Service for MySQL cluster ID
+ string mdb_cluster_id = 1;
+ // Connection options for on-premise MySQL
+ OnPremiseMysql on_premise = 2;
+ }
+}
+message MysqlObjectTransferSettings {
+ // Views
+ //
+ // CREATE VIEW ...
+ ObjectTransferStage view = 1;
+ // Routines
+ //
+ // CREATE PROCEDURE ... ; CREATE FUNCTION ... ;
+ ObjectTransferStage routine = 2;
+ // Triggers
+ //
+ // CREATE TRIGGER ...
+ ObjectTransferStage trigger = 3;
+ ObjectTransferStage tables = 4;
+}
+message MysqlSource {
+ reserved 5 to 7, 9 to 10;
+ // Database connection settings
+ MysqlConnection connection = 1;
+ // Database name
+ //
+ // You can leave it empty, then it will be possible to transfer tables from several
+ // databases at the same time from this source.
+ string database = 2;
+ // User for database access.
+ string user = 3;
+ // Password for database access.
+ Secret password = 4;
+ // Database timezone
+ //
+ // Is used for parsing timestamps for saving source timezones. Accepts values from
+ // IANA timezone database. Default: local timezone.
+ string timezone = 8;
+ // Schema migration
+ //
+ // Select database objects to be transferred during activation or deactivation.
+ MysqlObjectTransferSettings object_transfer_settings = 11;
+ repeated string include_tables_regex = 12;
+ repeated string exclude_tables_regex = 13;
+ // Security groups
+ repeated string security_groups = 14;
+ // Database for service tables
+ //
+ // Default: data source database. Here created technical tables (__tm_keeper,
+ // __tm_gtid_keeper).
+ string service_database = 15;
+}
+message MysqlTarget {
+ reserved 9 to 14;
+ // Database connection settings
+ MysqlConnection connection = 1;
+ // Database name
+ //
+ // Allowed to leave it empty, then the tables will be created in databases with the
+ // same names as on the source. If this field is empty, then you must fill below db
+ // schema for service table.
+ string database = 2;
+ // User for database access.
+ string user = 3;
+ // Password for database access.
+ Secret password = 4;
+ // Default: NO_AUTO_VALUE_ON_ZERO,NO_DIR_IN_CREATE,NO_ENGINE_SUBSTITUTION.
+ string sql_mode = 5;
+ // Disable constraints checks
+ //
+ // Recommend to disable for increase replication speed, but if schema contain
+ // cascading operations we don't recommend to disable. This option set
+ // FOREIGN_KEY_CHECKS=0 and UNIQUE_CHECKS=0.
+ bool skip_constraint_checks = 6;
+ // Database timezone
+ //
+ // Is used for parsing timestamps for saving source timezones. Accepts values from
+ // IANA timezone database. Default: local timezone.
+ string timezone = 7;
+ // Cleanup policy
+ //
+ // Cleanup policy for activate, reactivate and reupload processes. Default is
+ // DISABLED.
+ CleanupPolicy cleanup_policy = 8;
+ // Database schema for service table
+ //
+ // Default: db name. Here created technical tables (__tm_keeper, __tm_gtid_keeper).
+ string service_database = 15;
+ // Security groups
+ repeated string security_groups = 16;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/parsers.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/parsers.proto
new file mode 100644
index 0000000000..14fc9460b0
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/parsers.proto
@@ -0,0 +1,33 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1.endpoint;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1/endpoint;endpoint";
+option java_package = "yandex.cloud.api.datatransfer.v1.endpoint";
+option csharp_namespace = "Yandex.Cloud.Datatransfer.V1.EndPoint"; // there is a clash with class Endpoint in namespace Yandex.Cloud.Datatransfer.V1
+
+import "yandex/cloud/datatransfer/v1/endpoint/common.proto";
+
+message Parser {
+ reserved 3, 5;
+ oneof parser {
+ GenericParserCommon json_parser = 1;
+ AuditTrailsV1Parser audit_trails_v1_parser = 2;
+ CloudLoggingParser cloud_logging_parser = 4;
+ GenericParserCommon tskv_parser = 6;
+ }
+}
+message GenericParserCommon {
+ reserved 4 to 6;
+ DataSchema data_schema = 1;
+ // Allow null keys, if no - null keys will be putted to unparsed data
+ bool null_keys_allowed = 2;
+ // Will add _rest column for all unknown fields
+ bool add_rest_column = 3;
+ // Unescape string values
+ bool unescape_string_values = 7;
+}
+message AuditTrailsV1Parser {
+}
+message CloudLoggingParser {
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/postgres.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/postgres.proto
new file mode 100644
index 0000000000..1acbce869b
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/postgres.proto
@@ -0,0 +1,147 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1.endpoint;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1/endpoint;endpoint";
+option java_package = "yandex.cloud.api.datatransfer.v1.endpoint";
+option csharp_namespace = "Yandex.Cloud.Datatransfer.V1.EndPoint"; // there is a clash with class Endpoint in namespace Yandex.Cloud.Datatransfer.V1
+
+import "yandex/cloud/datatransfer/v1/endpoint/common.proto";
+
+message PostgresObjectTransferSettings {
+ // Sequences
+ //
+ // CREATE SEQUENCE ...
+ ObjectTransferStage sequence = 1;
+ // Owned sequences
+ //
+ // CREATE SEQUENCE ... OWNED BY ...
+ ObjectTransferStage sequence_owned_by = 2;
+ // Tables
+ //
+ // CREATE TABLE ...
+ ObjectTransferStage table = 3;
+ // Primary keys
+ //
+ // ALTER TABLE ... ADD PRIMARY KEY ...
+ ObjectTransferStage primary_key = 4;
+ // Foreign keys
+ //
+ // ALTER TABLE ... ADD FOREIGN KEY ...
+ ObjectTransferStage fk_constraint = 5;
+ // Default values
+ //
+ // ALTER TABLE ... ALTER COLUMN ... SET DEFAULT ...
+ ObjectTransferStage default_values = 6;
+ // Constraints
+ //
+ // ALTER TABLE ... ADD CONSTRAINT ...
+ ObjectTransferStage constraint = 7;
+ // Indexes
+ //
+ // CREATE INDEX ...
+ ObjectTransferStage index = 8;
+ // Views
+ //
+ // CREATE VIEW ...
+ ObjectTransferStage view = 9;
+ // Functions
+ //
+ // CREATE FUNCTION ...
+ ObjectTransferStage function = 10;
+ // Triggers
+ //
+ // CREATE TRIGGER ...
+ ObjectTransferStage trigger = 11;
+ // Types
+ //
+ // CREATE TYPE ...
+ ObjectTransferStage type = 12;
+ // Rules
+ //
+ // CREATE RULE ...
+ ObjectTransferStage rule = 13;
+ // Collations
+ //
+ // CREATE COLLATION ...
+ ObjectTransferStage collation = 14;
+ // Policies
+ //
+ // CREATE POLICY ...
+ ObjectTransferStage policy = 15;
+ // Casts
+ //
+ // CREATE CAST ...
+ ObjectTransferStage cast = 16;
+ // Materialized views
+ //
+ // CREATE MATERIALIZED VIEW ...
+ ObjectTransferStage materialized_view = 17;
+ //
+ ObjectTransferStage sequence_set = 18;
+}
+message OnPremisePostgres {
+ reserved 1, 3;
+ // Will be used if the cluster ID is not specified.
+ int64 port = 2;
+ // Network interface for endpoint. If none will assume public ipv4
+ string subnet_id = 4;
+ repeated string hosts = 5;
+ // TLS settings for server connection. Disabled by default.
+ TLSMode tls_mode = 6;
+}
+message PostgresConnection {
+ oneof connection {
+ // Managed Service for PostgreSQL cluster ID
+ string mdb_cluster_id = 1;
+ // Connection options for on-premise PostgreSQL
+ OnPremisePostgres on_premise = 2;
+ }
+}
+message PostgresSource {
+ reserved 7, 10 to 12;
+ // Database connection settings
+ PostgresConnection connection = 1;
+ // Database name
+ string database = 2;
+ // User for database access.
+ string user = 3;
+ // Password for database access.
+ Secret password = 4;
+ // Included tables
+ //
+ // If none or empty list is presented, all tables are replicated. Full table name
+ // with schema. Can contain schema_name.* patterns.
+ repeated string include_tables = 5;
+ // Excluded tables
+ //
+ // If none or empty list is presented, all tables are replicated. Full table name
+ // with schema. Can contain schema_name.* patterns.
+ repeated string exclude_tables = 6;
+ // Maximum lag of replication slot (in bytes); after exceeding this limit
+ // replication will be aborted.
+ int64 slot_byte_lag_limit = 8;
+ // Database schema for service tables (__consumer_keeper,
+ // __data_transfer_mole_finder). Default is public
+ string service_schema = 9;
+ // Select database objects to be transferred during activation or deactivation.
+ PostgresObjectTransferSettings object_transfer_settings = 13;
+ // Security groups
+ repeated string security_groups = 14;
+}
+message PostgresTarget {
+ reserved 6;
+ // Database connection settings
+ PostgresConnection connection = 1;
+ // Database name
+ string database = 2;
+ // User for database access.
+ string user = 3;
+ // Password for database access.
+ Secret password = 4;
+ // Cleanup policy for activate, reactivate and reupload processes. Default is
+ // truncate.
+ CleanupPolicy cleanup_policy = 5;
+ // Security groups
+ repeated string security_groups = 7;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/serializers.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/serializers.proto
new file mode 100644
index 0000000000..83c21ad15f
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/serializers.proto
@@ -0,0 +1,33 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1.endpoint;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1/endpoint;endpoint";
+option java_package = "yandex.cloud.api.datatransfer.v1.endpoint";
+option csharp_namespace = "Yandex.Cloud.Datatransfer.V1.EndPoint"; // there is a clash with class Endpoint in namespace Yandex.Cloud.Datatransfer.V1
+
+message SerializerAuto {
+}
+message SerializerJSON {
+}
+message DebeziumSerializerParameter {
+ // Name of the serializer parameter
+ string key = 1;
+ // Value of the serializer parameter
+ string value = 2;
+}
+message SerializerDebezium {
+ // Settings of sterilization parameters as key-value pairs
+ repeated DebeziumSerializerParameter serializer_parameters = 1;
+}
+// Data serialization format
+message Serializer {
+ oneof serializer {
+ // Select the serialization format automatically
+ SerializerAuto serializer_auto = 1;
+ // Serialize data in json format
+ SerializerJSON serializer_json = 2;
+ // Serialize data in debezium format
+ SerializerDebezium serializer_debezium = 3;
+ }
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/ydb.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/ydb.proto
new file mode 100644
index 0000000000..f6d25f4ea1
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/ydb.proto
@@ -0,0 +1,58 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1.endpoint;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1/endpoint;endpoint";
+option java_package = "yandex.cloud.api.datatransfer.v1.endpoint";
+option csharp_namespace = "Yandex.Cloud.Datatransfer.V1.EndPoint"; // there is a clash with class Endpoint in namespace Yandex.Cloud.Datatransfer.V1
+
+enum YdbCleanupPolicy {
+ YDB_CLEANUP_POLICY_UNSPECIFIED = 0;
+ YDB_CLEANUP_POLICY_DISABLED = 1;
+ YDB_CLEANUP_POLICY_DROP = 2;
+}
+enum YdbDefaultCompression {
+ YDB_DEFAULT_COMPRESSION_UNSPECIFIED = 0;
+ YDB_DEFAULT_COMPRESSION_DISABLED = 1;
+ YDB_DEFAULT_COMPRESSION_LZ4 = 2;
+}
+message YdbSource {
+ reserved 3 to 4, 7 to 29, 31 to 32;
+ // Path in YDB where to store tables
+ string database = 1;
+ // Instance of YDB. example: ydb-ru-prestable.yandex.net:2135
+ string instance = 2;
+ repeated string paths = 5;
+ string service_account_id = 6;
+ // Network interface for endpoint. If none will assume public ipv4
+ string subnet_id = 30;
+ // Authorization Key
+ string sa_key_content = 33;
+ // Security groups
+ repeated string security_groups = 34;
+ // Pre-created change feed
+ string changefeed_custom_name = 35;
+}
+message YdbTarget {
+ reserved 3 to 9, 12 to 20, 22 to 29, 31;
+ // Path in YDB where to store tables
+ string database = 1;
+ // Instance of YDB. example: ydb-ru-prestable.yandex.net:2135
+ string instance = 2;
+ // Path extension for database, each table will be layouted into this path
+ string path = 10;
+ string service_account_id = 11;
+ // Cleanup policy
+ YdbCleanupPolicy cleanup_policy = 21;
+ // Network interface for endpoint. If none will assume public ipv4
+ string subnet_id = 30;
+ // SA content
+ string sa_key_content = 32;
+ // Security groups
+ repeated string security_groups = 33;
+ // Should create column-oriented table (OLAP). By default it creates row-oriented
+ // (OLTP)
+ bool is_table_column_oriented = 34;
+ // Compression that will be used for default columns family on YDB table creation
+ YdbDefaultCompression default_compression = 35;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/yds.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/yds.proto
new file mode 100644
index 0000000000..66c23c1f4c
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint/yds.proto
@@ -0,0 +1,63 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1.endpoint;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1/endpoint;endpoint";
+option java_package = "yandex.cloud.api.datatransfer.v1.endpoint";
+option csharp_namespace = "Yandex.Cloud.Datatransfer.V1.EndPoint"; // there is a clash with class Endpoint in namespace Yandex.Cloud.Datatransfer.V1
+
+import "yandex/cloud/datatransfer/v1/endpoint/parsers.proto";
+import "yandex/cloud/datatransfer/v1/endpoint/serializers.proto";
+
+// Compression codecs supported for YDS stream data.
+enum YdsCompressionCodec {
+ YDS_COMPRESSION_CODEC_UNSPECIFIED = 0;
+ YDS_COMPRESSION_CODEC_RAW = 1;
+ YDS_COMPRESSION_CODEC_GZIP = 2;
+ // NOTE(review): value 3 is skipped but not declared `reserved` — confirm upstream.
+ YDS_COMPRESSION_CODEC_ZSTD = 4;
+}
+// Settings of a Yandex Data Streams (YDS) source endpoint.
+message YDSSource {
+ // Field numbers retired in earlier revisions; never reuse them.
+ reserved 3 to 7, 12 to 19, 21 to 29, 31 to 33;
+ // Database
+ string database = 1;
+ // Stream
+ string stream = 2;
+ // SA which has read access to the stream.
+ string service_account_id = 8;
+ // Compression codecs the reader should accept.
+ repeated YdsCompressionCodec supported_codecs = 9;
+ // Data parsing rules
+ Parser parser = 10;
+ // Whether to keep working if the consumer read lag exceeds the TTL of the topic.
+ // False: stop the transfer in an error state when lost data is detected.
+ // True: continue working, losing part of the data.
+ bool allow_ttl_rewind = 11;
+ // Endpoint address; used for dedicated databases.
+ string endpoint = 20;
+ // Network interface for endpoint. If none will assume public ipv4
+ string subnet_id = 30;
+ // Security groups
+ repeated string security_groups = 34;
+ // Consumer to read with; original note: "for important streams" —
+ // presumably a pre-created consumer, confirm.
+ string consumer = 35;
+}
+// Settings of a Yandex Data Streams (YDS) target endpoint.
+message YDSTarget {
+ // Field numbers retired in earlier revisions; never reuse them.
+ reserved 5 to 7, 9 to 19, 21 to 29, 31 to 33;
+ // Database
+ string database = 1;
+ // Stream
+ string stream = 2;
+ // SA which has read access to the stream.
+ // NOTE(review): comment appears copied from the source message; a target
+ // presumably needs write access — confirm.
+ string service_account_id = 3;
+ // Save transaction order
+ // Not to split events queue into separate per-table queues.
+ // Incompatible with setting Topic prefix, only with Topic full name.
+ bool save_tx_order = 4;
+ // Data serialization format
+ Serializer serializer = 8;
+ // Endpoint address; used for dedicated databases.
+ string endpoint = 20;
+ // Network interface for endpoint. If none will assume public ipv4
+ string subnet_id = 30;
+ // Security groups
+ repeated string security_groups = 34;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint_service.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint_service.proto
new file mode 100644
index 0000000000..90accbe631
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/endpoint_service.proto
@@ -0,0 +1,111 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1;datatransfer";
+option java_package = "yandex.cloud.api.datatransfer.v1";
+
+import "google/api/annotations.proto";
+import "google/protobuf/field_mask.proto";
+import "yandex/cloud/datatransfer/v1/endpoint.proto";
+import "yandex/cloud/api/operation.proto";
+import "yandex/cloud/operation/operation.proto";
+
+// A set of methods for managing data transfer endpoints.
+service EndpointService {
+ // Returns the specified endpoint.
+ rpc Get(GetEndpointRequest) returns (Endpoint) {
+ option (google.api.http) = { get: "/v1/endpoint/{endpoint_id}" };
+ }
+ // Lists endpoints in the specified folder.
+ rpc List(ListEndpointsRequest) returns (ListEndpointsResponse) {
+ option (google.api.http) = { get: "/v1/endpoints/list/{folder_id}" };
+ }
+ // Creates an endpoint. Long-running; the operation resolves to the created Endpoint.
+ rpc Create(CreateEndpointRequest) returns (operation.Operation) {
+ option (google.api.http) = { post: "/v1/endpoint" body: "*" };
+ option (api.operation) = {
+ metadata: "CreateEndpointMetadata"
+ response: "Endpoint"
+ };
+ }
+ // Updates the specified endpoint; fields to change are selected by update_mask.
+ rpc Update(UpdateEndpointRequest) returns (operation.Operation) {
+ option (google.api.http) = { patch: "/v1/endpoint/{endpoint_id}" body: "*" };
+ option (api.operation) = {
+ metadata: "UpdateEndpointMetadata"
+ response: "Endpoint"
+ };
+ }
+ // Deletes the specified endpoint; the operation completes with Empty.
+ rpc Delete(DeleteEndpointRequest) returns (operation.Operation) {
+ option (google.api.http) = { delete: "/v1/endpoint/{endpoint_id}" };
+ option (api.operation) = {
+ metadata: "DeleteEndpointMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+}
+message GetEndpointRequest {
+ // Identifier of the endpoint to return.
+ string endpoint_id = 1;
+}
+message ListEndpointsRequest {
+ // Identifier of the folder containing the endpoints to be listed.
+ string folder_id = 1;
+ // The maximum number of endpoints to be sent in the response message. If the
+ // folder contains more endpoints than `page_size`, `next_page_token` will be
+ // included in the response message. Include it into the subsequent
+ // `ListEndpointRequest` to fetch the next page. Defaults to `100` if not
+ // specified. The maximum allowed value for this field is `500`.
+ int64 page_size = 2;
+ // Opaque value identifying the endpoints page to be fetched. Should be empty
+ // in the first `ListEndpointsRequest`. Subsequent requests should have this
+ // field filled with the `next_page_token` from the previous
+ // `ListEndpointsResponse`.
+ string page_token = 3;
+}
+message ListEndpointsResponse {
+ // The list of endpoints. If there are more endpoints in the folder, then
+ // `next_page_token` is a non-empty string to be included into the subsequent
+ // `ListEndpointsRequest` to fetch the next endpoints page.
+ repeated Endpoint endpoints = 1;
+ // Opaque value identifying the next endpoints page. This field is empty if there
+ // are no more endpoints in the folder. Otherwise, it is non-empty and should be
+ // included in the subsequent `ListEndpointsRequest` to fetch the next endpoints
+ // page.
+ string next_page_token = 2;
+}
+message CreateEndpointRequest {
+ // Reserved field numbers; do not reuse.
+ reserved 5 to 51;
+ // Identifier of the folder to create the endpoint in.
+ string folder_id = 1;
+ // Endpoint name.
+ string name = 2;
+ // Endpoint description.
+ string description = 3;
+ // Resource labels as `key:value` pairs.
+ map<string,string> labels = 4;
+ // Endpoint configuration (source or target settings).
+ EndpointSettings settings = 52;
+}
+message CreateEndpointMetadata {
+ // Identifier of the endpoint being created.
+ string endpoint_id = 1;
+}
+message UpdateEndpointRequest {
+ // Reserved field numbers; do not reuse.
+ reserved 1 to 9, 14 to 51, 53 to 59;
+ // Identifier of the endpoint to be updated.
+ string endpoint_id = 10;
+ // The new endpoint name. Must be unique within the folder.
+ string name = 11;
+ // The new description for the endpoint.
+ string description = 12;
+ // New set of resource labels as `key:value` pairs.
+ map<string,string> labels = 13;
+ // The new endpoint settings (source or target configuration).
+ EndpointSettings settings = 52;
+ // Field mask specifying endpoint fields to be updated. Semantics for this field is
+ // described here:
+ // <https://pkg.go.dev/google.golang.org/protobuf/types/known/fieldmaskpb#FieldMask>
+ // The only exception: if the repeated field is specified in the mask, then
+ // the new value replaces the old one instead of being appended to the old one.
+ google.protobuf.FieldMask update_mask = 60;
+}
+message UpdateEndpointMetadata {
+ // Identifier of the endpoint being updated.
+ string endpoint_id = 1;
+}
+message DeleteEndpointRequest {
+ // Identifier of the endpoint to delete.
+ string endpoint_id = 1;
+}
+message DeleteEndpointMetadata {
+ // Identifier of the endpoint being deleted.
+ string endpoint_id = 1;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/transfer.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/transfer.proto
new file mode 100644
index 0000000000..ed4abcad5c
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/transfer.proto
@@ -0,0 +1,215 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1;datatransfer";
+option java_package = "yandex.cloud.api.datatransfer.v1";
+
+import "yandex/cloud/datatransfer/v1/endpoint.proto";
+
+// Kind of work a transfer performs: a one-time snapshot copy, continuous
+// change replication (increment), or both.
+enum TransferType {
+ TRANSFER_TYPE_UNSPECIFIED = 0;
+ // Snapshot and increment
+ SNAPSHOT_AND_INCREMENT = 1;
+ // Snapshot
+ SNAPSHOT_ONLY = 2;
+ // Increment
+ INCREMENT_ONLY = 3;
+}
+// Lifecycle state of a transfer.
+enum TransferStatus {
+ TRANSFER_STATUS_UNSPECIFIED = 0;
+ // Transfer does some work before running
+ CREATING = 1;
+ // Transfer created but not started by user
+ CREATED = 2;
+ // Transfer currently doing replication work
+ RUNNING = 3;
+ // Transfer is shutting down
+ STOPPING = 4;
+ // Transfer stopped by user
+ STOPPED = 5;
+ // Transfer stopped by system
+ ERROR = 6;
+ // Transfer is copying a snapshot
+ SNAPSHOTTING = 7;
+ // Transfer has reached its terminal phase
+ DONE = 8;
+}
+// Transfer core entity: moves data from a source endpoint to a target endpoint.
+message Transfer {
+ // Field numbers retired in earlier revisions; never reuse them.
+ reserved 3, 11, 13 to 14, 16, 18 to 21;
+ // Transfer identifier.
+ string id = 1;
+ // Identifier of the folder the transfer belongs to.
+ string folder_id = 2;
+ // Transfer name.
+ string name = 4;
+ // Transfer description.
+ string description = 5;
+ // Resource labels as `key:value` pairs.
+ map<string,string> labels = 6;
+ // Source endpoint.
+ Endpoint source = 7;
+ // Target endpoint.
+ Endpoint target = 8;
+ // Execution runtime settings.
+ Runtime runtime = 9;
+ // Current lifecycle status.
+ TransferStatus status = 10;
+ // Kind of work performed (snapshot/increment).
+ TransferType type = 12;
+ // Human-readable warning message, if any.
+ string warning = 15;
+ // Optional data transformation rules.
+ Transformation transformation = 17;
+ // NOTE(review): presumably marks a transfer running on a prestable
+ // installation — semantics not shown here, confirm.
+ bool prestable = 22;
+}
+// Execution environment of a transfer.
+message Runtime {
+ // Field numbers retired in earlier revisions; never reuse them.
+ reserved 1 to 3;
+ oneof runtime {
+ // Yandex Cloud managed runtime.
+ YcRuntime yc_runtime = 4;
+ }
+}
+// Sharding parameters for the upload (snapshot) phase.
+message ShardingUploadParams {
+ // Number of upload jobs.
+ int64 job_count = 1;
+ // Number of processes — presumably per job; confirm exact semantics.
+ int64 process_count = 2;
+}
+// Settings of the Yandex Cloud runtime.
+message YcRuntime {
+ // Field numbers retired in earlier revisions; never reuse them.
+ reserved 2 to 7;
+ // Number of jobs to run the transfer with.
+ int64 job_count = 1;
+ // Sharding parameters for the upload phase.
+ ShardingUploadParams upload_shard_params = 8;
+}
+// Masking function applied to column values; currently only hashing is
+// supported.
+message MaskFunction {
+ oneof mask_function {
+ // Hash mask function
+ MaskFunctionHash mask_function_hash = 1;
+ }
+}
+// Hash data using HMAC
+message MaskFunctionHash {
+ // This string will be used in the HMAC(sha256, salt) function applied to the
+ // column data.
+ string user_defined_salt = 1;
+}
+// Filter tables using lists of included and excluded tables.
+message TablesFilter {
+ // List of tables that will be included in the transfer
+ repeated string include_tables = 1;
+ // List of tables that will be excluded from the transfer
+ repeated string exclude_tables = 2;
+}
+// Filter columns using lists of included and excluded columns.
+message ColumnsFilter {
+ // List of columns that will be included in the transfer
+ repeated string include_columns = 1;
+ // List of columns that will be excluded from the transfer
+ repeated string exclude_columns = 2;
+}
+// Mask field transformer allows you to hash data.
+message MaskFieldTransformer {
+ // List of included and excluded tables
+ TablesFilter tables = 1;
+ // Specify the name of the column for data masking (a regular expression).
+ repeated string columns = 2;
+ // Mask function
+ MaskFunction function = 3;
+}
+// Set up a list of table columns to transfer.
+message FilterColumnsTransformer {
+ // List of the tables to filter using lists of included and excluded tables.
+ TablesFilter tables = 1;
+ // List of the columns to transfer to the target tables using lists of included and
+ // excluded columns.
+ ColumnsFilter columns = 2;
+}
+// Qualified table name.
+message Table {
+ // Namespace of the table — presumably the schema or database, depending on
+ // the source type; confirm.
+ string name_space = 1;
+ // Table name.
+ string name = 2;
+}
+// Specify rule for renaming a table.
+message RenameTable {
+ // Specify the current name of the table in the source.
+ Table original_name = 1;
+ // Specify the new name for this table in the target.
+ Table new_name = 2;
+}
+// Set rules for renaming tables by specifying the current names of the tables in
+// the source and new names for these tables in the target.
+message RenameTablesTransformer {
+ // List of renaming rules
+ repeated RenameTable rename_tables = 1;
+}
+// Override primary keys.
+message ReplacePrimaryKeyTransformer {
+ // List of included and excluded tables
+ TablesFilter tables = 1;
+ // List of columns to be used as primary keys
+ repeated string keys = 2;
+}
+// Convert column values to strings.
+// The values will be converted depending on the source type.
+// Conversion rules are described in [documentation](/docs/data-transfer/concepts/data-transformation#convert-to-string).
+message ToStringTransformer {
+ // List of included and excluded tables
+ TablesFilter tables = 1;
+ // List of included and excluded columns
+ ColumnsFilter columns = 2;
+}
+// Set the number of shards for particular tables and a list of columns whose
+// values will be used for calculating a hash to determine a shard.
+message SharderTransformer {
+ // List of included and excluded tables
+ TablesFilter tables = 1;
+ // List of included and excluded columns
+ ColumnsFilter columns = 2;
+ // Number of shards
+ int64 shards_count = 3;
+}
+// A transfer splits the X table into multiple tables (X_1, X_2, ..., X_n) based
+// on data. If a row was located in the X table before it was split, it is now in
+// the X_i table, where i is determined by the column list and split string
+// parameters.
+// Example: if the column list has two columns, month of birth and gender, and
+// the split string is "@", information about an employee named John who was born
+// on February 11, 1984 moves from the Employees table to a new table named
+// Employees@February@male.
+message TableSplitterTransformer {
+ // List of included and excluded tables
+ TablesFilter tables = 1;
+ // Specify the columns in the tables to be partitioned.
+ repeated string columns = 2;
+ // Specify the split string to be used for merging components in a new table name.
+ string splitter = 3;
+}
+// This filter only applies to transfers with queues (Logbroker or Apache Kafka®)
+// as a data source.
+// When running a transfer, only the strings meeting the specified criteria remain
+// in a changefeed.
+message FilterRowsTransformer {
+ // List of included and excluded tables
+ TablesFilter tables = 1;
+ // Filtering criterion. This can be comparison operators for numeric, string, and
+ // Boolean values,
+ // comparison to NULL, and checking whether a substring is part of a string.
+ // For more details see [documentation](/docs/data-transfer/concepts/data-transformation#append-only-sources).
+ string filter = 2;
+}
+// Some transformers may have limitations and only apply to some source-target
+// pairs.
+message Transformer {
+ // Field numbers retired in earlier revisions; never reuse them.
+ reserved 3, 5, 8, 10 to 12;
+ oneof transformer {
+ // Hash values of the selected columns.
+ MaskFieldTransformer mask_field = 1;
+ // Keep or drop columns per table.
+ FilterColumnsTransformer filter_columns = 2;
+ // Rename tables between source and target.
+ RenameTablesTransformer rename_tables = 4;
+ // Override primary keys.
+ ReplacePrimaryKeyTransformer replace_primary_key = 6;
+ // Convert column values to strings.
+ ToStringTransformer convert_to_string = 7;
+ // Shard tables by a hash of selected columns.
+ SharderTransformer sharder_transformer = 9;
+ // Split a table into multiple tables based on column values.
+ TableSplitterTransformer table_splitter_transformer = 13;
+ // Filter rows of queue sources by a criterion.
+ FilterRowsTransformer filter_rows = 14;
+ }
+}
+// Transformation is converting data using special transformer functions.
+// These functions are executed on a data stream, applied to each data change item,
+// and transform them.
+// A transformer can be run at both the metadata and data levels.
+// Data can only be transformed if the source and target are of different types.
+message Transformation {
+ // Transformers are set as a list.
+ // When activating a transfer, a transformation plan is made for the tables that
+ // match the specified criteria.
+ // Transformers are applied to the tables in the sequence specified in the list.
+ repeated Transformer transformers = 1;
+}
diff --git a/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/transfer_service.proto b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/transfer_service.proto
new file mode 100644
index 0000000000..d6b1d7fdbf
--- /dev/null
+++ b/contrib/libs/yandex-cloud-api-protos/yandex/cloud/datatransfer/v1/transfer_service.proto
@@ -0,0 +1,143 @@
+syntax = "proto3";
+
+package yandex.cloud.datatransfer.v1;
+
+option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/datatransfer/v1;datatransfer";
+option java_package = "yandex.cloud.api.datatransfer.v1";
+
+import "google/api/annotations.proto";
+import "google/protobuf/field_mask.proto";
+import "yandex/cloud/datatransfer/v1/transfer.proto";
+import "yandex/cloud/api/operation.proto";
+import "yandex/cloud/operation/operation.proto";
+
+// A set of methods for managing transfers.
+service TransferService {
+ // Creates a transfer. Long-running; the operation resolves to the created Transfer.
+ rpc Create(CreateTransferRequest) returns (operation.Operation) {
+ option (google.api.http) = { post: "/v1/transfer" body: "*" };
+ option (api.operation) = {
+ metadata: "CreateTransferMetadata"
+ response: "Transfer"
+ };
+ }
+ // Updates the specified transfer; fields to change are selected by update_mask.
+ rpc Update(UpdateTransferRequest) returns (operation.Operation) {
+ option (google.api.http) = { patch: "/v1/transfer/{transfer_id}" body: "*" };
+ option (api.operation) = {
+ metadata: "UpdateTransferMetadata"
+ response: "Transfer"
+ };
+ }
+ // Deletes the specified transfer; the operation completes with Empty.
+ rpc Delete(DeleteTransferRequest) returns (operation.Operation) {
+ option (google.api.http) = { delete: "/v1/transfer/{transfer_id}" };
+ option (api.operation) = {
+ metadata: "DeleteTransferMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+ // Lists transfers in the specified folder.
+ rpc List(ListTransfersRequest) returns (ListTransfersResponse) {
+ option (google.api.http) = { get: "/v1/transfers/list/{folder_id}" };
+ }
+ // Returns the specified transfer.
+ rpc Get(GetTransferRequest) returns (Transfer) {
+ option (google.api.http) = { get: "/v1/transfer/{transfer_id}" };
+ }
+ // Deactivates (stops) the specified transfer.
+ rpc Deactivate(DeactivateTransferRequest) returns (operation.Operation) {
+ option (google.api.http) = { post: "/v1/transfer/{transfer_id}:deactivate" body: "*" };
+ option (api.operation) = {
+ metadata: "DeactivateTransferMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+ // Activates (starts) the specified transfer.
+ rpc Activate(ActivateTransferRequest) returns (operation.Operation) {
+ option (google.api.http) = { post: "/v1/transfer/{transfer_id}:activate" body: "*" };
+ option (api.operation) = {
+ metadata: "ActivateTransferMetadata"
+ response: "google.protobuf.Empty"
+ };
+ }
+}
+message CreateTransferRequest {
+ // Reserved field number; do not reuse.
+ reserved 9;
+ // Identifier of the source endpoint.
+ string source_id = 1;
+ // Identifier of the target endpoint.
+ string target_id = 2;
+ // Transfer description.
+ string description = 3;
+ // Identifier of the folder to create the transfer in.
+ string folder_id = 4;
+ // Execution runtime settings.
+ Runtime runtime = 5;
+ // Kind of work to perform (snapshot/increment).
+ TransferType type = 6;
+ // Transfer name.
+ string name = 7;
+ // Resource labels as `key:value` pairs.
+ map<string,string> labels = 8;
+ // Optional data transformation rules.
+ Transformation transformation = 10;
+}
+message CreateTransferMetadata {
+ // Identifier of the transfer being created.
+ string transfer_id = 1;
+}
+message UpdateTransferRequest {
+ // Reserved field number; do not reuse.
+ reserved 7;
+ // Identifier of the transfer to be updated.
+ string transfer_id = 1;
+ // The new description for the transfer.
+ string description = 2;
+ // New runtime settings.
+ Runtime runtime = 3;
+ // The new transfer name. Must be unique within the folder.
+ string name = 4;
+ // Field mask specifying transfer fields to be updated. Semantics for this field is
+ // described here:
+ // <https://pkg.go.dev/google.golang.org/protobuf/types/known/fieldmaskpb#FieldMask>
+ // The only exception: if the repeated field is specified in the mask, then
+ // the new value replaces the old one instead of being appended to the old one.
+ google.protobuf.FieldMask update_mask = 5;
+ // New set of resource labels as `key:value` pairs.
+ map<string,string> labels = 6;
+ // New data transformation rules.
+ Transformation transformation = 8;
+}
+message UpdateTransferMetadata {
+ // Identifier of the transfer being updated.
+ string transfer_id = 1;
+}
+message DeleteTransferRequest {
+ // Identifier of the transfer to delete.
+ string transfer_id = 1;
+}
+message DeleteTransferMetadata {
+ // Identifier of the transfer being deleted.
+ string transfer_id = 1;
+}
+message ListTransfersRequest {
+ // Reserved field number; do not reuse.
+ reserved 1;
+ // Identifier of the folder containing the transfers to be listed.
+ string folder_id = 2;
+ // The maximum number of transfers to be sent in the response message. If the
+ // folder contains more transfers than `page_size`, `next_page_token` will be
+ // included in the response message. Include it into the subsequent
+ // `ListTransfersRequest` to fetch the next page. Defaults to `100` if not
+ // specified. The maximum allowed value for this field is `500`.
+ int64 page_size = 3;
+ // Opaque value identifying the transfers page to be fetched. Should be empty
+ // in the first `ListTransfersRequest`. Subsequent requests should have this
+ // field filled with the `next_page_token` from the previous
+ // `ListTransfersResponse`.
+ string page_token = 4;
+}
+message ListTransfersResponse {
+ // The list of transfers. If there are more transfers in the folder, then
+ // `next_page_token` is a non-empty string to be included into the subsequent
+ // `ListTransfersRequest` to fetch the next transfers page.
+ repeated Transfer transfers = 1;
+ // Opaque value identifying the next transfers page. This field is empty if there
+ // are no more transfers in the folder. Otherwise it is non-empty and should be
+ // included in the subsequent `ListTransfersRequest` to fetch the next transfers
+ // page.
+ string next_page_token = 2;
+}
+message GetTransferRequest {
+ // Identifier of the transfer to return.
+ string transfer_id = 1;
+}
+message DeactivateTransferRequest {
+ // Identifier of the transfer to deactivate.
+ string transfer_id = 1;
+}
+message DeactivateTransferMetadata {
+ // Identifier of the transfer being deactivated.
+ string transfer_id = 1;
+}
+message ActivateTransferRequest {
+ // Identifier of the transfer to activate.
+ string transfer_id = 1;
+}
+message ActivateTransferMetadata {
+ // Identifier of the transfer being activated.
+ string transfer_id = 1;
+}