syntax = "proto3";
package yandex.cloud.mdb.kafka.v1;
import "google/protobuf/descriptor.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/wrappers.proto";
import "yandex/cloud/mdb/kafka/v1/common.proto";
import "yandex/cloud/mdb/kafka/v1/maintenance.proto";
option go_package = "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/kafka/v1;kafka";
option java_package = "yandex.cloud.api.mdb.kafka.v1";
// An Apache Kafka® cluster resource.
// For more information, see the [Concepts](/docs/managed-kafka/concepts) section of the documentation.
message Cluster {
enum Environment {
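// Deployment environment is unspecified. Default value.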
ENVIRONMENT_UNSPECIFIED = 0;
// Stable environment with a conservative update policy: only hotfixes are applied during regular maintenance.
PRODUCTION = 1;
// Environment with a more aggressive update policy: new versions are rolled out irrespective of backward compatibility.
PRESTABLE = 2;
}
enum Health {
// State of the cluster is unknown ([Host.health] of all hosts in the cluster is `UNKNOWN`).
HEALTH_UNKNOWN = 0;
// Cluster is alive and well ([Host.health] of all hosts in the cluster is `ALIVE`).
ALIVE = 1;
// Cluster is inoperable ([Host.health] of all hosts in the cluster is `DEAD`).
DEAD = 2;
// Cluster is in degraded state ([Host.health] of at least one of the hosts in the cluster is not `ALIVE`).
DEGRADED = 3;
}
enum Status {
// Cluster state is unknown.
STATUS_UNKNOWN = 0;
// Cluster is being created.
CREATING = 1;
// Cluster is running normally.
RUNNING = 2;
// Cluster encountered a problem and cannot operate.
ERROR = 3;
// Cluster is being updated.
UPDATING = 4;
// Cluster is stopping.
STOPPING = 5;
// Cluster stopped.
STOPPED = 6;
// Cluster is starting.
STARTING = 7;
}
// ID of the Apache Kafka® cluster.
// This ID is assigned at creation time.
string id = 1;
// ID of the folder that the Apache Kafka® cluster belongs to.
string folder_id = 2;
// Creation timestamp.
google.protobuf.Timestamp created_at = 3;
// Name of the Apache Kafka® cluster.
// The name must be unique within the folder. 1-63 characters long. Value must match the regular expression `[a-zA-Z0-9_-]*`.
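// For example, `my-kafka-cluster` matches this pattern, while `my cluster` (contains a space) does not.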
string name = 4;
// Description of the Apache Kafka® cluster. 0-256 characters long.
string description = 5;
// Custom labels for the Apache Kafka® cluster as `key:value` pairs.
// A maximum of 64 labels per resource is allowed.
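// For example (hypothetical values): `{"env": "production", "owner": "billing-team"}`.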
map<string, string> labels = 6;
// Deployment environment of the Apache Kafka® cluster.
Environment environment = 7;
// Description of monitoring systems relevant to the Apache Kafka® cluster.
repeated Monitoring monitoring = 8;
// Configuration of the Apache Kafka® cluster.
ConfigSpec config = 9;
// ID of the network that the cluster belongs to.
string network_id = 10;
// Aggregated cluster health.
Health health = 11;
// Current state of the cluster.
Status status = 12;
// User security group IDs.
repeated string security_group_ids = 13;
// Host groups hosting VMs of the cluster.
repeated string host_group_ids = 14;
// Deletion protection prevents the cluster from being deleted.
bool deletion_protection = 15;
// Window of maintenance operations.
MaintenanceWindow maintenance_window = 16;
// Scheduled maintenance operation.
MaintenanceOperation planned_operation = 17;
}
// Metadata of monitoring system.
message Monitoring {
// Name of the monitoring system.
string name = 1;
// Description of the monitoring system.
string description = 2;
// Link to the monitoring system charts for the Apache Kafka® cluster.
string link = 3;
}
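// Configuration of the Apache Kafka® cluster.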
message ConfigSpec {
message Kafka {
reserved 2 to 3;
// Resources allocated to Kafka brokers.
Resources resources = 1;
// Kafka broker configuration.
oneof kafka_config {
KafkaConfig2_8 kafka_config_2_8 = 4 [json_name = "kafkaConfig_2_8"];
KafkaConfig3 kafka_config_3 = 5 [json_name = "kafkaConfig_3"];
}
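// Illustrative JSON sketch (hypothetical values) of the effect of the
// `json_name` options above: the chosen oneof branch is serialized under the
// annotated key, e.g.
//   { "resources": { ... }, "kafkaConfig_2_8": { "logRetentionHours": "168" } }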
}
message Zookeeper {
// Resources allocated to ZooKeeper hosts.
Resources resources = 1;
}
// Version of Apache Kafka® used in the cluster. Possible values correspond to the [kafka_config] variants below: `2.8` and `3.x` versions.
string version = 1;
// Configuration and resource allocation for Kafka brokers.
Kafka kafka = 2;
// Configuration and resource allocation for ZooKeeper hosts.
Zookeeper zookeeper = 3;
// IDs of availability zones where Kafka brokers reside.
repeated string zone_id = 4;
// The number of Kafka brokers deployed in each availability zone.
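// For example, 3 zones in [zone_id] with `brokers_count = 2` yield 6 brokers in total.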
google.protobuf.Int64Value brokers_count = 5;
// The flag that defines whether a public IP address is assigned to the cluster.
// If the value is `true`, then the Apache Kafka® cluster is available on the Internet via its public IP address.
bool assign_public_ip = 6;
// Allows topic management via the Admin API.
// Deprecated: the feature is now enabled permanently.
bool unmanaged_topics = 7 [deprecated = true];
// Enables managed Schema Registry on the cluster.
bool schema_registry = 8;
// Access policy for external services.
Access access = 9;
message RestAPIConfig {
// Whether the REST API is enabled for this cluster.
bool enabled = 1;
}
// Configuration of REST API.
RestAPIConfig rest_api_config = 10;
}
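// Computational and storage resources available to a cluster host.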
message Resources {
// ID of the preset for computational resources available to a host (CPU, memory, etc.).
// All available presets are listed in the [documentation](/docs/managed-kafka/concepts/instance-types).
string resource_preset_id = 1;
// Volume of the storage available to a host, in bytes. Must be greater than 2 * (partition segment size in bytes) * (partition count), so that each partition can hold one active segment file and one closed segment file pending deletion.
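// Illustrative example: with Kafka's default segment size of 1073741824 bytes (1 GiB) and 100 partitions, disk_size must exceed 2 * 1073741824 * 100 = 214748364800 bytes (200 GiB).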
int64 disk_size = 2;
// Type of the storage environment for the host.
string disk_type_id = 3;
}
// Kafka version 2.8 broker configuration.
message KafkaConfig2_8 {
// Cluster topics compression type.
CompressionType compression_type = 1;
// The number of messages accumulated on a log partition before messages are flushed to disk.
//
// This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_8.flush_messages] setting.
google.protobuf.Int64Value log_flush_interval_messages = 2;
// The maximum time (in milliseconds) that a message in any topic is kept in memory before it is flushed to disk.
// If not set, the value of [log_flush_scheduler_interval_ms] is used.
//
// This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_8.flush_ms] setting.
google.protobuf.Int64Value log_flush_interval_ms = 3;
// The frequency of checks (in milliseconds) for any logs that need to be flushed to disk.
// This check is done by the log flusher.
google.protobuf.Int64Value log_flush_scheduler_interval_ms = 4;
// Partition size limit; Kafka will discard old log segments to free up space if `delete` [TopicConfig2_8.cleanup_policy] is in effect.
// This setting is helpful if you need to control the size of a log due to limited disk space.
//
// This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_8.retention_bytes] setting.
google.protobuf.Int64Value log_retention_bytes = 5;
// The number of hours to keep a log segment file before deleting it.
google.protobuf.Int64Value log_retention_hours = 6;
// The number of minutes to keep a log segment file before deleting it.
//
// If not set, the value of [log_retention_hours] is used.
google.protobuf.Int64Value log_retention_minutes = 7;
// The number of milliseconds to keep a log segment file before deleting it.
//
// If not set, the value of [log_retention_minutes] is used.
//
// This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_8.retention_ms] setting.
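// Illustrative example: if [log_retention_ms] is unset and [log_retention_minutes] = 4320, the effective retention is 4320 minutes (3 days), and [log_retention_hours] is ignored.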
google.protobuf.Int64Value log_retention_ms = 8;
// The maximum size of a single log file.
//
// This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_8.segment_bytes] setting.
google.protobuf.Int64Value log_segment_bytes = 9;
// Whether a file should be preallocated when a new log segment is created.
//
// This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig2_8.preallocate] setting.
google.protobuf.BoolValue log_preallocate = 10;
// The SO_SNDBUF buffer of the socket server sockets. If the value is -1, the OS default will be used.
google.protobuf.Int64Value socket_send_buffer_bytes = 11;
// The SO_RCVBUF buffer of the socket server sockets. If the value is -1, the OS default will be used.
google.protobuf.Int64Value socket_receive_buffer_bytes = 12;
// Enables automatic topic creation on the server.
google.protobuf.BoolValue auto_create_topics_enable = 13;
// Cluster-wide default number of partitions per topic.
google.protobuf.Int64Value num_partitions = 14;
// Cluster-wide default replication factor for topics.
google.protobuf.Int64Value default_replication_factor = 15;
// The largest record batch size allowed by Kafka. Default value: 1048588.
google.protobuf.Int64Value message_max_bytes = 16;
// The number of bytes of messages to attempt to fetch for each partition. Default value: 1048576.
google.protobuf.Int64Value replica_fetch_max_bytes = 17;
// A list of cipher suites.
repeated string ssl_cipher_suites = 18;
// Retention time (in minutes) for committed offsets after a consumer group loses all its consumers. Default: 10080 (7 days).
google.protobuf.Int64Value offsets_retention_minutes = 19;
// The list of SASL mechanisms enabled in the Kafka server. Default: [SCRAM_SHA_512].
repeated SaslMechanism sasl_enabled_mechanisms = 20;
}
// Kafka version 3.x broker configuration.
message KafkaConfig3 {
// Cluster topics compression type.
CompressionType compression_type = 1;
// The number of messages accumulated on a log partition before messages are flushed to disk.
//
// This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.flush_messages] setting.
google.protobuf.Int64Value log_flush_interval_messages = 2;
// The maximum time (in milliseconds) that a message in any topic is kept in memory before it is flushed to disk.
// If not set, the value of [log_flush_scheduler_interval_ms] is used.
//
// This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.flush_ms] setting.
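// Illustrative example: if [log_flush_interval_ms] is unset and [log_flush_scheduler_interval_ms] = 3000, the maximum in-memory time falls back to the 3000 ms scheduler interval.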
google.protobuf.Int64Value log_flush_interval_ms = 3;
// The frequency of checks (in milliseconds) for any logs that need to be flushed to disk.
// This check is done by the log flusher.
google.protobuf.Int64Value log_flush_scheduler_interval_ms = 4;
// Partition size limit; Kafka will discard old log segments to free up space if `delete` [TopicConfig3.cleanup_policy] is in effect.
// This setting is helpful if you need to control the size of a log due to limited disk space.
//
// This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.retention_bytes] setting.
google.protobuf.Int64Value log_retention_bytes = 5;
// The number of hours to keep a log segment file before deleting it.
google.protobuf.Int64Value log_retention_hours = 6;
// The number of minutes to keep a log segment file before deleting it.
//
// If not set, the value of [log_retention_hours] is used.
google.protobuf.Int64Value log_retention_minutes = 7;
// The number of milliseconds to keep a log segment file before deleting it.
//
// If not set, the value of [log_retention_minutes] is used.
//
// This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.retention_ms] setting.
google.protobuf.Int64Value log_retention_ms = 8;
// The maximum size of a single log file.
//
// This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.segment_bytes] setting.
google.protobuf.Int64Value log_segment_bytes = 9;
// Whether a file should be preallocated when a new log segment is created.
//
// This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.preallocate] setting.
google.protobuf.BoolValue log_preallocate = 10;
// The SO_SNDBUF buffer of the socket server sockets. If the value is -1, the OS default will be used.
google.protobuf.Int64Value socket_send_buffer_bytes = 11;
// The SO_RCVBUF buffer of the socket server sockets. If the value is -1, the OS default will be used.
google.protobuf.Int64Value socket_receive_buffer_bytes = 12;
// Enables automatic topic creation on the server.
google.protobuf.BoolValue auto_create_topics_enable = 13;
// Cluster-wide default number of partitions per topic.
google.protobuf.Int64Value num_partitions = 14;
// Cluster-wide default replication factor for topics.
google.protobuf.Int64Value default_replication_factor = 15;
// The largest record batch size allowed by Kafka. Default value: 1048588.
google.protobuf.Int64Value message_max_bytes = 16;
// The number of bytes of messages to attempt to fetch for each partition. Default value: 1048576.
google.protobuf.Int64Value replica_fetch_max_bytes = 17;
// A list of cipher suites.
repeated string ssl_cipher_suites = 18;
// Retention time (in minutes) for committed offsets after a consumer group loses all its consumers. Default: 10080 (7 days).
google.protobuf.Int64Value offsets_retention_minutes = 19;
// The list of SASL mechanisms enabled in the Kafka server. Default: [SCRAM_SHA_512].
repeated SaslMechanism sasl_enabled_mechanisms = 20;
}
// Cluster host metadata.
message Host {
reserved 7;
enum Role {
// Role of the host is unspecified. Default value.
ROLE_UNSPECIFIED = 0;
// The host is a Kafka broker.
KAFKA = 1;
// The host is a ZooKeeper server.
ZOOKEEPER = 2;
}
enum Health {
// Health of the host is unknown. Default value.
UNKNOWN = 0;
// The host is performing all its functions normally.
ALIVE = 1;
// The host is inoperable and cannot perform any of its essential functions.
DEAD = 2;
// The host is degraded and can perform only some of its essential functions.
DEGRADED = 3;
}
// Name of the host.
string name = 1;
// ID of the Apache Kafka® cluster.
string cluster_id = 2;
// ID of the availability zone where the host resides.
string zone_id = 3;
// Host role. If the field has the default value, it is not returned in the response.
Role role = 4;
// Computational resources allocated to the host.
Resources resources = 5;
// Aggregated host health data. If the field has the default value, it is not returned in the response.
Health health = 6;
// ID of the subnet the host resides in.
string subnet_id = 8;
// The flag that defines whether a public IP address is assigned to the node.
//
// If the value is `true`, then this node is available on the Internet via its public IP address.
bool assign_public_ip = 9;
}
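// Access policy settings for external services.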
message Access {
// Allows access for the DataTransfer service.
bool data_transfer = 1;
}