aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.github/actions/build/action.yml6
-rw-r--r--.github/config/muted_ya.txt23
-rw-r--r--.github/scripts/create_or_update_pr.py111
-rwxr-xr-x.github/scripts/tests/create_new_muted_ya.py16
-rwxr-xr-x.github/scripts/tests/update_mute_issues.py15
-rw-r--r--.github/workflows/build_ydb_dstool.yml121
-rw-r--r--.github/workflows/collect_analytics.yml8
-rw-r--r--.github/workflows/create_issues_for_muted_tests.yml79
-rw-r--r--.github/workflows/regression_run_compatibility.yml20
-rw-r--r--.github/workflows/update_muted_ya.yml112
-rw-r--r--CHANGELOG.md23
-rw-r--r--build/conf/bison_lex.conf8
-rw-r--r--build/conf/fbs.conf16
-rw-r--r--build/conf/go.conf130
-rw-r--r--build/conf/java.conf6
-rw-r--r--build/conf/licenses.json1
-rw-r--r--build/conf/linkers/msvc_linker.conf4
-rw-r--r--build/conf/project_specific/other.conf2
-rw-r--r--build/conf/project_specific/yt.conf4
-rw-r--r--build/conf/proto.conf53
-rw-r--r--build/conf/python.conf22
-rw-r--r--build/conf/swig.conf14
-rw-r--r--build/conf/ts/node_modules.conf2
-rw-r--r--build/export_generators/ide-gradle/build.gradle.kts.jinja6
-rw-r--r--build/export_generators/ide-gradle/build.gradle.kts.proto.jinja14
-rw-r--r--build/export_generators/ide-gradle/builddir.jinja6
-rw-r--r--build/export_generators/ide-gradle/proto_builddir.jinja2
-rw-r--r--build/export_generators/ide-gradle/proto_prepare.jinja40
-rw-r--r--build/export_generators/ide-gradle/proto_vars.jinja4
-rw-r--r--build/export_generators/ide-gradle/run_common.jinja11
-rw-r--r--build/export_generators/ide-gradle/run_java_program.jinja46
-rw-r--r--build/export_generators/ide-gradle/run_program.jinja49
-rw-r--r--build/external_resources/ymake/public.resources.json10
-rw-r--r--build/external_resources/ymake/resources.json10
-rw-r--r--build/mapping.conf.json16
-rw-r--r--build/platform/yfm/resources.json8
-rw-r--r--build/plugins/_dart_fields.py58
-rw-r--r--build/plugins/pybuild.py2
-rw-r--r--build/plugins/ytest.py2
-rw-r--r--build/prebuilt/tools/black_linter/resources.json12
-rw-r--r--build/scripts/check_config_h.py8
-rw-r--r--build/scripts/docs_proto_markdown.tmpl112
-rw-r--r--build/scripts/docs_proto_wrapper.py4
-rw-r--r--build/sysincl/esp-idf.yml3
-rw-r--r--build/ymake.core.conf181
-rw-r--r--contrib/libs/croaring/.yandex_meta/override.nix4
-rw-r--r--contrib/libs/croaring/include/roaring/roaring_version.h4
-rw-r--r--contrib/libs/croaring/src/roaring.c2
-rw-r--r--contrib/libs/croaring/src/roaring64.c4
-rw-r--r--contrib/libs/croaring/ya.make4
-rw-r--r--contrib/libs/cxxsupp/builtins/.yandex_meta/build.ym2
-rw-r--r--contrib/libs/cxxsupp/builtins/ya.make4
-rw-r--r--contrib/libs/expat/.yandex_meta/devtools.copyrights.report33
-rw-r--r--contrib/libs/expat/.yandex_meta/licenses.list.txt6
-rw-r--r--contrib/libs/expat/.yandex_meta/override.nix4
-rw-r--r--contrib/libs/expat/Changes85
-rw-r--r--contrib/libs/expat/README.md17
-rw-r--r--contrib/libs/expat/expat.h6
-rw-r--r--contrib/libs/expat/expat_config.h6
-rw-r--r--contrib/libs/expat/lib/xmlparse.c572
-rw-r--r--contrib/libs/expat/ya.make4
-rw-r--r--contrib/libs/libfuzzer/.yandex_meta/override.nix4
-rw-r--r--contrib/libs/libfuzzer/lib/fuzzer/afl/ya.make2
-rw-r--r--contrib/libs/libfuzzer/ya.make4
-rw-r--r--contrib/libs/libunwind/.yandex_meta/override.nix4
-rw-r--r--contrib/libs/libunwind/ya.make4
-rw-r--r--contrib/python/iniconfig/.dist-info/METADATA9
-rw-r--r--contrib/python/iniconfig/LICENSE36
-rw-r--r--contrib/python/iniconfig/iniconfig/__init__.py2
-rw-r--r--contrib/python/iniconfig/iniconfig/_version.py23
-rw-r--r--contrib/python/iniconfig/iniconfig/exceptions.py2
-rw-r--r--contrib/python/iniconfig/ya.make2
-rw-r--r--contrib/python/multidict/.dist-info/METADATA9
-rw-r--r--contrib/python/multidict/multidict/__init__.py14
-rw-r--r--contrib/python/multidict/multidict/__init__.pyi152
-rw-r--r--contrib/python/multidict/multidict/_abc.py77
-rw-r--r--contrib/python/multidict/multidict/_multidict.c85
-rw-r--r--contrib/python/multidict/multidict/_multidict_base.py72
-rw-r--r--contrib/python/multidict/multidict/_multidict_py.py487
-rw-r--r--contrib/python/multidict/multidict/_multilib/defs.h4
-rw-r--r--contrib/python/multidict/multidict/_multilib/istr.h4
-rw-r--r--contrib/python/multidict/multidict/_multilib/pair_list.h60
-rw-r--r--contrib/python/multidict/multidict/_multilib/pythoncapi_compat.h1142
-rw-r--r--contrib/python/multidict/tests/conftest.py68
-rw-r--r--contrib/python/multidict/tests/gen_pickles.py8
-rw-r--r--contrib/python/multidict/tests/test_abc.py86
-rw-r--r--contrib/python/multidict/tests/test_copy.py16
-rw-r--r--contrib/python/multidict/tests/test_guard.py10
-rw-r--r--contrib/python/multidict/tests/test_istr.py2
-rw-r--r--contrib/python/multidict/tests/test_multidict.py252
-rw-r--r--contrib/python/multidict/tests/test_multidict_benchmarks.py391
-rw-r--r--contrib/python/multidict/tests/test_mutable_multidict.py124
-rw-r--r--contrib/python/multidict/tests/test_pickle.py21
-rw-r--r--contrib/python/multidict/tests/test_types.py52
-rw-r--r--contrib/python/multidict/tests/test_update.py44
-rw-r--r--contrib/python/multidict/tests/test_version.py91
-rw-r--r--contrib/python/multidict/ya.make3
-rw-r--r--library/cpp/coroutine/engine/coroutine_ut.cpp2
-rw-r--r--library/cpp/execprofile/profile.cpp16
-rw-r--r--library/cpp/http/misc/httpreqdata.cpp2
-rw-r--r--library/cpp/http/misc/httpreqdata.h10
-rw-r--r--library/cpp/http/server/http_ex.cpp1
-rw-r--r--library/cpp/logger/backend_creator.cpp2
-rw-r--r--library/cpp/logger/sync_page_cache_file.cpp84
-rw-r--r--library/cpp/logger/sync_page_cache_file.h9
-rw-r--r--library/cpp/neh/http2.cpp9
-rw-r--r--library/cpp/neh/https.cpp14
-rw-r--r--library/cpp/tld/tlds-alpha-by-domain.txt2
-rw-r--r--library/cpp/yt/error/error.cpp1
-rw-r--r--library/cpp/yt/error/unittests/error_ut.cpp16
-rw-r--r--util/generic/ptr.h4
-rw-r--r--util/stream/debug.cpp50
-rw-r--r--util/stream/debug.h53
-rw-r--r--util/stream/output.cpp44
-rw-r--r--util/stream/output.h40
-rw-r--r--util/stream/trace.h16
-rw-r--r--util/system/flock.h1
-rw-r--r--util/ya.make1
-rw-r--r--vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go426
-rw-r--r--vendor/google.golang.org/genproto/googleapis/api/annotations/ya.make2
-rw-r--r--vendor/google.golang.org/genproto/googleapis/api/ya.make2
-rw-r--r--ydb/apps/etcd_proxy/readme.txt1
-rw-r--r--ydb/apps/etcd_proxy/service/etcd_impl.cpp16
-rw-r--r--ydb/apps/etcd_proxy/service/ut/etcd_service_ut.cpp54
-rw-r--r--ydb/apps/ydb/CHANGELOG.md3
-rw-r--r--ydb/apps/ydb/ut/parse_command_line.cpp12
-rw-r--r--ydb/ci/rightlib.txt2
-rw-r--r--ydb/core/backup/common/metadata.cpp44
-rw-r--r--ydb/core/backup/common/metadata.h17
-rw-r--r--ydb/core/base/appdata_fwd.h5
-rw-r--r--ydb/core/base/local_user_token.cpp22
-rw-r--r--ydb/core/base/local_user_token.h23
-rw-r--r--ydb/core/base/ya.make2
-rw-r--r--ydb/core/blob_depot/assimilator.cpp200
-rw-r--r--ydb/core/blob_depot/assimilator.h8
-rw-r--r--ydb/core/blob_depot/data.h19
-rw-r--r--ydb/core/blob_depot/data_decommit.cpp282
-rw-r--r--ydb/core/blob_depot/data_load.cpp5
-rw-r--r--ydb/core/blobstorage/dsproxy/dsproxy_get.cpp34
-rw-r--r--ydb/core/blobstorage/dsproxy/dsproxy_put.cpp4
-rw-r--r--ydb/core/blobstorage/dsproxy/root_cause.h13
-rw-r--r--ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp137
-rw-r--r--ydb/core/blobstorage/lwtrace_probes/blobstorage_probes.h4
-rw-r--r--ydb/core/blobstorage/nodewarden/distconf_binding.cpp6
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk.h34
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp58
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_async.cpp76
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_chunk_tracker.h18
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.cpp25
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.h2
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_config.h4
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_data.h15
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_driveestimator.cpp2
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_free_chunks.h5
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp236
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.h7
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp144
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_metadata.cpp78
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_keeper.h2
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_log_cache.cpp2
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_logreader.cpp68
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_params.cpp2
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_quota_record.h6
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_req_creator.h2
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.cpp4
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.h14
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_sectorrestorator.cpp2
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_syslogreader.cpp10
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_tools.cpp4
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_actions.h4
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_helpers.cpp2
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp2
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_flightcontrol.cpp10
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_flightcontrol.h3
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_sector.h6
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_ut.cpp4
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.cpp12
-rw-r--r--ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.h38
-rw-r--r--ydb/core/blobstorage/pdisk/mock/pdisk_mock.cpp50
-rw-r--r--ydb/core/blobstorage/vdisk/common/vdisk_context.cpp3
-rw-r--r--ydb/core/blobstorage/vdisk/common/vdisk_context.h1
-rw-r--r--ydb/core/blobstorage/vdisk/common/vdisk_log.cpp12
-rw-r--r--ydb/core/blobstorage/vdisk/common/vdisk_log.h3
-rw-r--r--ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeletonfront.cpp5
-rw-r--r--ydb/core/cms/console/console_configs_manager.cpp11
-rw-r--r--ydb/core/config/init/init_impl.h7
-rw-r--r--ydb/core/driver_lib/run/factories.h2
-rw-r--r--ydb/core/driver_lib/run/kikimr_services_initializers.cpp12
-rw-r--r--ydb/core/driver_lib/run/run.cpp37
-rw-r--r--ydb/core/formats/arrow/accessor/abstract/accessor.cpp10
-rw-r--r--ydb/core/formats/arrow/accessor/abstract/constructor.h4
-rw-r--r--ydb/core/formats/arrow/accessor/plain/accessor.h10
-rw-r--r--ydb/core/formats/arrow/accessor/plain/constructor.cpp4
-rw-r--r--ydb/core/formats/arrow/accessor/sparsed/accessor.h22
-rw-r--r--ydb/core/formats/arrow/accessor/sparsed/constructor.cpp5
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/accessor.cpp138
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/accessor.h3
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/columns_storage.cpp2
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/constructor.cpp6
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/constructor.h5
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/data_extractor.cpp114
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/data_extractor.h101
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/direct_builder.cpp51
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/direct_builder.h88
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/json_extractors.cpp79
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/json_extractors.h188
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/others_storage.cpp14
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/others_storage.h9
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/request.cpp19
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/settings.h17
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/ut/ut_sub_columns.cpp8
-rw-r--r--ydb/core/formats/arrow/accessor/sub_columns/ya.make5
-rw-r--r--ydb/core/formats/arrow/arrow_filter.cpp15
-rw-r--r--ydb/core/formats/arrow/arrow_filter.h6
-rw-r--r--ydb/core/formats/arrow/program/abstract.h38
-rw-r--r--ydb/core/formats/arrow/program/assign_internal.cpp30
-rw-r--r--ydb/core/formats/arrow/program/assign_internal.h4
-rw-r--r--ydb/core/formats/arrow/program/collection.cpp5
-rw-r--r--ydb/core/formats/arrow/program/collection.h1
-rw-r--r--ydb/core/formats/arrow/program/execution.h25
-rw-r--r--ydb/core/formats/arrow/program/graph_optimization.cpp221
-rw-r--r--ydb/core/formats/arrow/program/graph_optimization.h15
-rw-r--r--ydb/core/formats/arrow/program/index.h2
-rw-r--r--ydb/core/formats/arrow/program/kernel_logic.cpp73
-rw-r--r--ydb/core/formats/arrow/program/kernel_logic.h146
-rw-r--r--ydb/core/formats/arrow/program/original.h1
-rw-r--r--ydb/core/formats/arrow/program/stream_logic.cpp110
-rw-r--r--ydb/core/formats/arrow/program/stream_logic.h5
-rw-r--r--ydb/core/formats/arrow/reader/merger.cpp4
-rw-r--r--ydb/core/formats/arrow/ut/ut_program_step.cpp45
-rw-r--r--ydb/core/fq/libs/compute/ydb/control_plane/cms_grpc_client_actor.cpp1
-rw-r--r--ydb/core/graph/shard/backends.cpp2
-rw-r--r--ydb/core/graph/shard/tx_monitoring.cpp46
-rw-r--r--ydb/core/graph/shard/ya.make1
-rw-r--r--ydb/core/grpc_services/grpc_request_check_actor.h39
-rw-r--r--ydb/core/grpc_services/query/rpc_execute_query.cpp2
-rw-r--r--ydb/core/grpc_services/rpc_load_rows.cpp16
-rw-r--r--ydb/core/grpc_services/rpc_read_rows.cpp49
-rw-r--r--ydb/core/kafka_proxy/actors/actors.h2
-rw-r--r--ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.cpp34
-rw-r--r--ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.h14
-rw-r--r--ydb/core/kafka_proxy/actors/kafka_metadata_actor.h2
-rw-r--r--ydb/core/kafka_proxy/actors/kafka_transaction_actor.h32
-rw-r--r--ydb/core/kafka_proxy/kafka_connection.cpp14
-rw-r--r--ydb/core/kafka_proxy/kafka_connection.h3
-rw-r--r--ydb/core/kafka_proxy/kafka_events.h91
-rw-r--r--ydb/core/kafka_proxy/kafka_listener.h8
-rw-r--r--ydb/core/kafka_proxy/kafka_messages.cpp2329
-rw-r--r--ydb/core/kafka_proxy/kafka_messages.h4203
-rw-r--r--ydb/core/kafka_proxy/kafka_transactions_coordinator.cpp179
-rw-r--r--ydb/core/kafka_proxy/kafka_transactions_coordinator.h92
-rw-r--r--ydb/core/kafka_proxy/ut/ut_protocol.cpp6
-rw-r--r--ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp310
-rw-r--r--ydb/core/kafka_proxy/ut/ya.make1
-rw-r--r--ydb/core/kafka_proxy/ya.make1
-rw-r--r--ydb/core/kqp/common/kqp_yql.cpp11
-rw-r--r--ydb/core/kqp/common/kqp_yql.h34
-rw-r--r--ydb/core/kqp/common/simple/kqp_event_ids.h1
-rw-r--r--ydb/core/kqp/compute_actor/kqp_compute_actor_factory.cpp1
-rw-r--r--ydb/core/kqp/compute_actor/kqp_compute_actor_factory.h1
-rw-r--r--ydb/core/kqp/compute_actor/kqp_compute_events.h8
-rw-r--r--ydb/core/kqp/compute_actor/kqp_scan_compute_manager.cpp3
-rw-r--r--ydb/core/kqp/compute_actor/kqp_scan_compute_manager.h20
-rw-r--r--ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.cpp19
-rw-r--r--ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.h3
-rw-r--r--ydb/core/kqp/executer_actor/kqp_executer_impl.h27
-rw-r--r--ydb/core/kqp/executer_actor/kqp_executer_stats.cpp179
-rw-r--r--ydb/core/kqp/executer_actor/kqp_partition_helper.cpp17
-rw-r--r--ydb/core/kqp/executer_actor/kqp_partition_helper.h9
-rw-r--r--ydb/core/kqp/executer_actor/kqp_planner.cpp4
-rw-r--r--ydb/core/kqp/executer_actor/kqp_planner.h2
-rw-r--r--ydb/core/kqp/executer_actor/kqp_table_resolver.cpp9
-rw-r--r--ydb/core/kqp/executer_actor/kqp_table_resolver.h3
-rw-r--r--ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp8
-rw-r--r--ydb/core/kqp/executer_actor/kqp_tasks_graph.h6
-rw-r--r--ydb/core/kqp/host/kqp_gateway_proxy.cpp6
-rw-r--r--ydb/core/kqp/node_service/kqp_node_service.cpp1
-rw-r--r--ydb/core/kqp/opt/kqp_query_plan.cpp11
-rw-r--r--ydb/core/kqp/opt/logical/kqp_opt_log.cpp2
-rw-r--r--ydb/core/kqp/opt/logical/kqp_opt_log_extract.cpp4
-rw-r--r--ydb/core/kqp/opt/physical/kqp_opt_phy.cpp12
-rw-r--r--ydb/core/kqp/opt/physical/kqp_opt_phy_limit.cpp4
-rw-r--r--ydb/core/kqp/opt/physical/kqp_opt_phy_olap_filter.cpp28
-rw-r--r--ydb/core/kqp/opt/physical/kqp_opt_phy_rules.h3
-rw-r--r--ydb/core/kqp/opt/physical/kqp_opt_phy_sort.cpp12
-rw-r--r--ydb/core/kqp/provider/yql_kikimr_datasource.cpp12
-rw-r--r--ydb/core/kqp/provider/yql_kikimr_exec.cpp2
-rw-r--r--ydb/core/kqp/provider/yql_kikimr_opt_build.cpp78
-rw-r--r--ydb/core/kqp/provider/yql_kikimr_type_ann.cpp4
-rw-r--r--ydb/core/kqp/query_compiler/kqp_mkql_compiler.cpp4
-rw-r--r--ydb/core/kqp/query_compiler/kqp_query_compiler.cpp20
-rw-r--r--ydb/core/kqp/runtime/kqp_read_actor.cpp6
-rw-r--r--ydb/core/kqp/runtime/kqp_write_actor.cpp2
-rw-r--r--ydb/core/kqp/session_actor/kqp_session_actor.cpp36
-rw-r--r--ydb/core/kqp/tools/combiner_perf/bin/main.cpp181
-rw-r--r--ydb/core/kqp/tools/combiner_perf/bin/ya.make23
-rw-r--r--ydb/core/kqp/tools/combiner_perf/converters.cpp14
-rw-r--r--ydb/core/kqp/tools/combiner_perf/converters.h41
-rw-r--r--ydb/core/kqp/tools/combiner_perf/factories.cpp19
-rw-r--r--ydb/core/kqp/tools/combiner_perf/factories.h19
-rw-r--r--ydb/core/kqp/tools/combiner_perf/printout.h15
-rw-r--r--ydb/core/kqp/tools/combiner_perf/run_params.h17
-rw-r--r--ydb/core/kqp/tools/combiner_perf/simple.cpp86
-rw-r--r--ydb/core/kqp/tools/combiner_perf/simple.h13
-rw-r--r--ydb/core/kqp/tools/combiner_perf/simple_block.cpp209
-rw-r--r--ydb/core/kqp/tools/combiner_perf/simple_block.h13
-rw-r--r--ydb/core/kqp/tools/combiner_perf/simple_last.cpp103
-rw-r--r--ydb/core/kqp/tools/combiner_perf/simple_last.h13
-rw-r--r--ydb/core/kqp/tools/combiner_perf/streams.cpp48
-rw-r--r--ydb/core/kqp/tools/combiner_perf/streams.h290
-rw-r--r--ydb/core/kqp/tools/combiner_perf/tpch_last.cpp168
-rw-r--r--ydb/core/kqp/tools/combiner_perf/tpch_last.h10
-rw-r--r--ydb/core/kqp/tools/combiner_perf/ya.make53
-rw-r--r--ydb/core/kqp/ut/common/kqp_ut_common.cpp14
-rw-r--r--ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp64
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpcds64_1000s_column_store.json220
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpcds78_1000s_column_store.json92
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch10_1000s_column_store.json30
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch11_1000s_column_store.json40
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch13_1000s_column_store.json10
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch14_1000s_column_store.json10
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch15_1000s_column_store.json10
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch16_1000s_column_store.json20
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch17_1000s_column_store.json10
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch18_1000s_column_store.json20
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch19_1000s_column_store.json10
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch21_1000s_column_store.json144
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch2_1000s_column_store.json94
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch3_1000s_column_store.json10
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch4_1000s_column_store.json10
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch5_1000s_column_store.json42
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch7_1000s_column_store.json40
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch8_1000s_column_store.json40
-rw-r--r--ydb/core/kqp/ut/join/data/join_order/tpch9_1000s_column_store.json40
-rw-r--r--ydb/core/kqp/ut/join/data/queries/shuffle_elimination_tpcds_map_join_bug.sql16
-rw-r--r--ydb/core/kqp/ut/join/kqp_join_order_ut.cpp31
-rw-r--r--ydb/core/kqp/ut/olap/json_ut.cpp355
-rw-r--r--ydb/core/kqp/ut/olap/kqp_olap_ut.cpp126
-rw-r--r--ydb/core/kqp/ut/opt/kqp_kv_ut.cpp84
-rw-r--r--ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp102
-rw-r--r--ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp (renamed from ydb/core/kqp/ut/spilling/kqp_scan_spilling_ut.cpp)0
-rw-r--r--ydb/core/kqp/ut/runtime/ya.make (renamed from ydb/core/kqp/ut/spilling/ya.make)1
-rw-r--r--ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp180
-rw-r--r--ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp71
-rw-r--r--ydb/core/kqp/ut/ya.make2
-rw-r--r--ydb/core/kqp/ya.make1
-rw-r--r--ydb/core/persqueue/pq_impl.cpp17
-rw-r--r--ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp62
-rw-r--r--ydb/core/protos/auth.proto1
-rw-r--r--ydb/core/protos/config.proto17
-rw-r--r--ydb/core/protos/counters_blob_depot.proto2
-rw-r--r--ydb/core/protos/flat_scheme_op.proto11
-rw-r--r--ydb/core/protos/index_builder.proto15
-rw-r--r--ydb/core/protos/kqp.proto11
-rw-r--r--ydb/core/protos/out/out.cpp4
-rw-r--r--ydb/core/protos/table_service_config.proto5
-rw-r--r--ydb/core/protos/tx_datashard.proto63
-rw-r--r--ydb/core/security/ticket_parser_impl.h6
-rw-r--r--ydb/core/security/ticket_parser_ut.cpp24
-rw-r--r--ydb/core/statistics/service/service_impl.cpp5
-rw-r--r--ydb/core/sys_view/service/ext_counters.cpp4
-rw-r--r--ydb/core/sys_view/show_create/create_table_formatter.cpp413
-rw-r--r--ydb/core/sys_view/show_create/create_table_formatter.h65
-rw-r--r--ydb/core/sys_view/show_create/create_view_formatter.cpp22
-rw-r--r--ydb/core/sys_view/show_create/create_view_formatter.h19
-rw-r--r--ydb/core/sys_view/show_create/formatters_common.cpp19
-rw-r--r--ydb/core/sys_view/show_create/formatters_common.h60
-rw-r--r--ydb/core/sys_view/show_create/show_create.cpp145
-rw-r--r--ydb/core/sys_view/show_create/ya.make5
-rw-r--r--ydb/core/sys_view/ut_common.cpp7
-rw-r--r--ydb/core/sys_view/ut_kqp.cpp243
-rw-r--r--ydb/core/sys_view/ya.make1
-rw-r--r--ydb/core/testlib/actors/test_runtime.cpp2
-rw-r--r--ydb/core/testlib/mock_transfer_writer_factory.h19
-rw-r--r--ydb/core/testlib/storage_helpers.cpp28
-rw-r--r--ydb/core/testlib/storage_helpers.h8
-rw-r--r--ydb/core/testlib/test_client.cpp141
-rw-r--r--ydb/core/testlib/test_client.h42
-rw-r--r--ydb/core/testlib/ya.make1
-rw-r--r--ydb/core/tx/columnshard/blobs_action/transaction/tx_blobs_written.cpp4
-rw-r--r--ydb/core/tx/columnshard/columnshard__write.cpp2
-rw-r--r--ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/ya.make1
-rw-r--r--ydb/core/tx/columnshard/engines/predicate/filter.cpp34
-rw-r--r--ydb/core/tx/columnshard/engines/predicate/filter.h102
-rw-r--r--ydb/core/tx/columnshard/engines/reader/abstract/constructor.cpp6
-rw-r--r--ydb/core/tx/columnshard/engines/reader/abstract/constructor.h10
-rw-r--r--ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h7
-rw-r--r--ydb/core/tx/columnshard/engines/reader/actor/actor.cpp45
-rw-r--r--ydb/core/tx/columnshard/engines/reader/actor/actor.h7
-rw-r--r--ydb/core/tx/columnshard/engines/reader/common/description.h16
-rw-r--r--ydb/core/tx/columnshard/engines/reader/common/ya.make2
-rw-r--r--ydb/core/tx/columnshard/engines/reader/common_reader/constructor/read_metadata.cpp4
-rw-r--r--ydb/core/tx/columnshard/engines/reader/common_reader/iterator/default_fetching.h17
-rw-r--r--ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetching.cpp2
-rw-r--r--ydb/core/tx/columnshard/engines/reader/common_reader/iterator/iterator.cpp1
-rw-r--r--ydb/core/tx/columnshard/engines/reader/common_reader/iterator/source.h32
-rw-r--r--ydb/core/tx/columnshard/engines/reader/common_reader/iterator/ya.make1
-rw-r--r--ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/constructor.cpp6
-rw-r--r--ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.cpp18
-rw-r--r--ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.h2
-rw-r--r--ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.cpp10
-rw-r--r--ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.h4
-rw-r--r--ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections.cpp100
-rw-r--r--ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections.h208
-rw-r--r--ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/context.cpp2
-rw-r--r--ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetching.cpp5
-rw-r--r--ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/plain_read_data.cpp6
-rw-r--r--ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp125
-rw-r--r--ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.h39
-rw-r--r--ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.cpp29
-rw-r--r--ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.h148
-rw-r--r--ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/ya.make1
-rw-r--r--ydb/core/tx/columnshard/engines/reader/sys_view/abstract/iterator.cpp2
-rw-r--r--ydb/core/tx/columnshard/engines/reader/sys_view/chunks/chunks.cpp3
-rw-r--r--ydb/core/tx/columnshard/engines/reader/sys_view/granules/granules.cpp12
-rw-r--r--ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/optimizer.cpp12
-rw-r--r--ydb/core/tx/columnshard/engines/reader/sys_view/portions/portions.cpp11
-rw-r--r--ydb/core/tx/columnshard/engines/reader/transaction/tx_internal_scan.cpp8
-rw-r--r--ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp42
-rw-r--r--ydb/core/tx/columnshard/engines/scheme/index_info.cpp2
-rw-r--r--ydb/core/tx/columnshard/engines/scheme/index_info.h7
-rw-r--r--ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.cpp2
-rw-r--r--ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.h2
-rw-r--r--ydb/core/tx/columnshard/engines/scheme/versions/abstract_scheme.cpp15
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/abstract.h2
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/bitset.h3
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/string.cpp3
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.cpp10
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.h6
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.cpp14
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.h1
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.cpp42
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.h25
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.cpp11
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.h8
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/abstract.h8
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/default.cpp7
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/default.h2
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/sub_column.h2
-rw-r--r--ydb/core/tx/columnshard/engines/storage/indexes/skip_index/meta.h27
-rw-r--r--ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/zero_level.cpp12
-rw-r--r--ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/zero_level.h8
-rw-r--r--ydb/core/tx/columnshard/engines/ut/ut_logs_engine.cpp22
-rw-r--r--ydb/core/tx/columnshard/engines/writer/buffer/actor2.cpp2
-rw-r--r--ydb/core/tx/columnshard/operations/batch_builder/builder.cpp4
-rw-r--r--ydb/core/tx/columnshard/operations/batch_builder/merger.h2
-rw-r--r--ydb/core/tx/columnshard/operations/batch_builder/restore.cpp4
-rw-r--r--ydb/core/tx/columnshard/operations/slice_builder/pack_builder.cpp18
-rw-r--r--ydb/core/tx/columnshard/operations/write.cpp2
-rw-r--r--ydb/core/tx/columnshard/test_helper/shard_writer.cpp2
-rw-r--r--ydb/core/tx/columnshard/transactions/locks/read_start.cpp2
-rw-r--r--ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp118
-rw-r--r--ydb/core/tx/conveyor/service/service.cpp3
-rw-r--r--ydb/core/tx/conveyor/service/service.h6
-rw-r--r--ydb/core/tx/data_events/common/signals_flow.cpp2
-rw-r--r--ydb/core/tx/data_events/common/signals_flow.h3
-rw-r--r--ydb/core/tx/data_events/shard_writer.cpp10
-rw-r--r--ydb/core/tx/data_events/shard_writer.h4
-rw-r--r--ydb/core/tx/data_events/write_data.cpp5
-rw-r--r--ydb/core/tx/data_events/write_data.h4
-rw-r--r--ydb/core/tx/datashard/buffer_data.h5
-rw-r--r--ydb/core/tx/datashard/build_index.cpp43
-rw-r--r--ydb/core/tx/datashard/datashard__data_cleanup.cpp14
-rw-r--r--ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp74
-rw-r--r--ydb/core/tx/datashard/datashard_ut_local_kmeans.cpp32
-rw-r--r--ydb/core/tx/datashard/datashard_ut_prefix_kmeans.cpp58
-rw-r--r--ydb/core/tx/datashard/datashard_ut_reshuffle_kmeans.cpp28
-rw-r--r--ydb/core/tx/datashard/datashard_ut_trace.cpp2
-rw-r--r--ydb/core/tx/datashard/extstorage_usage_config.h45
-rw-r--r--ydb/core/tx/datashard/import_s3.cpp21
-rw-r--r--ydb/core/tx/datashard/kmeans_helper.cpp10
-rw-r--r--ydb/core/tx/datashard/kmeans_helper.h2
-rw-r--r--ydb/core/tx/datashard/local_kmeans.cpp55
-rw-r--r--ydb/core/tx/datashard/prefix_kmeans.cpp53
-rw-r--r--ydb/core/tx/datashard/reshuffle_kmeans.cpp36
-rw-r--r--ydb/core/tx/datashard/sample_k.cpp6
-rw-r--r--ydb/core/tx/datashard/scan_common.h14
-rw-r--r--ydb/core/tx/datashard/upload_stats.h19
-rw-r--r--ydb/core/tx/program/builder.cpp44
-rw-r--r--ydb/core/tx/program/builder.h6
-rw-r--r--ydb/core/tx/replication/service/service.cpp17
-rw-r--r--ydb/core/tx/replication/service/transfer_writer.cpp840
-rw-r--r--ydb/core/tx/replication/service/transfer_writer.h20
-rw-r--r--ydb/core/tx/replication/service/transfer_writer_factory.h32
-rw-r--r--ydb/core/tx/replication/service/transfer_writer_ut.cpp65
-rw-r--r--ydb/core/tx/replication/service/ya.make2
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp4
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__init.cpp5
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__login.cpp6
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp8
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h2
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp5
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__root_data_erasure_manager.cpp2
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp9
-rw-r--r--ydb/core/tx/schemeshard/schemeshard__tenant_data_erasure_manager.cpp8
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_build_index.cpp8
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp6
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp21
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp6
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_export__create.cpp17
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp1
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_impl.cpp14
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_impl.h3
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_import.cpp28
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp1
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_import__create.cpp248
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp14
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_import_getters.cpp (renamed from ydb/core/tx/schemeshard/schemeshard_import_scheme_getter.cpp)552
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_import_getters.h (renamed from ydb/core/tx/schemeshard/schemeshard_import_scheme_getter.h)4
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_import_getters_fallback.cpp57
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_import_scheme_getter_fallback.cpp36
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_info_types.h78
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_private.h13
-rw-r--r--ydb/core/tx/schemeshard/schemeshard_schema.h6
-rw-r--r--ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp237
-rw-r--r--ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.cpp22
-rw-r--r--ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.h2
-rw-r--r--ydb/core/tx/schemeshard/ut_helpers/helpers.cpp66
-rw-r--r--ydb/core/tx/schemeshard/ut_helpers/helpers.h6
-rw-r--r--ydb/core/tx/schemeshard/ut_helpers/test_env.cpp2
-rw-r--r--ydb/core/tx/schemeshard/ut_helpers/test_env.h1
-rw-r--r--ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp6
-rw-r--r--ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp70
-rw-r--r--ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp70
-rw-r--r--ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp14
-rw-r--r--ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp13
-rw-r--r--ydb/core/tx/schemeshard/ya.make4
-rw-r--r--ydb/core/tx/tx_proxy/schemereq.cpp30
-rw-r--r--ydb/core/tx/tx_proxy/upload_rows.cpp3
-rw-r--r--ydb/core/tx/tx_proxy/upload_rows_common_impl.h138
-rw-r--r--ydb/core/tx/tx_proxy/ya.make1
-rw-r--r--ydb/core/util/cpuinfo.cpp7
-rw-r--r--ydb/core/viewer/tests/canondata/result.json442
-rw-r--r--ydb/core/viewer/tests/test.py78
-rw-r--r--ydb/core/viewer/viewer_describe.h3
-rw-r--r--ydb/core/viewer/viewer_topic_data.h6
-rw-r--r--ydb/core/ydb_convert/table_description.cpp107
-rw-r--r--ydb/core/ydb_convert/table_description.h3
-rw-r--r--ydb/docs/en/core/concepts/federated_query/s3/_includes/date_formats.md7
-rw-r--r--ydb/docs/en/core/concepts/federated_query/s3/_includes/format_settings.md10
-rw-r--r--ydb/docs/en/core/concepts/federated_query/s3/_includes/path_format.md2
-rw-r--r--ydb/docs/en/core/concepts/federated_query/s3/external_data_source.md41
-rw-r--r--ydb/docs/en/core/concepts/federated_query/s3/external_table.md2
-rw-r--r--ydb/docs/en/core/concepts/topic.md24
-rw-r--r--ydb/docs/en/core/contributor/load-actors-kqp.md20
-rw-r--r--ydb/docs/en/core/contributor/load-actors-memory.md10
-rw-r--r--ydb/docs/en/core/contributor/load-actors-pdisk-log.md24
-rw-r--r--ydb/docs/en/core/contributor/load-actors-pdisk-read.md22
-rw-r--r--ydb/docs/en/core/contributor/load-actors-pdisk-write.md24
-rw-r--r--ydb/docs/en/core/contributor/load-actors-stop.md8
-rw-r--r--ydb/docs/en/core/contributor/load-actors-storage.md78
-rw-r--r--ydb/docs/en/core/contributor/load-actors-vdisk.md28
-rw-r--r--ydb/docs/en/core/contributor/localdb-uncommitted-txs.md152
-rw-r--r--ydb/docs/en/core/public-materials/_includes/conferences/2025/fosdem.md2
-rw-r--r--ydb/docs/en/core/public-materials/_includes/conferences/2025/fossasia.md15
-rw-r--r--ydb/docs/en/core/public-materials/_includes/conferences/2025/pgconfIndia.md13
-rw-r--r--ydb/docs/en/core/public-materials/videos.md7
-rw-r--r--ydb/docs/en/core/reference/configuration/index.md80
-rw-r--r--ydb/docs/en/core/reference/kafka-api/examples.md2
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/_includes/commands.md1
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/commands/monitoring-healthcheck.md151
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/export-import/_includes/s3_conn.md30
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/interactive-cli.md28
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/operation-get.md4
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/operation-list.md10
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/parameterized-queries-cli.md24
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/table-attribute-add.md4
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/table-attribute-drop.md4
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/table-drop.md4
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/table-ttl-set.md12
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/toc_i.yaml2
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/tools-copy.md6
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/topic-alter.md16
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/topic-consumer-add.md10
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/topic-consumer-drop.md4
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/topic-consumer-offset-commit.md10
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/topic-create.md16
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/topic-read.md28
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/topic-write.md18
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/version.md10
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/workload-click-bench.md14
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/workload-topic.md114
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/workload-tpcds.md28
-rw-r--r--ydb/docs/en/core/reference/ydb-cli/workload-tpch.md28
-rw-r--r--ydb/docs/en/core/reference/ydb-sdk/feature-parity.md286
-rw-r--r--ydb/docs/en/core/reference/ydb-sdk/health-check-api.md8
-rw-r--r--ydb/docs/en/core/reference/ydb-sdk/topic.md104
-rw-r--r--ydb/docs/en/core/yql/reference/_includes/permissions_list.md4
-rw-r--r--ydb/docs/ru/core/concepts/federated_query/s3/_includes/date_formats.md7
-rw-r--r--ydb/docs/ru/core/concepts/federated_query/s3/_includes/format_settings.md10
-rw-r--r--ydb/docs/ru/core/concepts/federated_query/s3/_includes/path_format.md2
-rw-r--r--ydb/docs/ru/core/concepts/federated_query/s3/external_data_source.md37
-rw-r--r--ydb/docs/ru/core/concepts/federated_query/s3/external_table.md1
-rw-r--r--ydb/docs/ru/core/concepts/federated_query/s3/formats.md24
-rw-r--r--ydb/docs/ru/core/concepts/glossary.md4
-rw-r--r--ydb/docs/ru/core/contributor/documentation/toc_p.yaml8
-rw-r--r--ydb/docs/ru/core/contributor/load-actors-kqp.md20
-rw-r--r--ydb/docs/ru/core/contributor/load-actors-memory.md10
-rw-r--r--ydb/docs/ru/core/contributor/load-actors-overview.md22
-rw-r--r--ydb/docs/ru/core/contributor/load-actors-pdisk-log.md24
-rw-r--r--ydb/docs/ru/core/contributor/load-actors-pdisk-read.md22
-rw-r--r--ydb/docs/ru/core/contributor/load-actors-pdisk-write.md24
-rw-r--r--ydb/docs/ru/core/contributor/load-actors-stop.md8
-rw-r--r--ydb/docs/ru/core/contributor/load-actors-storage.md78
-rw-r--r--ydb/docs/ru/core/contributor/load-actors-vdisk.md28
-rw-r--r--ydb/docs/ru/core/contributor/localdb-uncommitted-txs.md152
-rw-r--r--ydb/docs/ru/core/dev/system-views.md136
-rw-r--r--ydb/docs/ru/core/postgresql/import.md8
-rw-r--r--ydb/docs/ru/core/public-materials/_includes/conferences/2024/SmartData.md11
-rw-r--r--ydb/docs/ru/core/public-materials/videos.md2
-rw-r--r--ydb/docs/ru/core/reference/configuration/index.md124
-rw-r--r--ydb/docs/ru/core/reference/kafka-api/examples.md2
-rw-r--r--ydb/docs/ru/core/reference/ydb-cli/_includes/commands.md1
-rw-r--r--ydb/docs/ru/core/reference/ydb-cli/commands/monitoring-healthcheck.md147
-rw-r--r--ydb/docs/ru/core/reference/ydb-cli/export-import/_includes/auth-s3.md24
-rw-r--r--ydb/docs/ru/core/reference/ydb-cli/toc_i.yaml2
-rw-r--r--ydb/docs/ru/core/reference/ydb-cli/topic-read.md32
-rw-r--r--ydb/docs/ru/core/reference/ydb-cli/workload-click-bench.md46
-rw-r--r--ydb/docs/ru/core/reference/ydb-cli/workload-tpcds.md28
-rw-r--r--ydb/docs/ru/core/reference/ydb-cli/workload-tpch.md28
-rw-r--r--ydb/docs/ru/core/reference/ydb-sdk/health-check-api.md6
-rw-r--r--ydb/docs/ru/core/reference/ydb-sdk/topic.md114
-rw-r--r--ydb/docs/ru/core/security/audit-log.md60
-rw-r--r--ydb/docs/ru/core/security/short-access-control-notation.md69
-rw-r--r--ydb/docs/ru/core/yql/reference/_includes/permissions_list.md4
-rw-r--r--ydb/docs/ru/core/yql/reference/syntax/lexer.md38
-rw-r--r--ydb/docs/ru/core/yql/reference/types/primitive.md244
-rw-r--r--ydb/docs/ru/core/yql/reference/types/serial.md26
-rw-r--r--ydb/docs/ru/core/yql/reference/types/special.md22
-rw-r--r--ydb/docs/ru/core/yql/reference/udf/list/postgres.md38
-rw-r--r--ydb/library/benchmarks/gen/tpcds-dbgen/tdefs.c22
-rw-r--r--ydb/library/benchmarks/gen/tpcds-dbgen/w_datetbl.c12
-rw-r--r--ydb/library/formats/arrow/protos/accessor.proto20
-rw-r--r--ydb/library/grpc/server/grpc_server.h2
-rw-r--r--ydb/library/login/login.cpp2
-rw-r--r--ydb/library/login/login.h2
-rw-r--r--ydb/library/workload/benchmark_base/workload.cpp17
-rw-r--r--ydb/library/workload/benchmark_base/workload.h1
-rw-r--r--ydb/library/workload/log/select_queries.sql410
-rw-r--r--ydb/library/workload/log/select_queries_original.sql452
-rw-r--r--ydb/library/workload/log/ya.make3
-rw-r--r--ydb/library/yaml_config/ya.make1
-rw-r--r--ydb/library/yaml_config/yaml_config.cpp42
-rw-r--r--ydb/library/yaml_config/yaml_config.h26
-rw-r--r--ydb/library/yql/dq/actors/compute/dq_compute_actor.h2
-rw-r--r--ydb/library/yql/dq/actors/compute/dq_compute_actor_impl.h8
-rw-r--r--ydb/library/yql/dq/actors/protos/dq_events.proto1
-rw-r--r--ydb/library/yql/dq/opt/dq_opt_build.cpp69
-rw-r--r--ydb/library/yql/dq/opt/dq_opt_dphyp_solver.h2
-rw-r--r--ydb/library/yql/dq/opt/dq_opt_join.cpp44
-rw-r--r--ydb/library/yql/dq/opt/dq_opt_join_cost_based.cpp2
-rw-r--r--ydb/library/yql/dq/opt/dq_opt_join_tree_node.cpp26
-rw-r--r--ydb/library/yql/dq/opt/dq_opt_join_tree_node.h21
-rw-r--r--ydb/library/yql/dq/opt/dq_opt_phy.cpp12
-rw-r--r--ydb/library/yql/dq/opt/dq_opt_phy.h6
-rw-r--r--ydb/library/yql/dq/runtime/dq_output_consumer.cpp36
-rw-r--r--ydb/library/yql/dq/runtime/dq_tasks_runner.cpp18
-rw-r--r--ydb/library/yql/providers/dq/opt/physical_optimize.cpp14
-rw-r--r--ydb/library/yql/providers/pq/async_io/dq_pq_rd_read_actor.cpp534
-rw-r--r--ydb/library/yql/providers/pq/async_io/dq_pq_rd_read_actor.h6
-rw-r--r--ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.cpp412
-rw-r--r--ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.cpp4
-rw-r--r--ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.h7
-rw-r--r--ydb/library/yql/providers/pq/async_io/ya.make1
-rw-r--r--ydb/library/yql/providers/pq/common/pq_partition_key.cpp3
-rw-r--r--ydb/library/yql/providers/pq/common/pq_partition_key.h29
-rw-r--r--ydb/library/yql/providers/pq/common/ya.make1
-rw-r--r--ydb/library/yql/providers/pq/common/yql_names.h1
-rw-r--r--ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.json15
-rw-r--r--ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.cpp41
-rw-r--r--ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.h9
-rw-r--r--ydb/library/yql/providers/pq/gateway/native/ya.make1
-rw-r--r--ydb/library/yql/providers/pq/gateway/native/yql_pq_gateway.cpp33
-rw-r--r--ydb/library/yql/providers/pq/gateway/native/yql_pq_session.cpp106
-rw-r--r--ydb/library/yql/providers/pq/gateway/native/yql_pq_session.h8
-rw-r--r--ydb/library/yql/providers/pq/proto/dq_io.proto8
-rw-r--r--ydb/library/yql/providers/pq/provider/yql_pq_datasource_type_ann.cpp32
-rw-r--r--ydb/library/yql/providers/pq/provider/yql_pq_dq_integration.cpp17
-rw-r--r--ydb/library/yql/providers/pq/provider/yql_pq_gateway.h11
-rw-r--r--ydb/library/yql/providers/pq/provider/yql_pq_helpers.cpp25
-rw-r--r--ydb/library/yql/providers/pq/provider/yql_pq_load_meta.cpp9
-rw-r--r--ydb/library/yql/providers/pq/provider/yql_pq_provider.h2
-rw-r--r--ydb/library/yql/providers/pq/provider/yql_pq_topic_client.h25
-rw-r--r--ydb/library/yql/tests/sql/dq_file.py23
-rw-r--r--ydb/library/yql/tests/sql/hybrid_file.py20
-rw-r--r--ydb/library/yql/udfs/common/roaring/roaring.cpp17
-rw-r--r--ydb/mvp/meta/meta_cp_databases.h36
-rw-r--r--ydb/public/lib/ydb_cli/commands/benchmark_utils.cpp130
-rw-r--r--ydb/public/lib/ydb_cli/commands/benchmark_utils.h5
-rw-r--r--ydb/public/lib/ydb_cli/commands/interactive/complete/ya.make2
-rw-r--r--ydb/public/lib/ydb_cli/commands/interactive/complete/yql_completer.cpp7
-rw-r--r--ydb/public/lib/ydb_cli/commands/ydb_benchmark.cpp36
-rw-r--r--ydb/public/lib/ydb_cli/commands/ydb_benchmark.h5
-rw-r--r--ydb/public/lib/ydb_cli/commands/ydb_command.cpp2
-rw-r--r--ydb/public/lib/ydb_cli/commands/ydb_root_common.cpp13
-rw-r--r--ydb/public/lib/ydb_cli/commands/ydb_sql.cpp26
-rw-r--r--ydb/public/lib/ydb_cli/commands/ydb_sql.h3
-rw-r--r--ydb/public/lib/ydb_cli/common/client_command_options.h21
-rw-r--r--ydb/public/lib/ydb_cli/common/command.h1
-rw-r--r--ydb/public/lib/ydb_cli/common/parameters.cpp27
-rw-r--r--ydb/public/lib/ydb_cli/common/parameters.h4
-rw-r--r--ydb/public/lib/ydb_cli/common/ya.make5
-rw-r--r--ydb/public/lib/ydb_cli/common/yql_parser/ut/ya.make11
-rw-r--r--ydb/public/lib/ydb_cli/common/yql_parser/ya.make20
-rw-r--r--ydb/public/lib/ydb_cli/common/yql_parser/yql_parser.cpp392
-rw-r--r--ydb/public/lib/ydb_cli/common/yql_parser/yql_parser.h17
-rw-r--r--ydb/public/lib/ydb_cli/common/yql_parser/yql_parser_ut.cpp928
-rw-r--r--ydb/public/lib/ydb_cli/dump/util/view_utils.cpp35
-rw-r--r--ydb/public/lib/ydb_cli/dump/util/view_utils.h8
-rw-r--r--ydb/public/sdk/cpp/client/ydb_topic/impl/read_session_impl.ipp3
-rw-r--r--ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/rate_limiter/rate_limiter.h4
-rw-r--r--ydb/public/sdk/cpp/src/client/resources/ya.make1
-rw-r--r--ydb/public/sdk/cpp/src/client/resources/ydb_ca.cpp4
-rw-r--r--ydb/public/sdk/cpp/src/client/resources/ydb_resources.cpp4
-rw-r--r--ydb/public/sdk/cpp/src/client/resources/ydb_sdk_version.txt1
-rw-r--r--ydb/public/sdk/cpp/src/client/topic/impl/proto_accessor.cpp (renamed from ydb/public/sdk/cpp/src/client/topic/proto_accessor.cpp)0
-rw-r--r--ydb/public/sdk/cpp/src/client/topic/impl/read_session_impl.ipp3
-rw-r--r--ydb/public/sdk/cpp/src/client/topic/impl/ya.make1
-rw-r--r--ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp60
-rw-r--r--ydb/public/sdk/cpp/src/client/topic/ya.make1
-rw-r--r--ydb/public/sdk/cpp/src/version.h8
-rw-r--r--ydb/services/metadata/ds_table/accessor_snapshot_base.cpp30
-rw-r--r--ydb/services/metadata/ds_table/accessor_snapshot_base.h13
-rw-r--r--ydb/services/persqueue_v1/actors/partition_actor.cpp10
-rw-r--r--ydb/services/persqueue_v1/topic_yql_ut.cpp48
-rw-r--r--ydb/services/ydb/backup_ut/ydb_backup_ut.cpp9
-rw-r--r--ydb/tests/fq/pq_async_io/mock_pq_gateway.cpp38
-rw-r--r--ydb/tests/fq/pq_async_io/ut/dq_pq_rd_read_actor_ut.cpp7
-rw-r--r--ydb/tests/fq/pq_async_io/ut/dq_pq_read_actor_ut.cpp4
-rw-r--r--ydb/tests/functional/benchmarks_init/canondata/test_init.TestClickbenchInit.test_s1_row/s1_row2
-rw-r--r--ydb/tests/functional/benchmarks_init/canondata/test_init.TestTpcdsInit.test_s1_row/s1_row48
-rw-r--r--ydb/tests/functional/benchmarks_init/canondata/test_init.TestTpchInit.test_s1_row/s1_row16
-rw-r--r--ydb/tests/functional/canonical/canondata/test_sql.TestCanonicalFolder1.test_case_write_multi_usage.script-script_/write_multi_usage.script.plan2
-rw-r--r--ydb/tests/functional/clickbench/canondata/test.test_plans_column_/queries-original-plan-column-07
-rw-r--r--ydb/tests/functional/compatibility/test_stress.py69
-rw-r--r--ydb/tests/functional/replication/main.cpp124
-rw-r--r--ydb/tests/functional/replication/replication.cpp104
-rw-r--r--ydb/tests/functional/replication/transfer.cpp27
-rw-r--r--ydb/tests/functional/replication/utils.h572
-rw-r--r--ydb/tests/functional/replication/ya.make11
-rw-r--r--ydb/tests/functional/scheme_tests/canondata/tablet_scheme_tests.TestTabletSchemes.test_tablet_schemes_flat_schemeshard_/flat_schemeshard.schema14
-rw-r--r--ydb/tests/functional/tenants/conftest.py4
-rw-r--r--ydb/tests/functional/tenants/test_user_administration.py41
-rw-r--r--ydb/tests/functional/tpc/large/ya.make8
-rw-r--r--ydb/tests/functional/transfer/main.cpp1467
-rw-r--r--ydb/tests/functional/transfer/ya.make30
-rw-r--r--ydb/tests/functional/ya.make1
-rw-r--r--ydb/tests/functional/ydb_cli/canondata/result.json3
-rw-r--r--ydb/tests/functional/ydb_cli/canondata/test_ydb_sql.TestExecuteSqlWithPgSyntax.test_pg_syntax/result.output5
-rw-r--r--ydb/tests/functional/ydb_cli/test_ydb_recursive_remove.py6
-rw-r--r--ydb/tests/functional/ydb_cli/test_ydb_sql.py18
-rw-r--r--ydb/tests/library/fixtures/__init__.py_168
-rw-r--r--ydb/tests/library/harness/kikimr_config.py18
-rw-r--r--ydb/tests/library/harness/resources/default_yaml.yml13
-rw-r--r--ydb/tests/library/ut/ya.make6
-rw-r--r--ydb/tests/olap/load/lib/tpch.py14
-rw-r--r--ydb/tests/olap/oom/overlapping_portions.py104
-rw-r--r--ydb/tests/olap/oom/ya.make26
-rw-r--r--ydb/tests/olap/scenario/test_read_update_write_load.py6
-rw-r--r--ydb/tests/olap/test_log_scenario.py151
-rw-r--r--ydb/tests/olap/ttl_tiering/base.py3
-rw-r--r--ydb/tests/olap/ya.make9
-rw-r--r--ydb/tests/olap/zip_bomb.py125
-rw-r--r--ydb/tests/tools/fqrun/src/fq_runner.cpp2
-rw-r--r--ydb/tests/tools/fqrun/src/fq_setup.cpp4
-rw-r--r--ydb/tests/tools/kqprun/README.md4
-rw-r--r--ydb/tests/tools/kqprun/configuration/app_config.conf2
-rw-r--r--ydb/tests/tools/kqprun/runlib/application.cpp8
-rw-r--r--ydb/tests/tools/kqprun/runlib/settings.h4
-rw-r--r--ydb/tests/tools/kqprun/runlib/utils.cpp1
-rw-r--r--ydb/tests/tools/kqprun/src/kqp_runner.cpp2
-rw-r--r--ydb/tests/tools/kqprun/src/ydb_setup.cpp78
-rw-r--r--ydb/tests/tools/kqprun/ya.make1
-rw-r--r--ydb/tools/cfg/base.py1
-rw-r--r--ydb/tools/cfg/bin/__main__.py7
-rw-r--r--ydb/tools/cfg/static.py73
-rw-r--r--ydb/tools/cfg/utils.py63
-rw-r--r--ydb/tools/cfg/ya.make1
-rw-r--r--ydb/tools/stress_tool/device_test_tool.h29
-rw-r--r--ydb/tools/stress_tool/device_test_tool_pdisk_test.h4
-rw-r--r--ydb/tools/ydbd_slice/__init__.py2
-rw-r--r--ydb/tools/ydbd_slice/nodes.py16
-rw-r--r--yql/essentials/cfg/tests/gateways-experimental.conf20
-rw-r--r--yql/essentials/core/expr_nodes/yql_expr_nodes.json10
-rw-r--r--yql/essentials/core/histogram/eq_width_histogram.cpp73
-rw-r--r--yql/essentials/core/histogram/eq_width_histogram.h228
-rw-r--r--yql/essentials/core/histogram/ut/eq_width_histogram_ut.cpp127
-rw-r--r--yql/essentials/core/histogram/ut/ya.make8
-rw-r--r--yql/essentials/core/histogram/ya.make12
-rw-r--r--yql/essentials/core/minsketch/ut/ya.make11
-rw-r--r--yql/essentials/core/peephole_opt/yql_opt_peephole_physical.cpp347
-rw-r--r--yql/essentials/core/type_ann/type_ann_blocks.cpp80
-rw-r--r--yql/essentials/core/type_ann/type_ann_blocks.h2
-rw-r--r--yql/essentials/core/type_ann/type_ann_core.cpp2
-rw-r--r--yql/essentials/core/ya.make1
-rw-r--r--yql/essentials/core/yql_expr_constraint.cpp2
-rw-r--r--yql/essentials/core/yql_expr_type_annotation.cpp55
-rw-r--r--yql/essentials/core/yql_expr_type_annotation.h4
-rw-r--r--yql/essentials/core/yql_join.cpp76
-rw-r--r--yql/essentials/core/yql_join.h3
-rw-r--r--yql/essentials/core/yql_statistics.cpp10
-rw-r--r--yql/essentials/core/yql_statistics.h2
-rw-r--r--yql/essentials/core/yql_type_annotation.cpp2
-rw-r--r--yql/essentials/data/language/pragmas_opensource.json1
-rw-r--r--yql/essentials/data/language/rules_corr_basic.json2
-rw-r--r--yql/essentials/data/language/sql_functions.json1
-rw-r--r--yql/essentials/data/language/types.json2
-rw-r--r--yql/essentials/data/language/udfs_basic.json1
-rwxr-xr-xyql/essentials/data/language/update_functions.sh4
-rw-r--r--yql/essentials/docs/ru/syntax/expressions.md22
-rw-r--r--yql/essentials/minikql/aligned_page_pool.cpp37
-rw-r--r--yql/essentials/minikql/aligned_page_pool.h4
-rw-r--r--yql/essentials/minikql/comp_nodes/benchmark/block_coalesce/bench.cpp3
-rw-r--r--yql/essentials/minikql/comp_nodes/mkql_block_coalesce.cpp25
-rw-r--r--yql/essentials/minikql/comp_nodes/mkql_unwrap.cpp4
-rw-r--r--yql/essentials/minikql/comp_nodes/mkql_weakmember.cpp7
-rw-r--r--yql/essentials/minikql/comp_nodes/ut/mkql_block_coalesce_ut.cpp89
-rw-r--r--yql/essentials/minikql/computation/mkql_method_address_helper.h15
-rw-r--r--yql/essentials/minikql/mkql_alloc.cpp29
-rw-r--r--yql/essentials/minikql/mkql_alloc.h113
-rw-r--r--yql/essentials/minikql/mkql_string_util_ut.cpp8
-rw-r--r--yql/essentials/minikql/mkql_type_builder.cpp1
-rw-r--r--yql/essentials/providers/common/config/yql_dispatch.cpp6
-rw-r--r--yql/essentials/providers/common/config/yql_dispatch.h8
-rw-r--r--yql/essentials/providers/common/mkql/yql_provider_mkql.cpp2
-rw-r--r--yql/essentials/providers/common/proto/gateways_config.proto53
-rw-r--r--yql/essentials/providers/common/provider/yql_provider.cpp2
-rw-r--r--yql/essentials/providers/common/provider/yql_provider.h2
-rw-r--r--yql/essentials/public/fastcheck/format.cpp9
-rw-r--r--yql/essentials/public/fastcheck/linter_ut.cpp13
-rw-r--r--yql/essentials/public/fastcheck/ya.make1
-rw-r--r--yql/essentials/public/udf/arrow/bit_util.h32
-rw-r--r--yql/essentials/public/udf/arrow/ut/bit_util_ut.cpp20
-rw-r--r--yql/essentials/sql/v1/SQLv1.g.in2
-rw-r--r--yql/essentials/sql/v1/SQLv1Antlr4.g.in35
-rw-r--r--yql/essentials/sql/v1/builtin.cpp830
-rw-r--r--yql/essentials/sql/v1/complete/antlr4/c3i.h43
-rw-r--r--yql/essentials/sql/v1/complete/antlr4/c3t.h (renamed from yql/essentials/sql/v1/complete/c3_engine.h)64
-rw-r--r--yql/essentials/sql/v1/complete/antlr4/defs.h18
-rw-r--r--yql/essentials/sql/v1/complete/antlr4/ya.make9
-rw-r--r--yql/essentials/sql/v1/complete/bench/main.cpp40
-rw-r--r--yql/essentials/sql/v1/complete/bench/ya.make13
-rw-r--r--yql/essentials/sql/v1/complete/name/name_service.h10
-rw-r--r--yql/essentials/sql/v1/complete/name/static/default_name_set.cpp46
-rw-r--r--yql/essentials/sql/v1/complete/name/static/frequency.cpp87
-rw-r--r--yql/essentials/sql/v1/complete/name/static/frequency.h17
-rw-r--r--yql/essentials/sql/v1/complete/name/static/frequency_ut.cpp37
-rw-r--r--yql/essentials/sql/v1/complete/name/static/json_name_set.cpp58
-rw-r--r--yql/essentials/sql/v1/complete/name/static/name_service.cpp85
-rw-r--r--yql/essentials/sql/v1/complete/name/static/name_service.h7
-rw-r--r--yql/essentials/sql/v1/complete/name/static/ranking.cpp102
-rw-r--r--yql/essentials/sql/v1/complete/name/static/ranking.h23
-rw-r--r--yql/essentials/sql/v1/complete/name/static/ranking_ut.cpp14
-rw-r--r--yql/essentials/sql/v1/complete/name/static/ut/ya.make7
-rw-r--r--yql/essentials/sql/v1/complete/name/static/ya.make16
-rw-r--r--yql/essentials/sql/v1/complete/sql_antlr4.cpp126
-rw-r--r--yql/essentials/sql/v1/complete/sql_complete.cpp37
-rw-r--r--yql/essentials/sql/v1/complete/sql_complete.h1
-rw-r--r--yql/essentials/sql/v1/complete/sql_complete_ut.cpp232
-rw-r--r--yql/essentials/sql/v1/complete/sql_context.h26
-rw-r--r--yql/essentials/sql/v1/complete/string_util.h17
-rw-r--r--yql/essentials/sql/v1/complete/syntax/ansi.cpp (renamed from yql/essentials/sql/v1/complete/sql_syntax.cpp)2
-rw-r--r--yql/essentials/sql/v1/complete/syntax/ansi.h (renamed from yql/essentials/sql/v1/complete/sql_syntax.h)0
-rw-r--r--yql/essentials/sql/v1/complete/syntax/grammar.cpp86
-rw-r--r--yql/essentials/sql/v1/complete/syntax/grammar.h (renamed from yql/essentials/sql/v1/complete/sql_antlr4.h)21
-rw-r--r--yql/essentials/sql/v1/complete/syntax/grammar_ut.cpp37
-rw-r--r--yql/essentials/sql/v1/complete/syntax/local.cpp (renamed from yql/essentials/sql/v1/complete/sql_context.cpp)82
-rw-r--r--yql/essentials/sql/v1/complete/syntax/local.h28
-rw-r--r--yql/essentials/sql/v1/complete/syntax/parser_call_stack.cpp65
-rw-r--r--yql/essentials/sql/v1/complete/syntax/parser_call_stack.h13
-rw-r--r--yql/essentials/sql/v1/complete/syntax/ut/ya.make7
-rw-r--r--yql/essentials/sql/v1/complete/syntax/ya.make30
-rw-r--r--yql/essentials/sql/v1/complete/text/ut/ya.make7
-rw-r--r--yql/essentials/sql/v1/complete/text/word.cpp (renamed from yql/essentials/sql/v1/complete/string_util.cpp)2
-rw-r--r--yql/essentials/sql/v1/complete/text/word.h15
-rw-r--r--yql/essentials/sql/v1/complete/text/word_ut.cpp (renamed from yql/essentials/sql/v1/complete/string_util_ut.cpp)6
-rw-r--r--yql/essentials/sql/v1/complete/text/ya.make11
-rw-r--r--yql/essentials/sql/v1/complete/ut/ya.make1
-rw-r--r--yql/essentials/sql/v1/complete/ya.make15
-rw-r--r--yql/essentials/sql/v1/context.cpp12
-rw-r--r--yql/essentials/sql/v1/context.h2
-rw-r--r--yql/essentials/sql/v1/format/sql_format_ut.h9
-rw-r--r--yql/essentials/sql/v1/lexer/lexer.cpp95
-rw-r--r--yql/essentials/sql/v1/lexer/lexer.h11
-rw-r--r--yql/essentials/sql/v1/lexer/lexer_ut.cpp354
-rw-r--r--yql/essentials/sql/v1/lexer/lexer_ut.h37
-rw-r--r--yql/essentials/sql/v1/lexer/regex/lexer.cpp254
-rw-r--r--yql/essentials/sql/v1/lexer/regex/lexer.h9
-rw-r--r--yql/essentials/sql/v1/lexer/regex/lexer_ut.cpp219
-rw-r--r--yql/essentials/sql/v1/lexer/regex/regex.cpp240
-rw-r--r--yql/essentials/sql/v1/lexer/regex/regex.h14
-rw-r--r--yql/essentials/sql/v1/lexer/regex/regex_ut.cpp90
-rw-r--r--yql/essentials/sql/v1/lexer/regex/ut/ya.make13
-rw-r--r--yql/essentials/sql/v1/lexer/regex/ya.make39
-rw-r--r--yql/essentials/sql/v1/lexer/ut/ya.make4
-rw-r--r--yql/essentials/sql/v1/node.h1
-rw-r--r--yql/essentials/sql/v1/query.cpp13
-rw-r--r--yql/essentials/sql/v1/reflect/sql_reflect.cpp173
-rw-r--r--yql/essentials/sql/v1/reflect/sql_reflect.h19
-rw-r--r--yql/essentials/sql/v1/reflect/sql_reflect_ut.cpp46
-rw-r--r--yql/essentials/sql/v1/reflect/ut/ya.make7
-rw-r--r--yql/essentials/sql/v1/reflect/ya.make13
-rw-r--r--yql/essentials/sql/v1/select.cpp4
-rw-r--r--yql/essentials/sql/v1/source.h2
-rw-r--r--yql/essentials/sql/v1/sql_query.cpp91
-rw-r--r--yql/essentials/sql/v1/sql_query.h2
-rw-r--r--yql/essentials/sql/v1/sql_ut_common.h32
-rw-r--r--yql/essentials/tests/s-expressions/minirun/part6/canondata/result.json14
-rw-r--r--yql/essentials/tests/s-expressions/minirun/part8/canondata/result.json14
-rw-r--r--yql/essentials/tests/s-expressions/suites/Blocks/ListFromBlocks.yqls23
-rw-r--r--yql/essentials/tests/s-expressions/suites/Blocks/ListToBlocks.yqls23
-rw-r--r--yql/essentials/tests/sql/minirun/part7/canondata/result.json14
-rw-r--r--yql/essentials/tests/sql/sql2yql/canondata/result.json12
-rw-r--r--yql/essentials/tests/sql/sql2yql/canondata/test_sql_format.test_window-yql-19709_/formatted.sql24
-rw-r--r--yql/essentials/tests/sql/suites/window/yql-19709.sql18
-rw-r--r--yql/essentials/tools/sql2yql/sql2yql.cpp4
-rw-r--r--yql/essentials/tools/sql_functions_dump/sql_functions_dump.cpp40
-rw-r--r--yql/essentials/tools/sql_functions_dump/test/test.py20
-rw-r--r--yql/essentials/tools/sql_functions_dump/test/ya.make15
-rw-r--r--yql/essentials/tools/sql_functions_dump/ya.make20
-rw-r--r--yql/essentials/tools/types_dump/types_dump.cpp14
-rw-r--r--yql/essentials/tools/ya.make1
-rw-r--r--yql/essentials/tools/yql_complete/ya.make2
-rw-r--r--yql/essentials/tools/yql_complete/yql_complete.cpp40
-rw-r--r--yql/essentials/udfs/language/yql/test/canondata/result.json5
-rw-r--r--yql/essentials/udfs/language/yql/test/canondata/test.test_ExtractPragmas_/results.txt87
-rw-r--r--yql/essentials/udfs/language/yql/test/cases/ExtractPragmas.sql8
-rw-r--r--yql/essentials/udfs/language/yql/yql_language_udf.cpp8
-rw-r--r--yql/essentials/utils/method_index.h7
-rw-r--r--yt/cpp/mapreduce/client/client.cpp56
-rw-r--r--yt/cpp/mapreduce/client/client.h20
-rw-r--r--yt/cpp/mapreduce/client/operation.cpp3
-rw-r--r--yt/cpp/mapreduce/client/partition_reader.cpp66
-rw-r--r--yt/cpp/mapreduce/client/partition_reader.h18
-rw-r--r--yt/cpp/mapreduce/client/ya.make1
-rw-r--r--yt/cpp/mapreduce/common/retry_lib.cpp1
-rw-r--r--yt/cpp/mapreduce/http/context.cpp2
-rw-r--r--yt/cpp/mapreduce/http/context.h2
-rw-r--r--yt/cpp/mapreduce/http_client/raw_client.cpp18
-rw-r--r--yt/cpp/mapreduce/http_client/raw_client.h5
-rw-r--r--yt/cpp/mapreduce/http_client/rpc_parameters_serialization.cpp8
-rw-r--r--yt/cpp/mapreduce/http_client/rpc_parameters_serialization.h4
-rw-r--r--yt/cpp/mapreduce/interface/client_method_options.h28
-rw-r--r--yt/cpp/mapreduce/interface/common.h3
-rw-r--r--yt/cpp/mapreduce/interface/io-inl.h25
-rw-r--r--yt/cpp/mapreduce/interface/io.h42
-rw-r--r--yt/cpp/mapreduce/interface/operation.h3
-rw-r--r--yt/cpp/mapreduce/interface/raw_client.h5
-rw-r--r--yt/cpp/mapreduce/interface/serialize.cpp1
-rw-r--r--yt/python/yt/common.py4
-rw-r--r--yt/yql/providers/yt/codec/yt_codec.cpp13
-rw-r--r--yt/yql/providers/yt/codec/yt_codec.h13
-rw-r--r--yt/yql/providers/yt/codec/yt_codec_io.cpp65
-rw-r--r--yt/yql/providers/yt/codec/yt_codec_io.h2
-rw-r--r--yt/yql/providers/yt/common/yql_yt_settings.cpp7
-rw-r--r--yt/yql/providers/yt/common/yql_yt_settings.h1
-rw-r--r--yt/yql/providers/yt/comp_nodes/yql_mkql_block_table_content.cpp22
-rw-r--r--yt/yql/providers/yt/comp_nodes/yql_mkql_file_input_state.cpp3
-rw-r--r--yt/yql/providers/yt/comp_nodes/yql_mkql_file_list.cpp20
-rw-r--r--yt/yql/providers/yt/comp_nodes/yql_mkql_file_list.h11
-rw-r--r--yt/yql/providers/yt/fmr/coordinator/impl/ya.make1
-rw-r--r--yt/yql/providers/yt/fmr/coordinator/impl/yql_yt_coordinator_impl.cpp7
-rw-r--r--yt/yql/providers/yt/fmr/coordinator/impl/yql_yt_coordinator_impl.h1
-rw-r--r--yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/ya.make1
-rw-r--r--yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/yql_yt_coordinator_proto_helpers.cpp7
-rw-r--r--yt/yql/providers/yt/fmr/coordinator/interface/yql_yt_coordinator.h3
-rw-r--r--yt/yql/providers/yt/fmr/job/impl/ut/yql_yt_job_ut.cpp8
-rw-r--r--yt/yql/providers/yt/fmr/job/impl/yql_yt_job_impl.cpp75
-rw-r--r--yt/yql/providers/yt/fmr/job/impl/yql_yt_job_impl.h6
-rw-r--r--yt/yql/providers/yt/fmr/job/impl/yql_yt_table_data_service_writer.h2
-rw-r--r--yt/yql/providers/yt/fmr/proto/coordinator.proto1
-rw-r--r--yt/yql/providers/yt/fmr/proto/request_options.proto1
-rw-r--r--yt/yql/providers/yt/fmr/request_options/proto_helpers/yql_yt_request_proto_helpers.cpp7
-rw-r--r--yt/yql/providers/yt/fmr/request_options/ya.make1
-rw-r--r--yt/yql/providers/yt/fmr/request_options/yql_yt_request_options.cpp4
-rw-r--r--yt/yql/providers/yt/fmr/request_options/yql_yt_request_options.h8
-rw-r--r--yt/yql/providers/yt/fmr/yt_service/impl/yql_yt_yt_service_impl.cpp8
-rw-r--r--yt/yql/providers/yt/fmr/yt_service/interface/yql_yt_yt_service.h1
-rw-r--r--yt/yql/providers/yt/gateway/file/yql_yt_file.cpp2
-rw-r--r--yt/yql/providers/yt/gateway/file/yql_yt_file_comp_nodes.cpp2
-rw-r--r--yt/yql/providers/yt/gateway/file/yql_yt_file_mkql_compiler.cpp8
-rw-r--r--yt/yql/providers/yt/gateway/fmr/ya.make5
-rw-r--r--yt/yql/providers/yt/gateway/fmr/yql_yt_fmr.cpp438
-rw-r--r--yt/yql/providers/yt/gateway/lib/yt_helpers.cpp2
-rw-r--r--yt/yql/providers/yt/gateway/native/yql_yt_native.cpp90
-rw-r--r--yt/yql/providers/yt/gateway/native/yql_yt_transform.cpp7
-rw-r--r--yt/yql/providers/yt/job/yql_job_user.cpp1
-rw-r--r--yt/yql/providers/yt/lib/expr_traits/yql_expr_traits.cpp1
-rw-r--r--yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_helper.cpp10
-rw-r--r--yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_helper.h1
-rw-r--r--yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_join.cpp2
-rw-r--r--yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_map.cpp23
-rw-r--r--yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_misc.cpp6
-rw-r--r--yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_partition.cpp19
-rw-r--r--yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_sort.cpp18
-rw-r--r--yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_write.cpp18
-rw-r--r--yt/yql/providers/yt/provider/yql_yt_block_input.cpp37
-rw-r--r--yt/yql/providers/yt/provider/yql_yt_datasink_exec.cpp6
-rw-r--r--yt/yql/providers/yt/provider/yql_yt_datasink_type_ann.cpp29
-rw-r--r--yt/yql/providers/yt/provider/yql_yt_datasource_constraints.cpp37
-rw-r--r--yt/yql/providers/yt/provider/yql_yt_datasource_type_ann.cpp24
-rw-r--r--yt/yql/providers/yt/provider/yql_yt_helpers.cpp26
-rw-r--r--yt/yql/providers/yt/provider/yql_yt_helpers.h8
-rw-r--r--yt/yql/providers/yt/provider/yql_yt_logical_optimize.cpp7
-rw-r--r--yt/yql/providers/yt/provider/yql_yt_mkql_compiler.cpp26
-rw-r--r--yt/yql/tests/sql/suites/multicluster/map_force.cfg4
-rw-r--r--yt/yql/tests/sql/suites/multicluster/map_force.sql6
-rw-r--r--yt/yql/tests/sql/suites/multicluster/partition_by_key_force.cfg4
-rw-r--r--yt/yql/tests/sql/suites/multicluster/partition_by_key_force.sql9
-rw-r--r--yt/yql/tests/sql/suites/multicluster/sort_force.cfg4
-rw-r--r--yt/yql/tests/sql/suites/multicluster/sort_force.sql8
-rw-r--r--yt/yt/client/api/client.cpp47
-rw-r--r--yt/yt/client/api/client.h6
-rw-r--r--yt/yt/client/api/delegating_client.h2
-rw-r--r--yt/yt/client/api/public.h2
-rw-r--r--yt/yt/client/api/query_tracker_client.cpp13
-rw-r--r--yt/yt/client/api/query_tracker_client.h17
-rw-r--r--yt/yt/client/api/rpc_proxy/client_base.cpp4
-rw-r--r--yt/yt/client/api/rpc_proxy/client_impl.cpp16
-rw-r--r--yt/yt/client/api/table_client.h3
-rw-r--r--yt/yt/client/chaos_client/replication_card.cpp33
-rw-r--r--yt/yt/client/driver/distributed_table_commands.cpp6
-rw-r--r--yt/yt/client/driver/driver.cpp4
-rw-r--r--yt/yt/client/driver/query_commands.cpp7
-rw-r--r--yt/yt/client/driver/table_commands.cpp2
-rw-r--r--yt/yt/client/federated/client.cpp4
-rw-r--r--yt/yt/client/federated/unittests/client_ut.cpp22
-rw-r--r--yt/yt/client/hedging/hedging.cpp4
-rw-r--r--yt/yt/client/queue_client/consumer_client.cpp5
-rw-r--r--yt/yt/client/scheduler/public.h1
-rw-r--r--yt/yt/client/signature/generator.cpp32
-rw-r--r--yt/yt/client/signature/generator.h4
-rw-r--r--yt/yt/client/signature/signature.cpp13
-rw-r--r--yt/yt/client/signature/signature.h6
-rw-r--r--yt/yt/client/signature/unittests/dummy_ut.cpp18
-rw-r--r--yt/yt/client/signature/validator.cpp23
-rw-r--r--yt/yt/client/signature/validator.h2
-rw-r--r--yt/yt/client/tablet_client/config.cpp62
-rw-r--r--yt/yt/client/tablet_client/config.h5
-rw-r--r--yt/yt/client/unittests/mock/client.h2
-rw-r--r--yt/yt/client/unittests/replication_progress_ut.cpp17
-rw-r--r--yt/yt/core/actions/invoker_util.h7
-rw-r--r--yt/yt/core/actions/unittests/future_ut.cpp1
-rw-r--r--yt/yt/core/actions/unittests/invoker_ut.cpp1
-rw-r--r--yt/yt/core/concurrency/delayed_executor.cpp2
-rw-r--r--yt/yt/core/concurrency/fair_share_action_queue.cpp1
-rw-r--r--yt/yt/core/concurrency/fiber_scheduler_thread.cpp1
-rw-r--r--yt/yt/core/concurrency/thread_pool_detail.cpp1
-rw-r--r--yt/yt/core/concurrency/unittests/bounded_concurrency_invoker_ut.cpp1
-rw-r--r--yt/yt/core/concurrency/unittests/invoker_pool_ut.cpp1
-rw-r--r--yt/yt/core/concurrency/unittests/scheduled_executor_ut.cpp1
-rw-r--r--yt/yt/core/http/compression.cpp1
-rw-r--r--yt/yt/core/misc/error.cpp1
-rw-r--r--yt/yt/core/misc/fs.cpp1
-rw-r--r--yt/yt/core/misc/range_helpers-inl.h65
-rw-r--r--yt/yt/core/misc/range_helpers.h10
-rw-r--r--yt/yt/core/misc/unittests/range_helpers_ut.cpp12
-rw-r--r--yt/yt/core/yson/protobuf_interop-inl.h29
-rw-r--r--yt/yt/core/yson/protobuf_interop.cpp79
-rw-r--r--yt/yt/core/yson/protobuf_interop.h16
-rw-r--r--yt/yt/core/ytree/attribute_filter-inl.h2
-rw-r--r--yt/yt/core/ytree/attribute_filter.cpp82
-rw-r--r--yt/yt/core/ytree/attribute_filter.h10
-rw-r--r--yt/yt/core/ytree/serialize-inl.h26
-rw-r--r--yt/yt/core/ytree/serialize.h8
-rw-r--r--yt/yt/core/ytree/unittests/serialize_ut.cpp24
-rw-r--r--yt/yt_proto/yt/client/api/rpc_proxy/proto/api_service.proto12
1067 files changed, 33586 insertions, 14693 deletions
diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml
index 4bdf863d1e..6b2b10ef56 100644
--- a/.github/actions/build/action.yml
+++ b/.github/actions/build/action.yml
@@ -25,9 +25,6 @@ runs:
mkdir -p ../build
patch -p1 < ydb/deploy/patches/0001-sanitizer-build.patch
- # Temporary patch to dix difference between antlr4.9 and 4.13 behaviour
- sed -i 's/TOKEN(NULL)/TOKEN(NULL_)/g' ydb/public/lib/ydb_cli/commands/interactive/yql_highlight.cpp
-
cd ../build
rm -rf *
export CC=/usr/bin/clang-16
@@ -42,9 +39,6 @@ runs:
shell: bash
if: ${{!inputs.sanitizer}}
run: |
- # Temporary patch to dix difference between antlr4.9 and 4.13 behaviour
- sed -i 's/TOKEN(NULL)/TOKEN(NULL_)/g' ydb/public/lib/ydb_cli/commands/interactive/yql_highlight.cpp
-
mkdir -p ../build
cd ../build
rm -rf *
diff --git a/.github/config/muted_ya.txt b/.github/config/muted_ya.txt
index 9ca1397b2b..454b016b52 100644
--- a/.github/config/muted_ya.txt
+++ b/.github/config/muted_ya.txt
@@ -19,6 +19,10 @@ ydb/core/keyvalue/ut_trace TKeyValueTracingTest.WriteSmall
ydb/core/kqp/ut/cost KqpCost.OlapWriteRow
ydb/core/kqp/ut/olap KqpDecimalColumnShard.TestAggregation
ydb/core/kqp/ut/olap KqpDecimalColumnShard.TestFilterCompare
+ydb/core/kqp/ut/olap KqpOlapIndexes.IndexesInBS
+ydb/core/kqp/ut/olap KqpOlapIndexes.IndexesInLocalMetadata
+ydb/core/kqp/ut/olap KqpOlapJson.SwitchAccessorCompactionVariants
+ydb/core/kqp/ut/olap KqpOlapOptimizer.OptimizationByTime
ydb/core/kqp/ut/olap KqpOlapSysView.StatsSysViewBytesColumnActualization
ydb/core/kqp/ut/olap KqpOlapSysView.StatsSysViewBytesDictActualization
ydb/core/kqp/ut/olap KqpOlapSysView.StatsSysViewBytesDictStatActualization
@@ -46,8 +50,10 @@ ydb/core/kqp/ut/tx KqpSnapshotIsolation.TReadOnlyOltpNoSink
ydb/core/kqp/ut/tx KqpSnapshotIsolation.TSimpleOltp
ydb/core/kqp/ut/tx KqpSnapshotIsolation.TSimpleOltpNoSink
ydb/core/kqp/ut/yql KqpScripting.StreamExecuteYqlScriptScanOperationTmeoutBruteForce
+ydb/core/mind/hive/ut TStorageBalanceTest.TestScenario2
ydb/core/quoter/ut QuoterWithKesusTest.PrefetchCoefficient
ydb/core/statistics/aggregator/ut AnalyzeColumnshard.AnalyzeRebootColumnShard
+ydb/core/tablet_flat/ut DataCleanup.CleanupDataWithFollowers
ydb/core/tx/datashard/ut_incremental_backup IncrementalBackup.ComplexRestoreBackupCollection+WithIncremental
ydb/core/tx/schemeshard/ut_login_large TSchemeShardLoginLargeTest.RemoveLogin_Many
ydb/core/tx/schemeshard/ut_move_reboots TSchemeShardMoveRebootsTest.WithData
@@ -59,8 +65,6 @@ ydb/library/actors/interconnect/ut_huge_cluster sole chunk chunk
ydb/library/yaml_config/ut_transform test_transform.py.TestYamlConfigTransformations.test_basic[args1-dump_ds_init]
ydb/library/yql/dq/opt/ut sole chunk chunk
ydb/public/sdk/cpp/src/client/topic/ut [*/*] chunk chunk
-ydb/services/persqueue_v1/ut TPersQueueCommonTest.TestLimiterLimitsWithBlobsRateLimit
-ydb/services/persqueue_v1/ut TPersQueueCommonTest.TestLimiterLimitsWithUserPayloadRateLimit
ydb/services/ydb/sdk_sessions_pool_ut YdbSdkSessionsPool.StressTestSync1
ydb/services/ydb/sdk_sessions_pool_ut YdbSdkSessionsPool.StressTestSync10
ydb/services/ydb/sdk_sessions_ut YdbSdkSessions.TestSdkFreeSessionAfterBadSessionQueryService
@@ -80,7 +84,15 @@ ydb/tests/fq/yds test_mem_alloc.py.TestMemAlloc.test_join_alloc[v1]
ydb/tests/fq/yds test_recovery.py.TestRecovery.test_ic_disconnection
ydb/tests/fq/yds test_select_limit_db_id.py.TestSelectLimitWithDbId.test_select_same_with_id[v1-mvp_external_ydb_endpoint0]
ydb/tests/fq/yds test_yds_bindings.py.TestBindings.test_yds_insert[v1]
+ydb/tests/functional/compatibility test_compatibility.py.TestCompatibility.test_simple
ydb/tests/functional/compatibility test_followers.py.TestFollowersCompatibility.test_followers_compatability
+ydb/tests/functional/compatibility test_stress.py.TestStress.test_kv[last_stable-row]
+ydb/tests/functional/compatibility test_stress.py.TestStress.test_kv[mixed-row]
+ydb/tests/functional/compatibility test_stress.py.TestStress.test_log[last_stable-row]
+ydb/tests/functional/compatibility test_stress.py.TestStress.test_log[mixed-row]
+ydb/tests/functional/compatibility test_stress.py.TestStress.test_tpch1[current-row]
+ydb/tests/functional/compatibility test_stress.py.TestStress.test_tpch1[last_stable-row]
+ydb/tests/functional/compatibility test_stress.py.TestStress.test_tpch1[mixed-row]
ydb/tests/functional/hive test_drain.py.TestHive.test_drain_on_stop
ydb/tests/functional/rename [test_rename.py */*] chunk chunk
ydb/tests/functional/serializable sole chunk chunk
@@ -109,19 +121,22 @@ ydb/tests/functional/tpc/large test_tpcds.py.TestTpcdsS1.test_tpcds[86]
ydb/tests/functional/tpc/large test_tpcds.py.TestTpcdsS1.test_tpcds[9]
ydb/tests/functional/tpc/large test_tpch_spilling.py.TestTpchSpillingS10.test_tpch[7]
ydb/tests/olap sole chunk chunk
+ydb/tests/olap test_quota_exhaustion.py.TestYdbWorkload.test_delete
ydb/tests/olap/column_family/compression alter_compression.py.TestAlterCompression.test_all_supported_compression
ydb/tests/olap/column_family/compression sole chunk chunk
+ydb/tests/olap/oom overlapping_portions.py.TestOverlappingPortions.test
ydb/tests/olap/scenario sole chunk chunk
ydb/tests/olap/scenario test_alter_compression.py.TestAlterCompression.test[alter_compression]
ydb/tests/olap/scenario test_alter_tiering.py.TestAlterTiering.test[many_tables]
ydb/tests/olap/scenario test_insert.py.TestInsert.test[read_data_during_bulk_upsert]
+ydb/tests/olap/scenario test_read_update_write_load.py.TestReadUpdateWriteLoad.test[read_update_write_load]
ydb/tests/olap/ttl_tiering [data_migration_when_alter_ttl.py] chunk chunk
-ydb/tests/olap/ttl_tiering data_correctness.py.TestDataCorrectness.test
+ydb/tests/olap/ttl_tiering [ttl_delete_s3.py] chunk chunk
ydb/tests/olap/ttl_tiering data_migration_when_alter_ttl.py.TestDataMigrationWhenAlterTtl.test
ydb/tests/olap/ttl_tiering sole chunk chunk
ydb/tests/olap/ttl_tiering ttl_delete_s3.py.TestDeleteS3Ttl.test_data_unchanged_after_ttl_change
ydb/tests/olap/ttl_tiering ttl_delete_s3.py.TestDeleteS3Ttl.test_delete_s3_tiering
-ydb/tests/olap/ttl_tiering unstable_connection.py.TestUnstableConnection.test
+ydb/tests/olap/ttl_tiering ttl_delete_s3.py.TestDeleteS3Ttl.test_ttl_delete
ydb/tests/postgres_integrations/go-libpq [docker_wrapper_test.py] chunk chunk
ydb/tests/postgres_integrations/go-libpq docker_wrapper_test.py.test_pg_generated[Test64BitErrorChecking]
ydb/tests/postgres_integrations/go-libpq docker_wrapper_test.py.test_pg_generated[TestArrayValueBackend]
diff --git a/.github/scripts/create_or_update_pr.py b/.github/scripts/create_or_update_pr.py
index e2579f952b..de1c55b438 100644
--- a/.github/scripts/create_or_update_pr.py
+++ b/.github/scripts/create_or_update_pr.py
@@ -1,40 +1,95 @@
-# .github/scripts/create_or_update_pr.py
+#!/usr/bin/env python3
import os
+import argparse
from github import Github
-def create_or_update_pr():
- GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
- BASE_BRANCH = os.getenv('BASE_BRANCH')
- BRANCH_FOR_PR = os.getenv('BRANCH_FOR_PR')
- TITLE = os.getenv('TITLE')
- BODY = os.getenv('BODY')
- REVIEWERS = os.getenv('REVIEWERS').split(',') if os.getenv('REVIEWERS') else []
+def read_body_from_file(file_path):
+ with open(file_path, 'r') as file:
+ return file.read()
- g = Github(GITHUB_TOKEN)
- repo = g.get_repo(os.getenv('GITHUB_REPOSITORY'))
+def get_body_content(body_input):
+ """Determines if the body content is a file path or direct text."""
+ if os.path.isfile(body_input):
+ print(f"Body content will be read from file: {body_input}.")
+ return read_body_from_file(body_input)
+ else:
+ print(f"Body content will be taken directly: '{body_input}.'")
+ return body_input
+
+def create_or_update_pr(args, repo):
+ current_pr = None
+ pr_number = None
+ body = get_body_content(args.body)
# Check for an existing PR
- existing_prs = repo.get_pulls(head=BRANCH_FOR_PR, base=BASE_BRANCH, state='open')
- existing_pr = None
+ existing_prs = repo.get_pulls(head=args.branch_for_pr, base=args.base_branch, state='open')
for pr in existing_prs:
- if pr.title == TITLE:
- existing_pr = pr
+ if pr.base.ref == args.base_branch and pr.head.ref == args.branch_for_pr:
+ current_pr = pr
break
-
- if existing_pr:
- print(f"Existing PR found. Updating PR #{existing_pr.number}.")
- # Update existing PR
- existing_pr.edit(title=TITLE, body=BODY)
- # Add reviewers
- if REVIEWERS:
- existing_pr.create_review_request(reviewers=REVIEWERS)
+ if current_pr:
+ print(f"Existing PR found. Updating PR #{current_pr.number}.")
+ current_pr.edit(title=args.title, body=body)
else:
print("No existing PR found. Creating a new PR.")
- # Create new PR
- pr = repo.create_pull(title=TITLE, body=BODY, head=BRANCH_FOR_PR, base=BASE_BRANCH)
- # Add reviewers
- if REVIEWERS:
- pr.create_review_request(reviewers=REVIEWERS)
+ current_pr = repo.create_pull(title=args.title, body=body, head=args.branch_for_pr, base=args.base_branch)
+
+ pr_number = current_pr.number
+ if os.environ['GITHUB_OUTPUT']:
+ with open(os.environ['GITHUB_OUTPUT'], 'a') as gh_out:
+ print(f"pr_number={pr_number}", file=gh_out)
+
+ print(f"PR created or updated successfully. PR number: {pr_number}")
+
+def append_to_pr_body(args, repo):
+ body_to_append = get_body_content(args.body)
+
+ pr = None
+ if args.pr_number:
+ pr = repo.get_pull(args.pr_number)
+ else:
+ existing_prs = repo.get_pulls(head=args.branch_for_pr, base=args.base_branch, state='open')
+ for p in existing_prs:
+ if p.base.ref == args.base_branch and p.head.ref == args.branch_for_pr:
+ pr = p
+ break
+
+ if pr:
+ print(f"Appending to PR #{pr.number}.")
+ current_body = pr.body or ""
+ new_body = current_body + "\n\n" + body_to_append
+ pr.edit(body=new_body)
+ else:
+ print("No matching pull request found to append body.")
if __name__ == '__main__':
- create_or_update_pr()
+ parser = argparse.ArgumentParser(description='Operate on a GitHub Pull Request')
+ subparsers = parser.add_subparsers(dest='mode', required=True, help='Mode of operation')
+
+ # Subparser for create or update PR mode
+ create_parser = subparsers.add_parser('create_or_update', help='Create or update a pull request')
+ create_parser.add_argument('--base_branch', type=str, required=True, help='Base branch for the PR')
+ create_parser.add_argument('--branch_for_pr', type=str, required=True, help='Branch from which to create the PR')
+ create_parser.add_argument('--title', type=str, required=True, help='Title of the PR')
+ create_parser.add_argument('--body', type=str, default='', required=False, help='Body content of the PR, or path to a file with the content')
+
+ # Subparser for append PR body mode
+ append_parser = subparsers.add_parser('append_pr_body', help='Append text to the body of an existing pull request')
+ group = append_parser.add_mutually_exclusive_group(required=True)
+ group.add_argument('--pr_number', type=int, help='Pull request number')
+ append_parser.add_argument('--body', type=str, required=True, help='Text to append to the PR body')
+
+ args = parser.parse_args()
+
+ GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
+ if not GITHUB_TOKEN:
+ raise ValueError("GITHUB_TOKEN environment variable is not set")
+
+ g = Github(GITHUB_TOKEN)
+ repo_name = os.getenv('GITHUB_REPOSITORY', 'ydb-platform/ydb')
+ repo = g.get_repo(repo_name)
+
+ if args.mode == "create_or_update":
+ create_or_update_pr(args, repo)
+ elif args.mode == "append_pr_body":
+ append_to_pr_body(args, repo)
diff --git a/.github/scripts/tests/create_new_muted_ya.py b/.github/scripts/tests/create_new_muted_ya.py
index 0549e65373..8642a93398 100755
--- a/.github/scripts/tests/create_new_muted_ya.py
+++ b/.github/scripts/tests/create_new_muted_ya.py
@@ -348,6 +348,22 @@ def create_mute_issues(all_tests, file_path):
print("\n\n")
print("\n".join(results))
+ if 'GITHUB_OUTPUT' in os.environ:
+ if 'GITHUB_WORKSPACE' not in os.environ:
+ raise EnvironmentError("GITHUB_WORKSPACE environment variable is not set.")
+
+ file_path = os.path.join(os.environ['GITHUB_WORKSPACE'], "created_issues.txt")
+ print(f"Writing results to {file_path}")
+
+ with open(file_path, 'w') as f:
+ f.write("```\n")
+ f.write("\n".join(results))
+ f.write("\n```")
+
+ with open(os.environ['GITHUB_OUTPUT'], 'a') as gh_out:
+ gh_out.write(f"created_issues_file={file_path}")
+
+ print(f"Result saved to env variable GITHUB_OUTPUT by key created_issues_file")
def mute_worker(args):
diff --git a/.github/scripts/tests/update_mute_issues.py b/.github/scripts/tests/update_mute_issues.py
index 7ae751272e..8387f40a0f 100755
--- a/.github/scripts/tests/update_mute_issues.py
+++ b/.github/scripts/tests/update_mute_issues.py
@@ -19,6 +19,20 @@ CURRENT_TEST_HISTORY_DASHBOARD = "https://datalens.yandex/34xnbsom67hcq?"
# admin:org
# project
+
+def handle_github_errors(response):
+ if 'errors' in response:
+ for error in response['errors']:
+ if error['type'] == 'INSUFFICIENT_SCOPES':
+ print("Error: Insufficient Scopes")
+ print("Message:", error['message'])
+ raise Exception("Insufficient scopes. Please update your token's scopes.")
+ # Handle other types of errors if necessary
+ else:
+ print("Unknown error type:", error.get('type', 'No type'))
+ print("Message:", error.get('message', 'No message available'))
+ raise Exception("GraphQL Error: " + error.get('message', 'Unknown error'))
+
def run_query(query, variables=None):
GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
HEADERS = {"Authorization": f"Bearer {GITHUB_TOKEN}", "Content-Type": "application/json"}
@@ -26,6 +40,7 @@ def run_query(query, variables=None):
'https://api.github.com/graphql', json={'query': query, 'variables': variables}, headers=HEADERS
)
if request.status_code == 200:
+ handle_github_errors(request.json())
return request.json()
else:
raise Exception(f"Query failed to run by returning code of {request.status_code}. {query}")
diff --git a/.github/workflows/build_ydb_dstool.yml b/.github/workflows/build_ydb_dstool.yml
index f7e79837bb..e2f6c42852 100644
--- a/.github/workflows/build_ydb_dstool.yml
+++ b/.github/workflows/build_ydb_dstool.yml
@@ -3,37 +3,29 @@ run-name: Build YDB DSTool
on:
workflow_dispatch:
inputs:
- build-linux:
+ commit_sha:
+ type: string
+ default: ""
+ build-linux-amd:
+ type: boolean
+ description: Build YDB DSTool for Linux (amd64)
+ default: true
+ build-linux-arm:
type: boolean
- description: Build YDB DSTool for Linux
+ description: Build YDB DSTool for Linux (arm64)
default: true
build-darwin-amd:
type: boolean
- description: Build YDB DSTool for Darwin amd64
+ description: Build YDB DSTool for MacOS (amd64)
default: true
- build-windows:
+ build-darwin-arm:
type: boolean
- description: Build YDB DSTool for Windows
+ description: Build YDB DSTool for MacOS (arm64)
+ default: true
+ build-windows-amd:
+ type: boolean
+ description: Build YDB DSTool for Windows (amd64)
default: true
- commit_sha:
- type: string
- default: ""
- s3_host:
- type: string
- default: "storage.yandexcloud.net"
- description: "S3 endpoint. Details: https://yandex.cloud/en/docs/storage/tools/s3cmd"
- s3_bucket:
- type: string
- default: "yandexcloud-ydb-dstool"
- description: "S3 bucket. S3Uri (without hostname). Details: https://yandex.cloud/en/docs/storage/tools/s3cmd"
- s3_dns_host_bucket:
- type: string
- default: "%(bucket)s.storage.yandexcloud.net"
- description: "S3 DNS-style bucket+hostname:port template for accessing a bucket. Details: https://yandex.cloud/en/docs/storage/tools/s3cmd"
- s3_region:
- type: string
- default: "ru-central1"
- description: "S3 region. Details: https://yandex.cloud/en/docs/storage/tools/s3cmd"
defaults:
run:
shell: bash
@@ -49,17 +41,25 @@ jobs:
id: set-matrix
run: |
MATRIX='{"include":[]}'
- if [ "${{ inputs.build-linux }}" == "true" ]; then
- MATRIX=$(echo $MATRIX | jq -c '.include += [{"os": "linux", "runner": "ubuntu-latest", "shell": "bash", "binary": "ydb-dstool"}]')
- echo "Matrix after adding linux: $MATRIX"
+ if [ "${{ inputs.build-linux-amd }}" == "true" ]; then
+ MATRIX=$(echo $MATRIX | jq -c '.include += [{"os": "linux-amd", "runner": "ubuntu-latest", "shell": "bash", "binary": "ydb-dstool", "platform": "DEFAULT-LINUX-X86_64"}]')
+ echo "Matrix after adding linux-amd: $MATRIX"
+ fi
+ if [ "${{ inputs.build-linux-arm }}" == "true" ]; then
+ MATRIX=$(echo $MATRIX | jq -c '.include += [{"os": "linux-arm", "runner": "ubuntu-latest", "shell": "bash", "binary": "ydb-dstool", "platform": "DEFAULT-LINUX-AARCH64"}]')
+ echo "Matrix after adding linux-arm: $MATRIX"
fi
if [ "${{ inputs.build-darwin-amd }}" == "true" ]; then
- MATRIX=$(echo $MATRIX | jq -c '.include += [{"os": "darwin-amd", "runner": "macos-13", "shell": "bash", "binary": "ydb-dstool"}]')
+ MATRIX=$(echo $MATRIX | jq -c '.include += [{"os": "darwin-amd", "runner": "macos-13", "shell": "bash", "binary": "ydb-dstool", "platform": "DEFAULT-DARWIN-X86_64"}]')
echo "Matrix after adding darwin-amd: $MATRIX"
fi
- if [ "${{ inputs.build-windows }}" == "true" ]; then
- MATRIX=$(echo $MATRIX | jq -c '.include += [{"os": "windows", "runner": "windows-latest", "shell": "bash", "binary": "ydb-dstool.exe"}]')
- echo "Matrix after adding windows: $MATRIX"
+ if [ "${{ inputs.build-darwin-arm }}" == "true" ]; then
+ MATRIX=$(echo $MATRIX | jq -c '.include += [{"os": "darwin-arm", "runner": "macos-13", "shell": "bash", "binary": "ydb-dstool", "platform": "DEFAULT-DARWIN-ARM64"}]')
+ echo "Matrix after adding darwin-arm: $MATRIX"
+ fi
+ if [ "${{ inputs.build-windows-amd }}" == "true" ]; then
+ MATRIX=$(echo $MATRIX | jq -c '.include += [{"os": "windows-amd", "runner": "windows-latest", "shell": "bash", "binary": "ydb-dstool.exe", "platform": "DEFAULT-WIN-X86_64"}]')
+ echo "Matrix after adding windows-amd: $MATRIX"
fi
echo "Final output matrix: $MATRIX"
@@ -71,7 +71,7 @@ jobs:
build-platform-specific-binary:
strategy:
matrix: ${{ fromJSON(needs.build-matrix.outputs.matrix) }}
- name: Build ${{ matrix.os }} YDB CLI binary
+ name: Build ${{ matrix.os }} YDB DSTool binary
needs: build-matrix
runs-on: ${{ matrix.runner }}
defaults:
@@ -90,13 +90,13 @@ jobs:
# Turns out it is crucial to prepare VS environment and build in one step due to env variable visibility
- name: Prepare Visual Studio environment and build windows binary with ya make
- if: ${{ matrix.os == 'windows' }}
+ if: ${{ matrix.os == 'windows-amd' }}
shell: cmd
run: ${{ '"%ProgramFiles%\Microsoft Visual Studio\2022\Enterprise\Common7\Tools\VsDevCmd.bat" -arch=amd64' }} && python ya make ydb/apps/dstool -r -DUSE_SSE4=no -o ./
- name: Build unix binary with ya make
- if: ${{ matrix.os != 'windows' }}
- run: ./ya make ydb/apps/dstool -r -DUSE_SSE4=no
+ if: ${{ matrix.os != 'windows-amd' }}
+ run: ./ya make ydb/apps/dstool -r -DUSE_SSE4=no --target-platform ${{ matrix.platform }}
- name: Upload binary to artifact
uses: actions/upload-artifact@v4
@@ -115,39 +115,57 @@ jobs:
uses: actions/checkout@v4
with:
ref: ${{ inputs.commit_sha }}
- - name: Get YDB CLI version from ydb/apps/dstool/version.txt
+ - name: Get YDB DSTool version from ydb/apps/dstool/version.txt
id: getver
run: echo "dstool_version=$(cat ydb/apps/dstool/version.txt)" >> $GITHUB_OUTPUT
- - name: Print YDB CLI version ${{ steps.getver.outputs.dstool_version }}
+ - name: Print YDB DSTool version ${{ steps.getver.outputs.dstool_version }}
run: echo ${{ steps.getver.outputs.dstool_version }}
- - name: Prepare directory for linux binary
- if: ${{ inputs.build-linux }}
+ - name: Prepare directory for linux-amd binary
+ if: ${{ inputs.build-linux-amd }}
run: mkdir -p ${{ steps.getver.outputs.dstool_version }}/linux/amd64
- - name: Prepare directory for Darwin amd binary
+ - name: Prepare directory for linux-arm binary
+ if: ${{ inputs.build-linux-arm }}
+ run: mkdir -p ${{ steps.getver.outputs.dstool_version }}/linux/arm64
+ - name: Prepare directory for darwin-amd binary
if: ${{ inputs.build-darwin-amd }}
run: mkdir -p ${{ steps.getver.outputs.dstool_version }}/darwin/amd64
- - name: Prepare directory for Windows binary
- if: ${{ inputs.build-windows }}
+ - name: Prepare directory for darwin-arm binary
+ if: ${{ inputs.build-darwin-arm }}
+ run: mkdir -p ${{ steps.getver.outputs.dstool_version }}/darwin/arm64
+ - name: Prepare directory for windows-amd binary
+ if: ${{ inputs.build-windows-amd }}
run: mkdir -p ${{ steps.getver.outputs.dstool_version }}/windows/amd64/unsigned
- - name: Copy linux binary
- if: ${{ inputs.build-linux }}
+ - name: Copy linux-amd binary
+ if: ${{ inputs.build-linux-amd }}
uses: actions/download-artifact@v4
with:
- name: linux-binary
+ name: linux-amd-binary
path: ${{ steps.getver.outputs.dstool_version }}/linux/amd64/
+ - name: Copy linux-arm binary
+ if: ${{ inputs.build-linux-arm }}
+ uses: actions/download-artifact@v4
+ with:
+ name: linux-arm-binary
+ path: ${{ steps.getver.outputs.dstool_version }}/linux/arm64/
- name: Copy darwin amd64 binary
if: ${{ inputs.build-darwin-amd }}
uses: actions/download-artifact@v4
with:
name: darwin-amd-binary
path: ${{ steps.getver.outputs.dstool_version }}/darwin/amd64/
- - name: Copy windows binary (unsigned)
- if: ${{ inputs.build-windows }}
+ - name: Copy darwin arm64 binary
+ if: ${{ inputs.build-darwin-arm }}
+ uses: actions/download-artifact@v4
+ with:
+ name: darwin-arm-binary
+ path: ${{ steps.getver.outputs.dstool_version }}/darwin/arm64/
+ - name: Copy windows-amd binary (unsigned)
+ if: ${{ inputs.build-windows-amd }}
uses: actions/download-artifact@v4
with:
- name: windows-binary
+ name: windows-amd-binary
path: ${{ steps.getver.outputs.dstool_version }}/windows/amd64/unsigned/
- name: Print resulting file hierarchy
@@ -164,5 +182,10 @@ jobs:
cd ..
- name: Upload to S3
- run: s3cmd --access_key=${{ secrets.CLI_S3_KEY_ID }} --secret_key=${{ secrets.CLI_S3_KEY_SECRET_ID }} --host=${{ inputs.s3_host }} --host-bucket="${{ inputs.s3_dns_host_bucket }}" --region=${{ inputs.s3_region }} sync --recursive ${{ steps.getver.outputs.dstool_version }} s3://${{ inputs.s3_bucket }}/release/
+ env:
+ S3_HOST: "storage.yandexcloud.net"
+ S3_BUCKET: "yandexcloud-ydb-dstool"
+ S3_DNS_HOST_BUCKET: "%(bucket)s.storage.yandexcloud.net"
+ S3_REGION: ru-central1
+ run: s3cmd --access_key=${{ secrets.CLI_S3_KEY_ID }} --secret_key=${{ secrets.CLI_S3_KEY_SECRET_ID }} --host="$S3_HOST" --host-bucket="$S3_DNS_HOST_BUCKET" --region="$S3_REGION" sync --recursive ${{ steps.getver.outputs.dstool_version }} "s3://$S3_BUCKET/release/"
diff --git a/.github/workflows/collect_analytics.yml b/.github/workflows/collect_analytics.yml
index 3e92d0feb6..06a40ecba0 100644
--- a/.github/workflows/collect_analytics.yml
+++ b/.github/workflows/collect_analytics.yml
@@ -1,7 +1,7 @@
name: Collect-analytics-run
on:
schedule:
- - cron: "0 * * * *" # Every 1 h
+ - cron: "0 1-23/2 * * *" #каждые 2 часа в 0 минут, начиная с 1:00 и заканчивая 23:00.
workflow_dispatch:
inputs:
commit_sha:
@@ -43,11 +43,7 @@ jobs:
- name: Collect test history data with window 10 run release-asan for main
continue-on-error: true
run: python3 .github/scripts/analytics/flaky_tests_history_n_runs.py --runs=10 --build_type=release-asan
- - name: Collect test history data with window 50 run relwithdebinfo for main
- continue-on-error: true
- run: python3 .github/scripts/analytics/flaky_tests_history_n_runs.py --runs=50
- - name: Collect test history data with window 50 run release-asan for main
- run: python3 .github/scripts/analytics/flaky_tests_history_n_runs.py --runs=50 --build_type=release-asan
+
diff --git a/.github/workflows/create_issues_for_muted_tests.yml b/.github/workflows/create_issues_for_muted_tests.yml
new file mode 100644
index 0000000000..6a6bd90443
--- /dev/null
+++ b/.github/workflows/create_issues_for_muted_tests.yml
@@ -0,0 +1,79 @@
+name: Create issues for muted tests
+
+on:
+ pull_request_review:
+ types:
+ - submitted
+ workflow_dispatch:
+ inputs:
+ pr_number:
+ description: 'The pull request number'
+ required: true
+ type: number
+
+env:
+ GH_TOKEN: ${{ secrets.YDBOT_TOKEN }}
+ MUTED_YA_FILE_PATH: .github/config/muted_ya.txt
+
+jobs:
+ create-issues-for-muted-tests:
+ runs-on: ubuntu-latest
+ if: |
+ github.event.review.state == 'approved' &&
+ contains(github.event.pull_request.labels.*.name, 'mute-unmute') ||
+ github.event_name == 'workflow_dispatch'
+
+ steps:
+ - name: Set environment variables for branches
+ run: |
+ echo "BRANCH_FOR_PR=${{ github.event.pull_request.head.ref || github.head_ref }}" >> $GITHUB_ENV
+ echo "BASE_BRANCH=${{ github.event.pull_request.base.ref || github.base_ref }}" >> $GITHUB_ENV
+
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.event.pull_request.head.ref || github.head_ref }}
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install ydb[yc] PyGithub
+
+ - name: Setup ydb access
+ uses: ./.github/actions/setup_ci_ydb_service_account_key_file_credentials
+ with:
+ ci_ydb_service_account_key_file_credentials: ${{ secrets.CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS }}
+
+ - name: Create issues for muted tests
+ id: create_issues
+ env:
+ GITHUB_TOKEN: ${{ env.GH_TOKEN }}
+ run: |
+ .github/scripts/tests/create_new_muted_ya.py create_issues --file_path=${{ github.workspace }}/${{ env.MUTED_YA_FILE_PATH }}
+
+ - name: Add issues to PR
+ env:
+ GITHUB_TOKEN: ${{ env.GH_TOKEN }}
+ run: |
+ python .github/scripts/create_or_update_pr.py append_pr_body --pr_number=${{ github.event.pull_request.number || github.event.inputs.pr_number }} --body=${{ steps.create_issues.outputs.created_issues_file }}
+
+ - name: Comment PR
+ uses: actions/github-script@v7
+
+ with:
+ github-token: ${{ env.GH_TOKEN }}
+ script: |
+ const fs = require('fs');
+ const path = require('path');
+
+ const workflowUrl = `https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}`;
+ const filePath = ${{ steps.create_issues.outputs.created_issues_file }}
+ const bodyText = fs.readFileSync(filePath, 'utf8');
+ const completeBody = `Collected in workflow [#${{ github.run_number }}](${workflowUrl})\n\n${bodyText}`;
+
+ github.rest.issues.createComment({
+ issue_number: ${{ github.event.pull_request.number || github.event.inputs.pr_number }},
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body: completeBody
+ });
diff --git a/.github/workflows/regression_run_compatibility.yml b/.github/workflows/regression_run_compatibility.yml
new file mode 100644
index 0000000000..3d68c97556
--- /dev/null
+++ b/.github/workflows/regression_run_compatibility.yml
@@ -0,0 +1,20 @@
+name: Regression-run_compatibility
+
+on:
+ schedule:
+ - cron: "0 23 * * *" # At 23:00 every day
+ workflow_dispatch:
+
+jobs:
+ main:
+ name: Regression-run_compatibility
+ uses: ./.github/workflows/run_tests.yml
+ secrets: inherit
+ strategy:
+ fail-fast: false
+ matrix:
+ build_preset: ["relwithdebinfo", "release-asan", "release-tsan", "release-msan"]
+ with:
+ test_targets: ydb/tests/functional/compatibility/
+ build_preset: ${{ matrix.build_preset }}
+
diff --git a/.github/workflows/update_muted_ya.yml b/.github/workflows/update_muted_ya.yml
index 53bb474537..99f40222b6 100644
--- a/.github/workflows/update_muted_ya.yml
+++ b/.github/workflows/update_muted_ya.yml
@@ -1,6 +1,8 @@
name: Update Muted tests
on:
+ schedule:
+ - cron: "0 */2 * * *" # At the beginning of every 2nd hour
workflow_dispatch:
env:
@@ -8,7 +10,8 @@ env:
BRANCH_FOR_PR: update-muted-ya
TITLE: "Update muted_ya.txt"
BASE_BRANCH: main
- REVIEWERS: ydb-platform/ci
+ REVIEWERS: "['ci']"
+ LABEL: mute-unmute
jobs:
create-or-update-muted-ya:
@@ -22,46 +25,53 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- pip install ydb[yc] PyGithub
+ pip install ydb[yc] PyGithub codeowners pandas
- name: Setup ydb access
uses: ./.github/actions/setup_ci_ydb_service_account_key_file_credentials
with:
ci_ydb_service_account_key_file_credentials: ${{ secrets.CI_YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS }}
+
+ - name: Collect test history data with window 1 days relwithdebinfo for ${{ env.BASE_BRANCH }}
+ run: python3 .github/scripts/analytics/flaky_tests_history.py --days-window=1 --branch=${{ env.BASE_BRANCH }}
+
+ - name: Update muted and not muted tests in DB for ${{ env.BASE_BRANCH }}
+ run: python3 .github/scripts/tests/get_muted_tests.py upload_muted_tests --branch=${{ env.BASE_BRANCH }}
+
+ - name: Update test monitor (how long tests in state) for ${{ env.BASE_BRANCH }}
+ run: python3 .github/scripts/analytics/tests_monitor.py --branch=${{ env.BASE_BRANCH }}
- - name: Update branch with base branch
+ - name: Update branch ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }} with branch ${{ env.BASE_BRANCH }}
run: |
git config user.name YDBot
git config user.email ydbot@ydb.tech
# Fetch the latest changes from remote
- git fetch origin
+ git fetch origin ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }}
# Checkout BRANCH_FOR_PR, create if it doesn't exist based on BASE_BRANCH
- if git show-ref --quiet refs/heads/${{ env.BRANCH_FOR_PR }}; then
- git checkout ${{ env.BRANCH_FOR_PR }}
+ if git show-ref --quiet origin ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }}; then
+ echo 'Branch ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }} exists.'
+ git checkout ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }}
else
- git checkout -b ${{ env.BRANCH_FOR_PR }} origin/${{ env.BASE_BRANCH }}
+ echo 'Branch ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }} does not exist. Creating based on ${{ env.BASE_BRANCH }}'
+ git checkout -b ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }} origin/${{ env.BASE_BRANCH }}
fi
# Attempt to rebase BRANCH_FOR_PR onto BASE_BRANCH
- if ! git rebase origin/${{ env.BASE_BRANCH }}; then
- echo "Rebase failed, resetting branch to match BASE_BRANCH..."
-
+ if ! git rebase origin/${{ env.BASE_BRANCH }} -X theirs; then
+ echo "Rebase failed, resetting branch to match ${{ env.BASE_BRANCH }}..."
+
# Abort the rebase process
git rebase --abort
- # Reset the branch to BASE_BRANCH
+ echo "Reset branch ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }} to origin/${{ env.BASE_BRANCH }}"
git reset --hard origin/${{ env.BASE_BRANCH }}
- # Force push the reset branch to remote
- git push origin ${{ env.BRANCH_FOR_PR }} --force
- else
- # If rebase is successful, push the updated branch
- git push origin ${{ env.BRANCH_FOR_PR }}
fi
+ git push origin ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }} --force
- - name: Run the script
+ - name: Run script create_new_muted_ya.py
run: |
.github/scripts/tests/create_new_muted_ya.py update_muted_ya
@@ -81,9 +91,12 @@ jobs:
fi
- name: Collect PR description
+ if: env.changes == 'true'
id: pr_description
run: |
PR_BODY=''
+ PR_BODY_FILE="pr_body_content.txt"
+
if [ -s mute_update/deleted_tests_in_mute_debug.txt ]; then
DELETED_COUNT=$(wc -l < mute_update/deleted_tests_in_mute_debug.txt)
PR_BODY+=$'**Removed from mute: '"${DELETED_COUNT}**"$'\n\n'
@@ -106,8 +119,10 @@ jobs:
PR_BODY+=$'\n```\n\n'
fi
- # Use printf to handle special characters and newlines
- printf "PR_BODY<<EOF\n%s\nEOF\n" "$PR_BODY" >> $GITHUB_ENV
+ # Save PR_BODY to the file
+ echo "$PR_BODY" > "$PR_BODY_FILE"
+ # Export the path to the file to the GitHub environment
+ echo "PR_BODY_PATH=$PR_BODY_FILE" >> $GITHUB_ENV
- name: Stage changes if any
if: env.changes == 'true'
@@ -124,24 +139,59 @@ jobs:
git commit -m "Update muted YA file"
- name: Push changes
+ if: env.changes == 'true'
uses: ad-m/github-push-action@v0.8.0
with:
- github_token: ${{ secrets.GITHUB_TOKEN }}
- branch: ${{ env.BRANCH_FOR_PR }}
+ github_token: ${{ secrets.YDBOT_TOKEN }}
+ branch: ${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }}
force: true
- - name: Install PyGithub
- run: |
- pip install PyGithub
- name: Create or update PR
+ if: env.changes == 'true'
id: create_or_update_pr
env:
- GITHUB_TOKEN: ${{ env.GH_TOKEN }}
- BRANCH_FOR_PR: ${{ env.BRANCH_FOR_PR }}
- TITLE: ${{ env.TITLE }}
- BASE_BRANCH: ${{ env.BASE_BRANCH }}
- BODY: ${{ env.PR_BODY }}
- REVIEWERS: ${{ env.REVIEWERS }}
+ GITHUB_TOKEN: ${{ secrets.YDBOT_TOKEN }}
run: |
- python .github/scripts/create_or_update_pr.py \ No newline at end of file
+ python .github/scripts/create_or_update_pr.py create_or_update --base_branch="${{ env.BASE_BRANCH }}" --branch_for_pr="${{ env.BRANCH_FOR_PR }}_${{ env.BASE_BRANCH }}" --title="${{ env.TITLE }} in ${{ env.BASE_BRANCH }}" --body="${{ env.PR_BODY_PATH }}"
+
+
+ - name: Comment PR
+ uses: actions/github-script@v7
+ if: env.changes == 'true'
+ with:
+ github-token: ${{ secrets.YDBOT_TOKEN }}
+ script: |
+ const fs = require('fs');
+ const path = require('path');
+
+ const workflowUrl = `https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}`;
+ const filePath = path.join(process.env.GITHUB_WORKSPACE, 'pr_body_content.txt');
+ const bodyText = fs.readFileSync(filePath, 'utf8');
+ const completeBody = `Collected in workflow [#${{ github.run_number }}](${workflowUrl})\n\n${bodyText}`;
+
+ github.rest.issues.createComment({
+ issue_number: ${{ steps.create_or_update_pr.outputs.pr_number }},
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body: completeBody
+ });
+
+ github.rest.issues.addLabels({
+ ...context.repo,
+ issue_number: ${{ steps.create_or_update_pr.outputs.pr_number }},
+ labels: ['${{ env.LABEL }}']
+ });
+
+ - name: Add reviewers
+ if: env.changes == 'true'
+ uses: octokit/request-action@v2.x
+ with:
+ route: POST /repos/{owner}/{repo}/pulls/{pull_number}/requested_reviewers
+ owner: ${{ github.repository_owner }}
+ repo: ${{ github.event.repository.name }}
+ pull_number: ${{ steps.create_or_update_pr.outputs.pr_number }}
+ team_reviewers: ${{ env.REVIEWERS }}
+ token: ${{ secrets.YDBOT_TOKEN }}
+
+ \ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6a8722d22e..a0ce54858b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,7 +1,16 @@
-## Unreleased
-
-### Functionality
-* 15186:Increased the query text limit size in system views from 4 KB to 10 KB. [#15186](https://github.com/ydb-platform/ydb/pull/15186) ([spuchin](https://github.com/spuchin))
-
-### Bug fixes
-* 15160:Fixed the issue of the transaction hanging if a user performs a control plane operation with a topic (for example, adding partitions or a consumer) and the PQ tablet moves to another node. The transaction is now completed successfully. [#15070](https://github.com/ydb-platform/ydb/issues/15070) [#15160](https://github.com/ydb-platform/ydb/pull/15160) ([Alek5andr-Kotov](https://github.com/Alek5andr-Kotov))
+## Unreleased
+
+### Functionality
+
+* 15186:Increased [the query text limit size](../dev/system-views#query-metrics) in system views from 4 KB to 10 KB. [#15186](https://github.com/ydb-platform/ydb/pull/15186) ([spuchin](https://github.com/spuchin))
+* 15693:Added a health check configuration that administrators can customize: the number of node restarts, the number of tablets, the maximum time difference between database dynamic nodes,
+and the timeout (by default, the maximum response time from the health check). Documentation is under construction. [#15693](https://github.com/ydb-platform/ydb/pull/15693) ([Andrei Rykov](https://github.com/StekPerepolnen))
+
+### Bug fixes
+
+* 15721:Fixed a bug in YDB UUID column handling in ReadTable SDK method. [#15721](https://github.com/ydb-platform/ydb/pull/15721) ([Ivan Nikolaev](https://github.com/lex007in))
+* 16061:Fixed a bug in handling OLAP scan queries with predicates. [#16061](https://github.com/ydb-platform/ydb/pull/16061) ([Semyon](https://github.com/swalrus1))
+* 16060:Fixed an [error](https://github.com/ydb-platform/ydb/issues/15551) that caused the **RETURNING** clause to work incorrectly with INSERT/UPSERT operations. [#16060](https://github.com/ydb-platform/ydb/pull/16060) ([Vitalii Gridnev](https://github.com/gridnevvvit))
+* 16021:Fixed a rare bug that caused a VERIFY failure when replicating data. [#10650](https://github.com/ydb-platform/ydb/issues/10650) [#16021](https://github.com/ydb-platform/ydb/pull/16021) ([Alexander Rutkovsky](https://github.com/alexvru))
+* 16016:Fixed rare node failures during read session balancing. [#16017](https://github.com/ydb-platform/ydb/issues/16017) [#16016](https://github.com/ydb-platform/ydb/pull/16016) ([Nikolay Shestakov](https://github.com/nshestakov))
+* 16423:Changed behavior — `SHOW CREATE TABLE` now fails on views instead of producing wrong output. [#16423](https://github.com/ydb-platform/ydb/pull/16423) ([Daniil Demin](https://github.com/jepett0)) \ No newline at end of file
diff --git a/build/conf/bison_lex.conf b/build/conf/bison_lex.conf
index 68dc35a66a..99ed9a79c9 100644
--- a/build/conf/bison_lex.conf
+++ b/build/conf/bison_lex.conf
@@ -37,7 +37,7 @@ _FLEX_TOOL_DIR=
_FLEX_HEADER=
when ($_BISON_FLEX_SET_DEFAULTS == "yes") {
- _BISON_HEADER=--defines=${nopath;noext;output;main;addincl;norel;suf=$_BISON_HEADER_SUFFIX:SRC}
+ _BISON_HEADER=--defines=${nopath;noext;main;addincl;norel;output;suf=$_BISON_HEADER_SUFFIX:SRC}
_BISON_PP=$YMAKE_PYTHON ${input:"build/scripts/preprocess.py"} $_ADD_HIDDEN_INPUTS($_CPP_BISON_SKELS) ${nopath;noext;tmp;suf=$_BISON_HEADER_SUFFIX:SRC}
_FLEX_TOOL=${tool:"contrib/tools/flex-old"}
_FLEX_TOOL_DIR=contrib/tools/flex-old
@@ -93,7 +93,7 @@ macro FLEX_GEN_CPP() {
### Use SUFF (including extension) to name Bison defines header file. The default is just `.h`.
macro BISON_HEADER(Suffix) {
SET(_BISON_HEADER_SUFFIX $Suffix)
- SET(_BISON_HEADER --defines=\${nopath;noext;output;main;addincl;norel;suf=$_BISON_HEADER_SUFFIX:SRC})
+ SET(_BISON_HEADER --defines=\${nopath;noext;main;addincl;norel;output;suf=$_BISON_HEADER_SUFFIX:SRC})
SET(_BISON_PP $YMAKE_PYTHON \${input:"build/scripts/preprocess.py"} $_ADD_HIDDEN_INPUTS($_CPP_BISON_SKELS) \${nopath;noext;tmp;suf=$_BISON_HEADER_SUFFIX:SRC})
}
@@ -123,7 +123,7 @@ macro USE_MODERN_FLEX() {
macro USE_MODERN_FLEX_WITH_HEADER(Suffix) {
SET(_FLEX_TOOL \${tool:"contrib/tools/flex"} --m4=\${tool:"contrib/tools/m4"})
SET(_FLEX_TOOL_DIR contrib/tools/flex)
- SET(_FLEX_HEADER --header-file=\${nopath;noext;output;main;addincl;norel;suf=$Suffix:SRC})
+ SET(_FLEX_HEADER --header-file=\${nopath;noext;main;addincl;norel;output;suf=$Suffix:SRC})
}
### @usage: USE_OLD_FLEX()
@@ -137,7 +137,7 @@ macro USE_OLD_FLEX() {
macro _SRC("y", SRC, SRCFLAGS...) {
.PEERDIR=build/induced/by_bison
.CMD=${tool:"contrib/tools/bison"} $BISON_FLAGS ${env:"M4=${tool:M4_PATH}"} ${env:"BISON_PKGDATADIR=${ARCADIA_ROOT}/${_BISON_DATA_DIR}"} $_BISON_HEADER ${hide:_BISON_GEN_EXT} -o ${nopath;output;suf=$_BISON_GEN_EXT:SRC} ${input:SRC} ${SRCFLAGS} ${hide;kv:"p YC"} ${hide;kv:"pc light-green"} && $_BISON_PP
- .SEM=target_macroses-ITEM && target_macroses-macro target_bison_parser && target_macroses-args PRIVATE ${input:SRC} ${output;nopath;noext;hide;suf=${OBJ_SUF}.o:SRC} ${nopath;noext;hide;output:SRC.h} ${nopath;noext;output;addincl;hide:SRC.h} && platform_vars-BISON_FLAGS ${quo:BISON_FLAGS} && conan-tool_requires m4/1.4.19 && conan-imports 'bin, m4* -> ./bin/m4/bin' && conan-tool_requires bison/3.8.2 && conan-imports 'bin, bison* -> ./bin/bison/bin' && conan-imports 'res, * -> ./bin/bison/res'
+ .SEM=target_macroses-ITEM && target_macroses-macro target_bison_parser && target_macroses-args PRIVATE ${input:SRC} ${hide;output;suf=${OBJ_SUF}.o;nopath;noext:SRC} ${hide;nopath;noext;output;suf=.h:SRC} ${hide;nopath;noext;addincl;output;suf=.h:SRC} && platform_vars-BISON_FLAGS ${quo:BISON_FLAGS} && conan-tool_requires m4/1.4.19 && conan-imports 'bin, m4* -> ./bin/m4/bin' && conan-tool_requires bison/3.8.2 && conan-imports 'bin, bison* -> ./bin/bison/bin' && conan-imports 'res, * -> ./bin/bison/res'
}
macro _SRC("ypp", SRC, SRCFLAGS...) {
diff --git a/build/conf/fbs.conf b/build/conf/fbs.conf
index 067c45639e..667f3d2537 100644
--- a/build/conf/fbs.conf
+++ b/build/conf/fbs.conf
@@ -22,11 +22,11 @@ _PY_FBS_DEPS=contrib/python/flatbuffers
### processed when --add-flatbuf-result flag is specified on the command line
### for 'ya make ...' (tar archive is extracted to output directory).
macro FBS_TO_PYSRC(OUT_NAME, IN_FBS_FILES...) {
- .CMD=${cwd:ARCADIA_BUILD_ROOT} ${tool:FLATC} --python --no-warnings --python-typing --gen-mutable ${FLATC_FLAGS_VALUE} ${pre=-I :_FLATC__INCLUDE} -o ${BINDIR} ${input:IN_FBS_FILES} && $YMAKE_PYTHON3 ${input:"build/scripts/tar_sources.py"} --exts .py --input $BINDIR --output ${noauto;output;tared:OUT_NAME.py3.fbs.pysrc} ${hide;kv:"p FP"} ${hide;kv:"pc light-green"} ${hide;kv:"tared_kind nodir"} ${hide:FBS_FAKEID}
+ .CMD=${cwd:ARCADIA_BUILD_ROOT} ${tool:FLATC} --python --no-warnings --python-typing --gen-mutable ${FLATC_FLAGS_VALUE} ${pre=-I :_FLATC__INCLUDE} -o ${BINDIR} ${input:IN_FBS_FILES} && $YMAKE_PYTHON3 ${input:"build/scripts/tar_sources.py"} --exts .py --input $BINDIR --output ${noauto;output;tared;suf=.py3.fbs.pysrc:OUT_NAME} ${hide;kv:"p FP"} ${hide;kv:"pc light-green"} ${hide;kv:"tared_kind nodir"} ${hide:FBS_FAKEID}
}
macro FBS_TO_PY2SRC(OUT_NAME, IN_FBS_FILES...) {
- .CMD=${cwd:ARCADIA_BUILD_ROOT} ${tool:"contrib/deprecated/flatc"} --python --gen-mutable ${FLATC_FLAGS_VALUE} ${pre=-I :_FLATC__INCLUDE} -o ${BINDIR} ${input:IN_FBS_FILES} && $YMAKE_PYTHON3 ${input:"build/scripts/tar_sources.py"} --exts .py --input $BINDIR --output ${noauto;output;tared:OUT_NAME.py2.fbs.pysrc} ${hide;kv:"p FP"} ${hide;kv:"pc light-green"} ${hide;kv:"tared_kind nodir"} ${hide:FBS_FAKEID}
+ .CMD=${cwd:ARCADIA_BUILD_ROOT} ${tool:"contrib/deprecated/flatc"} --python --gen-mutable ${FLATC_FLAGS_VALUE} ${pre=-I :_FLATC__INCLUDE} -o ${BINDIR} ${input:IN_FBS_FILES} && $YMAKE_PYTHON3 ${input:"build/scripts/tar_sources.py"} --exts .py --input $BINDIR --output ${noauto;output;tared;suf=.py2.fbs.pysrc:OUT_NAME} ${hide;kv:"p FP"} ${hide;kv:"pc light-green"} ${hide;kv:"tared_kind nodir"} ${hide:FBS_FAKEID}
}
# tag:fbs tag:go-specific
@@ -38,14 +38,14 @@ _GO_FLATC_IMPORTS=\
# tag:fbs tag:cpp-specific
macro _CPP_FLATC_CMD(SRC, SRCFLAGS...) {
- .CMD=${cwd:ARCADIA_BUILD_ROOT} $YMAKE_PYTHON3 ${input:"build/scripts/cpp_flatc_wrapper.py"} ${tool:FLATC} --no-warnings --cpp --keep-prefix --gen-mutable --schema -b --yandex-maps-iter --gen-object-api --filename-suffix .fbs ${FLATC_FLAGS_VALUE} ${pre=-I :_FLATC__INCLUDE} -o ${output;main;norel:SRC.h} ${hide;output;norel:SRC.cpp} ${input:SRC} ${hide;output;noext;norel:SRC.iter.fbs.h} ${hide;noauto;output;noext;norel:SRC.bfbs} ${hide;kv:"p FL"} ${hide;kv:"pc light-green"} ${hide:FBS_FAKEID}
- .SEM=target_macroses-ITEM && target_macroses-macro target_fbs_source && target_macroses-args PRIVATE ${input:SRC} ${FLATC_FLAGS_VALUE} ${pre=-I :_FLATC__INCLUDE} ${hide;output;norel:SRC.h} ${hide;output;norel:SRC.cpp} ${hide;output;noext;norel:SRC.iter.fbs.h} ${hide;noauto;output;noext;norel:SRC.bfbs} ${hide;tool:FLATC} && platform_vars-FBS_CPP_FLAGS "--no-warnings --cpp --keep-prefix --gen-mutable --schema -b --yandex-maps-iter --gen-object-api --filename-suffix .fbs" ${hide;input:"build/scripts/cpp_flatc_wrapper.py"}
+ .CMD=${cwd:ARCADIA_BUILD_ROOT} $YMAKE_PYTHON3 ${input:"build/scripts/cpp_flatc_wrapper.py"} ${tool:FLATC} --no-warnings --cpp --keep-prefix --gen-mutable --schema -b --yandex-maps-iter --gen-object-api --filename-suffix .fbs ${FLATC_FLAGS_VALUE} ${pre=-I :_FLATC__INCLUDE} -o ${main;norel;output;suf=.h:SRC} ${hide;norel;output;suf=.cpp:SRC} ${input:SRC} ${hide;norel;output;suf=.iter.fbs.h;noext:SRC} ${hide;noauto;norel;output;suf=.bfbs;noext:SRC} ${hide;kv:"p FL"} ${hide;kv:"pc light-green"} ${hide:FBS_FAKEID}
+ .SEM=target_macroses-ITEM && target_macroses-macro target_fbs_source && target_macroses-args PRIVATE ${input:SRC} ${FLATC_FLAGS_VALUE} ${pre=-I :_FLATC__INCLUDE} ${hide;norel;output;suf=.h:SRC} ${hide;norel;output;suf=.cpp:SRC} ${hide;norel;output;suf=.iter.fbs.h;noext:SRC} ${hide;noauto;norel;output;suf=.bfbs;noext:SRC} ${hide;tool:FLATC} && platform_vars-FBS_CPP_FLAGS "--no-warnings --cpp --keep-prefix --gen-mutable --schema -b --yandex-maps-iter --gen-object-api --filename-suffix .fbs" ${hide;input:"build/scripts/cpp_flatc_wrapper.py"}
.PEERDIR=contrib/libs/flatbuffers
}
# tag:fbs tag:cpp-specific
macro _CPP_FLATC64_CMD(SRC, SRCFLAGS...) {
- .CMD=${cwd:ARCADIA_BUILD_ROOT} $YMAKE_PYTHON3 ${input:"build/scripts/cpp_flatc_wrapper.py"} ${tool:FLATC64} --no-warnings --cpp --keep-prefix --gen-mutable --schema -b --filename-suffix .fbs64 ${FLATC_FLAGS_VALUE} -I ${ARCADIA_ROOT} -I ${ARCADIA_BUILD_ROOT} -o ${output;main;norel:SRC.h} ${hide;output;norel:SRC.cpp} ${input:SRC} ${hide;noauto;output;noext;norel:SRC.bfbs64} ${hide;kv:"p FL64"} ${hide;kv:"pc light-green"} ${hide:FBS_FAKEID}
+ .CMD=${cwd:ARCADIA_BUILD_ROOT} $YMAKE_PYTHON3 ${input:"build/scripts/cpp_flatc_wrapper.py"} ${tool:FLATC64} --no-warnings --cpp --keep-prefix --gen-mutable --schema -b --filename-suffix .fbs64 ${FLATC_FLAGS_VALUE} -I ${ARCADIA_ROOT} -I ${ARCADIA_BUILD_ROOT} -o ${main;norel;output;suf=.h:SRC} ${hide;norel;output;suf=.cpp:SRC} ${input:SRC} ${hide;noauto;norel;output;suf=.bfbs64;noext:SRC} ${hide;kv:"p FL64"} ${hide;kv:"pc light-green"} ${hide:FBS_FAKEID}
.PEERDIR=contrib/libs/flatbuffers64
}
@@ -57,7 +57,7 @@ macro _CPP_FLATC64_CMD(SRC, SRCFLAGS...) {
### --add-protobuf-result flag is specified on the command line for 'ya make ...'
### (tar archive is extracted to output directory).
macro _GO_FLATC_CMD(SRC, PACKAGE_NAME) {
- .CMD=${cwd:ARCADIA_BUILD_ROOT} ${tool:FLATC} --go --gen-mutable --go-namespace ${PACKAGE_NAME} ${FLATC_FLAGS_VALUE} ${pre=-I :_FLATC__INCLUDE} -o ${BINDIR}/_generated ${input:SRC} && $YMAKE_PYTHON3 ${input:"build/scripts/postprocess_go_fbs.py"} --arcadia-prefix ${GO_ARCADIA_PROJECT_PREFIX} --input-dir ${BINDIR} --map $_FBS_NAMESPACE_MAP_GLOBAL && $YMAKE_PYTHON3 ${input:"build/scripts/tar_sources.py"} --flat --input ${BINDIR}/_generated --output ${output;noext;tared:SRC.fbs.gosrc} --exts .go ${hide;kv:"p FG"} ${hide;kv:"pc light-green"} ${hide;kv:"tared_kind nodir"} ${hide:FBS_FAKEID}
+ .CMD=${cwd:ARCADIA_BUILD_ROOT} ${tool:FLATC} --go --gen-mutable --go-namespace ${PACKAGE_NAME} ${FLATC_FLAGS_VALUE} ${pre=-I :_FLATC__INCLUDE} -o ${BINDIR}/_generated ${input:SRC} && $YMAKE_PYTHON3 ${input:"build/scripts/postprocess_go_fbs.py"} --arcadia-prefix ${GO_ARCADIA_PROJECT_PREFIX} --input-dir ${BINDIR} --map $_FBS_NAMESPACE_MAP_GLOBAL && $YMAKE_PYTHON3 ${input:"build/scripts/tar_sources.py"} --flat --input ${BINDIR}/_generated --output ${output;noext;tared;suf=.fbs.gosrc:SRC} --exts .go ${hide;kv:"p FG"} ${hide;kv:"pc light-green"} ${hide;kv:"tared_kind nodir"} ${hide:FBS_FAKEID}
.PEERDIR=${_GO_FLATC_IMPORTS}
}
@@ -69,7 +69,7 @@ macro _GO_FLATC_CMD(SRC, PACKAGE_NAME) {
### be added to results when --add-flatbuf-result flag is specified on the command
### line for 'ya make ...'
macro _JAVA_FLATC_CMD(SRC, SRCFLAGS...) {
- .CMD=${cwd:ARCADIA_BUILD_ROOT} ${tool:FLATC} --java --gen-mutable ${FLATC_FLAGS_VALUE} ${pre=-I :_FLATC__INCLUDE} -o ${BINDIR} ${input:SRC} && $YMAKE_PYTHON3 ${input:"build/scripts/tar_sources.py"} --input $BINDIR --output ${output;nopath;noext:SRC.fbs.jsrc} --exts .java ${hide;kv:"p FJ"} ${hide;kv:"pc light-green"} ${hide:FBS_FAKEID}
+ .CMD=${cwd:ARCADIA_BUILD_ROOT} ${tool:FLATC} --java --gen-mutable ${FLATC_FLAGS_VALUE} ${pre=-I :_FLATC__INCLUDE} -o ${BINDIR} ${input:SRC} && $YMAKE_PYTHON3 ${input:"build/scripts/tar_sources.py"} --input $BINDIR --output ${output;suf=.fbs.jsrc;nopath;noext:SRC} --exts .java ${hide;kv:"p FJ"} ${hide;kv:"pc light-green"} ${hide:FBS_FAKEID}
.PEERDIR=contrib/java/com/google/flatbuffers/flatbuffers-java/${JAVA_FLATBUFFERS_VERSION}
}
@@ -150,7 +150,7 @@ macro FBS_CMD(SRC, SRCFLAGS...) {
###
### Produce flatbuf schema out of protobuf description.
macro PROTO2FBS(File) {
- .CMD=${cwd:BINDIR} ${tool:FLATC} -I . -I ${ARCADIA_ROOT} --proto ${input:File} ${hide;output;norel;noext:File.fbs} ${hide;kv:"p FBS"} ${hide;kv:"pc yellow"} && $MOVE_FILE ${BINDIR}/${nopath;noext:File.fbs} ${output;norel;noext:File.fbs}
+ .CMD=${cwd:BINDIR} ${tool:FLATC} -I . -I ${ARCADIA_ROOT} --proto ${input:File} ${hide;norel;output;suf=.fbs;noext:File} ${hide;kv:"p FBS"} ${hide;kv:"pc yellow"} && $MOVE_FILE ${BINDIR}/${suf=.fbs;nopath;noext:File} ${norel;output;suf=.fbs;noext:File}
}
_CPP_FLATC_CMDLINE=$_CPP_FLATC_CMD($SRC $SRCFLAGS)
diff --git a/build/conf/go.conf b/build/conf/go.conf
index 859d8faa65..fe60cd6224 100644
--- a/build/conf/go.conf
+++ b/build/conf/go.conf
@@ -202,7 +202,7 @@ _GO_TOOL_COMMON_FLAGS=\
# tag:go-specific
macro _GO_GEN_COVER_GO(GO_FILE, GO_COVER_OUTPUT, VAR_ID) {
- .CMD=${hide:_GO_FAKEID} ${cwd;rootdir;input:GO_FILE} $GO_TOOLS_ROOT/pkg/tool/$_GO_TC_PATH/cover -mode set -var $VAR_ID -o ${output;noext;suf=.cover.go:GO_COVER_OUTPUT} ${rootrel;input:GO_FILE}
+ .CMD=${hide:_GO_FAKEID} ${cwd;rootdir;input:GO_FILE} $GO_TOOLS_ROOT/pkg/tool/$_GO_TC_PATH/cover -mode set -var $VAR_ID -o ${output;suf=.cover.go;noext:GO_COVER_OUTPUT} ${rootrel;input:GO_FILE}
}
# tag:go-specific
@@ -213,12 +213,12 @@ macro _GO_COMPILE_SYMABIS(FLAGS[], ASM_FILES...) {
# tag:go-specific
macro _GO_COMPILE_CGO1(NAME, FLAGS[], FILES...) {
- .CMD=${hide:_CGO_FAKEID} ${cwd:ARCADIA_ROOT} $YMAKE_PYTHON3 ${input:"build/scripts/cgo1_wrapper.py"} $_GO_CGO1_WRAPPER_FLAGS --build-root ${ARCADIA_BUILD_ROOT} --source-root ${ARCADIA_ROOT} --cgo1-files ${output;noext:FILES.cgo1.go} --cgo2-files ${noauto;output;noext:FILES.cgo2.c} -- ${GO_TOOLS_ROOT}/pkg/tool/$_GO_TC_PATH/cgo -objdir $BINDIR -importpath $NAME $GO_CGO1_FLAGS_VALUE $FLAGS -- $C_FLAGS_PLATFORM ${pre=-I:_C__INCLUDE} ${CGO_CFLAGS_VALUE} ${input:FILES} ${hide;output:"_cgo_export.h"} ${hide;output:"_cgo_export.c"} ${hide;output:"_cgo_gotypes.go"} ${hide;noauto;output:"_cgo_main.c"} $GO_TOOLCHAIN_ENV ${hide;kv:"p go"} ${hide;kv:"pc light-blue"} ${hide;kv:"show_out"}
+ .CMD=${hide:_CGO_FAKEID} ${cwd:ARCADIA_ROOT} $YMAKE_PYTHON3 ${input:"build/scripts/cgo1_wrapper.py"} $_GO_CGO1_WRAPPER_FLAGS --build-root ${ARCADIA_BUILD_ROOT} --source-root ${ARCADIA_ROOT} --cgo1-files ${output;suf=.cgo1.go;noext:FILES} --cgo2-files ${noauto;output;suf=.cgo2.c;noext:FILES} -- ${GO_TOOLS_ROOT}/pkg/tool/$_GO_TC_PATH/cgo -objdir $BINDIR -importpath $NAME $GO_CGO1_FLAGS_VALUE $FLAGS -- $C_FLAGS_PLATFORM ${pre=-I:_C__INCLUDE} ${CGO_CFLAGS_VALUE} ${input:FILES} ${hide;output:"_cgo_export.h"} ${hide;output:"_cgo_export.c"} ${hide;output:"_cgo_gotypes.go"} ${hide;noauto;output:"_cgo_main.c"} $GO_TOOLCHAIN_ENV ${hide;kv:"p go"} ${hide;kv:"pc light-blue"} ${hide;kv:"show_out"}
}
# tag:go-specific
macro _GO_COMPILE_CGO2(NAME, C_FILES[], S_FILES[], OBJ_FILES[], FILES...) {
- .CMD=${hide:_CGO_FAKEID} $C_COMPILER $C_FLAGS_PLATFORM ${pre=-I:_C__INCLUDE} $CGO_CFLAGS_VALUE ${input;tobindir:"_cgo_main.c"} -c -o ${tmp;noauto;suf=${OBJECT_SUF}:"_cgo_main.c"} && $YMAKE_PYTHON3 ${input:"build/scripts/link_o.py"} $C_COMPILER $C_FLAGS_PLATFORM ${pre=-I:_C__INCLUDE} -o ${tmp;noauto;suf=${OBJECT_SUF}:"_cgo_"} $LDFLAGS $LDFLAGS_GLOBAL $CGO2_LDFLAGS_VALUE ${hide;input:"_cgo_export.h"} ${tmp;noauto;suf=${OBJECT_SUF}:"_cgo_main.c"} ${input;suf=${OBJECT_SUF}:"_cgo_export.c"} ${input;nopath;noext;suf=.cgo2.c${OBJECT_SUF}:FILES} ${input;suf=${OBJECT_SUF}:C_FILES} ${input;suf=.o:S_FILES} ${input:OBJ_FILES} $CGO_LDFLAGS_VALUE && ${GO_TOOLS_ROOT}/pkg/tool/$_GO_TC_PATH/cgo -dynpackage $NAME -dynimport ${tmp;noauto;suf=${OBJECT_SUF}:"_cgo_"} -dynout ${output:"_cgo_import.go"} -dynlinker $GO_CGO2_FLAGS_VALUE $GO_TOOLCHAIN_ENV ${hide;kv:"p go"} ${hide;kv:"pc light-blue"} ${hide;kv:"show_out"}
+ .CMD=${hide:_CGO_FAKEID} $C_COMPILER $C_FLAGS_PLATFORM ${pre=-I:_C__INCLUDE} $CGO_CFLAGS_VALUE ${tobindir;input:"_cgo_main.c"} -c -o ${noauto;tmp;suf=${OBJECT_SUF}:"_cgo_main.c"} && $YMAKE_PYTHON3 ${input:"build/scripts/link_o.py"} $C_COMPILER $C_FLAGS_PLATFORM ${pre=-I:_C__INCLUDE} -o ${noauto;tmp;suf=${OBJECT_SUF}:"_cgo_"} $LDFLAGS $LDFLAGS_GLOBAL $CGO2_LDFLAGS_VALUE ${hide;input:"_cgo_export.h"} ${noauto;tmp;suf=${OBJECT_SUF}:"_cgo_main.c"} ${input;suf=${OBJECT_SUF}:"_cgo_export.c"} ${input;suf=.cgo2.c${OBJECT_SUF};nopath;noext:FILES} ${input;suf=${OBJECT_SUF}:C_FILES} ${input;suf=.o:S_FILES} ${input:OBJ_FILES} $CGO_LDFLAGS_VALUE && ${GO_TOOLS_ROOT}/pkg/tool/$_GO_TC_PATH/cgo -dynpackage $NAME -dynimport ${noauto;tmp;suf=${OBJECT_SUF}:"_cgo_"} -dynout ${output:"_cgo_import.go"} -dynlinker $GO_CGO2_FLAGS_VALUE $GO_TOOLCHAIN_ENV ${hide;kv:"p go"} ${hide;kv:"pc light-blue"} ${hide;kv:"show_out"}
_USE_LINKER()
}
@@ -549,7 +549,7 @@ macro _GO_GRPC_GATEWAY_SRCS_IMPL(Files...) {
# tag:go-specific
macro _SETUP_GO_GRPC_GATEWAY() {
SET(_GO_PROTO_GRPC_GATEWAY_OPTS $_PROTO_PLUGIN_ARGS_BASE(go_grpc_gw vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway allow_repeated_fields_in_body=true))
- SET(_GO_PROTO_GRPC_GATEWAY_OUTS \${hide;output;norel;nopath;noext;suf=.pb.gw.go:File})
+ SET(_GO_PROTO_GRPC_GATEWAY_OUTS \${hide;norel;output;suf=.pb.gw.go;nopath;noext:File})
}
# tag:go-specific
@@ -562,7 +562,7 @@ macro _GO_GRPC_GATEWAY_SRCS(Files...) {
macro _GO_GRPC_GATEWAY_SWAGGER_SRCS(Files...) {
_SETUP_GO_GRPC_GATEWAY()
SET_APPEND(_GO_PROTO_GRPC_GATEWAY_OPTS $_PROTO_PLUGIN_ARGS_BASE(swagger vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger logtostderr=true allow_repeated_fields_in_body=true))
- SET_APPEND(_GO_PROTO_GRPC_GATEWAY_OUTS \${hide;output;norel;noauto;nopath;noext;suf=.swagger.json:File})
+ SET_APPEND(_GO_PROTO_GRPC_GATEWAY_OUTS \${hide;noauto;norel;output;suf=.swagger.json;nopath;noext:File})
_GO_GRPC_GATEWAY_SRCS_IMPL($Files)
PEERDIR(vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options)
@@ -580,7 +580,7 @@ macro _GO_GRPC_GATEWAY_V2_OPENAPI_SRCS(NO_JSON_NAMES_FOR_FIELDS?"json_names_for_
_SETUP_GO_GRPC_GATEWAY_V2()
SET_APPEND(_GO_PROTO_GRPC_GATEWAY_V2_OPTS $_PROTO_PLUGIN_ARGS_BASE(openapiv2 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2 $NO_JSON_NAMES_FOR_FIELDS logtostderr=true))
- SET_APPEND(_GO_PROTO_GRPC_GATEWAY_V2_OPTS \${hide;output;norel;noauto;nopath;noext;suf=.swagger.json:File})
+ SET_APPEND(_GO_PROTO_GRPC_GATEWAY_V2_OPTS \${hide;noauto;norel;output;suf=.swagger.json;nopath;noext:File})
_GO_GRPC_GATEWAY_V2_SRCS_IMPL($Files)
PEERDIR(vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options)
@@ -615,7 +615,7 @@ macro _GO_GRPC_GATEWAY_V2_SRCS_IMPL(Files...) {
# tag:go-specific
macro _SETUP_GO_GRPC_GATEWAY_V2() {
SET(_GO_PROTO_GRPC_GATEWAY_V2_OPTS $_PROTO_PLUGIN_ARGS_BASE(go_grpc_gw vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway allow_repeated_fields_in_body=true))
- SET(_GO_PROTO_GRPC_GATEWAY_V2_OUTS \${hide;output;norel;nopath;noext;suf=.pb.gw.go:File})
+ SET(_GO_PROTO_GRPC_GATEWAY_V2_OUTS \${hide;norel;output;suf=.pb.gw.go;nopath;noext:File})
}
# tag:go-specific
@@ -667,16 +667,16 @@ module _GO_BASE_UNIT: _BASE_UNIT {
PEERDIR(build/external_resources/go_tools)
GO_PROTO_GRPC_OPTS_V2=--plugin=protoc-gen-go-grpc=${tool:_TOOL_PROTOC_GEN_GO_GRPC_V2} --go-grpc_out=${ARCADIA_BUILD_ROOT}/$PROTO_NAMESPACE
- GO_PROTO_GRPC_OUTS_V2=${output;norel;nopath;noext;suf=_grpc.pb.go:File}
+ GO_PROTO_GRPC_OUTS_V2=${norel;output;suf=_grpc.pb.go;nopath;noext:File}
select ($GO_PROTO_V2) {
"yes" | "on" ? {
- GO_PROTO_OUTS+=${hide;output;norel;nopath;noext;suf=.pb.go:File}
+ GO_PROTO_OUTS+=${hide;norel;output;suf=.pb.go;nopath;noext:File}
GO_PROTO_OPTS+=--plugin=protoc-gen-go=${tool:_TOOL_PROTOC_GEN_GO_V2} --go_out=${ARCADIA_BUILD_ROOT}/$PROTO_NAMESPACE
_GO_PROTO_CHECK_OUTPUT=--check $GO_PROTO_GRPC_OUTS_V2
GO_PROTO_OPTS+=$GO_PROTO_GRPC_OPTS
}
default ? {
- GO_PROTO_OUTS+=${hide;output;norel;nopath;noext;suf=.pb.go:File}
+ GO_PROTO_OUTS+=${hide;norel;output;suf=.pb.go;nopath;noext:File}
GO_PROTO_OPTS+=--plugin=protoc-gen-go=${tool:_TOOL_PROTOC_GEN_GO} --go_opt=${GO_PROTO_GEN_PLUGINS} --go_out=${ARCADIA_BUILD_ROOT}/$PROTO_NAMESPACE
}
}
@@ -1020,12 +1020,34 @@ module GO_TEST: GO_PROGRAM {
}
# tag:go-specific
+_GO_TOOL_ENV=${env:"PATH=${GO_TOOLS_ROOT}/bin"} ${env:"GOROOT=${GO_TOOLS_ROOT}"} ${env:"GOCACHE=${BINDIR}/.gocache"}
+
+# tag:go-specific
+_GO_GO_MOD=${ARCADIA_ROOT}/go.mod
+_GO_MODULES_TXT=${ARCADIA_ROOT}/vendor/modules.txt
+
+# tag:go-specific
+_GO_TOOL_MOCKGEN=vendor/go.uber.org/mock/mockgen
+_GO_TOOL_REFLECTOR=library/go/mockgen/reflector
+_GO_TOOL_OAPI_CODEGEN=vendor/github.com/deepmap/oapi-codegen/cmd/oapi-codegen
+_GO_TOOL_OAPI_CODEGEN_TAXI=taxi/infra/go/platform/tools/pkg/deepmap/oapi-codegen/cmd/oapi-codegen
+_GO_TOOL_OAPI_CODEGEN_TAXI_1134=taxi/infra/go/platform/tools/pkg1134/oapi-codegen-1.13.4/cmd/oapi-codegen
+
+# tag:go-specific
+### @usage: GO_MOCKGEN_FROM(Path)
+###
+### Part of Go mock module definition, both reflect and source mode.
+### Defines path for mock interfaces source
macro GO_MOCKGEN_FROM(Path) {
SET(MOCKGEN_FROM ${Path})
SET(MOCKGEN_MODULE ${GO_ARCADIA_PROJECT_PREFIX}${Path})
}
# tag:go-specific
+### @usage: GO_MOCKGEN_CONTRIB_FROM(Path)
+###
+### Part of Go mock module definition, both reflect and source mode.
+### Defines path for mock interfaces source for contrib (vendored) sources
macro GO_MOCKGEN_CONTRIB_FROM(Path) {
SET(MOCKGEN_FROM ${GO_CONTRIB_PROJECT_PREFIX}${Path})
SET(MOCKGEN_MODULE ${Path})
@@ -1033,10 +1055,24 @@ macro GO_MOCKGEN_CONTRIB_FROM(Path) {
# tag:go-specific
macro GO_MOCKGEN_TYPES(Types...) {
- SET(MOCKGEN_TYPES ${join=\\${__COMMA__}:Types})
+ SET(MOCKGEN_TYPES ${join=,:Types})
}
+MOCKGEN_PACKAGE="mocks"
# tag:go-specific
+### @usage: GO_MOCKGEN_PACKAGE(package)
+###
+### Part of Go mock module definition, source mode.
+### Specifies generated package name, instead of default one "mocks"
+macro GO_MOCKGEN_PACKAGE(PACKAGE) {
+ SET(MOCKGEN_PACKAGE ${PACKAGE})
+}
+
+# tag:go-specific
+### @usage: GO_MOCKGEN_REFLECT()
+###
+### Part of Go mock module definition, reflect mode.
+### Creates generator program, expected in `gen` folder
macro GO_MOCKGEN_REFLECT() {
PEERDIR(${GOSTD}/encoding/gob)
PEERDIR(${GOSTD}/flag)
@@ -1047,20 +1083,80 @@ macro GO_MOCKGEN_REFLECT() {
PEERDIR(vendor/go.uber.org/mock/mockgen/model)
PEERDIR(${MOCKGEN_FROM})
- RUN_PROGRAM(vendor/go.uber.org/mock/mockgen -prog_only $MOCKGEN_MODULE $MOCKGEN_TYPES STDOUT main.go CWD $ARCADIA_BUILD_ROOT)
+ .CMD=${cwd:BINDIR} ${tool:_GO_TOOL_REFLECTOR} ${MOCKGEN_MODULE} ${MOCKGEN_TYPES} ${stdout;output:"main.go"} ${hide;kv:"p GR"} ${hide;kv:"pc blue"}
+ .STRUCT_CMD=yes
}
# tag:go-specific
-_GO_EXE_SUFFIX=
-when ($OS_WINDOWS == "yes") {
- _GO_EXE_SUFFIX=.exe
+macro _GO_MOCKGEN_MOCKS_IMPL(GEN_TOOL) {
+ PEERDIR(${GOSTD}/reflect)
+ PEERDIR(vendor/go.uber.org/mock/gomock)
+
+ .CMD=${cwd:BINDIR} ${tool:GEN_TOOL} -output gob.data && ${cwd:BINDIR} ${tool:_GO_TOOL_MOCKGEN} -package ${MOCKGEN_PACKAGE} -model_gob gob.data ${stdout;output:"main.go"} $_GO_TOOL_ENV ${hide;kv:"p GM"} ${hide;kv:"pc blue"}
}
# tag:go-specific
+### @usage: GO_MOCKGEN_MOCKS()
+###
+### Part of Go mock module definition, reflect mode.
+### Generates mocks, expect to have `gen` folder with GO_MOCKGEN_REFLECT
macro GO_MOCKGEN_MOCKS() {
+ _GO_MOCKGEN_MOCKS_IMPL(${MODDIR}/gen)
+}
+
+_GO_MOCKGEN_SOURCE_CMDLINE=\
+ $FS_TOOLS md ${BINDIR}/.arcadia/vendor && \
+ $FS_TOOLS md ${BINDIR}/.arcadia/$MOCKGEN_FROM && \
+ $COPY_CMD ${context=TEXT;input:_GO_GO_MOD} ${BINDIR}/.arcadia/go.mod && \
+ $COPY_CMD ${context=TEXT;input:_GO_MODULES_TXT} ${BINDIR}/.arcadia/vendor/modules.txt && \
+ $COPY_CMD ${context=TEXT;input:SOURCE} ${BINDIR}/.arcadia/$SOURCE && \
+ ${cwd;suf=/.arcadia:BINDIR} ${tool:_GO_TOOL_MOCKGEN} -package ${MOCKGEN_PACKAGE} -source ${rootrel;context=TEXT;input:SOURCE} ${ARGS} \
+ $_GO_TOOL_ENV ${stdout;output;suf=.source.gen.go;noext:FILENAME} \
+ ${hide;context=TEXT;input;suf=/.arcadia/go.mod:BINDIR} ${hide;context=TEXT;input;suf=/.arcadia/vendor/modules.txt:BINDIR} ${hide;context=TEXT;input:IN_NOPARSE} \
+ ${hide;kv:"p GS"} ${hide;kv:"pc blue"}
+
+# tag:go-specific
+macro _GO_MOCKGEN_SOURCE_IMPL(SOURCE, FILENAME, ARGS[], IN_NOPARSE[]) {
PEERDIR(${GOSTD}/reflect)
+ PEERDIR(${GOSTD}/context)
PEERDIR(vendor/go.uber.org/mock/gomock)
+ .CMD=$_GO_MOCKGEN_SOURCE_CMDLINE
+}
+
+# tag:go-specific
+### @usage: GO_MOCKGEN_SOURCE(FILE, ARGS[], IN_NOPARSE[])
+###
+### Part of Go mock module definition, source mode.
+### Generates mocks from file from GO_MOCKGEN_FROM or GO_MOCKGEN_CONTRIB_FROM
+### Can be placed multiple times in same ya.make
+macro GO_MOCKGEN_SOURCE(FILE, ARGS[], IN_NOPARSE[]) {
+ _GO_MOCKGEN_SOURCE_IMPL($MOCKGEN_FROM/$FILE $FILE ARGS ${ARGS} IN_NOPARSE ${IN_NOPARSE})
+}
+
+macro _GO_OAPI_CODEGEN_IMPL(TOOL, IN, FILENAME, GENERATE="", PACKAGE="", TEMPLATES="", CONFIG="", IN_NOPARSE[], Args...) {
+ .CMD=${cwd:ARCADIA_BUILD_ROOT} ${tool:TOOL} ${pre=--generate :GENERATE} ${pre=--package :PACKAGE} ${pre=--templates :TEMPLATES} ${pre=--config :CONFIG} $Args ${context=TEXT;input:IN} ${hide;context=TEXT;input:IN_NOPARSE} ${stdout;output:FILENAME} ${_GO_TOOL_ENV} ${hide;kv:"p GC"} ${hide;kv:"pc blue"}
+}
+
+# tag:go-specific
+### @usage: GO_OAPI_CODEGEN(GENERATE, PACKAGE, IN, IN_NOPARSE[], Args...)
+###
+### Go oapi-codegen module
+### Generates GENERATE thing with PACKAGE package from file IN into STDOUT file
+### Optional arguments will be passed into generator
+### IN_NOPARSE - input files required for running generation, except IN
+### Can be placed multiple times in same ya.make
+macro GO_OAPI_CODEGEN(GENERATE, PACKAGE, IN, IN_NOPARSE[], Args...) {
+ _GO_OAPI_CODEGEN_IMPL(${_GO_TOOL_OAPI_CODEGEN} ${IN} ${suf=.gen.go:GENERATE} GENERATE ${GENERATE} PACKAGE ${PACKAGE} ${Args} IN_NOPARSE ${IN_NOPARSE})
+}
- # Unfortunately ${rootrel;tool:TOOL} doesn't work currently, so we use this ugly workaround $MODDIR/gen/gen$_GO_EXE_SUFFIX
- RUN_PROGRAM(vendor/go.uber.org/mock/mockgen -package mocks -exec_only $MODDIR/gen/gen$_GO_EXE_SUFFIX ${GO_ARCADIA_PROJECT_PREFIX}${MOCKGEN_FROM} $MOCKGEN_TYPES STDOUT main.go TOOL $MODDIR/gen CWD $ARCADIA_BUILD_ROOT ENV PATH=${GO_TOOLS_ROOT}/bin GOROOT=${GO_TOOLS_ROOT} GOCACHE=${BINDIR}/.gocache)
+# tag:go-specific
+### private, taxi only
+macro GO_OAPI_CODEGEN_TAXI(GENERATE, PACKAGE, IN, IN_NOPARSE[], OUT_SUFFIX="", Args...) {
+ _GO_OAPI_CODEGEN_IMPL(${_GO_TOOL_OAPI_CODEGEN_TAXI} ${IN} ${suf=${OUT_SUFFIX}.gen.go:GENERATE} GENERATE ${GENERATE} PACKAGE ${PACKAGE} ${Args} IN_NOPARSE ${IN_NOPARSE})
+}
+
+# tag:go-specific
+### private, taxi only
+macro GO_OAPI_CODEGEN_TAXI_1134(GENERATE="", PACKAGE="", TEMPLATES="", CONFIG="", IN, FILENAME, IN_NOPARSE[], Args...) {
+ _GO_OAPI_CODEGEN_IMPL(${_GO_TOOL_OAPI_CODEGEN_TAXI_1134} ${IN} ${suf=.gen.go:FILENAME} ${pre=GENERATE :GENERATE} ${pre=PACKAGE :PACKAGE} ${pre=TEMPLATES :TEMPLATES} ${pre=CONFIG :CONFIG} ${Args} IN_NOPARSE ${IN_NOPARSE})
}
diff --git a/build/conf/java.conf b/build/conf/java.conf
index 053c0c4a1d..3b4da7292f 100644
--- a/build/conf/java.conf
+++ b/build/conf/java.conf
@@ -8,7 +8,7 @@ macro _INPUT_WITH_FLAG_IMPL(IN{input}[], Args...) {
macro ACCELEO(XSD{input}[], MTL{input}[], MTL_ROOT="${MODDIR}", LANG{input}[], OUT{output}[], OUT_NOAUTO{output}[], OUTPUT_INCLUDES[], DEBUG?"stdout2stderr":"stderr2stdout") {
.PEERDIR=build/platform/java/jdk $JDK_RESOURCE_PEERDIR
- .CMD=${cwd:ARCADIA_BUILD_ROOT} $YMAKE_PYTHON ${input;pre=build/scripts/:DEBUG.py} $JDK_RESOURCE/bin/java -Dfile.encoding=utf8 -classpath ${RUN_JAR_PROG_CP_PRE}${tool:"tools/acceleo"}${RUN_JAR_PROG_CP_SUF} ru.yandex.se.logsng.tool.Cli $_INPUT_WITH_FLAG(--xsd IN $XSD) $_INPUT_WITH_FLAG(--mtl IN $MTL) $_INPUT_WITH_FLAG(--lang IN $LANG) --output-dir $BINDIR --build-root ${ARCADIA_BUILD_ROOT} --source-root ${ARCADIA_ROOT} --mtl-root $MTL_ROOT ${hide;output_include:OUTPUT_INCLUDES} ${hide;output:OUT} ${hide;noauto;output:OUT_NOAUTO} ${hide;kv:"p JV"} ${hide;kv:"pc light-blue"} ${hide;kv:"show_out"}
+ .CMD=${cwd:ARCADIA_BUILD_ROOT} $YMAKE_PYTHON ${input;pre=build/scripts/;suf=.py:DEBUG} $JDK_RESOURCE/bin/java -Dfile.encoding=utf8 -classpath ${RUN_JAR_PROG_CP_PRE}${tool:"tools/acceleo"}${RUN_JAR_PROG_CP_SUF} ru.yandex.se.logsng.tool.Cli $_INPUT_WITH_FLAG(--xsd IN $XSD) $_INPUT_WITH_FLAG(--mtl IN $MTL) $_INPUT_WITH_FLAG(--lang IN $LANG) --output-dir $BINDIR --build-root ${ARCADIA_BUILD_ROOT} --source-root ${ARCADIA_ROOT} --mtl-root $MTL_ROOT ${hide;output_include:OUTPUT_INCLUDES} ${hide;output:OUT} ${hide;noauto;output:OUT_NOAUTO} ${hide;kv:"p JV"} ${hide;kv:"pc light-blue"} ${hide;kv:"show_out"}
}
### @usage: JAVA_LIBRARY()
@@ -1280,8 +1280,8 @@ module _JAR_RUNNABLE: _COMPILABLE_JAR_BASE {
CONSUME_NON_MANAGEABLE_PEERS=yes
when ($RUN_WITH_SOURCES == "yes") {
- _SOURCE_JARS=${suf=-sources.jar;noext;ext=.jar:MANAGED_PEERS_CLOSURE} ${hide;late_out;pre=$BINDIR/$REALPRJNAME/;nopath;suf=-sources.jar;noext;ext=.jar:MANAGED_PEERS_CLOSURE}
- _SOURCE_JARS_CPLIST=${rootrel;pre=$BINDIR/$REALPRJNAME/;nopath;suf=-sources.jar;noext;ext=.jar:MANAGED_PEERS_CLOSURE}
+ _SOURCE_JARS=${suf=-sources.jar;noext;ext=.jar:MANAGED_PEERS_CLOSURE} ${hide;late_out;pre=$BINDIR/$REALPRJNAME/;suf=-sources.jar;nopath;noext;ext=.jar:MANAGED_PEERS_CLOSURE}
+ _SOURCE_JARS_CPLIST=${rootrel;pre=$BINDIR/$REALPRJNAME/;suf=-sources.jar;nopath;noext;ext=.jar:MANAGED_PEERS_CLOSURE}
}
when ($TARED_CLASSPATH == "yes") {
diff --git a/build/conf/licenses.json b/build/conf/licenses.json
index 8f525169f1..72edb8cdc6 100644
--- a/build/conf/licenses.json
+++ b/build/conf/licenses.json
@@ -216,6 +216,7 @@
"curl",
"Custom-clarified-artistic-proofread",
"Custom-eigen",
+ "Custom-epsg-database-license",
"Custom-fft2d",
"Custom-fft4g",
"Custom-Oasis-Pkcs11",
diff --git a/build/conf/linkers/msvc_linker.conf b/build/conf/linkers/msvc_linker.conf
index c89dfe6d1d..48ca1c2dfc 100644
--- a/build/conf/linkers/msvc_linker.conf
+++ b/build/conf/linkers/msvc_linker.conf
@@ -166,7 +166,7 @@ REAL_LINK_DYN_LIB_CMDLINE=\
--ya-start-command-file \
${VCS_C_OBJ_RR} \
${qe;rootrel:AUTO_INPUT} \
- ${qe;rootrel;ext=.lib;tags_cut:PEERS} ${qe;rootrel;ext=.dll;noext;suf=.lib;tags_cut:PEERS} \
+ ${qe;rootrel;ext=.lib;tags_cut:PEERS} ${qe;rootrel;ext=.dll;suf=.lib;noext;tags_cut:PEERS} \
$LINK_EXE_FLAGS \
$LINK_STDLIBS $LDFLAGS \
$LDFLAGS_GLOBAL \
@@ -235,7 +235,7 @@ LINK_EXEC_DYN_LIB_CMDLINE=\
'--ya-start-command-file \
${VCS_C_OBJ_RR} \
${qe;rootrel:AUTO_INPUT} \
- ${qe;rootrel;ext=.lib;tags_cut:PEERS} ${qe;rootrel;ext=.dll;noext;suf=.lib;tags_cut:PEERS} \
+ ${qe;rootrel;ext=.lib;tags_cut:PEERS} ${qe;rootrel;ext=.dll;suf=.lib;noext;tags_cut:PEERS} \
$LINK_EXE_FLAGS \
$LINK_STDLIBS \
$LDFLAGS $LDFLAGS_GLOBAL \
diff --git a/build/conf/project_specific/other.conf b/build/conf/project_specific/other.conf
index 0a121a5f04..35fa0ce04f 100644
--- a/build/conf/project_specific/other.conf
+++ b/build/conf/project_specific/other.conf
@@ -7,5 +7,5 @@ BUILD_CATBOOST_SCRIPT=build/scripts/build_catboost.py
### cbname - name for a variable (of NCatboostCalcer::TCatboostCalcer type) to be available in CPP code.
### CatBoost specific macro.
macro BUILD_CATBOOST(CbModel, CbName) {
- .CMD=$YMAKE_PYTHON ${input:BUILD_CATBOOST_SCRIPT} build_cb_f $ARCADIA_ROOT $ARCH_TOOL ${input:CbModel} $CbName ${output;pre=cb.:CbName.cpp} ${hide;output;pre=CB_External_;suf=.rodata:CbName} ${hide;output_include:"kernel/catboost/catboost_calcer.h"} ${hide;kv:"p CB"} ${hide;kv:"pc yellow"}
+ .CMD=$YMAKE_PYTHON ${input:BUILD_CATBOOST_SCRIPT} build_cb_f $ARCADIA_ROOT $ARCH_TOOL ${input:CbModel} $CbName ${output;pre=cb.;suf=.cpp:CbName} ${hide;output;pre=CB_External_;suf=.rodata:CbName} ${hide;output_include:"kernel/catboost/catboost_calcer.h"} ${hide;kv:"p CB"} ${hide;kv:"pc yellow"}
}
diff --git a/build/conf/project_specific/yt.conf b/build/conf/project_specific/yt.conf
index abb9a7f86e..c7eaa36050 100644
--- a/build/conf/project_specific/yt.conf
+++ b/build/conf/project_specific/yt.conf
@@ -1,5 +1,5 @@
macro GENERATE_YT_RECORD(Yaml, OUTPUT_INCLUDES[]) {
- .CMD=${tool:"yt/yt/tools/record_codegen"} --input ${input:Yaml} --output-root $ARCADIA_BUILD_ROOT --output-cpp ${output;norel;noext;suf=.record.cpp:Yaml} ${hide;output;norel;noext;suf=.record.h:Yaml} ${pre=--output-include :OUTPUT_INCLUDES} ${hide;output_include:OUTPUT_INCLUDES} ${hide;output_include:"yt/yt/client/table_client/record_codegen_deps.h"} ${hide;kv:"p RC"}
- .SEM=custom_runs-ITEM && custom_runs-outputs ${output;norel;noext;suf=.record.cpp:Yaml} ${output;norel;noext;suf=.record.h:Yaml} && custom_runs-depends ${input:Yaml} ${input:"yt/yt/tools/record_codegen/__main__.py"} ${input:"yt/python/yt/record_codegen_helpers/__init__.py"} && custom_runs-env "PYTHONPATH=$ENV{PYTHONPATH}:${ARCADIA_ROOT}/yt/python/yt" && custom_runs-command ${Python3_EXECUTABLE} ${input:"yt/yt/tools/record_codegen/__main__.py"} --input ${input:Yaml} --output-root $ARCADIA_BUILD_ROOT --output-cpp ${output;norel;noext;suf=.record.cpp:Yaml} ${pre=--output-include :OUTPUT_INCLUDES} ${hide;output;norel;noext;suf=.record.h:Yaml} && custom_runs-cmake_packages-ITEM && custom_runs-cmake_packages-name Python3
+ .CMD=${tool:"yt/yt/tools/record_codegen"} --input ${input:Yaml} --output-root $ARCADIA_BUILD_ROOT --output-cpp ${norel;output;suf=.record.cpp;noext:Yaml} ${hide;norel;output;suf=.record.h;noext:Yaml} ${pre=--output-include :OUTPUT_INCLUDES} ${hide;output_include:OUTPUT_INCLUDES} ${hide;output_include:"yt/yt/client/table_client/record_codegen_deps.h"} ${hide;kv:"p RC"}
+ .SEM=custom_runs-ITEM && custom_runs-outputs ${norel;output;suf=.record.cpp;noext:Yaml} ${norel;output;suf=.record.h;noext:Yaml} && custom_runs-depends ${input:Yaml} ${input:"yt/yt/tools/record_codegen/__main__.py"} ${input:"yt/python/yt/record_codegen_helpers/__init__.py"} && custom_runs-env "PYTHONPATH=$ENV{PYTHONPATH}:${ARCADIA_ROOT}/yt/python/yt" && custom_runs-command ${Python3_EXECUTABLE} ${input:"yt/yt/tools/record_codegen/__main__.py"} --input ${input:Yaml} --output-root $ARCADIA_BUILD_ROOT --output-cpp ${norel;output;suf=.record.cpp;noext:Yaml} ${pre=--output-include :OUTPUT_INCLUDES} ${hide;norel;output;suf=.record.h;noext:Yaml} && custom_runs-cmake_packages-ITEM && custom_runs-cmake_packages-name Python3
PEERDIR(yt/yt/client)
}
diff --git a/build/conf/proto.conf b/build/conf/proto.conf
index e268dc4236..dcc4389571 100644
--- a/build/conf/proto.conf
+++ b/build/conf/proto.conf
@@ -150,8 +150,8 @@ macro _PROTO_PLUGIN_ARGS_BASE(Name, Tool, OutParm...) {
# tag:proto tag:python-specific
macro _ADD_PY_PROTO_OUT(Suf) {
- SET_APPEND(PY_PROTO_OUTS \${hide;noauto;output;norel;nopath;noext;suf=$Suf:File})
- SET_APPEND(PY_PROTO_OUTS_INTERNAL \${hide;noauto;output;norel;nopath;noext;suf=__int${_PYTHON_VER}__$Suf:File} \${hide;kv:"ext_out_name_for_\${nopath;noext;suf=__int${_PYTHON_VER}__$Suf:File} \${nopath;noext;suf=$Suf:File}"})
+ SET_APPEND(PY_PROTO_OUTS \${hide;noauto;norel;output;suf=$Suf;nopath;noext:File})
+ SET_APPEND(PY_PROTO_OUTS_INTERNAL \${hide;noauto;norel;output;suf=__int${_PYTHON_VER}__$Suf;nopath;noext:File} \${hide;kv:"ext_out_name_for_\${suf=__int${_PYTHON_VER}__$Suf;nopath;noext:File} \${suf=$Suf;nopath;noext:File}"})
# XXX fix variable expansion in plugins
SET(PY_PROTO_SUFFIXES $PY_PROTO_SUFFIXES $Suf)
}
@@ -213,7 +213,7 @@ macro WITH_KOTLIN_GRPC() {
# tag:proto tag:cpp-specific
macro _ADD_CPP_PROTO_OUT(Suf) {
- SET_APPEND(CPP_PROTO_OUTS \${output;norel;nopath;noext;suf=$Suf:File})
+ SET_APPEND(CPP_PROTO_OUTS \${norel;output;suf=$Suf;nopath;noext:File})
SET_APPEND(PROTOC_EXTRA_OUTS_SEM && protoc_extra_outs $Suf \${hide;output;suf=.o:Suf} \$_ADD_SEM_PROP_IF_NON_EMPTY(PROTO_NAMESPACE $PROTO_NAMESPACE))
# XXX fix variable expansion in plugins
@@ -258,7 +258,7 @@ macro CPP_PROTO_PLUGIN2(NAME, TOOL, SUF1, SUF2, DEPS[], EXTRA_OUT_FLAG="") {
CPP_PROTO_PLUGIN($NAME $TOOL $SUF1 DEPS $DEPS ${pre=EXTRA_OUT_FLAG :EXTRA_OUT_FLAG})
_ADD_CPP_PROTO_OUT($SUF2)
- SET_APPEND(CPP_PROTO_OUTS_SEM \${hide;output;norel;nopath;noext;suf=$SUF2:File})
+ SET_APPEND(CPP_PROTO_OUTS_SEM \${hide;norel;output;suf=$SUF2;nopath;noext:File})
}
# tag:proto
@@ -373,12 +373,15 @@ macro _UPDATE_GO_PROTO_ENV(NAME, ENV[], DUMMY...) {
### Ext using Tool. Extra dependencies are passed via DEPS.
macro GO_PROTO_PLUGIN(NAME, EXT, TOOL, DEPS[]) {
SET_APPEND(GO_PROTO_OPTS $_PROTO_PLUGIN_ARGS_BASE($NAME $TOOL))
- SET_APPEND(GO_PROTO_OUTS \${hide;noauto;output;norel;nopath;noext;suf=$EXT:File})
+ SET_APPEND(GO_PROTO_OUTS \${hide;noauto;norel;output;suf=$EXT;nopath;noext:File})
+ _UPDATE_GO_PROTO_ENV(${suf= ENV;ext=mdb-validators:NAME} PATH=${GO_TOOLS_ROOT}/bin GOROOT=${GO_TOOLS_ROOT} GOCACHE=${BINDIR}/.gocache)
+ _UPDATE_GO_PROTO_ENV(${suf= ENV;ext=proto_validation:NAME} PATH=${GO_TOOLS_ROOT}/bin GOROOT=${GO_TOOLS_ROOT} GOCACHE=${BINDIR}/.gocache)
+ _UPDATE_GO_PROTO_ENV(${suf= ENV;ext=mdb-dynforms:NAME} PATH=${GO_TOOLS_ROOT}/bin GOROOT=${GO_TOOLS_ROOT} GOCACHE=${BINDIR}/.gocache)
PEERDIR(${DEPS})
}
# tag:go-specific tag:proto
-GO_PROTO_CMDLINE=${cwd;rootdir;input:File} $YMAKE_PYTHON3 ${input:"build/scripts/go_proto_wrapper.py"} --arcadia-prefix $GO_ARCADIA_PROJECT_PREFIX --contrib-prefix $GO_CONTRIB_PROJECT_PREFIX --namespace ./$PROTO_NAMESPACE $_GO_PROTO_CHECK_OUTPUT --proto ${input;rootrel:File} -- $PROTOC -I=./$PROTO_NAMESPACE -I=$ARCADIA_ROOT/$PROTO_NAMESPACE ${pre=-I=:_PROTO__INCLUDE} -I=$ARCADIA_BUILD_ROOT -I=$PROTOBUF_INCLUDE_PATH $_PROTOC_FLAGS ${hide:PROTO_FAKEID}
+GO_PROTO_CMDLINE=${cwd;rootdir;input:File} $YMAKE_PYTHON3 ${input:"build/scripts/go_proto_wrapper.py"} --arcadia-prefix $GO_ARCADIA_PROJECT_PREFIX --contrib-prefix $GO_CONTRIB_PROJECT_PREFIX --namespace ./$PROTO_NAMESPACE $_GO_PROTO_CHECK_OUTPUT --proto ${rootrel;input:File} -- $PROTOC -I=./$PROTO_NAMESPACE -I=$ARCADIA_ROOT/$PROTO_NAMESPACE ${pre=-I=:_PROTO__INCLUDE} -I=$ARCADIA_BUILD_ROOT -I=$PROTOBUF_INCLUDE_PATH $_PROTOC_FLAGS ${hide:PROTO_FAKEID}
# tag:go-specific tag:proto
macro _GO_PROTO_CMD_IMPL(File, ENV[], OPTS...) {
@@ -394,7 +397,7 @@ macro _GO_PROTO_CMD(File) {
# tag:proto tag:docs-specific
macro _DOCS_PROTO_CMD(File) {
- .CMD=${cwd;rootdir;input:File} $YMAKE_PYTHON3 ${input:"build/scripts/docs_proto_wrapper.py"} --docs-output ${output;norel;nopath;suf=.md;noext:File} -- $PROTOC -I=./$PROTO_NAMESPACE -I=$ARCADIA_ROOT/$PROTO_NAMESPACE ${pre=-I=:_PROTO__INCLUDE} -I=$ARCADIA_BUILD_ROOT -I=$PROTOBUF_INCLUDE_PATH $_PROTOC_FLAGS --plugin=protoc-gen-doc=${tool:"vendor/github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc"} ${input;rootrel:File} ${hide:PROTO_FAKEID} ${hide;kv:"p PD"} ${hide;kv:"pc yellow"}
+ .CMD=${cwd;rootdir;input:File} $YMAKE_PYTHON3 ${input:"build/scripts/docs_proto_wrapper.py"} --docs-output ${norel;output;suf=.md;nopath;noext:File} --template ${input:"build/scripts/docs_proto_markdown.tmpl"} -- $PROTOC -I=./$PROTO_NAMESPACE -I=$ARCADIA_ROOT/$PROTO_NAMESPACE ${pre=-I=:_PROTO__INCLUDE} -I=$ARCADIA_BUILD_ROOT -I=$PROTOBUF_INCLUDE_PATH $_PROTOC_FLAGS --plugin=protoc-gen-doc=${tool:"vendor/github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc"} ${rootrel;input:File} ${hide:PROTO_FAKEID} ${hide;kv:"p PD"} ${hide;kv:"pc yellow"}
}
# tag:proto
@@ -402,7 +405,7 @@ macro _DOCS_PROTO_CMD(File) {
###
### Generate .yson.go from .proto using yt/yt/orm/go/codegen/yson/internal/proto-yson-gen/cmd/proto-yson-gen
macro YT_ORM_PROTO_YSON(OUT_OPTS[], Files...) {
- .CMD=${cwd:BINDIR} $PROTOC --plugin=protoc-gen-custom=${tool:"yt/yt/orm/go/codegen/yson/internal/proto-yson-gen/cmd/proto-yson-gen"} -I=${ARCADIA_ROOT}/${PROTO_NAMESPACE} ${pre=-I=:_PROTO__INCLUDE} -I=${ARCADIA_ROOT} --custom_out="$OUT_OPTS paths=base_name:." --custom_opt="goroot=${GO_TOOLS_ROOT}" $_PROTOC_FLAGS ${input:Files} ${hide;noauto;output;nopath;noext;suf=.yson.go:Files} ${hide:PROTO_FAKEID}
+ .CMD=${cwd:BINDIR} $PROTOC --plugin=protoc-gen-custom=${tool:"yt/yt/orm/go/codegen/yson/internal/proto-yson-gen/cmd/proto-yson-gen"} -I=${ARCADIA_ROOT}/${PROTO_NAMESPACE} ${pre=-I=:_PROTO__INCLUDE} -I=${ARCADIA_ROOT} --custom_out="$OUT_OPTS paths=base_name:." --custom_opt="goroot=${GO_TOOLS_ROOT}" $_PROTOC_FLAGS ${input:Files} ${hide;noauto;output;suf=.yson.go;nopath;noext:Files} ${hide:PROTO_FAKEID}
.ADDINCL=FOR proto ${ARCADIA_ROOT}/${MODDIR} FOR proto ${ARCADIA_ROOT}/${GO_TEST_IMPORT_PATH} FOR proto yt ${ARCADIA_BUILD_ROOT}/yt FOR proto ${PROTOBUF_INCLUDE_PATH}
.PEERDIR=$GOSTD/strings $GOSTD/fmt $GOSTD/errors $GOSTD/encoding/json library/go/core/xerrors yt/go/yson yt/go/yterrors yt/yt/orm/go/codegen/yson/ytypes contrib/libs/protobuf
@@ -471,7 +474,7 @@ macro NO_MYPY() {
# tag:proto tag:python-specific
macro _PY_PROTO_CMD_BASE(File, Suf, Args...) {
- .CMD=$PY_PROTOC -I=./$PROTO_NAMESPACE -I=$ARCADIA_ROOT/$PROTO_NAMESPACE ${pre=-I=:_PROTO__INCLUDE} -I=$ARCADIA_BUILD_ROOT -I=$PROTOBUF_INCLUDE_PATH --python_out=$ARCADIA_BUILD_ROOT/$PROTO_NAMESPACE $_PROTOC_FLAGS ${input;rootrel:File} ${output;main;hide;noauto;norel;nopath;noext;suf=$Suf:File} ${hide;kv:"p PB"} ${hide;kv:"pc yellow"} $Args ${hide:PROTO_FAKEID}
+ .CMD=$PY_PROTOC -I=./$PROTO_NAMESPACE -I=$ARCADIA_ROOT/$PROTO_NAMESPACE ${pre=-I=:_PROTO__INCLUDE} -I=$ARCADIA_BUILD_ROOT -I=$PROTOBUF_INCLUDE_PATH --python_out=$ARCADIA_BUILD_ROOT/$PROTO_NAMESPACE $_PROTOC_FLAGS ${rootrel;input:File} ${hide;main;noauto;norel;output;suf=$Suf;nopath;noext:File} ${hide;kv:"p PB"} ${hide;kv:"pc yellow"} $Args ${hide:PROTO_FAKEID}
}
# tag:proto tag:python-specific
@@ -481,7 +484,7 @@ macro _PY_PROTO_CMD(File) {
# tag:proto tag:python-specific
macro _PY_PROTO_CMD_INTERNAL(File) {
- .CMD=${cwd;rootdir;input:File} $GEN_PY_PROTOS --suffixes $PY_PROTO_SUFFIXES $PY_PROTO_MYPY_SUFFIX --input ${input;rootrel:File} --ns /$PROTO_NAMESPACE -- $_PY_PROTO_CMD_BASE($File __int${_PYTHON_VER}___pb2.py $PY_PROTO_OPTS $PY_PROTO_OUTS_INTERNAL ${hide;kv:"ext_out_name_for_${nopath;noext;suf=__int${_PYTHON_VER}___pb2.py:File} ${nopath;noext;suf=_pb2.py:File}"} $PY_PROTO_MYPY_PLUGIN_INTERNAL)
+ .CMD=${cwd;rootdir;input:File} $GEN_PY_PROTOS --suffixes $PY_PROTO_SUFFIXES $PY_PROTO_MYPY_SUFFIX --input ${rootrel;input:File} --ns /$PROTO_NAMESPACE -- $_PY_PROTO_CMD_BASE($File __int${_PYTHON_VER}___pb2.py $PY_PROTO_OPTS $PY_PROTO_OUTS_INTERNAL ${hide;kv:"ext_out_name_for_${suf=__int${_PYTHON_VER}___pb2.py;nopath;noext:File} ${suf=_pb2.py;nopath;noext:File}"} $PY_PROTO_MYPY_PLUGIN_INTERNAL)
}
# tag:proto tag:java-specific
@@ -519,8 +522,8 @@ otherwise {
KOTLIN_PROTO_FLAGS=
# tag:proto tag:java-specific
macro _JAVA_PROTO_CMD(File) {
- .CMD=${cwd;rootdir;input:File} $YMAKE_PYTHON ${input:"build/scripts/tared_protoc.py"} --tar-output ${output;norel;nopath;noext;suf=.jsrc:File} --protoc-out-dir $ARCADIA_BUILD_ROOT/java_out $JAVA_PROTOC -I=./$PROTO_NAMESPACE ${pre=-I=:_PROTO__INCLUDE} -I=$ARCADIA_ROOT --java_out=${_JAVA_PROTO_LITE_ARG}$ARCADIA_BUILD_ROOT/java_out ${KOTLIN_PROTO_FLAGS} $_PROTOC_FLAGS ${input;rootrel:File} ${hide;kv:"p PB"} ${hide;kv:"pc yellow"} $JAVA_PROTO_ARGS ${hide:PROTO_FAKEID} ${hide:"UID_BANHAMMER"}
- .SEM=proto_files ${input;rootrel:File} ${hide;output:File.jsrc}
+ .CMD=${cwd;rootdir;input:File} $YMAKE_PYTHON ${input:"build/scripts/tared_protoc.py"} --tar-output ${norel;output;suf=.jsrc;nopath;noext:File} --protoc-out-dir $ARCADIA_BUILD_ROOT/java_out $JAVA_PROTOC -I=./$PROTO_NAMESPACE ${pre=-I=:_PROTO__INCLUDE} -I=$ARCADIA_ROOT --java_out=${_JAVA_PROTO_LITE_ARG}$ARCADIA_BUILD_ROOT/java_out ${KOTLIN_PROTO_FLAGS} $_PROTOC_FLAGS ${rootrel;input:File} ${hide;kv:"p PB"} ${hide;kv:"pc yellow"} $JAVA_PROTO_ARGS ${hide:PROTO_FAKEID} ${hide:"UID_BANHAMMER"}
+ .SEM=proto_files ${rootrel;input:File} ${hide;output;suf=.jsrc:File}
}
# tag:proto tag:python-specific
@@ -541,12 +544,12 @@ macro _PY_EVLOG_CMD(File) {
# tag:python-specific tag:proto
macro _PY_EVLOG_CMD_INTERNAL(File) {
- .CMD=${cwd;rootdir;input:File} $GEN_PY_PROTOS --suffixes $PY_EVLOG_SUFFIXES --input ${input;rootrel:File} --ns /$PROTO_NAMESPACE -- $_PY_EVLOG_CMD_BASE($File __int${_PYTHON_VER}___ev_pb2.py ${hide;kv:"ext_out_name_for_${nopath;noext;suf=__int${_PYTHON_VER}___ev_pb2.py:File} ${nopath;noext;suf=_ev_pb2.py:File}"})
+ .CMD=${cwd;rootdir;input:File} $GEN_PY_PROTOS --suffixes $PY_EVLOG_SUFFIXES --input ${rootrel;input:File} --ns /$PROTO_NAMESPACE -- $_PY_EVLOG_CMD_BASE($File __int${_PYTHON_VER}___ev_pb2.py ${hide;kv:"ext_out_name_for_${suf=__int${_PYTHON_VER}___ev_pb2.py;nopath;noext:File} ${suf=_ev_pb2.py;nopath;noext:File}"})
}
# tag:java-specific tag:proto
macro _JAVA_EVLOG_CMD(File) {
- .CMD=$COPY_CMD ${input:File} ${output;nopath;noext;norel;suf=_ev.proto:File} ${hide;kv:"p EV"} ${hide;kv:"pc yellow"}
+ .CMD=$COPY_CMD ${input:File} ${norel;output;suf=_ev.proto;nopath;noext:File} ${hide;kv:"p EV"} ${hide;kv:"pc yellow"}
.PEERDIR=library/cpp/eventlog/proto
}
@@ -585,7 +588,7 @@ macro GRPC_WITH_GMOCK() {
SET(_GRPC_GMOCK_OUTFLAG EXTRA_OUT_FLAG generate_mock_code=true)
GRPC()
_ADD_CPP_PROTO_OUT(_mock$_GRPC_SUF_H)
- SET_APPEND(CPP_PROTO_OUTS_SEM ${hide;output;norel;nopath;noext;suf=_mock$_GRPC_SUF_H:File})
+ SET_APPEND(CPP_PROTO_OUTS_SEM ${hide;norel;output;suf=_mock$_GRPC_SUF_H;nopath;noext:File})
}
macro GO_PROTO_USE_V2() {
@@ -640,18 +643,18 @@ macro _GENERATE_PY_EVS_INTERNAL(FILES...) {
###
### TODO: proper implementation needed
macro LIST_PROTO(TO="files.proto", Files...) {
- .CMD=$YMAKE_PYTHON3 ${input:"build/scripts/list.py"} ${Files} ${hide;input:Files} ${stdout;noauto;output:TO} ${output_include;from_input;hide:Files}
+ .CMD=$YMAKE_PYTHON3 ${input:"build/scripts/list.py"} ${Files} ${hide;input:Files} ${stdout;noauto;output:TO} ${hide;from_input;output_include:Files}
_COMPILE_LIST_PROTO(${TO})
_EXPOSE(${TO})
}
macro _COMPILE_LIST_PROTO(SRC) {
- .CMD=$YMAKE_PYTHON ${input:"build/scripts/touch.py"} ${hide;input:SRC} ${output;noext;suf=.pb.h:SRC}
+ .CMD=$YMAKE_PYTHON ${input:"build/scripts/touch.py"} ${hide;input:SRC} ${output;suf=.pb.h;noext:SRC}
}
# tag:proto
macro _PROTO_DESC_RAWPROTO_CMD(File) {
- .CMD=${cwd;rootdir;input:File} $YMAKE_PYTHON3 ${input:"build/scripts/desc_rawproto_wrapper.py"} --desc-output ${output;suf=.desc:File} --rawproto-output ${output;norel;suf=.${_MODDIR_HASH}.rawproto:File} --proto-file ${input;rootrel:File} -- $PROTOC -I=./$PROTO_NAMESPACE -I=$ARCADIA_ROOT/$PROTO_NAMESPACE ${pre=-I=:_PROTO__INCLUDE} -I=$ARCADIA_BUILD_ROOT -I=$PROTOBUF_INCLUDE_PATH --include_source_info $_PROTOC_FLAGS ${hide:PROTO_FAKEID}
+ .CMD=${cwd;rootdir;input:File} $YMAKE_PYTHON3 ${input:"build/scripts/desc_rawproto_wrapper.py"} --desc-output ${output;suf=.desc:File} --rawproto-output ${norel;output;suf=.${_MODDIR_HASH}.rawproto:File} --proto-file ${rootrel;input:File} -- $PROTOC -I=./$PROTO_NAMESPACE -I=$ARCADIA_ROOT/$PROTO_NAMESPACE ${pre=-I=:_PROTO__INCLUDE} -I=$ARCADIA_BUILD_ROOT -I=$PROTOBUF_INCLUDE_PATH --include_source_info $_PROTOC_FLAGS ${hide:PROTO_FAKEID}
}
_PROTO_DESC_MERGE_CMD=$YMAKE_PYTHON3 ${input:"build/scripts/merge_files.py"} $TARGET ${ext=.desc:AUTO_INPUT} ${hide;kv:"p PD"} ${hide;kv:"pc light-cyan"} && ${cwd:ARCADIA_BUILD_ROOT} $YMAKE_PYTHON3 ${input:"build/scripts/collect_rawproto.py"} --output ${output;suf=.protosrc:REALPRJNAME} ${rootrel;ext=.rawproto:AUTO_INPUT}
@@ -814,11 +817,13 @@ module _TS_PREPARE_DEPS: _PREPARE_DEPS_BASE {
}
module _DESC_PROTO: _BARE_UNIT {
- .CMD=_PROTO_DESC_MERGE_CMD
+ .CMD=$_PROTO_DESC_MERGE_CMD
.EXTS=.desc .rawproto
.NODE_TYPE=Library
.IGNORED=GENERATE_ENUM_SERIALIZATION GENERATE_ENUM_SERIALIZATION_WITH_HEADER YMAPS_SPROTO RESOURCE GO_PROTO_PLUGIN GRPC
.ALIASES=SRCS=_SRCS_NO_GLOBAL
+ .STRUCT_CMD=yes
+ .STRUCT_SEM=yes
ENABLE(DESC_PROTO)
DISABLE(_NEED_SBOM_INFO)
@@ -917,17 +922,19 @@ multimodule PROTO_LIBRARY {
}
module DESC_PROTO: _DESC_PROTO {
- .SEM=_SEM_IGNORED
+ .SEM=$_SEM_IGNORED
SET_APPEND(PEERDIR_TAGS DESC_PROTO)
}
}
module PROTO_DESCRIPTIONS: _BARE_UNIT {
- .CMD=_PROTO_DESC_MERGE_PEERS_CMD
+ .CMD=$_PROTO_DESC_MERGE_PEERS_CMD
.PEERDIR_POLICY=as_build_from
.NODE_TYPE=Library
.RESTRICTED=SRCS
.FINAL_TARGET=yes
+ .STRUCT_CMD=yes
+ .STRUCT_SEM=yes
SET(MODULE_TAG PROTO_DESCRIPTIONS)
SET(PEERDIR_TAGS DESC_PROTO DESC_PROTO_FROM_SCHEMA)
@@ -1006,13 +1013,13 @@ multimodule PROTO_SCHEMA {
}
module DESC_PROTO_FROM_SCHEMA: _DESC_PROTO {
- .SEM=_SEM_IGNORED
+ .SEM=$_SEM_IGNORED
DISABLE(START_TARGET)
SET_APPEND(PEERDIR_TAGS DESC_PROTO DESC_PROTO_FROM_SCHEMA)
}
module PROTO_DESCRIPTIONS_: PROTO_DESCRIPTIONS {
- .SEM=_SEM_IGNORED
+ .SEM=$_SEM_IGNORED
.PEERDIRSELF=DESC_PROTO_FROM_SCHEMA
.FINAL_TARGET=yes
SET_APPEND(PEERDIR_TAGS DESC_PROTO_FROM_SCHEMA)
diff --git a/build/conf/python.conf b/build/conf/python.conf
index 92fef22499..d99de1e854 100644
--- a/build/conf/python.conf
+++ b/build/conf/python.conf
@@ -302,18 +302,16 @@ macro STYLE_DUMMY() {
### Check python3 sources for style issues using black.
macro STYLE_PYTHON(CONFIG_TYPE="") {
.ALLOWED_IN_LINTERS_MAKE=yes
- _ADD_PY_LINTER_CHECK(NAME black LINTER tools/black_linter/black_linter FILE_PROCESSING_TIME $BLACK_FILE_PROCESSING_TIME CONFIGS $PYTHON_LINTERS_DEFAULT_CONFIGS CUSTOM_CONFIG $CONFIG_TYPE CONFIG_TYPE $CONFIG_TYPE)
+ _ADD_PY_LINTER_CHECK(NAME black LINTER tools/black_linter/black_linter FILE_PROCESSING_TIME $BLACK_FILE_PROCESSING_TIME CONFIGS $PYTHON_LINTERS_DEFAULT_CONFIGS CONFIG_TYPE $CONFIG_TYPE)
}
# tag:python-specific tag:test
### @usage: STYLE_RUFF([CONFIG_TYPE config_type] [CHECK_FORMAT])
###
### Check python3 sources for style issues using ruff. `CHECK_FORMAT` enables `ruff format` check.
-RUFF_PROJECT_TO_CONFIG_MAP=build/config/tests/ruff/ruff_config_paths.json
macro STYLE_RUFF(CONFIG_TYPE="", CHECK_FORMAT?"yes":"no") {
.ALLOWED_IN_LINTERS_MAKE=yes
- SET_APPEND(_MAKEFILE_INCLUDE_LIKE_DEPS ${ARCADIA_ROOT}/${RUFF_PROJECT_TO_CONFIG_MAP})
- _ADD_PY_LINTER_CHECK(NAME ruff LINTER tools/ruff_linter/bin/ruff_linter GLOBAL_RESOURCES build/external_resources/ruff FILE_PROCESSING_TIME $RUFF_FILE_PROCESSING_TIME CONFIGS $PYTHON_LINTERS_DEFAULT_CONFIGS PROJECT_TO_CONFIG_MAP $RUFF_PROJECT_TO_CONFIG_MAP CONFIG_TYPE $CONFIG_TYPE EXTRA_PARAMS check_format=${CHECK_FORMAT})
+ _ADD_PY_LINTER_CHECK(NAME ruff LINTER tools/ruff_linter/bin/ruff_linter GLOBAL_RESOURCES build/external_resources/ruff FILE_PROCESSING_TIME $RUFF_FILE_PROCESSING_TIME CONFIGS $PYTHON_LINTERS_DEFAULT_CONFIGS CONFIG_TYPE $CONFIG_TYPE EXTRA_PARAMS check_format=${CHECK_FORMAT})
}
# tag:python-specific tag:test
@@ -629,8 +627,8 @@ module PY2_LIBRARY: _LIBRARY {
when ($PY_PROTO_MYPY_ENABLED == "yes") {
PY_PROTO_MYPY_SUFFIX=_pb2.pyi
- PY_PROTO_MYPY_PLUGIN=$PY_PROTO_MYPY_PLUGIN_BASE ${hide;noauto;output;norel;nopath;noext;suf=_pb2.pyi:File}
- PY_PROTO_MYPY_PLUGIN_INTERNAL=$PY_PROTO_MYPY_PLUGIN_BASE ${hide;noauto;output;norel;nopath;noext;suf=__intpy2___pb2.pyi:File} ${hide;kv:"ext_out_name_for_${nopath;noext;suf=__intpy2___pb2.pyi:File} ${nopath;noext;suf=_pb2.pyi:File}"})
+ PY_PROTO_MYPY_PLUGIN=$PY_PROTO_MYPY_PLUGIN_BASE ${hide;noauto;norel;output;suf=_pb2.pyi;nopath;noext:File}
+ PY_PROTO_MYPY_PLUGIN_INTERNAL=$PY_PROTO_MYPY_PLUGIN_BASE ${hide;noauto;norel;output;suf=__intpy2___pb2.pyi;nopath;noext:File} ${hide;kv:"ext_out_name_for_${suf=__intpy2___pb2.pyi;nopath;noext:File} ${suf=_pb2.pyi;nopath;noext:File}"})
}
SET(MODULE_LANG PY2)
@@ -676,8 +674,8 @@ module PY3_LIBRARY: _LIBRARY {
when ($PY_PROTO_MYPY_ENABLED == "yes") {
PY_PROTO_MYPY_SUFFIX=_pb2.pyi
- PY_PROTO_MYPY_PLUGIN=$PY_PROTO_MYPY_PLUGIN_BASE ${hide;noauto;output;norel;nopath;noext;suf=_pb2.pyi:File}
- PY_PROTO_MYPY_PLUGIN_INTERNAL=$PY_PROTO_MYPY_PLUGIN_BASE ${hide;noauto;output;norel;nopath;noext;suf=__intpy3___pb2.pyi:File} ${hide;kv:"ext_out_name_for_${nopath;noext;suf=__intpy3___pb2.pyi:File} ${nopath;noext;suf=_pb2.pyi:File}"})
+ PY_PROTO_MYPY_PLUGIN=$PY_PROTO_MYPY_PLUGIN_BASE ${hide;noauto;norel;output;suf=_pb2.pyi;nopath;noext:File}
+ PY_PROTO_MYPY_PLUGIN_INTERNAL=$PY_PROTO_MYPY_PLUGIN_BASE ${hide;noauto;norel;output;suf=__intpy3___pb2.pyi;nopath;noext:File} ${hide;kv:"ext_out_name_for_${suf=__intpy3___pb2.pyi;nopath;noext:File} ${suf=_pb2.pyi;nopath;noext:File}"})
}
SET(MODULE_LANG PY3)
@@ -746,6 +744,14 @@ module _BASE_PY_PROGRAM: _BASE_PROGRAM {
SET(MODULE_LANG PY2)
}
+# tag:codenav tag:py2 tag:deprecated
+when ($CODENAVIGATION && $NOCODENAVIGATION != "yes") {
+ PY_PROGRAM_LINK_EXE=$LINK_EXE && ${hide;kv:"pyndex $TARGET"}
+}
+otherwise {
+ PY_PROGRAM_LINK_EXE=$LINK_EXE
+}
+
# tag:python-specific tag:codenav
when ($CODENAVIGATION && $NOCODENAVIGATION != "yes") {
PY3_PROGRAM_LINK_EXE=$LINK_EXE && ${hide;kv:"py3yndex $TARGET"}
diff --git a/build/conf/swig.conf b/build/conf/swig.conf
index ab33d88798..8143dde63e 100644
--- a/build/conf/swig.conf
+++ b/build/conf/swig.conf
@@ -8,13 +8,13 @@ SWIG_IMPLICIT_INCLUDES = swig.swg go.swg java.swg perl5.swg python.swg
_SWIG_CMD=$_SWIG_PYTHON_CMD
_SWIG_SEM_TO_MODULE_LINK=${hide;output;suf=.o:SRC}
-_SWIG_PYTHON_CMD=${_SWIG_TOOL} -module ${REALPRJNAME} -cpperraswarn -c++ -python -interface ${MODULE_PREFIX}${REALPRJNAME} -o ${output;noext;main;suf=_wrap.swg.cpp:SRC} ${noauto;output;add_to_outs;hide;tobindir;suf=.py:REALPRJNAME} -outdir ${BINDIR} ${pre=-I:_SWIG__INCLUDE} ${input:SRC} ${hide;kv:"p SW"} ${hide;kv:"pc yellow"}
+_SWIG_PYTHON_CMD=${_SWIG_TOOL} -module ${REALPRJNAME} -cpperraswarn -c++ -python -interface ${MODULE_PREFIX}${REALPRJNAME} -o ${output;noext;main;suf=_wrap.swg.cpp:SRC} ${hide;noauto;output;add_to_outs;tobindir;suf=.py:REALPRJNAME} -outdir ${BINDIR} ${pre=-I:_SWIG__INCLUDE} ${input:SRC} ${hide;kv:"p SW"} ${hide;kv:"pc yellow"}
-_SWIG_PERL_CMD=${_SWIG_TOOL} -c++ -cpperraswarn -module ${REALPRJNAME} -shadow -perl -o ${output;noext;main;suf=_wrap.swg.cpp:SRC} ${noauto;output;add_to_outs;hide;tobindir;suf=.pm:REALPRJNAME} -outdir ${ARCADIA_BUILD_ROOT}/${MODDIR} ${pre=-I:_SWIG__INCLUDE} ${input:SRC} ${hide;kv:"p SW"} ${hide;kv:"pc yellow"}
+_SWIG_PERL_CMD=${_SWIG_TOOL} -c++ -cpperraswarn -module ${REALPRJNAME} -shadow -perl -o ${output;noext;main;suf=_wrap.swg.cpp:SRC} ${hide;noauto;output;add_to_outs;tobindir;suf=.pm:REALPRJNAME} -outdir ${ARCADIA_BUILD_ROOT}/${MODDIR} ${pre=-I:_SWIG__INCLUDE} ${input:SRC} ${hide;kv:"p SW"} ${hide;kv:"pc yellow"}
-_SWIG_JNI_CPP_CMD=$YMAKE_PYTHON3 ${input:"build/scripts/jni_swig.py"} --swig ${_SWIG_TOOL} --default-module ${nopath;noext:SRC} --src ${input:SRC} --out-header ${output;main;noext;suf=_wrap.swg.h:SRC} --package-by-file ru/yandex/${input;rootrel:SRC} -- ${pre=-I:_SWIG__INCLUDE} -o ${output;noext;suf=_wrap.swg.cpp:SRC} ${hide;kv:"p SW"} ${hide;kv:"pc yellow"}
-_SWIG_JNI_JAVA_CMD=$YMAKE_PYTHON3 ${input:"build/scripts/jni_swig.py"} --swig ${_SWIG_TOOL} --default-module ${nopath;noext:SRC} --src ${input:SRC} --package-by-file ru/yandex/${input;rootrel:SRC} --jsrc ${output;main;suf=.jsrc:SRC} -- ${pre=-I:_SWIG__INCLUDE} -o ${BINDIR}/unused.cpp ${hide;kv:"p SW"} ${hide;kv:"pc yellow"}
-_SWIG_JNI_CMD=$YMAKE_PYTHON3 ${input:"build/scripts/jni_swig.py"} --swig ${_SWIG_TOOL} --default-module ${nopath;noext:SRC} --src ${input:SRC} --out-header ${output;main;noext;suf=_wrap.swg.h:SRC} --package-by-file ru/yandex/${input;rootrel:SRC} --jsrc ${output;suf=.jsrc:SRC} -- ${pre=-I:_SWIG__INCLUDE} -o ${output;noext;suf=_wrap.swg.cpp:SRC} ${hide;kv:"p SW"} ${hide;kv:"pc yellow"}
+_SWIG_JNI_CPP_CMD=$YMAKE_PYTHON3 ${input:"build/scripts/jni_swig.py"} --swig ${_SWIG_TOOL} --default-module ${nopath;noext:SRC} --src ${input:SRC} --out-header ${main;output;suf=_wrap.swg.h;noext:SRC} --package-by-file ru/yandex/${rootrel;input:SRC} -- ${pre=-I:_SWIG__INCLUDE} -o ${output;suf=_wrap.swg.cpp;noext:SRC} ${hide;kv:"p SW"} ${hide;kv:"pc yellow"}
+_SWIG_JNI_JAVA_CMD=$YMAKE_PYTHON3 ${input:"build/scripts/jni_swig.py"} --swig ${_SWIG_TOOL} --default-module ${nopath;noext:SRC} --src ${input:SRC} --package-by-file ru/yandex/${rootrel;input:SRC} --jsrc ${main;output;suf=.jsrc:SRC} -- ${pre=-I:_SWIG__INCLUDE} -o ${BINDIR}/unused.cpp ${hide;kv:"p SW"} ${hide;kv:"pc yellow"}
+_SWIG_JNI_CMD=$YMAKE_PYTHON3 ${input:"build/scripts/jni_swig.py"} --swig ${_SWIG_TOOL} --default-module ${nopath;noext:SRC} --src ${input:SRC} --out-header ${main;output;suf=_wrap.swg.h;noext:SRC} --package-by-file ru/yandex/${rootrel;input:SRC} --jsrc ${output;suf=.jsrc:SRC} -- ${pre=-I:_SWIG__INCLUDE} -o ${output;suf=_wrap.swg.cpp;noext:SRC} ${hide;kv:"p SW"} ${hide;kv:"pc yellow"}
_SWIG_JNI_PEERDIR=contrib/libs/jdk
when ($USE_SYSTEM_JDK == "yes" || $OS_ANDROID == "yes") {
@@ -27,7 +27,7 @@ when ($USE_SYSTEM_JDK == "yes" || $OS_ANDROID == "yes") {
### Run swig on Src to produce DstSubPrefix.py and DstSubPrefix_swg.cpp that
### provides DstSubPrefix_swg python module.
macro _SWIG_PYTHON_CPP(Src, DstSubPrefix) {
- .CMD=$_SWIG_TOOL -I$ARCADIA_BUILD_ROOT -I$ARCADIA_ROOT -I$_SWIG_LIBRARY_ABS/python -I$_SWIG_LIBRARY_ABS -c++ -python -module ${nopath:DstSubPrefix} -interface ${nopath;suf=_swg:DstSubPrefix} -o ${output;suf=.swg.cpp:DstSubPrefix} ${input:Src} ${hide;noauto;output;suf=.py:DstSubPrefix} ${hide;kv:"p SW"} ${hide;kv:"pc yellow"}
+ .CMD=$_SWIG_TOOL -I$ARCADIA_BUILD_ROOT -I$ARCADIA_ROOT -I$_SWIG_LIBRARY_ABS/python -I$_SWIG_LIBRARY_ABS -c++ -python -module ${nopath:DstSubPrefix} -interface ${suf=_swg;nopath:DstSubPrefix} -o ${output;suf=.swg.cpp:DstSubPrefix} ${input:Src} ${hide;noauto;output;suf=.py:DstSubPrefix} ${hide;kv:"p SW"} ${hide;kv:"pc yellow"}
.PEERDIR=contrib/tools/swig/Lib/python
}
@@ -36,7 +36,7 @@ macro _SWIG_PYTHON_CPP(Src, DstSubPrefix) {
###
### Like _SWIG_PYTHON_CPP but generate DstSubPrefix_swg.c.
macro _SWIG_PYTHON_C(Src, DstSubPrefix) {
- .CMD=$_SWIG_TOOL -I$ARCADIA_BUILD_ROOT -I$ARCADIA_ROOT -I$_SWIG_LIBRARY_ABS/python -I$_SWIG_LIBRARY_ABS -python -module ${nopath:DstSubPrefix} -interface ${nopath;suf=_swg:DstSubPrefix} -o ${output;suf=.swg.c:DstSubPrefix} ${input:Src} ${hide;noauto;output;suf=.py:DstSubPrefix} ${hide;kv:"p SW"} ${hide;kv:"pc yellow"}
+ .CMD=$_SWIG_TOOL -I$ARCADIA_BUILD_ROOT -I$ARCADIA_ROOT -I$_SWIG_LIBRARY_ABS/python -I$_SWIG_LIBRARY_ABS -python -module ${nopath:DstSubPrefix} -interface ${suf=_swg;nopath:DstSubPrefix} -o ${output;suf=.swg.c:DstSubPrefix} ${input:Src} ${hide;noauto;output;suf=.py:DstSubPrefix} ${hide;kv:"p SW"} ${hide;kv:"pc yellow"}
.PEERDIR=contrib/tools/swig/Lib/python
}
diff --git a/build/conf/ts/node_modules.conf b/build/conf/ts/node_modules.conf
index f5a1e9a645..6524c1084a 100644
--- a/build/conf/ts/node_modules.conf
+++ b/build/conf/ts/node_modules.conf
@@ -7,7 +7,7 @@ NPM_SCRIPT=$NPM_ROOT/node_modules/npm/bin/npm-cli.js
PM_SCRIPT=
PM_TYPE=
-# combined input/outputs records as list of directives ${input;hide:<path>} ${output;hide:<path>}, used in builders
+# combined input/outputs records as list of directives ${hide;input:<path>} ${hide;output:<path>}, used in builders
_NODE_MODULES_INOUTS=
_YATOOL_PREBUILDER_ARG=
diff --git a/build/export_generators/ide-gradle/build.gradle.kts.jinja b/build/export_generators/ide-gradle/build.gradle.kts.jinja
index c037f3e7bc..603e0a2525 100644
--- a/build/export_generators/ide-gradle/build.gradle.kts.jinja
+++ b/build/export_generators/ide-gradle/build.gradle.kts.jinja
@@ -1,8 +1,11 @@
-{%- macro PatchRoots(arg, depend = false) -%}
+{%- macro PatchRoots(arg, depend = false, output = false) -%}
{#- Always replace (arcadia_root) === (SOURCE_ROOT in ymake) to $project_root in Gradle -#}
{%- if depend -%}
{#- Replace (export_root) === (BUILD_ROOT in ymake) to $project_root in Gradle, because prebuilt tools in arcadia, not in build_root -#}
"{{ arg|replace(export_root, "$project_root")|replace(arcadia_root, "$project_root") }}"
+{%- elif output and arg[0] != '/' -%}
+{#- Relative outputs in buildDir -#}
+"$buildDir/{{ arg }}"
{%- else -%}
{#- Replace (export_root) === (BUILD_ROOT in ymake) to baseBuildDir in Gradle - root of all build folders for modules -#}
"{{ arg|replace(export_root, "$baseBuildDir")|replace(arcadia_root, "$project_root") }}"
@@ -25,6 +28,7 @@
{%- include "[generator]/javadoc.jinja" -%}
{%- include "[generator]/run_program.jinja" -%}
{%- include "[generator]/run_java_program.jinja" -%}
+{%- include "[generator]/run_common.jinja" -%}
{%- include "[generator]/dependencies.jinja" -%}
{%- include "extra-tests.gradle.kts" ignore missing -%}
{%- if publish -%}
diff --git a/build/export_generators/ide-gradle/build.gradle.kts.proto.jinja b/build/export_generators/ide-gradle/build.gradle.kts.proto.jinja
index ec3465d316..f490b8f309 100644
--- a/build/export_generators/ide-gradle/build.gradle.kts.proto.jinja
+++ b/build/export_generators/ide-gradle/build.gradle.kts.proto.jinja
@@ -1,16 +1,23 @@
-{%- macro PatchRoots(arg, depend = false) -%}
+{%- macro PatchRoots(arg, depend = false, output = false) -%}
{#- Always replace (arcadia_root) === (SOURCE_ROOT in ymake) to $project_root in Gradle -#}
{%- if depend -%}
{#- Replace (export_root) === (BUILD_ROOT in ymake) to $project_root in Gradle, because prebuilt tools in arcadia, not in build_root -#}
"{{ arg|replace(export_root, "$project_root")|replace(arcadia_root, "$project_root") }}"
+{%- elif output and arg[0] != '/' -%}
+{#- Relative outputs in buildDir -#}
+"$buildDir/{{ arg }}"
{%- else -%}
{#- Replace (export_root) === (BUILD_ROOT in ymake) to baseBuildDir in Gradle - root of all build folders for modules -#}
"{{ arg|replace(export_root, "$baseBuildDir")|replace(arcadia_root, "$project_root") }}"
{%- endif -%}
{%- endmacro -%}
-{%- macro PatchGeneratedProto(arg) -%}
-"{{ arg|replace(export_root, "$mainExtractedIncludeProtosDir")|replace(arcadia_root, "$mainExtractedIncludeProtosDir") }}"
+{%- macro PatchGeneratedProto(arg, relative = false) -%}
+{%- if relative -%}
+"{{ arg|replace(export_root + "/", "")|replace(arcadia_root + "/", "") }}"
+{%- else -%}
+"{{ arg|replace(export_root, "$baseBuildDir")|replace(arcadia_root, "$baseBuildDir") }}"
+{%- endif -%}
{%- endmacro -%}
{%- include "[generator]/proto_vars.jinja" -%}
@@ -27,6 +34,7 @@
{%- include "[generator]/proto_prepare.jinja" -%}
{%- include "[generator]/run_program.jinja" -%}
{%- include "[generator]/run_java_program.jinja" -%}
+{%- include "[generator]/run_common.jinja" -%}
{%- include "[generator]/javadoc.jinja" -%}
{%- include "[generator]/proto_dependencies.jinja" -%}
{%- include "[generator]/debug.jinja" ignore missing -%}
diff --git a/build/export_generators/ide-gradle/builddir.jinja b/build/export_generators/ide-gradle/builddir.jinja
index f3fb92f3ce..88a5694d15 100644
--- a/build/export_generators/ide-gradle/builddir.jinja
+++ b/build/export_generators/ide-gradle/builddir.jinja
@@ -1,6 +1,6 @@
{#- empty string #}
-val baseBuildDir = "{{ export_root }}/gradle.build/"
-buildDir = file(baseBuildDir + project.path.replaceFirst(":", "/").replace(":", "."))
+val baseBuildDir = "{{ export_root }}/gradle.build"
+buildDir = file(baseBuildDir + "/" + project.path.replace(":", "/"))
subprojects {
- buildDir = file(baseBuildDir + project.path.replaceFirst(":", "/").replace(":", "."))
+ buildDir = file(baseBuildDir + "/" + project.path.replace(":", "/"))
}
diff --git a/build/export_generators/ide-gradle/proto_builddir.jinja b/build/export_generators/ide-gradle/proto_builddir.jinja
index ac50a1d33b..745f52e6e2 100644
--- a/build/export_generators/ide-gradle/proto_builddir.jinja
+++ b/build/export_generators/ide-gradle/proto_builddir.jinja
@@ -1,5 +1,5 @@
{%- include "[generator]/builddir.jinja" %}
val mainProtosDir = File(buildDir, "main_protos")
-{%- if libraries|length %}
+{%- if extractLibrariesProtosTask %}
val mainExtractedIncludeProtosDir = File(buildDir, "extracted-include-protos/main")
{%- endif %}
diff --git a/build/export_generators/ide-gradle/proto_prepare.jinja b/build/export_generators/ide-gradle/proto_prepare.jinja
index cf6fc96adf..804428a964 100644
--- a/build/export_generators/ide-gradle/proto_prepare.jinja
+++ b/build/export_generators/ide-gradle/proto_prepare.jinja
@@ -1,18 +1,39 @@
-{#- empty string #}
-{%- if target.proto_files|length %}
+{%- if prepareProtosTask %}
+
val prepareMainProtos = tasks.register<Copy>("prepareMainProtos") {
+{%- if target.proto_files|length %}
from("$project_root") {
{#- list of all current project proto files -#}
-{%- for proto in target.proto_files %}
+{%- for proto in target.proto_files %}
include("{{ proto }}")
-{%- endfor %}
+{%- endfor %}
+ }
+{% endif -%}
+{%- if target.runs|length or target.custom_runs|length %}
+ from("$baseBuildDir") {
+{%- for run in target.runs -%}
+{%- for out in run.out %}
+ include({{ PatchGeneratedProto(out, true) }})
+{%- endfor -%}
+{%- for out_dir in run.out_dir %}
+ include({{ PatchGeneratedProto(out_dir, true) }} + "/**/*.proto")
+{%- endfor -%}
+{%- endfor -%}
+{%- for custom_run in target.custom_runs -%}
+{%- for out in custom_run.outputs %}
+ include({{ PatchGeneratedProto(out, true) }})
+{%- endfor -%}
+{%- endfor %}
}
+{% endif -%}
into(mainProtosDir)
}
-{%- endif %}
+{%- endif -%}
+
+{%- if extractLibrariesProtosTask -%}
-{% if libraries|length -%}
val extractMainLibrariesProtos = tasks.register<Copy>("extractMainLibrariesProtos") {
+{%- if libraries|length -%}
from("$project_root") {
{#- list of all library directories -#}
{%- for library in libraries -%}
@@ -20,15 +41,16 @@ val extractMainLibrariesProtos = tasks.register<Copy>("extractMainLibrariesProto
include("{{ path_and_jar[0] }}/**/*.proto")
{%- endfor %}
}
+{% endif -%}
into(mainExtractedIncludeProtosDir)
}
+{%- endif %}
-{% endif -%}
afterEvaluate {
-{%- if target.proto_files|length %}
+{%- if prepareProtosTask %}
tasks.getByName("extractProto").dependsOn(prepareMainProtos)
{%- endif %}
-{%- if libraries|length %}
+{%- if extractLibrariesProtosTask %}
tasks.getByName("extractProto").dependsOn(extractMainLibrariesProtos)
{%- endif %}
}
diff --git a/build/export_generators/ide-gradle/proto_vars.jinja b/build/export_generators/ide-gradle/proto_vars.jinja
index 3f357ce243..95120c106e 100644
--- a/build/export_generators/ide-gradle/proto_vars.jinja
+++ b/build/export_generators/ide-gradle/proto_vars.jinja
@@ -1,7 +1,9 @@
{%- set publish = target.publish -%}
-{%- set libraries = target.consumer|selectattr('type', 'eq', 'library') -%}
{%- set with_kotlin = target.with_kotlin -%}
{%- set kotlin_version = target.kotlin_version -%}
{%- set proto_template = true -%}
+{%- set prepareProtosTask = target.proto_files|length or target.runs|length or target.custom_runs|length -%}
+{%- set libraries = target.consumer|selectattr('type', 'eq', 'library') -%}
+{%- set extractLibrariesProtosTask = libraries|length -%}
{%- include "[generator]/jdk.jinja" -%}
diff --git a/build/export_generators/ide-gradle/run_common.jinja b/build/export_generators/ide-gradle/run_common.jinja
new file mode 100644
index 0000000000..4f8a207bb4
--- /dev/null
+++ b/build/export_generators/ide-gradle/run_common.jinja
@@ -0,0 +1,11 @@
+{%- if not proto_template and (target.runs|length or target.custom_runs|length) %}
+
+tasks.getByName("sourcesJar").dependsOn(tasks.compileJava)
+{%- if with_kotlin %}
+tasks.getByName("sourcesJar").dependsOn(tasks.compileKotlin)
+{%- endif %}
+tasks.getByName("sourcesJar").mustRunAfter(tasks.compileTestJava)
+{%- if with_kotlin %}
+tasks.getByName("sourcesJar").mustRunAfter(tasks.compileTestKotlin)
+{%- endif -%}
+{%- endif -%}
diff --git a/build/export_generators/ide-gradle/run_java_program.jinja b/build/export_generators/ide-gradle/run_java_program.jinja
index 527a81c5b4..5630c84be3 100644
--- a/build/export_generators/ide-gradle/run_java_program.jinja
+++ b/build/export_generators/ide-gradle/run_java_program.jinja
@@ -5,8 +5,11 @@ val runJav{{ loop.index }} = task<JavaExec>("runJavaProgram{{ loop.index }}") {
group = "build"
description = "Code generation by run java program"
-{%- if run.cwd %}
+{#- Ignore default CWD to export_root -#}
+{%- if run.cwd and run.cwd != export_root %}
workingDir = file({{ PatchRoots(run.cwd) }})
+{%- else %}
+ workingDir = file("$buildDir")
{%- endif -%}
{%- set classpaths = run.classpath|reject('eq', '@.cplst') -%}
@@ -25,10 +28,16 @@ val runJav{{ loop.index }} = task<JavaExec>("runJavaProgram{{ loop.index }}") {
args = listOf(
{%- for arg in run.args -%}
{%- if not loop.first %}
-{%- if proto_template and (run.out_dir|select("eq", arg)|length or run.out|select("eq", arg)|length) %}
+{%- if run.out_dir|select("eq", arg)|length or run.out|select("eq", arg)|length -%}
+{%- if proto_template %}
{{ PatchGeneratedProto(arg) }},
+{%- else %}
+ {{ PatchRoots(arg, false, true) }},
+{%- endif %}
+{%- elif run.tool|select("in", arg)|length or run.in|select("eq", arg)|length or run.in_dir|select("eq", arg)|length %}
+ {{ PatchRoots(arg, true) }},
{%- else %}
- {{ PatchRoots(arg, run.tool|select("in", arg)|length) }},
+ {{ PatchRoots(arg) }},
{%- endif -%}
{%- endif -%}
{%- endfor %}
@@ -55,13 +64,21 @@ val runJav{{ loop.index }} = task<JavaExec>("runJavaProgram{{ loop.index }}") {
{%- if run.out_dir|length -%}
{%- for out_dir in run.out_dir|unique %}
- outputs.dir({{ PatchRoots(out_dir) }})
+{%- if proto_template %}
+ outputs.files({{ PatchGeneratedProto(out_dir) }})
+{%- else %}
+ outputs.dir({{ PatchRoots(out_dir, false, true) }})
+{%- endif -%}
{%- endfor -%}
{%- endif -%}
{%- if run.out|length -%}
{%- for out in run.out|unique %}
- outputs.files({{ PatchRoots(out) }})
+{%- if proto_template %}
+ outputs.files({{ PatchGeneratedProto(out) }})
+{%- else %}
+ outputs.files({{ PatchRoots(out, false, true) }})
+{%- endif -%}
{%- endfor -%}
{%- endif -%}
@@ -75,20 +92,23 @@ val runJav{{ loop.index }} = task<JavaExec>("runJavaProgram{{ loop.index }}") {
{%- if proto_template %}
tasks.getByName("prepareMainProtos").dependsOn(runJav{{ loop.index }})
-tasks.getByName("extractMainLibrariesProtos").dependsOn(runJav{{ loop.index }})
-{% else %}
-
-tasks.getByName("sourcesJar").dependsOn(runJav{{ loop.index }})
-{% endif -%}
+{%- endif %}
tasks.compileJava.configure {
dependsOn(runJav{{ loop.index }})
}
+tasks.compileTestJava.configure {
+ dependsOn(runJav{{ loop.index }})
+}
{%- if with_kotlin %}
tasks.compileKotlin.configure {
dependsOn(runJav{{ loop.index }})
}
-{%- endif %}
-{% endfor -%}
-{% endif -%}
+
+tasks.compileTestKotlin.configure {
+ dependsOn(runJav{{ loop.index }})
+}
+{%- endif -%}
+{%- endfor -%}
+{%- endif -%}
diff --git a/build/export_generators/ide-gradle/run_program.jinja b/build/export_generators/ide-gradle/run_program.jinja
index 147ae6c165..91a314c952 100644
--- a/build/export_generators/ide-gradle/run_program.jinja
+++ b/build/export_generators/ide-gradle/run_program.jinja
@@ -5,22 +5,30 @@ val runProg{{ loop.index }} = task<Exec>("runProgram{{ loop.index }}") {
group = "build"
description = "Code generation by run custom program"
-{%- if custom_run.cwd %}
+{#- Ignore default CWD to export_root -#}
+{%- if custom_run.cwd and custom_run.cwd != export_root %}
workingDir = file({{ PatchRoots(custom_run.cwd) }})
+{%- else %}
+ workingDir = file("$buildDir")
{%- endif %}
- commandLine(
+ commandLine("bash", "-c", listOf(
{%- for arg in custom_run.command -%}
-{%- if custom_run.depends|select("eq", arg)|length -%}
-{{ PatchRoots(arg, true) }}
-{%- elif proto_template -%}
-{#- generated proto put to prepared proto dir -#}
-{{ PatchGeneratedProto(arg) }}
-{%- else -%}
-{{ PatchRoots(arg) }}
+{%- if custom_run.depends|select("eq", arg)|length %}
+ {{ PatchRoots(arg, true) }}
+{%- elif custom_run.outputs|select("eq", arg)|length %}
+{%- if proto_template -%}
+ {#- generated proto put to prepared proto dir #}
+ {{ PatchGeneratedProto(arg) }}
+{%- else %}
+ {{ PatchRoots(arg, false, true) }}
+{%- endif -%}
+{%- else %}
+ {{ PatchRoots(arg) }}
{%- endif -%}
{%- if not loop.last %}, {% endif -%}
-{%- endfor -%})
+{%- endfor %}
+ ).joinToString(" "))
{%- if custom_run.depends|length -%}
{%- for depend in custom_run.depends|unique %}
@@ -33,7 +41,7 @@ val runProg{{ loop.index }} = task<Exec>("runProgram{{ loop.index }}") {
{%- if proto_template %}
outputs.files({{ PatchGeneratedProto(out) }})
{%- else %}
- outputs.files({{ PatchRoots(out) }})
+ outputs.files({{ PatchRoots(out, false, true) }})
{%- endif -%}
{%- endfor -%}
{%- endif -%}
@@ -45,20 +53,23 @@ val runProg{{ loop.index }} = task<Exec>("runProgram{{ loop.index }}") {
{%- if proto_template %}
-tasks.getByName("extractMainLibrariesProtos").dependsOn(runProg{{ loop.index }})
-{% else %}
-
-tasks.getByName("sourcesJar").dependsOn(runProg{{ loop.index }})
-{% endif -%}
+tasks.getByName("prepareMainProtos").dependsOn(runProg{{ loop.index }})
+{%- endif %}
tasks.compileJava.configure {
dependsOn(runProg{{ loop.index }})
}
+tasks.compileTestJava.configure {
+ dependsOn(runProg{{ loop.index }})
+}
{%- if with_kotlin %}
tasks.compileKotlin.configure {
dependsOn(runProg{{ loop.index }})
}
-{%- endif %}
-{% endfor -%}
-{% endif -%}
+tasks.compileTestKotlin.configure {
+ dependsOn(runProg{{ loop.index }})
+}
+{% endif -%}
+{%- endfor -%}
+{%- endif -%}
diff --git a/build/external_resources/ymake/public.resources.json b/build/external_resources/ymake/public.resources.json
index 8dfbb03c13..3674bbaa86 100644
--- a/build/external_resources/ymake/public.resources.json
+++ b/build/external_resources/ymake/public.resources.json
@@ -1,19 +1,19 @@
{
"by_platform": {
"darwin": {
- "uri": "sbr:8350151492"
+ "uri": "sbr:8383217030"
},
"darwin-arm64": {
- "uri": "sbr:8350149418"
+ "uri": "sbr:8383215700"
},
"linux": {
- "uri": "sbr:8350156616"
+ "uri": "sbr:8383219483"
},
"linux-aarch64": {
- "uri": "sbr:8350147184"
+ "uri": "sbr:8383214378"
},
"win32": {
- "uri": "sbr:8350153953"
+ "uri": "sbr:8383218300"
}
}
}
diff --git a/build/external_resources/ymake/resources.json b/build/external_resources/ymake/resources.json
index cb4a0f9e39..1ad3f10e4d 100644
--- a/build/external_resources/ymake/resources.json
+++ b/build/external_resources/ymake/resources.json
@@ -1,19 +1,19 @@
{
"by_platform": {
"darwin": {
- "uri": "sbr:8350186976"
+ "uri": "sbr:8383201449"
},
"darwin-arm64": {
- "uri": "sbr:8350181974"
+ "uri": "sbr:8383200356"
},
"linux": {
- "uri": "sbr:8350194229"
+ "uri": "sbr:8383203472"
},
"linux-aarch64": {
- "uri": "sbr:8350177737"
+ "uri": "sbr:8383199084"
},
"win32": {
- "uri": "sbr:8350190839"
+ "uri": "sbr:8383202531"
}
}
}
diff --git a/build/mapping.conf.json b/build/mapping.conf.json
index 019d916756..ba9c62501f 100644
--- a/build/mapping.conf.json
+++ b/build/mapping.conf.json
@@ -726,6 +726,7 @@
"8295444951": "{registry_endpoint}/8295444951",
"8326169329": "{registry_endpoint}/8326169329",
"8350151492": "{registry_endpoint}/8350151492",
+ "8383217030": "{registry_endpoint}/8383217030",
"5766171800": "{registry_endpoint}/5766171800",
"5805430761": "{registry_endpoint}/5805430761",
"5829025456": "{registry_endpoint}/5829025456",
@@ -800,6 +801,7 @@
"8295443254": "{registry_endpoint}/8295443254",
"8326167937": "{registry_endpoint}/8326167937",
"8350149418": "{registry_endpoint}/8350149418",
+ "8383215700": "{registry_endpoint}/8383215700",
"5766173070": "{registry_endpoint}/5766173070",
"5805432830": "{registry_endpoint}/5805432830",
"5829031598": "{registry_endpoint}/5829031598",
@@ -874,6 +876,7 @@
"8295448171": "{registry_endpoint}/8295448171",
"8326171209": "{registry_endpoint}/8326171209",
"8350156616": "{registry_endpoint}/8350156616",
+ "8383219483": "{registry_endpoint}/8383219483",
"5766171341": "{registry_endpoint}/5766171341",
"5805430188": "{registry_endpoint}/5805430188",
"5829023352": "{registry_endpoint}/5829023352",
@@ -948,10 +951,12 @@
"8295441779": "{registry_endpoint}/8295441779",
"8326166988": "{registry_endpoint}/8326166988",
"8350147184": "{registry_endpoint}/8350147184",
+ "8383214378": "{registry_endpoint}/8383214378",
"8270821739": "{registry_endpoint}/8270821739",
"8295446553": "{registry_endpoint}/8295446553",
"8326170338": "{registry_endpoint}/8326170338",
"8350153953": "{registry_endpoint}/8350153953",
+ "8383218300": "{registry_endpoint}/8383218300",
"5766172695": "{registry_endpoint}/5766172695",
"5805432230": "{registry_endpoint}/5805432230",
"5829029743": "{registry_endpoint}/5829029743",
@@ -1190,9 +1195,11 @@
"6561719783": "{registry_endpoint}/6561719783",
"6648883615": "{registry_endpoint}/6648883615",
"8107723363": "{registry_endpoint}/8107723363",
+ "8415400280": "{registry_endpoint}/8415400280",
"6561716686": "{registry_endpoint}/6561716686",
"6648881012": "{registry_endpoint}/6648881012",
"8107722414": "{registry_endpoint}/8107722414",
+ "8415398075": "{registry_endpoint}/8415398075",
"6184290684": "{registry_endpoint}/6184290684",
"6561765464": "{registry_endpoint}/6561765464",
"6184289846": "{registry_endpoint}/6184289846",
@@ -1313,6 +1320,7 @@
"7686710688": "{registry_endpoint}/7686710688",
"7879860842": "{registry_endpoint}/7879860842",
"8367004015": "{registry_endpoint}/8367004015",
+ "8418036683": "{registry_endpoint}/8418036683",
"2980468199": "{registry_endpoint}/2980468199",
"5562224408": "{registry_endpoint}/5562224408",
"7663495611": "{registry_endpoint}/7663495611"
@@ -2040,6 +2048,7 @@
"8295444951": "devtools/ymake/bin/ymake for darwin",
"8326169329": "devtools/ymake/bin/ymake for darwin",
"8350151492": "devtools/ymake/bin/ymake for darwin",
+ "8383217030": "devtools/ymake/bin/ymake for darwin",
"5766171800": "devtools/ymake/bin/ymake for darwin-arm64",
"5805430761": "devtools/ymake/bin/ymake for darwin-arm64",
"5829025456": "devtools/ymake/bin/ymake for darwin-arm64",
@@ -2114,6 +2123,7 @@
"8295443254": "devtools/ymake/bin/ymake for darwin-arm64",
"8326167937": "devtools/ymake/bin/ymake for darwin-arm64",
"8350149418": "devtools/ymake/bin/ymake for darwin-arm64",
+ "8383215700": "devtools/ymake/bin/ymake for darwin-arm64",
"5766173070": "devtools/ymake/bin/ymake for linux",
"5805432830": "devtools/ymake/bin/ymake for linux",
"5829031598": "devtools/ymake/bin/ymake for linux",
@@ -2188,6 +2198,7 @@
"8295448171": "devtools/ymake/bin/ymake for linux",
"8326171209": "devtools/ymake/bin/ymake for linux",
"8350156616": "devtools/ymake/bin/ymake for linux",
+ "8383219483": "devtools/ymake/bin/ymake for linux",
"5766171341": "devtools/ymake/bin/ymake for linux-aarch64",
"5805430188": "devtools/ymake/bin/ymake for linux-aarch64",
"5829023352": "devtools/ymake/bin/ymake for linux-aarch64",
@@ -2262,10 +2273,12 @@
"8295441779": "devtools/ymake/bin/ymake for linux-aarch64",
"8326166988": "devtools/ymake/bin/ymake for linux-aarch64",
"8350147184": "devtools/ymake/bin/ymake for linux-aarch64",
+ "8383214378": "devtools/ymake/bin/ymake for linux-aarch64",
"8270821739": "devtools/ymake/bin/ymake for win32",
"8295446553": "devtools/ymake/bin/ymake for win32",
"8326170338": "devtools/ymake/bin/ymake for win32",
"8350153953": "devtools/ymake/bin/ymake for win32",
+ "8383218300": "devtools/ymake/bin/ymake for win32",
"5766172695": "devtools/ymake/bin/ymake for win32-clang-cl",
"5805432230": "devtools/ymake/bin/ymake for win32-clang-cl",
"5829029743": "devtools/ymake/bin/ymake for win32-clang-cl",
@@ -2504,9 +2517,11 @@
"6561719783": "tools/black_linter/bin/black_linter for linux",
"6648883615": "tools/black_linter/bin/black_linter for linux",
"8107723363": "tools/black_linter/bin/black_linter for linux",
+ "8415400280": "tools/black_linter/bin/black_linter for linux",
"6561716686": "tools/black_linter/bin/black_linter for linux-aarch64",
"6648881012": "tools/black_linter/bin/black_linter for linux-aarch64",
"8107722414": "tools/black_linter/bin/black_linter for linux-aarch64",
+ "8415398075": "tools/black_linter/bin/black_linter for linux-aarch64",
"6184290684": "tools/flake8_linter/bin/flake8_linter for linux",
"6561765464": "tools/flake8_linter/bin/flake8_linter for linux",
"6184289846": "tools/flake8_linter/bin/flake8_linter for linux-aarch64",
@@ -2627,6 +2642,7 @@
"7686710688": "yt/go/ytrecipe/cmd/ytexec for linux",
"7879860842": "yt/go/ytrecipe/cmd/ytexec for linux",
"8367004015": "yt/go/ytrecipe/cmd/ytexec for linux",
+ "8418036683": "yt/go/ytrecipe/cmd/ytexec for linux",
"2980468199": "ytexec for linux",
"5562224408": "ytexec for linux",
"7663495611": "ytexec for linux"
diff --git a/build/platform/yfm/resources.json b/build/platform/yfm/resources.json
index 7fe8b459be..388bdb225f 100644
--- a/build/platform/yfm/resources.json
+++ b/build/platform/yfm/resources.json
@@ -1,16 +1,16 @@
{
"by_platform": {
"win32-x86_64": {
- "uri": "sbr:8369232374"
+ "uri": "sbr:8405194101"
},
"darwin-x86_64": {
- "uri": "sbr:8369230226"
+ "uri": "sbr:8405192217"
},
"linux-x86_64": {
- "uri": "sbr:8369228543"
+ "uri": "sbr:8405190721"
},
"darwin-arm64": {
- "uri": "sbr:8369230226"
+ "uri": "sbr:8405192217"
}
}
}
diff --git a/build/plugins/_dart_fields.py b/build/plugins/_dart_fields.py
index b3519c86e9..64232696d1 100644
--- a/build/plugins/_dart_fields.py
+++ b/build/plugins/_dart_fields.py
@@ -598,34 +598,10 @@ class LintConfigs:
@classmethod
def python_configs(cls, unit, flat_args, spec_args):
- resolved_configs = []
-
- if (custom_config := spec_args.get('CUSTOM_CONFIG')) and '/' in custom_config[0]:
- # black if custom config is passed.
- # XXX During migration we want to use the same macro parameter
- # for path to linter config and config type
- # thus, we check if '/' is present, if it is then it's a path
- # TODO delete once custom configs migrated to autoincludes scheme
- custom_config = custom_config[0]
- assert_file_exists(unit, custom_config)
- resolved_configs.append(custom_config)
- return {cls.KEY: serialize_list(resolved_configs)}
-
if config := cls._from_config_type(unit, spec_args):
# specified by config type, autoincludes scheme
return {cls.KEY: serialize_list([config])}
- if project_to_config_map := spec_args.get('PROJECT_TO_CONFIG_MAP'):
- # ruff, TODO delete once custom configs migrated to autoincludes scheme
- project_to_config_map = project_to_config_map[0]
- assert_file_exists(unit, project_to_config_map)
- resolved_configs.append(project_to_config_map)
- cfgs = get_linter_configs(unit, project_to_config_map).values()
- for c in cfgs:
- assert_file_exists(unit, c)
- resolved_configs.append(c)
- return {cls.KEY: serialize_list(resolved_configs)}
-
# default config
linter_name = spec_args['NAME'][0]
default_configs_path = spec_args['CONFIGS'][0]
@@ -636,10 +612,10 @@ class LintConfigs:
ymake.report_configure_error(message)
raise DartValueError()
assert_file_exists(unit, config)
- resolved_configs.append(config)
+ configs = [config]
if linter_name in ('flake8', 'py2_flake8'):
- resolved_configs.extend(spec_args.get('FLAKE_MIGRATIONS_CONFIG', []))
- return {cls.KEY: serialize_list(resolved_configs)}
+ configs.extend(spec_args.get('FLAKE_MIGRATIONS_CONFIG', []))
+ return {cls.KEY: serialize_list(configs)}
@classmethod
def cpp_configs(cls, unit, flat_args, spec_args):
@@ -697,7 +673,6 @@ class LintName:
def value(cls, unit, flat_args, spec_args):
lint_name = spec_args['NAME'][0]
if lint_name in ('flake8', 'py2_flake8') and (unit.get('DISABLE_FLAKE8') or 'no') == 'yes':
- unit.message(['INFO', 'Flake8 linting is disabled by `DISABLE_FLAKE8`'])
raise DartValueError()
return {cls.KEY: lint_name}
@@ -1132,27 +1107,6 @@ class TestFiles:
# XXX: this is a workaround to support very specific linting settings.
# Do not use it as a general mechanism!
- _GRUT_PREFIX = 'grut'
- _GRUT_INCLUDE_LINTER_TEST_PATHS = (
- 'grut/libs/bigrt/clients',
- 'grut/libs/bigrt/common',
- 'grut/libs/bigrt/data',
- 'grut/libs/bigrt/event_filter',
- 'grut/libs/bigrt/graph',
- 'grut/libs/bigrt/info_keepers',
- 'grut/libs/bigrt/processor',
- 'grut/libs/bigrt/profile',
- 'grut/libs/bigrt/profiles',
- 'grut/libs/bigrt/queue_info_config',
- 'grut/libs/bigrt/resharder/compute_shard_number',
- 'grut/libs/bigrt/server',
- 'grut/libs/bigrt/testlib',
- 'grut/libs/bigrt/transaction',
- 'grut/libs/shooter',
- )
-
- # XXX: this is a workaround to support very specific linting settings.
- # Do not use it as a general mechanism!
_MAPS_RENDERER_PREFIX = 'maps/renderer'
_MAPS_RENDERER_INCLUDE_LINTER_TEST_PATHS = (
'maps/renderer/cartograph',
@@ -1283,12 +1237,6 @@ class TestFiles:
@classmethod
def cpp_linter_files(cls, unit, flat_args, spec_args):
upath = unit.path()[3:]
- if upath.startswith(cls._GRUT_PREFIX):
- for path in cls._GRUT_INCLUDE_LINTER_TEST_PATHS:
- if os.path.commonpath([upath, path]) == path:
- break
- else:
- raise DartValueError()
if upath.startswith(cls._MAPS_RENDERER_PREFIX):
for path in cls._MAPS_RENDERER_INCLUDE_LINTER_TEST_PATHS:
diff --git a/build/plugins/pybuild.py b/build/plugins/pybuild.py
index 441579c561..f45239106d 100644
--- a/build/plugins/pybuild.py
+++ b/build/plugins/pybuild.py
@@ -589,7 +589,7 @@ def onpy_srcs(unit, *args):
root_rel_path = rootrel_arc_src(path, unit)
if with_py:
key = '/py_modules/' + mod
- res += [path, key, '-', 'resfs/src/{}=${{rootrel;input;context=TEXT:"{}"}}'.format(key, path)]
+ res += [path, key, '-', 'resfs/src/{}=${{rootrel;context=TEXT;input:"{}"}}'.format(key, path)]
if with_pyc:
src = unit.resolve_arc_path(path) or path
dst = path + uniq_suffix(path, unit)
diff --git a/build/plugins/ytest.py b/build/plugins/ytest.py
index 513887ac16..e65d1e65d4 100644
--- a/build/plugins/ytest.py
+++ b/build/plugins/ytest.py
@@ -1082,9 +1082,7 @@ def on_add_py_linter_check(fields, unit, *args):
"GLOBAL_RESOURCES": unlimited,
"FILE_PROCESSING_TIME": 1,
"EXTRA_PARAMS": unlimited,
- "PROJECT_TO_CONFIG_MAP": 1,
"FLAKE_MIGRATIONS_CONFIG": 1,
- "CUSTOM_CONFIG": 1,
"CONFIG_TYPE": 1,
}
_, spec_args = _common.sort_by_keywords(keywords, args)
diff --git a/build/prebuilt/tools/black_linter/resources.json b/build/prebuilt/tools/black_linter/resources.json
index 11ee13ddea..04cfd1c119 100644
--- a/build/prebuilt/tools/black_linter/resources.json
+++ b/build/prebuilt/tools/black_linter/resources.json
@@ -1,22 +1,22 @@
{
"by_platform": {
"darwin": {
- "uri": "sbr:8107723162"
+ "uri": "sbr:8415405351"
},
"darwin-arm64": {
- "uri": "sbr:8107722855"
+ "uri": "sbr:8415402574"
},
"linux": {
- "uri": "sbr:8107723363"
+ "uri": "sbr:8415400280"
},
"linux-aarch64": {
- "uri": "sbr:8107722414"
+ "uri": "sbr:8415398075"
},
"linux-ppc64le": {
"uri": "sbr:6648879110"
},
- "win32-clang-cl": {
- "uri": "sbr:8107721942"
+ "win32": {
+ "uri": "sbr:8415396109"
}
}
}
diff --git a/build/scripts/check_config_h.py b/build/scripts/check_config_h.py
index 07bc12e230..26facbc48f 100644
--- a/build/scripts/check_config_h.py
+++ b/build/scripts/check_config_h.py
@@ -82,6 +82,14 @@ static_assert(sizeof(wchar_t) == SIZEOF_WCHAR_T, "fixme 16");
#if defined(SIZEOF__BOOL)
//TODO
#endif
+
+#if defined(ALIGNOF_VOID_P)
+static_assert(alignof(void*) == ALIGNOF_VOID_P, "fixme 18");
+#endif
+
+#if defined(ALIGNOF_DOUBLE)
+static_assert(alignof(double) == ALIGNOF_DOUBLE, "fixme 19");
+#endif
"""
if __name__ == '__main__':
with open(sys.argv[2], 'w') as f:
diff --git a/build/scripts/docs_proto_markdown.tmpl b/build/scripts/docs_proto_markdown.tmpl
new file mode 100644
index 0000000000..87a94ce8c4
--- /dev/null
+++ b/build/scripts/docs_proto_markdown.tmpl
@@ -0,0 +1,112 @@
+# Protocol Documentation
+<a name="top"></a>
+
+## Table of Contents
+{{range .Files}}
+{{$file_name := .Name}}- [{{.Name}}](#{{.Name | anchor}})
+ {{- if .Messages }}
+ {{range .Messages}} - [{{.LongName}}](#{{.FullName | anchor}})
+ {{end}}
+ {{- end -}}
+ {{- if .Enums }}
+ {{range .Enums}} - [{{.LongName}}](#{{.FullName | anchor}})
+ {{end}}
+ {{- end -}}
+ {{- if .Extensions }}
+ {{range .Extensions}} - [File-level Extensions](#{{$file_name | anchor}}-extensions)
+ {{end}}
+ {{- end -}}
+ {{- if .Services }}
+ {{range .Services}} - [{{.Name}}](#{{.FullName | anchor}})
+ {{end}}
+ {{- end -}}
+{{end}}
+- [Scalar Value Types](#scalar-value-types)
+
+{{range .Files}}
+{{$file_name := .Name}}
+<a name="{{.Name | anchor}}"></a>
+<p align="right"><a href="#top">Top</a></p>
+
+## {{.Name}} {{ .Name | anchor}}
+{{.Description}}
+
+{{range .Messages}}
+<a name="{{.FullName | anchor}}"></a>
+
+### {{.LongName}} {{"{#"}}{{ .FullName | anchor}}{{"}"}}
+{{.Description}}
+
+{{if .HasFields}}
+#|
+|| *Field* | *Type* | *Label* | *Description* ||
+{{range .Fields -}}
+ || {{.Name}} | [{{.LongType}}](#{{.FullType | anchor}}) | {{.Label}} | {{if (index .Options "deprecated"|default false)}}**Deprecated.** {{end}}{{nobr .Description}}{{if .DefaultValue}} Default: {{.DefaultValue}}{{end}} ||
+{{end}}
+|#
+{{end}}
+
+{{if .HasExtensions}}
+#{
+|| *Extension* | *Type* | *Base* | *Number* | *Description* ||
+{{range .Extensions -}}
+ || {{.Name}} | {{.LongType}} | {{.ContainingLongType}} | {{.Number}} | {{nobr .Description}}{{if .DefaultValue}} Default: {{.DefaultValue}}{{end}} ||
+{{end}}
+|#
+{{end}}
+
+{{end}} <!-- end messages -->
+
+{{range .Enums}}
+<a name="{{.FullName | anchor}}"></a>
+
+### {{.LongName}}
+{{.Description}}
+
+#|
+|| *Name* | *Number* | *Description* ||
+{{range .Values -}}
+ || {{.Name}} | {{.Number}} | {{nobr .Description}} ||
+{{end}}
+|#
+
+{{end}} <!-- end enums -->
+
+{{if .HasExtensions}}
+<a name="{{$file_name | anchor}}-extensions"></a>
+
+### File-level Extensions
+
+#|
+|| *Extension* | *Type* | *Base* | *Number* | *Description* ||
+{{range .Extensions -}}
+ || {{.Name}} | {{.LongType}} | {{.ContainingLongType}} | {{.Number}} | {{nobr .Description}}{{if .DefaultValue}} Default: `{{.DefaultValue}}`{{end}} ||
+{{end}}
+|#
+{{end}} <!-- end HasExtensions -->
+
+{{range .Services}}
+<a name="{{.FullName | anchor}}"></a>
+
+### {{.Name}}
+{{.Description}}
+
+#|
+|| *Method Name* | *Request Type* | *Response Type* | *Description* ||
+{{range .Methods -}}
+ || {{.Name}} | [{{.RequestLongType}}](#{{.RequestFullType | anchor}}){{if .RequestStreaming}} stream{{end}} | [{{.ResponseLongType}}](#{{.ResponseFullType | anchor}}){{if .ResponseStreaming}} stream{{end}} | {{nobr .Description }} ||
+{{end}}
+|#
+
+{{end}} <!-- end services -->
+
+{{end}}
+
+## Scalar Value Types
+
+#|
+|| .proto Type | Notes | C++ | Java | Python | Go | C# | PHP | Ruby ||
+{{range .Scalars -}}
+ || <a name="{{.ProtoType | anchor}}" /> {{.ProtoType}} | {{.Notes}} | {{.CppType}} | {{.JavaType}} | {{.PythonType}} | {{.GoType}} | {{.CSharp}} | {{.PhpType}} | {{.RubyType}} ||
+{{end}}
+|#
diff --git a/build/scripts/docs_proto_wrapper.py b/build/scripts/docs_proto_wrapper.py
index 71cb2124a2..75ae807612 100644
--- a/build/scripts/docs_proto_wrapper.py
+++ b/build/scripts/docs_proto_wrapper.py
@@ -7,6 +7,7 @@ import pathlib
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--docs-output', required=True)
+ parser.add_argument('--template', required=True)
parser.add_argument('args', nargs='+')
return parser.parse_args()
@@ -19,7 +20,8 @@ def main(args):
# --doc_opt=markdon,TARGET_FILE_NAME
target_file = pathlib.Path(args.docs_output)
- cmd.append(f'--doc_opt=markdown,{target_file.name}')
+ target_template = pathlib.Path(args.template)
+ cmd.append(f'--doc_opt={target_template},{target_file.name}')
cmd.append(f'--doc_out={target_file.parent}')
try:
diff --git a/build/sysincl/esp-idf.yml b/build/sysincl/esp-idf.yml
index 736166396e..f03767203d 100644
--- a/build/sysincl/esp-idf.yml
+++ b/build/sysincl/esp-idf.yml
@@ -1,4 +1,5 @@
-- includes:
+- source_filter: "^contrib/libs/esp-idf|contrib/libs/connectedhomeip|taxi"
+ includes:
# freertos
- freertos/FreeRTOS.h: contrib/libs/esp-idf/components/freertos/include/freertos/FreeRTOS.h
- freertos/semphr.h: contrib/libs/esp-idf/components/freertos/include/freertos/semphr.h
diff --git a/build/ymake.core.conf b/build/ymake.core.conf
index 99dfcbdef9..b6f076e92d 100644
--- a/build/ymake.core.conf
+++ b/build/ymake.core.conf
@@ -250,14 +250,6 @@ otherwise {
COPY_PROFILE_RUNTIME=
}
-# tag:codenav
-when ($CODENAVIGATION && $NOCODENAVIGATION != "yes") {
- PY_PROGRAM_LINK_EXE=$LINK_EXE && ${hide;kv:"pyndex $TARGET"}
-}
-otherwise {
- PY_PROGRAM_LINK_EXE=$LINK_EXE
-}
-
when ($RETRY == "yes") {
RETRY_ARGS=$YMAKE_PYTHON ${input:"build/scripts/retry_cc.py"}
}
@@ -430,8 +422,8 @@ when ($OS_EMSCRIPTEN == "yes") {
###
### @see https://a.yandex-team.ru/arc/trunk/arcadia/build/scripts/check_config_h.py for exact details
macro CHECK_CONFIG_H(Conf) {
- .CMD=$YMAKE_PYTHON ${input:"build/scripts/check_config_h.py"} ${input;rootrel:Conf} ${output;nopath;noext:Conf.config.cpp} ${hide;kv:"p CH"} ${hide;kv:"pc yellow"}
- .SEM=${hide;input;rootrel:Conf} ${hide;output;nopath;noext:Conf.config.cpp}
+ .CMD=$YMAKE_PYTHON ${input:"build/scripts/check_config_h.py"} ${rootrel;input:Conf} ${output;suf=.config.cpp;nopath;noext:Conf} ${hide;kv:"p CH"} ${hide;kv:"pc yellow"}
+ .SEM=${hide;rootrel;input:Conf} ${hide;output;suf=.config.cpp;nopath;noext:Conf}
OUTPUT_INCLUDES=$Conf
}
@@ -442,7 +434,7 @@ REQUIRED_TRANSITIVE_PEERS=
###
### This macro can be used if module depends on the directories specified but they can't be listed
### as direct PEERDIR dependencies (due to public include order or link order issues).
-macro REQUIRES(DIRS[]) {
+macro REQUIRES(DIRS...) {
SET_APPEND(REQUIRED_TRANSITIVE_PEERS $DIRS)
}
@@ -476,7 +468,7 @@ macro _HASH_HELPER(Args...) {
macro _RESOURCE_SEM(INPUTS[], KEYS[], OPTS[]) {
SET(RESOURCE_OUTPUT $_HASH_HELPER($INPUTS $KEYS $OPTS).cpp)
- .SEM=target_macroses-ITEM && target_macroses-macro resources && target_macroses-args ${output;global:RESOURCE_OUTPUT} INPUTS ${input:INPUTS} KEYS $KEYS OPTS $OPTS ${hide;tool:"tools/rescompiler/bin"}
+ .SEM=target_macroses-ITEM && target_macroses-macro resources && target_macroses-args ${global;output:RESOURCE_OUTPUT} INPUTS ${input:INPUTS} KEYS $KEYS OPTS $OPTS ${hide;tool:"tools/rescompiler/bin"}
}
# tag:built-in
@@ -593,13 +585,13 @@ module _BASE_UNIT: _BARE_UNIT {
PEERDIR_TAGS=CPP_PROTO CPP_PROTO_FROM_SCHEMA CPP_FBS CPP_ROS CPP_MLAUNCH H_IDL PY2 PY2_NATIVE YQL_UDF_STATIC __EMPTY__ RESOURCE_LIB DLL_LIB
_CPP_PROTO_WRAPPER_BASE=$YMAKE_PYTHON3 ${input:"build/scripts/cpp_proto_wrapper.py"}
- _CPP_PROTO_CMDLINE_BASE=${cwd;rootdir;input:File} $PROTOC -I=./$PROTO_NAMESPACE -I=$ARCADIA_ROOT/$PROTO_NAMESPACE ${pre=-I=:_PROTO__INCLUDE} -I=$ARCADIA_BUILD_ROOT -I=$PROTOBUF_INCLUDE_PATH --cpp_out=${CPP_PROTO_PLUGINS}$ARCADIA_BUILD_ROOT/$PROTO_NAMESPACE $_PROTOC_FLAGS $PROTOC_STYLEGUIDE_OUT $PROTOC_PLUGIN_STYLEGUIDE ${hide:PROTO_FAKEID} ${input;rootrel:File}
+ _CPP_PROTO_CMDLINE_BASE=${cwd;rootdir;input:File} $PROTOC -I=./$PROTO_NAMESPACE -I=$ARCADIA_ROOT/$PROTO_NAMESPACE ${pre=-I=:_PROTO__INCLUDE} -I=$ARCADIA_BUILD_ROOT -I=$PROTOBUF_INCLUDE_PATH --cpp_out=${CPP_PROTO_PLUGINS}$ARCADIA_BUILD_ROOT/$PROTO_NAMESPACE $_PROTOC_FLAGS $PROTOC_STYLEGUIDE_OUT $PROTOC_PLUGIN_STYLEGUIDE ${hide:PROTO_FAKEID} ${rootrel;input:File}
CPP_PROTO_CMDLINE=$_CPP_PROTO_WRAPPER_BASE --outputs $CPP_PROTO_OUTS -- $_CPP_PROTO_CMDLINE_BASE
- CPP_PROTO_OUTS+=${output;norel;nopath;noext:File.pb.cc} ${output;main;norel;nopath;noext:File.pb.h}
- CPP_PROTO_OUTS_SEM+=${output;main;hide;norel;nopath;noext:File.pb.h}
+ CPP_PROTO_OUTS+=${norel;output;suf=.pb.cc;nopath;noext:File} ${main;norel;output;suf=.pb.h;nopath;noext:File}
+ CPP_PROTO_OUTS_SEM+=${hide;main;norel;output;suf=.pb.h;nopath;noext:File}
CPP_EV_CMDLINE=$_CPP_PROTO_WRAPPER_BASE --outputs $CPP_EV_OUTS -- $_CPP_PROTO_CMDLINE_BASE
- CPP_EV_OUTS+=${output;norel:File.pb.cc} ${output;norel:File.pb.h}
- CPP_EV_OUTS_SEM+=${hide;output;norel:File.pb.h}
+ CPP_EV_OUTS+=${norel;output;suf=.pb.cc:File} ${norel;output;suf=.pb.h:File}
+ CPP_EV_OUTS_SEM+=${hide;norel;output;suf=.pb.h:File}
when ($SWIG_LANG == "perl") {
_SWIG_CMD=$_SWIG_PERL_CMD
@@ -640,8 +632,8 @@ module _BASE_UNIT: _BARE_UNIT {
}
when ($PROTOC_TRANSITIVE_HEADERS == "no") {
CPP_PROTO_PLUGINS=proto_h=true:${CPP_PROTO_PLUGINS}
- CPP_PROTO_OUTS+=${output;main;norel;nopath;noext:File.deps.pb.h}
- CPP_PROTO_OUTS_SEM+=${output;main;hide;norel;nopath;noext:File.deps.pb.h}
+ CPP_PROTO_OUTS+=${main;norel;output;suf=.deps.pb.h;nopath;noext:File}
+ CPP_PROTO_OUTS_SEM+=${hide;main;norel;output;suf=.deps.pb.h;nopath;noext:File}
PROTOC_EXTRA_OUTS_SEM=&& protoc_extra_outs .deps.pb.h
}
}
@@ -727,10 +719,12 @@ module _BASE_UNIT: _BARE_UNIT {
when ($CLANG) {
when ($PGO_ADD == "yes") {
CFLAGS+=-fprofile-instr-generate
+ NO_PGO_CFLAGS=-fno-profile-instr-generate
LDFLAGS+=-fprofile-instr-generate
}
when ($PGO_PATH) {
CFLAGS+=-fprofile-instr-use=$PGO_PATH -Wno-profile-instr-unprofiled -Wno-profile-instr-out-of-date
+ NO_PGO_CFLAGS=-fno-profile-instr-use
LDFLAGS+=-fprofile-instr-use=$PGO_PATH
}
}
@@ -1688,12 +1682,14 @@ macro LINT(level) {
###
### @see: [RUN()](#macro_RUN)
module EXECTEST: _BARE_UNIT {
+ .CMD=$TOUCH_UNIT
.NODE_TYPE=Program
.FINAL_TARGET=no
.ALLOWED=YT_SPEC
.RESTRICTED=FORK_TEST_FILES
.DEFAULT_NAME_GENERATOR=FullPath
.ARGS_PARSER=Base
+ .STRUCT_CMD=yes
SET(MODULE_SUFFIX .pkg.fake)
SETUP_EXECTEST()
SET_APPEND(_MAKEFILE_INCLUDE_LIKE_DEPS canondata/result.json)
@@ -2614,7 +2610,7 @@ macro ADD_COMPILABLE_TRANSLIT(TranslitTable, NGrams, Name, Options...) {
_COPY_FILE_CONTEXT=TEXT
macro _COPY_FILE_IMPL(TEXT[], AUTO_DST="", NOAUTO_DST="", OUTPUT_INCLUDES[], INDUCED_DEPS[], OUTPUT_INCLUDES_INP[], FILE...) {
- .CMD=$COPY_CMD ${input:FILE} ${context=TEXT;input:TEXT} ${output:AUTO_DST} ${noauto;output:NOAUTO_DST} ${hide;output_include:OUTPUT_INCLUDES} ${output_include;from_input;hide:OUTPUT_INCLUDES_INP} $INDUCED_DEPS ${hide;kv:"p CP"} ${hide;kv:"pc light-cyan"}
+ .CMD=$COPY_CMD ${input:FILE} ${context=TEXT;input:TEXT} ${output:AUTO_DST} ${noauto;output:NOAUTO_DST} ${hide;output_include:OUTPUT_INCLUDES} ${hide;from_input;output_include:OUTPUT_INCLUDES_INP} $INDUCED_DEPS ${hide;kv:"p CP"} ${hide;kv:"pc light-cyan"}
.SEM=target_commands-ITEM && target_commands-macro copy_file && target_commands-args ${input:FILE} ${output:AUTO_DST} ${noauto;output:NOAUTO_DST}
}
@@ -2778,7 +2774,7 @@ macro SIZE(Type) {
### This macro doesn't place all files into Out, it emits #include<Src>... Use this for C++ source files only.
### You should specify the file name with the extension as Out. Further processing will be done according to this extension.
macro JOIN_SRCS(Out, Src...) {
- .CMD=$YMAKE_PYTHON3 ${input:"build/scripts/gen_join_srcs.py"} ${hide;input:"build/scripts/process_command_files.py"} ${output:Out} --ya-start-command-file ${context=TEXT;input;rootrel:Src} --ya-end-command-file ${output_include;from_input;hide:Src} ${hide;kv:"p JS"} ${hide;kv:"pc magenta"}
+ .CMD=$YMAKE_PYTHON3 ${input:"build/scripts/gen_join_srcs.py"} ${hide;input:"build/scripts/process_command_files.py"} ${output:Out} --ya-start-command-file ${rootrel;context=TEXT;input:Src} --ya-end-command-file ${hide;from_input;output_include:Src} ${hide;kv:"p JS"} ${hide;kv:"pc magenta"}
.SEM=target_macroses-ITEM && target_macroses-macro target_joined_source && target_macroses-args $Out ${input:Src} ${hide;output;suf=.o:Out} ${hide;input:"build/scripts/gen_join_srcs.py"} ${hide;input:"build/scripts/process_command_files.py"}
_CONDITIONAL_SRCS($TIDY_VALUE $Src)
}
@@ -2789,7 +2785,7 @@ macro JOIN_SRCS(Out, Src...) {
### This macro doesn't place all files into Out, it emits #include<Src>... Use this for C++ source files only.
### You should specify file name with the extension as Out. Further processing will be done according to this extension.
macro JOIN_SRCS_GLOBAL(Out, Src...) {
- .CMD=$YMAKE_PYTHON3 ${input:"build/scripts/gen_join_srcs.py"} ${hide;input:"build/scripts/process_command_files.py"} ${noauto;output:Out} --ya-start-command-file ${input;rootrel;context=TEXT:Src} --ya-end-command-file ${output_include;from_input;hide:Src} ${hide;kv:"p JS"} ${hide;kv:"pc magenta"}
+ .CMD=$YMAKE_PYTHON3 ${input:"build/scripts/gen_join_srcs.py"} ${hide;input:"build/scripts/process_command_files.py"} ${noauto;output:Out} --ya-start-command-file ${rootrel;input;context=TEXT:Src} --ya-end-command-file ${hide;from_input;output_include:Src} ${hide;kv:"p JS"} ${hide;kv:"pc magenta"}
SRCS(GLOBAL $Out)
}
@@ -2799,7 +2795,7 @@ macro JOIN_SRCS_GLOBAL(Out, Src...) {
### This macro places all files into a single file, so it will work with any sources.
### You should specify file name with the extension as Out. Further processing will be done according to this extension.
macro FLAT_JOIN_SRCS_GLOBAL(Out, Src...) {
- .CMD=$FS_TOOLS cat ${noauto;output:Out} --ya-start-command-file ${context=TEXT;input:Src} --ya-end-command-file ${output_include;from_input;hide:Src} ${hide;kv:"p JS"} ${hide;kv:"pc magenta"}
+ .CMD=$FS_TOOLS cat ${noauto;output:Out} --ya-start-command-file ${context=TEXT;input:Src} --ya-end-command-file ${hide;from_input;output_include:Src} ${hide;kv:"p JS"} ${hide;kv:"pc magenta"}
SRCS(GLOBAL $Out)
}
@@ -2826,6 +2822,7 @@ SSE4_CFLAGS=
XOP_CFLAGS=
NO_LTO_CFLAGS=
+NO_PGO_CFLAGS=
# tag:cpu
when (($ARCH_X86_64 || $ARCH_I386) && $DISABLE_INSTRUCTION_SETS != "yes") {
@@ -2979,7 +2976,7 @@ when (($ARCH_AARCH64 || $ARCH_ARM || $ARCH_PPC64LE || $ARCH_RISCV32) == "yes") {
# tag:src-processing
macro _SRC("rodata", SRC, SRCFLAGS...) {
- .CMD=$RODATA_COMPILE ${hide;kv:"p RD"} ${hide;kv:"pc light-green"}
+ .CMD=$RODATA_COMPILE && ${hide;kv:"p RD"} ${hide;kv:"pc light-green"}
.SEM=target_macroses-ITEM && target_macroses-macro target_rodata_sources && target_macroses-args PRIVATE ${input:SRC} ${hide;output;suf=${OBJ_SUF}.o:SRC} ${hide;input:"build/scripts/rodata2cpp.py"}
}
@@ -3030,12 +3027,12 @@ macro _SRC("xs", SRC, SRCFLAGS...) {
# tag:src-processing
macro _SRC("gperf", SRC, SRCFLAGS...) {
- .CMD=$RUN_NO_SANITIZE ${tool:"contrib/tools/gperf"} $GP_FLAGS ${SRCFLAGS} ${pre=-Nin_;suf=_set;nopath;noallext:SRC} ${input:SRC} ${stdout;output;nopath;noext;defext=.gperf.cpp:SRC} ${hide;kv:"p GP"} ${hide;kv:"pc yellow"}
+ .CMD=$RUN_NO_SANITIZE ${tool:"contrib/tools/gperf"} $GP_FLAGS ${SRCFLAGS} ${pre=-Nin_;suf=_set;nopath;noallext:SRC} ${input:SRC} ${stdout;output;defext=.gperf.cpp;nopath;noext:SRC} ${hide;kv:"p GP"} ${hide;kv:"pc yellow"}
}
# tag:src-processing
macro _SRC("rl", SRC, SRCFLAGS...) {
- .CMD=$RUN_NO_SANITIZE ${tool:"contrib/tools/ragel5/ragel"} $RAGEL_FLAGS ${SRCFLAGS} -o ${tmp:SRC.tmp} ${input:SRC} && $RUN_NO_SANITIZE ${tool:"contrib/tools/ragel5/rlgen-cd"} $RLGEN_FLAGS -o ${output;nopath;noext;defext=.rl5.cpp:SRC} ${tmp:SRC.tmp} ${hide;kv:"p R5"} ${hide;kv:"pc yellow"}
+ .CMD=$RUN_NO_SANITIZE ${tool:"contrib/tools/ragel5/ragel"} $RAGEL_FLAGS ${SRCFLAGS} -o ${tmp;suf=.tmp:SRC} ${input:SRC} && $RUN_NO_SANITIZE ${tool:"contrib/tools/ragel5/rlgen-cd"} $RLGEN_FLAGS -o ${output;defext=.rl5.cpp;nopath;noext:SRC} ${tmp;suf=.tmp:SRC} ${hide;kv:"p R5"} ${hide;kv:"pc yellow"}
}
macro _SRC("xsyn", SRC, SRCFLAGS...) {
@@ -3049,18 +3046,18 @@ macro _SRC("rl5", SRC, SRCFLAGS...) {
# tag:src-processing
macro _SRC("asp", SRC, SRCFLAGS...) {
- .CMD=$RUN_NO_SANITIZE ${tool:"tools/html2cpp"} ${input:SRC} ${output:SRC.cpp} ${hide;kv:"p HT"} ${hide;kv:"pc yellow"}
+ .CMD=$RUN_NO_SANITIZE ${tool:"tools/html2cpp"} ${input:SRC} ${output;suf=.cpp:SRC} ${hide;kv:"p HT"} ${hide;kv:"pc yellow"}
}
# tag:src-processing
macro _SRC("rl6", SRC, SRCFLAGS...) {
- .CMD=$RUN_NO_SANITIZE ${tool:"contrib/tools/ragel6"} $RAGEL6_FLAGS ${SRCFLAGS} -L -I${ARCADIA_ROOT} -o ${output;nopath;noext;defext=.rl6.cpp:SRC} ${input:SRC} ${hide;kv:"p R6"} ${hide;kv:"pc yellow"}
+ .CMD=$RUN_NO_SANITIZE ${tool:"contrib/tools/ragel6"} $RAGEL6_FLAGS ${SRCFLAGS} -L -I${ARCADIA_ROOT} -o ${output;defext=.rl6.cpp;nopath;noext:SRC} ${input:SRC} ${hide;kv:"p R6"} ${hide;kv:"pc yellow"}
.SEM=target_macroses-ITEM && target_macroses-macro target_ragel_lexers && target_macroses-args PRIVATE ${input:SRC} ${hide;output;suf=${OBJ_SUF}.o:SRC} $RAGEL6_FLAGS ${SRCFLAGS} && platform_vars-RAGEL_FLAGS "-L -I $S/" && conan-tool_requires ragel/6.10 && conan-imports 'bin, ragel* -> ./bin' ${hide;input:"build/scripts/run_tool.py"}
}
# tag:src-processing
macro _SRC("xsyn", SRC, SRCFLAGS...) {
- .CMD=$YMAKE_PYTHON ${input:"library/cpp/xml/parslib/xsyn2ragel.py"} ${input:SRC} ${input:"library/cpp/xml/parslib/xmlpars.xh"} dontuse ${stdout;output:SRC.h.rl5} ${hide;kv:"p XN"} ${hide;kv:"pc yellow"}
+ .CMD=$YMAKE_PYTHON ${input:"library/cpp/xml/parslib/xsyn2ragel.py"} ${input:SRC} ${input:"library/cpp/xml/parslib/xmlpars.xh"} dontuse ${stdout;output;suf=.h.rl5:SRC} ${hide;kv:"p XN"} ${hide;kv:"pc yellow"}
}
# tag:src-processing
@@ -3079,7 +3076,7 @@ macro _SRC("proto", SRC, SRCFLAGS...) {
macro _SRC("gztproto", SRC, SRCFLAGS...) {
# _PROTO__INCLUDE is before ARCADIA_ROOT in includes because in gazetteer we don't use builtins now and paths' canonization (resolving) depends on order of roots.
# descriptor.proto must be resolved as google/protobuf/descriptor.proto
- .CMD=${tool:"dict/gazetteer/converter"} -I$PROTOBUF_INCLUDE_PATH ${pre="-I":_PROTO__INCLUDE} -I$ARCADIA_ROOT ${SRCFLAGS} ${input:SRC} ${output;nopath;noext;norel:SRC.proto} ${hide;kv:"p GZ"} ${hide;kv:"pc yellow"}
+ .CMD=${tool:"dict/gazetteer/converter"} -I$PROTOBUF_INCLUDE_PATH ${pre="-I":_PROTO__INCLUDE} -I$ARCADIA_ROOT ${SRCFLAGS} ${input:SRC} ${norel;output;suf=.proto;nopath;noext:SRC} ${hide;kv:"p GZ"} ${hide;kv:"pc yellow"}
.PEERDIR=kernel/gazetteer/proto
}
@@ -3106,8 +3103,8 @@ otherwise {
# tag:src-processing
macro _SRC("pyx", SRC, SRCFLAGS...) {
# Copy-paste from BUILDWITH_CYTHON
- .CMD=$RUN_CYTHON_SCRIPT $CYTHON_OPTIONS --cplus ${CYTHON_CPP_OUTPUT_INCLUDES} ${pre=-I:_CYTHON__INCLUDE} ${input:SRC} -o ${output;tobindir;suf=${OBJ_SUF}.cpp:SRC} $CYTHON_OUTPUT_INCLUDES ${SRCFLAGS} ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
- .SEM=target_options-privates-ITEM && target_options-privates-option target_cython_sources && target_options-privates-args ${input:Src} ${hide;output;tobindir:Src.fake.o} && target_macroses-ITEM && target_macroses-macro target_cython_options && target_macroses-args $CYTHON_OPTIONS --cplus ${SRCFLAGS} ${CYTHON_CPP_OUTPUT_INCLUDES} && target_macroses-ITEM && target_macroses-macro target_cython_include_directories && target_macroses-args $_CYTHON__INCLUDE && target_macroses-ITEM && target_macroses-macro set_python_type_for_cython && target_macroses-args $PYTHON_TYPE_FOR_CYTHON
+ .CMD=$RUN_CYTHON_SCRIPT $CYTHON_OPTIONS --cplus ${CYTHON_CPP_OUTPUT_INCLUDES} ${pre=-I:_CYTHON__INCLUDE} ${input:SRC} -o ${tobindir;output;suf=${OBJ_SUF}.cpp:SRC} $CYTHON_OUTPUT_INCLUDES ${SRCFLAGS} ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
+ .SEM=target_options-privates-ITEM && target_options-privates-option target_cython_sources && target_options-privates-args ${input:Src} ${hide;tobindir;output;suf=.fake.o:Src} && target_macroses-ITEM && target_macroses-macro target_cython_options && target_macroses-args $CYTHON_OPTIONS --cplus ${SRCFLAGS} ${CYTHON_CPP_OUTPUT_INCLUDES} && target_macroses-ITEM && target_macroses-macro target_cython_include_directories && target_macroses-args $_CYTHON__INCLUDE && target_macroses-ITEM && target_macroses-macro set_python_type_for_cython && target_macroses-args $PYTHON_TYPE_FOR_CYTHON
.ADDINCL=$_CYTHON_SYS_INCLUDES
}
@@ -3123,21 +3120,21 @@ macro MANUAL_GENERATION(Outs...) {
# tag:src-processing
macro _SRC("sc", SRC, SRCFLAGS...) {
- .CMD=${tool:"tools/domschemec"} --in ${input:SRC} --out ${output;norel:SRC.h} ${hide;output_include:"library/cpp/domscheme/runtime.h"} ${SRCFLAGS} ${hide;kv:"p SC"} ${hide;kv:"pc yellow"}
+ .CMD=${tool:"tools/domschemec"} --in ${input:SRC} --out ${norel;output;suf=.h:SRC} ${hide;output_include:"library/cpp/domscheme/runtime.h"} ${SRCFLAGS} ${hide;kv:"p SC"} ${hide;kv:"pc yellow"}
.PEERDIR=library/cpp/domscheme
}
# tag:src-processing
macro _SRC("ssqls", SRC, SRCFLAGS...) {
- .CMD=${tool:"metrika/core/tools/ssqls"} ${input;notransformbuilddir:SRC} -S $ARCADIA_ROOT -B $ARCADIA_BUILD_ROOT $SRCFLAGS ${output;noext;hide:SRC.cpp} ${output;noext;hide:SRC.h} ${hide;kv:"p SS"} ${hide;kv:"pc yellow"}
+ .CMD=${tool:"metrika/core/tools/ssqls"} ${input;notransformbuilddir:SRC} -S $ARCADIA_ROOT -B $ARCADIA_BUILD_ROOT $SRCFLAGS ${hide;output;suf=.cpp;noext:SRC} ${hide;output;suf=.h;noext:SRC} ${hide;kv:"p SS"} ${hide;kv:"pc yellow"}
}
macro _SRC_f_new(SRC, SRCFLAGS...) {
- .CMD=${tool:"build/platform/flang"} -c ${input:SRC} -o ${output:SRC.o} ${hide;kv:"p FL"} ${hide;kv:"pc light-green"}
+ .CMD=${tool:"build/platform/flang"} -c ${input:SRC} -o ${output;suf=.o:SRC} ${hide;kv:"p FL"} ${hide;kv:"pc light-green"}
}
macro _SRC_f_old(SRC, SRCFLAGS...) {
- .CMD=$YMAKE_PYTHON ${input:"build/scripts/f2c.py"} -t ${tool:"contrib/tools/f2c"} -c ${input:SRC} -o ${output:SRC.c} ${hide;output_include:"f2c.h"} ${hide;kv:"p FT"} ${hide;kv:"pc light-green"}
+ .CMD=$YMAKE_PYTHON ${input:"build/scripts/f2c.py"} -t ${tool:"contrib/tools/f2c"} -c ${input:SRC} -o ${output;suf=.c:SRC} ${hide;output_include:"f2c.h"} ${hide;kv:"p FT"} ${hide;kv:"pc light-green"}
}
### @usage: AR_PLUGIN(plugin_name)
@@ -3154,7 +3151,7 @@ macro AR_PLUGIN(name) {
### Script will receive all arguments to link_exe.py, and can output into stdout preprocessed list
### of all arguments, in JSON format
macro LD_PLUGIN(Name) {
- .CMD=$COPY_CMD ${context=TEXT;input:Name} ${noauto;output;global;suf=.pyplugin:Name}
+ .CMD=$COPY_CMD ${context=TEXT;input:Name} ${global;noauto;output;suf=.pyplugin:Name}
.SEM=_SEM_IGNORED
}
@@ -3304,12 +3301,12 @@ macro _SRC("make", SRC, SRCFLAGS...) {
# tag:src-processing
macro _SRC_py2src(SRC, SRCFLAGS...) {
- .CMD=${cwd:BINDIR} $YMAKE_PYTHON3 ${input:"build/scripts/compile_pysrc.py"} --input ${input:SRC} --output ${output;noext;suf=.py2_raw.cpp:SRC} --rescompiler ${tool:"tools/rescompiler"} py2 --python $(PYTHON)/python --py_compile ${input:"build/scripts/py_compile.py"} ${hide;kv:"p P2"} ${hide;kv:"pc light-green"}
+ .CMD=${cwd:BINDIR} $YMAKE_PYTHON3 ${input:"build/scripts/compile_pysrc.py"} --input ${input:SRC} --output ${output;suf=.py2_raw.cpp;noext:SRC} --rescompiler ${tool:"tools/rescompiler"} py2 --python $(PYTHON)/python --py_compile ${input:"build/scripts/py_compile.py"} ${hide;kv:"p P2"} ${hide;kv:"pc light-green"}
}
# tag:src-processing
macro _SRC_py3src(SRC, SRCFLAGS...) {
- .CMD=${cwd:BINDIR} $YMAKE_PYTHON3 ${input:"build/scripts/compile_pysrc.py"} --input ${input:SRC} --output ${output;noext;suf=.py3_raw.cpp:SRC} --rescompiler ${tool:"tools/rescompiler"} py3 --pycc ${tool:"tools/py3cc"} ${hide;kv:"p P3"} ${hide;kv:"pc light-green"}
+ .CMD=${cwd:BINDIR} $YMAKE_PYTHON3 ${input:"build/scripts/compile_pysrc.py"} --input ${input:SRC} --output ${output;suf=.py3_raw.cpp;noext:SRC} --rescompiler ${tool:"tools/rescompiler"} py3 --pycc ${tool:"tools/py3cc"} ${hide;kv:"p P3"} ${hide;kv:"pc light-green"}
}
# tag:src-processing python-specific
@@ -3454,10 +3451,10 @@ macro COMPILE_LUA_OPENRESTY(Src, NAME="") {
GETTEXT_KEEP_PATH=no
_MO_OUTPUT=
when ($GETTEXT_KEEP_PATH == "yes") {
- _MO_OUTPUT=${noauto;output;norel;noext:SRC.mo}
+ _MO_OUTPUT=${noauto;norel;output;suf=.mo;noext:SRC}
}
otherwise {
- _MO_OUTPUT=${noauto;output;nopath;noext;tobindir:SRC.mo}
+ _MO_OUTPUT=${noauto;tobindir;output;suf=.mo;nopath;noext:SRC}
}
# tag:src-processing
@@ -3761,8 +3758,8 @@ macro SRC_C_NO_LTO(FILE, FLAGS...) {
###
### Generates .cpp file from .pyx.
macro BUILDWITH_CYTHON_CPP(Src, Options...) {
- .CMD=$RUN_CYTHON_SCRIPT $CYTHON_OPTIONS ${Options} --cplus ${CYTHON_CPP_OUTPUT_INCLUDES} ${pre=-I:_CYTHON__INCLUDE} ${input:Src} -o ${output;tobindir;suf=${OBJ_SUF}.cpp:Src} $CYTHON_OUTPUT_INCLUDES ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
- .SEM=target_options-privates-ITEM && target_options-privates-option target_cython_sources && target_options-privates-args ${input:Src} ${hide;output;tobindir:Src.fake.o} && target_macroses-ITEM && target_macroses-macro target_cython_options && target_macroses-args $CYTHON_OPTIONS ${Options} --cplus ${CYTHON_CPP_OUTPUT_INCLUDES} && target_macroses-ITEM && target_macroses-macro target_cython_include_directories && target_macroses-args $_CYTHON__INCLUDE && target_macroses-ITEM && target_macroses-macro set_python_type_for_cython && target_macroses-args $PYTHON_TYPE_FOR_CYTHON
+ .CMD=$RUN_CYTHON_SCRIPT $CYTHON_OPTIONS ${Options} --cplus ${CYTHON_CPP_OUTPUT_INCLUDES} ${pre=-I:_CYTHON__INCLUDE} ${input:Src} -o ${tobindir;output;suf=${OBJ_SUF}.cpp:Src} $CYTHON_OUTPUT_INCLUDES ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
+ .SEM=target_options-privates-ITEM && target_options-privates-option target_cython_sources && target_options-privates-args ${input:Src} ${hide;tobindir;output;suf=.fake.o:Src} && target_macroses-ITEM && target_macroses-macro target_cython_options && target_macroses-args $CYTHON_OPTIONS ${Options} --cplus ${CYTHON_CPP_OUTPUT_INCLUDES} && target_macroses-ITEM && target_macroses-macro target_cython_include_directories && target_macroses-args $_CYTHON__INCLUDE && target_macroses-ITEM && target_macroses-macro set_python_type_for_cython && target_macroses-args $PYTHON_TYPE_FOR_CYTHON
ADDINCL($_CYTHON_SYS_INCLUDES)
}
@@ -3772,8 +3769,8 @@ macro BUILDWITH_CYTHON_CPP(Src, Options...) {
### Generates .cpp file from .pyx and attach extra input Dep.
### If Dep changes the .cpp file will be re-generated.
macro _BUILDWITH_CYTHON_CPP_DEP(Src, Dep, Options...) {
- .CMD=$RUN_CYTHON_SCRIPT $CYTHON_OPTIONS ${Options} --cplus ${CYTHON_CPP_OUTPUT_INCLUDES} ${pre=-I:_CYTHON__INCLUDE} ${input:Src} ${hide;input:Dep} -o ${output;tobindir;suf=${OBJ_SUF}.cpp:Src} $CYTHON_OUTPUT_INCLUDES ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
- .SEM=target_options-privates-ITEM && target_options-privates-option target_cython_sources && target_options-privates-args ${input:Src} ${hide;input:Dep} ${hide;output;tobindir:Src.fake.o} && target_macroses-ITEM && target_macroses-macro target_cython_options && target_macroses-args $CYTHON_OPTIONS ${Options} --cplus ${CYTHON_CPP_OUTPUT_INCLUDES} && target_macroses-ITEM && target_macroses-macro target_cython_include_directories && target_macroses-args $_CYTHON__INCLUDE && target_macroses-ITEM && target_macroses-macro set_python_type_for_cython && target_macroses-args $PYTHON_TYPE_FOR_CYTHON
+ .CMD=$RUN_CYTHON_SCRIPT $CYTHON_OPTIONS ${Options} --cplus ${CYTHON_CPP_OUTPUT_INCLUDES} ${pre=-I:_CYTHON__INCLUDE} ${input:Src} ${hide;input:Dep} -o ${tobindir;output;suf=${OBJ_SUF}.cpp:Src} $CYTHON_OUTPUT_INCLUDES ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
+ .SEM=target_options-privates-ITEM && target_options-privates-option target_cython_sources && target_options-privates-args ${input:Src} ${hide;input:Dep} ${hide;tobindir;output;suf=.fake.o:Src} && target_macroses-ITEM && target_macroses-macro target_cython_options && target_macroses-args $CYTHON_OPTIONS ${Options} --cplus ${CYTHON_CPP_OUTPUT_INCLUDES} && target_macroses-ITEM && target_macroses-macro target_cython_include_directories && target_macroses-args $_CYTHON__INCLUDE && target_macroses-ITEM && target_macroses-macro set_python_type_for_cython && target_macroses-args $PYTHON_TYPE_FOR_CYTHON
ADDINCL($_CYTHON_SYS_INCLUDES)
}
@@ -3782,7 +3779,7 @@ macro _BUILDWITH_CYTHON_CPP_DEP(Src, Dep, Options...) {
###
### BUILDWITH_CYTHON_CPP without .pyx infix and with cdef public .h file.
macro _BUILDWITH_CYTHON_CPP_H(Src, Dep, Options...) {
- .CMD=$RUN_CYTHON_SCRIPT_H $CYTHON_OPTIONS ${Options} --cplus ${CYTHON_CPP_OUTPUT_INCLUDES} ${pre=-I:_CYTHON__INCLUDE} ${input:Src} ${hide;input:Dep} -o ${output;noext;tobindir:Src.cpp} ${hide;output;addincl;noext;tobindir:Src.h} ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
+ .CMD=$RUN_CYTHON_SCRIPT_H $CYTHON_OPTIONS ${Options} --cplus ${CYTHON_CPP_OUTPUT_INCLUDES} ${pre=-I:_CYTHON__INCLUDE} ${input:Src} ${hide;input:Dep} -o ${tobindir;output;suf=.cpp;noext:Src} ${hide;addincl;tobindir;output;suf=.h;noext:Src} ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
ADDINCL($_CYTHON_SYS_INCLUDES)
}
@@ -3792,8 +3789,8 @@ macro _BUILDWITH_CYTHON_CPP_H(Src, Dep, Options...) {
###
### Generates .c file from .pyx.
macro BUILDWITH_CYTHON_C(Src, Options...) {
- .CMD=$RUN_CYTHON_SCRIPT $CYTHON_OPTIONS ${Options} ${pre=-I:_CYTHON__INCLUDE} ${input:Src} -o ${output;tobindir;suf=${OBJ_SUF}.c:Src} $CYTHON_OUTPUT_INCLUDES ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
- .SEM=target_options-privates-ITEM && target_options-privates-option target_cython_sources && target_options-privates-args ${input:Src} ${hide;output;tobindir:Src.fake.o} && target_macroses-ITEM && target_macroses-macro target_cython_options && target_macroses-args $CYTHON_OPTIONS ${Options} ${CYTHON_OUTPUT_INCLUDES} && target_macroses-ITEM && target_macroses-macro target_cython_include_directories && target_macroses-args $_CYTHON__INCLUDE && target_macroses-ITEM && target_macroses-macro set_python_type_for_cython && target_macroses-args $PYTHON_TYPE_FOR_CYTHON
+ .CMD=$RUN_CYTHON_SCRIPT $CYTHON_OPTIONS ${Options} ${pre=-I:_CYTHON__INCLUDE} ${input:Src} -o ${tobindir;output;suf=${OBJ_SUF}.c:Src} $CYTHON_OUTPUT_INCLUDES ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
+ .SEM=target_options-privates-ITEM && target_options-privates-option target_cython_sources && target_options-privates-args ${input:Src} ${hide;tobindir;output;suf=.fake.o:Src} && target_macroses-ITEM && target_macroses-macro target_cython_options && target_macroses-args $CYTHON_OPTIONS ${Options} ${CYTHON_OUTPUT_INCLUDES} && target_macroses-ITEM && target_macroses-macro target_cython_include_directories && target_macroses-args $_CYTHON__INCLUDE && target_macroses-ITEM && target_macroses-macro set_python_type_for_cython && target_macroses-args $PYTHON_TYPE_FOR_CYTHON
ADDINCL($_CYTHON_SYS_INCLUDES)
}
@@ -3803,8 +3800,8 @@ macro BUILDWITH_CYTHON_C(Src, Options...) {
### Generates .c file from .pyx and attach extra input Dep.
### If Dep changes the .c file will be re-generated.
macro _BUILDWITH_CYTHON_C_DEP(Src, Dep, Options...) {
- .CMD=$RUN_CYTHON_SCRIPT $CYTHON_OPTIONS ${Options} ${pre=-I:_CYTHON__INCLUDE} ${input:Src} ${hide;input:Dep} -o ${output;tobindir;suf=${OBJ_SUF}.c:Src} $CYTHON_OUTPUT_INCLUDES ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
- .SEM=target_options-privates-ITEM && target_options-privates-option target_cython_sources && target_options-privates-args ${input:Src} ${hide;input:Dep} ${hide;output;tobindir:Src.fake.o} && target_macroses-ITEM && target_macroses-macro target_cython_options && target_macroses-args $CYTHON_OPTIONS ${Options} ${CYTHON_OUTPUT_INCLUDES} && target_macroses-ITEM && target_macroses-macro target_cython_include_directories && target_macroses-args $_CYTHON__INCLUDE && target_macroses-ITEM && target_macroses-macro set_python_type_for_cython && target_macroses-args $PYTHON_TYPE_FOR_CYTHON
+ .CMD=$RUN_CYTHON_SCRIPT $CYTHON_OPTIONS ${Options} ${pre=-I:_CYTHON__INCLUDE} ${input:Src} ${hide;input:Dep} -o ${tobindir;output;suf=${OBJ_SUF}.c:Src} $CYTHON_OUTPUT_INCLUDES ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
+ .SEM=target_options-privates-ITEM && target_options-privates-option target_cython_sources && target_options-privates-args ${input:Src} ${hide;input:Dep} ${hide;tobindir;output;suf=.fake.o:Src} && target_macroses-ITEM && target_macroses-macro target_cython_options && target_macroses-args $CYTHON_OPTIONS ${Options} ${CYTHON_OUTPUT_INCLUDES} && target_macroses-ITEM && target_macroses-macro target_cython_include_directories && target_macroses-args $_CYTHON__INCLUDE && target_macroses-ITEM && target_macroses-macro set_python_type_for_cython && target_macroses-args $PYTHON_TYPE_FOR_CYTHON
ADDINCL($_CYTHON_SYS_INCLUDES)
}
@@ -3813,7 +3810,7 @@ macro _BUILDWITH_CYTHON_C_DEP(Src, Dep, Options...) {
###
### BUILDWITH_CYTHON_C without .pyx infix and with cdef public .h file.
macro _BUILDWITH_CYTHON_C_H(Src, Dep, Options...) {
- .CMD=$RUN_CYTHON_SCRIPT_H $CYTHON_OPTIONS ${Options} ${pre=-I:_CYTHON__INCLUDE} ${input:Src} ${hide;input:Dep} -o ${output;noext;tobindir:Src.c} ${hide;output;addincl;noext;tobindir:Src.h} $CYTHON_OUTPUT_INCLUDES ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
+ .CMD=$RUN_CYTHON_SCRIPT_H $CYTHON_OPTIONS ${Options} ${pre=-I:_CYTHON__INCLUDE} ${input:Src} ${hide;input:Dep} -o ${tobindir;output;suf=.c;noext:Src} ${hide;addincl;tobindir;output;suf=.h;noext:Src} $CYTHON_OUTPUT_INCLUDES ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
ADDINCL($_CYTHON_SYS_INCLUDES)
}
@@ -3822,7 +3819,7 @@ macro _BUILDWITH_CYTHON_C_H(Src, Dep, Options...) {
###
### BUILDWITH_CYTHON_C_H with cdef api _api.h file.
macro _BUILDWITH_CYTHON_C_API_H(Src, Dep, Options...) {
- .CMD=$RUN_CYTHON_SCRIPT_H $CYTHON_OPTIONS ${Options} ${pre=-I:_CYTHON__INCLUDE} ${input:Src} ${hide;input:Dep} -o ${output;noext;tobindir:Src.c} ${hide;output;addincl;noext;tobindir:Src.h} ${hide;output;addincl;noext;defext=_api.h;tobindir:Src} $CYTHON_OUTPUT_INCLUDES ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
+ .CMD=$RUN_CYTHON_SCRIPT_H $CYTHON_OPTIONS ${Options} ${pre=-I:_CYTHON__INCLUDE} ${input:Src} ${hide;input:Dep} -o ${tobindir;output;suf=.c;noext:Src} ${hide;addincl;tobindir;output;suf=.h;noext:Src} ${hide;addincl;tobindir;output;defext=_api.h;noext:Src} $CYTHON_OUTPUT_INCLUDES ${hide;kv:"p CY"} ${hide;kv:"pc yellow"}
ADDINCL($_CYTHON_SYS_INCLUDES)
}
@@ -3830,7 +3827,7 @@ macro _BUILDWITH_CYTHON_C_API_H(Src, Dep, Options...) {
###
### Compile .rl file using Ragel6.
macro BUILDWITH_RAGEL6(Src, Options...) {
- .CMD=$RUN_NO_SANITIZE ${tool:"contrib/tools/ragel6"} $RAGEL6_FLAGS ${Options} -I${ARCADIA_ROOT} -o ${output;nopath;noext;defext=.rl6.cpp:Src} ${input:Src} ${hide;kv:"p R6"} ${hide;kv:"pc yellow"}
+ .CMD=$RUN_NO_SANITIZE ${tool:"contrib/tools/ragel6"} $RAGEL6_FLAGS ${Options} -I${ARCADIA_ROOT} -o ${output;defext=.rl6.cpp;nopath;noext:Src} ${input:Src} ${hide;kv:"p R6"} ${hide;kv:"pc yellow"}
}
# tag:python-processing tag:internal
@@ -3841,7 +3838,7 @@ macro BUILDWITH_RAGEL6(Src, Options...) {
###
### Documentation: https://wiki.yandex-team.ru/devtools/commandsandvars/pysrcs/#makrospyregister
macro _PY_REGISTER(Func) {
- .CMD=$YMAKE_PYTHON ${input:"build/scripts/gen_py_reg.py"} $Func ${noauto;output:Func.reg.cpp} ${hide;kv:"p PY"} ${hide;kv:"pc yellow"}
+ .CMD=$YMAKE_PYTHON ${input:"build/scripts/gen_py_reg.py"} $Func ${noauto;output;suf=.reg.cpp:Func} ${hide;kv:"p PY"} ${hide;kv:"pc yellow"}
SRCS(GLOBAL $Func.reg.cpp)
}
@@ -3852,7 +3849,7 @@ macro _PY_REGISTER(Func) {
###
### Documentation: https://wiki.yandex-team.ru/devtools/commandsandvars/pysrcs/#makrospyregister
macro _PY3_REGISTER(Func) {
- .CMD=$YMAKE_PYTHON ${input:"build/scripts/gen_py3_reg.py"} $Func ${noauto;output:Func.reg3.cpp} ${hide;kv:"p PY"} ${hide;kv:"pc yellow"}
+ .CMD=$YMAKE_PYTHON ${input:"build/scripts/gen_py3_reg.py"} $Func ${noauto;output;suf=.reg3.cpp:Func} ${hide;kv:"p PY"} ${hide;kv:"pc yellow"}
SRCS(GLOBAL $Func.reg3.cpp)
}
@@ -3863,7 +3860,7 @@ macro _PY3_REGISTER(Func) {
###
### Documentation: https://wiki.yandex-team.ru/devtools/commandsandvars/pysrcs/#makrospyregister
macro _PY_COMPILE_BYTECODE(SrcX, Src, Dst) {
- .CMD=$YMAKE_PYTHON ${input:"build/scripts/py_compile.py"} $SrcX ${input:Src} ${noauto;output:Dst.yapyc} ${hide;kv:"p PY"} ${hide;kv:"pc yellow"}
+ .CMD=$YMAKE_PYTHON ${input:"build/scripts/py_compile.py"} $SrcX ${input:Src} ${noauto;output;suf=.yapyc:Dst} ${hide;kv:"p PY"} ${hide;kv:"pc yellow"}
}
# tag:python-processing tag:internal
@@ -3873,7 +3870,7 @@ macro _PY_COMPILE_BYTECODE(SrcX, Src, Dst) {
###
### Documentation: https://wiki.yandex-team.ru/devtools/commandsandvars/pysrcs/#makrospyregister
macro _PY3_COMPILE_BYTECODE(SrcX, Src, Dst) {
- .CMD=${env:"PYTHONHASHSEED=0"} ${tool:"tools/py3cc"} $SrcX ${input:Src} ${noauto;output:Dst.yapyc3} ${hide;kv:"p PY"} ${hide;kv:"pc yellow"}
+ .CMD=${env:"PYTHONHASHSEED=0"} ${tool:"tools/py3cc"} $SrcX ${input:Src} ${noauto;output;suf=.yapyc3:Dst} ${hide;kv:"p PY"} ${hide;kv:"pc yellow"}
}
macro _ARCHIVE_SEM_HELPER(FLAGS[], OUT, Files...) {
@@ -3908,8 +3905,8 @@ macro PIRE_INLINE(FILES...) {
###
### Example: https://wiki.yandex-team.ru/yatool/howtowriteyamakefiles/#a1ispolzujjtekomanduarchive
macro ARCHIVE(NAME="", DONTCOMPRESS?"-p":"", Files...) {
- .CMD=$ARCH_TOOL -q -x $DONTCOMPRESS ${input;join=\: :Files}: -o ${output;addincl;noauto:NAME} ${hide;kv:"p AR"} ${hide;kv:"pc light-red"}
- .SEM=$_ARCHIVE_SEM_HELPER(${output;addincl;noauto:NAME} ${input:Files} FLAGS -q -x $DONTCOMPRESS) && target_options-privates-ITEM && target_options-privates-option target_sources && target_options-privates-args $BINDIR/$NAME
+ .CMD=$ARCH_TOOL -q -x $DONTCOMPRESS ${input;join=\: :Files}: -o ${addincl;noauto;output:NAME} ${hide;kv:"p AR"} ${hide;kv:"pc light-red"}
+ .SEM=$_ARCHIVE_SEM_HELPER(${addincl;noauto;output:NAME} ${input:Files} FLAGS -q -x $DONTCOMPRESS) && target_options-privates-ITEM && target_options-privates-option target_sources && target_options-privates-args $BINDIR/$NAME
}
### @usage: ARCHIVE_BY_KEYS(archive_name key [DONT_COMPRESS] files...)
@@ -3919,7 +3916,7 @@ macro ARCHIVE(NAME="", DONTCOMPRESS?"-p":"", Files...) {
###
### Example: https://wiki.yandex-team.ru/yatool/howtowriteyamakefiles/#a1ispolzujjtekomanduarchive
macro ARCHIVE_BY_KEYS(NAME="", KEYS="", DONTCOMPRESS?"-p":"", Files...) {
- .CMD=$ARCH_TOOL -q -x $DONTCOMPRESS ${input:Files} -k $KEYS -o ${output;addincl;noauto:NAME} ${hide;kv:"p AR"} ${hide;kv:"pc light-red"}
+ .CMD=$ARCH_TOOL -q -x $DONTCOMPRESS ${input:Files} -k $KEYS -o ${addincl;noauto;output:NAME} ${hide;kv:"p AR"} ${hide;kv:"pc light-red"}
}
#scripts
@@ -4005,8 +4002,8 @@ macro CFG_VARS() {
### The values are collected during configure stage, while replacement itself happens during build stage.
### Used implicitly for .in-files processing.
macro CONFIGURE_FILE(Src, Dst) {
- .CMD=$YMAKE_PYTHON ${input:"build/scripts/configure_file.py"} ${input:Src} ${output;addincl:Dst} $CFG_VARS ${hide;kv:"p CF"} ${hide;kv:"pc yellow"}
- .SEM=dir_macroses-ITEM && dir_macroses-macro set_vars && dir_macroses-args ${CFG_VARS} && target_commands-ITEM && target_commands-macro configure_file && target_commands-args $S/${input;rootrel:Src} $B/${output;addincl;rootrel:Dst}
+ .CMD=$YMAKE_PYTHON ${input:"build/scripts/configure_file.py"} ${input:Src} ${addincl;output:Dst} $CFG_VARS ${hide;kv:"p CF"} ${hide;kv:"pc yellow"}
+ .SEM=dir_macroses-ITEM && dir_macroses-macro set_vars && dir_macroses-args ${CFG_VARS} && target_commands-ITEM && target_commands-macro configure_file && target_commands-args $S/${rootrel;input:Src} $B/${addincl;output;rootrel:Dst}
}
# tag:flags
@@ -4280,8 +4277,8 @@ macro _LUAJIT_OPENRESTY_OBJDUMP(Src, OUT="") {
###
### Documentation: https://wiki.yandex-team.ru/yatool/HowToWriteYaMakeFiles/
macro GENERATE_ENUM_SERIALIZATION(File) {
- .CMD=$ENUM_PARSER_TOOL ${input:File} --include-path ${input;rootrel:File} --output ${output;suf=_serialized.cpp:File} ${output_include;from_input;hide:File} ${hide;output_include:"util/generic/serialized_enum.h"} ${hide;kv:"p EN"} ${hide;kv:"pc yellow"}
- .SEM=target_macroses-ITEM && target_macroses-macro generate_enum_serilization && target_macroses-args ${input:File} ${hide;output;suf=_serialized.o:File} INCLUDE_HEADERS ${input;rootrel:File} ${hide;tool:"tools/enum_parser/enum_parser"}
+ .CMD=$ENUM_PARSER_TOOL ${input:File} --include-path ${rootrel;input:File} --output ${output;suf=_serialized.cpp:File} ${hide;from_input;output_include:File} ${hide;output_include:"util/generic/serialized_enum.h"} ${hide;kv:"p EN"} ${hide;kv:"pc yellow"}
+ .SEM=target_macroses-ITEM && target_macroses-macro generate_enum_serilization && target_macroses-args ${input:File} ${hide;output;suf=_serialized.o:File} INCLUDE_HEADERS ${rootrel;input:File} ${hide;tool:"tools/enum_parser/enum_parser"}
PEERDIR(tools/enum_parser/enum_serialization_runtime)
}
@@ -4292,8 +4289,8 @@ macro GENERATE_ENUM_SERIALIZATION(File) {
###
### Documentation: https://wiki.yandex-team.ru/yatool/HowToWriteYaMakeFiles/
macro GENERATE_ENUM_SERIALIZATION_WITH_HEADER(File) {
- .CMD=$ENUM_PARSER_TOOL ${input:File} --include-path ${input;rootrel:File} --output ${output;suf=_serialized.cpp:File} --header ${output;suf=_serialized.h:File} ${output_include;from_input;hide:File} ${hide;kv:"p EN"} ${hide;kv:"pc yellow"}
- .SEM=target_macroses-ITEM && target_macroses-macro generate_enum_serilization && target_macroses-args ${input:File} ${hide;output;suf=_serialized.o:File} GEN_HEADER ${output;suf=_serialized.h:File} INCLUDE_HEADERS ${input;rootrel:File} ${hide;tool:"tools/enum_parser/enum_parser"}
+ .CMD=$ENUM_PARSER_TOOL ${input:File} --include-path ${rootrel;input:File} --output ${output;suf=_serialized.cpp:File} --header ${output;suf=_serialized.h:File} ${hide;from_input;output_include:File} ${hide;kv:"p EN"} ${hide;kv:"pc yellow"}
+ .SEM=target_macroses-ITEM && target_macroses-macro generate_enum_serilization && target_macroses-args ${input:File} ${hide;output;suf=_serialized.o:File} GEN_HEADER ${output;suf=_serialized.h:File} INCLUDE_HEADERS ${rootrel;input:File} ${hide;tool:"tools/enum_parser/enum_parser"}
PEERDIR(tools/enum_parser/enum_serialization_runtime)
}
@@ -4388,7 +4385,7 @@ SCHEEME2_STRUCT_INFO_FLAGS=-f "const static ui32 RecordSig" -u "RecordSig" --gcc
### for compatibility with C++ compiler and the external environment.
### See tools/structparser for more details.
macro GEN_SCHEEME2(ScheemeName, FromFile) {
- .CMD=$CXX_COMPILER_OLD $C_FLAGS_PLATFORM -c ${stdout;tmp:FromFile.cph} $SCHEEME2_CFLAGS ${input:FromFile} ${pre=-I:_C__INCLUDE} $CXXFLAGS -Wno-error && ${tool:"tools/structparser"} -o ${output:ScheemeName.inc} -n N${ScheemeName}SchemeInfo $SCHEEME2_STRUCT_INFO_FLAGS $DATAWORK_SCHEEME_EXPORT_FLAGS ${tmp:FromFile.cph} ${stdout;noauto;output:ScheemeName.inc.log} ${hide;kv:"p SH"} ${hide;kv:"pc yellow"}
+ .CMD=$CXX_COMPILER_OLD $C_FLAGS_PLATFORM -c ${stdout;tmp;suf=.cph:FromFile} $SCHEEME2_CFLAGS ${input:FromFile} ${pre=-I:_C__INCLUDE} $CXXFLAGS -Wno-error && ${tool:"tools/structparser"} -o ${output;suf=.inc:ScheemeName} -n N${ScheemeName}SchemeInfo $SCHEEME2_STRUCT_INFO_FLAGS $DATAWORK_SCHEEME_EXPORT_FLAGS ${tmp;suf=.cph:FromFile} ${stdout;noauto;output;suf=.inc.log:ScheemeName} ${hide;kv:"p SH"} ${hide;kv:"pc yellow"}
}
### @usage: SYMLINK(from to)
@@ -4613,7 +4610,7 @@ macro RUN_PY3_PROGRAM(Tool, IN{input}[], IN_NOPARSE{input}[], OUT{output}[], OUT
# tag:java-specific
macro _RUN_ANTLR_BASE(IN{input}[], IN_NOPARSE{input}[], OUT{output}[], OUT_NOAUTO{output}[], OUTPUT_INCLUDES[], INDUCED_DEPS[], TOOL[], STDOUT="", STDOUT_NOAUTO="", CWD="", JAR[], SEM="run_java", ENV[], HIDE_OUTPUT?"stderr2stdout":"stdout2stderr", Args...) {
PEERDIR(build/platform/java/jdk $JDK_RESOURCE_PEERDIR)
- .CMD=${cwd:CWD} ${env:ENV} $YMAKE_PYTHON ${input;pre=build/scripts/:HIDE_OUTPUT.py} $JDK_RESOURCE/bin/java $JAR $Args ${hide;tool:TOOL} ${hide;input:IN} ${hide;context=TEXT;input:IN_NOPARSE} ${hide;output_include:OUTPUT_INCLUDES} $INDUCED_DEPS ${hide;output:OUT} ${hide;noauto;output:OUT_NOAUTO} ${stdout;output:STDOUT} ${stdout;noauto;output:STDOUT_NOAUTO} ${hide;kv:"p JV"} ${hide;kv:"pc light-blue"} ${hide;kv:"show_out"}
+ .CMD=${cwd:CWD} ${env:ENV} $YMAKE_PYTHON ${input;pre=build/scripts/;suf=.py:HIDE_OUTPUT} $JDK_RESOURCE/bin/java $JAR $Args ${hide;tool:TOOL} ${hide;input:IN} ${hide;context=TEXT;input:IN_NOPARSE} ${hide;output_include:OUTPUT_INCLUDES} $INDUCED_DEPS ${hide;output:OUT} ${hide;noauto;output:OUT_NOAUTO} ${stdout;output:STDOUT} ${stdout;noauto;output:STDOUT_NOAUTO} ${hide;kv:"p JV"} ${hide;kv:"pc light-blue"} ${hide;kv:"show_out"}
.SEM=custom_runs-ITEM && custom_runs-depends ${input:IN} && custom_runs-command $SEM && custom_runs-command $Args && custom_runs-outputs ${output:OUT} ${noauto;output:OUT_NOAUTO} ${pre=&& custom_runs-cwd :CWD}
}
@@ -4640,7 +4637,7 @@ macro _RUN_ANTLR_BASE(IN{input}[], IN_NOPARSE{input}[], OUT{output}[], OUT_NOAUT
### changes resource_ids in such macros if newer resource of specified type is available. Note that the task seeks AUTOUPDATED in specific position,
### so you shall place it immediately after resource_id.
macro FROM_SANDBOX(Id, OUT{output}[], OUT_NOAUTO{output}[], OUTPUT_INCLUDES[], INDUCED_DEPS[], FILE?"--copy-to-dir":"--untar-to", AUTOUPDATED="", PREFIX=".", RENAME[], EXECUTABLE?"--executable":"", SBR="sbr:") {
- .CMD=${hide:SANDBOX_FAKEID} ${cwd:BINDIR} ${resource;pre=$SBR:Id} $YMAKE_PYTHON ${input:"build/scripts/fetch_from_sandbox.py"} --resource-file $(RESOURCE_ROOT)/sbr/$Id/resource --resource-id $Id $FILE $PREFIX ${pre=--rename :RENAME} $EXECUTABLE -- $OUT $OUT_NOAUTO ${hide;input:"build/scripts/fetch_from.py"} ${hide;output_include:OUTPUT_INCLUDES} $INDUCED_DEPS ${hide;output:OUT} ${hide;noauto;output:OUT_NOAUTO} ${requirements;hide:"network:full"} ${hide;kv:"p SB"} ${hide;kv:"pc yellow"} ${hide;kv:"show_out"}
+ .CMD=${hide:SANDBOX_FAKEID} ${cwd:BINDIR} ${resource;pre=$SBR:Id} $YMAKE_PYTHON ${input:"build/scripts/fetch_from_sandbox.py"} --resource-file $(RESOURCE_ROOT)/sbr/$Id/resource --resource-id $Id $FILE $PREFIX ${pre=--rename :RENAME} $EXECUTABLE -- $OUT $OUT_NOAUTO ${hide;input:"build/scripts/fetch_from.py"} ${hide;output_include:OUTPUT_INCLUDES} $INDUCED_DEPS ${hide;output:OUT} ${hide;noauto;output:OUT_NOAUTO} ${hide;requirements:"network:full"} ${hide;kv:"p SB"} ${hide;kv:"pc yellow"} ${hide;kv:"show_out"}
ADD_CHECK(check.resource $Id)
}
@@ -4690,7 +4687,7 @@ macro NO_DEBUG_INFO() {
###
### Documentation: https://a.yandex-team.ru/arc/trunk/arcadia/contrib/libs/ctemplate/README.md
macro CTEMPLATE_VARNAMES(File) {
- .CMD=${tool:"contrib/libs/ctemplate/make_tpl_varnames_h"} -f ${output;addincl;nopath;noallext:File.varnames.h} ${input:File}
+ .CMD=${tool:"contrib/libs/ctemplate/make_tpl_varnames_h"} -f ${addincl;output;nopath;noallext;suf=.varnames.h:File} ${input:File}
}
LLVM_OPTS=
@@ -4720,8 +4717,8 @@ macro CLANG_EMIT_AST_CXX(Input, Output, Opts...) {
### Emit LLVM bytecode from .cpp file. BC_CXXFLAGS, LLVM_OPTS and C_FLAGS_PLATFORM are passed in, while CFLAGS are not.
### Note: Output name is used as is, no extension added.
macro LLVM_COMPILE_CXX(Input, Output, Opts...) {
- .CMD=$YMAKE_PYTHON ${input:"build/scripts/clang_wrapper.py"} $WINDOWS ${CLANG_BC_ROOT}/bin/clang++ ${pre=-I:_C__INCLUDE} $BC_CXXFLAGS $C_FLAGS_PLATFORM -Wno-unknown-warning-option $LLVM_OPTS ${NO_LTO_CFLAGS} -emit-llvm -c ${input:Input} -o ${noauto;output:Output} $Opts ${hide;kv:"p BC"} ${hide;kv:"pc light-green"}
- .SEM=target_macroses-ITEM && target_macroses-macro llvm_compile_cxx && target_macroses-args ${input:Input} ${noauto;output:Output} ${"${CLANGPLUSPLUS}"} -Wno-unknown-warning-option $LLVM_OPTS ${NO_LTO_CFLAGS} -emit-llvm ${Opts}
+ .CMD=$YMAKE_PYTHON ${input:"build/scripts/clang_wrapper.py"} $WINDOWS ${CLANG_BC_ROOT}/bin/clang++ ${pre=-I:_C__INCLUDE} $BC_CXXFLAGS $C_FLAGS_PLATFORM -Wno-unknown-warning-option $LLVM_OPTS ${NO_LTO_CFLAGS} $NO_PGO_CFLAGS -emit-llvm -c ${input:Input} -o ${noauto;output:Output} $Opts ${hide;kv:"p BC"} ${hide;kv:"pc light-green"}
+ .SEM=target_macroses-ITEM && target_macroses-macro llvm_compile_cxx && target_macroses-args ${input:Input} ${noauto;output:Output} ${"${CLANGPLUSPLUS}"} -Wno-unknown-warning-option $LLVM_OPTS ${NO_LTO_CFLAGS} $NO_PGO_CFLAGS -emit-llvm ${Opts}
.STRUCT_CMD=yes
when ($CLANG_BC_ROOT == "") {
_OK = no
@@ -4735,8 +4732,8 @@ macro LLVM_COMPILE_CXX(Input, Output, Opts...) {
### Emit LLVM bytecode from .c file. BC_CFLAGS, LLVM_OPTS and C_FLAGS_PLATFORM are passed in, while CFLAGS are not.
### Note: Output name is used as is, no extension added.
macro LLVM_COMPILE_C(Input, Output, Opts...) {
- .CMD=$YMAKE_PYTHON ${input:"build/scripts/clang_wrapper.py"} $WINDOWS ${CLANG_BC_ROOT}/bin/clang ${pre=-I:_C__INCLUDE} $BC_CFLAGS $C_FLAGS_PLATFORM $LLVM_OPTS ${NO_LTO_CFLAGS} -emit-llvm -c ${input:Input} -o ${noauto;output:Output} $Opts ${hide;kv:"p BC"} ${hide;kv:"pc light-green"}
- .SEM=target_macroses-ITEM && target_macroses-macro llvm_compile_c && target_macroses-args ${input:Input} ${noauto;output:Output} ${"${CLANGC}"} -Wno-unknown-warning-option $LLVM_OPTS ${NO_LTO_CFLAGS} -emit-llvm ${Opts}
+ .CMD=$YMAKE_PYTHON ${input:"build/scripts/clang_wrapper.py"} $WINDOWS ${CLANG_BC_ROOT}/bin/clang ${pre=-I:_C__INCLUDE} $BC_CFLAGS $C_FLAGS_PLATFORM $LLVM_OPTS ${NO_LTO_CFLAGS} $NO_PGO_CFLAGS -emit-llvm -c ${input:Input} -o ${noauto;output:Output} $Opts ${hide;kv:"p BC"} ${hide;kv:"pc light-green"}
+ .SEM=target_macroses-ITEM && target_macroses-macro llvm_compile_c && target_macroses-args ${input:Input} ${noauto;output:Output} ${"${CLANGC}"} -Wno-unknown-warning-option $LLVM_OPTS ${NO_LTO_CFLAGS} $NO_PGO_CFLAGS -emit-llvm ${Opts}
.STRUCT_CMD=yes
when ($CLANG_BC_ROOT == "") {
_OK = no
@@ -4860,7 +4857,7 @@ UNION_CMD=$YMAKE_PYTHON3 -c open(\'$TARGET\',\'w\').close() ${hide;kv:"p UN"} ${
UNION_CMD_MF=$UNION_CMD && $GENERATE_MF
macro _EXPAND_INS_OUTS(FILES{input}[]) {
- .CMD=${hide;input:FILES} ${late_out;hide:INPUT}
+ .CMD=${hide;input:FILES} ${hide;late_out:INPUT}
}
_UNION_EXPLICIT_OUTPUTS=
@@ -4920,12 +4917,12 @@ YASM_PREINCLUDES_VALUE=
# tag:yasm-specific
macro _SRC_yasm_helper(SRC, PREINCLUDES[], SRCFLAGS...) {
- .CMD=${tool:"contrib/tools/yasm"} -f ${_YASM_FMT_VALUE}${HARDWARE_ARCH} $_YASM_PLATFORM_FLAGS_VALUE $YASM_DEBUG_INFO $YASM_DEBUG_INFO_DISABLE_CACHE__NO_UID__ -D ${pre=_;suf=_:HARDWARE_TYPE} -D_YASM_ $ASM_PREFIX_VALUE $_YASM_PREDEFINED_FLAGS_VALUE $YASM_FLAGS ${pre=-I :_ASM__INCLUDE} $SRCFLAGS -o ${output;noext;suf=${OBJECT_SUF}:SRC} ${pre=-P :PREINCLUDES} ${hide;input:PREINCLUDES} ${SRC} ${hide;kv:"p AS"} ${hide;kv:"pc light-green"}
+ .CMD=${tool:"contrib/tools/yasm"} -f ${_YASM_FMT_VALUE}${HARDWARE_ARCH} $_YASM_PLATFORM_FLAGS_VALUE $YASM_DEBUG_INFO $YASM_DEBUG_INFO_DISABLE_CACHE__NO_UID__ -D ${pre=_;suf=_:HARDWARE_TYPE} -D_YASM_ $ASM_PREFIX_VALUE $_YASM_PREDEFINED_FLAGS_VALUE $YASM_FLAGS ${pre=-I :_ASM__INCLUDE} $SRCFLAGS -o ${output;suf=${OBJECT_SUF};noext:SRC} ${pre=-P :PREINCLUDES} ${hide;input:PREINCLUDES} ${SRC} ${hide;kv:"p AS"} ${hide;kv:"pc light-green"}
}
# tag:yasm-specific
macro _SRC_yasm(SRC, PREINCLUDES[], SRCFLAGS...) {
- .CMD=$_SRC_yasm_helper(${input:SRC}, $SRCFLAGS, PREINCLUDES $PREINCLUDES)
+ .CMD=$_SRC_yasm_helper(${input:SRC} $SRCFLAGS PREINCLUDES $PREINCLUDES)
}
# tag:nasm-specific
@@ -4933,7 +4930,7 @@ NASM_FLAGS=
# tag:nasm-specific
macro _SRC_nasm_helper(SRC, SRCFLAGS...) {
- .CMD=${NASM_RESOURCE_GLOBAL}/bin/nasm -f ${_YASM_FMT_VALUE}${HARDWARE_ARCH} -D ${pre=_;suf=_:HARDWARE_TYPE} $NASM_FLAGS ${pre=-I :_ASM__INCLUDE} $SRCFLAGS -o ${output;noext;suf=${OBJECT_SUF}:SRC} ${SRC} ${hide;kv:"p AS"} ${hide;kv:"pc light-green"}
+ .CMD=${NASM_RESOURCE_GLOBAL}/bin/nasm -f ${_YASM_FMT_VALUE}${HARDWARE_ARCH} -D ${pre=_;suf=_:HARDWARE_TYPE} $NASM_FLAGS ${pre=-I :_ASM__INCLUDE} $SRCFLAGS -o ${output;suf=${OBJECT_SUF};noext:SRC} ${SRC} ${hide;kv:"p AS"} ${hide;kv:"pc light-green"}
}
# tag:nasm-specific
@@ -4989,7 +4986,7 @@ _ANTLR4_VISITOR__ANTLR4_EMPTY=-no-visitor
###
### Macro to invoke ANTLR4 generator for separate lexer and parser grammars (Cpp)
macro RUN_ANTLR4_CPP_SPLIT(LEXER, PARSER, OUTPUT_INCLUDES[], LISTENER?"PARSER":"_ANTLR4_EMPTY", VISITOR?"PARSER":"_ANTLR4_EMPTY", _ANTLR4_EMPTY="", Args...) {
- RUN_ANTLR4(${LEXER} ${PARSER} -Dlanguage=Cpp -o ${BINDIR} ${_ANTLR4_VISITOR_$VISITOR} ${_ANTLR4_LISTENER_$LISTENER} ${Args} CWD ${BINDIR} IN ${LEXER} ${PARSER} OUT ${noext;suf=.cpp:LEXER} ${noext;suf=.h:LEXER} ${noext;suf=.cpp:PARSER} ${noext;suf=.h:PARSER} ${noext;suf=Listener.h:$LISTENER} ${noext;suf=BaseListener.h:$LISTENER} ${noext;suf=Visitor.h:$VISITOR} ${noext;suf=BaseVisitor.h:$VISITOR} OUTPUT_INCLUDES ${ARCADIA_ROOT}/contrib/libs/antlr4_cpp_runtime/src/antlr4-runtime.h ${OUTPUT_INCLUDES})
+ RUN_ANTLR4(${LEXER} ${PARSER} -Dlanguage=Cpp -o ${BINDIR} ${_ANTLR4_VISITOR_$VISITOR} ${_ANTLR4_LISTENER_$LISTENER} ${Args} CWD ${BINDIR} IN ${LEXER} ${PARSER} OUT ${suf=.cpp;noext:LEXER} ${suf=.h;noext:LEXER} ${suf=.cpp;noext:PARSER} ${suf=.h;noext:PARSER} ${suf=Listener.h;noext:$LISTENER} ${suf=BaseListener.h;noext:$LISTENER} ${suf=Visitor.h;noext:$VISITOR} ${suf=BaseVisitor.h;noext:$VISITOR} OUTPUT_INCLUDES ${ARCADIA_ROOT}/contrib/libs/antlr4_cpp_runtime/src/antlr4-runtime.h ${OUTPUT_INCLUDES})
PEERDIR(contrib/libs/antlr4_cpp_runtime)
}
@@ -4997,7 +4994,7 @@ macro RUN_ANTLR4_CPP_SPLIT(LEXER, PARSER, OUTPUT_INCLUDES[], LISTENER?"PARSER":"
###
### Macro to invoke ANTLR4 generator for combined lexer+parser grammars (Cpp)
macro RUN_ANTLR4_CPP(GRAMMAR, OUTPUT_INCLUDES[], LISTENER?"GRAMMAR":"_ANTLR4_EMPTY", VISITOR?"GRAMMAR":"_ANTLR4_EMPTY", _ANTLR4_EMPTY="", Args...) {
- RUN_ANTLR4(${GRAMMAR} -Dlanguage=Cpp -o ${BINDIR} ${_ANTLR4_VISITOR_$VISITOR} ${_ANTLR4_LISTENER_$LISTENER} ${Args} CWD ${BINDIR} IN ${GRAMMAR} OUT ${noext;suf=Lexer.cpp:GRAMMAR} ${noext;suf=Lexer.h:GRAMMAR} ${noext;suf=Parser.cpp:GRAMMAR} ${noext;suf=Parser.h:GRAMMAR} ${noext;suf=Listener.h:$LISTENER} ${noext;suf=BaseListener.h:$LISTENER} ${noext;suf=Visitor.h:$VISITOR} ${noext;suf=BaseVisitor.h:$VISITOR} OUTPUT_INCLUDES ${ARCADIA_ROOT}/contrib/libs/antlr4_cpp_runtime/src/antlr4-runtime.h ${OUTPUT_INCLUDES})
+ RUN_ANTLR4(${GRAMMAR} -Dlanguage=Cpp -o ${BINDIR} ${_ANTLR4_VISITOR_$VISITOR} ${_ANTLR4_LISTENER_$LISTENER} ${Args} CWD ${BINDIR} IN ${GRAMMAR} OUT ${suf=Lexer.cpp;noext:GRAMMAR} ${suf=Lexer.h;noext:GRAMMAR} ${suf=Parser.cpp;noext:GRAMMAR} ${suf=Parser.h;noext:GRAMMAR} ${suf=Listener.h;noext:$LISTENER} ${suf=BaseListener.h;noext:$LISTENER} ${suf=Visitor.h;noext:$VISITOR} ${suf=BaseVisitor.h;noext:$VISITOR} OUTPUT_INCLUDES ${ARCADIA_ROOT}/contrib/libs/antlr4_cpp_runtime/src/antlr4-runtime.h ${OUTPUT_INCLUDES})
PEERDIR(contrib/libs/antlr4_cpp_runtime)
}
@@ -5063,7 +5060,7 @@ macro TASKLET_REG(Name, Lang, Impl, Includes...) {
PEERDIR+=tasklet/v1/runtime/js
}
- .CMD=$YMAKE_PYTHON ${input:"build/scripts/gen_tasklet_reg.py"} $Name -l $Lang -i $Impl ${noauto;output:Name.task.cpp} $Includes ${hide;output_include:Includes} $TASKLET_REG_INCLUDES ${hide;kv:"p TT"} ${hide;kv:"pc yellow"}
+ .CMD=$YMAKE_PYTHON ${input:"build/scripts/gen_tasklet_reg.py"} $Name -l $Lang -i $Impl ${noauto;output;suf=.task.cpp:Name} $Includes ${hide;output_include:Includes} $TASKLET_REG_INCLUDES ${hide;kv:"p TT"} ${hide;kv:"pc yellow"}
SRCS(GLOBAL $Name.task.cpp)
}
@@ -5072,7 +5069,7 @@ macro TASKLET_REG(Name, Lang, Impl, Includes...) {
macro TASKLET_REG_EXT(Name, Lang, Impl, Wrapper, Includes...) {
PEERDIR(tasklet/v1/domain sandbox/bin sandbox/taskbox/worker)
- .CMD=$YMAKE_PYTHON ${input:"build/scripts/gen_tasklet_reg.py"} $Name -l $Lang -i $Impl -w $Wrapper ${noauto;output:Name.task.cpp} $Includes ${hide;output_include:Includes} $TASKLET_REG_INCLUDES ${hide;kv:"p TT"} ${hide;kv:"pc yellow"}
+ .CMD=$YMAKE_PYTHON ${input:"build/scripts/gen_tasklet_reg.py"} $Name -l $Lang -i $Impl -w $Wrapper ${noauto;output;suf=.task.cpp:Name} $Includes ${hide;output_include:Includes} $TASKLET_REG_INCLUDES ${hide;kv:"p TT"} ${hide;kv:"pc yellow"}
SRCS(GLOBAL $Name.task.cpp)
}
@@ -5542,12 +5539,12 @@ IBTOOL_PATH=$XCODE_TOOLS_ROOT_RESOURCE_GLOBAL/Xcode/Contents/Developer/usr/bin/i
# tag:src-processing
STORYBOARD_FLAGS=--errors --warnings --notices --auto-activate-custom-fonts --output-format human-readable-text
macro _SRC("storyboard", SRC, SRCFLAGS...) {
- .CMD=$IBTOOL_PATH $STORYBOARD_FLAGS --module $REALPRJNAME --output-partial-info-plist ${output;suf=.partial_plist:SRC} --compilation-directory $BINDIR ${input:SRC} && $YMAKE_PYTHON3 ${input:"build/scripts/tar_directory.py"} ${output;tobindir;suf=.compiled_storyboard_tar:SRC} $BINDIR/${nopath;suf=c:SRC} $BINDIR/${nopath;suf=c:SRC}
+ .CMD=$IBTOOL_PATH $STORYBOARD_FLAGS --module $REALPRJNAME --output-partial-info-plist ${output;suf=.partial_plist:SRC} --compilation-directory $BINDIR ${input:SRC} && $YMAKE_PYTHON3 ${input:"build/scripts/tar_directory.py"} ${tobindir;output;suf=.compiled_storyboard_tar:SRC} $BINDIR/${suf=c;nopath:SRC} $BINDIR/${suf=c;nopath:SRC}
}
# tag:src-processing
macro _SRC("xib", SRC, SRCFLAGS...) {
- .CMD=$IBTOOL_PATH $STORYBOARD_FLAGS --module $REALPRJNAME --output-partial-info-plist ${output;suf=.partial_plist:SRC} --compile ${output;tobindir;nopath;noext;suf=.nib:SRC} ${input:SRC}
+ .CMD=$IBTOOL_PATH $STORYBOARD_FLAGS --module $REALPRJNAME --output-partial-info-plist ${output;suf=.partial_plist:SRC} --compile ${tobindir;output;suf=.nib;nopath;noext:SRC} ${input:SRC}
}
# tag:src-processing
@@ -5579,11 +5576,11 @@ macro IOS_APP_ASSETS_FLAGS(Flags...) {
}
macro DARWIN_STRINGS_RESOURCE(Resource, Relpath) {
- .CMD=$COPY_CMD ${input:Resource} $BINDIR/$Relpath && $YMAKE_PYTHON3 ${input:"build/scripts/tar_directory.py"} ${output;tobindir;suf=.strings_tar:Relpath} $BINDIR/$Relpath $BINDIR
+ .CMD=$COPY_CMD ${input:Resource} $BINDIR/$Relpath && $YMAKE_PYTHON3 ${input:"build/scripts/tar_directory.py"} ${tobindir;output;suf=.strings_tar:Relpath} $BINDIR/$Relpath $BINDIR
}
macro DARWIN_SIGNED_RESOURCE(Resource, Relpath) {
- .CMD=$COPY_CMD $Resource $BINDIR/$Relpath && $YMAKE_PYTHON3 ${input:"build/scripts/tar_directory.py"} ${output;tobindir;suf=.signed_resource_tar:Relpath} $BINDIR/$Relpath $BINDIR
+ .CMD=$COPY_CMD $Resource $BINDIR/$Relpath && $YMAKE_PYTHON3 ${input:"build/scripts/tar_directory.py"} ${tobindir;output;suf=.signed_resource_tar:Relpath} $BINDIR/$Relpath $BINDIR
}
# tag:ios-specific
@@ -5729,24 +5726,24 @@ CFLAGS+=$_CFLAGS_VISIBILITY
# tag:cpp-specific
macro SDBUS_CPP_ADAPTOR(File) {
- .CMD=${tool:"contrib/libs/sdbus-cpp/tools/xml2cpp-codegen"} --adaptor=${output;nopath;noext:File.adaptor.h} ${input:File}
+ .CMD=${tool:"contrib/libs/sdbus-cpp/tools/xml2cpp-codegen"} --adaptor=${output;suf=.adaptor.h;nopath;noext:File} ${input:File}
.PEERDIR=contrib/libs/sdbus-cpp
}
# tag:cpp-specific
macro SDBUS_CPP_PROXY(File) {
- .CMD=${tool:"contrib/libs/sdbus-cpp/tools/xml2cpp-codegen"} --proxy=${output;nopath;noext:File.proxy.h} ${input:File}
+ .CMD=${tool:"contrib/libs/sdbus-cpp/tools/xml2cpp-codegen"} --proxy=${output;suf=.proxy.h;nopath;noext:File} ${input:File}
.PEERDIR=contrib/libs/sdbus-cpp
}
# tag:python-specific
macro _PY_ENUM_SERIALIZATION_TO_JSON(File) {
- .CMD=$ENUM_PARSER_TOOL ${input:File} --output ${output;noext;suf=.generated.h:File} --json-output ${output;noext:File.json} ${hide;kv:"p EN"} ${hide;kv:"pc yellow"}
+ .CMD=$ENUM_PARSER_TOOL ${input:File} --output ${output;suf=.generated.h;noext:File} --json-output ${output;suf=.json;noext:File} ${hide;kv:"p EN"} ${hide;kv:"pc yellow"}
}
# tag:python-specific
macro _PY_ENUM_SERIALIZATION_TO_PY(File) {
- .CMD=${tool:"metrika/core/tools/python_enum_generator"} ${input;noext:File.json} -D ${MODDIR} --output ${output;noext:File.py} ${hide;kv:"p EN"} ${hide;kv:"pc yellow"}
+ .CMD=${tool:"metrika/core/tools/python_enum_generator"} ${input;suf=.json;noext:File} -D ${MODDIR} --output ${output;suf=.py;noext:File} ${hide;kv:"p EN"} ${hide;kv:"pc yellow"}
}
macro NGINX_MODULES(Modules...) {
@@ -5851,7 +5848,7 @@ macro HEADERS(EXCLUDE[], Dirs...) {
### - CWD - path to the working directory of the Tool
### Note: Generated AST files generated into BINDIR according to corresponding .cpp file names listed in SOURCES parameter.
macro CLANG_EMIT_AST_CXX_RUN_TOOL(SOURCES[], OPTS[], Tool, IN{input}[], IN_NOPARSE{input}[], OUT{output}[], OUT_NOAUTO{output}[], TOOL{tool}[], OUTPUT_INCLUDES[], INDUCED_DEPS[], IN_DEPS[], STDOUT="", STDOUT_NOAUTO="", CWD="", ENV[], Args...) {
- .CMD=${cwd:BINDIR} $YMAKE_PYTHON ${input:"build/scripts/clang_wrapper.py"} $WINDOWS ${CLANG_RESOURCE_GLOBAL}/bin/clang++ ${pre=-I:_C__INCLUDE} $CXXFLAGS $C_FLAGS_PLATFORM $LLVM_OPTS -emit-ast -c ${input:SOURCES} ${hide;tmp;noext;nopath:SOURCES.ast} $OPTS ${hide;kv:"p ST"} ${hide;kv:"pc light-green"} && ${cwd:CWD} ${env:ENV} ${tool:Tool} $Args ${hide;input:IN} ${hide;context=TEXT;input:IN_NOPARSE} ${hide;input:IN_DEPS} ${hide;output_include:OUTPUT_INCLUDES} $INDUCED_DEPS ${hide;tool:TOOL} ${hide;output:OUT} ${hide;noauto;output:OUT_NOAUTO} ${stdout;output:STDOUT} ${stdout;noauto;output:STDOUT_NOAUTO} ${hide;kv:"p PR"} ${hide;kv:"pc yellow"} ${hide;kv:"show_out"}
+ .CMD=${cwd:BINDIR} $YMAKE_PYTHON ${input:"build/scripts/clang_wrapper.py"} $WINDOWS ${CLANG_RESOURCE_GLOBAL}/bin/clang++ ${pre=-I:_C__INCLUDE} $CXXFLAGS $C_FLAGS_PLATFORM $LLVM_OPTS -emit-ast -c ${input:SOURCES} ${hide;tmp;suf=.ast;noext;nopath:SOURCES} $OPTS ${hide;kv:"p ST"} ${hide;kv:"pc light-green"} && ${cwd:CWD} ${env:ENV} ${tool:Tool} $Args ${hide;input:IN} ${hide;context=TEXT;input:IN_NOPARSE} ${hide;input:IN_DEPS} ${hide;output_include:OUTPUT_INCLUDES} $INDUCED_DEPS ${hide;tool:TOOL} ${hide;output:OUT} ${hide;noauto;output:OUT_NOAUTO} ${stdout;output:STDOUT} ${stdout;noauto;output:STDOUT_NOAUTO} ${hide;kv:"p PR"} ${hide;kv:"pc yellow"} ${hide;kv:"show_out"}
PEERDIR(build/platform/clang)
}
@@ -5906,7 +5903,7 @@ macro LINK_EXCLUDE_LIBRARIES(Libs...) {
### GENERATE_IMPLIB(cuda $CUDA_TARGET_ROOT/lib64/stubs/libcuda.so SONAME libcuda.so.1)
###
macro GENERATE_IMPLIB(Lib, Path, SONAME="") {
- .CMD=${tool:"contrib/tools/implib"} --target $HARDWARE_TYPE --outdir $BINDIR ${pre=--library-load-name :SONAME} $Path ${hide;output;nopath;suf=.init.c:Path} ${hide;output;nopath;suf=.tramp.S:Path}
+ .CMD=${tool:"contrib/tools/implib"} --target $HARDWARE_TYPE --outdir $BINDIR ${pre=--library-load-name :SONAME} $Path ${hide;output;suf=.init.c;nopath:Path} ${hide;output;suf=.tramp.S;nopath:Path}
LINK_EXCLUDE_LIBRARIES($Lib)
}
diff --git a/contrib/libs/croaring/.yandex_meta/override.nix b/contrib/libs/croaring/.yandex_meta/override.nix
index 616330eb5a..2c7a84ab8f 100644
--- a/contrib/libs/croaring/.yandex_meta/override.nix
+++ b/contrib/libs/croaring/.yandex_meta/override.nix
@@ -1,12 +1,12 @@
pkgs: attrs: with pkgs; with attrs; rec {
pname = "croaring";
- version = "4.3.0";
+ version = "4.3.1";
src = fetchFromGitHub {
owner = "RoaringBitmap";
repo = "CRoaring";
rev = "v${version}";
- hash = "sha256-Se/m+qcYwZu1Bp5F2dcWacHYe4awX7EclB1iChTBkYE=";
+ hash = "sha256-c4o8AMCtDGLChXxJKJyxkWhuYu7axqLb2ce8IOGk920=";
};
patches = [];
diff --git a/contrib/libs/croaring/include/roaring/roaring_version.h b/contrib/libs/croaring/include/roaring/roaring_version.h
index a6c5b01b41..5d5014b42e 100644
--- a/contrib/libs/croaring/include/roaring/roaring_version.h
+++ b/contrib/libs/croaring/include/roaring/roaring_version.h
@@ -2,11 +2,11 @@
// /include/roaring/roaring_version.h automatically generated by release.py, do not change by hand
#ifndef ROARING_INCLUDE_ROARING_VERSION
#define ROARING_INCLUDE_ROARING_VERSION
-#define ROARING_VERSION "4.3.0"
+#define ROARING_VERSION "4.3.1"
enum {
ROARING_VERSION_MAJOR = 4,
ROARING_VERSION_MINOR = 3,
- ROARING_VERSION_REVISION = 0
+ ROARING_VERSION_REVISION = 1
};
#endif // ROARING_INCLUDE_ROARING_VERSION
// clang-format on \ No newline at end of file
diff --git a/contrib/libs/croaring/src/roaring.c b/contrib/libs/croaring/src/roaring.c
index 8f6b5a4f37..2363afa48f 100644
--- a/contrib/libs/croaring/src/roaring.c
+++ b/contrib/libs/croaring/src/roaring.c
@@ -614,7 +614,7 @@ void roaring_bitmap_remove(roaring_bitmap_t *r, uint32_t val) {
ra_set_container_at_index(&r->high_low_container, i, container2,
newtypecode);
}
- if (container_get_cardinality(container2, newtypecode) != 0) {
+ if (container_nonzero_cardinality(container2, newtypecode)) {
ra_set_container_at_index(&r->high_low_container, i, container2,
newtypecode);
} else {
diff --git a/contrib/libs/croaring/src/roaring64.c b/contrib/libs/croaring/src/roaring64.c
index bc65e8b0e5..de1009ea2c 100644
--- a/contrib/libs/croaring/src/roaring64.c
+++ b/contrib/libs/croaring/src/roaring64.c
@@ -697,6 +697,7 @@ static inline bool containerptr_roaring64_bitmap_remove(roaring64_bitmap_t *r,
container_free(container2, typecode2);
bool erased = art_erase(&r->art, high48, (art_val_t *)leaf);
assert(erased);
+ remove_container(r, *leaf);
return true;
}
return false;
@@ -2419,6 +2420,9 @@ roaring64_bitmap_t *roaring64_bitmap_frozen_view(const char *buf,
if (buf == NULL) {
return NULL;
}
+ if ((uintptr_t)buf % CROARING_BITSET_ALIGNMENT != 0) {
+ return NULL;
+ }
roaring64_bitmap_t *r = roaring64_bitmap_create();
diff --git a/contrib/libs/croaring/ya.make b/contrib/libs/croaring/ya.make
index 90f16b7b3d..557df6b83b 100644
--- a/contrib/libs/croaring/ya.make
+++ b/contrib/libs/croaring/ya.make
@@ -10,9 +10,9 @@ LICENSE(
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(4.3.0)
+VERSION(4.3.1)
-ORIGINAL_SOURCE(https://github.com/RoaringBitmap/CRoaring/archive/v4.3.0.tar.gz)
+ORIGINAL_SOURCE(https://github.com/RoaringBitmap/CRoaring/archive/v4.3.1.tar.gz)
ADDINCL(
GLOBAL contrib/libs/croaring/include
diff --git a/contrib/libs/cxxsupp/builtins/.yandex_meta/build.ym b/contrib/libs/cxxsupp/builtins/.yandex_meta/build.ym
index 04bc79b0c7..cecee81a03 100644
--- a/contrib/libs/cxxsupp/builtins/.yandex_meta/build.ym
+++ b/contrib/libs/cxxsupp/builtins/.yandex_meta/build.ym
@@ -1,6 +1,6 @@
{% extends '//builtin/bag.ym' %}
-{% block current_version %}20.1.0{% endblock %}
+{% block current_version %}20.1.1{% endblock %}
{% block current_url %}
https://github.com/llvm/llvm-project/releases/download/llvmorg-{{self.version().strip()}}/compiler-rt-{{self.version().strip()}}.src.tar.xz
diff --git a/contrib/libs/cxxsupp/builtins/ya.make b/contrib/libs/cxxsupp/builtins/ya.make
index dce2f27201..67e47b1a65 100644
--- a/contrib/libs/cxxsupp/builtins/ya.make
+++ b/contrib/libs/cxxsupp/builtins/ya.make
@@ -12,9 +12,9 @@ LICENSE(
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20.1.0)
+VERSION(20.1.1)
-ORIGINAL_SOURCE(https://github.com/llvm/llvm-project/releases/download/llvmorg-20.1.0/compiler-rt-20.1.0.src.tar.xz)
+ORIGINAL_SOURCE(https://github.com/llvm/llvm-project/releases/download/llvmorg-20.1.1/compiler-rt-20.1.1.src.tar.xz)
NO_COMPILER_WARNINGS()
diff --git a/contrib/libs/expat/.yandex_meta/devtools.copyrights.report b/contrib/libs/expat/.yandex_meta/devtools.copyrights.report
index b67c3a92e5..57cb69b65e 100644
--- a/contrib/libs/expat/.yandex_meta/devtools.copyrights.report
+++ b/contrib/libs/expat/.yandex_meta/devtools.copyrights.report
@@ -174,16 +174,6 @@ BELONGS ya.make
expat.h [9:22]
lib/internal.h [28:36]
-KEEP COPYRIGHT_SERVICE_LABEL 3781ecbe791ef15dc4cdefd436071b60
-BELONGS ya.make
- Note: matched license text is too long. Read it in the source files.
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- lib/xmlparse.c [9:44]
-
KEEP COPYRIGHT_SERVICE_LABEL 387a03e23bfe968e0bc1919b0ef65164
BELONGS ya.make
Note: matched license text is too long. Read it in the source files.
@@ -342,6 +332,27 @@ BELONGS ya.make
Files with this license:
lib/xmlparse.c [9:44]
+KEEP COPYRIGHT_SERVICE_LABEL 610290a5946ae0c99de2de99646ba086
+BELONGS ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ expat.h [9:22]
+ lib/xmlparse.c [9:44]
+
+KEEP COPYRIGHT_SERVICE_LABEL 61052a80fd00eeac5fb41a5ab5fdeff7
+BELONGS ya.make
+ Note: matched license text is too long. Read it in the source files.
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ lib/xmlparse.c [9:44]
+
KEEP COPYRIGHT_SERVICE_LABEL 6451d5e490271b354ad3b567c7a03423
BELONGS ya.make
Note: matched license text is too long. Read it in the source files.
@@ -791,9 +802,7 @@ BELONGS ya.make
Score : 100.00
Match type : COPYRIGHT
Files with this license:
- expat.h [9:22]
lib/internal.h [28:36]
- lib/xmlparse.c [9:44]
lib/xmltok.c [9:27]
lib/xmltok.h [9:15]
diff --git a/contrib/libs/expat/.yandex_meta/licenses.list.txt b/contrib/libs/expat/.yandex_meta/licenses.list.txt
index e23955221e..34ffa1ea4e 100644
--- a/contrib/libs/expat/.yandex_meta/licenses.list.txt
+++ b/contrib/libs/expat/.yandex_meta/licenses.list.txt
@@ -21,7 +21,7 @@
Copyright (c) 2000-2005 Fred L. Drake, Jr. <fdrake@users.sourceforge.net>
Copyright (c) 2001-2002 Greg Stein <gstein@users.sourceforge.net>
Copyright (c) 2002-2016 Karl Waclawek <karl@waclawek.net>
- Copyright (c) 2016-2024 Sebastian Pipping <sebastian@pipping.org>
+ Copyright (c) 2016-2025 Sebastian Pipping <sebastian@pipping.org>
Copyright (c) 2016 Cristian Rodríguez <crrodriguez@opensuse.org>
Copyright (c) 2016 Thomas Beutlich <tc@tbeu.de>
Copyright (c) 2017 Rhodri James <rhodri@wildebeest.org.uk>
@@ -40,7 +40,7 @@
Copyright (c) 2002-2016 Karl Waclawek <karl@waclawek.net>
Copyright (c) 2005-2009 Steven Solie <steven@solie.ca>
Copyright (c) 2016 Eric Rahm <erahm@mozilla.com>
- Copyright (c) 2016-2024 Sebastian Pipping <sebastian@pipping.org>
+ Copyright (c) 2016-2025 Sebastian Pipping <sebastian@pipping.org>
Copyright (c) 2016 Gaurav <g.gupta@samsung.com>
Copyright (c) 2016 Thomas Beutlich <tc@tbeu.de>
Copyright (c) 2016 Gustavo Grieco <gustavo.grieco@imag.fr>
@@ -66,7 +66,7 @@
Copyright (c) 2022 Sean McBride <sean@rogue-research.com>
Copyright (c) 2023 Owain Davies <owaind@bath.edu>
Copyright (c) 2023-2024 Sony Corporation / Snild Dolkow <snild@sony.com>
- Copyright (c) 2024 Berkay Eren Ürün <berkay.ueruen@siemens.com>
+ Copyright (c) 2024-2025 Berkay Eren Ürün <berkay.ueruen@siemens.com>
Copyright (c) 2024 Hanno Böck <hanno@gentoo.org>
Licensed under the MIT license:
diff --git a/contrib/libs/expat/.yandex_meta/override.nix b/contrib/libs/expat/.yandex_meta/override.nix
index a7493ed82e..01e7ca5b3c 100644
--- a/contrib/libs/expat/.yandex_meta/override.nix
+++ b/contrib/libs/expat/.yandex_meta/override.nix
@@ -1,12 +1,12 @@
pkgs: attrs: with pkgs; with attrs; rec {
- version = "2.6.4";
+ version = "2.7.0";
versionTag = "R_${lib.replaceStrings ["."] ["_"] version}";
src = fetchFromGitHub {
owner = "libexpat";
repo = "libexpat";
rev = "${versionTag}";
- hash = "sha256-ek8/3c8bKG+z7fIM+QCNsH7eoVGAt7z3bXBHZ3QjlS8=";
+ hash = "sha256-5is+ZwHM+tmKaVzDgO20wCJKJafnwxxRjNMDsv2qnYY=";
};
nativeBuildInputs = [ autoreconfHook ];
diff --git a/contrib/libs/expat/Changes b/contrib/libs/expat/Changes
index aa19f70ae2..1f5ba0a028 100644
--- a/contrib/libs/expat/Changes
+++ b/contrib/libs/expat/Changes
@@ -11,16 +11,23 @@
!! The following topics need *additional skilled C developers* to progress !!
!! in a timely manner or at all (loosely ordered by descending priority): !!
!! !!
-!! - <blink>fixing a complex non-public security issue</blink>, !!
!! - teaming up on researching and fixing future security reports and !!
!! ClusterFuzz findings with few-days-max response times in communication !!
!! in order to (1) have a sound fix ready before the end of a 90 days !!
!! grace period and (2) in a sustainable manner, !!
+!! - helping CPython Expat bindings with supporting Expat's billion laughs !!
+!! attack protection API (https://github.com/python/cpython/issues/90949): !!
+!! - XML_SetBillionLaughsAttackProtectionActivationThreshold !!
+!! - XML_SetBillionLaughsAttackProtectionMaximumAmplification !!
+!! - helping Perl's XML::Parser Expat bindings with supporting Expat's !!
+!! security API (https://github.com/cpan-authors/XML-Parser/issues/102): !!
+!! - XML_SetBillionLaughsAttackProtectionActivationThreshold !!
+!! - XML_SetBillionLaughsAttackProtectionMaximumAmplification !!
+!! - XML_SetReparseDeferralEnabled !!
!! - implementing and auto-testing XML 1.0r5 support !!
!! (needs discussion before pull requests), !!
!! - smart ideas on fixing the Autotools CMake files generation issue !!
!! without breaking CI (needs discussion before pull requests), !!
-!! - the Windows binaries topic (needs requirements engineering first), !!
!! - pushing migration from `int` to `size_t` further !!
!! including edge-cases test coverage (needs discussion before anything). !!
!! !!
@@ -30,6 +37,78 @@
!! THANK YOU! Sebastian Pipping -- Berlin, 2024-03-09 !!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+Release 2.7.0 Thu March 13 2025
+ Security fixes:
+ #893 #973 CVE-2024-8176 -- Fix crash from chaining a large number
+ of entities caused by stack overflow by resolving use of
+ recursion, for all three uses of entities:
+ - general entities in character data ("<e>&g1;</e>")
+ - general entities in attribute values ("<e k1='&g1;'/>")
+ - parameter entities ("%p1;")
+ Known impact is (reliable and easy) denial of service:
+ CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H/E:H/RL:O/RC:C
+ (Base Score: 7.5, Temporal Score: 7.2)
+ Please note that a layer of compression around XML can
+ significantly reduce the minimum attack payload size.
+
+ Other changes:
+ #935 #937 Autotools: Make generated CMake files look for
+ libexpat.@SO_MAJOR@.dylib on macOS
+ #925 Autotools: Sync CMake templates with CMake 3.29
+ #945 #962 #966 CMake: Drop support for CMake <3.13
+ #942 CMake: Small fuzzing related improvements
+ #921 docs: Add missing documentation of error code
+ XML_ERROR_NOT_STARTED that was introduced with 2.6.4
+ #941 docs: Document need for C++11 compiler for use from C++
+ #959 tests/benchmark: Fix a (harmless) TOCTTOU
+ #944 Windows: Fix installer target location of file xmlwf.xml
+ for CMake
+ #953 Windows: Address warning -Wunknown-warning-option
+ about -Wno-pedantic-ms-format from LLVM MinGW
+ #971 Address Cppcheck warnings
+ #969 #970 Mass-migrate links from http:// to https://
+ #947 #958 ..
+ #974 #975 Document changes since the previous release
+ #974 #975 Version info bumped from 11:0:10 (libexpat*.so.1.10.0)
+ to 11:1:10 (libexpat*.so.1.10.1); see https://verbump.de/
+ for what these numbers do
+
+ Infrastructure:
+ #926 tests: Increase robustness
+ #927 #932 ..
+ #930 #933 tests: Increase test coverage
+ #617 #950 ..
+ #951 #952 ..
+ #954 #955 .. Fuzzing: Add new fuzzer "xml_lpm_fuzzer" based on
+ #961 Google's libprotobuf-mutator ("LPM")
+ #957 Fuzzing|CI: Start producing fuzzing code coverage reports
+ #936 CI: Pass -q -q for LCOV >=2.1 in coverage.sh
+ #942 CI: Small fuzzing related improvements
+ #139 #203 ..
+ #791 #946 CI: Make GitHub Actions build using MSVC on Windows and
+ produce 32bit and 64bit Windows binaries
+ #956 CI: Get off of about-to-be-removed Ubuntu 20.04
+ #960 #964 CI: Start uploading to Coverity Scan for static analysis
+ #972 CI: Stop loading DTD from the internet to address flaky CI
+ #971 CI: Adapt to breaking changes in Cppcheck
+
+ Special thanks to:
+ Alexander Gieringer
+ Berkay Eren Ürün
+ Hanno Böck
+ Jann Horn
+ Mark Brand
+ Sebastian Andrzej Siewior
+ Snild Dolkow
+ Thomas Pröll
+ Tomas Korbar
+ valord577
+ and
+ Google Project Zero
+ Linutronix
+ Red Hat
+ Siemens
+
Release 2.6.4 Wed November 6 2024
Security fixes:
#915 CVE-2024-50602 -- Fix crash within function XML_ResumeParser
@@ -46,6 +125,8 @@ Release 2.6.4 Wed November 6 2024
#904 tests: Resolve duplicate handler
#317 #918 tests: Improve tests on doctype closing (ex CVE-2019-15903)
#914 Fix signedness of format strings
+ #915 For use from C++, expat.h started requiring C++11 due to
+ use of C99 features
#919 #920 Version info bumped from 10:3:9 (libexpat*.so.1.9.3)
to 11:0:10 (libexpat*.so.1.10.0); see https://verbump.de/
for what these numbers do
diff --git a/contrib/libs/expat/README.md b/contrib/libs/expat/README.md
index 23d26dad2b..04db829909 100644
--- a/contrib/libs/expat/README.md
+++ b/contrib/libs/expat/README.md
@@ -11,7 +11,7 @@
> at the top of the `Changes` file.
-# Expat, Release 2.6.4
+# Expat, Release 2.7.0
This is Expat, a C99 library for parsing
[XML 1.0 Fourth Edition](https://www.w3.org/TR/2006/REC-xml-20060816/), started by
@@ -22,9 +22,9 @@ are called when the parser discovers the associated structures in the
document being parsed. A start tag is an example of the kind of
structures for which you may register handlers.
-Expat supports the following compilers:
+Expat supports the following C99 compilers:
-- GNU GCC >=4.5
+- GNU GCC >=4.5 (for use from C) or GNU GCC >=4.8.1 (for use from C++)
- LLVM Clang >=3.5
- Microsoft Visual Studio >=16.0/2019 (rolling `${today} minus 5 years`)
@@ -52,7 +52,7 @@ This approach leverages CMake's own [module `FindEXPAT`](https://cmake.org/cmake
Notice the *uppercase* `EXPAT` in the following example:
```cmake
-cmake_minimum_required(VERSION 3.0) # or 3.10, see below
+cmake_minimum_required(VERSION 3.10)
project(hello VERSION 1.0.0)
@@ -62,12 +62,7 @@ add_executable(hello
hello.c
)
-# a) for CMake >=3.10 (see CMake's FindEXPAT docs)
target_link_libraries(hello PUBLIC EXPAT::EXPAT)
-
-# b) for CMake >=3.0
-target_include_directories(hello PRIVATE ${EXPAT_INCLUDE_DIRS})
-target_link_libraries(hello PUBLIC ${EXPAT_LIBRARIES})
```
### b) `find_package` with Config Mode
@@ -85,7 +80,7 @@ or
Notice the *lowercase* `expat` in the following example:
```cmake
-cmake_minimum_required(VERSION 3.0)
+cmake_minimum_required(VERSION 3.10)
project(hello VERSION 1.0.0)
@@ -295,7 +290,7 @@ EXPAT_ENABLE_INSTALL:BOOL=ON
// Use /MT flag (static CRT) when compiling in MSVC
EXPAT_MSVC_STATIC_CRT:BOOL=OFF
-// Build fuzzers via ossfuzz for the expat library
+// Build fuzzers via OSS-Fuzz for the expat library
EXPAT_OSSFUZZ_BUILD:BOOL=OFF
// Build a shared expat library
diff --git a/contrib/libs/expat/expat.h b/contrib/libs/expat/expat.h
index 523b37d8d5..192cfd3f07 100644
--- a/contrib/libs/expat/expat.h
+++ b/contrib/libs/expat/expat.h
@@ -11,7 +11,7 @@
Copyright (c) 2000-2005 Fred L. Drake, Jr. <fdrake@users.sourceforge.net>
Copyright (c) 2001-2002 Greg Stein <gstein@users.sourceforge.net>
Copyright (c) 2002-2016 Karl Waclawek <karl@waclawek.net>
- Copyright (c) 2016-2024 Sebastian Pipping <sebastian@pipping.org>
+ Copyright (c) 2016-2025 Sebastian Pipping <sebastian@pipping.org>
Copyright (c) 2016 Cristian Rodríguez <crrodriguez@opensuse.org>
Copyright (c) 2016 Thomas Beutlich <tc@tbeu.de>
Copyright (c) 2017 Rhodri James <rhodri@wildebeest.org.uk>
@@ -1067,8 +1067,8 @@ XML_SetReparseDeferralEnabled(XML_Parser parser, XML_Bool enabled);
See https://semver.org
*/
#define XML_MAJOR_VERSION 2
-#define XML_MINOR_VERSION 6
-#define XML_MICRO_VERSION 4
+#define XML_MINOR_VERSION 7
+#define XML_MICRO_VERSION 0
#ifdef __cplusplus
}
diff --git a/contrib/libs/expat/expat_config.h b/contrib/libs/expat/expat_config.h
index 48d7b064fa..4bb2d079bd 100644
--- a/contrib/libs/expat/expat_config.h
+++ b/contrib/libs/expat/expat_config.h
@@ -83,7 +83,7 @@
#define PACKAGE_NAME "expat"
/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "expat 2.6.4"
+#define PACKAGE_STRING "expat 2.7.0"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "expat"
@@ -92,7 +92,7 @@
#define PACKAGE_URL ""
/* Define to the version of this package. */
-#define PACKAGE_VERSION "2.6.4"
+#define PACKAGE_VERSION "2.7.0"
/* Define to 1 if all of the C90 standard headers exist (not just the ones
required in a freestanding environment). This macro is provided for
@@ -100,7 +100,7 @@
#define STDC_HEADERS 1
/* Version number of package */
-#define VERSION "2.6.4"
+#define VERSION "2.7.0"
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
significant byte first (like Motorola and SPARC, unlike Intel). */
diff --git a/contrib/libs/expat/lib/xmlparse.c b/contrib/libs/expat/lib/xmlparse.c
index e3e46da03b..1dca1c0397 100644
--- a/contrib/libs/expat/lib/xmlparse.c
+++ b/contrib/libs/expat/lib/xmlparse.c
@@ -1,4 +1,4 @@
-/* c5625880f4bf417c1463deee4eb92d86ff413f802048621c57e25fe483eb59e4 (2.6.4+)
+/* 7d6840a33c250b74adb0ba295d6ec818dccebebaffc8c3ed27d0b29c28adbeb3 (2.7.0+)
__ __ _
___\ \/ /_ __ __ _| |_
/ _ \\ /| '_ \ / _` | __|
@@ -13,7 +13,7 @@
Copyright (c) 2002-2016 Karl Waclawek <karl@waclawek.net>
Copyright (c) 2005-2009 Steven Solie <steven@solie.ca>
Copyright (c) 2016 Eric Rahm <erahm@mozilla.com>
- Copyright (c) 2016-2024 Sebastian Pipping <sebastian@pipping.org>
+ Copyright (c) 2016-2025 Sebastian Pipping <sebastian@pipping.org>
Copyright (c) 2016 Gaurav <g.gupta@samsung.com>
Copyright (c) 2016 Thomas Beutlich <tc@tbeu.de>
Copyright (c) 2016 Gustavo Grieco <gustavo.grieco@imag.fr>
@@ -39,7 +39,7 @@
Copyright (c) 2022 Sean McBride <sean@rogue-research.com>
Copyright (c) 2023 Owain Davies <owaind@bath.edu>
Copyright (c) 2023-2024 Sony Corporation / Snild Dolkow <snild@sony.com>
- Copyright (c) 2024 Berkay Eren Ürün <berkay.ueruen@siemens.com>
+ Copyright (c) 2024-2025 Berkay Eren Ürün <berkay.ueruen@siemens.com>
Copyright (c) 2024 Hanno Böck <hanno@gentoo.org>
Licensed under the MIT license:
@@ -325,6 +325,10 @@ typedef struct {
const XML_Char *publicId;
const XML_Char *notation;
XML_Bool open;
+ XML_Bool hasMore; /* true if entity has not been completely processed */
+ /* An entity can be open while being already completely processed (hasMore ==
+ XML_FALSE). The reason is the delayed closing of entities until their inner
+ entities are processed and closed */
XML_Bool is_param;
XML_Bool is_internal; /* true if declared in internal subset outside PE */
} ENTITY;
@@ -415,6 +419,12 @@ typedef struct {
int *scaffIndex;
} DTD;
+enum EntityType {
+ ENTITY_INTERNAL,
+ ENTITY_ATTRIBUTE,
+ ENTITY_VALUE,
+};
+
typedef struct open_internal_entity {
const char *internalEventPtr;
const char *internalEventEndPtr;
@@ -422,6 +432,7 @@ typedef struct open_internal_entity {
ENTITY *entity;
int startTagLevel;
XML_Bool betweenDecl; /* WFC: PE Between Declarations */
+ enum EntityType type;
} OPEN_INTERNAL_ENTITY;
enum XML_Account {
@@ -481,8 +492,8 @@ static enum XML_Error doProlog(XML_Parser parser, const ENCODING *enc,
const char *next, const char **nextPtr,
XML_Bool haveMore, XML_Bool allowClosingDoctype,
enum XML_Account account);
-static enum XML_Error processInternalEntity(XML_Parser parser, ENTITY *entity,
- XML_Bool betweenDecl);
+static enum XML_Error processEntity(XML_Parser parser, ENTITY *entity,
+ XML_Bool betweenDecl, enum EntityType type);
static enum XML_Error doContent(XML_Parser parser, int startTagLevel,
const ENCODING *enc, const char *start,
const char *end, const char **endPtr,
@@ -513,18 +524,22 @@ static enum XML_Error storeAttributeValue(XML_Parser parser,
const char *ptr, const char *end,
STRING_POOL *pool,
enum XML_Account account);
-static enum XML_Error appendAttributeValue(XML_Parser parser,
- const ENCODING *enc,
- XML_Bool isCdata, const char *ptr,
- const char *end, STRING_POOL *pool,
- enum XML_Account account);
+static enum XML_Error
+appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
+ const char *ptr, const char *end, STRING_POOL *pool,
+ enum XML_Account account, const char **nextPtr);
static ATTRIBUTE_ID *getAttributeId(XML_Parser parser, const ENCODING *enc,
const char *start, const char *end);
static int setElementTypePrefix(XML_Parser parser, ELEMENT_TYPE *elementType);
#if XML_GE == 1
static enum XML_Error storeEntityValue(XML_Parser parser, const ENCODING *enc,
const char *start, const char *end,
- enum XML_Account account);
+ enum XML_Account account,
+ const char **nextPtr);
+static enum XML_Error callStoreEntityValue(XML_Parser parser,
+ const ENCODING *enc,
+ const char *start, const char *end,
+ enum XML_Account account);
#else
static enum XML_Error storeSelfEntityValue(XML_Parser parser, ENTITY *entity);
#endif
@@ -709,6 +724,10 @@ struct XML_ParserStruct {
const char *m_positionPtr;
OPEN_INTERNAL_ENTITY *m_openInternalEntities;
OPEN_INTERNAL_ENTITY *m_freeInternalEntities;
+ OPEN_INTERNAL_ENTITY *m_openAttributeEntities;
+ OPEN_INTERNAL_ENTITY *m_freeAttributeEntities;
+ OPEN_INTERNAL_ENTITY *m_openValueEntities;
+ OPEN_INTERNAL_ENTITY *m_freeValueEntities;
XML_Bool m_defaultExpandInternalEntities;
int m_tagLevel;
ENTITY *m_declEntity;
@@ -756,6 +775,7 @@ struct XML_ParserStruct {
ACCOUNTING m_accounting;
ENTITY_STATS m_entity_stats;
#endif
+ XML_Bool m_reenter;
};
#define MALLOC(parser, s) (parser->m_mem.malloc_fcn((s)))
@@ -1028,7 +1048,29 @@ callProcessor(XML_Parser parser, const char *start, const char *end,
#if defined(XML_TESTING)
g_bytesScanned += (unsigned)have_now;
#endif
- const enum XML_Error ret = parser->m_processor(parser, start, end, endPtr);
+ // Run in a loop to eliminate dangerous recursion depths
+ enum XML_Error ret;
+ *endPtr = start;
+ while (1) {
+ // Use endPtr as the new start in each iteration, since it will
+ // be set to the next start point by m_processor.
+ ret = parser->m_processor(parser, *endPtr, end, endPtr);
+
+ // Make parsing status (and in particular XML_SUSPENDED) take
+ // precedence over re-enter flag when they disagree
+ if (parser->m_parsingStatus.parsing != XML_PARSING) {
+ parser->m_reenter = XML_FALSE;
+ }
+
+ if (! parser->m_reenter) {
+ break;
+ }
+
+ parser->m_reenter = XML_FALSE;
+ if (ret != XML_ERROR_NONE)
+ return ret;
+ }
+
if (ret == XML_ERROR_NONE) {
// if we consumed nothing, remember what we had on this parse attempt.
if (*endPtr == start) {
@@ -1139,6 +1181,8 @@ parserCreate(const XML_Char *encodingName,
parser->m_freeBindingList = NULL;
parser->m_freeTagList = NULL;
parser->m_freeInternalEntities = NULL;
+ parser->m_freeAttributeEntities = NULL;
+ parser->m_freeValueEntities = NULL;
parser->m_groupSize = 0;
parser->m_groupConnector = NULL;
@@ -1241,6 +1285,8 @@ parserInit(XML_Parser parser, const XML_Char *encodingName) {
parser->m_eventEndPtr = NULL;
parser->m_positionPtr = NULL;
parser->m_openInternalEntities = NULL;
+ parser->m_openAttributeEntities = NULL;
+ parser->m_openValueEntities = NULL;
parser->m_defaultExpandInternalEntities = XML_TRUE;
parser->m_tagLevel = 0;
parser->m_tagStack = NULL;
@@ -1251,6 +1297,8 @@ parserInit(XML_Parser parser, const XML_Char *encodingName) {
parser->m_unknownEncodingData = NULL;
parser->m_parentParser = NULL;
parser->m_parsingStatus.parsing = XML_INITIALIZED;
+ // Reentry can only be triggered inside m_processor calls
+ parser->m_reenter = XML_FALSE;
#ifdef XML_DTD
parser->m_isParamEntity = XML_FALSE;
parser->m_useForeignDTD = XML_FALSE;
@@ -1310,6 +1358,24 @@ XML_ParserReset(XML_Parser parser, const XML_Char *encodingName) {
openEntity->next = parser->m_freeInternalEntities;
parser->m_freeInternalEntities = openEntity;
}
+ /* move m_openAttributeEntities to m_freeAttributeEntities (i.e. same task but
+ * for attributes) */
+ openEntityList = parser->m_openAttributeEntities;
+ while (openEntityList) {
+ OPEN_INTERNAL_ENTITY *openEntity = openEntityList;
+ openEntityList = openEntity->next;
+ openEntity->next = parser->m_freeAttributeEntities;
+ parser->m_freeAttributeEntities = openEntity;
+ }
+ /* move m_openValueEntities to m_freeValueEntities (i.e. same task but
+ * for value entities) */
+ openEntityList = parser->m_openValueEntities;
+ while (openEntityList) {
+ OPEN_INTERNAL_ENTITY *openEntity = openEntityList;
+ openEntityList = openEntity->next;
+ openEntity->next = parser->m_freeValueEntities;
+ parser->m_freeValueEntities = openEntity;
+ }
moveToFreeBindingList(parser, parser->m_inheritedBindings);
FREE(parser, parser->m_unknownEncodingMem);
if (parser->m_unknownEncodingRelease)
@@ -1323,6 +1389,19 @@ XML_ParserReset(XML_Parser parser, const XML_Char *encodingName) {
return XML_TRUE;
}
+static XML_Bool
+parserBusy(XML_Parser parser) {
+ switch (parser->m_parsingStatus.parsing) {
+ case XML_PARSING:
+ case XML_SUSPENDED:
+ return XML_TRUE;
+ case XML_INITIALIZED:
+ case XML_FINISHED:
+ default:
+ return XML_FALSE;
+ }
+}
+
enum XML_Status XMLCALL
XML_SetEncoding(XML_Parser parser, const XML_Char *encodingName) {
if (parser == NULL)
@@ -1331,8 +1410,7 @@ XML_SetEncoding(XML_Parser parser, const XML_Char *encodingName) {
XXX There's no way for the caller to determine which of the
XXX possible error cases caused the XML_STATUS_ERROR return.
*/
- if (parser->m_parsingStatus.parsing == XML_PARSING
- || parser->m_parsingStatus.parsing == XML_SUSPENDED)
+ if (parserBusy(parser))
return XML_STATUS_ERROR;
/* Get rid of any previous encoding name */
@@ -1569,7 +1647,34 @@ XML_ParserFree(XML_Parser parser) {
entityList = entityList->next;
FREE(parser, openEntity);
}
-
+ /* free m_openAttributeEntities and m_freeAttributeEntities */
+ entityList = parser->m_openAttributeEntities;
+ for (;;) {
+ OPEN_INTERNAL_ENTITY *openEntity;
+ if (entityList == NULL) {
+ if (parser->m_freeAttributeEntities == NULL)
+ break;
+ entityList = parser->m_freeAttributeEntities;
+ parser->m_freeAttributeEntities = NULL;
+ }
+ openEntity = entityList;
+ entityList = entityList->next;
+ FREE(parser, openEntity);
+ }
+ /* free m_openValueEntities and m_freeValueEntities */
+ entityList = parser->m_openValueEntities;
+ for (;;) {
+ OPEN_INTERNAL_ENTITY *openEntity;
+ if (entityList == NULL) {
+ if (parser->m_freeValueEntities == NULL)
+ break;
+ entityList = parser->m_freeValueEntities;
+ parser->m_freeValueEntities = NULL;
+ }
+ openEntity = entityList;
+ entityList = entityList->next;
+ FREE(parser, openEntity);
+ }
destroyBindings(parser->m_freeBindingList, parser);
destroyBindings(parser->m_inheritedBindings, parser);
poolDestroy(&parser->m_tempPool);
@@ -1611,8 +1716,7 @@ XML_UseForeignDTD(XML_Parser parser, XML_Bool useDTD) {
return XML_ERROR_INVALID_ARGUMENT;
#ifdef XML_DTD
/* block after XML_Parse()/XML_ParseBuffer() has been called */
- if (parser->m_parsingStatus.parsing == XML_PARSING
- || parser->m_parsingStatus.parsing == XML_SUSPENDED)
+ if (parserBusy(parser))
return XML_ERROR_CANT_CHANGE_FEATURE_ONCE_PARSING;
parser->m_useForeignDTD = useDTD;
return XML_ERROR_NONE;
@@ -1627,8 +1731,7 @@ XML_SetReturnNSTriplet(XML_Parser parser, int do_nst) {
if (parser == NULL)
return;
/* block after XML_Parse()/XML_ParseBuffer() has been called */
- if (parser->m_parsingStatus.parsing == XML_PARSING
- || parser->m_parsingStatus.parsing == XML_SUSPENDED)
+ if (parserBusy(parser))
return;
parser->m_ns_triplets = do_nst ? XML_TRUE : XML_FALSE;
}
@@ -1897,8 +2000,7 @@ XML_SetParamEntityParsing(XML_Parser parser,
if (parser == NULL)
return 0;
/* block after XML_Parse()/XML_ParseBuffer() has been called */
- if (parser->m_parsingStatus.parsing == XML_PARSING
- || parser->m_parsingStatus.parsing == XML_SUSPENDED)
+ if (parserBusy(parser))
return 0;
#ifdef XML_DTD
parser->m_paramEntityParsing = peParsing;
@@ -1915,8 +2017,7 @@ XML_SetHashSalt(XML_Parser parser, unsigned long hash_salt) {
if (parser->m_parentParser)
return XML_SetHashSalt(parser->m_parentParser, hash_salt);
/* block after XML_Parse()/XML_ParseBuffer() has been called */
- if (parser->m_parsingStatus.parsing == XML_PARSING
- || parser->m_parsingStatus.parsing == XML_SUSPENDED)
+ if (parserBusy(parser))
return 0;
parser->m_hash_secret_salt = hash_salt;
return 1;
@@ -2230,6 +2331,11 @@ XML_GetBuffer(XML_Parser parser, int len) {
return parser->m_bufferEnd;
}
+static void
+triggerReenter(XML_Parser parser) {
+ parser->m_reenter = XML_TRUE;
+}
+
enum XML_Status XMLCALL
XML_StopParser(XML_Parser parser, XML_Bool resumable) {
if (parser == NULL)
@@ -2704,8 +2810,9 @@ static enum XML_Error PTRCALL
contentProcessor(XML_Parser parser, const char *start, const char *end,
const char **endPtr) {
enum XML_Error result = doContent(
- parser, 0, parser->m_encoding, start, end, endPtr,
- (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_ACCOUNT_DIRECT);
+ parser, parser->m_parentParser ? 1 : 0, parser->m_encoding, start, end,
+ endPtr, (XML_Bool)! parser->m_parsingStatus.finalBuffer,
+ XML_ACCOUNT_DIRECT);
if (result == XML_ERROR_NONE) {
if (! storeRawNames(parser))
return XML_ERROR_NO_MEMORY;
@@ -2793,6 +2900,11 @@ externalEntityInitProcessor3(XML_Parser parser, const char *start,
return XML_ERROR_NONE;
case XML_FINISHED:
return XML_ERROR_ABORTED;
+ case XML_PARSING:
+ if (parser->m_reenter) {
+ return XML_ERROR_UNEXPECTED_STATE; // LCOV_EXCL_LINE
+ }
+ /* Fall through */
default:
start = next;
}
@@ -2966,7 +3078,7 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
reportDefault(parser, enc, s, next);
break;
}
- result = processInternalEntity(parser, entity, XML_FALSE);
+ result = processEntity(parser, entity, XML_FALSE, ENTITY_INTERNAL);
if (result != XML_ERROR_NONE)
return result;
} else if (parser->m_externalEntityRefHandler) {
@@ -3092,7 +3204,9 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
}
if ((parser->m_tagLevel == 0)
&& (parser->m_parsingStatus.parsing != XML_FINISHED)) {
- if (parser->m_parsingStatus.parsing == XML_SUSPENDED)
+ if (parser->m_parsingStatus.parsing == XML_SUSPENDED
+ || (parser->m_parsingStatus.parsing == XML_PARSING
+ && parser->m_reenter))
parser->m_processor = epilogProcessor;
else
return epilogProcessor(parser, next, end, nextPtr);
@@ -3153,7 +3267,9 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
}
if ((parser->m_tagLevel == 0)
&& (parser->m_parsingStatus.parsing != XML_FINISHED)) {
- if (parser->m_parsingStatus.parsing == XML_SUSPENDED)
+ if (parser->m_parsingStatus.parsing == XML_SUSPENDED
+ || (parser->m_parsingStatus.parsing == XML_PARSING
+ && parser->m_reenter))
parser->m_processor = epilogProcessor;
else
return epilogProcessor(parser, next, end, nextPtr);
@@ -3293,6 +3409,12 @@ doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
return XML_ERROR_NONE;
case XML_FINISHED:
return XML_ERROR_ABORTED;
+ case XML_PARSING:
+ if (parser->m_reenter) {
+ *nextPtr = next;
+ return XML_ERROR_NONE;
+ }
+ /* Fall through */
default:;
}
}
@@ -4217,6 +4339,11 @@ doCdataSection(XML_Parser parser, const ENCODING *enc, const char **startPtr,
return XML_ERROR_NONE;
case XML_FINISHED:
return XML_ERROR_ABORTED;
+ case XML_PARSING:
+ if (parser->m_reenter) {
+ return XML_ERROR_UNEXPECTED_STATE; // LCOV_EXCL_LINE
+ }
+ /* Fall through */
default:;
}
}
@@ -4549,7 +4676,7 @@ entityValueInitProcessor(XML_Parser parser, const char *s, const char *end,
}
/* found end of entity value - can store it now */
return storeEntityValue(parser, parser->m_encoding, s, end,
- XML_ACCOUNT_DIRECT);
+ XML_ACCOUNT_DIRECT, NULL);
} else if (tok == XML_TOK_XML_DECL) {
enum XML_Error result;
result = processXmlDecl(parser, 0, start, next);
@@ -4676,7 +4803,7 @@ entityValueProcessor(XML_Parser parser, const char *s, const char *end,
break;
}
/* found end of entity value - can store it now */
- return storeEntityValue(parser, enc, s, end, XML_ACCOUNT_DIRECT);
+ return storeEntityValue(parser, enc, s, end, XML_ACCOUNT_DIRECT, NULL);
}
start = next;
}
@@ -5119,9 +5246,9 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
#if XML_GE == 1
// This will store the given replacement text in
// parser->m_declEntity->textPtr.
- enum XML_Error result
- = storeEntityValue(parser, enc, s + enc->minBytesPerChar,
- next - enc->minBytesPerChar, XML_ACCOUNT_NONE);
+ enum XML_Error result = callStoreEntityValue(
+ parser, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar,
+ XML_ACCOUNT_NONE);
if (parser->m_declEntity) {
parser->m_declEntity->textPtr = poolStart(&dtd->entityValuePool);
parser->m_declEntity->textLen
@@ -5546,7 +5673,7 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
enum XML_Error result;
XML_Bool betweenDecl
= (role == XML_ROLE_PARAM_ENTITY_REF ? XML_TRUE : XML_FALSE);
- result = processInternalEntity(parser, entity, betweenDecl);
+ result = processEntity(parser, entity, betweenDecl, ENTITY_INTERNAL);
if (result != XML_ERROR_NONE)
return result;
handleDefault = XML_FALSE;
@@ -5751,6 +5878,12 @@ doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
return XML_ERROR_NONE;
case XML_FINISHED:
return XML_ERROR_ABORTED;
+ case XML_PARSING:
+ if (parser->m_reenter) {
+ *nextPtr = next;
+ return XML_ERROR_NONE;
+ }
+ /* Fall through */
default:
s = next;
tok = XmlPrologTok(enc, s, end, &next);
@@ -5825,21 +5958,49 @@ epilogProcessor(XML_Parser parser, const char *s, const char *end,
return XML_ERROR_NONE;
case XML_FINISHED:
return XML_ERROR_ABORTED;
+ case XML_PARSING:
+ if (parser->m_reenter) {
+ return XML_ERROR_UNEXPECTED_STATE; // LCOV_EXCL_LINE
+ }
+ /* Fall through */
default:;
}
}
}
static enum XML_Error
-processInternalEntity(XML_Parser parser, ENTITY *entity, XML_Bool betweenDecl) {
- const char *textStart, *textEnd;
- const char *next;
- enum XML_Error result;
- OPEN_INTERNAL_ENTITY *openEntity;
+processEntity(XML_Parser parser, ENTITY *entity, XML_Bool betweenDecl,
+ enum EntityType type) {
+ OPEN_INTERNAL_ENTITY *openEntity, **openEntityList, **freeEntityList;
+ switch (type) {
+ case ENTITY_INTERNAL:
+ parser->m_processor = internalEntityProcessor;
+ openEntityList = &parser->m_openInternalEntities;
+ freeEntityList = &parser->m_freeInternalEntities;
+ break;
+ case ENTITY_ATTRIBUTE:
+ openEntityList = &parser->m_openAttributeEntities;
+ freeEntityList = &parser->m_freeAttributeEntities;
+ break;
+ case ENTITY_VALUE:
+ openEntityList = &parser->m_openValueEntities;
+ freeEntityList = &parser->m_freeValueEntities;
+ break;
+ /* default case serves merely as a safety net in case of a
+ * wrong entityType. Therefore we exclude the following lines
+ * from the test coverage.
+ *
+ * LCOV_EXCL_START
+ */
+ default:
+ // Should not reach here
+ assert(0);
+ /* LCOV_EXCL_STOP */
+ }
- if (parser->m_freeInternalEntities) {
- openEntity = parser->m_freeInternalEntities;
- parser->m_freeInternalEntities = openEntity->next;
+ if (*freeEntityList) {
+ openEntity = *freeEntityList;
+ *freeEntityList = openEntity->next;
} else {
openEntity
= (OPEN_INTERNAL_ENTITY *)MALLOC(parser, sizeof(OPEN_INTERNAL_ENTITY));
@@ -5847,55 +6008,34 @@ processInternalEntity(XML_Parser parser, ENTITY *entity, XML_Bool betweenDecl) {
return XML_ERROR_NO_MEMORY;
}
entity->open = XML_TRUE;
+ entity->hasMore = XML_TRUE;
#if XML_GE == 1
entityTrackingOnOpen(parser, entity, __LINE__);
#endif
entity->processed = 0;
- openEntity->next = parser->m_openInternalEntities;
- parser->m_openInternalEntities = openEntity;
+ openEntity->next = *openEntityList;
+ *openEntityList = openEntity;
openEntity->entity = entity;
+ openEntity->type = type;
openEntity->startTagLevel = parser->m_tagLevel;
openEntity->betweenDecl = betweenDecl;
openEntity->internalEventPtr = NULL;
openEntity->internalEventEndPtr = NULL;
- textStart = (const char *)entity->textPtr;
- textEnd = (const char *)(entity->textPtr + entity->textLen);
- /* Set a safe default value in case 'next' does not get set */
- next = textStart;
-
- if (entity->is_param) {
- int tok
- = XmlPrologTok(parser->m_internalEncoding, textStart, textEnd, &next);
- result = doProlog(parser, parser->m_internalEncoding, textStart, textEnd,
- tok, next, &next, XML_FALSE, XML_FALSE,
- XML_ACCOUNT_ENTITY_EXPANSION);
- } else {
- result = doContent(parser, parser->m_tagLevel, parser->m_internalEncoding,
- textStart, textEnd, &next, XML_FALSE,
- XML_ACCOUNT_ENTITY_EXPANSION);
- }
- if (result == XML_ERROR_NONE) {
- if (textEnd != next && parser->m_parsingStatus.parsing == XML_SUSPENDED) {
- entity->processed = (int)(next - textStart);
- parser->m_processor = internalEntityProcessor;
- } else if (parser->m_openInternalEntities->entity == entity) {
-#if XML_GE == 1
- entityTrackingOnClose(parser, entity, __LINE__);
-#endif /* XML_GE == 1 */
- entity->open = XML_FALSE;
- parser->m_openInternalEntities = openEntity->next;
- /* put openEntity back in list of free instances */
- openEntity->next = parser->m_freeInternalEntities;
- parser->m_freeInternalEntities = openEntity;
- }
+ // Only internal entities make use of the reenter flag
+ // therefore no need to set it for other entity types
+ if (type == ENTITY_INTERNAL) {
+ triggerReenter(parser);
}
- return result;
+ return XML_ERROR_NONE;
}
static enum XML_Error PTRCALL
internalEntityProcessor(XML_Parser parser, const char *s, const char *end,
const char **nextPtr) {
+ UNUSED_P(s);
+ UNUSED_P(end);
+ UNUSED_P(nextPtr);
ENTITY *entity;
const char *textStart, *textEnd;
const char *next;
@@ -5905,68 +6045,67 @@ internalEntityProcessor(XML_Parser parser, const char *s, const char *end,
return XML_ERROR_UNEXPECTED_STATE;
entity = openEntity->entity;
- textStart = ((const char *)entity->textPtr) + entity->processed;
- textEnd = (const char *)(entity->textPtr + entity->textLen);
- /* Set a safe default value in case 'next' does not get set */
- next = textStart;
-
- if (entity->is_param) {
- int tok
- = XmlPrologTok(parser->m_internalEncoding, textStart, textEnd, &next);
- result = doProlog(parser, parser->m_internalEncoding, textStart, textEnd,
- tok, next, &next, XML_FALSE, XML_TRUE,
- XML_ACCOUNT_ENTITY_EXPANSION);
- } else {
- result = doContent(parser, openEntity->startTagLevel,
- parser->m_internalEncoding, textStart, textEnd, &next,
- XML_FALSE, XML_ACCOUNT_ENTITY_EXPANSION);
- }
- if (result != XML_ERROR_NONE)
- return result;
+ // This will return early
+ if (entity->hasMore) {
+ textStart = ((const char *)entity->textPtr) + entity->processed;
+ textEnd = (const char *)(entity->textPtr + entity->textLen);
+ /* Set a safe default value in case 'next' does not get set */
+ next = textStart;
+
+ if (entity->is_param) {
+ int tok
+ = XmlPrologTok(parser->m_internalEncoding, textStart, textEnd, &next);
+ result = doProlog(parser, parser->m_internalEncoding, textStart, textEnd,
+ tok, next, &next, XML_FALSE, XML_FALSE,
+ XML_ACCOUNT_ENTITY_EXPANSION);
+ } else {
+ result = doContent(parser, openEntity->startTagLevel,
+ parser->m_internalEncoding, textStart, textEnd, &next,
+ XML_FALSE, XML_ACCOUNT_ENTITY_EXPANSION);
+ }
+
+ if (result != XML_ERROR_NONE)
+ return result;
+ // Check if entity is complete, if not, mark down how much of it is
+ // processed
+ if (textEnd != next
+ && (parser->m_parsingStatus.parsing == XML_SUSPENDED
+ || (parser->m_parsingStatus.parsing == XML_PARSING
+ && parser->m_reenter))) {
+ entity->processed = (int)(next - (const char *)entity->textPtr);
+ return result;
+ }
- if (textEnd != next && parser->m_parsingStatus.parsing == XML_SUSPENDED) {
- entity->processed = (int)(next - (const char *)entity->textPtr);
+ // Entity is complete. We cannot close it here since we need to first
+ // process its possible inner entities (which are added to the
+ // m_openInternalEntities during doProlog or doContent calls above)
+ entity->hasMore = XML_FALSE;
+ triggerReenter(parser);
return result;
- }
+ } // End of entity processing, "if" block will return here
+ // Remove fully processed openEntity from open entity list.
#if XML_GE == 1
entityTrackingOnClose(parser, entity, __LINE__);
#endif
+ // openEntity is m_openInternalEntities' head, as we set it at the start of
+ // this function and we skipped doProlog and doContent calls with hasMore set
+ // to false. This means we can directly remove the head of
+ // m_openInternalEntities
+ assert(parser->m_openInternalEntities == openEntity);
entity->open = XML_FALSE;
- parser->m_openInternalEntities = openEntity->next;
+ parser->m_openInternalEntities = parser->m_openInternalEntities->next;
+
/* put openEntity back in list of free instances */
openEntity->next = parser->m_freeInternalEntities;
parser->m_freeInternalEntities = openEntity;
- // If there are more open entities we want to stop right here and have the
- // upcoming call to XML_ResumeParser continue with entity content, or it would
- // be ignored altogether.
- if (parser->m_openInternalEntities != NULL
- && parser->m_parsingStatus.parsing == XML_SUSPENDED) {
- return XML_ERROR_NONE;
- }
-
- if (entity->is_param) {
- int tok;
- parser->m_processor = prologProcessor;
- tok = XmlPrologTok(parser->m_encoding, s, end, &next);
- return doProlog(parser, parser->m_encoding, s, end, tok, next, nextPtr,
- (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_TRUE,
- XML_ACCOUNT_DIRECT);
- } else {
- parser->m_processor = contentProcessor;
- /* see externalEntityContentProcessor vs contentProcessor */
- result = doContent(parser, parser->m_parentParser ? 1 : 0,
- parser->m_encoding, s, end, nextPtr,
- (XML_Bool)! parser->m_parsingStatus.finalBuffer,
- XML_ACCOUNT_DIRECT);
- if (result == XML_ERROR_NONE) {
- if (! storeRawNames(parser))
- return XML_ERROR_NO_MEMORY;
- }
- return result;
+ if (parser->m_openInternalEntities == NULL) {
+ parser->m_processor = entity->is_param ? prologProcessor : contentProcessor;
}
+ triggerReenter(parser);
+ return XML_ERROR_NONE;
}
static enum XML_Error PTRCALL
@@ -5982,8 +6121,70 @@ static enum XML_Error
storeAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
const char *ptr, const char *end, STRING_POOL *pool,
enum XML_Account account) {
- enum XML_Error result
- = appendAttributeValue(parser, enc, isCdata, ptr, end, pool, account);
+ const char *next = ptr;
+ enum XML_Error result = XML_ERROR_NONE;
+
+ while (1) {
+ if (! parser->m_openAttributeEntities) {
+ result = appendAttributeValue(parser, enc, isCdata, next, end, pool,
+ account, &next);
+ } else {
+ OPEN_INTERNAL_ENTITY *const openEntity = parser->m_openAttributeEntities;
+ if (! openEntity)
+ return XML_ERROR_UNEXPECTED_STATE;
+
+ ENTITY *const entity = openEntity->entity;
+ const char *const textStart
+ = ((const char *)entity->textPtr) + entity->processed;
+ const char *const textEnd
+ = (const char *)(entity->textPtr + entity->textLen);
+ /* Set a safe default value in case 'next' does not get set */
+ const char *nextInEntity = textStart;
+ if (entity->hasMore) {
+ result = appendAttributeValue(
+ parser, parser->m_internalEncoding, isCdata, textStart, textEnd,
+ pool, XML_ACCOUNT_ENTITY_EXPANSION, &nextInEntity);
+ if (result != XML_ERROR_NONE)
+ break;
+ // Check if entity is complete, if not, mark down how much of it is
+ // processed. A XML_SUSPENDED check here is not required as
+ // appendAttributeValue will never suspend the parser.
+ if (textEnd != nextInEntity) {
+ entity->processed
+ = (int)(nextInEntity - (const char *)entity->textPtr);
+ continue;
+ }
+
+ // Entity is complete. We cannot close it here since we need to first
+ // process its possible inner entities (which are added to the
+ // m_openAttributeEntities during appendAttributeValue)
+ entity->hasMore = XML_FALSE;
+ continue;
+ } // End of entity processing, "if" block skips the rest
+
+ // Remove fully processed openEntity from open entity list.
+#if XML_GE == 1
+ entityTrackingOnClose(parser, entity, __LINE__);
+#endif
+ // openEntity is m_openAttributeEntities' head, since we set it at the
+ // start of this function and because we skipped appendAttributeValue call
+ // with hasMore set to false. This means we can directly remove the head
+ // of m_openAttributeEntities
+ assert(parser->m_openAttributeEntities == openEntity);
+ entity->open = XML_FALSE;
+ parser->m_openAttributeEntities = parser->m_openAttributeEntities->next;
+
+ /* put openEntity back in list of free instances */
+ openEntity->next = parser->m_freeAttributeEntities;
+ parser->m_freeAttributeEntities = openEntity;
+ }
+
+ // Break if an error occurred or there is nothing left to process
+ if (result || (parser->m_openAttributeEntities == NULL && end == next)) {
+ break;
+ }
+ }
+
if (result)
return result;
if (! isCdata && poolLength(pool) && poolLastChar(pool) == 0x20)
@@ -5996,7 +6197,7 @@ storeAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
static enum XML_Error
appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
const char *ptr, const char *end, STRING_POOL *pool,
- enum XML_Account account) {
+ enum XML_Account account, const char **nextPtr) {
DTD *const dtd = parser->m_dtd; /* save one level of indirection */
#ifndef XML_DTD
UNUSED_P(account);
@@ -6014,6 +6215,9 @@ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
#endif
switch (tok) {
case XML_TOK_NONE:
+ if (nextPtr) {
+ *nextPtr = next;
+ }
return XML_ERROR_NONE;
case XML_TOK_INVALID:
if (enc == parser->m_encoding)
@@ -6154,21 +6358,11 @@ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
return XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF;
} else {
enum XML_Error result;
- const XML_Char *textEnd = entity->textPtr + entity->textLen;
- entity->open = XML_TRUE;
-#if XML_GE == 1
- entityTrackingOnOpen(parser, entity, __LINE__);
-#endif
- result = appendAttributeValue(parser, parser->m_internalEncoding,
- isCdata, (const char *)entity->textPtr,
- (const char *)textEnd, pool,
- XML_ACCOUNT_ENTITY_EXPANSION);
-#if XML_GE == 1
- entityTrackingOnClose(parser, entity, __LINE__);
-#endif
- entity->open = XML_FALSE;
- if (result)
- return result;
+ result = processEntity(parser, entity, XML_FALSE, ENTITY_ATTRIBUTE);
+ if ((result == XML_ERROR_NONE) && (nextPtr != NULL)) {
+ *nextPtr = next;
+ }
+ return result;
}
} break;
default:
@@ -6197,7 +6391,7 @@ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
static enum XML_Error
storeEntityValue(XML_Parser parser, const ENCODING *enc,
const char *entityTextPtr, const char *entityTextEnd,
- enum XML_Account account) {
+ enum XML_Account account, const char **nextPtr) {
DTD *const dtd = parser->m_dtd; /* save one level of indirection */
STRING_POOL *pool = &(dtd->entityValuePool);
enum XML_Error result = XML_ERROR_NONE;
@@ -6215,8 +6409,9 @@ storeEntityValue(XML_Parser parser, const ENCODING *enc,
return XML_ERROR_NO_MEMORY;
}
+ const char *next;
for (;;) {
- const char *next
+ next
= entityTextPtr; /* XmlEntityValueTok doesn't always set the last arg */
int tok = XmlEntityValueTok(enc, entityTextPtr, entityTextEnd, &next);
@@ -6278,16 +6473,8 @@ storeEntityValue(XML_Parser parser, const ENCODING *enc,
} else
dtd->keepProcessing = dtd->standalone;
} else {
- entity->open = XML_TRUE;
- entityTrackingOnOpen(parser, entity, __LINE__);
- result = storeEntityValue(
- parser, parser->m_internalEncoding, (const char *)entity->textPtr,
- (const char *)(entity->textPtr + entity->textLen),
- XML_ACCOUNT_ENTITY_EXPANSION);
- entityTrackingOnClose(parser, entity, __LINE__);
- entity->open = XML_FALSE;
- if (result)
- goto endEntityValue;
+ result = processEntity(parser, entity, XML_FALSE, ENTITY_VALUE);
+ goto endEntityValue;
}
break;
}
@@ -6375,6 +6562,81 @@ endEntityValue:
# ifdef XML_DTD
parser->m_prologState.inEntityValue = oldInEntityValue;
# endif /* XML_DTD */
+ // If 'nextPtr' is given, it should be updated during the processing
+ if (nextPtr != NULL) {
+ *nextPtr = next;
+ }
+ return result;
+}
+
+static enum XML_Error
+callStoreEntityValue(XML_Parser parser, const ENCODING *enc,
+ const char *entityTextPtr, const char *entityTextEnd,
+ enum XML_Account account) {
+ const char *next = entityTextPtr;
+ enum XML_Error result = XML_ERROR_NONE;
+ while (1) {
+ if (! parser->m_openValueEntities) {
+ result
+ = storeEntityValue(parser, enc, next, entityTextEnd, account, &next);
+ } else {
+ OPEN_INTERNAL_ENTITY *const openEntity = parser->m_openValueEntities;
+ if (! openEntity)
+ return XML_ERROR_UNEXPECTED_STATE;
+
+ ENTITY *const entity = openEntity->entity;
+ const char *const textStart
+ = ((const char *)entity->textPtr) + entity->processed;
+ const char *const textEnd
+ = (const char *)(entity->textPtr + entity->textLen);
+ /* Set a safe default value in case 'next' does not get set */
+ const char *nextInEntity = textStart;
+ if (entity->hasMore) {
+ result = storeEntityValue(parser, parser->m_internalEncoding, textStart,
+ textEnd, XML_ACCOUNT_ENTITY_EXPANSION,
+ &nextInEntity);
+ if (result != XML_ERROR_NONE)
+ break;
+ // Check if entity is complete, if not, mark down how much of it is
+ // processed. A XML_SUSPENDED check here is not required as
+ // appendAttributeValue will never suspend the parser.
+ if (textEnd != nextInEntity) {
+ entity->processed
+ = (int)(nextInEntity - (const char *)entity->textPtr);
+ continue;
+ }
+
+ // Entity is complete. We cannot close it here since we need to first
+ // process its possible inner entities (which are added to the
+ // m_openValueEntities during storeEntityValue)
+ entity->hasMore = XML_FALSE;
+ continue;
+ } // End of entity processing, "if" block skips the rest
+
+ // Remove fully processed openEntity from open entity list.
+# if XML_GE == 1
+ entityTrackingOnClose(parser, entity, __LINE__);
+# endif
+ // openEntity is m_openValueEntities' head, since we set it at the
+ // start of this function and because we skipped storeEntityValue call
+ // with hasMore set to false. This means we can directly remove the head
+ // of m_openValueEntities
+ assert(parser->m_openValueEntities == openEntity);
+ entity->open = XML_FALSE;
+ parser->m_openValueEntities = parser->m_openValueEntities->next;
+
+ /* put openEntity back in list of free instances */
+ openEntity->next = parser->m_freeValueEntities;
+ parser->m_freeValueEntities = openEntity;
+ }
+
+ // Break if an error occurred or there is nothing left to process
+ if (result
+ || (parser->m_openValueEntities == NULL && entityTextEnd == next)) {
+ break;
+ }
+ }
+
return result;
}
@@ -8542,11 +8804,13 @@ unsignedCharToPrintable(unsigned char c) {
return "\\xFE";
case 255:
return "\\xFF";
+ // LCOV_EXCL_START
default:
assert(0); /* never gets here */
return "dead code";
}
assert(0); /* never gets here */
+ // LCOV_EXCL_STOP
}
#endif /* XML_GE == 1 */
diff --git a/contrib/libs/expat/ya.make b/contrib/libs/expat/ya.make
index 489ae45db1..b41c7311e2 100644
--- a/contrib/libs/expat/ya.make
+++ b/contrib/libs/expat/ya.make
@@ -10,9 +10,9 @@ LICENSE(
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(2.6.4)
+VERSION(2.7.0)
-ORIGINAL_SOURCE(https://github.com/libexpat/libexpat/archive/R_2_6_4.tar.gz)
+ORIGINAL_SOURCE(https://github.com/libexpat/libexpat/archive/R_2_7_0.tar.gz)
ADDINCL(
contrib/libs/expat
diff --git a/contrib/libs/libfuzzer/.yandex_meta/override.nix b/contrib/libs/libfuzzer/.yandex_meta/override.nix
index 7415420b65..15a7622b27 100644
--- a/contrib/libs/libfuzzer/.yandex_meta/override.nix
+++ b/contrib/libs/libfuzzer/.yandex_meta/override.nix
@@ -1,11 +1,11 @@
pkgs: attrs: with pkgs; with attrs; rec {
- version = "20.1.0";
+ version = "20.1.2";
src = fetchFromGitHub {
owner = "llvm";
repo = "llvm-project";
rev = "llvmorg-${version}";
- hash = "sha256-86Z8e4ubnHJc1cYHjYPLeQC9eoPF417HYtqg8NAzxts=";
+ hash = "sha256-t30Jh8ckp5qD6XDxtvnSaYiAWbEi6L6hAWh6tN8JjtY=";
};
sourceRoot = "source/compiler-rt";
diff --git a/contrib/libs/libfuzzer/lib/fuzzer/afl/ya.make b/contrib/libs/libfuzzer/lib/fuzzer/afl/ya.make
index 0315d60cc6..2cf1cb720d 100644
--- a/contrib/libs/libfuzzer/lib/fuzzer/afl/ya.make
+++ b/contrib/libs/libfuzzer/lib/fuzzer/afl/ya.make
@@ -8,7 +8,7 @@ LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
SUBSCRIBER(g:cpp-contrib)
-VERSION(20.1.0)
+VERSION(20.1.2)
PEERDIR(
contrib/libs/afl/llvm_mode
diff --git a/contrib/libs/libfuzzer/ya.make b/contrib/libs/libfuzzer/ya.make
index 938438e570..787eeaea14 100644
--- a/contrib/libs/libfuzzer/ya.make
+++ b/contrib/libs/libfuzzer/ya.make
@@ -12,9 +12,9 @@ LICENSE(
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20.1.0)
+VERSION(20.1.2)
-ORIGINAL_SOURCE(https://github.com/llvm/llvm-project/archive/llvmorg-20.1.0.tar.gz)
+ORIGINAL_SOURCE(https://github.com/llvm/llvm-project/archive/llvmorg-20.1.2.tar.gz)
SET(SANITIZER_CFLAGS)
diff --git a/contrib/libs/libunwind/.yandex_meta/override.nix b/contrib/libs/libunwind/.yandex_meta/override.nix
index 29e81dd677..2666c93f5f 100644
--- a/contrib/libs/libunwind/.yandex_meta/override.nix
+++ b/contrib/libs/libunwind/.yandex_meta/override.nix
@@ -1,11 +1,11 @@
pkgs: attrs: with pkgs; with attrs; rec {
- version = "20.1.0";
+ version = "20.1.2";
src = fetchFromGitHub {
owner = "llvm";
repo = "llvm-project";
rev = "llvmorg-${version}";
- hash = "sha256-86Z8e4ubnHJc1cYHjYPLeQC9eoPF417HYtqg8NAzxts=";
+ hash = "sha256-t30Jh8ckp5qD6XDxtvnSaYiAWbEi6L6hAWh6tN8JjtY=";
};
patches = [];
diff --git a/contrib/libs/libunwind/ya.make b/contrib/libs/libunwind/ya.make
index b04a57ec9e..c2478a4035 100644
--- a/contrib/libs/libunwind/ya.make
+++ b/contrib/libs/libunwind/ya.make
@@ -11,9 +11,9 @@ LICENSE(
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(20.1.0)
+VERSION(20.1.2)
-ORIGINAL_SOURCE(https://github.com/llvm/llvm-project/archive/llvmorg-20.1.0.tar.gz)
+ORIGINAL_SOURCE(https://github.com/llvm/llvm-project/archive/llvmorg-20.1.2.tar.gz)
PEERDIR(
library/cpp/sanitizer/include
diff --git a/contrib/python/iniconfig/.dist-info/METADATA b/contrib/python/iniconfig/.dist-info/METADATA
index 3ea1e01cb0..3a8ef46a3b 100644
--- a/contrib/python/iniconfig/.dist-info/METADATA
+++ b/contrib/python/iniconfig/.dist-info/METADATA
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.4
Name: iniconfig
-Version: 2.0.0
+Version: 2.1.0
Summary: brain-dead simple config-ini parsing
Project-URL: Homepage, https://github.com/pytest-dev/iniconfig
Author-email: Ronny Pfannschmidt <opensource@ronnypfannschmidt.de>, Holger Krekel <holger.krekel@gmail.com>
@@ -14,14 +14,15 @@ Classifier: Operating System :: Microsoft :: Windows
Classifier: Operating System :: POSIX
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
Classifier: Topic :: Software Development :: Libraries
Classifier: Topic :: Utilities
-Requires-Python: >=3.7
+Requires-Python: >=3.8
Description-Content-Type: text/x-rst
iniconfig: brain-dead simple parsing of ini files
diff --git a/contrib/python/iniconfig/LICENSE b/contrib/python/iniconfig/LICENSE
index 31ecdfb1db..46f4b2846f 100644
--- a/contrib/python/iniconfig/LICENSE
+++ b/contrib/python/iniconfig/LICENSE
@@ -1,19 +1,21 @@
+The MIT License (MIT)
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
+Copyright (c) 2010 - 2023 Holger Krekel and others
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/contrib/python/iniconfig/iniconfig/__init__.py b/contrib/python/iniconfig/iniconfig/__init__.py
index c1c94f70ae..ed6499bc6c 100644
--- a/contrib/python/iniconfig/iniconfig/__init__.py
+++ b/contrib/python/iniconfig/iniconfig/__init__.py
@@ -20,7 +20,7 @@ from typing import (
import os
if TYPE_CHECKING:
- from typing_extensions import Final
+ from typing import Final
__all__ = ["IniConfig", "ParseError", "COMMENTCHARS", "iscommentline"]
diff --git a/contrib/python/iniconfig/iniconfig/_version.py b/contrib/python/iniconfig/iniconfig/_version.py
index dd1883d734..e058e2c657 100644
--- a/contrib/python/iniconfig/iniconfig/_version.py
+++ b/contrib/python/iniconfig/iniconfig/_version.py
@@ -1,4 +1,21 @@
-# file generated by setuptools_scm
+# file generated by setuptools-scm
# don't change, don't track in version control
-__version__ = version = '2.0.0'
-__version_tuple__ = version_tuple = (2, 0, 0)
+
+__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
+
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+ from typing import Tuple
+ from typing import Union
+
+ VERSION_TUPLE = Tuple[Union[int, str], ...]
+else:
+ VERSION_TUPLE = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+
+__version__ = version = '2.1.0'
+__version_tuple__ = version_tuple = (2, 1, 0)
diff --git a/contrib/python/iniconfig/iniconfig/exceptions.py b/contrib/python/iniconfig/iniconfig/exceptions.py
index bc898e68ee..8c4dc9a8b0 100644
--- a/contrib/python/iniconfig/iniconfig/exceptions.py
+++ b/contrib/python/iniconfig/iniconfig/exceptions.py
@@ -2,7 +2,7 @@ from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
- from typing_extensions import Final
+ from typing import Final
class ParseError(Exception):
diff --git a/contrib/python/iniconfig/ya.make b/contrib/python/iniconfig/ya.make
index 0121cca743..20492d75c6 100644
--- a/contrib/python/iniconfig/ya.make
+++ b/contrib/python/iniconfig/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(2.0.0)
+VERSION(2.1.0)
LICENSE(MIT)
diff --git a/contrib/python/multidict/.dist-info/METADATA b/contrib/python/multidict/.dist-info/METADATA
index 93f85177b9..b5c6dad90b 100644
--- a/contrib/python/multidict/.dist-info/METADATA
+++ b/contrib/python/multidict/.dist-info/METADATA
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
Name: multidict
-Version: 6.1.0
+Version: 6.2.0
Summary: multidict implementation
Home-page: https://github.com/aio-libs/multidict
Author: Andrew Svetlov
@@ -20,16 +20,15 @@ Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
-Requires-Python: >=3.8
+Requires-Python: >=3.9
Description-Content-Type: text/x-rst
License-File: LICENSE
-Requires-Dist: typing-extensions >=4.1.0 ; python_version < "3.11"
+Requires-Dist: typing-extensions>=4.1.0; python_version < "3.11"
=========
multidict
diff --git a/contrib/python/multidict/multidict/__init__.py b/contrib/python/multidict/multidict/__init__.py
index 25ddca41e9..b6b532a1f2 100644
--- a/contrib/python/multidict/multidict/__init__.py
+++ b/contrib/python/multidict/multidict/__init__.py
@@ -5,6 +5,8 @@ multidict. It behaves mostly like a dict but it can have
several values for the same key.
"""
+from typing import TYPE_CHECKING
+
from ._abc import MultiMapping, MutableMultiMapping
from ._compat import USE_EXTENSIONS
@@ -20,13 +22,11 @@ __all__ = (
"getversion",
)
-__version__ = "6.1.0"
+__version__ = "6.2.0"
-try:
- if not USE_EXTENSIONS:
- raise ImportError
- from ._multidict import (
+if TYPE_CHECKING or not USE_EXTENSIONS:
+ from ._multidict_py import (
CIMultiDict,
CIMultiDictProxy,
MultiDict,
@@ -34,8 +34,8 @@ try:
getversion,
istr,
)
-except ImportError: # pragma: no cover
- from ._multidict_py import (
+else:
+ from ._multidict import (
CIMultiDict,
CIMultiDictProxy,
MultiDict,
diff --git a/contrib/python/multidict/multidict/__init__.pyi b/contrib/python/multidict/multidict/__init__.pyi
deleted file mode 100644
index 0940340f81..0000000000
--- a/contrib/python/multidict/multidict/__init__.pyi
+++ /dev/null
@@ -1,152 +0,0 @@
-import abc
-from typing import (
- Generic,
- Iterable,
- Iterator,
- Mapping,
- MutableMapping,
- TypeVar,
- overload,
-)
-
-class istr(str): ...
-
-upstr = istr
-
-_S = str | istr
-
-_T = TypeVar("_T")
-
-_T_co = TypeVar("_T_co", covariant=True)
-
-_D = TypeVar("_D")
-
-class MultiMapping(Mapping[_S, _T_co]):
- @overload
- @abc.abstractmethod
- def getall(self, key: _S) -> list[_T_co]: ...
- @overload
- @abc.abstractmethod
- def getall(self, key: _S, default: _D) -> list[_T_co] | _D: ...
- @overload
- @abc.abstractmethod
- def getone(self, key: _S) -> _T_co: ...
- @overload
- @abc.abstractmethod
- def getone(self, key: _S, default: _D) -> _T_co | _D: ...
-
-_Arg = (
- Mapping[str, _T]
- | Mapping[istr, _T]
- | dict[str, _T]
- | dict[istr, _T]
- | MultiMapping[_T]
- | Iterable[tuple[str, _T]]
- | Iterable[tuple[istr, _T]]
-)
-
-class MutableMultiMapping(MultiMapping[_T], MutableMapping[_S, _T], Generic[_T]):
- @abc.abstractmethod
- def add(self, key: _S, value: _T) -> None: ...
- @abc.abstractmethod
- def extend(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ...
- @overload
- @abc.abstractmethod
- def popone(self, key: _S) -> _T: ...
- @overload
- @abc.abstractmethod
- def popone(self, key: _S, default: _D) -> _T | _D: ...
- @overload
- @abc.abstractmethod
- def popall(self, key: _S) -> list[_T]: ...
- @overload
- @abc.abstractmethod
- def popall(self, key: _S, default: _D) -> list[_T] | _D: ...
-
-class MultiDict(MutableMultiMapping[_T], Generic[_T]):
- def __init__(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ...
- def copy(self) -> MultiDict[_T]: ...
- def __getitem__(self, k: _S) -> _T: ...
- def __setitem__(self, k: _S, v: _T) -> None: ...
- def __delitem__(self, v: _S) -> None: ...
- def __iter__(self) -> Iterator[_S]: ...
- def __len__(self) -> int: ...
- @overload
- def getall(self, key: _S) -> list[_T]: ...
- @overload
- def getall(self, key: _S, default: _D) -> list[_T] | _D: ...
- @overload
- def getone(self, key: _S) -> _T: ...
- @overload
- def getone(self, key: _S, default: _D) -> _T | _D: ...
- def add(self, key: _S, value: _T) -> None: ...
- def extend(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ...
- @overload
- def popone(self, key: _S) -> _T: ...
- @overload
- def popone(self, key: _S, default: _D) -> _T | _D: ...
- @overload
- def popall(self, key: _S) -> list[_T]: ...
- @overload
- def popall(self, key: _S, default: _D) -> list[_T] | _D: ...
-
-class CIMultiDict(MutableMultiMapping[_T], Generic[_T]):
- def __init__(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ...
- def copy(self) -> CIMultiDict[_T]: ...
- def __getitem__(self, k: _S) -> _T: ...
- def __setitem__(self, k: _S, v: _T) -> None: ...
- def __delitem__(self, v: _S) -> None: ...
- def __iter__(self) -> Iterator[_S]: ...
- def __len__(self) -> int: ...
- @overload
- def getall(self, key: _S) -> list[_T]: ...
- @overload
- def getall(self, key: _S, default: _D) -> list[_T] | _D: ...
- @overload
- def getone(self, key: _S) -> _T: ...
- @overload
- def getone(self, key: _S, default: _D) -> _T | _D: ...
- def add(self, key: _S, value: _T) -> None: ...
- def extend(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ...
- @overload
- def popone(self, key: _S) -> _T: ...
- @overload
- def popone(self, key: _S, default: _D) -> _T | _D: ...
- @overload
- def popall(self, key: _S) -> list[_T]: ...
- @overload
- def popall(self, key: _S, default: _D) -> list[_T] | _D: ...
-
-class MultiDictProxy(MultiMapping[_T], Generic[_T]):
- def __init__(self, arg: MultiMapping[_T] | MutableMultiMapping[_T]) -> None: ...
- def copy(self) -> MultiDict[_T]: ...
- def __getitem__(self, k: _S) -> _T: ...
- def __iter__(self) -> Iterator[_S]: ...
- def __len__(self) -> int: ...
- @overload
- def getall(self, key: _S) -> list[_T]: ...
- @overload
- def getall(self, key: _S, default: _D) -> list[_T] | _D: ...
- @overload
- def getone(self, key: _S) -> _T: ...
- @overload
- def getone(self, key: _S, default: _D) -> _T | _D: ...
-
-class CIMultiDictProxy(MultiMapping[_T], Generic[_T]):
- def __init__(self, arg: MultiMapping[_T] | MutableMultiMapping[_T]) -> None: ...
- def __getitem__(self, k: _S) -> _T: ...
- def __iter__(self) -> Iterator[_S]: ...
- def __len__(self) -> int: ...
- @overload
- def getall(self, key: _S) -> list[_T]: ...
- @overload
- def getall(self, key: _S, default: _D) -> list[_T] | _D: ...
- @overload
- def getone(self, key: _S) -> _T: ...
- @overload
- def getone(self, key: _S, default: _D) -> _T | _D: ...
- def copy(self) -> CIMultiDict[_T]: ...
-
-def getversion(
- md: MultiDict[_T] | CIMultiDict[_T] | MultiDictProxy[_T] | CIMultiDictProxy[_T],
-) -> int: ...
diff --git a/contrib/python/multidict/multidict/_abc.py b/contrib/python/multidict/multidict/_abc.py
index 0603cdd244..ff0e2a6976 100644
--- a/contrib/python/multidict/multidict/_abc.py
+++ b/contrib/python/multidict/multidict/_abc.py
@@ -1,48 +1,69 @@
import abc
-import sys
-import types
-from collections.abc import Mapping, MutableMapping
+from collections.abc import Iterable, Mapping, MutableMapping
+from typing import TYPE_CHECKING, Protocol, TypeVar, Union, overload
+if TYPE_CHECKING:
+ from ._multidict_py import istr
+else:
+ istr = str
-class _TypingMeta(abc.ABCMeta):
- # A fake metaclass to satisfy typing deps in runtime
- # basically MultiMapping[str] and other generic-like type instantiations
- # are emulated.
- # Note: real type hints are provided by __init__.pyi stub file
- if sys.version_info >= (3, 9):
+_V = TypeVar("_V")
+_V_co = TypeVar("_V_co", covariant=True)
+_T = TypeVar("_T")
- def __getitem__(self, key):
- return types.GenericAlias(self, key)
- else:
+class SupportsKeys(Protocol[_V_co]):
+ def keys(self) -> Iterable[str]: ...
+ def __getitem__(self, key: str, /) -> _V_co: ...
- def __getitem__(self, key):
- return self
+class SupportsIKeys(Protocol[_V_co]):
+ def keys(self) -> Iterable[istr]: ...
+ def __getitem__(self, key: istr, /) -> _V_co: ...
-class MultiMapping(Mapping, metaclass=_TypingMeta):
+
+MDArg = Union[SupportsKeys[_V], SupportsIKeys[_V], Iterable[tuple[str, _V]], None]
+
+
+class MultiMapping(Mapping[str, _V_co]):
+ @overload
+ def getall(self, key: str) -> list[_V_co]: ...
+ @overload
+ def getall(self, key: str, default: _T) -> Union[list[_V_co], _T]: ...
@abc.abstractmethod
- def getall(self, key, default=None):
- raise KeyError
+ def getall(self, key: str, default: _T = ...) -> Union[list[_V_co], _T]:
+ """Return all values for key."""
+ @overload
+ def getone(self, key: str) -> _V_co: ...
+ @overload
+ def getone(self, key: str, default: _T) -> Union[_V_co, _T]: ...
@abc.abstractmethod
- def getone(self, key, default=None):
- raise KeyError
+ def getone(self, key: str, default: _T = ...) -> Union[_V_co, _T]:
+ """Return first value for key."""
-class MutableMultiMapping(MultiMapping, MutableMapping):
+class MutableMultiMapping(MultiMapping[_V], MutableMapping[str, _V]):
@abc.abstractmethod
- def add(self, key, value):
- raise NotImplementedError
+ def add(self, key: str, value: _V) -> None:
+ """Add value to list."""
@abc.abstractmethod
- def extend(self, *args, **kwargs):
- raise NotImplementedError
+ def extend(self, arg: MDArg[_V] = None, /, **kwargs: _V) -> None:
+ """Add everything from arg and kwargs to the mapping."""
+ @overload
+ def popone(self, key: str) -> _V: ...
+ @overload
+ def popone(self, key: str, default: _T) -> Union[_V, _T]: ...
@abc.abstractmethod
- def popone(self, key, default=None):
- raise KeyError
+ def popone(self, key: str, default: _T = ...) -> Union[_V, _T]:
+ """Remove specified key and return the corresponding value."""
+ @overload
+ def popall(self, key: str) -> list[_V]: ...
+ @overload
+ def popall(self, key: str, default: _T) -> Union[list[_V], _T]: ...
@abc.abstractmethod
- def popall(self, key, default=None):
- raise KeyError
+ def popall(self, key: str, default: _T = ...) -> Union[list[_V], _T]:
+ """Remove all occurrences of key and return the list of corresponding values."""
diff --git a/contrib/python/multidict/multidict/_multidict.c b/contrib/python/multidict/multidict/_multidict.c
index 60864953b1..ebb1949f0a 100644
--- a/contrib/python/multidict/multidict/_multidict.c
+++ b/contrib/python/multidict/multidict/_multidict.c
@@ -1,6 +1,8 @@
#include "Python.h"
#include "structmember.h"
+#include "_multilib/pythoncapi_compat.h"
+
// Include order important
#include "_multilib/defs.h"
#include "_multilib/istr.h"
@@ -9,7 +11,7 @@
#include "_multilib/iter.h"
#include "_multilib/views.h"
-#if PY_MAJOR_VERSION < 3 || PY_MINOR_VERSION < 12
+#if PY_MINOR_VERSION < 12
#ifndef _PyArg_UnpackKeywords
#define FASTCALL_OLD
#endif
@@ -19,14 +21,13 @@
static PyObject *collections_abc_mapping;
static PyObject *collections_abc_mut_mapping;
static PyObject *collections_abc_mut_multi_mapping;
+static PyObject *repr_func;
static PyTypeObject multidict_type;
static PyTypeObject cimultidict_type;
static PyTypeObject multidict_proxy_type;
static PyTypeObject cimultidict_proxy_type;
-static PyObject *repr_func;
-
#define MultiDict_CheckExact(o) (Py_TYPE(o) == &multidict_type)
#define CIMultiDict_CheckExact(o) (Py_TYPE(o) == &cimultidict_type)
#define MultiDictProxy_CheckExact(o) (Py_TYPE(o) == &multidict_proxy_type)
@@ -155,13 +156,17 @@ _multidict_append_items_seq(MultiDictObject *self, PyObject *arg,
Py_INCREF(value);
}
else if (PyList_CheckExact(item)) {
- if (PyList_GET_SIZE(item) != 2) {
+ if (PyList_Size(item) != 2) {
+ goto invalid_type;
+ }
+ key = PyList_GetItemRef(item, 0);
+ if (key == NULL) {
+ goto invalid_type;
+ }
+ value = PyList_GetItemRef(item, 1);
+ if (value == NULL) {
goto invalid_type;
}
- key = PyList_GET_ITEM(item, 0);
- Py_INCREF(key);
- value = PyList_GET_ITEM(item, 1);
- Py_INCREF(value);
}
else if (PySequence_Check(item)) {
if (PySequence_Size(item) != 2) {
@@ -339,8 +344,8 @@ _multidict_extend(MultiDictObject *self, PyObject *args, PyObject *kwds,
if (args && PyObject_Length(args) > 1) {
PyErr_Format(
PyExc_TypeError,
- "%s takes at most 1 positional argument (%zd given)",
- name, PyObject_Length(args), NULL
+ "%s takes from 1 to 2 positional arguments but %zd were given",
+ name, PyObject_Length(args) + 1, NULL
);
return -1;
}
@@ -769,21 +774,13 @@ static inline void
multidict_tp_dealloc(MultiDictObject *self)
{
PyObject_GC_UnTrack(self);
-#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 9
Py_TRASHCAN_BEGIN(self, multidict_tp_dealloc)
-#else
- Py_TRASHCAN_SAFE_BEGIN(self);
-#endif
if (self->weaklist != NULL) {
PyObject_ClearWeakRefs((PyObject *)self);
};
pair_list_dealloc(&self->pairs);
Py_TYPE(self)->tp_free((PyObject *)self);
-#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 9
Py_TRASHCAN_END // there should be no code after this
-#else
- Py_TRASHCAN_SAFE_END(self);
-#endif
}
static inline int
@@ -1230,16 +1227,7 @@ PyDoc_STRVAR(multidict_update_doc,
"Update the dictionary from *other*, overwriting existing keys.");
-#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 9
#define multidict_class_getitem Py_GenericAlias
-#else
-static inline PyObject *
-multidict_class_getitem(PyObject *self, PyObject *arg)
-{
- Py_INCREF(self);
- return self;
-}
-#endif
PyDoc_STRVAR(sizeof__doc__,
@@ -1941,9 +1929,7 @@ getversion(PyObject *self, PyObject *md)
static inline void
module_free(void *m)
{
-#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 9
Py_CLEAR(multidict_str_lower);
-#endif
Py_CLEAR(collections_abc_mapping);
Py_CLEAR(collections_abc_mut_mapping);
Py_CLEAR(collections_abc_mut_multi_mapping);
@@ -1972,29 +1958,14 @@ static PyModuleDef multidict_module = {
PyMODINIT_FUNC
PyInit__multidict(void)
{
-#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 9
multidict_str_lower = PyUnicode_InternFromString("lower");
if (multidict_str_lower == NULL) {
goto fail;
}
-#endif
PyObject *module = NULL,
*reg_func_call_result = NULL;
-#define WITH_MOD(NAME) \
- Py_CLEAR(module); \
- module = PyImport_ImportModule(NAME); \
- if (module == NULL) { \
- goto fail; \
- }
-
-#define GET_MOD_ATTR(VAR, NAME) \
- VAR = PyObject_GetAttrString(module, NAME); \
- if (VAR == NULL) { \
- goto fail; \
- }
-
if (multidict_views_init() < 0) {
goto fail;
}
@@ -2015,18 +1986,31 @@ PyInit__multidict(void)
goto fail;
}
+#define WITH_MOD(NAME) \
+ Py_CLEAR(module); \
+ module = PyImport_ImportModule(NAME); \
+ if (module == NULL) { \
+ goto fail; \
+ }
+
+#define GET_MOD_ATTR(VAR, NAME) \
+ VAR = PyObject_GetAttrString(module, NAME); \
+ if (VAR == NULL) { \
+ goto fail; \
+ }
+
WITH_MOD("collections.abc");
GET_MOD_ATTR(collections_abc_mapping, "Mapping");
WITH_MOD("multidict._abc");
GET_MOD_ATTR(collections_abc_mut_mapping, "MultiMapping");
-
- WITH_MOD("multidict._abc");
GET_MOD_ATTR(collections_abc_mut_multi_mapping, "MutableMultiMapping");
WITH_MOD("multidict._multidict_base");
GET_MOD_ATTR(repr_func, "_mdrepr");
+ Py_CLEAR(module); \
+
/* Register in _abc mappings (CI)MultiDict and (CI)MultiDictProxy */
reg_func_call_result = PyObject_CallMethod(
collections_abc_mut_mapping,
@@ -2070,6 +2054,13 @@ PyInit__multidict(void)
/* Instantiate this module */
module = PyModule_Create(&multidict_module);
+ if (module == NULL) {
+ goto fail;
+ }
+
+#ifdef Py_GIL_DISABLED
+ PyUnstable_Module_SetGIL(module, Py_MOD_GIL_NOT_USED);
+#endif
Py_INCREF(&istr_type);
if (PyModule_AddObject(
@@ -2109,9 +2100,7 @@ PyInit__multidict(void)
return module;
fail:
-#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 9
Py_XDECREF(multidict_str_lower);
-#endif
Py_XDECREF(collections_abc_mapping);
Py_XDECREF(collections_abc_mut_mapping);
Py_XDECREF(collections_abc_mut_multi_mapping);
diff --git a/contrib/python/multidict/multidict/_multidict_base.py b/contrib/python/multidict/multidict/_multidict_base.py
index de2f762a5c..df0d70097a 100644
--- a/contrib/python/multidict/multidict/_multidict_base.py
+++ b/contrib/python/multidict/multidict/_multidict_base.py
@@ -1,5 +1,19 @@
import sys
-from collections.abc import ItemsView, Iterable, KeysView, Set, ValuesView
+from collections.abc import (
+ Container,
+ ItemsView,
+ Iterable,
+ KeysView,
+ Mapping,
+ Set,
+ ValuesView,
+)
+from typing import Literal, Union
+
+if sys.version_info >= (3, 10):
+ from types import NotImplementedType
+else:
+ from typing import Any as NotImplementedType
if sys.version_info >= (3, 11):
from typing import assert_never
@@ -7,26 +21,28 @@ else:
from typing_extensions import assert_never
-def _abc_itemsview_register(view_cls):
+def _abc_itemsview_register(view_cls: type[object]) -> None:
ItemsView.register(view_cls)
-def _abc_keysview_register(view_cls):
+def _abc_keysview_register(view_cls: type[object]) -> None:
KeysView.register(view_cls)
-def _abc_valuesview_register(view_cls):
+def _abc_valuesview_register(view_cls: type[object]) -> None:
ValuesView.register(view_cls)
-def _viewbaseset_richcmp(view, other, op):
+def _viewbaseset_richcmp(
+ view: set[object], other: object, op: Literal[0, 1, 2, 3, 4, 5]
+) -> Union[bool, NotImplementedType]:
if op == 0: # <
if not isinstance(other, Set):
- return NotImplemented
+ return NotImplemented # type: ignore[no-any-return]
return len(view) < len(other) and view <= other
elif op == 1: # <=
if not isinstance(other, Set):
- return NotImplemented
+ return NotImplemented # type: ignore[no-any-return]
if len(view) > len(other):
return False
for elem in view:
@@ -35,17 +51,17 @@ def _viewbaseset_richcmp(view, other, op):
return True
elif op == 2: # ==
if not isinstance(other, Set):
- return NotImplemented
+ return NotImplemented # type: ignore[no-any-return]
return len(view) == len(other) and view <= other
elif op == 3: # !=
return not view == other
elif op == 4: # >
if not isinstance(other, Set):
- return NotImplemented
+ return NotImplemented # type: ignore[no-any-return]
return len(view) > len(other) and view >= other
elif op == 5: # >=
if not isinstance(other, Set):
- return NotImplemented
+ return NotImplemented # type: ignore[no-any-return]
if len(view) < len(other):
return False
for elem in other:
@@ -56,9 +72,11 @@ def _viewbaseset_richcmp(view, other, op):
assert_never(op)
-def _viewbaseset_and(view, other):
+def _viewbaseset_and(
+ view: set[object], other: object
+) -> Union[set[object], NotImplementedType]:
if not isinstance(other, Iterable):
- return NotImplemented
+ return NotImplemented # type: ignore[no-any-return]
if isinstance(view, Set):
view = set(iter(view))
if isinstance(other, Set):
@@ -68,9 +86,11 @@ def _viewbaseset_and(view, other):
return view & other
-def _viewbaseset_or(view, other):
+def _viewbaseset_or(
+ view: set[object], other: object
+) -> Union[set[object], NotImplementedType]:
if not isinstance(other, Iterable):
- return NotImplemented
+ return NotImplemented # type: ignore[no-any-return]
if isinstance(view, Set):
view = set(iter(view))
if isinstance(other, Set):
@@ -80,9 +100,11 @@ def _viewbaseset_or(view, other):
return view | other
-def _viewbaseset_sub(view, other):
+def _viewbaseset_sub(
+ view: set[object], other: object
+) -> Union[set[object], NotImplementedType]:
if not isinstance(other, Iterable):
- return NotImplemented
+ return NotImplemented # type: ignore[no-any-return]
if isinstance(view, Set):
view = set(iter(view))
if isinstance(other, Set):
@@ -92,9 +114,11 @@ def _viewbaseset_sub(view, other):
return view - other
-def _viewbaseset_xor(view, other):
+def _viewbaseset_xor(
+ view: set[object], other: object
+) -> Union[set[object], NotImplementedType]:
if not isinstance(other, Iterable):
- return NotImplemented
+ return NotImplemented # type: ignore[no-any-return]
if isinstance(view, Set):
view = set(iter(view))
if isinstance(other, Set):
@@ -104,7 +128,7 @@ def _viewbaseset_xor(view, other):
return view ^ other
-def _itemsview_isdisjoint(view, other):
+def _itemsview_isdisjoint(view: Container[object], other: Iterable[object]) -> bool:
"Return True if two sets have a null intersection."
for v in other:
if v in view:
@@ -112,7 +136,7 @@ def _itemsview_isdisjoint(view, other):
return True
-def _itemsview_repr(view):
+def _itemsview_repr(view: Iterable[tuple[object, object]]) -> str:
lst = []
for k, v in view:
lst.append("{!r}: {!r}".format(k, v))
@@ -120,7 +144,7 @@ def _itemsview_repr(view):
return "{}({})".format(view.__class__.__name__, body)
-def _keysview_isdisjoint(view, other):
+def _keysview_isdisjoint(view: Container[object], other: Iterable[object]) -> bool:
"Return True if two sets have a null intersection."
for k in other:
if k in view:
@@ -128,7 +152,7 @@ def _keysview_isdisjoint(view, other):
return True
-def _keysview_repr(view):
+def _keysview_repr(view: Iterable[object]) -> str:
lst = []
for k in view:
lst.append("{!r}".format(k))
@@ -136,7 +160,7 @@ def _keysview_repr(view):
return "{}({})".format(view.__class__.__name__, body)
-def _valuesview_repr(view):
+def _valuesview_repr(view: Iterable[object]) -> str:
lst = []
for v in view:
lst.append("{!r}".format(v))
@@ -144,7 +168,7 @@ def _valuesview_repr(view):
return "{}({})".format(view.__class__.__name__, body)
-def _mdrepr(md):
+def _mdrepr(md: Mapping[object, object]) -> str:
lst = []
for k, v in md.items():
lst.append("'{}': {!r}".format(k, v))
diff --git a/contrib/python/multidict/multidict/_multidict_py.py b/contrib/python/multidict/multidict/_multidict_py.py
index 79c45aa19c..b8ecb8b962 100644
--- a/contrib/python/multidict/multidict/_multidict_py.py
+++ b/contrib/python/multidict/multidict/_multidict_py.py
@@ -1,47 +1,56 @@
+import enum
import sys
-import types
from array import array
-from collections import abc
-
-from ._abc import MultiMapping, MutableMultiMapping
-
-_marker = object()
-
-if sys.version_info >= (3, 9):
- GenericAlias = types.GenericAlias
+from collections.abc import (
+ Callable,
+ ItemsView,
+ Iterable,
+ Iterator,
+ KeysView,
+ Mapping,
+ ValuesView,
+)
+from typing import (
+ TYPE_CHECKING,
+ Generic,
+ NoReturn,
+ TypeVar,
+ Union,
+ cast,
+ overload,
+)
+
+from ._abc import MDArg, MultiMapping, MutableMultiMapping, SupportsKeys
+
+if sys.version_info >= (3, 11):
+ from typing import Self
else:
-
- def GenericAlias(cls):
- return cls
+ from typing_extensions import Self
class istr(str):
-
"""Case insensitive str."""
__is_istr__ = True
-upstr = istr # for relaxing backward compatibility problems
-
-
-def getversion(md):
- if not isinstance(md, _Base):
- raise TypeError("Parameter should be multidict or proxy")
- return md._impl._version
+_V = TypeVar("_V")
+_T = TypeVar("_T")
+_SENTINEL = enum.Enum("_SENTINEL", "sentinel")
+sentinel = _SENTINEL.sentinel
_version = array("Q", [0])
-class _Impl:
+class _Impl(Generic[_V]):
__slots__ = ("_items", "_version")
- def __init__(self):
- self._items = []
+ def __init__(self) -> None:
+ self._items: list[tuple[str, str, _V]] = []
self.incr_version()
- def incr_version(self):
+ def incr_version(self) -> None:
global _version
v = _version
v[0] += 1
@@ -49,25 +58,138 @@ class _Impl:
if sys.implementation.name != "pypy":
- def __sizeof__(self):
+ def __sizeof__(self) -> int:
return object.__sizeof__(self) + sys.getsizeof(self._items)
-class _Base:
- def _title(self, key):
+class _Iter(Generic[_T]):
+ __slots__ = ("_size", "_iter")
+
+ def __init__(self, size: int, iterator: Iterator[_T]):
+ self._size = size
+ self._iter = iterator
+
+ def __iter__(self) -> Self:
+ return self
+
+ def __next__(self) -> _T:
+ return next(self._iter)
+
+ def __length_hint__(self) -> int:
+ return self._size
+
+
+class _ViewBase(Generic[_V]):
+ def __init__(self, impl: _Impl[_V]):
+ self._impl = impl
+
+ def __len__(self) -> int:
+ return len(self._impl._items)
+
+
+class _ItemsView(_ViewBase[_V], ItemsView[str, _V]):
+ def __contains__(self, item: object) -> bool:
+ if not isinstance(item, (tuple, list)) or len(item) != 2:
+ return False
+ for i, k, v in self._impl._items:
+ if item[0] == k and item[1] == v:
+ return True
+ return False
+
+ def __iter__(self) -> _Iter[tuple[str, _V]]:
+ return _Iter(len(self), self._iter(self._impl._version))
+
+ def _iter(self, version: int) -> Iterator[tuple[str, _V]]:
+ for i, k, v in self._impl._items:
+ if version != self._impl._version:
+ raise RuntimeError("Dictionary changed during iteration")
+ yield k, v
+
+ def __repr__(self) -> str:
+ lst = []
+ for item in self._impl._items:
+ lst.append("{!r}: {!r}".format(item[1], item[2]))
+ body = ", ".join(lst)
+ return "{}({})".format(self.__class__.__name__, body)
+
+
+class _ValuesView(_ViewBase[_V], ValuesView[_V]):
+ def __contains__(self, value: object) -> bool:
+ for item in self._impl._items:
+ if item[2] == value:
+ return True
+ return False
+
+ def __iter__(self) -> _Iter[_V]:
+ return _Iter(len(self), self._iter(self._impl._version))
+
+ def _iter(self, version: int) -> Iterator[_V]:
+ for item in self._impl._items:
+ if version != self._impl._version:
+ raise RuntimeError("Dictionary changed during iteration")
+ yield item[2]
+
+ def __repr__(self) -> str:
+ lst = []
+ for item in self._impl._items:
+ lst.append("{!r}".format(item[2]))
+ body = ", ".join(lst)
+ return "{}({})".format(self.__class__.__name__, body)
+
+
+class _KeysView(_ViewBase[_V], KeysView[str]):
+ def __contains__(self, key: object) -> bool:
+ for item in self._impl._items:
+ if item[1] == key:
+ return True
+ return False
+
+ def __iter__(self) -> _Iter[str]:
+ return _Iter(len(self), self._iter(self._impl._version))
+
+ def _iter(self, version: int) -> Iterator[str]:
+ for item in self._impl._items:
+ if version != self._impl._version:
+ raise RuntimeError("Dictionary changed during iteration")
+ yield item[1]
+
+ def __repr__(self) -> str:
+ lst = []
+ for item in self._impl._items:
+ lst.append("{!r}".format(item[1]))
+ body = ", ".join(lst)
+ return "{}({})".format(self.__class__.__name__, body)
+
+
+class _Base(MultiMapping[_V]):
+ _impl: _Impl[_V]
+
+ def _title(self, key: str) -> str:
return key
- def getall(self, key, default=_marker):
+ @overload
+ def getall(self, key: str) -> list[_V]: ...
+ @overload
+ def getall(self, key: str, default: _T) -> Union[list[_V], _T]: ...
+ def getall(
+ self, key: str, default: Union[_T, _SENTINEL] = sentinel
+ ) -> Union[list[_V], _T]:
"""Return a list of all values matching the key."""
identity = self._title(key)
res = [v for i, k, v in self._impl._items if i == identity]
if res:
return res
- if not res and default is not _marker:
+ if not res and default is not sentinel:
return default
raise KeyError("Key not found: %r" % key)
- def getone(self, key, default=_marker):
+ @overload
+ def getone(self, key: str) -> _V: ...
+ @overload
+ def getone(self, key: str, default: _T) -> Union[_V, _T]: ...
+ def getone(
+ self, key: str, default: Union[_T, _SENTINEL] = sentinel
+ ) -> Union[_V, _T]:
"""Get first value matching the key.
Raises KeyError if the key is not found and no default is provided.
@@ -76,42 +198,46 @@ class _Base:
for i, k, v in self._impl._items:
if i == identity:
return v
- if default is not _marker:
+ if default is not sentinel:
return default
raise KeyError("Key not found: %r" % key)
# Mapping interface #
- def __getitem__(self, key):
+ def __getitem__(self, key: str) -> _V:
return self.getone(key)
- def get(self, key, default=None):
+ @overload
+ def get(self, key: str, /) -> Union[_V, None]: ...
+ @overload
+ def get(self, key: str, /, default: _T) -> Union[_V, _T]: ...
+ def get(self, key: str, default: Union[_T, None] = None) -> Union[_V, _T, None]:
"""Get first value matching the key.
If the key is not found, returns the default (or None if no default is provided)
"""
return self.getone(key, default)
- def __iter__(self):
+ def __iter__(self) -> Iterator[str]:
return iter(self.keys())
- def __len__(self):
+ def __len__(self) -> int:
return len(self._impl._items)
- def keys(self):
+ def keys(self) -> KeysView[str]:
"""Return a new view of the dictionary's keys."""
return _KeysView(self._impl)
- def items(self):
+ def items(self) -> ItemsView[str, _V]:
"""Return a new view of the dictionary's items *(key, value) pairs)."""
return _ItemsView(self._impl)
- def values(self):
+ def values(self) -> _ValuesView[_V]:
"""Return a new view of the dictionary's values."""
return _ValuesView(self._impl)
- def __eq__(self, other):
- if not isinstance(other, abc.Mapping):
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, Mapping):
return NotImplemented
if isinstance(other, _Base):
lft = self._impl._items
@@ -125,124 +251,83 @@ class _Base:
if len(self._impl._items) != len(other):
return False
for k, v in self.items():
- nv = other.get(k, _marker)
+ nv = other.get(k, sentinel)
if v != nv:
return False
return True
- def __contains__(self, key):
+ def __contains__(self, key: object) -> bool:
+ if not isinstance(key, str):
+ return False
identity = self._title(key)
for i, k, v in self._impl._items:
if i == identity:
return True
return False
- def __repr__(self):
+ def __repr__(self) -> str:
body = ", ".join("'{}': {!r}".format(k, v) for k, v in self.items())
return "<{}({})>".format(self.__class__.__name__, body)
- __class_getitem__ = classmethod(GenericAlias)
-
-
-class MultiDictProxy(_Base, MultiMapping):
- """Read-only proxy for MultiDict instance."""
-
- def __init__(self, arg):
- if not isinstance(arg, (MultiDict, MultiDictProxy)):
- raise TypeError(
- "ctor requires MultiDict or MultiDictProxy instance"
- ", not {}".format(type(arg))
- )
-
- self._impl = arg._impl
- def __reduce__(self):
- raise TypeError("can't pickle {} objects".format(self.__class__.__name__))
-
- def copy(self):
- """Return a copy of itself."""
- return MultiDict(self.items())
-
-
-class CIMultiDictProxy(MultiDictProxy):
- """Read-only proxy for CIMultiDict instance."""
-
- def __init__(self, arg):
- if not isinstance(arg, (CIMultiDict, CIMultiDictProxy)):
- raise TypeError(
- "ctor requires CIMultiDict or CIMultiDictProxy instance"
- ", not {}".format(type(arg))
- )
-
- self._impl = arg._impl
-
- def _title(self, key):
- return key.title()
-
- def copy(self):
- """Return a copy of itself."""
- return CIMultiDict(self.items())
-
-
-class MultiDict(_Base, MutableMultiMapping):
+class MultiDict(_Base[_V], MutableMultiMapping[_V]):
"""Dictionary with the support for duplicate keys."""
- def __init__(self, *args, **kwargs):
+ def __init__(self, arg: MDArg[_V] = None, /, **kwargs: _V):
self._impl = _Impl()
- self._extend(args, kwargs, self.__class__.__name__, self._extend_items)
+ self._extend(arg, kwargs, self.__class__.__name__, self._extend_items)
if sys.implementation.name != "pypy":
- def __sizeof__(self):
+ def __sizeof__(self) -> int:
return object.__sizeof__(self) + sys.getsizeof(self._impl)
- def __reduce__(self):
+ def __reduce__(self) -> tuple[type[Self], tuple[list[tuple[str, _V]]]]:
return (self.__class__, (list(self.items()),))
- def _title(self, key):
+ def _title(self, key: str) -> str:
return key
- def _key(self, key):
+ def _key(self, key: str) -> str:
if isinstance(key, str):
return key
else:
- raise TypeError(
- "MultiDict keys should be either str " "or subclasses of str"
- )
+ raise TypeError("MultiDict keys should be either str or subclasses of str")
- def add(self, key, value):
+ def add(self, key: str, value: _V) -> None:
identity = self._title(key)
self._impl._items.append((identity, self._key(key), value))
self._impl.incr_version()
- def copy(self):
+ def copy(self) -> Self:
"""Return a copy of itself."""
cls = self.__class__
return cls(self.items())
__copy__ = copy
- def extend(self, *args, **kwargs):
+ def extend(self, arg: MDArg[_V] = None, /, **kwargs: _V) -> None:
"""Extend current MultiDict with more values.
This method must be used instead of update.
"""
- self._extend(args, kwargs, "extend", self._extend_items)
-
- def _extend(self, args, kwargs, name, method):
- if len(args) > 1:
- raise TypeError(
- "{} takes at most 1 positional argument"
- " ({} given)".format(name, len(args))
- )
- if args:
- arg = args[0]
- if isinstance(args[0], (MultiDict, MultiDictProxy)) and not kwargs:
+ self._extend(arg, kwargs, "extend", self._extend_items)
+
+ def _extend(
+ self,
+ arg: MDArg[_V],
+ kwargs: Mapping[str, _V],
+ name: str,
+ method: Callable[[list[tuple[str, str, _V]]], None],
+ ) -> None:
+ if arg:
+ if isinstance(arg, (MultiDict, MultiDictProxy)) and not kwargs:
items = arg._impl._items
else:
- if hasattr(arg, "items"):
- arg = arg.items()
+ if hasattr(arg, "keys"):
+ arg = cast(SupportsKeys[_V], arg)
+ arg = [(k, arg[k]) for k in arg.keys()]
if kwargs:
arg = list(arg)
arg.extend(list(kwargs.items()))
@@ -264,21 +349,21 @@ class MultiDict(_Base, MutableMultiMapping):
]
)
- def _extend_items(self, items):
+ def _extend_items(self, items: Iterable[tuple[str, str, _V]]) -> None:
for identity, key, value in items:
self.add(key, value)
- def clear(self):
+ def clear(self) -> None:
"""Remove all items from MultiDict."""
self._impl._items.clear()
self._impl.incr_version()
# Mapping interface #
- def __setitem__(self, key, value):
+ def __setitem__(self, key: str, value: _V) -> None:
self._replace(key, value)
- def __delitem__(self, key):
+ def __delitem__(self, key: str) -> None:
identity = self._title(key)
items = self._impl._items
found = False
@@ -291,16 +376,28 @@ class MultiDict(_Base, MutableMultiMapping):
else:
self._impl.incr_version()
- def setdefault(self, key, default=None):
+ @overload
+ def setdefault(
+ self: "MultiDict[Union[_T, None]]", key: str, default: None = None
+ ) -> Union[_T, None]: ...
+ @overload
+ def setdefault(self, key: str, default: _V) -> _V: ...
+ def setdefault(self, key: str, default: Union[_V, None] = None) -> Union[_V, None]: # type: ignore[misc]
"""Return value for key, set value to default if key is not present."""
identity = self._title(key)
for i, k, v in self._impl._items:
if i == identity:
return v
- self.add(key, default)
+ self.add(key, default) # type: ignore[arg-type]
return default
- def popone(self, key, default=_marker):
+ @overload
+ def popone(self, key: str) -> _V: ...
+ @overload
+ def popone(self, key: str, default: _T) -> Union[_V, _T]: ...
+ def popone(
+ self, key: str, default: Union[_T, _SENTINEL] = sentinel
+ ) -> Union[_V, _T]:
"""Remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise
@@ -314,14 +411,22 @@ class MultiDict(_Base, MutableMultiMapping):
del self._impl._items[i]
self._impl.incr_version()
return value
- if default is _marker:
+ if default is sentinel:
raise KeyError(key)
else:
return default
- pop = popone # type: ignore
-
- def popall(self, key, default=_marker):
+ # Type checking will inherit signature for pop() if we don't confuse it here.
+ if not TYPE_CHECKING:
+ pop = popone
+
+ @overload
+ def popall(self, key: str) -> list[_V]: ...
+ @overload
+ def popall(self, key: str, default: _T) -> Union[list[_V], _T]: ...
+ def popall(
+ self, key: str, default: Union[_T, _SENTINEL] = sentinel
+ ) -> Union[list[_V], _T]:
"""Remove all occurrences of key and return the list of corresponding
values.
@@ -340,7 +445,7 @@ class MultiDict(_Base, MutableMultiMapping):
self._impl.incr_version()
found = True
if not found:
- if default is _marker:
+ if default is sentinel:
raise KeyError(key)
else:
return default
@@ -348,7 +453,7 @@ class MultiDict(_Base, MutableMultiMapping):
ret.reverse()
return ret
- def popitem(self):
+ def popitem(self) -> tuple[str, _V]:
"""Remove and return an arbitrary (key, value) pair."""
if self._impl._items:
i = self._impl._items.pop(0)
@@ -357,14 +462,14 @@ class MultiDict(_Base, MutableMultiMapping):
else:
raise KeyError("empty multidict")
- def update(self, *args, **kwargs):
+ def update(self, arg: MDArg[_V] = None, /, **kwargs: _V) -> None:
"""Update the dictionary from *other*, overwriting existing keys."""
- self._extend(args, kwargs, "update", self._update_items)
+ self._extend(arg, kwargs, "update", self._update_items)
- def _update_items(self, items):
+ def _update_items(self, items: list[tuple[str, str, _V]]) -> None:
if not items:
return
- used_keys = {}
+ used_keys: dict[str, int] = {}
for identity, key, value in items:
start = used_keys.get(identity, 0)
for i in range(start, len(self._impl._items)):
@@ -393,7 +498,7 @@ class MultiDict(_Base, MutableMultiMapping):
self._impl.incr_version()
- def _replace(self, key, value):
+ def _replace(self, key: str, value: _V) -> None:
key = self._key(key)
identity = self._title(key)
items = self._impl._items
@@ -412,7 +517,8 @@ class MultiDict(_Base, MutableMultiMapping):
return
# remove all tail items
- i = rgt + 1
+ # Mypy bug: https://github.com/python/mypy/issues/14209
+ i = rgt + 1 # type: ignore[possibly-undefined]
while i < len(items):
item = items[i]
if item[0] == identity:
@@ -421,107 +527,54 @@ class MultiDict(_Base, MutableMultiMapping):
i += 1
-class CIMultiDict(MultiDict):
+class CIMultiDict(MultiDict[_V]):
"""Dictionary with the support for duplicate case-insensitive keys."""
- def _title(self, key):
+ def _title(self, key: str) -> str:
return key.title()
-class _Iter:
- __slots__ = ("_size", "_iter")
-
- def __init__(self, size, iterator):
- self._size = size
- self._iter = iterator
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return next(self._iter)
-
- def __length_hint__(self):
- return self._size
-
-
-class _ViewBase:
- def __init__(self, impl):
- self._impl = impl
-
- def __len__(self):
- return len(self._impl._items)
-
-
-class _ItemsView(_ViewBase, abc.ItemsView):
- def __contains__(self, item):
- assert isinstance(item, tuple) or isinstance(item, list)
- assert len(item) == 2
- for i, k, v in self._impl._items:
- if item[0] == k and item[1] == v:
- return True
- return False
-
- def __iter__(self):
- return _Iter(len(self), self._iter(self._impl._version))
+class MultiDictProxy(_Base[_V]):
+ """Read-only proxy for MultiDict instance."""
- def _iter(self, version):
- for i, k, v in self._impl._items:
- if version != self._impl._version:
- raise RuntimeError("Dictionary changed during iteration")
- yield k, v
+ def __init__(self, arg: Union[MultiDict[_V], "MultiDictProxy[_V]"]):
+ if not isinstance(arg, (MultiDict, MultiDictProxy)):
+ raise TypeError(
+ "ctor requires MultiDict or MultiDictProxy instance"
+ ", not {}".format(type(arg))
+ )
- def __repr__(self):
- lst = []
- for item in self._impl._items:
- lst.append("{!r}: {!r}".format(item[1], item[2]))
- body = ", ".join(lst)
- return "{}({})".format(self.__class__.__name__, body)
+ self._impl = arg._impl
+ def __reduce__(self) -> NoReturn:
+ raise TypeError("can't pickle {} objects".format(self.__class__.__name__))
-class _ValuesView(_ViewBase, abc.ValuesView):
- def __contains__(self, value):
- for item in self._impl._items:
- if item[2] == value:
- return True
- return False
+ def copy(self) -> MultiDict[_V]:
+ """Return a copy of itself."""
+ return MultiDict(self.items())
- def __iter__(self):
- return _Iter(len(self), self._iter(self._impl._version))
- def _iter(self, version):
- for item in self._impl._items:
- if version != self._impl._version:
- raise RuntimeError("Dictionary changed during iteration")
- yield item[2]
+class CIMultiDictProxy(MultiDictProxy[_V]):
+ """Read-only proxy for CIMultiDict instance."""
- def __repr__(self):
- lst = []
- for item in self._impl._items:
- lst.append("{!r}".format(item[2]))
- body = ", ".join(lst)
- return "{}({})".format(self.__class__.__name__, body)
+ def __init__(self, arg: Union[MultiDict[_V], MultiDictProxy[_V]]):
+ if not isinstance(arg, (CIMultiDict, CIMultiDictProxy)):
+ raise TypeError(
+ "ctor requires CIMultiDict or CIMultiDictProxy instance"
+ ", not {}".format(type(arg))
+ )
+ self._impl = arg._impl
-class _KeysView(_ViewBase, abc.KeysView):
- def __contains__(self, key):
- for item in self._impl._items:
- if item[1] == key:
- return True
- return False
+ def _title(self, key: str) -> str:
+ return key.title()
- def __iter__(self):
- return _Iter(len(self), self._iter(self._impl._version))
+ def copy(self) -> CIMultiDict[_V]:
+ """Return a copy of itself."""
+ return CIMultiDict(self.items())
- def _iter(self, version):
- for item in self._impl._items:
- if version != self._impl._version:
- raise RuntimeError("Dictionary changed during iteration")
- yield item[1]
- def __repr__(self):
- lst = []
- for item in self._impl._items:
- lst.append("{!r}".format(item[1]))
- body = ", ".join(lst)
- return "{}({})".format(self.__class__.__name__, body)
+def getversion(md: Union[MultiDict[object], MultiDictProxy[object]]) -> int:
+ if not isinstance(md, _Base):
+ raise TypeError("Parameter should be multidict or proxy")
+ return md._impl._version
diff --git a/contrib/python/multidict/multidict/_multilib/defs.h b/contrib/python/multidict/multidict/_multilib/defs.h
index 55c21074dd..51a6639c42 100644
--- a/contrib/python/multidict/multidict/_multilib/defs.h
+++ b/contrib/python/multidict/multidict/_multilib/defs.h
@@ -5,11 +5,7 @@
extern "C" {
#endif
-#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 9
static PyObject *multidict_str_lower = NULL;
-#else
-_Py_IDENTIFIER(lower);
-#endif
/* We link this module statically for convenience. If compiled as a shared
library instead, some compilers don't allow addresses of Python objects
diff --git a/contrib/python/multidict/multidict/_multilib/istr.h b/contrib/python/multidict/multidict/_multilib/istr.h
index 61dc61aec6..8454f78b88 100644
--- a/contrib/python/multidict/multidict/_multilib/istr.h
+++ b/contrib/python/multidict/multidict/_multilib/istr.h
@@ -43,11 +43,7 @@ istr_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
if (!ret) {
goto fail;
}
-#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 9
s = PyObject_CallMethodNoArgs(ret, multidict_str_lower);
-#else
- s =_PyObject_CallMethodId(ret, &PyId_lower, NULL);
-#endif
if (!s) {
goto fail;
}
diff --git a/contrib/python/multidict/multidict/_multilib/pair_list.h b/contrib/python/multidict/multidict/_multilib/pair_list.h
index 15291d46a8..b23150dfad 100644
--- a/contrib/python/multidict/multidict/_multilib/pair_list.h
+++ b/contrib/python/multidict/multidict/_multilib/pair_list.h
@@ -31,11 +31,7 @@ The embedded buffer intention is to fit the vast majority of possible
HTTP headers into the buffer without allocating an extra memory block.
*/
-#if (PY_VERSION_HEX < 0x03080000)
-#define EMBEDDED_CAPACITY 28
-#else
#define EMBEDDED_CAPACITY 29
-#endif
typedef struct pair_list {
Py_ssize_t capacity;
@@ -110,11 +106,7 @@ ci_key_to_str(PyObject *key)
return ret;
}
if (PyUnicode_Check(key)) {
-#if PY_VERSION_HEX < 0x03090000
- return _PyObject_CallMethodId(key, &PyId_lower, NULL);
-#else
return PyObject_CallMethodNoArgs(key, multidict_str_lower);
-#endif
}
PyErr_SetString(PyExc_TypeError,
"CIMultiDict keys should be either str "
@@ -497,6 +489,10 @@ pair_list_contains(pair_list_t *list, PyObject *key)
PyObject *identity = NULL;
int tmp;
+ if (!PyUnicode_Check(key)) {
+ return 0;
+ }
+
ident = pair_list_calc_identity(list, key);
if (ident == NULL) {
goto fail;
@@ -916,13 +912,18 @@ _pair_list_post_update(pair_list_t *list, PyObject* used_keys, Py_ssize_t pos)
for (; pos < list->size; pos++) {
pair = pair_list_get(list, pos);
- tmp = PyDict_GetItem(used_keys, pair->identity);
- if (tmp == NULL) {
+ int status = PyDict_GetItemRef(used_keys, pair->identity, &tmp);
+ if (status == -1) {
+ // exception set
+ return -1;
+ }
+ else if (status == 0) {
// not found
continue;
}
num = PyLong_AsSsize_t(tmp);
+ Py_DECREF(tmp);
if (num == -1) {
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError, "invalid internal state");
@@ -955,12 +956,18 @@ _pair_list_update(pair_list_t *list, PyObject *key,
int found;
int ident_cmp_res;
- item = PyDict_GetItem(used_keys, identity);
- if (item == NULL) {
+ int status = PyDict_GetItemRef(used_keys, identity, &item);
+ if (status == -1) {
+ // exception set
+ return -1;
+ }
+ else if (status == 0) {
+ // not found
pos = 0;
}
else {
pos = PyLong_AsSsize_t(item);
+ Py_DECREF(item);
if (pos == -1) {
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError, "invalid internal state");
@@ -1087,18 +1094,28 @@ pair_list_update_from_seq(pair_list_t *list, PyObject *seq)
}
// Convert item to sequence, and verify length 2.
+#ifdef Py_GIL_DISABLED
+ if (!PySequence_Check(item)) {
+#else
fast = PySequence_Fast(item, "");
if (fast == NULL) {
if (PyErr_ExceptionMatches(PyExc_TypeError)) {
+#endif
PyErr_Format(PyExc_TypeError,
"multidict cannot convert sequence element #%zd"
" to a sequence",
i);
+#ifndef Py_GIL_DISABLED
}
+#endif
goto fail_1;
}
+#ifdef Py_GIL_DISABLED
+ n = PySequence_Size(item);
+#else
n = PySequence_Fast_GET_SIZE(fast);
+#endif
if (n != 2) {
PyErr_Format(PyExc_ValueError,
"multidict update sequence element #%zd "
@@ -1107,10 +1124,27 @@ pair_list_update_from_seq(pair_list_t *list, PyObject *seq)
goto fail_1;
}
+#ifdef Py_GIL_DISABLED
+ key = PySequence_ITEM(item, 0);
+ if (key == NULL) {
+ PyErr_Format(PyExc_ValueError,
+ "multidict update sequence element #%zd's "
+ "key could not be fetched", i);
+ goto fail_1;
+ }
+ value = PySequence_ITEM(item, 1);
+ if (value == NULL) {
+ PyErr_Format(PyExc_ValueError,
+ "multidict update sequence element #%zd's "
+ "value could not be fetched", i);
+ goto fail_1;
+ }
+#else
key = PySequence_Fast_GET_ITEM(fast, 0);
value = PySequence_Fast_GET_ITEM(fast, 1);
Py_INCREF(key);
Py_INCREF(value);
+#endif
identity = pair_list_calc_identity(list, key);
if (identity == NULL) {
@@ -1128,7 +1162,9 @@ pair_list_update_from_seq(pair_list_t *list, PyObject *seq)
Py_DECREF(key);
Py_DECREF(value);
+#ifndef Py_GIL_DISABLED
Py_DECREF(fast);
+#endif
Py_DECREF(item);
Py_DECREF(identity);
}
diff --git a/contrib/python/multidict/multidict/_multilib/pythoncapi_compat.h b/contrib/python/multidict/multidict/_multilib/pythoncapi_compat.h
new file mode 100644
index 0000000000..971981993b
--- /dev/null
+++ b/contrib/python/multidict/multidict/_multilib/pythoncapi_compat.h
@@ -0,0 +1,1142 @@
+// Header file providing new C API functions to old Python versions.
+//
+// File distributed under the Zero Clause BSD (0BSD) license.
+// Copyright Contributors to the pythoncapi_compat project.
+//
+// Homepage:
+// https://github.com/python/pythoncapi_compat
+//
+// Latest version:
+// https://raw.githubusercontent.com/python/pythoncapi_compat/master/pythoncapi_compat.h
+//
+// The vendored version comes from commit:
+// https://raw.githubusercontent.com/python/pythoncapi-compat/2d18aecd7b2f549d38a13e27b682ea4966f37bd8/pythoncapi_compat.h
+//
+// SPDX-License-Identifier: 0BSD
+
+#ifndef PYTHONCAPI_COMPAT
+#define PYTHONCAPI_COMPAT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+
+// Python 3.11.0b4 added PyFrame_Back() to Python.h
+#if PY_VERSION_HEX < 0x030b00B4 && !defined(PYPY_VERSION)
+# include "frameobject.h" // PyFrameObject, PyFrame_GetBack()
+#endif
+
+
+#ifndef _Py_CAST
+# define _Py_CAST(type, expr) ((type)(expr))
+#endif
+
+// Static inline functions should use _Py_NULL rather than using directly NULL
+// to prevent C++ compiler warnings. On C23 and newer and on C++11 and newer,
+// _Py_NULL is defined as nullptr.
+#if (defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L) \
+ || (defined(__cplusplus) && __cplusplus >= 201103)
+# define _Py_NULL nullptr
+#else
+# define _Py_NULL NULL
+#endif
+
+// Cast argument to PyObject* type.
+#ifndef _PyObject_CAST
+# define _PyObject_CAST(op) _Py_CAST(PyObject*, op)
+#endif
+
+
+// bpo-42262 added Py_NewRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030A00A3 && !defined(Py_NewRef)
+static inline PyObject* _Py_NewRef(PyObject *obj)
+{
+ Py_INCREF(obj);
+ return obj;
+}
+#define Py_NewRef(obj) _Py_NewRef(_PyObject_CAST(obj))
+#endif
+
+
+// bpo-42262 added Py_XNewRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030A00A3 && !defined(Py_XNewRef)
+static inline PyObject* _Py_XNewRef(PyObject *obj)
+{
+ Py_XINCREF(obj);
+ return obj;
+}
+#define Py_XNewRef(obj) _Py_XNewRef(_PyObject_CAST(obj))
+#endif
+
+
+// bpo-43753 added Py_Is(), Py_IsNone(), Py_IsTrue() and Py_IsFalse()
+// to Python 3.10.0b1.
+#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_Is)
+# define Py_Is(x, y) ((x) == (y))
+#endif
+#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_IsNone)
+# define Py_IsNone(x) Py_Is(x, Py_None)
+#endif
+#if (PY_VERSION_HEX < 0x030A00B1 || defined(PYPY_VERSION)) && !defined(Py_IsTrue)
+# define Py_IsTrue(x) Py_Is(x, Py_True)
+#endif
+#if (PY_VERSION_HEX < 0x030A00B1 || defined(PYPY_VERSION)) && !defined(Py_IsFalse)
+# define Py_IsFalse(x) Py_Is(x, Py_False)
+#endif
+
+
+#if defined(PYPY_VERSION)
+static inline PyCodeObject* PyFrame_GetCode(PyFrameObject *frame)
+{
+ assert(frame != _Py_NULL);
+ assert(frame->f_code != _Py_NULL);
+ return _Py_CAST(PyCodeObject*, Py_NewRef(frame->f_code));
+}
+#endif
+
+static inline PyCodeObject* _PyFrame_GetCodeBorrow(PyFrameObject *frame)
+{
+ PyCodeObject *code = PyFrame_GetCode(frame);
+ Py_DECREF(code);
+ return code;
+}
+
+#if !defined(PYPY_VERSION)
+static inline PyFrameObject* _PyFrame_GetBackBorrow(PyFrameObject *frame)
+{
+ PyFrameObject *back = PyFrame_GetBack(frame);
+ Py_XDECREF(back);
+ return back;
+}
+#endif
+
+
+// bpo-40421 added PyFrame_GetLocals() to Python 3.11.0a7
+#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION)
+static inline PyObject* PyFrame_GetLocals(PyFrameObject *frame)
+{
+ if (PyFrame_FastToLocalsWithError(frame) < 0) {
+ return NULL;
+ }
+ return Py_NewRef(frame->f_locals);
+}
+#endif
+
+
+// bpo-40421 added PyFrame_GetGlobals() to Python 3.11.0a7
+#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION)
+static inline PyObject* PyFrame_GetGlobals(PyFrameObject *frame)
+{
+ return Py_NewRef(frame->f_globals);
+}
+#endif
+
+
+// bpo-40421 added PyFrame_GetBuiltins() to Python 3.11.0a7
+#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION)
+static inline PyObject* PyFrame_GetBuiltins(PyFrameObject *frame)
+{
+ return Py_NewRef(frame->f_builtins);
+}
+#endif
+
+
+// bpo-40421 added PyFrame_GetLasti() to Python 3.11.0b1
+#if PY_VERSION_HEX < 0x030B00B1 && !defined(PYPY_VERSION)
+static inline int PyFrame_GetLasti(PyFrameObject *frame)
+{
+#if PY_VERSION_HEX >= 0x030A00A7
+ // bpo-27129: Since Python 3.10.0a7, f_lasti is an instruction offset,
+ // not a bytes offset anymore. Python uses 16-bit "wordcode" (2 bytes)
+ // instructions.
+ if (frame->f_lasti < 0) {
+ return -1;
+ }
+ return frame->f_lasti * 2;
+#else
+ return frame->f_lasti;
+#endif
+}
+#endif
+
+
+// gh-91248 added PyFrame_GetVar() to Python 3.12.0a2
+#if PY_VERSION_HEX < 0x030C00A2 && !defined(PYPY_VERSION)
+static inline PyObject* PyFrame_GetVar(PyFrameObject *frame, PyObject *name)
+{
+ PyObject *locals, *value;
+
+ locals = PyFrame_GetLocals(frame);
+ if (locals == NULL) {
+ return NULL;
+ }
+ value = PyDict_GetItemWithError(locals, name);
+ Py_DECREF(locals);
+
+ if (value == NULL) {
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
+ PyErr_Format(PyExc_NameError, "variable %R does not exist", name);
+ return NULL;
+ }
+ return Py_NewRef(value);
+}
+#endif
+
+
+// gh-91248 added PyFrame_GetVarString() to Python 3.12.0a2
+#if PY_VERSION_HEX < 0x030C00A2 && !defined(PYPY_VERSION)
+static inline PyObject*
+PyFrame_GetVarString(PyFrameObject *frame, const char *name)
+{
+ PyObject *name_obj, *value;
+ name_obj = PyUnicode_FromString(name);
+ if (name_obj == NULL) {
+ return NULL;
+ }
+ value = PyFrame_GetVar(frame, name_obj);
+ Py_DECREF(name_obj);
+ return value;
+}
+#endif
+
+
+#if defined(PYPY_VERSION)
+static inline PyInterpreterState *
+PyThreadState_GetInterpreter(PyThreadState *tstate)
+{
+ assert(tstate != _Py_NULL);
+ return tstate->interp;
+}
+#endif
+
+#if !defined(PYPY_VERSION)
+static inline PyFrameObject*
+_PyThreadState_GetFrameBorrow(PyThreadState *tstate)
+{
+ PyFrameObject *frame = PyThreadState_GetFrame(tstate);
+ Py_XDECREF(frame);
+ return frame;
+}
+#endif
+
+
+#if defined(PYPY_VERSION)
+static inline PyInterpreterState* PyInterpreterState_Get(void)
+{
+ PyThreadState *tstate;
+ PyInterpreterState *interp;
+
+ tstate = PyThreadState_GET();
+ if (tstate == _Py_NULL) {
+ Py_FatalError("GIL released (tstate is NULL)");
+ }
+ interp = tstate->interp;
+ if (interp == _Py_NULL) {
+ Py_FatalError("no current interpreter");
+ }
+ return interp;
+}
+#endif
+
+// bpo-43760 added PyThreadState_EnterTracing() to Python 3.11.0a2
+#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION)
+static inline void PyThreadState_EnterTracing(PyThreadState *tstate)
+{
+ tstate->tracing++;
+#if PY_VERSION_HEX >= 0x030A00A1
+ tstate->cframe->use_tracing = 0;
+#else
+ tstate->use_tracing = 0;
+#endif
+}
+#endif
+
+// bpo-43760 added PyThreadState_LeaveTracing() to Python 3.11.0a2
+#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION)
+static inline void PyThreadState_LeaveTracing(PyThreadState *tstate)
+{
+ int use_tracing = (tstate->c_tracefunc != _Py_NULL
+ || tstate->c_profilefunc != _Py_NULL);
+ tstate->tracing--;
+#if PY_VERSION_HEX >= 0x030A00A1
+ tstate->cframe->use_tracing = use_tracing;
+#else
+ tstate->use_tracing = use_tracing;
+#endif
+}
+#endif
+
+
+// bpo-1635741 added PyModule_AddObjectRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030A00A3
+static inline int
+PyModule_AddObjectRef(PyObject *module, const char *name, PyObject *value)
+{
+ int res;
+
+ if (!value && !PyErr_Occurred()) {
+ // PyModule_AddObject() raises TypeError in this case
+ PyErr_SetString(PyExc_SystemError,
+ "PyModule_AddObjectRef() must be called "
+ "with an exception raised if value is NULL");
+ return -1;
+ }
+
+ Py_XINCREF(value);
+ res = PyModule_AddObject(module, name, value);
+ if (res < 0) {
+ Py_XDECREF(value);
+ }
+ return res;
+}
+#endif
+
+
+// bpo-46906 added PyFloat_Pack2() and PyFloat_Unpack2() to Python 3.11a7.
+// Python 3.11a2 moved _PyFloat_Pack2() and _PyFloat_Unpack2() to the internal
+// C API: Python 3.11a2-3.11a6 versions are not supported.
+#if PY_VERSION_HEX <= 0x030B00A1 && !defined(PYPY_VERSION)
+static inline int PyFloat_Pack2(double x, char *p, int le)
+{ return _PyFloat_Pack2(x, (unsigned char*)p, le); }
+
+static inline double PyFloat_Unpack2(const char *p, int le)
+{ return _PyFloat_Unpack2((const unsigned char *)p, le); }
+#endif
+
+
+// bpo-46906 added PyFloat_Pack4(), PyFloat_Pack8(), PyFloat_Unpack4() and
+// PyFloat_Unpack8() to Python 3.11a7.
+// Python 3.11a2 moved _PyFloat_Pack4(), _PyFloat_Pack8(), _PyFloat_Unpack4()
+// and _PyFloat_Unpack8() to the internal C API: Python 3.11a2-3.11a6 versions
+// are not supported.
+#if PY_VERSION_HEX <= 0x030B00A1 && !defined(PYPY_VERSION)
+static inline int PyFloat_Pack4(double x, char *p, int le)
+{ return _PyFloat_Pack4(x, (unsigned char*)p, le); }
+
+static inline int PyFloat_Pack8(double x, char *p, int le)
+{ return _PyFloat_Pack8(x, (unsigned char*)p, le); }
+
+static inline double PyFloat_Unpack4(const char *p, int le)
+{ return _PyFloat_Unpack4((const unsigned char *)p, le); }
+
+static inline double PyFloat_Unpack8(const char *p, int le)
+{ return _PyFloat_Unpack8((const unsigned char *)p, le); }
+#endif
+
+
+// gh-92154 added PyCode_GetCode() to Python 3.11.0b1
+#if PY_VERSION_HEX < 0x030B00B1 && !defined(PYPY_VERSION)
+static inline PyObject* PyCode_GetCode(PyCodeObject *code)
+{
+ return Py_NewRef(code->co_code);
+}
+#endif
+
+
+// gh-95008 added PyCode_GetVarnames() to Python 3.11.0rc1
+#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION)
+static inline PyObject* PyCode_GetVarnames(PyCodeObject *code)
+{
+ return Py_NewRef(code->co_varnames);
+}
+#endif
+
+// gh-95008 added PyCode_GetFreevars() to Python 3.11.0rc1
+#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION)
+static inline PyObject* PyCode_GetFreevars(PyCodeObject *code)
+{
+ return Py_NewRef(code->co_freevars);
+}
+#endif
+
+// gh-95008 added PyCode_GetCellvars() to Python 3.11.0rc1
+#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION)
+static inline PyObject* PyCode_GetCellvars(PyCodeObject *code)
+{
+ return Py_NewRef(code->co_cellvars);
+}
+#endif
+
+
+// gh-105922 added PyImport_AddModuleRef() to Python 3.13.0a1
+#if PY_VERSION_HEX < 0x030D00A0
+static inline PyObject* PyImport_AddModuleRef(const char *name)
+{
+ return Py_XNewRef(PyImport_AddModule(name));
+}
+#endif
+
+
+// gh-105927 added PyWeakref_GetRef() to Python 3.13.0a1
+#if PY_VERSION_HEX < 0x030D0000
+static inline int PyWeakref_GetRef(PyObject *ref, PyObject **pobj)
+{
+ PyObject *obj;
+ if (ref != NULL && !PyWeakref_Check(ref)) {
+ *pobj = NULL;
+ PyErr_SetString(PyExc_TypeError, "expected a weakref");
+ return -1;
+ }
+ obj = PyWeakref_GetObject(ref);
+ if (obj == NULL) {
+ // SystemError if ref is NULL
+ *pobj = NULL;
+ return -1;
+ }
+ if (obj == Py_None) {
+ *pobj = NULL;
+ return 0;
+ }
+ *pobj = Py_NewRef(obj);
+ return (*pobj != NULL);
+}
+#endif
+
+
+// gh-106521 added PyObject_GetOptionalAttr() and
+// PyObject_GetOptionalAttrString() to Python 3.13.0a1
+#if PY_VERSION_HEX < 0x030D00A1
+static inline int
+PyObject_GetOptionalAttr(PyObject *obj, PyObject *attr_name, PyObject **result)
+{
+ return _PyObject_LookupAttr(obj, attr_name, result);
+}
+
+static inline int
+PyObject_GetOptionalAttrString(PyObject *obj, const char *attr_name, PyObject **result)
+{
+ PyObject *name_obj;
+ int rc;
+ name_obj = PyUnicode_FromString(attr_name);
+ if (name_obj == NULL) {
+ *result = NULL;
+ return -1;
+ }
+ rc = PyObject_GetOptionalAttr(obj, name_obj, result);
+ Py_DECREF(name_obj);
+ return rc;
+}
+#endif
+
+
+// gh-106307 added PyObject_GetOptionalAttr() and
+// PyMapping_GetOptionalItemString() to Python 3.13.0a1
+#if PY_VERSION_HEX < 0x030D00A1
+static inline int
+PyMapping_GetOptionalItem(PyObject *obj, PyObject *key, PyObject **result)
+{
+ *result = PyObject_GetItem(obj, key);
+ if (*result) {
+ return 1;
+ }
+ if (!PyErr_ExceptionMatches(PyExc_KeyError)) {
+ return -1;
+ }
+ PyErr_Clear();
+ return 0;
+}
+
+static inline int
+PyMapping_GetOptionalItemString(PyObject *obj, const char *key, PyObject **result)
+{
+ PyObject *key_obj;
+ int rc;
+ key_obj = PyUnicode_FromString(key);
+ if (key_obj == NULL) {
+ *result = NULL;
+ return -1;
+ }
+ rc = PyMapping_GetOptionalItem(obj, key_obj, result);
+ Py_DECREF(key_obj);
+ return rc;
+}
+#endif
+
+// gh-108511 added PyMapping_HasKeyWithError() and
+// PyMapping_HasKeyStringWithError() to Python 3.13.0a1
+#if PY_VERSION_HEX < 0x030D00A1
+static inline int
+PyMapping_HasKeyWithError(PyObject *obj, PyObject *key)
+{
+ PyObject *res;
+ int rc = PyMapping_GetOptionalItem(obj, key, &res);
+ Py_XDECREF(res);
+ return rc;
+}
+
+static inline int
+PyMapping_HasKeyStringWithError(PyObject *obj, const char *key)
+{
+ PyObject *res;
+ int rc = PyMapping_GetOptionalItemString(obj, key, &res);
+ Py_XDECREF(res);
+ return rc;
+}
+#endif
+
+
+// gh-108511 added PyObject_HasAttrWithError() and
+// PyObject_HasAttrStringWithError() to Python 3.13.0a1
+#if PY_VERSION_HEX < 0x030D00A1
+static inline int
+PyObject_HasAttrWithError(PyObject *obj, PyObject *attr)
+{
+ PyObject *res;
+ int rc = PyObject_GetOptionalAttr(obj, attr, &res);
+ Py_XDECREF(res);
+ return rc;
+}
+
+static inline int
+PyObject_HasAttrStringWithError(PyObject *obj, const char *attr)
+{
+ PyObject *res;
+ int rc = PyObject_GetOptionalAttrString(obj, attr, &res);
+ Py_XDECREF(res);
+ return rc;
+}
+#endif
+
+
+// gh-106004 added PyDict_GetItemRef() and PyDict_GetItemStringRef()
+// to Python 3.13.0a1
+#if PY_VERSION_HEX < 0x030D00A1
+static inline int
+PyDict_GetItemRef(PyObject *mp, PyObject *key, PyObject **result)
+{
+ PyObject *item = PyDict_GetItemWithError(mp, key);
+ if (item != NULL) {
+ *result = Py_NewRef(item);
+ return 1; // found
+ }
+ if (!PyErr_Occurred()) {
+ *result = NULL;
+ return 0; // not found
+ }
+ *result = NULL;
+ return -1;
+}
+
+static inline int
+PyDict_GetItemStringRef(PyObject *mp, const char *key, PyObject **result)
+{
+ int res;
+ PyObject *key_obj = PyUnicode_FromString(key);
+ if (key_obj == NULL) {
+ *result = NULL;
+ return -1;
+ }
+ res = PyDict_GetItemRef(mp, key_obj, result);
+ Py_DECREF(key_obj);
+ return res;
+}
+#endif
+
+
+// gh-106307 added PyModule_Add() to Python 3.13.0a1
+#if PY_VERSION_HEX < 0x030D00A1
+static inline int
+PyModule_Add(PyObject *mod, const char *name, PyObject *value)
+{
+ int res = PyModule_AddObjectRef(mod, name, value);
+ Py_XDECREF(value);
+ return res;
+}
+#endif
+
+
+// gh-108014 added Py_IsFinalizing() to Python 3.13.0a1
+// bpo-1856 added _Py_Finalizing to Python 3.2.1b1.
+// _Py_IsFinalizing() was added to PyPy 7.3.0.
+#if (PY_VERSION_HEX < 0x030D00A1) \
+ && (!defined(PYPY_VERSION_NUM) || PYPY_VERSION_NUM >= 0x7030000)
+static inline int Py_IsFinalizing(void)
+{
+ return _Py_IsFinalizing();
+}
+#endif
+
+
+// gh-108323 added PyDict_ContainsString() to Python 3.13.0a1
+#if PY_VERSION_HEX < 0x030D00A1
+static inline int PyDict_ContainsString(PyObject *op, const char *key)
+{
+ PyObject *key_obj = PyUnicode_FromString(key);
+ if (key_obj == NULL) {
+ return -1;
+ }
+ int res = PyDict_Contains(op, key_obj);
+ Py_DECREF(key_obj);
+ return res;
+}
+#endif
+
+
+// gh-108445 added PyLong_AsInt() to Python 3.13.0a1
+#if PY_VERSION_HEX < 0x030D00A1
+static inline int PyLong_AsInt(PyObject *obj)
+{
+#ifdef PYPY_VERSION
+ long value = PyLong_AsLong(obj);
+ if (value == -1 && PyErr_Occurred()) {
+ return -1;
+ }
+ if (value < (long)INT_MIN || (long)INT_MAX < value) {
+ PyErr_SetString(PyExc_OverflowError,
+ "Python int too large to convert to C int");
+ return -1;
+ }
+ return (int)value;
+#else
+ return _PyLong_AsInt(obj);
+#endif
+}
+#endif
+
+
+// gh-107073 added PyObject_VisitManagedDict() to Python 3.13.0a1
+#if PY_VERSION_HEX < 0x030D00A1
+static inline int
+PyObject_VisitManagedDict(PyObject *obj, visitproc visit, void *arg)
+{
+ PyObject **dict = _PyObject_GetDictPtr(obj);
+ if (*dict == NULL) {
+ return -1;
+ }
+ Py_VISIT(*dict);
+ return 0;
+}
+
+static inline void
+PyObject_ClearManagedDict(PyObject *obj)
+{
+ PyObject **dict = _PyObject_GetDictPtr(obj);
+ if (*dict == NULL) {
+ return;
+ }
+ Py_CLEAR(*dict);
+}
+#endif
+
+// gh-108867 added PyThreadState_GetUnchecked() to Python 3.13.0a1.
+#if PY_VERSION_HEX < 0x030D00A1
+static inline PyThreadState*
+PyThreadState_GetUnchecked(void)
+{
+ return _PyThreadState_UncheckedGet();
+}
+#endif
+
+// gh-110289 added PyUnicode_EqualToUTF8() and PyUnicode_EqualToUTF8AndSize()
+// to Python 3.13.0a1
+#if PY_VERSION_HEX < 0x030D00A1
+static inline int
+PyUnicode_EqualToUTF8AndSize(PyObject *unicode, const char *str, Py_ssize_t str_len)
+{
+ Py_ssize_t len;
+ const void *utf8;
+ PyObject *exc_type, *exc_value, *exc_tb;
+ int res;
+
+ // API cannot report errors so save/restore the exception
+ PyErr_Fetch(&exc_type, &exc_value, &exc_tb);
+
+ if (PyUnicode_IS_ASCII(unicode)) {
+ utf8 = PyUnicode_DATA(unicode);
+ len = PyUnicode_GET_LENGTH(unicode);
+ }
+ else {
+ utf8 = PyUnicode_AsUTF8AndSize(unicode, &len);
+ if (utf8 == NULL) {
+ // Memory allocation failure. The API cannot report error,
+ // so ignore the exception and return 0.
+ res = 0;
+ goto done;
+ }
+ }
+
+ if (len != str_len) {
+ res = 0;
+ goto done;
+ }
+ res = (memcmp(utf8, str, (size_t)len) == 0);
+
+done:
+ PyErr_Restore(exc_type, exc_value, exc_tb);
+ return res;
+}
+
+static inline int
+PyUnicode_EqualToUTF8(PyObject *unicode, const char *str)
+{
+ return PyUnicode_EqualToUTF8AndSize(unicode, str, (Py_ssize_t)strlen(str));
+}
+#endif
+
+
+// gh-111138 added PyList_Extend() and PyList_Clear() to Python 3.13.0a2
+#if PY_VERSION_HEX < 0x030D00A2
+static inline int
+PyList_Extend(PyObject *list, PyObject *iterable)
+{
+ return PyList_SetSlice(list, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, iterable);
+}
+
+static inline int
+PyList_Clear(PyObject *list)
+{
+ return PyList_SetSlice(list, 0, PY_SSIZE_T_MAX, NULL);
+}
+#endif
+
+// gh-111262 added PyDict_Pop() and PyDict_PopString() to Python 3.13.0a2
+#if PY_VERSION_HEX < 0x030D00A2
+static inline int
+PyDict_Pop(PyObject *dict, PyObject *key, PyObject **result)
+{
+ PyObject *value;
+
+ if (!PyDict_Check(dict)) {
+ PyErr_BadInternalCall();
+ if (result) {
+ *result = NULL;
+ }
+ return -1;
+ }
+
+ // Python 3.13.0a1 removed _PyDict_Pop().
+#if defined(PYPY_VERSION) || PY_VERSION_HEX >= 0x030D0000
+ value = PyObject_CallMethod(dict, "pop", "O", key);
+#else
+ value = _PyDict_Pop(dict, key, NULL);
+#endif
+ if (value == NULL) {
+ if (result) {
+ *result = NULL;
+ }
+ if (PyErr_Occurred() && !PyErr_ExceptionMatches(PyExc_KeyError)) {
+ return -1;
+ }
+ PyErr_Clear();
+ return 0;
+ }
+ if (result) {
+ *result = value;
+ }
+ else {
+ Py_DECREF(value);
+ }
+ return 1;
+}
+
+static inline int
+PyDict_PopString(PyObject *dict, const char *key, PyObject **result)
+{
+ PyObject *key_obj = PyUnicode_FromString(key);
+ if (key_obj == NULL) {
+ if (result != NULL) {
+ *result = NULL;
+ }
+ return -1;
+ }
+
+ int res = PyDict_Pop(dict, key_obj, result);
+ Py_DECREF(key_obj);
+ return res;
+}
+#endif
+
+
+// gh-111545 added Py_HashPointer() to Python 3.13.0a3
+#if PY_VERSION_HEX < 0x030D00A3
+static inline Py_hash_t Py_HashPointer(const void *ptr)
+{
+#if !defined(PYPY_VERSION)
+ return _Py_HashPointer(ptr);
+#else
+ return _Py_HashPointer(_Py_CAST(void*, ptr));
+#endif
+}
+#endif
+
+
+// Python 3.13a4 added a PyTime API.
+#if PY_VERSION_HEX < 0x030D00A4
+typedef _PyTime_t PyTime_t;
+#define PyTime_MIN _PyTime_MIN
+#define PyTime_MAX _PyTime_MAX
+
+static inline double PyTime_AsSecondsDouble(PyTime_t t)
+{ return _PyTime_AsSecondsDouble(t); }
+
+static inline int PyTime_Monotonic(PyTime_t *result)
+{ return _PyTime_GetMonotonicClockWithInfo(result, NULL); }
+
+static inline int PyTime_Time(PyTime_t *result)
+{ return _PyTime_GetSystemClockWithInfo(result, NULL); }
+
+static inline int PyTime_PerfCounter(PyTime_t *result)
+{
+#if !defined(PYPY_VERSION)
+ return _PyTime_GetPerfCounterWithInfo(result, NULL);
+#else
+ // Call time.perf_counter_ns() and convert Python int object to PyTime_t.
+ // Cache time.perf_counter_ns() function for best performance.
+ static PyObject *func = NULL;
+ if (func == NULL) {
+ PyObject *mod = PyImport_ImportModule("time");
+ if (mod == NULL) {
+ return -1;
+ }
+
+ func = PyObject_GetAttrString(mod, "perf_counter_ns");
+ Py_DECREF(mod);
+ if (func == NULL) {
+ return -1;
+ }
+ }
+
+ PyObject *res = PyObject_CallNoArgs(func);
+ if (res == NULL) {
+ return -1;
+ }
+ long long value = PyLong_AsLongLong(res);
+ Py_DECREF(res);
+
+ if (value == -1 && PyErr_Occurred()) {
+ return -1;
+ }
+
+ Py_BUILD_ASSERT(sizeof(value) >= sizeof(PyTime_t));
+ *result = (PyTime_t)value;
+ return 0;
+#endif
+}
+
+#endif
+
+// gh-111389 added hash constants to Python 3.13.0a5. These constants were
+// added first as private macros to Python 3.4.0b1 and PyPy 7.3.9.
+#if (!defined(PyHASH_BITS) \
+ && (!defined(PYPY_VERSION) \
+ || (defined(PYPY_VERSION) && PYPY_VERSION_NUM >= 0x07090000)))
+# define PyHASH_BITS _PyHASH_BITS
+# define PyHASH_MODULUS _PyHASH_MODULUS
+# define PyHASH_INF _PyHASH_INF
+# define PyHASH_IMAG _PyHASH_IMAG
+#endif
+
+
+// gh-111545 added Py_GetConstant() and Py_GetConstantBorrowed()
+// to Python 3.13.0a6
+#if PY_VERSION_HEX < 0x030D00A6 && !defined(Py_CONSTANT_NONE)
+
+#define Py_CONSTANT_NONE 0
+#define Py_CONSTANT_FALSE 1
+#define Py_CONSTANT_TRUE 2
+#define Py_CONSTANT_ELLIPSIS 3
+#define Py_CONSTANT_NOT_IMPLEMENTED 4
+#define Py_CONSTANT_ZERO 5
+#define Py_CONSTANT_ONE 6
+#define Py_CONSTANT_EMPTY_STR 7
+#define Py_CONSTANT_EMPTY_BYTES 8
+#define Py_CONSTANT_EMPTY_TUPLE 9
+
+static inline PyObject* Py_GetConstant(unsigned int constant_id)
+{
+ static PyObject* constants[Py_CONSTANT_EMPTY_TUPLE + 1] = {NULL};
+
+ if (constants[Py_CONSTANT_NONE] == NULL) {
+ constants[Py_CONSTANT_NONE] = Py_None;
+ constants[Py_CONSTANT_FALSE] = Py_False;
+ constants[Py_CONSTANT_TRUE] = Py_True;
+ constants[Py_CONSTANT_ELLIPSIS] = Py_Ellipsis;
+ constants[Py_CONSTANT_NOT_IMPLEMENTED] = Py_NotImplemented;
+
+ constants[Py_CONSTANT_ZERO] = PyLong_FromLong(0);
+ if (constants[Py_CONSTANT_ZERO] == NULL) {
+ goto fatal_error;
+ }
+
+ constants[Py_CONSTANT_ONE] = PyLong_FromLong(1);
+ if (constants[Py_CONSTANT_ONE] == NULL) {
+ goto fatal_error;
+ }
+
+ constants[Py_CONSTANT_EMPTY_STR] = PyUnicode_FromStringAndSize("", 0);
+ if (constants[Py_CONSTANT_EMPTY_STR] == NULL) {
+ goto fatal_error;
+ }
+
+ constants[Py_CONSTANT_EMPTY_BYTES] = PyBytes_FromStringAndSize("", 0);
+ if (constants[Py_CONSTANT_EMPTY_BYTES] == NULL) {
+ goto fatal_error;
+ }
+
+ constants[Py_CONSTANT_EMPTY_TUPLE] = PyTuple_New(0);
+ if (constants[Py_CONSTANT_EMPTY_TUPLE] == NULL) {
+ goto fatal_error;
+ }
+ // goto dance to avoid compiler warnings about Py_FatalError()
+ goto init_done;
+
+fatal_error:
+ // This case should never happen
+ Py_FatalError("Py_GetConstant() failed to get constants");
+ }
+
+init_done:
+ if (constant_id <= Py_CONSTANT_EMPTY_TUPLE) {
+ return Py_NewRef(constants[constant_id]);
+ }
+ else {
+ PyErr_BadInternalCall();
+ return NULL;
+ }
+}
+
+static inline PyObject* Py_GetConstantBorrowed(unsigned int constant_id)
+{
+ PyObject *obj = Py_GetConstant(constant_id);
+ Py_XDECREF(obj);
+ return obj;
+}
+#endif
+
+
+// gh-114329 added PyList_GetItemRef() to Python 3.13.0a4
+#if PY_VERSION_HEX < 0x030D00A4
+static inline PyObject *
+PyList_GetItemRef(PyObject *op, Py_ssize_t index)
+{
+ PyObject *item = PyList_GetItem(op, index);
+ Py_XINCREF(item);
+ return item;
+}
+#endif
+
+
+// gh-114329 added PyList_GetItemRef() to Python 3.13.0a4
+#if PY_VERSION_HEX < 0x030D00A4
+static inline int
+PyDict_SetDefaultRef(PyObject *d, PyObject *key, PyObject *default_value,
+ PyObject **result)
+{
+ PyObject *value;
+ if (PyDict_GetItemRef(d, key, &value) < 0) {
+ // get error
+ if (result) {
+ *result = NULL;
+ }
+ return -1;
+ }
+ if (value != NULL) {
+ // present
+ if (result) {
+ *result = value;
+ }
+ else {
+ Py_DECREF(value);
+ }
+ return 1;
+ }
+
+ // missing: set the item
+ if (PyDict_SetItem(d, key, default_value) < 0) {
+ // set error
+ if (result) {
+ *result = NULL;
+ }
+ return -1;
+ }
+ if (result) {
+ *result = Py_NewRef(default_value);
+ }
+ return 0;
+}
+#endif
+
+#if PY_VERSION_HEX < 0x030D00B3
+# define Py_BEGIN_CRITICAL_SECTION(op) {
+# define Py_END_CRITICAL_SECTION() }
+# define Py_BEGIN_CRITICAL_SECTION2(a, b) {
+# define Py_END_CRITICAL_SECTION2() }
+#endif
+
+#if PY_VERSION_HEX < 0x030E0000 && !defined(PYPY_VERSION)
+typedef struct PyUnicodeWriter PyUnicodeWriter;
+
+static inline void PyUnicodeWriter_Discard(PyUnicodeWriter *writer)
+{
+ _PyUnicodeWriter_Dealloc((_PyUnicodeWriter*)writer);
+ PyMem_Free(writer);
+}
+
+static inline PyUnicodeWriter* PyUnicodeWriter_Create(Py_ssize_t length)
+{
+ if (length < 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "length must be positive");
+ return NULL;
+ }
+
+ const size_t size = sizeof(_PyUnicodeWriter);
+ PyUnicodeWriter *pub_writer = (PyUnicodeWriter *)PyMem_Malloc(size);
+ if (pub_writer == _Py_NULL) {
+ PyErr_NoMemory();
+ return _Py_NULL;
+ }
+ _PyUnicodeWriter *writer = (_PyUnicodeWriter *)pub_writer;
+
+ _PyUnicodeWriter_Init(writer);
+ if (_PyUnicodeWriter_Prepare(writer, length, 127) < 0) {
+ PyUnicodeWriter_Discard(pub_writer);
+ return NULL;
+ }
+ writer->overallocate = 1;
+ return pub_writer;
+}
+
+static inline PyObject* PyUnicodeWriter_Finish(PyUnicodeWriter *writer)
+{
+ PyObject *str = _PyUnicodeWriter_Finish((_PyUnicodeWriter*)writer);
+ assert(((_PyUnicodeWriter*)writer)->buffer == NULL);
+ PyMem_Free(writer);
+ return str;
+}
+
+static inline int
+PyUnicodeWriter_WriteChar(PyUnicodeWriter *writer, Py_UCS4 ch)
+{
+ if (ch > 0x10ffff) {
+ PyErr_SetString(PyExc_ValueError,
+ "character must be in range(0x110000)");
+ return -1;
+ }
+
+ return _PyUnicodeWriter_WriteChar((_PyUnicodeWriter*)writer, ch);
+}
+
+static inline int
+PyUnicodeWriter_WriteStr(PyUnicodeWriter *writer, PyObject *obj)
+{
+ PyObject *str = PyObject_Str(obj);
+ if (str == NULL) {
+ return -1;
+ }
+
+ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str);
+ Py_DECREF(str);
+ return res;
+}
+
+static inline int
+PyUnicodeWriter_WriteRepr(PyUnicodeWriter *writer, PyObject *obj)
+{
+ PyObject *str = PyObject_Repr(obj);
+ if (str == NULL) {
+ return -1;
+ }
+
+ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str);
+ Py_DECREF(str);
+ return res;
+}
+
+static inline int
+PyUnicodeWriter_WriteUTF8(PyUnicodeWriter *writer,
+ const char *str, Py_ssize_t size)
+{
+ if (size < 0) {
+ size = (Py_ssize_t)strlen(str);
+ }
+
+ PyObject *str_obj = PyUnicode_FromStringAndSize(str, size);
+ if (str_obj == _Py_NULL) {
+ return -1;
+ }
+
+ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str_obj);
+ Py_DECREF(str_obj);
+ return res;
+}
+
+static inline int
+PyUnicodeWriter_WriteWideChar(PyUnicodeWriter *writer,
+ const wchar_t *str, Py_ssize_t size)
+{
+ if (size < 0) {
+ size = (Py_ssize_t)wcslen(str);
+ }
+
+ PyObject *str_obj = PyUnicode_FromWideChar(str, size);
+ if (str_obj == _Py_NULL) {
+ return -1;
+ }
+
+ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str_obj);
+ Py_DECREF(str_obj);
+ return res;
+}
+
+static inline int
+PyUnicodeWriter_WriteSubstring(PyUnicodeWriter *writer, PyObject *str,
+ Py_ssize_t start, Py_ssize_t end)
+{
+ if (!PyUnicode_Check(str)) {
+ PyErr_Format(PyExc_TypeError, "expect str, not %T", str);
+ return -1;
+ }
+ if (start < 0 || start > end) {
+ PyErr_Format(PyExc_ValueError, "invalid start argument");
+ return -1;
+ }
+ if (end > PyUnicode_GET_LENGTH(str)) {
+ PyErr_Format(PyExc_ValueError, "invalid end argument");
+ return -1;
+ }
+
+ return _PyUnicodeWriter_WriteSubstring((_PyUnicodeWriter*)writer, str,
+ start, end);
+}
+
+static inline int
+PyUnicodeWriter_Format(PyUnicodeWriter *writer, const char *format, ...)
+{
+ va_list vargs;
+ va_start(vargs, format);
+ PyObject *str = PyUnicode_FromFormatV(format, vargs);
+ va_end(vargs);
+ if (str == _Py_NULL) {
+ return -1;
+ }
+
+ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str);
+ Py_DECREF(str);
+ return res;
+}
+#endif // PY_VERSION_HEX < 0x030E0000
+
+// gh-116560 added PyLong_GetSign() to Python 3.14.0a0
+#if PY_VERSION_HEX < 0x030E00A0
+static inline int PyLong_GetSign(PyObject *obj, int *sign)
+{
+ if (!PyLong_Check(obj)) {
+ PyErr_Format(PyExc_TypeError, "expect int, got %s", Py_TYPE(obj)->tp_name);
+ return -1;
+ }
+
+ *sign = _PyLong_Sign(obj);
+ return 0;
+}
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif // PYTHONCAPI_COMPAT
diff --git a/contrib/python/multidict/tests/conftest.py b/contrib/python/multidict/tests/conftest.py
index 0d003950cd..a37f58f2d1 100644
--- a/contrib/python/multidict/tests/conftest.py
+++ b/contrib/python/multidict/tests/conftest.py
@@ -3,26 +3,22 @@ from __future__ import annotations
import argparse
import pickle
from dataclasses import dataclass
+from functools import cached_property
from importlib import import_module
-from sys import version_info as _version_info
from types import ModuleType
-from typing import Callable, Type
-
-try:
- from functools import cached_property # Python 3.8+
-except ImportError:
- from functools import lru_cache as _lru_cache
-
- def cached_property(func):
- return property(_lru_cache()(func))
-
+from typing import Callable, Type, Union
import pytest
-from multidict import MultiMapping, MutableMultiMapping
+from multidict import (
+ CIMultiDict,
+ MultiDict,
+ MultiDictProxy,
+ MultiMapping,
+ MutableMultiMapping,
+)
C_EXT_MARK = pytest.mark.c_extension
-PY_38_AND_BELOW = _version_info < (3, 9)
@dataclass(frozen=True)
@@ -51,7 +47,7 @@ class MultidictImplementation:
importable_module = "_multidict_py" if self.is_pure_python else "_multidict"
return import_module(f"multidict.{importable_module}")
- def __str__(self):
+ def __str__(self) -> str:
"""Render the implementation facade instance as a string."""
return f"{self.tag}-module"
@@ -69,7 +65,7 @@ class MultidictImplementation:
)
def multidict_implementation(request: pytest.FixtureRequest) -> MultidictImplementation:
"""Return a multidict variant facade."""
- return request.param
+ return request.param # type: ignore[no-any-return]
@pytest.fixture(scope="session")
@@ -87,7 +83,7 @@ def multidict_module(
)
def any_multidict_class_name(request: pytest.FixtureRequest) -> str:
"""Return a class name of a mutable multidict implementation."""
- return request.param
+ return request.param # type: ignore[no-any-return]
@pytest.fixture(scope="session")
@@ -96,29 +92,29 @@ def any_multidict_class(
multidict_module: ModuleType,
) -> Type[MutableMultiMapping[str]]:
"""Return a class object of a mutable multidict implementation."""
- return getattr(multidict_module, any_multidict_class_name)
+ return getattr(multidict_module, any_multidict_class_name) # type: ignore[no-any-return]
@pytest.fixture(scope="session")
def case_sensitive_multidict_class(
multidict_module: ModuleType,
-) -> Type[MutableMultiMapping[str]]:
+) -> Type[MultiDict[str]]:
"""Return a case-sensitive mutable multidict class."""
- return multidict_module.MultiDict
+ return multidict_module.MultiDict # type: ignore[no-any-return]
@pytest.fixture(scope="session")
def case_insensitive_multidict_class(
multidict_module: ModuleType,
-) -> Type[MutableMultiMapping[str]]:
+) -> Type[CIMultiDict[str]]:
"""Return a case-insensitive mutable multidict class."""
- return multidict_module.CIMultiDict
+ return multidict_module.CIMultiDict # type: ignore[no-any-return]
@pytest.fixture(scope="session")
def case_insensitive_str_class(multidict_module: ModuleType) -> Type[str]:
"""Return a case-insensitive string class."""
- return multidict_module.istr
+ return multidict_module.istr # type: ignore[no-any-return]
@pytest.fixture(scope="session")
@@ -133,7 +129,7 @@ def any_multidict_proxy_class(
multidict_module: ModuleType,
) -> Type[MultiMapping[str]]:
"""Return an immutable multidict implementation class object."""
- return getattr(multidict_module, any_multidict_proxy_class_name)
+ return getattr(multidict_module, any_multidict_proxy_class_name) # type: ignore[no-any-return]
@pytest.fixture(scope="session")
@@ -141,7 +137,7 @@ def case_sensitive_multidict_proxy_class(
multidict_module: ModuleType,
) -> Type[MutableMultiMapping[str]]:
"""Return a case-sensitive immutable multidict class."""
- return multidict_module.MultiDictProxy
+ return multidict_module.MultiDictProxy # type: ignore[no-any-return]
@pytest.fixture(scope="session")
@@ -149,13 +145,15 @@ def case_insensitive_multidict_proxy_class(
multidict_module: ModuleType,
) -> Type[MutableMultiMapping[str]]:
"""Return a case-insensitive immutable multidict class."""
- return multidict_module.CIMultiDictProxy
+ return multidict_module.CIMultiDictProxy # type: ignore[no-any-return]
@pytest.fixture(scope="session")
-def multidict_getversion_callable(multidict_module: ModuleType) -> Callable:
+def multidict_getversion_callable(
+ multidict_module: ModuleType,
+) -> Callable[[Union[MultiDict[object], MultiDictProxy[object]]], int]:
"""Return a ``getversion()`` function for current implementation."""
- return multidict_module.getversion
+ return multidict_module.getversion # type: ignore[no-any-return]
def pytest_addoption(
@@ -171,20 +169,12 @@ def pytest_addoption(
parser.addoption(
"--c-extensions", # disabled with `--no-c-extensions`
- action="store_true" if PY_38_AND_BELOW else argparse.BooleanOptionalAction,
+ action=argparse.BooleanOptionalAction,
default=True,
dest="c_extensions",
help="Test C-extensions (on by default)",
)
- if PY_38_AND_BELOW:
- parser.addoption(
- "--no-c-extensions",
- action="store_false",
- dest="c_extensions",
- help="Skip testing C-extensions (on by default)",
- )
-
def pytest_collection_modifyitems(
session: pytest.Session,
@@ -197,8 +187,8 @@ def pytest_collection_modifyitems(
if test_c_extensions:
return
- selected_tests = []
- deselected_tests = []
+ selected_tests: list[pytest.Item] = []
+ deselected_tests: list[pytest.Item] = []
for item in items:
c_ext = item.get_closest_marker(C_EXT_MARK.name) is not None
@@ -218,7 +208,7 @@ def pytest_configure(config: pytest.Config) -> None:
)
-def pytest_generate_tests(metafunc):
+def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
if "pickle_protocol" in metafunc.fixturenames:
metafunc.parametrize(
"pickle_protocol", list(range(pickle.HIGHEST_PROTOCOL + 1)), scope="session"
diff --git a/contrib/python/multidict/tests/gen_pickles.py b/contrib/python/multidict/tests/gen_pickles.py
index 4e0d268bed..72f41b7565 100644
--- a/contrib/python/multidict/tests/gen_pickles.py
+++ b/contrib/python/multidict/tests/gen_pickles.py
@@ -1,18 +1,22 @@
import pickle
from importlib import import_module
from pathlib import Path
+from typing import Union
+
+from multidict import CIMultiDict, MultiDict
TESTS_DIR = Path(__file__).parent.resolve()
+_MD_Classes = Union[type[MultiDict[int]], type[CIMultiDict[int]]]
-def write(tag, cls, proto):
+def write(tag: str, cls: _MD_Classes, proto: int) -> None:
d = cls([("a", 1), ("a", 2)])
file_basename = f"{cls.__name__.lower()}-{tag}"
with (TESTS_DIR / f"{file_basename}.pickle.{proto}").open("wb") as f:
pickle.dump(d, f, proto)
-def generate():
+def generate() -> None:
_impl_map = {
"c-extension": "_multidict",
"pure-python": "_multidict_py",
diff --git a/contrib/python/multidict/tests/test_abc.py b/contrib/python/multidict/tests/test_abc.py
index e18ad83f82..611d0fa8c3 100644
--- a/contrib/python/multidict/tests/test_abc.py
+++ b/contrib/python/multidict/tests/test_abc.py
@@ -1,94 +1,32 @@
from collections.abc import Mapping, MutableMapping
-import pytest
+from multidict import (
+ MultiDict,
+ MultiDictProxy,
+ MultiMapping,
+ MutableMultiMapping,
+)
-from multidict import MultiMapping, MutableMultiMapping
-
-def test_abc_inheritance():
+def test_abc_inheritance() -> None:
assert issubclass(MultiMapping, Mapping)
assert not issubclass(MultiMapping, MutableMapping)
assert issubclass(MutableMultiMapping, Mapping)
assert issubclass(MutableMultiMapping, MutableMapping)
-class A(MultiMapping):
- def __getitem__(self, key):
- pass
-
- def __iter__(self):
- pass
-
- def __len__(self):
- pass
-
- def getall(self, key, default=None):
- super().getall(key, default)
-
- def getone(self, key, default=None):
- super().getone(key, default)
-
-
-def test_abc_getall():
- with pytest.raises(KeyError):
- A().getall("key")
-
-
-def test_abc_getone():
- with pytest.raises(KeyError):
- A().getone("key")
-
-
-class B(A, MutableMultiMapping):
- def __setitem__(self, key, value):
- pass
-
- def __delitem__(self, key):
- pass
-
- def add(self, key, value):
- super().add(key, value)
-
- def extend(self, *args, **kwargs):
- super().extend(*args, **kwargs)
-
- def popall(self, key, default=None):
- super().popall(key, default)
-
- def popone(self, key, default=None):
- super().popone(key, default)
-
-
-def test_abc_add():
- with pytest.raises(NotImplementedError):
- B().add("key", "val")
-
-
-def test_abc_extend():
- with pytest.raises(NotImplementedError):
- B().extend()
-
-
-def test_abc_popone():
- with pytest.raises(KeyError):
- B().popone("key")
-
-
-def test_abc_popall():
- with pytest.raises(KeyError):
- B().popall("key")
-
-
-def test_multidict_inheritance(any_multidict_class):
+def test_multidict_inheritance(any_multidict_class: type[MultiDict[str]]) -> None:
assert issubclass(any_multidict_class, MultiMapping)
assert issubclass(any_multidict_class, MutableMultiMapping)
-def test_proxy_inheritance(any_multidict_proxy_class):
+def test_proxy_inheritance(
+ any_multidict_proxy_class: type[MultiDictProxy[str]],
+) -> None:
assert issubclass(any_multidict_proxy_class, MultiMapping)
assert not issubclass(any_multidict_proxy_class, MutableMultiMapping)
-def test_generic_type_in_runtime():
+def test_generic_type_in_runtime() -> None:
MultiMapping[str]
MutableMultiMapping[str]
diff --git a/contrib/python/multidict/tests/test_copy.py b/contrib/python/multidict/tests/test_copy.py
index cd926cdc1d..deff64db37 100644
--- a/contrib/python/multidict/tests/test_copy.py
+++ b/contrib/python/multidict/tests/test_copy.py
@@ -1,7 +1,13 @@
import copy
+from typing import Union
+from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
-def test_copy(any_multidict_class):
+_MD_Classes = Union[type[MultiDict[int]], type[CIMultiDict[int]]]
+_MDP_Classes = Union[type[MultiDictProxy[int]], type[CIMultiDictProxy[int]]]
+
+
+def test_copy(any_multidict_class: _MD_Classes) -> None:
d = any_multidict_class()
d["foo"] = 6
d2 = d.copy()
@@ -10,7 +16,9 @@ def test_copy(any_multidict_class):
assert d2["foo"] == 7
-def test_copy_proxy(any_multidict_class, any_multidict_proxy_class):
+def test_copy_proxy(
+ any_multidict_class: _MD_Classes, any_multidict_proxy_class: _MDP_Classes
+) -> None:
d = any_multidict_class()
d["foo"] = 6
p = any_multidict_proxy_class(d)
@@ -21,7 +29,7 @@ def test_copy_proxy(any_multidict_class, any_multidict_proxy_class):
assert d2["foo"] == 7
-def test_copy_std_copy(any_multidict_class):
+def test_copy_std_copy(any_multidict_class: _MD_Classes) -> None:
d = any_multidict_class()
d["foo"] = 6
d2 = copy.copy(d)
@@ -30,7 +38,7 @@ def test_copy_std_copy(any_multidict_class):
assert d2["foo"] == 7
-def test_ci_multidict_clone(any_multidict_class):
+def test_ci_multidict_clone(any_multidict_class: _MD_Classes) -> None:
d = any_multidict_class(foo=6)
d2 = any_multidict_class(d)
d2["foo"] = 7
diff --git a/contrib/python/multidict/tests/test_guard.py b/contrib/python/multidict/tests/test_guard.py
index 225da67c8d..c877fbf803 100644
--- a/contrib/python/multidict/tests/test_guard.py
+++ b/contrib/python/multidict/tests/test_guard.py
@@ -1,12 +1,10 @@
-from typing import Type
-
import pytest
-from multidict import MultiMapping
+from multidict import MultiDict
def test_guard_items(
- case_sensitive_multidict_class: Type[MultiMapping[str]],
+ case_sensitive_multidict_class: type[MultiDict[str]],
) -> None:
md = case_sensitive_multidict_class({"a": "b"})
it = iter(md.items())
@@ -16,7 +14,7 @@ def test_guard_items(
def test_guard_keys(
- case_sensitive_multidict_class: Type[MultiMapping[str]],
+ case_sensitive_multidict_class: type[MultiDict[str]],
) -> None:
md = case_sensitive_multidict_class({"a": "b"})
it = iter(md.keys())
@@ -26,7 +24,7 @@ def test_guard_keys(
def test_guard_values(
- case_sensitive_multidict_class: Type[MultiMapping[str]],
+ case_sensitive_multidict_class: type[MultiDict[str]],
) -> None:
md = case_sensitive_multidict_class({"a": "b"})
it = iter(md.values())
diff --git a/contrib/python/multidict/tests/test_istr.py b/contrib/python/multidict/tests/test_istr.py
index 1918153532..101f5fe8e5 100644
--- a/contrib/python/multidict/tests/test_istr.py
+++ b/contrib/python/multidict/tests/test_istr.py
@@ -71,4 +71,4 @@ def test_leak(create_istrs: Callable[[], None]) -> None:
gc.collect()
cnt2 = len(gc.get_objects())
- assert abs(cnt - cnt2) < 10 # on PyPy these numbers are not equal
+ assert abs(cnt - cnt2) < 50 # on PyPy these numbers are not equal
diff --git a/contrib/python/multidict/tests/test_multidict.py b/contrib/python/multidict/tests/test_multidict.py
index bcfa699c15..d144130a41 100644
--- a/contrib/python/multidict/tests/test_multidict.py
+++ b/contrib/python/multidict/tests/test_multidict.py
@@ -5,27 +5,20 @@ import operator
import sys
import weakref
from collections import deque
-from collections.abc import Mapping
+from collections.abc import Callable, Iterable, Iterator, KeysView, Mapping
from types import ModuleType
-from typing import (
- Callable,
- Dict,
- Iterable,
- Iterator,
- KeysView,
- List,
- Mapping,
- Set,
- Tuple,
- Type,
- Union,
- cast,
-)
+from typing import Union, cast
import pytest
import multidict
-from multidict import CIMultiDict, MultiDict, MultiMapping, MutableMultiMapping
+from multidict import (
+ CIMultiDict,
+ MultiDict,
+ MultiDictProxy,
+ MultiMapping,
+ MutableMultiMapping,
+)
def chained_callable(
@@ -71,7 +64,7 @@ def cls( # type: ignore[misc]
def test_exposed_names(any_multidict_class_name: str) -> None:
- assert any_multidict_class_name in multidict.__all__ # type: ignore[attr-defined]
+ assert any_multidict_class_name in multidict.__all__
@pytest.mark.parametrize(
@@ -86,8 +79,8 @@ def test_exposed_names(any_multidict_class_name: str) -> None:
indirect=["cls"],
)
def test__iter__types(
- cls: Type[MultiDict[Union[str, int]]],
- key_cls: Type[object],
+ cls: type[MultiDict[Union[str, int]]],
+ key_cls: type[str],
) -> None:
d = cls([("key", "one"), ("key2", "two"), ("key", 3)])
for i in d:
@@ -95,26 +88,26 @@ def test__iter__types(
def test_proxy_copy(
- any_multidict_class: Type[MutableMultiMapping[str]],
- any_multidict_proxy_class: Type[MultiMapping[str]],
+ any_multidict_class: type[MultiDict[str]],
+ any_multidict_proxy_class: type[MultiDictProxy[str]],
) -> None:
d1 = any_multidict_class(key="value", a="b")
p1 = any_multidict_proxy_class(d1)
- d2 = p1.copy() # type: ignore[attr-defined]
+ d2 = p1.copy()
assert d1 == d2
assert d1 is not d2
def test_multidict_subclassing(
- any_multidict_class: Type[MutableMultiMapping[str]],
+ any_multidict_class: type[MultiDict[str]],
) -> None:
class DummyMultidict(any_multidict_class): # type: ignore[valid-type,misc]
pass
def test_multidict_proxy_subclassing(
- any_multidict_proxy_class: Type[MultiMapping[str]],
+ any_multidict_proxy_class: type[MultiDictProxy[str]],
) -> None:
class DummyMultidictProxy(
any_multidict_proxy_class, # type: ignore[valid-type,misc]
@@ -123,7 +116,7 @@ def test_multidict_proxy_subclassing(
class BaseMultiDictTest:
- def test_instantiate__empty(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_instantiate__empty(self, cls: type[MutableMultiMapping[str]]) -> None:
d = cls()
empty: Mapping[str, str] = {}
assert d == empty
@@ -133,14 +126,14 @@ class BaseMultiDictTest:
assert list(d.items()) == []
assert cls() != list() # type: ignore[comparison-overlap]
- with pytest.raises(TypeError, match=r"(2 given)"):
+ with pytest.raises(TypeError, match=r"3 were given"):
cls(("key1", "value1"), ("key2", "value2")) # type: ignore[call-arg] # noqa: E501
@pytest.mark.parametrize("arg0", ([("key", "value1")], {"key": "value1"}))
def test_instantiate__from_arg0(
self,
- cls: Type[MutableMultiMapping[str]],
- arg0: Union[List[Tuple[str, str]], Dict[str, str]],
+ cls: type[MultiDict[str]],
+ arg0: Union[list[tuple[str, str]], dict[str, str]],
) -> None:
d = cls(arg0)
@@ -152,7 +145,7 @@ class BaseMultiDictTest:
def test_instantiate__with_kwargs(
self,
- cls: Type[MutableMultiMapping[str]],
+ cls: type[MultiDict[str]],
) -> None:
d = cls([("key", "value1")], key2="value2")
@@ -163,7 +156,7 @@ class BaseMultiDictTest:
assert sorted(d.items()) == [("key", "value1"), ("key2", "value2")]
def test_instantiate__from_generator(
- self, cls: Union[Type[MultiDict[int]], Type[CIMultiDict[int]]]
+ self, cls: Union[type[MultiDict[int]], type[CIMultiDict[int]]]
) -> None:
d = cls((str(i), i) for i in range(2))
@@ -175,7 +168,7 @@ class BaseMultiDictTest:
def test_instantiate__from_list_of_lists(
self,
- cls: Type[MutableMultiMapping[str]],
+ cls: type[MutableMultiMapping[str]],
) -> None:
# Should work at runtime, but won't type check.
d = cls([["key", "value1"]]) # type: ignore[call-arg]
@@ -183,7 +176,7 @@ class BaseMultiDictTest:
def test_instantiate__from_list_of_custom_pairs(
self,
- cls: Type[MutableMultiMapping[str]],
+ cls: type[MultiDict[str]],
) -> None:
class Pair:
def __len__(self) -> int:
@@ -193,10 +186,10 @@ class BaseMultiDictTest:
return ("key", "value1")[pos]
# Works at runtime, but won't type check.
- d = cls([Pair()])
+ d = cls([Pair()]) # type: ignore[list-item]
assert d == {"key": "value1"}
- def test_getone(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_getone(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")], key="value2")
assert d.getone("key") == "value1"
@@ -210,25 +203,42 @@ class BaseMultiDictTest:
assert d.getone("key2", "default") == "default"
- def test_call_with_kwargs(self, cls: Type[MultiDict[str]]) -> None:
+ def test_call_with_kwargs(self, cls: type[MultiDict[str]]) -> None:
d = cls([("present", "value")])
assert d.getall(default="missing", key="notfound") == "missing"
def test__iter__(
self,
cls: Union[
- Type[MultiDict[Union[str, int]]],
- Type[CIMultiDict[Union[str, int]]],
+ type[MultiDict[Union[str, int]]],
+ type[CIMultiDict[Union[str, int]]],
],
) -> None:
d = cls([("key", "one"), ("key2", "two"), ("key", 3)])
assert list(d) == ["key", "key2", "key"]
+ def test__contains(
+ self,
+ cls: Union[
+ type[MultiDict[Union[str, int]]],
+ type[CIMultiDict[Union[str, int]]],
+ ],
+ ) -> None:
+ d = cls([("key", "one"), ("key2", "two"), ("key", 3)])
+
+ assert list(d) == ["key", "key2", "key"]
+
+ assert "key" in d
+ assert "key2" in d
+
+ assert "foo" not in d
+ assert 42 not in d # type: ignore[comparison-overlap]
+
def test_keys__contains(
self,
cls: Union[
- Type[MultiDict[Union[str, int]]],
- Type[CIMultiDict[Union[str, int]]],
+ type[MultiDict[Union[str, int]]],
+ type[CIMultiDict[Union[str, int]]],
],
) -> None:
d = cls([("key", "one"), ("key2", "two"), ("key", 3)])
@@ -239,12 +249,13 @@ class BaseMultiDictTest:
assert "key2" in d.keys()
assert "foo" not in d.keys()
+ assert 42 not in d.keys() # type: ignore[comparison-overlap]
def test_values__contains(
self,
cls: Union[
- Type[MultiDict[Union[str, int]]],
- Type[CIMultiDict[Union[str, int]]],
+ type[MultiDict[Union[str, int]]],
+ type[CIMultiDict[Union[str, int]]],
],
) -> None:
d = cls([("key", "one"), ("key", "two"), ("key", 3)])
@@ -260,8 +271,8 @@ class BaseMultiDictTest:
def test_items__contains(
self,
cls: Union[
- Type[MultiDict[Union[str, int]]],
- Type[CIMultiDict[Union[str, int]]],
+ type[MultiDict[Union[str, int]]],
+ type[CIMultiDict[Union[str, int]]],
],
) -> None:
d = cls([("key", "one"), ("key", "two"), ("key", 3)])
@@ -273,15 +284,17 @@ class BaseMultiDictTest:
assert ("key", 3) in d.items()
assert ("foo", "bar") not in d.items()
+ assert (42, 3) not in d.items() # type: ignore[comparison-overlap]
+ assert 42 not in d.items() # type: ignore[comparison-overlap]
def test_cannot_create_from_unaccepted(
self,
- cls: Type[MutableMultiMapping[str]],
+ cls: type[MutableMultiMapping[str]],
) -> None:
with pytest.raises(TypeError):
cls([(1, 2, 3)]) # type: ignore[call-arg]
- def test_keys_is_set_less(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_keys_is_set_less(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
assert d.keys() < {"key", "key2"}
@@ -297,8 +310,8 @@ class BaseMultiDictTest:
)
def test_keys_is_set_less_equal(
self,
- cls: Type[MutableMultiMapping[str]],
- contents: List[Tuple[str, str]],
+ cls: type[MultiDict[str]],
+ contents: list[tuple[str, str]],
expected: bool,
) -> None:
d = cls(contents)
@@ -306,12 +319,17 @@ class BaseMultiDictTest:
result = d.keys() <= {"key", "key2"}
assert result is expected
- def test_keys_is_set_equal(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_keys_is_set_equal(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
assert d.keys() == {"key"}
- def test_keys_is_set_greater(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_items_is_set_equal(self, cls: type[MultiDict[str]]) -> None:
+ d = cls([("key", "value1")])
+
+ assert d.items() == {("key", "value1")}
+
+ def test_keys_is_set_greater(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1"), ("key2", "value2")])
assert d.keys() > {"key"}
@@ -326,16 +344,14 @@ class BaseMultiDictTest:
),
)
def test_keys_is_set_greater_equal(
- self, cls: Type[MutableMultiMapping[str]], set_: Set[str], expected: bool
+ self, cls: type[MultiDict[str]], set_: set[str], expected: bool
) -> None:
d = cls([("key", "value1"), ("key2", "value2")])
result = d.keys() >= set_
assert result is expected
- def test_keys_less_than_not_implemented(
- self, cls: Type[MutableMultiMapping[str]]
- ) -> None:
+ def test_keys_less_than_not_implemented(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
sentinel_operation_result = object()
@@ -348,7 +364,7 @@ class BaseMultiDictTest:
assert (d.keys() < RightOperand()) is sentinel_operation_result
def test_keys_less_than_or_equal_not_implemented(
- self, cls: Type[MutableMultiMapping[str]]
+ self, cls: type[MultiDict[str]]
) -> None:
d = cls([("key", "value1")])
@@ -361,9 +377,7 @@ class BaseMultiDictTest:
assert (d.keys() <= RightOperand()) is sentinel_operation_result
- def test_keys_greater_than_not_implemented(
- self, cls: Type[MutableMultiMapping[str]]
- ) -> None:
+ def test_keys_greater_than_not_implemented(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
sentinel_operation_result = object()
@@ -376,7 +390,7 @@ class BaseMultiDictTest:
assert (d.keys() > RightOperand()) is sentinel_operation_result
def test_keys_greater_than_or_equal_not_implemented(
- self, cls: Type[MutableMultiMapping[str]]
+ self, cls: type[MultiDict[str]]
) -> None:
d = cls([("key", "value1")])
@@ -389,30 +403,28 @@ class BaseMultiDictTest:
assert (d.keys() >= RightOperand()) is sentinel_operation_result
- def test_keys_is_set_not_equal(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_keys_is_set_not_equal(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
assert d.keys() != {"key2"}
- def test_keys_not_equal_unrelated_type(
- self, cls: Type[MutableMultiMapping[str]]
- ) -> None:
+ def test_keys_not_equal_unrelated_type(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
- assert d.keys() != "other"
+ assert d.keys() != "other" # type: ignore[comparison-overlap]
- def test_eq(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_eq(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
assert {"key": "value1"} == d
- def test_eq2(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_eq2(self, cls: type[MultiDict[str]]) -> None:
d1 = cls([("key", "value1")])
d2 = cls([("key2", "value1")])
assert d1 != d2
- def test_eq3(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_eq3(self, cls: type[MultiDict[str]]) -> None:
d1 = cls([("key", "value1")])
d2 = cls()
@@ -420,7 +432,7 @@ class BaseMultiDictTest:
def test_eq_other_mapping_contains_more_keys(
self,
- cls: Type[MutableMultiMapping[str]],
+ cls: type[MultiDict[str]],
) -> None:
d1 = cls(foo="bar")
d2 = dict(foo="bar", bar="baz")
@@ -428,7 +440,7 @@ class BaseMultiDictTest:
assert d1 != d2
def test_eq_bad_mapping_len(
- self, cls: Union[Type[MultiDict[int]], Type[CIMultiDict[int]]]
+ self, cls: Union[type[MultiDict[int]], type[CIMultiDict[int]]]
) -> None:
class BadMapping(Mapping[str, int]):
def __getitem__(self, key: str) -> int:
@@ -437,8 +449,8 @@ class BaseMultiDictTest:
def __iter__(self) -> Iterator[str]:
yield "a" # pragma: no cover # `len()` fails earlier
- def __len__(self) -> int: # type: ignore[return]
- 1 / 0
+ def __len__(self) -> int:
+ return 1 // 0
d1 = cls(a=1)
d2 = BadMapping()
@@ -447,11 +459,11 @@ class BaseMultiDictTest:
def test_eq_bad_mapping_getitem(
self,
- cls: Union[Type[MultiDict[int]], Type[CIMultiDict[int]]],
+ cls: Union[type[MultiDict[int]], type[CIMultiDict[int]]],
) -> None:
class BadMapping(Mapping[str, int]):
- def __getitem__(self, key: str) -> int: # type: ignore[return]
- 1 / 0
+ def __getitem__(self, key: str) -> int:
+ return 1 // 0
def __iter__(self) -> Iterator[str]:
yield "a" # pragma: no cover # foreign objects no iterated
@@ -464,24 +476,22 @@ class BaseMultiDictTest:
with pytest.raises(ZeroDivisionError):
d1 == d2
- def test_ne(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_ne(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
assert d != {"key": "another_value"}
- def test_and(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_and(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
assert {"key"} == d.keys() & {"key", "key2"}
- def test_and2(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_and2(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
assert {"key"} == {"key", "key2"} & d.keys()
- def test_bitwise_and_not_implemented(
- self, cls: Type[MutableMultiMapping[str]]
- ) -> None:
+ def test_bitwise_and_not_implemented(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
sentinel_operation_result = object()
@@ -493,26 +503,22 @@ class BaseMultiDictTest:
assert d.keys() & RightOperand() is sentinel_operation_result
- def test_bitwise_and_iterable_not_set(
- self, cls: Type[MutableMultiMapping[str]]
- ) -> None:
+ def test_bitwise_and_iterable_not_set(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
assert {"key"} == d.keys() & ["key", "key2"]
- def test_or(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_or(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
assert {"key", "key2"} == d.keys() | {"key2"}
- def test_or2(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_or2(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
assert {"key", "key2"} == {"key2"} | d.keys()
- def test_bitwise_or_not_implemented(
- self, cls: Type[MutableMultiMapping[str]]
- ) -> None:
+ def test_bitwise_or_not_implemented(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
sentinel_operation_result = object()
@@ -524,24 +530,22 @@ class BaseMultiDictTest:
assert d.keys() | RightOperand() is sentinel_operation_result
- def test_bitwise_or_iterable_not_set(
- self, cls: Type[MutableMultiMapping[str]]
- ) -> None:
+ def test_bitwise_or_iterable_not_set(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")])
assert {"key", "key2"} == d.keys() | ["key2"]
- def test_sub(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_sub(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1"), ("key2", "value2")])
assert {"key"} == d.keys() - {"key2"}
- def test_sub2(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_sub2(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1"), ("key2", "value2")])
assert {"key3"} == {"key", "key2", "key3"} - d.keys()
- def test_sub_not_implemented(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_sub_not_implemented(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1"), ("key2", "value2")])
sentinel_operation_result = object()
@@ -553,22 +557,22 @@ class BaseMultiDictTest:
assert d.keys() - RightOperand() is sentinel_operation_result
- def test_sub_iterable_not_set(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_sub_iterable_not_set(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1"), ("key2", "value2")])
assert {"key"} == d.keys() - ["key2"]
- def test_xor(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_xor(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1"), ("key2", "value2")])
assert {"key", "key3"} == d.keys() ^ {"key2", "key3"}
- def test_xor2(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_xor2(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1"), ("key2", "value2")])
assert {"key", "key3"} == {"key2", "key3"} ^ d.keys()
- def test_xor_not_implemented(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_xor_not_implemented(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1"), ("key2", "value2")])
sentinel_operation_result = object()
@@ -580,7 +584,7 @@ class BaseMultiDictTest:
assert d.keys() ^ RightOperand() is sentinel_operation_result
- def test_xor_iterable_not_set(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_xor_iterable_not_set(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1"), ("key2", "value2")])
assert {"key", "key3"} == d.keys() ^ ["key2", "key3"]
@@ -590,13 +594,13 @@ class BaseMultiDictTest:
(("key2", "v", True), ("key", "value1", False)),
)
def test_isdisjoint(
- self, cls: Type[MutableMultiMapping[str]], key: str, value: str, expected: bool
+ self, cls: type[MultiDict[str]], key: str, value: str, expected: bool
) -> None:
d = cls([("key", "value1")])
assert d.items().isdisjoint({(key, value)}) is expected
assert d.keys().isdisjoint({key}) is expected
- def test_repr_aiohttp_issue_410(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_repr_aiohttp_issue_410(self, cls: type[MutableMultiMapping[str]]) -> None:
d = cls()
try:
@@ -614,9 +618,9 @@ class BaseMultiDictTest:
@pytest.mark.parametrize("other", ({"other"},))
def test_op_issue_aiohttp_issue_410(
self,
- cls: Type[MutableMultiMapping[str]],
+ cls: type[MultiDict[str]],
op: Callable[[object, object], object],
- other: Set[str],
+ other: set[str],
) -> None:
d = cls([("key", "value")])
@@ -628,7 +632,7 @@ class BaseMultiDictTest:
assert sys.exc_info()[1] == e # noqa: PT017
- def test_weakref(self, cls: Type[MutableMultiMapping[str]]) -> None:
+ def test_weakref(self, cls: type[MutableMultiMapping[str]]) -> None:
called = False
def cb(wr: object) -> None:
@@ -644,7 +648,7 @@ class BaseMultiDictTest:
def test_iter_length_hint_keys(
self,
- cls: Union[Type[MultiDict[int]], Type[CIMultiDict[int]]],
+ cls: Union[type[MultiDict[int]], type[CIMultiDict[int]]],
) -> None:
md = cls(a=1, b=2)
it = iter(md.keys())
@@ -652,7 +656,7 @@ class BaseMultiDictTest:
def test_iter_length_hint_items(
self,
- cls: Union[Type[MultiDict[int]], Type[CIMultiDict[int]]],
+ cls: Union[type[MultiDict[int]], type[CIMultiDict[int]]],
) -> None:
md = cls(a=1, b=2)
it = iter(md.items())
@@ -660,15 +664,15 @@ class BaseMultiDictTest:
def test_iter_length_hint_values(
self,
- cls: Union[Type[MultiDict[int]], Type[CIMultiDict[int]]],
+ cls: Union[type[MultiDict[int]], type[CIMultiDict[int]]],
) -> None:
md = cls(a=1, b=2)
it = iter(md.values())
- assert it.__length_hint__() == 2 # type: ignore[attr-defined]
+ assert it.__length_hint__() == 2
def test_ctor_list_arg_and_kwds(
self,
- cls: Union[Type[MultiDict[int]], Type[CIMultiDict[int]]],
+ cls: Union[type[MultiDict[int]], type[CIMultiDict[int]]],
) -> None:
arg = [("a", 1)]
obj = cls(arg, b=2)
@@ -677,7 +681,7 @@ class BaseMultiDictTest:
def test_ctor_tuple_arg_and_kwds(
self,
- cls: Union[Type[MultiDict[int]], Type[CIMultiDict[int]]],
+ cls: Union[type[MultiDict[int]], type[CIMultiDict[int]]],
) -> None:
arg = (("a", 1),)
obj = cls(arg, b=2)
@@ -686,7 +690,7 @@ class BaseMultiDictTest:
def test_ctor_deque_arg_and_kwds(
self,
- cls: Union[Type[MultiDict[int]], Type[CIMultiDict[int]]],
+ cls: Union[type[MultiDict[int]], type[CIMultiDict[int]]],
) -> None:
arg = deque([("a", 1)])
obj = cls(arg, b=2)
@@ -709,7 +713,7 @@ class TestMultiDict(BaseMultiDictTest):
"""Make a case-sensitive multidict class/proxy constructor."""
return chained_callable(multidict_module, request.param)
- def test__repr__(self, cls: Type[MultiDict[str]]) -> None:
+ def test__repr__(self, cls: type[MultiDict[str]]) -> None:
d = cls()
_cls = type(d)
@@ -719,7 +723,7 @@ class TestMultiDict(BaseMultiDictTest):
assert str(d) == "<%s('key': 'one', 'key': 'two')>" % _cls.__name__
- def test_getall(self, cls: Type[MultiDict[str]]) -> None:
+ def test_getall(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")], key="value2")
assert d != {"key": "value1"}
@@ -735,27 +739,27 @@ class TestMultiDict(BaseMultiDictTest):
def test_preserve_stable_ordering(
self,
- cls: Type[MultiDict[Union[str, int]]],
+ cls: type[MultiDict[Union[str, int]]],
) -> None:
d = cls([("a", 1), ("b", "2"), ("a", 3)])
s = "&".join("{}={}".format(k, v) for k, v in d.items())
assert s == "a=1&b=2&a=3"
- def test_get(self, cls: Type[MultiDict[int]]) -> None:
+ def test_get(self, cls: type[MultiDict[int]]) -> None:
d = cls([("a", 1), ("a", 2)])
assert d["a"] == 1
- def test_items__repr__(self, cls: Type[MultiDict[str]]) -> None:
+ def test_items__repr__(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")], key="value2")
expected = "_ItemsView('key': 'value1', 'key': 'value2')"
assert repr(d.items()) == expected
- def test_keys__repr__(self, cls: Type[MultiDict[str]]) -> None:
+ def test_keys__repr__(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")], key="value2")
assert repr(d.keys()) == "_KeysView('key', 'key')"
- def test_values__repr__(self, cls: Type[MultiDict[str]]) -> None:
+ def test_values__repr__(self, cls: type[MultiDict[str]]) -> None:
d = cls([("key", "value1")], key="value2")
assert repr(d.values()) == "_ValuesView('value1', 'value2')"
@@ -775,7 +779,7 @@ class TestCIMultiDict(BaseMultiDictTest):
"""Make a case-insensitive multidict class/proxy constructor."""
return chained_callable(multidict_module, request.param)
- def test_basics(self, cls: Type[CIMultiDict[str]]) -> None:
+ def test_basics(self, cls: type[CIMultiDict[str]]) -> None:
d = cls([("KEY", "value1")], KEY="value2")
assert d.getone("key") == "value1"
@@ -789,7 +793,7 @@ class TestCIMultiDict(BaseMultiDictTest):
with pytest.raises(KeyError, match="key2"):
d.getone("key2")
- def test_getall(self, cls: Type[CIMultiDict[str]]) -> None:
+ def test_getall(self, cls: type[CIMultiDict[str]]) -> None:
d = cls([("KEY", "value1")], KEY="value2")
assert not d == {"KEY": "value1"}
@@ -800,26 +804,26 @@ class TestCIMultiDict(BaseMultiDictTest):
with pytest.raises(KeyError, match="some_key"):
d.getall("some_key")
- def test_get(self, cls: Type[CIMultiDict[int]]) -> None:
+ def test_get(self, cls: type[CIMultiDict[int]]) -> None:
d = cls([("A", 1), ("a", 2)])
assert 1 == d["a"]
- def test__repr__(self, cls: Type[CIMultiDict[str]]) -> None:
+ def test__repr__(self, cls: type[CIMultiDict[str]]) -> None:
d = cls([("KEY", "value1")], key="value2")
_cls = type(d)
expected = "<%s('KEY': 'value1', 'key': 'value2')>" % _cls.__name__
assert str(d) == expected
- def test_items__repr__(self, cls: Type[CIMultiDict[str]]) -> None:
+ def test_items__repr__(self, cls: type[CIMultiDict[str]]) -> None:
d = cls([("KEY", "value1")], key="value2")
expected = "_ItemsView('KEY': 'value1', 'key': 'value2')"
assert repr(d.items()) == expected
- def test_keys__repr__(self, cls: Type[CIMultiDict[str]]) -> None:
+ def test_keys__repr__(self, cls: type[CIMultiDict[str]]) -> None:
d = cls([("KEY", "value1")], key="value2")
assert repr(d.keys()) == "_KeysView('KEY', 'key')"
- def test_values__repr__(self, cls: Type[CIMultiDict[str]]) -> None:
+ def test_values__repr__(self, cls: type[CIMultiDict[str]]) -> None:
d = cls([("KEY", "value1")], key="value2")
assert repr(d.values()) == "_ValuesView('value1', 'value2')"
diff --git a/contrib/python/multidict/tests/test_multidict_benchmarks.py b/contrib/python/multidict/tests/test_multidict_benchmarks.py
new file mode 100644
index 0000000000..e6a538f3cc
--- /dev/null
+++ b/contrib/python/multidict/tests/test_multidict_benchmarks.py
@@ -0,0 +1,391 @@
+"""codspeed benchmarks for multidict."""
+
+from typing import Dict, Union
+
+from pytest_codspeed import BenchmarkFixture
+
+from multidict import CIMultiDict, MultiDict, istr
+
+# Note that this benchmark should not be refactored to use pytest.mark.parametrize
+# since each benchmark name should be unique.
+
+_SENTINEL = object()
+
+
+def test_multidict_insert_str(benchmark: BenchmarkFixture) -> None:
+ md: MultiDict[str] = MultiDict()
+ items = [str(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md[i] = i
+
+
+def test_cimultidict_insert_str(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[str] = CIMultiDict()
+ items = [str(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md[i] = i
+
+
+def test_cimultidict_insert_istr(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[istr] = CIMultiDict()
+ items = [istr(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md[i] = i
+
+
+def test_multidict_add_str(benchmark: BenchmarkFixture) -> None:
+ md: MultiDict[str] = MultiDict()
+ items = [str(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md.add(i, i)
+
+
+def test_cimultidict_add_str(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[str] = CIMultiDict()
+ items = [str(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md.add(i, i)
+
+
+def test_cimultidict_add_istr(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[istr] = CIMultiDict()
+ items = [istr(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md.add(i, i)
+
+
+def test_multidict_pop_str(benchmark: BenchmarkFixture) -> None:
+ md_base: MultiDict[str] = MultiDict((str(i), str(i)) for i in range(100))
+ items = [str(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ md = md_base.copy()
+ for i in items:
+ md.pop(i)
+
+
+def test_cimultidict_pop_str(benchmark: BenchmarkFixture) -> None:
+ md_base: CIMultiDict[str] = CIMultiDict((str(i), str(i)) for i in range(100))
+ items = [str(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ md = md_base.copy()
+ for i in items:
+ md.pop(i)
+
+
+def test_cimultidict_pop_istr(benchmark: BenchmarkFixture) -> None:
+ md_base: CIMultiDict[istr] = CIMultiDict((istr(i), istr(i)) for i in range(100))
+ items = [istr(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ md = md_base.copy()
+ for i in items:
+ md.pop(i)
+
+
+def test_multidict_popitem_str(benchmark: BenchmarkFixture) -> None:
+ md_base: MultiDict[str] = MultiDict((str(i), str(i)) for i in range(100))
+
+ @benchmark
+ def _run() -> None:
+ md = md_base.copy()
+ for _ in range(100):
+ md.popitem()
+
+
+def test_cimultidict_popitem_str(benchmark: BenchmarkFixture) -> None:
+ md_base: MultiDict[str] = MultiDict((str(i), str(i)) for i in range(100))
+
+ @benchmark
+ def _run() -> None:
+ md = md_base.copy()
+ for _ in range(100):
+ md.popitem()
+
+
+def test_multidict_clear_str(benchmark: BenchmarkFixture) -> None:
+ md: MultiDict[str] = MultiDict((str(i), str(i)) for i in range(100))
+
+ @benchmark
+ def _run() -> None:
+ md.clear()
+
+
+def test_cimultidict_clear_str(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[str] = CIMultiDict((str(i), str(i)) for i in range(100))
+
+ @benchmark
+ def _run() -> None:
+ md.clear()
+
+
+def test_multidict_update_str(benchmark: BenchmarkFixture) -> None:
+ md: MultiDict[str] = MultiDict((str(i), str(i)) for i in range(100))
+ items = {str(i): str(i) for i in range(100, 200)}
+
+ @benchmark
+ def _run() -> None:
+ md.update(items)
+
+
+def test_cimultidict_update_str(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[str] = CIMultiDict((str(i), str(i)) for i in range(100))
+ items = {str(i): str(i) for i in range(100, 200)}
+
+ @benchmark
+ def _run() -> None:
+ md.update(items)
+
+
+def test_cimultidict_update_istr(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[istr] = CIMultiDict((istr(i), istr(i)) for i in range(100))
+ items: Dict[Union[str, istr], istr] = {istr(i): istr(i) for i in range(100, 200)}
+
+ @benchmark
+ def _run() -> None:
+ md.update(items)
+
+
+def test_multidict_extend_str(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[str] = CIMultiDict((str(i), str(i)) for i in range(100))
+ items = {str(i): str(i) for i in range(200)}
+
+ @benchmark
+ def _run() -> None:
+ md.extend(items)
+
+
+def test_cimultidict_extend_str(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[str] = CIMultiDict((str(i), str(i)) for i in range(100))
+ items = {str(i): str(i) for i in range(200)}
+
+ @benchmark
+ def _run() -> None:
+ md.extend(items)
+
+
+def test_cimultidict_extend_istr(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[istr] = CIMultiDict((istr(i), istr(i)) for i in range(100))
+ items = {istr(i): istr(i) for i in range(200)}
+
+ @benchmark
+ def _run() -> None:
+ md.extend(items)
+
+
+def test_multidict_delitem_str(benchmark: BenchmarkFixture) -> None:
+ md_base: MultiDict[str] = MultiDict((str(i), str(i)) for i in range(100))
+ items = [str(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ md = md_base.copy()
+ for i in items:
+ del md[i]
+
+
+def test_cimultidict_delitem_str(benchmark: BenchmarkFixture) -> None:
+ md_base: CIMultiDict[str] = CIMultiDict((str(i), str(i)) for i in range(100))
+ items = [str(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ md = md_base.copy()
+ for i in items:
+ del md[i]
+
+
+def test_cimultidict_delitem_istr(benchmark: BenchmarkFixture) -> None:
+ md_base: CIMultiDict[istr] = CIMultiDict((istr(i), istr(i)) for i in range(100))
+ items = [istr(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ md = md_base.copy()
+ for i in items:
+ del md[i]
+
+
+def test_multidict_getall_str_hit(benchmark: BenchmarkFixture) -> None:
+ md: MultiDict[str] = MultiDict(("all", str(i)) for i in range(100))
+
+ @benchmark
+ def _run() -> None:
+ md.getall("all")
+
+
+def test_cimultidict_getall_str_hit(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[str] = CIMultiDict(("all", str(i)) for i in range(100))
+
+ @benchmark
+ def _run() -> None:
+ md.getall("all")
+
+
+def test_cimultidict_getall_istr_hit(benchmark: BenchmarkFixture) -> None:
+ all_istr = istr("all")
+ md: CIMultiDict[istr] = CIMultiDict((all_istr, istr(i)) for i in range(100))
+
+ @benchmark
+ def _run() -> None:
+ md.getall(all_istr)
+
+
+def test_multidict_fetch(benchmark: BenchmarkFixture) -> None:
+ md: MultiDict[str] = MultiDict((str(i), str(i)) for i in range(100))
+ items = [str(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md[i]
+
+
+def test_cimultidict_fetch_str(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[str] = CIMultiDict((str(i), str(i)) for i in range(100))
+ items = [str(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md[i]
+
+
+def test_cimultidict_fetch_istr(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[istr] = CIMultiDict((istr(i), istr(i)) for i in range(100))
+ items = [istr(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md[i]
+
+
+def test_multidict_get_hit(benchmark: BenchmarkFixture) -> None:
+ md: MultiDict[str] = MultiDict((str(i), str(i)) for i in range(100))
+ items = [str(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md.get(i)
+
+
+def test_multidict_get_miss(benchmark: BenchmarkFixture) -> None:
+ md: MultiDict[str] = MultiDict((str(i), str(i)) for i in range(100))
+ items = [str(i) for i in range(100, 200)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md.get(i)
+
+
+def test_cimultidict_get_hit(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[str] = CIMultiDict((str(i), str(i)) for i in range(100))
+ items = [str(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md.get(i)
+
+
+def test_cimultidict_get_miss(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[str] = CIMultiDict((str(i), str(i)) for i in range(100))
+ items = [str(i) for i in range(100, 200)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md.get(i)
+
+
+def test_cimultidict_get_istr_hit(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[istr] = CIMultiDict((istr(i), istr(i)) for i in range(100))
+ items = [istr(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md.get(i)
+
+
+def test_cimultidict_get_istr_miss(benchmark: BenchmarkFixture) -> None:
+ md: CIMultiDict[istr] = CIMultiDict((istr(i), istr(i)) for i in range(100))
+ items = [istr(i) for i in range(100, 200)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md.get(i)
+
+
+def test_cimultidict_get_hit_with_default(
+ benchmark: BenchmarkFixture,
+) -> None:
+ md: CIMultiDict[str] = CIMultiDict((str(i), str(i)) for i in range(100))
+ items = [str(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md.get(i, _SENTINEL)
+
+
+def test_cimultidict_get_miss_with_default(
+ benchmark: BenchmarkFixture,
+) -> None:
+ md: CIMultiDict[str] = CIMultiDict((str(i), str(i)) for i in range(100))
+ items = [str(i) for i in range(100, 200)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md.get(i, _SENTINEL)
+
+
+def test_cimultidict_get_istr_hit_with_default(
+ benchmark: BenchmarkFixture,
+) -> None:
+ md: CIMultiDict[istr] = CIMultiDict((istr(i), istr(i)) for i in range(100))
+ items = [istr(i) for i in range(100)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md.get(i, _SENTINEL)
+
+
+def test_cimultidict_get_istr_with_default_miss(
+ benchmark: BenchmarkFixture,
+) -> None:
+ md: CIMultiDict[istr] = CIMultiDict((istr(i), istr(i)) for i in range(100))
+ items = [istr(i) for i in range(100, 200)]
+
+ @benchmark
+ def _run() -> None:
+ for i in items:
+ md.get(i, _SENTINEL)
diff --git a/contrib/python/multidict/tests/test_mutable_multidict.py b/contrib/python/multidict/tests/test_mutable_multidict.py
index 3cacec25af..45f1cdf5f6 100644
--- a/contrib/python/multidict/tests/test_mutable_multidict.py
+++ b/contrib/python/multidict/tests/test_mutable_multidict.py
@@ -1,16 +1,16 @@
import string
import sys
-from typing import Type
+from typing import Union
import pytest
-from multidict import MultiMapping, MutableMultiMapping
+from multidict import CIMultiDict, CIMultiDictProxy, MultiDictProxy, istr
class TestMutableMultiDict:
def test_copy(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d1 = case_sensitive_multidict_class(key="value", a="b")
@@ -20,7 +20,7 @@ class TestMutableMultiDict:
def test__repr__(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class()
assert str(d) == "<%s()>" % case_sensitive_multidict_class.__name__
@@ -35,7 +35,7 @@ class TestMutableMultiDict:
def test_getall(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class([("key", "value1")], key="value2")
assert len(d) == 2
@@ -50,7 +50,7 @@ class TestMutableMultiDict:
def test_add(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class()
@@ -73,7 +73,7 @@ class TestMutableMultiDict:
def test_extend(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[Union[str, int]]],
) -> None:
d = case_sensitive_multidict_class()
assert d == {}
@@ -101,12 +101,12 @@ class TestMutableMultiDict:
assert 6 == len(d)
with pytest.raises(TypeError):
- d.extend("foo", "bar")
+ d.extend("foo", "bar") # type: ignore[arg-type, call-arg]
def test_extend_from_proxy(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
- case_sensitive_multidict_proxy_class: Type[MultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
+ case_sensitive_multidict_proxy_class: type[MultiDictProxy[str]],
) -> None:
d = case_sensitive_multidict_class([("a", "a"), ("b", "b")])
proxy = case_sensitive_multidict_proxy_class(d)
@@ -118,7 +118,7 @@ class TestMutableMultiDict:
def test_clear(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class([("key", "one")], key="two", foo="bar")
@@ -128,7 +128,7 @@ class TestMutableMultiDict:
def test_del(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class([("key", "one"), ("key", "two")], foo="bar")
assert list(d.keys()) == ["key", "key", "foo"]
@@ -142,7 +142,7 @@ class TestMutableMultiDict:
def test_set_default(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class([("key", "one"), ("key", "two")], foo="bar")
assert "one" == d.setdefault("key", "three")
@@ -152,7 +152,7 @@ class TestMutableMultiDict:
def test_popitem(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class()
d.add("key", "val1")
@@ -163,7 +163,7 @@ class TestMutableMultiDict:
def test_popitem_empty_multidict(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class()
@@ -172,7 +172,7 @@ class TestMutableMultiDict:
def test_pop(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class()
d.add("key", "val1")
@@ -183,7 +183,7 @@ class TestMutableMultiDict:
def test_pop2(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class()
d.add("key", "val1")
@@ -195,7 +195,7 @@ class TestMutableMultiDict:
def test_pop_default(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class(other="val")
@@ -204,7 +204,7 @@ class TestMutableMultiDict:
def test_pop_raises(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class(other="val")
@@ -215,7 +215,7 @@ class TestMutableMultiDict:
def test_replacement_order(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class()
d.add("key1", "val1")
@@ -231,16 +231,16 @@ class TestMutableMultiDict:
def test_nonstr_key(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class()
with pytest.raises(TypeError):
- d[1] = "val"
+ d[1] = "val" # type: ignore[index]
def test_istr_key(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
- case_insensitive_str_class: Type[str],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
+ case_insensitive_str_class: type[str],
) -> None:
d = case_sensitive_multidict_class()
d[case_insensitive_str_class("1")] = "val"
@@ -248,7 +248,7 @@ class TestMutableMultiDict:
def test_str_derived_key(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
class A(str):
pass
@@ -259,8 +259,8 @@ class TestMutableMultiDict:
def test_istr_key_add(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
- case_insensitive_str_class: Type[str],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
+ case_insensitive_str_class: type[str],
) -> None:
d = case_sensitive_multidict_class()
d.add(case_insensitive_str_class("1"), "val")
@@ -268,7 +268,7 @@ class TestMutableMultiDict:
def test_str_derived_key_add(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
class A(str):
pass
@@ -279,7 +279,7 @@ class TestMutableMultiDict:
def test_popall(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class()
d.add("key1", "val1")
@@ -291,14 +291,14 @@ class TestMutableMultiDict:
def test_popall_default(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class()
assert "val" == d.popall("key", "val")
def test_popall_key_error(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_sensitive_multidict_class()
with pytest.raises(KeyError, match="key"):
@@ -306,7 +306,7 @@ class TestMutableMultiDict:
def test_large_multidict_resizing(
self,
- case_sensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_sensitive_multidict_class: type[CIMultiDict[int]],
) -> None:
SIZE = 1024
d = case_sensitive_multidict_class()
@@ -322,7 +322,7 @@ class TestMutableMultiDict:
class TestCIMutableMultiDict:
def test_getall(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class([("KEY", "value1")], KEY="value2")
@@ -336,7 +336,7 @@ class TestCIMutableMultiDict:
def test_ctor(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class(k1="v1")
assert "v1" == d["K1"]
@@ -344,7 +344,7 @@ class TestCIMutableMultiDict:
def test_setitem(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class()
d["k1"] = "v1"
@@ -353,7 +353,7 @@ class TestCIMutableMultiDict:
def test_delitem(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class()
d["k1"] = "v1"
@@ -363,7 +363,7 @@ class TestCIMutableMultiDict:
def test_copy(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d1 = case_insensitive_multidict_class(key="KEY", a="b")
@@ -374,7 +374,7 @@ class TestCIMutableMultiDict:
def test__repr__(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class()
assert str(d) == "<%s()>" % case_insensitive_multidict_class.__name__
@@ -389,7 +389,7 @@ class TestCIMutableMultiDict:
def test_add(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class()
@@ -421,7 +421,7 @@ class TestCIMutableMultiDict:
def test_extend(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[Union[str, int]]],
) -> None:
d = case_insensitive_multidict_class()
assert d == {}
@@ -450,12 +450,12 @@ class TestCIMutableMultiDict:
assert 6 == len(d)
with pytest.raises(TypeError):
- d.extend("foo", "bar")
+ d.extend("foo", "bar") # type: ignore[arg-type, call-arg]
def test_extend_from_proxy(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
- case_insensitive_multidict_proxy_class: Type[MultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
+ case_insensitive_multidict_proxy_class: type[CIMultiDictProxy[str]],
) -> None:
d = case_insensitive_multidict_class([("a", "a"), ("b", "b")])
proxy = case_insensitive_multidict_proxy_class(d)
@@ -467,7 +467,7 @@ class TestCIMutableMultiDict:
def test_clear(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class([("KEY", "one")], key="two", foo="bar")
@@ -477,7 +477,7 @@ class TestCIMutableMultiDict:
def test_del(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class(
[("KEY", "one"), ("key", "two")],
@@ -493,7 +493,7 @@ class TestCIMutableMultiDict:
def test_set_default(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class(
[("KEY", "one"), ("key", "two")],
@@ -507,7 +507,7 @@ class TestCIMutableMultiDict:
def test_popitem(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class()
d.add("KEY", "val1")
@@ -520,7 +520,7 @@ class TestCIMutableMultiDict:
def test_popitem_empty_multidict(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class()
@@ -529,7 +529,7 @@ class TestCIMutableMultiDict:
def test_pop(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class()
d.add("KEY", "val1")
@@ -540,7 +540,7 @@ class TestCIMutableMultiDict:
def test_pop_lowercase(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class()
d.add("KEY", "val1")
@@ -551,7 +551,7 @@ class TestCIMutableMultiDict:
def test_pop_default(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class(OTHER="val")
@@ -560,7 +560,7 @@ class TestCIMutableMultiDict:
def test_pop_raises(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d = case_insensitive_multidict_class(OTHER="val")
@@ -571,8 +571,8 @@ class TestCIMutableMultiDict:
def test_extend_with_istr(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
- case_insensitive_str_class: Type[str],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
+ case_insensitive_str_class: type[istr],
) -> None:
us = case_insensitive_str_class("aBc")
d = case_insensitive_multidict_class()
@@ -582,8 +582,8 @@ class TestCIMutableMultiDict:
def test_copy_istr(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
- case_insensitive_str_class: Type[str],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
+ case_insensitive_str_class: type[istr],
) -> None:
d = case_insensitive_multidict_class({case_insensitive_str_class("Foo"): "bar"})
d2 = d.copy()
@@ -591,7 +591,7 @@ class TestCIMutableMultiDict:
def test_eq(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
d1 = case_insensitive_multidict_class(Key="val")
d2 = case_insensitive_multidict_class(KEY="val")
@@ -604,7 +604,7 @@ class TestCIMutableMultiDict:
)
def test_sizeof(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
md = case_insensitive_multidict_class()
s1 = sys.getsizeof(md)
@@ -621,14 +621,14 @@ class TestCIMutableMultiDict:
)
def test_min_sizeof(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
md = case_insensitive_multidict_class()
assert sys.getsizeof(md) < 1024
def test_issue_620_items(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
# https://github.com/aio-libs/multidict/issues/620
d = case_insensitive_multidict_class({"a": "123, 456", "b": "789"})
@@ -639,7 +639,7 @@ class TestCIMutableMultiDict:
def test_issue_620_keys(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
# https://github.com/aio-libs/multidict/issues/620
d = case_insensitive_multidict_class({"a": "123, 456", "b": "789"})
@@ -650,7 +650,7 @@ class TestCIMutableMultiDict:
def test_issue_620_values(
self,
- case_insensitive_multidict_class: Type[MutableMultiMapping[str]],
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
) -> None:
# https://github.com/aio-libs/multidict/issues/620
d = case_insensitive_multidict_class({"a": "123, 456", "b": "789"})
diff --git a/contrib/python/multidict/tests/test_pickle.py b/contrib/python/multidict/tests/test_pickle.py
index 48adea13f0..3159ea45c6 100644
--- a/contrib/python/multidict/tests/test_pickle.py
+++ b/contrib/python/multidict/tests/test_pickle.py
@@ -1,13 +1,21 @@
import pickle
from pathlib import Path
+from typing import TYPE_CHECKING
import pytest
+from multidict import MultiDict, MultiDictProxy
+
+if TYPE_CHECKING:
+ from conftest import MultidictImplementation
+
import yatest.common as yc
here = Path(yc.source_path(__file__)).resolve().parent
-def test_pickle(any_multidict_class, pickle_protocol):
+def test_pickle(
+ any_multidict_class: type[MultiDict[int]], pickle_protocol: int
+) -> None:
d = any_multidict_class([("a", 1), ("a", 2)])
pbytes = pickle.dumps(d, pickle_protocol)
obj = pickle.loads(pbytes)
@@ -15,14 +23,21 @@ def test_pickle(any_multidict_class, pickle_protocol):
assert isinstance(obj, any_multidict_class)
-def test_pickle_proxy(any_multidict_class, any_multidict_proxy_class):
+def test_pickle_proxy(
+ any_multidict_class: type[MultiDict[int]],
+ any_multidict_proxy_class: type[MultiDictProxy[int]],
+) -> None:
d = any_multidict_class([("a", 1), ("a", 2)])
proxy = any_multidict_proxy_class(d)
with pytest.raises(TypeError):
pickle.dumps(proxy)
-def test_load_from_file(any_multidict_class, multidict_implementation, pickle_protocol):
+def test_load_from_file(
+ any_multidict_class: type[MultiDict[int]],
+ multidict_implementation: "MultidictImplementation",
+ pickle_protocol: int,
+) -> None:
multidict_class_name = any_multidict_class.__name__
pickle_file_basename = "-".join(
(
diff --git a/contrib/python/multidict/tests/test_types.py b/contrib/python/multidict/tests/test_types.py
index ceaa391e37..6339006b68 100644
--- a/contrib/python/multidict/tests/test_types.py
+++ b/contrib/python/multidict/tests/test_types.py
@@ -1,52 +1,57 @@
-import sys
import types
import pytest
-def test_proxies(multidict_module):
+def test_proxies(multidict_module: types.ModuleType) -> None:
assert issubclass(
multidict_module.CIMultiDictProxy,
multidict_module.MultiDictProxy,
)
-def test_dicts(multidict_module):
+def test_dicts(multidict_module: types.ModuleType) -> None:
assert issubclass(multidict_module.CIMultiDict, multidict_module.MultiDict)
-def test_proxy_not_inherited_from_dict(multidict_module):
+def test_proxy_not_inherited_from_dict(multidict_module: types.ModuleType) -> None:
assert not issubclass(multidict_module.MultiDictProxy, multidict_module.MultiDict)
-def test_dict_not_inherited_from_proxy(multidict_module):
+def test_dict_not_inherited_from_proxy(multidict_module: types.ModuleType) -> None:
assert not issubclass(multidict_module.MultiDict, multidict_module.MultiDictProxy)
-def test_multidict_proxy_copy_type(multidict_module):
+def test_multidict_proxy_copy_type(multidict_module: types.ModuleType) -> None:
d = multidict_module.MultiDict(key="val")
p = multidict_module.MultiDictProxy(d)
assert isinstance(p.copy(), multidict_module.MultiDict)
-def test_cimultidict_proxy_copy_type(multidict_module):
+def test_cimultidict_proxy_copy_type(multidict_module: types.ModuleType) -> None:
d = multidict_module.CIMultiDict(key="val")
p = multidict_module.CIMultiDictProxy(d)
assert isinstance(p.copy(), multidict_module.CIMultiDict)
-def test_create_multidict_proxy_from_nonmultidict(multidict_module):
+def test_create_multidict_proxy_from_nonmultidict(
+ multidict_module: types.ModuleType,
+) -> None:
with pytest.raises(TypeError):
multidict_module.MultiDictProxy({})
-def test_create_multidict_proxy_from_cimultidict(multidict_module):
+def test_create_multidict_proxy_from_cimultidict(
+ multidict_module: types.ModuleType,
+) -> None:
d = multidict_module.CIMultiDict(key="val")
p = multidict_module.MultiDictProxy(d)
assert p == d
-def test_create_multidict_proxy_from_multidict_proxy_from_mdict(multidict_module):
+def test_create_multidict_proxy_from_multidict_proxy_from_mdict(
+ multidict_module: types.ModuleType,
+) -> None:
d = multidict_module.MultiDict(key="val")
p = multidict_module.MultiDictProxy(d)
assert p == d
@@ -54,7 +59,9 @@ def test_create_multidict_proxy_from_multidict_proxy_from_mdict(multidict_module
assert p2 == p
-def test_create_cimultidict_proxy_from_cimultidict_proxy_from_ci(multidict_module):
+def test_create_cimultidict_proxy_from_cimultidict_proxy_from_ci(
+ multidict_module: types.ModuleType,
+) -> None:
d = multidict_module.CIMultiDict(key="val")
p = multidict_module.CIMultiDictProxy(d)
assert p == d
@@ -62,7 +69,9 @@ def test_create_cimultidict_proxy_from_cimultidict_proxy_from_ci(multidict_modul
assert p2 == p
-def test_create_cimultidict_proxy_from_nonmultidict(multidict_module):
+def test_create_cimultidict_proxy_from_nonmultidict(
+ multidict_module: types.ModuleType,
+) -> None:
with pytest.raises(
TypeError,
match=(
@@ -73,7 +82,9 @@ def test_create_cimultidict_proxy_from_nonmultidict(multidict_module):
multidict_module.CIMultiDictProxy({})
-def test_create_ci_multidict_proxy_from_multidict(multidict_module):
+def test_create_ci_multidict_proxy_from_multidict(
+ multidict_module: types.ModuleType,
+) -> None:
d = multidict_module.MultiDict(key="val")
with pytest.raises(
TypeError,
@@ -85,20 +96,7 @@ def test_create_ci_multidict_proxy_from_multidict(multidict_module):
multidict_module.CIMultiDictProxy(d)
-@pytest.mark.skipif(
- sys.version_info >= (3, 9), reason="Python 3.9 uses GenericAlias which is different"
-)
-def test_generic_exists(multidict_module) -> None:
- assert multidict_module.MultiDict[int] is multidict_module.MultiDict
- assert multidict_module.MultiDictProxy[int] is multidict_module.MultiDictProxy
- assert multidict_module.CIMultiDict[int] is multidict_module.CIMultiDict
- assert multidict_module.CIMultiDictProxy[int] is multidict_module.CIMultiDictProxy
-
-
-@pytest.mark.skipif(
- sys.version_info < (3, 9), reason="Python 3.9 is required for GenericAlias"
-)
-def test_generic_alias(multidict_module) -> None:
+def test_generic_alias(multidict_module: types.ModuleType) -> None:
assert multidict_module.MultiDict[int] == types.GenericAlias(
multidict_module.MultiDict, (int,)
)
diff --git a/contrib/python/multidict/tests/test_update.py b/contrib/python/multidict/tests/test_update.py
index f455327857..46ab30a08b 100644
--- a/contrib/python/multidict/tests/test_update.py
+++ b/contrib/python/multidict/tests/test_update.py
@@ -1,10 +1,12 @@
from collections import deque
-from typing import Type
+from typing import Union
-from multidict import MultiMapping
+from multidict import CIMultiDict, MultiDict
+_MD_Classes = Union[type[MultiDict[int]], type[CIMultiDict[int]]]
-def test_update_replace(any_multidict_class: Type[MultiMapping[str]]) -> None:
+
+def test_update_replace(any_multidict_class: _MD_Classes) -> None:
obj1 = any_multidict_class([("a", 1), ("b", 2), ("a", 3), ("c", 10)])
obj2 = any_multidict_class([("a", 4), ("b", 5), ("a", 6)])
obj1.update(obj2)
@@ -12,7 +14,7 @@ def test_update_replace(any_multidict_class: Type[MultiMapping[str]]) -> None:
assert list(obj1.items()) == expected
-def test_update_append(any_multidict_class: Type[MultiMapping[str]]) -> None:
+def test_update_append(any_multidict_class: _MD_Classes) -> None:
obj1 = any_multidict_class([("a", 1), ("b", 2), ("a", 3), ("c", 10)])
obj2 = any_multidict_class([("a", 4), ("a", 5), ("a", 6)])
obj1.update(obj2)
@@ -20,7 +22,7 @@ def test_update_append(any_multidict_class: Type[MultiMapping[str]]) -> None:
assert list(obj1.items()) == expected
-def test_update_remove(any_multidict_class: Type[MultiMapping[str]]) -> None:
+def test_update_remove(any_multidict_class: _MD_Classes) -> None:
obj1 = any_multidict_class([("a", 1), ("b", 2), ("a", 3), ("c", 10)])
obj2 = any_multidict_class([("a", 4)])
obj1.update(obj2)
@@ -28,7 +30,7 @@ def test_update_remove(any_multidict_class: Type[MultiMapping[str]]) -> None:
assert list(obj1.items()) == expected
-def test_update_replace_seq(any_multidict_class: Type[MultiMapping[str]]) -> None:
+def test_update_replace_seq(any_multidict_class: _MD_Classes) -> None:
obj1 = any_multidict_class([("a", 1), ("b", 2), ("a", 3), ("c", 10)])
obj2 = [("a", 4), ("b", 5), ("a", 6)]
obj1.update(obj2)
@@ -36,14 +38,14 @@ def test_update_replace_seq(any_multidict_class: Type[MultiMapping[str]]) -> Non
assert list(obj1.items()) == expected
-def test_update_replace_seq2(any_multidict_class: Type[MultiMapping[str]]) -> None:
+def test_update_replace_seq2(any_multidict_class: _MD_Classes) -> None:
obj1 = any_multidict_class([("a", 1), ("b", 2), ("a", 3), ("c", 10)])
obj1.update([("a", 4)], b=5, a=6)
expected = [("a", 4), ("b", 5), ("a", 6), ("c", 10)]
assert list(obj1.items()) == expected
-def test_update_append_seq(any_multidict_class: Type[MultiMapping[str]]) -> None:
+def test_update_append_seq(any_multidict_class: _MD_Classes) -> None:
obj1 = any_multidict_class([("a", 1), ("b", 2), ("a", 3), ("c", 10)])
obj2 = [("a", 4), ("a", 5), ("a", 6)]
obj1.update(obj2)
@@ -51,7 +53,7 @@ def test_update_append_seq(any_multidict_class: Type[MultiMapping[str]]) -> None
assert list(obj1.items()) == expected
-def test_update_remove_seq(any_multidict_class: Type[MultiMapping[str]]) -> None:
+def test_update_remove_seq(any_multidict_class: _MD_Classes) -> None:
obj1 = any_multidict_class([("a", 1), ("b", 2), ("a", 3), ("c", 10)])
obj2 = [("a", 4)]
obj1.update(obj2)
@@ -59,9 +61,7 @@ def test_update_remove_seq(any_multidict_class: Type[MultiMapping[str]]) -> None
assert list(obj1.items()) == expected
-def test_update_md(
- case_sensitive_multidict_class: Type[MultiMapping[str]],
-) -> None:
+def test_update_md(case_sensitive_multidict_class: type[CIMultiDict[str]]) -> None:
d = case_sensitive_multidict_class()
d.add("key", "val1")
d.add("key", "val2")
@@ -73,8 +73,8 @@ def test_update_md(
def test_update_istr_ci_md(
- case_insensitive_multidict_class: Type[MultiMapping[str]],
- case_insensitive_str_class: str,
+ case_insensitive_multidict_class: type[CIMultiDict[str]],
+ case_insensitive_str_class: type[str],
) -> None:
d = case_insensitive_multidict_class()
d.add(case_insensitive_str_class("KEY"), "val1")
@@ -86,9 +86,7 @@ def test_update_istr_ci_md(
assert [("key", "val"), ("key2", "val3")] == list(d.items())
-def test_update_ci_md(
- case_insensitive_multidict_class: Type[MultiMapping[str]],
-) -> None:
+def test_update_ci_md(case_insensitive_multidict_class: type[CIMultiDict[str]]) -> None:
d = case_insensitive_multidict_class()
d.add("KEY", "val1")
d.add("key", "val2")
@@ -99,9 +97,7 @@ def test_update_ci_md(
assert [("Key", "val"), ("key2", "val3")] == list(d.items())
-def test_update_list_arg_and_kwds(
- any_multidict_class: Type[MultiMapping[str]],
-) -> None:
+def test_update_list_arg_and_kwds(any_multidict_class: _MD_Classes) -> None:
obj = any_multidict_class()
arg = [("a", 1)]
obj.update(arg, b=2)
@@ -109,9 +105,7 @@ def test_update_list_arg_and_kwds(
assert arg == [("a", 1)]
-def test_update_tuple_arg_and_kwds(
- any_multidict_class: Type[MultiMapping[str]],
-) -> None:
+def test_update_tuple_arg_and_kwds(any_multidict_class: _MD_Classes) -> None:
obj = any_multidict_class()
arg = (("a", 1),)
obj.update(arg, b=2)
@@ -119,9 +113,7 @@ def test_update_tuple_arg_and_kwds(
assert arg == (("a", 1),)
-def test_update_deque_arg_and_kwds(
- any_multidict_class: Type[MultiMapping[str]],
-) -> None:
+def test_update_deque_arg_and_kwds(any_multidict_class: _MD_Classes) -> None:
obj = any_multidict_class()
arg = deque([("a", 1)])
obj.update(arg, b=2)
diff --git a/contrib/python/multidict/tests/test_version.py b/contrib/python/multidict/tests/test_version.py
index e004afa112..4fe209c678 100644
--- a/contrib/python/multidict/tests/test_version.py
+++ b/contrib/python/multidict/tests/test_version.py
@@ -1,18 +1,25 @@
-from typing import Callable, Type
+from collections.abc import Callable
+from typing import TypeVar, Union
import pytest
-from multidict import MultiMapping
+from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
+_T = TypeVar("_T")
+_MD_Types = Union[
+ MultiDict[_T], CIMultiDict[_T], MultiDictProxy[_T], CIMultiDictProxy[_T]
+]
+GetVersion = Callable[[_MD_Types[_T]], int]
-def test_getversion_bad_param(multidict_getversion_callable):
+
+def test_getversion_bad_param(multidict_getversion_callable: GetVersion[str]) -> None:
with pytest.raises(TypeError):
- multidict_getversion_callable(1)
+ multidict_getversion_callable(1) # type: ignore[arg-type]
def test_ctor(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m1 = any_multidict_class()
v1 = multidict_getversion_callable(m1)
@@ -22,8 +29,8 @@ def test_ctor(
def test_add(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
v = multidict_getversion_callable(m)
@@ -32,8 +39,8 @@ def test_add(
def test_delitem(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -43,8 +50,8 @@ def test_delitem(
def test_delitem_not_found(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -55,8 +62,8 @@ def test_delitem_not_found(
def test_setitem(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -66,8 +73,8 @@ def test_setitem(
def test_setitem_not_found(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -77,8 +84,8 @@ def test_setitem_not_found(
def test_clear(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -88,8 +95,8 @@ def test_clear(
def test_setdefault(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -99,8 +106,8 @@ def test_setdefault(
def test_popone(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -110,8 +117,8 @@ def test_popone(
def test_popone_default(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -121,8 +128,8 @@ def test_popone_default(
def test_popone_key_error(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -133,8 +140,8 @@ def test_popone_key_error(
def test_pop(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -144,8 +151,8 @@ def test_pop(
def test_pop_default(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -155,8 +162,8 @@ def test_pop_default(
def test_pop_key_error(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -167,8 +174,8 @@ def test_pop_key_error(
def test_popall(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -178,8 +185,8 @@ def test_popall(
def test_popall_default(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -189,8 +196,8 @@ def test_popall_default(
def test_popall_key_error(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -201,8 +208,8 @@ def test_popall_key_error(
def test_popitem(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
m.add("key", "val")
@@ -212,8 +219,8 @@ def test_popitem(
def test_popitem_key_error(
- any_multidict_class: Type[MultiMapping[str]],
- multidict_getversion_callable: Callable,
+ any_multidict_class: type[MultiDict[str]],
+ multidict_getversion_callable: GetVersion[str],
) -> None:
m = any_multidict_class()
v = multidict_getversion_callable(m)
diff --git a/contrib/python/multidict/ya.make b/contrib/python/multidict/ya.make
index 8a2950eae9..626036249b 100644
--- a/contrib/python/multidict/ya.make
+++ b/contrib/python/multidict/ya.make
@@ -2,7 +2,7 @@
PY3_LIBRARY()
-VERSION(6.1.0)
+VERSION(6.2.0)
LICENSE(Apache-2.0)
@@ -25,7 +25,6 @@ PY_REGISTER(
PY_SRCS(
TOP_LEVEL
multidict/__init__.py
- multidict/__init__.pyi
multidict/_abc.py
multidict/_compat.py
multidict/_multidict_base.py
diff --git a/library/cpp/coroutine/engine/coroutine_ut.cpp b/library/cpp/coroutine/engine/coroutine_ut.cpp
index de56d0ed2b..656e3943c0 100644
--- a/library/cpp/coroutine/engine/coroutine_ut.cpp
+++ b/library/cpp/coroutine/engine/coroutine_ut.cpp
@@ -112,7 +112,7 @@ void TCoroTest::TestException() {
auto f2 = [&unc, &f2run](TCont*) {
f2run = true;
- unc = std::uncaught_exception();
+ unc = std::uncaught_exceptions();
// check segfault
try {
diff --git a/library/cpp/execprofile/profile.cpp b/library/cpp/execprofile/profile.cpp
index 52cf658ba2..4ce75c1d6f 100644
--- a/library/cpp/execprofile/profile.cpp
+++ b/library/cpp/execprofile/profile.cpp
@@ -25,6 +25,8 @@
#include <util/stream/file.h>
#include <util/string/util.h>
#include <util/system/datetime.h>
+#include <util/system/guard.h>
+#include <util/system/spinlock.h>
// This class sets SIGPROF handler and captures instruction pointer in it.
class TExecutionSampler : TNonCopyable {
@@ -89,7 +91,7 @@ public:
stats.SavedSamples = Samples;
stats.DroppedSamples = AtomicGet(DroppedSamples);
stats.SearchSkipCount = SearchSkipCount;
- AtomicUnlock(&WriteFlag);
+ WriteFlag.Release();
Sort(hits.begin(), hits.end(), TCompareFirst());
@@ -99,7 +101,7 @@ public:
void ResetStats() {
WaitForWriteFlag();
Clear();
- AtomicUnlock(&WriteFlag);
+ WriteFlag.Release();
}
private:
@@ -116,7 +118,6 @@ private:
TExecutionSampler()
: Started(false)
, Ips(SZ)
- , WriteFlag(0)
, DroppedSamples(0)
, Samples(0)
, UniqueSamples(0)
@@ -146,7 +147,7 @@ private:
void WaitForWriteFlag() {
// Wait for write flag to be reset
ui32 delay = 100;
- while (!AtomicTryLock(&WriteFlag)) {
+ while (!WriteFlag.TryAcquire()) {
usleep(delay);
delay += delay;
delay = Min(delay, (ui32)5000);
@@ -157,9 +158,8 @@ private:
// Check if the handler on another thread is in the process of adding a sample
// If this is the case, we just drop the current sample as this should happen
// rarely.
- if (AtomicTryLock(&WriteFlag)) {
+ if (TTryGuard guard(WriteFlag); guard.WasAcquired()) {
AddSample(rip);
- AtomicUnlock(&WriteFlag);
} else {
AtomicIncrement(DroppedSamples);
}
@@ -243,7 +243,7 @@ private:
void
Clear() {
- Y_ASSERT(WriteFlag == 1);
+ Y_ASSERT(WriteFlag.IsLocked());
for (size_t i = 0; i < SZ; ++i) {
Ips[i] = std::make_pair((void*)nullptr, (size_t)0);
@@ -262,7 +262,7 @@ private:
Ips; // The hash table storing addresses and their hitcounts
// TODO: on a big multiproc cache line false sharing by the flag and count might become an issue
- TAtomic WriteFlag; // Is used to syncronize access to the hash table
+ TSpinLock WriteFlag; // Is used to syncronize access to the hash table
TAtomic DroppedSamples; // "dropped sample" count will show how many times
// a sample was dropped either because of write conflict
// or because of the hash table had become too filled up
diff --git a/library/cpp/http/misc/httpreqdata.cpp b/library/cpp/http/misc/httpreqdata.cpp
index ed5e8872c9..028418bc4a 100644
--- a/library/cpp/http/misc/httpreqdata.cpp
+++ b/library/cpp/http/misc/httpreqdata.cpp
@@ -81,6 +81,8 @@ TStringBuf TBaseServerRequestData::Environment(TStringBuf key) const {
return ip ? *ip : RemoteAddr();
} else if (ciKey == "QUERY_STRING") {
return Query();
+ } else if (ciKey == "BODY") {
+ return Body();
} else if (ciKey == "SERVER_NAME") {
return ServerName();
} else if (ciKey == "SERVER_PORT") {
diff --git a/library/cpp/http/misc/httpreqdata.h b/library/cpp/http/misc/httpreqdata.h
index b5c9e446a9..48854923d1 100644
--- a/library/cpp/http/misc/httpreqdata.h
+++ b/library/cpp/http/misc/httpreqdata.h
@@ -7,6 +7,7 @@
#include <util/system/defaults.h>
#include <util/string/cast.h>
#include <library/cpp/cgiparam/cgiparam.h>
+#include <util/memory/blob.h>
#include <util/network/address.h>
#include <util/network/socket.h>
#include <util/generic/hash.h>
@@ -52,6 +53,10 @@ public:
return OrigQuery_;
}
+ TStringBuf Body() const {
+ return Body_.AsStringBuf();
+ }
+
void AppendQueryString(TStringBuf str);
TStringBuf RemoteAddr() const;
void SetRemoteAddr(TStringBuf addr);
@@ -77,6 +82,10 @@ public:
Socket_ = s;
}
+ void SetBody(const TBlob& body) noexcept {
+ Body_ = body;
+ }
+
ui64 RequestBeginTime() const noexcept {
return BeginTime_;
}
@@ -93,6 +102,7 @@ private:
TString Path_;
TStringBuf Query_;
TStringBuf OrigQuery_;
+ TBlob Body_;
THttpHeadersContainer HeadersIn_;
SOCKET Socket_;
ui64 BeginTime_;
diff --git a/library/cpp/http/server/http_ex.cpp b/library/cpp/http/server/http_ex.cpp
index 0681da10ff..ead37da56d 100644
--- a/library/cpp/http/server/http_ex.cpp
+++ b/library/cpp/http/server/http_ex.cpp
@@ -93,6 +93,7 @@ bool THttpClientRequestExtension::ProcessHeaders(TBaseServerRequestData& rd, TBl
} else {
postData = TBlob::FromStream(Input());
}
+ rd.SetBody(postData);
} catch (...) {
Output() << "HTTP/1.1 400 Bad request\r\n\r\n";
return false;
diff --git a/library/cpp/logger/backend_creator.cpp b/library/cpp/logger/backend_creator.cpp
index ea430edb83..f239fa57e2 100644
--- a/library/cpp/logger/backend_creator.cpp
+++ b/library/cpp/logger/backend_creator.cpp
@@ -2,7 +2,7 @@
#include "stream.h"
#include "uninitialized_creator.h"
#include <util/system/yassert.h>
-#include <util/stream/debug.h>
+#include <util/stream/output.h>
#include <util/stream/output.h>
diff --git a/library/cpp/logger/sync_page_cache_file.cpp b/library/cpp/logger/sync_page_cache_file.cpp
index 1424159af7..7f262c7ee7 100644
--- a/library/cpp/logger/sync_page_cache_file.cpp
+++ b/library/cpp/logger/sync_page_cache_file.cpp
@@ -1,26 +1,44 @@
#include "sync_page_cache_file.h"
+
#include "record.h"
#include <util/generic/buffer.h>
#include <util/generic/yexception.h>
+#include <util/system/align.h>
+#include <util/system/event.h>
#include <util/system/file.h>
#include <util/system/info.h>
#include <util/system/mutex.h>
-#include <util/system/align.h>
+#include <util/system/thread.h>
class TSyncPageCacheFileLogBackend::TImpl: public TNonCopyable {
public:
- TImpl(const TString& path, size_t maxBufferSize, size_t maxPendingCacheSize)
+ TImpl(
+ const TString& path,
+ size_t maxBufferSize,
+ size_t maxPendingCacheSize,
+ TMaybe<TDuration> bufferFlushPeriod
+ )
: File_{OpenFile(path)}
, MaxBufferSize_{maxBufferSize}
, MaxPendingCacheSize_{maxPendingCacheSize}
, Buffer_{maxBufferSize}
+ , BufferFlushPeriod_{bufferFlushPeriod}
{
ResetPtrs();
+
+ if (BufferFlushPeriod_) {
+ BufferFlushThreadPtr_ = MakeHolder<TThread>([this] {RunBufferFlushThread();});
+ BufferFlushThreadPtr_->Start();
+ }
}
~TImpl() noexcept {
try {
+ if (BufferFlushThreadPtr_) {
+ BufferFlushThreadExitWaiter_.Signal();
+ }
+
Write();
FlushSync(GuaranteedWrittenPtr_, WrittenPtr_);
} catch (...) {
@@ -32,17 +50,7 @@ public:
Buffer_.Append(rec.Data, rec.Len);
if (Buffer_.size() >= MaxBufferSize_) {
- const i64 prevAlignedEndPtr = PageAlignedWrittenPtr_;
- Write();
-
- if (prevAlignedEndPtr < PageAlignedWrittenPtr_) {
- FlushAsync(prevAlignedEndPtr, PageAlignedWrittenPtr_);
- }
-
- const i64 minPendingCacheOffset = PageAlignedWrittenPtr_ - MaxPendingCacheSize_;
- if (minPendingCacheOffset > GuaranteedWrittenPtr_) {
- FlushSync(GuaranteedWrittenPtr_, minPendingCacheOffset);
- }
+ WriteAndFlush();
}
}
@@ -101,6 +109,36 @@ private:
GuaranteedWrittenPtr_ = to;
}
+ void WriteAndFlush() {
+ const i64 prevAlignedEndPtr = PageAlignedWrittenPtr_;
+ Write();
+
+ if (prevAlignedEndPtr < PageAlignedWrittenPtr_) {
+ FlushAsync(prevAlignedEndPtr, PageAlignedWrittenPtr_);
+ }
+
+ const i64 minPendingCacheOffset = PageAlignedWrittenPtr_ - MaxPendingCacheSize_;
+ if (minPendingCacheOffset > GuaranteedWrittenPtr_) {
+ FlushSync(GuaranteedWrittenPtr_, minPendingCacheOffset);
+ }
+ }
+
+ void RunBufferFlushThread() {
+ Y_ENSURE(BufferFlushPeriod_);
+ TInstant deadline;
+ do {
+ deadline = TInstant::Now() + *BufferFlushPeriod_;
+ try {
+ TGuard guard{Lock_};
+ if (!Buffer_.Empty()) {
+ WriteAndFlush();
+ }
+ } catch (...) {
+ Cerr << "Failed to flush eventlog buffer: " << CurrentExceptionMessage() << Endl;
+ }
+ } while (!BufferFlushThreadExitWaiter_.WaitD(deadline));
+ }
+
private:
TMutex Lock_;
TFile File_;
@@ -112,10 +150,26 @@ private:
i64 WrittenPtr_ = 0;
i64 PageAlignedWrittenPtr_ = 0;
i64 GuaranteedWrittenPtr_ = 0;
+
+ const TMaybe<TDuration> BufferFlushPeriod_;
+ TManualEvent BufferFlushThreadExitWaiter_;
+
+ // thread should be declared last to be destroyed before other props
+ THolder<TThread> BufferFlushThreadPtr_;
};
-TSyncPageCacheFileLogBackend::TSyncPageCacheFileLogBackend(const TString& path, size_t maxBufferSize, size_t maxPengingCacheSize)
- : Impl_(MakeHolder<TImpl>(path, maxBufferSize, maxPengingCacheSize))
+TSyncPageCacheFileLogBackend::TSyncPageCacheFileLogBackend(
+ const TString& path,
+ size_t maxBufferSize,
+ size_t maxPengingCacheSize,
+ TMaybe<TDuration> bufferFlushPeriod
+)
+ : Impl_(MakeHolder<TImpl>(
+ path,
+ maxBufferSize,
+ maxPengingCacheSize,
+ bufferFlushPeriod
+ ))
{}
TSyncPageCacheFileLogBackend::~TSyncPageCacheFileLogBackend() {
diff --git a/library/cpp/logger/sync_page_cache_file.h b/library/cpp/logger/sync_page_cache_file.h
index a36340651c..e50aade08f 100644
--- a/library/cpp/logger/sync_page_cache_file.h
+++ b/library/cpp/logger/sync_page_cache_file.h
@@ -2,12 +2,19 @@
#include "backend.h"
+#include <util/datetime/base.h>
#include <util/generic/fwd.h>
+#include <util/generic/maybe.h>
#include <util/generic/ptr.h>
class TSyncPageCacheFileLogBackend final: public TLogBackend {
public:
- TSyncPageCacheFileLogBackend(const TString& path, size_t maxBufferSize, size_t maxPendingCacheSize);
+ TSyncPageCacheFileLogBackend(
+ const TString& path,
+ size_t maxBufferSize,
+ size_t maxPendingCacheSize,
+ TMaybe<TDuration> bufferFlushPeriod = Nothing()
+ );
~TSyncPageCacheFileLogBackend();
void WriteData(const TLogRecord& rec) override;
diff --git a/library/cpp/neh/http2.cpp b/library/cpp/neh/http2.cpp
index f10fdc3b3e..daf73ba0d1 100644
--- a/library/cpp/neh/http2.cpp
+++ b/library/cpp/neh/http2.cpp
@@ -912,7 +912,6 @@ namespace {
THttpConnManager()
: TotalConn(0)
, EP_(THttp2Options::AsioThreads)
- , InPurging_(0)
, MaxConnId_(0)
, Shutdown_(false)
{
@@ -1011,7 +1010,7 @@ namespace {
}
void SuggestPurgeCache() {
- if (AtomicTryLock(&InPurging_)) {
+ if (InPurging_.TryAcquire()) {
//evaluate the usefulness of purging the cache
//если в кеше мало соединений (< MaxConnId_/16 или 64), не чистим кеш
if (Cache_.Size() > (Min((size_t)AtomicGet(MaxConnId_), (size_t)1024U) >> 4)) {
@@ -1031,7 +1030,7 @@ namespace {
return; //memo: thread MUST unlock InPurging_ (see DoExecute())
}
}
- AtomicUnlock(&InPurging_);
+ InPurging_.Release();
}
}
@@ -1049,7 +1048,7 @@ namespace {
PurgeCache();
- AtomicUnlock(&InPurging_);
+ InPurging_.Release();
}
}
@@ -1076,7 +1075,7 @@ namespace {
TExecutorsPool EP_;
TConnCache<THttpConn> Cache_;
- TAtomic InPurging_;
+ TSpinLock InPurging_;
TAtomic MaxConnId_;
TAutoPtr<IThreadFactory::IThread> T_;
diff --git a/library/cpp/neh/https.cpp b/library/cpp/neh/https.cpp
index ece7d3cf2d..99db8a44cc 100644
--- a/library/cpp/neh/https.cpp
+++ b/library/cpp/neh/https.cpp
@@ -11,7 +11,6 @@
#include <openssl/ssl.h>
#include <openssl/err.h>
-#include <openssl/bio.h>
#include <openssl/x509v3.h>
#include <library/cpp/openssl/init/init.h>
@@ -26,12 +25,10 @@
#include <util/generic/list.h>
#include <util/generic/utility.h>
#include <util/network/socket.h>
-#include <util/stream/str.h>
#include <util/stream/zlib.h>
#include <util/string/builder.h>
#include <util/string/cast.h>
#include <util/system/condvar.h>
-#include <util/system/error.h>
#include <util/system/types.h>
#include <util/thread/factory.h>
@@ -448,8 +445,7 @@ namespace NNeh {
};
TConnCache()
- : InPurging_(0)
- , MaxConnId_(0)
+ : MaxConnId_(0)
, Shutdown_(false)
{
T_ = SystemThreadFactory()->Run(this);
@@ -557,7 +553,7 @@ namespace NNeh {
private:
void SuggestPurgeCache() {
- if (AtomicTryLock(&InPurging_)) {
+ if (InPurging_.TryAcquire()) {
//evaluate the usefulness of purging the cache
//если в кеше мало соединений (< MaxConnId_/16 или 64), не чистим кеш
if ((size_t)CachedConnections.Val() > (Min((size_t)MaxConnId_.load(std::memory_order_acquire), (size_t)1024U) >> 4)) {
@@ -577,7 +573,7 @@ namespace NNeh {
return; //memo: thread MUST unlock InPurging_ (see DoExecute())
}
}
- AtomicUnlock(&InPurging_);
+ InPurging_.Release();
}
}
@@ -594,7 +590,7 @@ namespace NNeh {
PurgeCache();
- AtomicUnlock(&InPurging_);
+ InPurging_.Release();
}
}
@@ -660,7 +656,7 @@ namespace NNeh {
NHttp::TLockFreeSequence<TConnList> Lst_;
- TAtomic InPurging_;
+ TSpinLock InPurging_;
std::atomic<size_t> MaxConnId_;
TAutoPtr<IThreadFactory::IThread> T_;
diff --git a/library/cpp/tld/tlds-alpha-by-domain.txt b/library/cpp/tld/tlds-alpha-by-domain.txt
index b1cec7b2a6..a118629445 100644
--- a/library/cpp/tld/tlds-alpha-by-domain.txt
+++ b/library/cpp/tld/tlds-alpha-by-domain.txt
@@ -1,4 +1,4 @@
-# Version 2025032500, Last Updated Tue Mar 25 07:07:01 2025 UTC
+# Version 2025033101, Last Updated Tue Apr 1 07:07:01 2025 UTC
AAA
AARP
ABB
diff --git a/library/cpp/yt/error/error.cpp b/library/cpp/yt/error/error.cpp
index f097697cd6..ea71a07fee 100644
--- a/library/cpp/yt/error/error.cpp
+++ b/library/cpp/yt/error/error.cpp
@@ -275,6 +275,7 @@ TError::TErrorOr(const std::exception& ex)
*this = errorEx->Error();
} else {
*this = TError(NYT::EErrorCode::Generic, TRuntimeFormat{ex.what()});
+ *this <<= TErrorAttribute("exception_type", TypeName(ex));
}
YT_VERIFY(!IsOK());
Enrich();
diff --git a/library/cpp/yt/error/unittests/error_ut.cpp b/library/cpp/yt/error/unittests/error_ut.cpp
index 198aa1ecd8..a5576fad58 100644
--- a/library/cpp/yt/error/unittests/error_ut.cpp
+++ b/library/cpp/yt/error/unittests/error_ut.cpp
@@ -397,6 +397,22 @@ TEST(TErrorTest, FormatCtor)
EXPECT_EQ("Some error hello", TError("Some error %v", "hello").GetMessage());
}
+TEST(TErrorTest, ExceptionCtor)
+{
+ {
+ auto error = TError(std::runtime_error("Some error"));
+ EXPECT_EQ(error.GetMessage(), "Some error");
+ EXPECT_EQ(error.Attributes().Get<std::string>("exception_type"), "std::runtime_error");
+ }
+ EXPECT_EQ(TError(std::runtime_error("Some bad char sequences: %v %Qv {}")).GetMessage(),
+ "Some bad char sequences: %v %Qv {}");
+
+ EXPECT_EQ(TError(TSimpleException("Some error")).GetMessage(),
+ "Some error");
+ EXPECT_EQ(TError(TSimpleException("Some bad char sequences: %v %d {}")).GetMessage(),
+ "Some bad char sequences: %v %d {}");
+}
+
TEST(TErrorTest, FindRecursive)
{
auto inner = TError("Inner")
diff --git a/util/generic/ptr.h b/util/generic/ptr.h
index 7057949bc4..625418ccdb 100644
--- a/util/generic/ptr.h
+++ b/util/generic/ptr.h
@@ -95,7 +95,7 @@ public:
private:
/*
- * we do not want dependancy on cstdlib here...
+ * we do not want a dependency on cstdlib here...
*/
static void DoDestroy(void* t) noexcept;
};
@@ -161,7 +161,7 @@ public:
};
/*
- * void*-like pointers does not have operator*
+ * void*-like pointers do not have operator*
*/
template <class Base>
class TPointerBase<Base, void>: public TPointerCommon<Base, void> {
diff --git a/util/stream/debug.cpp b/util/stream/debug.cpp
deleted file mode 100644
index 1c08e38df9..0000000000
--- a/util/stream/debug.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-#include "null.h"
-#include "debug.h"
-
-#include <util/string/cast.h>
-#include <util/generic/singleton.h>
-#include <util/generic/yexception.h>
-
-#include <cstdio>
-#include <cstdlib>
-
-void TDebugOutput::DoWrite(const void* buf, size_t len) {
- if (len != fwrite(buf, 1, len, stderr)) {
- ythrow yexception() << "write failed(" << LastSystemErrorText() << ")";
- }
-}
-
-namespace {
- struct TDbgSelector {
- inline TDbgSelector() {
- char* dbg = getenv("DBGOUT");
- if (dbg) {
- Out = &Cerr;
- try {
- Level = FromString(dbg);
- } catch (const yexception&) {
- Level = 0;
- }
- } else {
- Out = &Cnull;
- Level = 0;
- }
- }
-
- IOutputStream* Out;
- int Level;
- };
-} // namespace
-
-template <>
-struct TSingletonTraits<TDbgSelector> {
- static constexpr size_t Priority = 8;
-};
-
-IOutputStream& StdDbgStream() noexcept {
- return *(Singleton<TDbgSelector>()->Out);
-}
-
-int StdDbgLevel() noexcept {
- return Singleton<TDbgSelector>()->Level;
-}
diff --git a/util/stream/debug.h b/util/stream/debug.h
deleted file mode 100644
index 92d6d4b42d..0000000000
--- a/util/stream/debug.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#pragma once
-
-#include "output.h"
-
-/**
- * @addtogroup Streams
- * @{
- */
-
-/**
- * Debug output stream. Writes into `stderr`.
- */
-class TDebugOutput: public IOutputStream {
-public:
- inline TDebugOutput() noexcept = default;
- ~TDebugOutput() override = default;
-
- TDebugOutput(TDebugOutput&&) noexcept = default;
- TDebugOutput& operator=(TDebugOutput&&) noexcept = default;
-
-private:
- void DoWrite(const void* buf, size_t len) override;
-};
-
-/**
- * @returns Standard debug stream.
- * @see Cdbg
- */
-IOutputStream& StdDbgStream() noexcept;
-
-/**
- * This function returns the current debug level as set via `DBGOUT` environment
- * variable.
- *
- * Note that the proper way to use this function is via `Y_DBGTRACE` macro.
- * There are very few cases when there is a need to use it directly.
- *
- * @returns Debug level.
- * @see ETraceLevel
- * @see DBGTRACE
- */
-int StdDbgLevel() noexcept;
-
-/**
- * Standard debug stream.
- *
- * Behavior of this stream is controlled via `DBGOUT` environment variable.
- * If this variable is set, then this stream is redirected into `stderr`,
- * otherwise whatever is written into it is simply ignored.
- */
-#define Cdbg (StdDbgStream())
-
-/** @} */
diff --git a/util/stream/output.cpp b/util/stream/output.cpp
index 51a02300df..e79c323737 100644
--- a/util/stream/output.cpp
+++ b/util/stream/output.cpp
@@ -1,7 +1,8 @@
#include "output.h"
#include <util/string/cast.h>
-#include "format.h"
+#include <util/stream/format.h>
+#include <util/stream/null.h>
#include <util/memory/tempbuf.h>
#include <util/generic/singleton.h>
#include <util/generic/yexception.h>
@@ -17,6 +18,7 @@
#include <cerrno>
#include <cstdio>
+#include <cstdlib>
#include <filesystem>
#include <string_view>
#include <optional>
@@ -458,3 +460,43 @@ void RedirectStdioToAndroidLog(bool redirect) {
Y_UNUSED(redirect);
#endif
}
+void TDebugOutput::DoWrite(const void* buf, size_t len) {
+ if (len != fwrite(buf, 1, len, stderr)) {
+ ythrow yexception() << "write failed(" << LastSystemErrorText() << ")";
+ }
+}
+
+namespace {
+ struct TDbgSelector {
+ inline TDbgSelector() {
+ char* dbg = getenv("DBGOUT");
+ if (dbg) {
+ Out = &Cerr;
+ try {
+ Level = FromString(dbg);
+ } catch (const yexception&) {
+ Level = 0;
+ }
+ } else {
+ Out = &Cnull;
+ Level = 0;
+ }
+ }
+
+ IOutputStream* Out;
+ int Level;
+ };
+} // namespace
+
+template <>
+struct TSingletonTraits<TDbgSelector> {
+ static constexpr size_t Priority = 8;
+};
+
+IOutputStream& NPrivate::StdDbgStream() noexcept {
+ return *(Singleton<TDbgSelector>()->Out);
+}
+
+int StdDbgLevel() noexcept {
+ return Singleton<TDbgSelector>()->Level;
+}
diff --git a/util/stream/output.h b/util/stream/output.h
index 8acd66a4ba..35e093ebfe 100644
--- a/util/stream/output.h
+++ b/util/stream/output.h
@@ -263,6 +263,7 @@ static inline IOutputStream& operator<<(IOutputStream& o Y_LIFETIME_BOUND, wchar
namespace NPrivate {
IOutputStream& StdOutStream() noexcept;
IOutputStream& StdErrStream() noexcept;
+ IOutputStream& StdDbgStream() noexcept;
} // namespace NPrivate
/**
@@ -281,6 +282,15 @@ namespace NPrivate {
#define Clog Cerr
/**
+ * Standard debug stream.
+ *
+ * Behavior of this stream is controlled via `DBGOUT` environment variable.
+ * If this variable is set, then this stream is redirected into `stderr`,
+ * otherwise whatever is written into it is simply ignored.
+ */
+#define Cdbg (::NPrivate::StdDbgStream())
+
+/**
* End-of-line output manipulator, basically the same as `std::endl`.
*/
static inline void Endl(IOutputStream& o) {
@@ -298,8 +308,32 @@ static inline void Flush(IOutputStream& o) {
* Also see format.h for additional manipulators.
*/
-#include "debug.h"
-
void RedirectStdioToAndroidLog(bool redirect);
-/** @} */
+/**
+ * Debug output stream. Writes into `stderr`.
+ */
+class TDebugOutput: public IOutputStream {
+public:
+ inline TDebugOutput() noexcept = default;
+ ~TDebugOutput() override = default;
+
+ TDebugOutput(TDebugOutput&&) noexcept = default;
+ TDebugOutput& operator=(TDebugOutput&&) noexcept = default;
+
+private:
+ void DoWrite(const void* buf, size_t len) override;
+};
+
+/**
+ * This function returns the current debug level as set via `DBGOUT` environment
+ * variable.
+ *
+ * Note that the proper way to use this function is via `Y_DBGTRACE` macro.
+ * There are very few cases when there is a need to use it directly.
+ *
+ * @returns Debug level.
+ * @see ETraceLevel
+ * @see DBGTRACE
+ */
+int StdDbgLevel() noexcept;
diff --git a/util/stream/trace.h b/util/stream/trace.h
index af5dc10397..40a2fe2108 100644
--- a/util/stream/trace.h
+++ b/util/stream/trace.h
@@ -1,6 +1,6 @@
#pragma once
-#include "debug.h"
+#include <util/stream/output.h>
#include <util/system/defaults.h>
/**
@@ -38,11 +38,11 @@ enum ETraceLevel: ui8 {
* @see ETraceLevel
*/
#define Y_DBGTRACE(elevel, args) Y_DBGTRACE0(int(TRACE_##elevel), args)
-#define Y_DBGTRACE0(level, args) \
- do { \
- if constexpr (Y_IS_DEBUG_BUILD) { \
- if ((level) <= StdDbgLevel()) { \
- StdDbgStream() << args << Endl; \
- } \
- } \
+#define Y_DBGTRACE0(level, args) \
+ do { \
+ if constexpr (Y_IS_DEBUG_BUILD) { \
+ if ((level) <= StdDbgLevel()) { \
+ Cdbg << args << Endl; \
+ } \
+ } \
} while (false)
diff --git a/util/system/flock.h b/util/system/flock.h
index 797b1970a1..a3695fde70 100644
--- a/util/system/flock.h
+++ b/util/system/flock.h
@@ -2,7 +2,6 @@
#include "error.h"
#include "defaults.h"
-#include "file.h"
#if defined(_unix_)
diff --git a/util/ya.make b/util/ya.make
index 3da4beec5e..92616752da 100644
--- a/util/ya.make
+++ b/util/ya.make
@@ -194,7 +194,6 @@ JOIN_SRCS(
stream/aligned.cpp
stream/buffer.cpp
stream/buffered.cpp
- stream/debug.cpp
stream/direct_io.cpp
stream/file.cpp
stream/format.cpp
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
index fe19e8f97a..aa69fb4d50 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
@@ -719,6 +719,8 @@ type PythonSettings struct {
// Some settings.
Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+ // Experimental features to be included during client library generation.
+ ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"`
}
func (x *PythonSettings) Reset() {
@@ -760,6 +762,13 @@ func (x *PythonSettings) GetCommon() *CommonLanguageSettings {
return nil
}
+func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures {
+ if x != nil {
+ return x.ExperimentalFeatures
+ }
+ return nil
+}
+
// Settings for Node client libraries.
type NodeSettings struct {
state protoimpl.MessageState
@@ -1114,6 +1123,60 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string {
return nil
}
+// Experimental features to be included during client library generation.
+// These fields will be deprecated once the feature graduates and is enabled
+// by default.
+type PythonSettings_ExperimentalFeatures struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Enables generation of asynchronous REST clients if `rest` transport is
+ // enabled. By default, asynchronous REST clients will not be generated.
+ // This feature will be enabled by default 1 month after launching the
+ // feature in preview packages.
+ RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"`
+}
+
+func (x *PythonSettings_ExperimentalFeatures) Reset() {
+ *x = PythonSettings_ExperimentalFeatures{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PythonSettings_ExperimentalFeatures) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {}
+
+func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead.
+func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{6, 0}
+}
+
+func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool {
+ if x != nil {
+ return x.RestAsyncIoEnabled
+ }
+ return false
+}
+
// Describes settings to use when generating API methods that use the
// long-running operation pattern.
// All default values below are from those used in the client library
@@ -1142,7 +1205,7 @@ type MethodSettings_LongRunning struct {
func (x *MethodSettings_LongRunning) Reset() {
*x = MethodSettings_LongRunning{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_api_client_proto_msgTypes[15]
+ mi := &file_google_api_client_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1155,7 +1218,7 @@ func (x *MethodSettings_LongRunning) String() string {
func (*MethodSettings_LongRunning) ProtoMessage() {}
func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message {
- mi := &file_google_api_client_proto_msgTypes[15]
+ mi := &file_google_api_client_proto_msgTypes[16]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1460,132 +1523,143 @@ var file_google_api_client_proto_rawDesc = []byte{
0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43,
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74,
- 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a,
- 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
- 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d,
- 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69,
- 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e,
- 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c,
- 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
- 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e,
- 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
+ 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01,
+ 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74,
+ 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15,
+ 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53,
+ 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65,
+ 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78,
+ 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74,
+ 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65,
+ 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62,
+ 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41,
+ 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a,
+ 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a,
+ 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
+ 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f,
+ 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61,
+ 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52,
+ 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74,
+ 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61,
+ 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10,
+ 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
+ 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61,
+ 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64,
+ 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75,
+ 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61,
0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06,
- 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65,
- 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f,
- 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e,
- 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65,
- 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
- 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
- 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67,
- 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38,
- 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64,
- 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72,
- 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a,
- 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
- 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
- 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79,
- 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
- 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67,
- 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
- 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
- 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2,
- 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
- 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a,
- 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
- 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e,
- 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e,
- 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f,
- 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64,
- 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70,
- 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a,
- 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12,
- 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c,
- 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74,
+ 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
+ 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69,
+ 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12,
+ 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
+ 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c,
+ 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75,
+ 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50,
+ 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94,
+ 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47,
+ 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64,
+ 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f,
+ 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f,
+ 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61,
+ 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d,
+ 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c,
+ 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12,
+ 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f,
+ 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c,
- 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65,
- 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d,
- 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78,
- 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61,
- 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f,
- 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65,
- 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69,
- 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52,
- 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e,
- 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09,
- 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53,
- 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f,
- 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12,
- 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a,
- 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41,
- 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f,
- 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54,
- 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
- 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a,
- 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52,
- 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d,
- 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43,
- 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
- 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48,
- 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f,
- 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75,
- 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f,
- 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69,
- 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
- 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
- 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
- 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33,
+ 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69,
+ 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49,
+ 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49,
+ 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
+ 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41,
+ 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10,
+ 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12,
+ 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45,
+ 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74,
+ 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e,
+ 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e,
+ 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12,
+ 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47,
+ 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f,
+ 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74,
+ 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
+ 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73,
+ 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f,
+ 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70,
+ 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+ 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
}
var (
@@ -1601,34 +1675,35 @@ func file_google_api_client_proto_rawDescGZIP() []byte {
}
var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
-var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17)
var file_google_api_client_proto_goTypes = []interface{}{
- (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization
- (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination
- (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings
- (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings
- (*Publishing)(nil), // 4: google.api.Publishing
- (*JavaSettings)(nil), // 5: google.api.JavaSettings
- (*CppSettings)(nil), // 6: google.api.CppSettings
- (*PhpSettings)(nil), // 7: google.api.PhpSettings
- (*PythonSettings)(nil), // 8: google.api.PythonSettings
- (*NodeSettings)(nil), // 9: google.api.NodeSettings
- (*DotnetSettings)(nil), // 10: google.api.DotnetSettings
- (*RubySettings)(nil), // 11: google.api.RubySettings
- (*GoSettings)(nil), // 12: google.api.GoSettings
- (*MethodSettings)(nil), // 13: google.api.MethodSettings
- nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry
- nil, // 15: google.api.DotnetSettings.RenamedServicesEntry
- nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry
- (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning
- (api.LaunchStage)(0), // 18: google.api.LaunchStage
- (*durationpb.Duration)(nil), // 19: google.protobuf.Duration
- (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions
- (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions
+ (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization
+ (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination
+ (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings
+ (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings
+ (*Publishing)(nil), // 4: google.api.Publishing
+ (*JavaSettings)(nil), // 5: google.api.JavaSettings
+ (*CppSettings)(nil), // 6: google.api.CppSettings
+ (*PhpSettings)(nil), // 7: google.api.PhpSettings
+ (*PythonSettings)(nil), // 8: google.api.PythonSettings
+ (*NodeSettings)(nil), // 9: google.api.NodeSettings
+ (*DotnetSettings)(nil), // 10: google.api.DotnetSettings
+ (*RubySettings)(nil), // 11: google.api.RubySettings
+ (*GoSettings)(nil), // 12: google.api.GoSettings
+ (*MethodSettings)(nil), // 13: google.api.MethodSettings
+ nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry
+ (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures
+ nil, // 16: google.api.DotnetSettings.RenamedServicesEntry
+ nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry
+ (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning
+ (api.LaunchStage)(0), // 19: google.api.LaunchStage
+ (*durationpb.Duration)(nil), // 20: google.protobuf.Duration
+ (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions
+ (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions
}
var file_google_api_client_proto_depIdxs = []int32{
1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination
- 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
+ 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings
6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings
7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings
@@ -1645,25 +1720,26 @@ var file_google_api_client_proto_depIdxs = []int32{
2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings
2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings
2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
- 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
- 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
- 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
- 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
- 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
- 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
- 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
- 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions
- 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions
- 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
- 21, // 31: google.api.api_version:extendee -> google.protobuf.ServiceOptions
- 32, // [32:32] is the sub-list for method output_type
- 32, // [32:32] is the sub-list for method input_type
- 32, // [32:32] is the sub-list for extension type_name
- 28, // [28:32] is the sub-list for extension extendee
- 0, // [0:28] is the sub-list for field type_name
+ 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures
+ 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
+ 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
+ 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
+ 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
+ 18, // 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
+ 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
+ 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
+ 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
+ 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions
+ 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions
+ 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
+ 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions
+ 33, // [33:33] is the sub-list for method output_type
+ 33, // [33:33] is the sub-list for method input_type
+ 33, // [33:33] is the sub-list for extension type_name
+ 29, // [29:33] is the sub-list for extension extendee
+ 0, // [0:29] is the sub-list for field type_name
}
func init() { file_google_api_client_proto_init() }
@@ -1816,7 +1892,19 @@ func file_google_api_client_proto_init() {
return nil
}
}
- file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PythonSettings_ExperimentalFeatures); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MethodSettings_LongRunning); i {
case 0:
return &v.state
@@ -1835,7 +1923,7 @@ func file_google_api_client_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_api_client_proto_rawDesc,
NumEnums: 2,
- NumMessages: 16,
+ NumMessages: 17,
NumExtensions: 4,
NumServices: 0,
},
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/ya.make b/vendor/google.golang.org/genproto/googleapis/api/annotations/ya.make
index 83425bfed0..6b72eb0831 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/ya.make
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/ya.make
@@ -2,7 +2,7 @@ GO_LIBRARY()
LICENSE(Apache-2.0)
-VERSION(v0.0.0-20240822170219-fc7c04adadcd)
+VERSION(v0.0.0-20241015192408-796eee8c2d53)
SRCS(
annotations.pb.go
diff --git a/vendor/google.golang.org/genproto/googleapis/api/ya.make b/vendor/google.golang.org/genproto/googleapis/api/ya.make
index 383a4ca9bc..0fcc57d08c 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/ya.make
+++ b/vendor/google.golang.org/genproto/googleapis/api/ya.make
@@ -2,7 +2,7 @@ GO_LIBRARY()
LICENSE(Apache-2.0)
-VERSION(v0.0.0-20240822170219-fc7c04adadcd)
+VERSION(v0.0.0-20241015192408-796eee8c2d53)
SRCS(
launch_stage.pb.go
diff --git a/ydb/apps/etcd_proxy/readme.txt b/ydb/apps/etcd_proxy/readme.txt
index 25d5d43ffb..e4745facea 100644
--- a/ydb/apps/etcd_proxy/readme.txt
+++ b/ydb/apps/etcd_proxy/readme.txt
@@ -12,3 +12,4 @@ And other todo's:
- Implement retries on "Transaction lock invslideted" error.
- Implement compaction with control of a requested revision.
- Implement the watches for ranges. (Now the watches work only with a single key or a prefix.)
+- Add unit tests for watches.
diff --git a/ydb/apps/etcd_proxy/service/etcd_impl.cpp b/ydb/apps/etcd_proxy/service/etcd_impl.cpp
index ec881129dd..59adbc029c 100644
--- a/ydb/apps/etcd_proxy/service/etcd_impl.cpp
+++ b/ydb/apps/etcd_proxy/service/etcd_impl.cpp
@@ -1067,13 +1067,23 @@ private:
}
void MakeQueryWithParams(std::ostream& sql, NYdb::TParamsBuilder& params) final {
- sql << "delete from `content` where `modified` < " << AddParam("Revision", params, KeyRevision) << ';' << std::endl;
+ sql << "$Trash = select c.key as key, c.modified as modified from `content` as c inner join (" << std::endl;
+ sql << "select max_by((`key`, `modified`), `modified`) as pair from `content`" << std::endl;
+ sql << "where `modified` < " << AddParam("Revision", params, KeyRevision) << " and 0L = `version` group by `key`" << std::endl;
+ sql << ") as keys on keys.pair.0 = c.key where c.modified <= keys.pair.1;" << std::endl;
+ sql << "select count(*) from $Trash;" << std::endl;
+ sql << "delete from `content` on select * from $Trash;" << std::endl;
}
- void ReplyWith(const NYdb::TResultSets&, const TActorContext& ctx) final {
+ void ReplyWith(const NYdb::TResultSets& results, const TActorContext& ctx) final {
+ auto parser = NYdb::TResultSetParser(results.front());
+ const auto erased = parser.TryNextRow() ? NYdb::TValueParser(parser.GetValue(0)).GetUint64() : 0ULL;
+ if (!erased)
+ TryToRollbackRevision();
+
etcdserverpb::CompactionResponse response;
response.mutable_header()->set_revision(Revision);
- Dump(std::cout) << std::endl;
+ Dump(std::cout) << '=' << erased << std::endl;
return Reply(response, ctx);
}
diff --git a/ydb/apps/etcd_proxy/service/ut/etcd_service_ut.cpp b/ydb/apps/etcd_proxy/service/ut/etcd_service_ut.cpp
index 79c9d5c400..d9b8b11f73 100644
--- a/ydb/apps/etcd_proxy/service/ut/etcd_service_ut.cpp
+++ b/ydb/apps/etcd_proxy/service/ut/etcd_service_ut.cpp
@@ -1020,6 +1020,60 @@ Y_UNIT_TEST_SUITE(Etcd_KV) {
}
});
}
+
+ Y_UNIT_TEST(Compact) {
+ MakeSimpleTest([](const std::unique_ptr<etcdserverpb::KV::Stub> &etcd) {
+ Put("key0", "value0", etcd);
+ Put("key3", "value1", etcd);
+ Put("key2", "value2", etcd);
+ Put("key0", "value3", etcd);
+ Put("key1", "value4", etcd);
+ Delete("key2", etcd);
+ const auto revForCompact = Put("key3", "value5", etcd);
+ Delete("key1", etcd);
+ const auto revForRequest = Put("key3", "value6", etcd);
+ Delete("key0", etcd);
+ Delete("key3", etcd);
+
+ {
+ grpc::ClientContext readRangeCtx;
+ etcdserverpb::RangeRequest rangeRequest;
+ rangeRequest.set_key("key");
+ rangeRequest.set_range_end("kez");
+ rangeRequest.set_keys_only(true);
+ etcdserverpb::RangeResponse rangeResponse;
+ UNIT_ASSERT(etcd->Range(&readRangeCtx, rangeRequest, &rangeResponse).ok());
+ UNIT_ASSERT_VALUES_EQUAL(rangeResponse.count(), 0LL);
+ }
+
+ {
+ grpc::ClientContext compactCtx;
+ etcdserverpb::CompactionRequest compactionRequest;
+ compactionRequest.set_revision(revForCompact);
+ etcdserverpb::CompactionResponse compactionResponse;
+ UNIT_ASSERT(etcd->Compact(&compactCtx, compactionRequest, &compactionResponse).ok());
+ }
+
+ {
+ grpc::ClientContext readRangeCtx;
+ etcdserverpb::RangeRequest rangeRequest;
+ rangeRequest.set_key("key");
+ rangeRequest.set_range_end("kez");
+ rangeRequest.set_revision(revForRequest);
+ rangeRequest.set_sort_target(etcdserverpb::RangeRequest_SortTarget_VALUE);
+ rangeRequest.set_sort_order(etcdserverpb::RangeRequest_SortOrder_ASCEND);
+ etcdserverpb::RangeResponse rangeResponse;
+ UNIT_ASSERT(etcd->Range(&readRangeCtx, rangeRequest, &rangeResponse).ok());
+ UNIT_ASSERT_VALUES_EQUAL(rangeResponse.count(), 2LL);
+ UNIT_ASSERT_VALUES_EQUAL(rangeResponse.kvs().size(), 2U);
+ UNIT_ASSERT_VALUES_EQUAL(rangeResponse.kvs(0).key(), "key0");
+ UNIT_ASSERT_VALUES_EQUAL(rangeResponse.kvs(1).key(), "key3");
+ UNIT_ASSERT_VALUES_EQUAL(rangeResponse.kvs(0).value(), "value3");
+ UNIT_ASSERT_VALUES_EQUAL(rangeResponse.kvs(1).value(), "value6");
+ }
+
+ });
+ }
} // Y_UNIT_TEST_SUITE(Etcd_KV)
Y_UNIT_TEST_SUITE(Etcd_Lease) {
diff --git a/ydb/apps/ydb/CHANGELOG.md b/ydb/apps/ydb/CHANGELOG.md
index 49b17f8605..d017326187 100644
--- a/ydb/apps/ydb/CHANGELOG.md
+++ b/ydb/apps/ydb/CHANGELOG.md
@@ -1,3 +1,6 @@
+* Added "--no-discovery" option. It allows to skip discovery and use user provided endpoint to connect to YDB cluster.
+* Added `--retries` to `ydb workload <clickbenh|tpch|tpcds> run` command.
+* Added `--partition-size` param to `ydb workload <clickbench/tpcds/tpch> init`.
* Fixed bugs in `ydb scheme rmdir`: 1) do not try to delete subdomains, 2) order the deletion of external tables before the deletion of external data sources.
* YDB CLI help message improvements. Different display for detailed help and brief help.
* Support coordination nodes in `ydb scheme rmdir --recursive`.
diff --git a/ydb/apps/ydb/ut/parse_command_line.cpp b/ydb/apps/ydb/ut/parse_command_line.cpp
index 7503456139..fc01d7b5d3 100644
--- a/ydb/apps/ydb/ut/parse_command_line.cpp
+++ b/ydb/apps/ydb/ut/parse_command_line.cpp
@@ -284,6 +284,18 @@ Y_UNIT_TEST_SUITE(ParseOptionsTest) {
);
}
+ Y_UNIT_TEST_F(NoDiscoveryCommandLine, TCliTestFixture) {
+ RunCli(
+ {
+ "-v",
+ "-e", GetEndpoint(),
+ "-d", GetDatabase(),
+ "--no-discovery",
+ "scheme", "ls",
+ }
+ );
+ }
+
Y_UNIT_TEST_F(EndpointAndDatabaseFromActiveProfile, TCliTestFixture) {
TString profile = fmt::format(R"yaml(
profiles:
diff --git a/ydb/ci/rightlib.txt b/ydb/ci/rightlib.txt
index 4d372e8b6f..20221f0853 100644
--- a/ydb/ci/rightlib.txt
+++ b/ydb/ci/rightlib.txt
@@ -1 +1 @@
-a3709f79ab2678b16c3b2b57e43dd977d08bf7bf
+099240b6b9d598e1178351c4324de0bae80ae98a
diff --git a/ydb/core/backup/common/metadata.cpp b/ydb/core/backup/common/metadata.cpp
index fd70983aca..d54b879861 100644
--- a/ydb/core/backup/common/metadata.cpp
+++ b/ydb/core/backup/common/metadata.cpp
@@ -53,4 +53,48 @@ TMetadata TMetadata::Deserialize(const TString& metadata) {
return result;
}
+bool TSchemaMapping::Deserialize(const TString& jsonContent, TString& error) {
+ NJson::TJsonValue json;
+ if (!NJson::ReadJsonTree(jsonContent, &json)) {
+ error = "Failed to parse schema mapping json";
+ return false;
+ }
+ const NJson::TJsonValue& mapping = json["exportedObjects"];
+ if (!mapping.IsMap()) {
+ error = "No mapping in mapping json";
+ return false;
+ }
+
+ bool hasIV = false;
+ bool first = true;
+ for (const auto& [exportObject, info] : mapping.GetMap()) {
+ const NJson::TJsonValue& exportPrefix = info["exportPrefix"];
+ const NJson::TJsonValue& iv = info["iv"];
+ if (!exportPrefix.IsString()) {
+ error = "Incorrect exportPrefix";
+ return false;
+ }
+ if (first) {
+ hasIV = info.Has("iv");
+ } else {
+ first = false;
+ if (hasIV != info.Has("iv")) {
+ error = "Incorrect iv in schema mapping json";
+ return false;
+ }
+ }
+ if (hasIV && !iv.IsString()) {
+ error = "IV in schema mapping json is not a string";
+ return false;
+ }
+ TItem& item = Items.emplace_back();
+ item.ExportPrefix = exportPrefix.GetString();
+ item.ObjectPath = exportObject;
+ if (hasIV) {
+ item.IV = TEncryptionIV::FromHexString(iv.GetString());
+ }
+ }
+ return true;
+}
+
}
diff --git a/ydb/core/backup/common/metadata.h b/ydb/core/backup/common/metadata.h
index f92e5e4fd6..7479f7ee41 100644
--- a/ydb/core/backup/common/metadata.h
+++ b/ydb/core/backup/common/metadata.h
@@ -1,4 +1,5 @@
#pragma once
+#include "encryption.h"
#include <ydb/core/base/row_version.h>
@@ -56,4 +57,20 @@ private:
TMaybeFail<ui64> Version;
};
+class TSchemaMapping {
+public:
+ struct TItem {
+ TString ExportPrefix;
+ TString ObjectPath;
+ TMaybe<NBackup::TEncryptionIV> IV;
+ };
+
+ TSchemaMapping() = default;
+
+ bool Deserialize(const TString& jsonContent, TString& error);
+
+public:
+ std::vector<TItem> Items;
+};
+
} // namespace NKikimr::NBackup
diff --git a/ydb/core/base/appdata_fwd.h b/ydb/core/base/appdata_fwd.h
index 4af04cdb49..11b425c9ff 100644
--- a/ydb/core/base/appdata_fwd.h
+++ b/ydb/core/base/appdata_fwd.h
@@ -153,6 +153,10 @@ namespace NSchemeShard {
class IOperationFactory;
}
+namespace NReplication::NService {
+ class ITransferWriterFactory;
+}
+
class TFormatFactory;
namespace NYamlConfig {
@@ -188,6 +192,7 @@ struct TAppData {
const NMsgBusProxy::IPersQueueGetReadSessionsInfoWorkerFactory* PersQueueGetReadSessionsInfoWorkerFactory = nullptr;
const NPQ::IPersQueueMirrorReaderFactory* PersQueueMirrorReaderFactory = nullptr;
+ std::shared_ptr<NReplication::NService::ITransferWriterFactory> TransferWriterFactory = nullptr;
NYdb::TDriver* YdbDriver = nullptr;
const NPDisk::IIoContextFactory* IoContextFactory = nullptr;
diff --git a/ydb/core/base/local_user_token.cpp b/ydb/core/base/local_user_token.cpp
new file mode 100644
index 0000000000..cc2e48cb32
--- /dev/null
+++ b/ydb/core/base/local_user_token.cpp
@@ -0,0 +1,22 @@
+#include <ydb/library/aclib/aclib.h>
+#include <ydb/library/login/login.h>
+#include <ydb/library/login/protos/login.pb.h>
+
+#include "local_user_token.h"
+
+namespace NKikimr {
+
+NACLib::TUserToken BuildLocalUserToken(const NLogin::TLoginProvider& loginProvider, const TString& user) {
+ const auto providerGroups = loginProvider.GetGroupsMembership(user);
+ const TVector<NACLib::TSID> groups(providerGroups.begin(), providerGroups.end());
+ //NOTE: TVector vs std::vector incompatibility between TUserToken and TLoginProvider
+ return NACLib::TUserToken(user, groups);
+}
+
+NACLib::TUserToken BuildLocalUserToken(const NLoginProto::TSecurityState& state, const TString& user) {
+ NLogin::TLoginProvider loginProvider;
+ loginProvider.UpdateSecurityState(state);
+ return BuildLocalUserToken(loginProvider, user);
+}
+
+}
diff --git a/ydb/core/base/local_user_token.h b/ydb/core/base/local_user_token.h
new file mode 100644
index 0000000000..cf694aa472
--- /dev/null
+++ b/ydb/core/base/local_user_token.h
@@ -0,0 +1,23 @@
+#pragma once
+
+#include <util/generic/string.h>
+#include <ydb/library/aclib/aclib.h>
+
+
+namespace NLoginProto {
+class TSecurityState;
+}
+namespace NLogin {
+class TLoginProvider;
+}
+
+namespace NKikimr {
+
+// Recreates user token from local user login/sid and it's database login provider or security state.
+// Token should be used to determine access level only (e.g. cluster/database admin status),
+// and not for authentication.
+// See methods in ydb/core/base/auth.h.
+NACLib::TUserToken BuildLocalUserToken(const NLogin::TLoginProvider& loginProvider, const TString& user);
+NACLib::TUserToken BuildLocalUserToken(const NLoginProto::TSecurityState& state, const TString& user);
+
+}
diff --git a/ydb/core/base/ya.make b/ydb/core/base/ya.make
index 7487120139..dc352315aa 100644
--- a/ydb/core/base/ya.make
+++ b/ydb/core/base/ya.make
@@ -30,6 +30,8 @@ SRCS(
group_stat.h
hive.h
interconnect_channels.h
+ local_user_token.cpp
+ local_user_token.h
localdb.cpp
localdb.h
location.h
diff --git a/ydb/core/blob_depot/assimilator.cpp b/ydb/core/blob_depot/assimilator.cpp
index a505a66934..1e7dd63bb7 100644
--- a/ydb/core/blob_depot/assimilator.cpp
+++ b/ydb/core/blob_depot/assimilator.cpp
@@ -17,52 +17,52 @@ namespace NKikimr::NBlobDepot {
};
class TBlobDepot::TData::TTxCommitAssimilatedBlob : public NTabletFlatExecutor::TTransactionBase<TBlobDepot> {
- const NKikimrProto::EReplyStatus Status;
- const TBlobSeqId BlobSeqId;
- const TData::TKey Key;
+ std::vector<TAssimilatedBlobInfo> Blobs;
const ui32 NotifyEventType;
const TActorId ParentId;
const ui64 Cookie;
- const bool Keep;
- const bool DoNotKeep;
public:
TTxType GetTxType() const override { return NKikimrBlobDepot::TXTYPE_COMMIT_ASSIMILATED_BLOB; }
- TTxCommitAssimilatedBlob(TBlobDepot *self, NKikimrProto::EReplyStatus status, TBlobSeqId blobSeqId,
- TData::TKey key, ui32 notifyEventType, TActorId parentId, ui64 cookie, bool keep, bool doNotKeep)
+ TTxCommitAssimilatedBlob(TBlobDepot *self, std::vector<TAssimilatedBlobInfo>&& blobs, ui32 notifyEventType,
+ TActorId parentId, ui64 cookie)
: TTransactionBase(self)
- , Status(status)
- , BlobSeqId(blobSeqId)
- , Key(std::move(key))
+ , Blobs(std::move(blobs))
, NotifyEventType(notifyEventType)
, ParentId(parentId)
, Cookie(cookie)
- , Keep(keep)
- , DoNotKeep(doNotKeep)
{}
bool Execute(TTransactionContext& txc, const TActorContext&) override {
- if (Status == NKikimrProto::OK) {
- Y_ABORT_UNLESS(!Self->Data->CanBeCollected(BlobSeqId));
- Self->Data->BindToBlob(Key, BlobSeqId, Keep, DoNotKeep, txc, this);
- } else if (Status == NKikimrProto::NODATA) {
- if (const TData::TValue *value = Self->Data->FindKey(Key); value && value->GoingToAssimilate) {
- Self->Data->DeleteKey(Key, txc, this);
- }
+ for (auto& blob : Blobs) {
+ std::visit(TOverloaded{
+ [&](TAssimilatedBlobInfo::TDrop&) {
+ if (const TData::TValue *value = Self->Data->FindKey(blob.Key); value && value->GoingToAssimilate) {
+ Self->Data->DeleteKey(blob.Key, txc, this);
+ }
+ },
+ [&](TAssimilatedBlobInfo::TUpdate& update) {
+ Y_ABORT_UNLESS(!Self->Data->CanBeCollected(update.BlobSeqId));
+ Self->Data->BindToBlob(blob.Key, update.BlobSeqId, update.Keep, update.DoNotKeep, txc, this);
+ },
+ }, blob.Action);
}
return true;
}
void Complete(const TActorContext&) override {
- if (BlobSeqId) {
- TChannelInfo& channel = Self->Channels[BlobSeqId.Channel];
- const ui32 generation = Self->Executor()->Generation();
- const TBlobSeqId leastExpectedBlobIdBefore = channel.GetLeastExpectedBlobId(generation);
- const size_t numErased = channel.AssimilatedBlobsInFlight.erase(BlobSeqId.ToSequentialNumber());
- Y_ABORT_UNLESS(numErased == 1);
- if (leastExpectedBlobIdBefore != channel.GetLeastExpectedBlobId(generation)) {
- Self->Data->OnLeastExpectedBlobIdChange(channel.Index); // allow garbage collection
+ for (const auto& blob : Blobs) {
+ if (auto *update = std::get_if<TAssimilatedBlobInfo::TUpdate>(&blob.Action)) {
+ const auto& blobSeqId = update->BlobSeqId;
+ TChannelInfo& channel = Self->Channels[blobSeqId.Channel];
+ const ui32 generation = Self->Executor()->Generation();
+ const TBlobSeqId leastExpectedBlobIdBefore = channel.GetLeastExpectedBlobId(generation);
+ const size_t numErased = channel.AssimilatedBlobsInFlight.erase(blobSeqId.ToSequentialNumber());
+ Y_ABORT_UNLESS(numErased == 1);
+ if (leastExpectedBlobIdBefore != channel.GetLeastExpectedBlobId(generation)) {
+ Self->Data->OnLeastExpectedBlobIdChange(channel.Index); // allow garbage collection
+ }
}
}
Self->Data->CommitTrash(this);
@@ -355,11 +355,11 @@ namespace NKikimr::NBlobDepot {
}
STLOG(PRI_DEBUG, BLOB_DEPOT, BDT54, "TAssimilator::ScanDataForCopying", (Id, Self->GetLogId()),
- (LastScannedKey, LastScannedKey), (NumGetsUnprocessed, GetIdToUnprocessedPuts.size()));
+ (LastScannedKey, LastScannedKey), (NumGets, Gets.size()));
THPTimer timer;
- while (GetIdToUnprocessedPuts.size() < MaxGetsUnprocessed) {
+ while (Gets.size() < MaxGetsUnprocessed) {
ui32 numItems = 0;
bool timeout = false;
@@ -391,7 +391,7 @@ namespace NKikimr::NBlobDepot {
STLOG(PRI_DEBUG, BLOB_DEPOT, BDT56, "ScanDataForCopying step", (Id, Self->GetLogId()),
(LastScannedKey, LastScannedKey), (ScanQ.size, ScanQ.size()), (TotalSize, TotalSize),
- (EntriesToProcess, EntriesToProcess), (Timeout, timeout), (NumGetsUnprocessed, GetIdToUnprocessedPuts.size()));
+ (EntriesToProcess, EntriesToProcess), (Timeout, timeout), (NumGets, Gets.size()));
if (timeout) { // timeout hit, reschedule work
TActivationContext::Send(new IEventHandle(TEvPrivate::EvResumeScanDataForCopying, 0, SelfId(), {}, nullptr, 0));
@@ -409,11 +409,11 @@ namespace NKikimr::NBlobDepot {
ev->Decommission = true;
const ui64 getId = NextGetId++;
SendToBSProxy(SelfId(), Self->Config.GetVirtualGroupId(), ev.release(), getId);
- GetIdToUnprocessedPuts.try_emplace(getId);
+ Gets.try_emplace(getId);
ScanQ.clear();
TotalSize = 0;
continue;
- } else if (!GetIdToUnprocessedPuts.empty()) {
+ } else if (!Gets.empty()) {
// there are some unprocessed get queries, still have to wait
} else if (!EntriesToProcess) { // we have finished scanning the whole table without any entries, copying is done
OnCopyDone();
@@ -436,69 +436,72 @@ namespace NKikimr::NBlobDepot {
void TAssimilator::Handle(TEvBlobStorage::TEvGetResult::TPtr ev) {
auto& msg = *ev->Get();
(msg.Status == NKikimrProto::OK ? Self->AsStats.LatestOkGet : Self->AsStats.LatestErrorGet) = TInstant::Now();
- Self->JsonHandler.Invalidate();
- const auto it = GetIdToUnprocessedPuts.find(ev->Cookie);
- Y_ABORT_UNLESS(it != GetIdToUnprocessedPuts.end());
- ui32 getBytes = 0;
+
+ const auto it = Gets.find(ev->Cookie);
+ Y_ABORT_UNLESS(it != Gets.end());
+ TGetBatch& get = it->second;
+
for (ui32 i = 0; i < msg.ResponseSz; ++i) {
auto& resp = msg.Responses[i];
+
STLOG(PRI_DEBUG, BLOB_DEPOT, BDT34, "got TEvGetResult", (Id, Self->GetLogId()), (BlobId, resp.Id),
- (Status, resp.Status), (NumGetsUnprocessed, GetIdToUnprocessedPuts.size()));
- if (resp.Status == NKikimrProto::OK) {
- std::vector<ui8> channels(1);
- if (Self->PickChannels(NKikimrBlobDepot::TChannelKind::Data, channels)) {
- TChannelInfo& channel = Self->Channels[channels.front()];
- const ui64 value = channel.NextBlobSeqId++;
- const auto blobSeqId = TBlobSeqId::FromSequentalNumber(channel.Index, Self->Executor()->Generation(), value);
- const TLogoBlobID id = blobSeqId.MakeBlobId(Self->TabletID(), EBlobType::VG_DATA_BLOB, 0, resp.Id.BlobSize());
- const ui64 putId = NextPutId++;
- SendToBSProxy(SelfId(), channel.GroupId, new TEvBlobStorage::TEvPut(id, TRcBuf(resp.Buffer), TInstant::Max()), putId);
- const bool inserted = channel.AssimilatedBlobsInFlight.insert(value).second; // prevent from barrier advancing
- Y_ABORT_UNLESS(inserted);
- const bool inserted1 = PutIdToKey.try_emplace(putId, TData::TKey(resp.Id), it->first).second;
- Y_ABORT_UNLESS(inserted1);
- ++it->second;
- }
- getBytes += resp.Id.BlobSize();
- ++Self->AsStats.BlobsReadOk;
- Self->JsonHandler.Invalidate();
- } else if (resp.Status == NKikimrProto::NODATA) {
- Self->Data->ExecuteTxCommitAssimilatedBlob(NKikimrProto::NODATA, TBlobSeqId(), TData::TKey(resp.Id),
- TEvPrivate::EvTxComplete, SelfId(), it->first);
- ++it->second;
- ++Self->AsStats.BlobsReadNoData;
- Self->AsStats.BytesToCopy -= resp.Id.BlobSize();
- Self->JsonHandler.Invalidate();
- } else {
- ++Self->AsStats.BlobsReadError;
- Self->JsonHandler.Invalidate();
- continue;
+ (Status, resp.Status), (NumGets, Gets.size()));
+
+ switch (resp.Status) {
+ case NKikimrProto::OK:
+ Self->TabletCounters->Cumulative()[NKikimrBlobDepot::COUNTER_DECOMMIT_GET_BYTES] += resp.Buffer.GetSize();
+ ++Self->AsStats.BlobsReadOk;
+
+ if (std::vector<ui8> channels(1); Self->PickChannels(NKikimrBlobDepot::TChannelKind::Data, channels)) {
+ TChannelInfo& channel = Self->Channels[channels.front()];
+ const ui64 value = channel.NextBlobSeqId++;
+ const auto blobSeqId = TBlobSeqId::FromSequentalNumber(channel.Index, Self->Executor()->Generation(), value);
+ const TLogoBlobID id = blobSeqId.MakeBlobId(Self->TabletID(), EBlobType::VG_DATA_BLOB, 0, resp.Id.BlobSize());
+ const ui64 putId = NextPutId++;
+ SendToBSProxy(SelfId(), channel.GroupId, new TEvBlobStorage::TEvPut(id, TRcBuf(resp.Buffer), TInstant::Max()), putId);
+ const bool inserted = channel.AssimilatedBlobsInFlight.insert(value).second; // prevent from barrier advancing
+ Y_ABORT_UNLESS(inserted);
+ const bool inserted1 = Puts.try_emplace(putId, TData::TKey(resp.Id), it->first).second;
+ Y_ABORT_UNLESS(inserted1);
+ ++get.PutsPending;
+ }
+ break;
+
+ case NKikimrProto::NODATA:
+ ++Self->AsStats.BlobsReadNoData;
+ Self->AsStats.BytesToCopy -= resp.Id.BlobSize();
+
+ get.AssimilatedBlobs.push_back({TData::TKey(resp.Id), TData::TAssimilatedBlobInfo::TDrop{}});
+ break;
+
+ default:
+ ++Self->AsStats.BlobsReadError;
+ continue;
}
+
Self->AsStats.LastReadBlobId = resp.Id;
- Self->JsonHandler.Invalidate();
}
- if (getBytes) {
- Self->TabletCounters->Cumulative()[NKikimrBlobDepot::COUNTER_DECOMMIT_GET_BYTES] += getBytes;
- }
- if (!it->second) {
- GetIdToUnprocessedPuts.erase(it);
- ScanDataForCopying();
+
+ if (!get.PutsPending) {
+ Self->Data->ExecuteTxCommitAssimilatedBlob(std::move(get.AssimilatedBlobs), TEvPrivate::EvTxComplete,
+ SelfId(), it->first);
}
+
+ Self->JsonHandler.Invalidate();
}
void TAssimilator::HandleTxComplete(TAutoPtr<IEventHandle> ev) {
- const auto it = GetIdToUnprocessedPuts.find(ev->Cookie);
- Y_ABORT_UNLESS(it != GetIdToUnprocessedPuts.end());
- if (!--it->second) {
- GetIdToUnprocessedPuts.erase(it);
- ScanDataForCopying();
- }
+ const auto it = Gets.find(ev->Cookie);
+ Y_ABORT_UNLESS(it != Gets.end());
+ Gets.erase(it);
+ ScanDataForCopying();
}
void TAssimilator::Handle(TEvBlobStorage::TEvPutResult::TPtr ev) {
auto& msg = *ev->Get();
+
+ // adjust counters
(msg.Status == NKikimrProto::OK ? Self->AsStats.LatestOkPut : Self->AsStats.LatestErrorPut) = TInstant::Now();
- Self->JsonHandler.Invalidate();
if (msg.Status == NKikimrProto::OK) {
Self->TabletCounters->Cumulative()[NKikimrBlobDepot::COUNTER_DECOMMIT_PUT_OK_BYTES] += msg.Id.BlobSize();
++Self->AsStats.BlobsPutOk;
@@ -508,19 +511,33 @@ namespace NKikimr::NBlobDepot {
++Self->AsStats.BlobsPutError;
}
Self->JsonHandler.Invalidate();
- const auto it = PutIdToKey.find(ev->Cookie);
- Y_ABORT_UNLESS(it != PutIdToKey.end());
- const auto& [key, getId] = it->second;
+
+ // find matching put record
+ const auto it = Puts.find(ev->Cookie);
+ Y_ABORT_UNLESS(it != Puts.end());
+ auto [key, getId] = std::move(it->second);
+ Puts.erase(it);
+
STLOG(PRI_DEBUG, BLOB_DEPOT, BDT37, "got TEvPutResult", (Id, Self->GetLogId()), (Msg, msg),
- (NumGetsUnprocessed, GetIdToUnprocessedPuts.size()), (Key, key));
- Self->Data->ExecuteTxCommitAssimilatedBlob(msg.Status, TBlobSeqId::FromLogoBlobId(msg.Id), std::move(key),
- TEvPrivate::EvTxComplete, SelfId(), getId);
- PutIdToKey.erase(it);
+ (NumGets, Gets.size()), (Key, key));
+
+ // process get
+ const auto jt = Gets.find(getId);
+ Y_ABORT_UNLESS(jt != Gets.end());
+ TGetBatch& get = jt->second;
+ if (msg.Status == NKikimrProto::OK) { // mark blob assimilated only in case of success
+ get.AssimilatedBlobs.push_back({std::move(key), TData::TAssimilatedBlobInfo::TUpdate{
+ TBlobSeqId::FromLogoBlobId(msg.Id)}});
+ }
+ if (!--get.PutsPending) {
+ Self->Data->ExecuteTxCommitAssimilatedBlob(std::move(get.AssimilatedBlobs), TEvPrivate::EvTxComplete,
+ SelfId(), getId);
+ }
}
void TAssimilator::OnCopyDone() {
STLOG(PRI_DEBUG, BLOB_DEPOT, BDT38, "data copying is done", (Id, Self->GetLogId()));
- Y_ABORT_UNLESS(GetIdToUnprocessedPuts.empty());
+ Y_ABORT_UNLESS(Gets.empty());
class TTxFinishCopying : public NTabletFlatExecutor::TTransactionBase<TBlobDepot> {
TAssimilator* const Self;
@@ -699,10 +716,9 @@ namespace NKikimr::NBlobDepot {
json["d.copy_iteration"] = ToString(CopyIteration);
}
- void TBlobDepot::TData::ExecuteTxCommitAssimilatedBlob(NKikimrProto::EReplyStatus status, TBlobSeqId blobSeqId,
- TData::TKey key, ui32 notifyEventType, TActorId parentId, ui64 cookie, bool keep, bool doNotKeep) {
- Self->Execute(std::make_unique<TTxCommitAssimilatedBlob>(Self, status, blobSeqId, std::move(key),
- notifyEventType, parentId, cookie, keep, doNotKeep));
+ void TBlobDepot::TData::ExecuteTxCommitAssimilatedBlob(std::vector<TAssimilatedBlobInfo>&& blobs, ui32 notifyEventType,
+ TActorId parentId, ui64 cookie) {
+ Self->Execute(std::make_unique<TTxCommitAssimilatedBlob>(Self, std::move(blobs), notifyEventType, parentId, cookie));
}
void TBlobDepot::StartGroupAssimilator() {
diff --git a/ydb/core/blob_depot/assimilator.h b/ydb/core/blob_depot/assimilator.h
index d0e757e5ea..90b1e6899b 100644
--- a/ydb/core/blob_depot/assimilator.h
+++ b/ydb/core/blob_depot/assimilator.h
@@ -29,9 +29,13 @@ namespace NKikimr::NBlobDepot {
static constexpr ui32 MaxSizeToQuery = 16'000'000;
+ struct TGetBatch {
+ ui32 PutsPending = 0;
+ std::vector<TData::TAssimilatedBlobInfo> AssimilatedBlobs;
+ };
static constexpr ui32 MaxGetsUnprocessed = 5;
ui64 NextGetId = 1;
- std::unordered_map<ui64, ui32> GetIdToUnprocessedPuts;
+ THashMap<ui64, TGetBatch> Gets;
std::deque<TLogoBlobID> ScanQ;
ui32 TotalSize = 0;
@@ -39,7 +43,7 @@ namespace NKikimr::NBlobDepot {
TActorId PipeId;
ui64 NextPutId = 1;
- THashMap<ui64, std::tuple<TData::TKey, ui64>> PutIdToKey;
+ THashMap<ui64, std::tuple<TData::TKey, ui64>> Puts;
bool ActionInProgress = false;
bool ResumeScanDataForCopyingInFlight = false;
diff --git a/ydb/core/blob_depot/data.h b/ydb/core/blob_depot/data.h
index 30cbd27e89..e1e2649d93 100644
--- a/ydb/core/blob_depot/data.h
+++ b/ydb/core/blob_depot/data.h
@@ -405,6 +405,19 @@ namespace NKikimr::NBlobDepot {
#endif
};
+ struct TAssimilatedBlobInfo {
+ struct TDrop {};
+
+ struct TUpdate {
+ TBlobSeqId BlobSeqId;
+ bool Keep = false;
+ bool DoNotKeep = false;
+ };
+
+ TKey Key;
+ std::variant<TDrop, TUpdate> Action;
+ };
+
private:
struct TRecordWithTrash {};
@@ -730,7 +743,7 @@ namespace NKikimr::NBlobDepot {
bool IsLoaded() const { return Loaded; }
bool IsKeyLoaded(const TKey& key) const { return Loaded || LoadedKeys[key]; }
- bool EnsureKeyLoaded(const TKey& key, NTabletFlatExecutor::TTransactionContext& txc);
+ bool EnsureKeyLoaded(const TKey& key, NTabletFlatExecutor::TTransactionContext& txc, bool *progress = nullptr);
template<typename TRecord>
bool LoadMissingKeys(const TRecord& record, NTabletFlatExecutor::TTransactionContext& txc);
@@ -745,8 +758,8 @@ namespace NKikimr::NBlobDepot {
IActor *CreateResolveDecommitActor(TEvBlobDepot::TEvResolve::TPtr ev);
class TTxCommitAssimilatedBlob;
- void ExecuteTxCommitAssimilatedBlob(NKikimrProto::EReplyStatus status, TBlobSeqId blobSeqId, TData::TKey key,
- ui32 notifyEventType, TActorId parentId, ui64 cookie, bool keep = false, bool doNotKeep = false);
+ void ExecuteTxCommitAssimilatedBlob(std::vector<TAssimilatedBlobInfo>&& blobs, ui32 notifyEventType,
+ TActorId parentId, ui64 cookie);
class TTxResolve;
void ExecuteTxResolve(TEvBlobDepot::TEvResolve::TPtr ev, THashSet<TLogoBlobID>&& resolutionErrors = {});
diff --git a/ydb/core/blob_depot/data_decommit.cpp b/ydb/core/blob_depot/data_decommit.cpp
index c8f0417037..e6439062c8 100644
--- a/ydb/core/blob_depot/data_decommit.cpp
+++ b/ydb/core/blob_depot/data_decommit.cpp
@@ -13,7 +13,7 @@ namespace NKikimr::NBlobDepot {
TBlobDepot* const Self;
std::weak_ptr<TToken> Token;
std::shared_ptr<TToken> ActorToken = std::make_shared<TToken>();
- std::vector<TEvBlobStorage::TEvAssimilateResult::TBlob> DecommitBlobs;
+ std::deque<TEvBlobStorage::TEvAssimilateResult::TBlob> DecommitBlobs;
THashSet<TLogoBlobID> ResolutionErrors;
TEvBlobDepot::TEvResolve::TPtr Ev;
@@ -33,6 +33,161 @@ namespace NKikimr::NBlobDepot {
bool Finished = false;
+ std::vector<TAssimilatedBlobInfo> AssimilatedBlobs;
+
+ using TRange = std::tuple<ui64, TLogoBlobID, TLogoBlobID, bool>;
+ using TScan = std::tuple<TKey, TKey, TScanFlags, bool, std::optional<TRange>>;
+
+ class TTxPrepare : public NTabletFlatExecutor::TTransactionBase<TBlobDepot> {
+ TResolveDecommitActor *Actor;
+ std::weak_ptr<TToken> ActorToken;
+ int Index = 0;
+ std::deque<TScan> Scans;
+ std::optional<TScanRange> ScanRange;
+ bool IssueGets;
+ std::optional<TRange> IssueRangeAfter;
+
+ // transaction-local state
+ bool Progress = false;
+ bool RestartTx = false;
+
+ public:
+ TTxType GetTxType() const override { return NKikimrBlobDepot::TXTYPE_DECOMMIT_BLOBS; }
+
+ TTxPrepare(TResolveDecommitActor *actor, std::deque<TScan>&& scans)
+ : TTransactionBase(actor->Self)
+ , Actor(actor)
+ , ActorToken(Actor->ActorToken)
+ , Scans(std::move(scans))
+ {}
+
+ TTxPrepare(TTxPrepare& other)
+ : TTransactionBase(other.Self)
+ , Actor(other.Actor)
+ , ActorToken(std::move(other.ActorToken))
+ , Index(other.Index)
+ , Scans(std::move(other.Scans))
+ , ScanRange(std::move(other.ScanRange))
+ , IssueGets(other.IssueGets)
+ , IssueRangeAfter(std::move(other.IssueRangeAfter))
+ {}
+
+ bool Execute(TTransactionContext& txc, const TActorContext&) override {
+ if (ActorToken.expired()) {
+ return true;
+ }
+
+ auto checkProgress = [&] {
+ if (Progress) {
+ RestartTx = true;
+ return true;
+ } else {
+ return false;
+ }
+ };
+
+ // process pending scans
+ auto doScanRange = [&] {
+ auto callback = [&](const TKey& key, const TValue& value) {
+ if (IssueGets && value.GoingToAssimilate) {
+ InvokeOtherActor(*Actor, &TResolveDecommitActor::IssueGet, key.GetBlobId(), true /*mustRestoreFirst*/);
+ }
+ return true;
+ };
+ if (Self->Data->ScanRange(*ScanRange, &txc, &Progress, callback)) { // scan has been finished completely
+ ScanRange.reset();
+ if (IssueRangeAfter) {
+ std::apply([&](auto&&... args) {
+ InvokeOtherActor(*Actor, &TResolveDecommitActor::IssueRange, std::move(args)...);
+ }, *IssueRangeAfter);
+ }
+ return true;
+ } else { // some data remains
+ return false;
+ }
+ };
+ if (ScanRange && !doScanRange()) {
+ return checkProgress();
+ }
+ while (!Scans.empty()) {
+ auto& [from, to, flags, issueGets, issueRangeAfter] = Scans.front();
+ ScanRange.emplace(std::move(from), std::move(to), flags);
+ IssueGets = issueGets;
+ IssueRangeAfter = std::move(issueRangeAfter);
+ Scans.pop_front();
+
+ if (!doScanRange()) {
+ return checkProgress();
+ }
+ }
+
+ // process explicit items after doing all scans
+ for (auto& items = Actor->Ev->Get()->Record.GetItems(); Index < items.size(); ++Index) {
+ if (const auto& item = items[Index]; item.HasExactKey()) {
+ TData::TKey key = TKey::FromBinaryKey(item.GetExactKey(), Self->Config);
+ if (!Self->Data->EnsureKeyLoaded(key, txc, &Progress)) {
+ return checkProgress();
+ }
+ const TValue *value = Self->Data->FindKey(key);
+ const bool notYetAssimilated = Self->Data->LastAssimilatedBlobId < key.GetBlobId();
+ const bool doGet = !value ? notYetAssimilated :
+ value->GoingToAssimilate ? item.GetMustRestoreFirst() : notYetAssimilated;
+ if (doGet) {
+ InvokeOtherActor(*Actor, &TResolveDecommitActor::IssueGet, key.GetBlobId(),
+ item.GetMustRestoreFirst());
+ }
+ }
+ }
+
+ return true;
+ }
+
+ void Complete(const TActorContext&) override {
+ if (ActorToken.expired()) {
+ return;
+ } else if (RestartTx) {
+ Self->Execute(std::make_unique<TTxPrepare>(*this));
+ } else {
+ TActivationContext::Send(new IEventHandle(TEvPrivate::EvTxComplete, 0, Actor->SelfId(), {}, nullptr, 0));
+ }
+ }
+ };
+
+ class TTxDecommitBlobs : public NTabletFlatExecutor::TTransactionBase<TBlobDepot> {
+ THashSet<TLogoBlobID> ResolutionErrors;
+ std::deque<TEvBlobStorage::TEvAssimilateResult::TBlob> DecommitBlobs;
+ TEvBlobDepot::TEvResolve::TPtr Ev;
+
+ public:
+ TTxType GetTxType() const override { return NKikimrBlobDepot::TXTYPE_DECOMMIT_BLOBS; }
+
+ TTxDecommitBlobs(TBlobDepot *self, THashSet<TLogoBlobID>&& resolutionErrors,
+ std::deque<TEvBlobStorage::TEvAssimilateResult::TBlob>&& decommitBlobs,
+ TEvBlobDepot::TEvResolve::TPtr ev)
+ : TTransactionBase(self)
+ , ResolutionErrors(std::move(resolutionErrors))
+ , DecommitBlobs(std::move(decommitBlobs))
+ , Ev(ev)
+ {}
+
+ bool Execute(TTransactionContext& txc, const TActorContext&) override {
+ for (size_t num = 0; !DecommitBlobs.empty() && num < 10'000; DecommitBlobs.pop_front()) {
+ num += Self->Data->AddDataOnDecommit(DecommitBlobs.front(), txc, this);
+ }
+ return true;
+ }
+
+ void Complete(const TActorContext&) override {
+ Self->Data->CommitTrash(this);
+ if (DecommitBlobs.empty()) {
+ Self->Data->ExecuteTxResolve(Ev, std::move(ResolutionErrors));
+ } else {
+ Self->Execute(std::make_unique<TTxDecommitBlobs>(Self, std::move(ResolutionErrors),
+ std::move(DecommitBlobs), Ev));
+ }
+ }
+ };
+
public:
TResolveDecommitActor(TBlobDepot *self, TEvBlobDepot::TEvResolve::TPtr ev)
: Self(self)
@@ -48,19 +203,13 @@ namespace NKikimr::NBlobDepot {
STLOG(PRI_DEBUG, BLOB_DEPOT, BDT42, "TResolveDecommitActor::Bootstrap", (Id, Self->GetLogId()),
(Sender, Ev->Sender), (Cookie, Ev->Cookie));
- Self->Execute(std::make_unique<TCoroTx>(Self, TTokens{{Token, ActorToken}}, std::bind(&TThis::TxPrepare,
- this, std::placeholders::_1)));
- ++TxInFlight;
- Become(&TThis::StateFunc);
- }
+ std::deque<TScan> scans;
- void TxPrepare(TCoroTx::TContextBase& tx) {
for (const auto& item : Ev->Get()->Record.GetItems()) {
switch (item.GetKeyDesignatorCase()) {
case NKikimrBlobDepot::TEvResolve::TItem::kKeyRange: {
if (!item.HasTabletId()) {
- tx.FinishTx();
- return FinishWithError(NLog::PRI_CRIT, "incorrect request");
+ return FinishWithError(NLog::PRI_CRIT, "incorrect request: tablet id not set");
}
const ui64 tabletId = item.GetTabletId();
@@ -76,76 +225,46 @@ namespace NKikimr::NBlobDepot {
TLogoBlobID::MaxBlobSize, TLogoBlobID::MaxCookie, TLogoBlobID::MaxPartId,
TLogoBlobID::MaxCrcMode);
+ if (maxId < minId) {
+ return FinishWithError(NLog::PRI_CRIT, "incorrect request: ending key goes before beginning one");
+ }
+
Y_ABORT_UNLESS(minId <= maxId);
if (Self->Data->LastAssimilatedBlobId < maxId) {
// adjust minId to skip already assimilated items in range query
if (minId < Self->Data->LastAssimilatedBlobId) {
if (item.GetMustRestoreFirst()) {
- ScanRange(tx, TKey(minId), TKey(*Self->Data->LastAssimilatedBlobId),
- EScanFlags::INCLUDE_BEGIN, true /*issueGets*/);
+ scans.emplace_back(TKey(minId), TKey(*Self->Data->LastAssimilatedBlobId),
+ EScanFlags::INCLUDE_BEGIN, true, std::nullopt);
}
minId = *Self->Data->LastAssimilatedBlobId;
}
// prepare the range first -- we must have it loaded in memory
- ScanRange(tx, TKey(minId), TKey(maxId), EScanFlags::INCLUDE_BEGIN | EScanFlags::INCLUDE_END,
- false /*issueGets*/);
-
- // issue scan query
- IssueRange(tabletId, minId, maxId, item.GetMustRestoreFirst());
+ scans.emplace_back(TKey(minId), TKey(maxId),
+ EScanFlags::INCLUDE_BEGIN | EScanFlags::INCLUDE_END, false,
+ std::make_tuple(tabletId, minId, maxId, item.GetMustRestoreFirst()));
} else if (item.GetMustRestoreFirst()) {
- ScanRange(tx, TKey(minId), TKey(maxId), EScanFlags::INCLUDE_BEGIN | EScanFlags::INCLUDE_END,
- true /*issueGets*/);
+ scans.emplace_back(TKey(minId), TKey(maxId),
+ EScanFlags::INCLUDE_BEGIN | EScanFlags::INCLUDE_END, true, std::nullopt);
}
break;
}
- case NKikimrBlobDepot::TEvResolve::TItem::kExactKey: {
- TData::TKey key = TKey::FromBinaryKey(item.GetExactKey(), Self->Config);
- while (!Self->Data->EnsureKeyLoaded(key, *tx)) {
- tx.RestartTx();
- }
- const TValue *value = Self->Data->FindKey(key);
- const bool notYetAssimilated = Self->Data->LastAssimilatedBlobId < key.GetBlobId();
- const bool doGet = !value ? notYetAssimilated :
- value->GoingToAssimilate ? item.GetMustRestoreFirst() : notYetAssimilated;
- if (doGet) {
- IssueGet(key.GetBlobId(), item.GetMustRestoreFirst());
- }
+ case NKikimrBlobDepot::TEvResolve::TItem::kExactKey:
+ // this would be processed inside the tx
break;
- }
case NKikimrBlobDepot::TEvResolve::TItem::KEYDESIGNATOR_NOT_SET:
- Y_DEBUG_ABORT_UNLESS(false);
- break;
+ return FinishWithError(NLog::PRI_CRIT, "incorrect request: key designator not set");
}
}
- tx.FinishTx();
- TActivationContext::Send(new IEventHandle(TEvPrivate::EvTxComplete, 0, SelfId(), {}, nullptr, 0));
- }
-
- void ScanRange(TCoroTx::TContextBase& tx, TKey from, TKey to, TScanFlags flags, bool issueGets) {
- bool progress = false;
-
- auto callback = [&](const TKey& key, const TValue& value) {
- if (issueGets && value.GoingToAssimilate) {
- IssueGet(key.GetBlobId(), true /*mustRestoreFirst*/);
- }
- return true;
- };
-
- TScanRange r{from, to, flags};
- while (!Self->Data->ScanRange(r, tx.GetTxc(), &progress, callback)) {
- if (std::exchange(progress, false)) {
- tx.FinishTx();
- tx.RunSuccessorTx();
- } else {
- tx.RestartTx();
- }
- }
+ Self->Execute(std::make_unique<TTxPrepare>(this, std::move(scans)));
+ ++TxInFlight;
+ Become(&TThis::StateFunc);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -189,7 +308,6 @@ namespace NKikimr::NBlobDepot {
Y_ABORT_UNLESS(RangesInFlight);
--RangesInFlight;
- CheckIfDone();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -238,9 +356,10 @@ namespace NKikimr::NBlobDepot {
DecommitBlobs.push_back({r.Id, r.Keep, r.DoNotKeep});
}
} else if (r.Status == NKikimrProto::NODATA) {
- Self->Data->ExecuteTxCommitAssimilatedBlob(NKikimrProto::NODATA, TBlobSeqId(), TData::TKey(r.Id),
- TEvPrivate::EvTxComplete, SelfId(), 0);
- ++TxInFlight;
+ AssimilatedBlobs.push_back({TData::TKey(r.Id), TAssimilatedBlobInfo::TDrop{}});
+ if (AssimilatedBlobs.size() >= 10'000) {
+ IssueTxCommitAssimilatedBlob();
+ }
} else {
// mark this specific key as unresolvable
ResolutionErrors.emplace(r.Id);
@@ -251,9 +370,7 @@ namespace NKikimr::NBlobDepot {
Y_ABORT_UNLESS(GetBytesInFlight >= ev->Cookie);
--GetsInFlight;
GetBytesInFlight -= ev->Cookie;
-
ProcessGetQueue();
- CheckIfDone();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -298,14 +415,16 @@ namespace NKikimr::NBlobDepot {
if (msg.Status != NKikimrProto::OK) { // do not reply OK to this item
ResolutionErrors.insert(key.GetBlobId());
+ } else {
+ AssimilatedBlobs.push_back({std::move(key), TAssimilatedBlobInfo::TUpdate{
+ TBlobSeqId::FromLogoBlobId(msg.Id), keep, doNotKeep}});
+ if (AssimilatedBlobs.size() >= 10'000) {
+ IssueTxCommitAssimilatedBlob();
+ }
}
Y_ABORT_UNLESS(PutsInFlight);
--PutsInFlight;
-
- Self->Data->ExecuteTxCommitAssimilatedBlob(msg.Status, TBlobSeqId::FromLogoBlobId(msg.Id), std::move(key),
- TEvPrivate::EvTxComplete, SelfId(), 0, keep, doNotKeep);
- ++TxInFlight;
}
void HandleTxComplete() {
@@ -315,7 +434,6 @@ namespace NKikimr::NBlobDepot {
Y_ABORT_UNLESS(TxInFlight);
--TxInFlight;
- CheckIfDone();
}
void CheckIfDone() {
@@ -323,6 +441,10 @@ namespace NKikimr::NBlobDepot {
return;
}
+ if (!AssimilatedBlobs.empty()) {
+ return IssueTxCommitAssimilatedBlob();
+ }
+
Y_ABORT_UNLESS(!Finished);
Finished = true;
@@ -330,26 +452,16 @@ namespace NKikimr::NBlobDepot {
(Cookie, Ev->Cookie), (ResolutionErrors.size, ResolutionErrors.size()),
(DecommitBlobs.size, DecommitBlobs.size()));
- Self->Execute(std::make_unique<TCoroTx>(Self, TTokens{{Token}}, [self = Self, decommitBlobs = std::move(DecommitBlobs),
- ev = Ev, resolutionErrors = std::move(ResolutionErrors)](TCoroTx::TContextBase& tx) mutable {
- ui32 numItemsProcessed = 0;
- for (const auto& blob : decommitBlobs) {
- if (numItemsProcessed == 10'000) {
- tx.FinishTx();
- self->Data->CommitTrash(&tx);
- numItemsProcessed = 0;
- tx.RunSuccessorTx();
- }
- numItemsProcessed += self->Data->AddDataOnDecommit(blob, *tx, &tx);
- }
- tx.FinishTx();
- self->Data->CommitTrash(&tx);
- self->Data->ExecuteTxResolve(ev, std::move(resolutionErrors));
- }));
-
+ Self->Execute(std::make_unique<TTxDecommitBlobs>(Self, std::move(ResolutionErrors), std::move(DecommitBlobs), Ev));
PassAway();
}
+ void IssueTxCommitAssimilatedBlob() {
+ Self->Data->ExecuteTxCommitAssimilatedBlob(std::exchange(AssimilatedBlobs, {}), TEvPrivate::EvTxComplete,
+ SelfId(), 0);
+ ++TxInFlight;
+ }
+
void FinishWithError(NLog::EPriority prio, TString errorReason) {
Y_ABORT_UNLESS(!Finished);
Finished = true;
@@ -377,6 +489,8 @@ namespace NKikimr::NBlobDepot {
STLOG(PRI_CRIT, BLOB_DEPOT, BDT90, "unexpected event", (Id, Self->GetLogId()), (Type, type));
break;
}
+
+ CheckIfDone();
}
};
diff --git a/ydb/core/blob_depot/data_load.cpp b/ydb/core/blob_depot/data_load.cpp
index 53f58ab985..950238bb50 100644
--- a/ydb/core/blob_depot/data_load.cpp
+++ b/ydb/core/blob_depot/data_load.cpp
@@ -159,7 +159,7 @@ namespace NKikimr::NBlobDepot {
}
}
- bool TData::EnsureKeyLoaded(const TKey& key, NTabletFlatExecutor::TTransactionContext& txc) {
+ bool TData::EnsureKeyLoaded(const TKey& key, NTabletFlatExecutor::TTransactionContext& txc, bool *progress) {
if (IsKeyLoaded(key)) {
return true;
}
@@ -174,6 +174,9 @@ namespace NKikimr::NBlobDepot {
AddDataOnLoad(key, row.GetValue<Table::Value>(), row.GetValueOrDefault<Table::UncertainWrite>());
}
Self->Data->LoadedKeys |= {key, key};
+ if (progress) {
+ *progress = true;
+ }
return true;
}
}
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp
index c08f72d87e..8c65537b2e 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_get.cpp
@@ -130,6 +130,13 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActor {
DiskCounters.resize(orderNumber + 1);
}
DiskCounters[orderNumber].Sent++;
+
+ LWTRACK(
+ DSProxyVGetSent, Orbit,
+ vDiskId.ToStringWOGeneration(),
+ orderNumber,
+ vGets.size()
+ );
}
for (size_t i = 0; i < vPuts.size(); ++i) {
if (RootCauseTrack.IsOn) {
@@ -143,6 +150,16 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActor {
DiskCounters.resize(orderNumber + 1);
}
DiskCounters[orderNumber].Sent++;
+
+ LWTRACK(
+ DSProxyVPutSent, Orbit,
+ vPuts[i]->Type(),
+ vDiskId.ToStringWOGeneration(),
+ Info->GetFailDomainOrderNumber(vDiskId),
+ 1,
+ vPuts[i]->GetBufferBytes(),
+ true
+ );
}
for (auto& ev : vGets) {
const ui64 cookie = ev->Record.GetCookie();
@@ -202,7 +219,7 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActor {
const TVDiskID vdisk = VDiskIDFromVDiskID(record.GetVDiskID());
const TVDiskIdShort shortId(vdisk);
- LWPROBE(DSProxyVDiskRequestDuration, TEvBlobStorage::EvVGet, totalSize, tabletId, vdisk.GroupID.GetRawId(), channel,
+ LWTRACK(DSProxyVDiskRequestDuration, Orbit, TEvBlobStorage::EvVGet, totalSize, tabletId, vdisk.GroupID.GetRawId(), channel,
Info->GetFailDomainOrderNumber(shortId),
GetStartTime(record.GetTimestamps()),
GetTotalTimeMs(record.GetTimestamps()),
@@ -285,7 +302,7 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActor {
const TLogoBlobID blob = GetFirstBlobId(ev);
ui64 sumBlobSize = SumBlobSize(ev);
- LWPROBE(DSProxyVDiskRequestDuration, TEvBlobStorage::EvVPut, sumBlobSize, blob.TabletID(),
+ LWTRACK(DSProxyVDiskRequestDuration, Orbit, TEvBlobStorage::EvVPut, sumBlobSize, blob.TabletID(),
Info->GroupID.GetRawId(), blob.Channel(), Info->GetFailDomainOrderNumber(shortId),
GetStartTime(record.GetTimestamps()),
GetTotalTimeMs(record.GetTimestamps()),
@@ -333,8 +350,9 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActor {
TDuration timeToAccelerate = TDuration::MicroSeconds(timeToAccelerateUs);
TMonotonic now = TActivationContext::Monotonic();
TMonotonic nextAcceleration = RequestStartTime + timeToAccelerate;
+ LWTRACK(DSProxyScheduleAccelerate, Orbit, nextAcceleration > now ? (nextAcceleration - now).MicroSeconds() / 1000.0 : 0.0, "Get");
if (nextAcceleration > now) {
- ui64 causeIdx = RootCauseTrack.RegisterAccelerate();
+ ui64 causeIdx = RootCauseTrack.RegisterAccelerate("Get");
Schedule(nextAcceleration - now, new TEvAccelerateGet(causeIdx));
IsGetAccelerateScheduled = true;
} else {
@@ -352,8 +370,9 @@ class TBlobStorageGroupGetRequest : public TBlobStorageGroupRequestActor {
TDuration timeToAccelerate = TDuration::MicroSeconds(timeToAccelerateUs);
TMonotonic now = TActivationContext::Monotonic();
TMonotonic nextAcceleration = RequestStartTime + timeToAccelerate;
+ LWTRACK(DSProxyScheduleAccelerate, Orbit, nextAcceleration > now ? (nextAcceleration - now).MicroSeconds() / 1000.0 : 0.0, "Put");
if (nextAcceleration > now) {
- ui64 causeIdx = RootCauseTrack.RegisterAccelerate();
+ ui64 causeIdx = RootCauseTrack.RegisterAccelerate("Put");
Schedule(nextAcceleration - now, new TEvAcceleratePut(causeIdx));
IsPutAccelerateScheduled = true;
} else {
@@ -461,6 +480,13 @@ public:
LWTRACK(DSProxyGetBootstrap, Orbit);
+ LWTRACK(
+ DSProxyGetRequest, Orbit,
+ Info->GroupID.GetRawId(),
+ DeviceTypeStr(Info->GetDeviceType(), true),
+ NKikimrBlobStorage::EGetHandleClass_Name(GetImpl.GetHandleClass())
+ );
+
TDeque<std::unique_ptr<TEvBlobStorage::TEvVGet>> vGets;
TDeque<std::unique_ptr<TEvBlobStorage::TEvVPut>> vPuts;
GetImpl.GenerateInitialRequests(LogCtx, vGets);
diff --git a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp
index 3426728b1a..9153e4a5f4 100644
--- a/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp
+++ b/ydb/core/blobstorage/dsproxy/dsproxy_put.cpp
@@ -426,9 +426,9 @@ class TBlobStorageGroupPutRequest : public TBlobStorageGroupRequestActor {
TDuration timeToAccelerate = TDuration::MicroSeconds(timeToAccelerateUs);
TMonotonic now = TActivationContext::Monotonic();
TMonotonic nextAcceleration = RequestStartTime + timeToAccelerate;
- LWTRACK(DSProxyScheduleAccelerate, Orbit, nextAcceleration > now ? (nextAcceleration - now).MicroSeconds() / 1000.0 : 0.0);
+ LWTRACK(DSProxyScheduleAccelerate, Orbit, nextAcceleration > now ? (nextAcceleration - now).MicroSeconds() / 1000.0 : 0.0, "Put");
if (nextAcceleration > now) {
- ui64 causeIdx = RootCauseTrack.RegisterAccelerate();
+ ui64 causeIdx = RootCauseTrack.RegisterAccelerate("Put");
Schedule(nextAcceleration - now, new TEvAccelerate(causeIdx));
IsAccelerateScheduled = true;
} else {
diff --git a/ydb/core/blobstorage/dsproxy/root_cause.h b/ydb/core/blobstorage/dsproxy/root_cause.h
index fcb2b89f49..eaf66668ab 100644
--- a/ydb/core/blobstorage/dsproxy/root_cause.h
+++ b/ydb/core/blobstorage/dsproxy/root_cause.h
@@ -16,13 +16,15 @@ struct TRootCause {
ui64 TransferCycles;
ui64 VDiskReplyCycles;
bool IsAccelerate;
+ TString RequestType;
- TRootCauseItem(ui64 causeIdx, ui64 startCycles, bool isAccelerate)
+ TRootCauseItem(ui64 causeIdx, ui64 startCycles, bool isAccelerate, const TString& requestType)
: CauseIdx(causeIdx)
, StartCycles(startCycles)
, TransferCycles(startCycles)
, VDiskReplyCycles(startCycles)
, IsAccelerate(isAccelerate)
+ , RequestType(requestType)
{}
};
static constexpr ui64 InvalidCauseIdx = 255;
@@ -48,6 +50,7 @@ struct TRootCause {
}
NLWTrace::TParams params;
if (item.IsAccelerate) {
+ params.Param[1].CopyConstruct<TString>(item.RequestType);
orbit.AddProbe(&LWTRACE_GET_NAME(DSProxyScheduleAccelerate).Probe, params, item.StartCycles);
} else {
orbit.AddProbe(&LWTRACE_GET_NAME(DSProxyStartTransfer).Probe, params, item.StartCycles);
@@ -59,18 +62,18 @@ struct TRootCause {
#endif //LWTRACE_DISABLE
}
- ui64 RegisterCause() {
+ ui64 RegisterCause(const TString& requestType = "") {
if (IsOn && Items.size() < InvalidCauseIdx - 1) {
- Items.emplace_back(CurrentCauseIdx, GetCycleCountFast(), false);
+ Items.emplace_back(CurrentCauseIdx, GetCycleCountFast(), false, requestType);
return Items.size() - 1;
} else {
return InvalidCauseIdx;
}
}
- ui64 RegisterAccelerate() {
+ ui64 RegisterAccelerate(const TString& requestType) {
if (IsOn && Items.size() < InvalidCauseIdx - 1) {
- Items.emplace_back(CurrentCauseIdx, GetCycleCountFast(), true);
+ Items.emplace_back(CurrentCauseIdx, GetCycleCountFast(), true, requestType);
return Items.size() - 1;
} else {
return InvalidCauseIdx;
diff --git a/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp b/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp
index 41f36cb8ce..bc3041983d 100644
--- a/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp
+++ b/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp
@@ -651,7 +651,6 @@ Y_UNIT_TEST(TestGivenMirror3DCGetWithFirstSlowDisk) {
TTestState testState(runtime, type, DSProxyEnv.Info);
-
TEvBlobStorage::TEvGet::TPtr ev = testState.CreateGetRequest({blobId}, false);
TActorId getActorId = runtime.Register(DSProxyEnv.CreateGetRequestActor(ev, NKikimrBlobStorage::TabletLog).release());
runtime.EnableScheduleForActor(getActorId);
@@ -704,7 +703,6 @@ Y_UNIT_TEST(TestGivenBlock42GetThenVGetResponseParts2523Nodata4ThenGetOk) {
UNIT_ASSERT(getResult->Responses[0].Status == NKikimrProto::OK);
}
-
struct TBlobPack {
ui32 Count;
ui32 DataLength;
@@ -1285,6 +1283,141 @@ Y_UNIT_TEST(TestGivenBlock42Put6PartsOnOneVDiskWhenDiscoverThenRecoverFirst) {
}
}
+Y_UNIT_TEST(TestBlock42CheckLwtrack) {
+ NLWTrace::TManager mngr(*Singleton<NLWTrace::TProbeRegistry>(), true);
+ NLWTrace::TOrbit orbit;
+ NLWTrace::TTraceRequest req;
+ req.SetIsTraced(true);
+ mngr.HandleTraceRequest(req, orbit);
+
+
+ TTestBasicRuntime runtime(1, false);
+ TBlobStorageGroupType type = {TErasureType::Erasure4Plus2Block};
+ Setup(runtime, type);
+ runtime.SetLogPriority(NKikimrServices::BS_PROXY_GET, NLog::PRI_DEBUG);
+
+ TActorId proxy = MakeBlobStorageProxyID(GROUP_ID);
+ TActorId sender = runtime.AllocateEdgeActor(0);
+
+ TString data("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
+ TLogoBlobID logoblobid(1, 0, 0, 0, (ui32)data.size(), 0);
+
+ TVector<TVDiskState> subgroup;
+ PrepareBlobSubgroup(logoblobid, data, subgroup, runtime, type);
+
+ auto ev = new TEvBlobStorage::TEvGet(logoblobid, 0, 0, TInstant::Max(),
+ NKikimrBlobStorage::EGetHandleClass::FastRead);
+ ev->Orbit = std::move(orbit);
+
+ runtime.Send(new IEventHandle(proxy, sender, ev));
+ for (ui32 i = 0; i < 6; ++i) {
+ TAutoPtr<IEventHandle> handle;
+ auto vget = runtime.GrabEdgeEventRethrow<TEvBlobStorage::TEvVGet>(handle);
+ UNIT_ASSERT(vget);
+ for (size_t idx = 0; idx < subgroup.size(); ++idx) {
+ if (subgroup[idx].ActorId == handle->Recipient) {
+ subgroup[idx].SetCookiesAndSenderFrom(handle.Get(), vget);
+ }
+ }
+ }
+
+ SendVGetResult(6, NKikimrProto::OK, 2, subgroup, runtime);
+ SendVGetResult(4, NKikimrProto::OK, 5, subgroup, runtime);
+ SendVGetResult(1, NKikimrProto::OK, 2, subgroup, runtime);
+ SendVGetResult(2, NKikimrProto::OK, 3, subgroup, runtime);
+ SendVGetResult(7, NKikimrProto::NODATA, 1, subgroup, runtime);
+ SendVGetResult(3, NKikimrProto::OK, 4, subgroup, runtime);
+ SendVGetResult(5, NKikimrProto::OK, 6, subgroup, runtime);
+ SendVGetResult(0, NKikimrProto::OK, 1, subgroup, runtime);
+
+ TAutoPtr<IEventHandle> handle;
+ auto getResult = runtime.GrabEdgeEventRethrow<TEvBlobStorage::TEvGetResult>(handle);
+ UNIT_ASSERT(getResult);
+ UNIT_ASSERT(getResult->Status == NKikimrProto::OK);
+ UNIT_ASSERT(getResult->ResponseSz == 1);
+ UNIT_ASSERT(getResult->Responses[0].Status == NKikimrProto::OK);
+
+ NLWTrace::TTraceResponse resp;
+ getResult->Orbit.Serialize(0, *resp.MutableTrace());
+ auto& r = resp.GetTrace();
+ UNIT_ASSERT_VALUES_EQUAL(21, r.EventsSize());
+
+ {
+ const auto& p = r.GetEvents(0);
+ UNIT_ASSERT_VALUES_EQUAL("BLOBSTORAGE_PROVIDER", p.GetProvider());
+ UNIT_ASSERT_VALUES_EQUAL("DSProxyGetHandle", p.GetName());
+ UNIT_ASSERT_VALUES_EQUAL(0 , p.ParamsSize());
+ }
+
+ {
+ const auto& p = r.GetEvents(1);
+ UNIT_ASSERT_VALUES_EQUAL("DSProxyGetBootstrap", p.GetName());
+ UNIT_ASSERT_VALUES_EQUAL(0 , p.ParamsSize());
+ }
+
+ {
+ const auto& p = r.GetEvents(2);
+ UNIT_ASSERT_VALUES_EQUAL("DSProxyGetRequest", p.GetName());
+ // check groupId
+ UNIT_ASSERT_VALUES_EQUAL(0, p.GetParams(0).GetUintValue());
+ // check deviceType
+ UNIT_ASSERT_VALUES_EQUAL("DEVICE_TYPE_UNKNOWN(255)", p.GetParams(1).GetStrValue());
+ // check handleClass
+ UNIT_ASSERT_VALUES_EQUAL("FastRead", p.GetParams(2).GetStrValue());
+ }
+
+ TVector<ui32> vdiskOrderNum = {0, 1, 4, 5, 6, 7};
+ for (auto i = 3; i < 9; ++i) {
+ const auto& p = r.GetEvents(i);
+ UNIT_ASSERT_VALUES_EQUAL("DSProxyVGetSent", p.GetName());
+ // check vdiskId
+ UNIT_ASSERT_VALUES_EQUAL("[0:_:0:" + ToString(vdiskOrderNum[i-3]) + ":0]", p.GetParams(0).GetStrValue());
+ // check vdiskOrderNum
+ UNIT_ASSERT_VALUES_EQUAL(vdiskOrderNum[i-3], p.GetParams(1).GetUintValue());
+ // check vgets count
+ UNIT_ASSERT_VALUES_EQUAL(6, p.GetParams(2).GetUintValue());
+ }
+
+ for (auto i = 9; i < 13; ++i) {
+ const auto& p = r.GetEvents(i);
+ UNIT_ASSERT_VALUES_EQUAL("DSProxyVDiskRequestDuration", p.GetName());
+ }
+
+ for (auto i = 13; i < 15; ++i) {
+ const auto& p = r.GetEvents(i);
+ UNIT_ASSERT_VALUES_EQUAL("DSProxyScheduleAccelerate", p.GetName());
+ UNIT_ASSERT_VALUES_EQUAL("Get", p.GetParams(1).GetStrValue());
+ }
+
+ for (auto i = 15; i < 17; ++i) {
+ const auto& p = r.GetEvents(i);
+ UNIT_ASSERT_VALUES_EQUAL("DSProxyVDiskRequestDuration", p.GetName());
+ }
+
+ {
+ const auto& p = r.GetEvents(17);
+ UNIT_ASSERT_VALUES_EQUAL("DSProxyStartTransfer", p.GetName());
+ UNIT_ASSERT_VALUES_EQUAL(0 , p.ParamsSize());
+ }
+
+ {
+ const auto& p = r.GetEvents(18);
+ UNIT_ASSERT_VALUES_EQUAL("VDiskStartProcessing", p.GetName());
+ UNIT_ASSERT_VALUES_EQUAL(0 , p.ParamsSize());
+ }
+
+ {
+ const auto& p = r.GetEvents(19);
+ UNIT_ASSERT_VALUES_EQUAL("VDiskReply", p.GetName());
+ UNIT_ASSERT_VALUES_EQUAL(0 , p.ParamsSize());
+ }
+
+ {
+ const auto& p = r.GetEvents(20);
+ UNIT_ASSERT_VALUES_EQUAL("DSProxyGetReply", p.GetName());
+ UNIT_ASSERT_VALUES_EQUAL(0 , p.ParamsSize());
+ }
+}
} // Y_UNIT_TEST_SUITE TBlobStorageProxySequenceTest
} // namespace NBlobStorageProxySequenceTest
diff --git a/ydb/core/blobstorage/lwtrace_probes/blobstorage_probes.h b/ydb/core/blobstorage/lwtrace_probes/blobstorage_probes.h
index fa03fd03e7..17626ab65a 100644
--- a/ydb/core/blobstorage/lwtrace_probes/blobstorage_probes.h
+++ b/ydb/core/blobstorage/lwtrace_probes/blobstorage_probes.h
@@ -284,6 +284,8 @@ struct TEventTypeField {
PROBE(DSProxyGetBootstrap, GROUPS("DSProxy"), TYPES(), NAMES()) \
PROBE(DSProxyGetHandle, GROUPS("DSProxy", "LWTrackStart"), TYPES(), NAMES()) \
PROBE(DSProxyGetReply, GROUPS("DSProxy"), TYPES(), NAMES()) \
+ PROBE(DSProxyVGetSent, GROUPS("DSProxy"), TYPES(TString, ui32, ui32), NAMES("vDiskId", "vdiskOrderNum", "count")) \
+ PROBE(DSProxyGetRequest, GROUPS("DSProxy", "LWTrackStart"), TYPES(ui32, TString, TString), NAMES("groupId", "deviceType", "handleClass")) \
PROBE(DSProxyPutEnqueue, GROUPS("DSProxy", "LWTrackStart"), TYPES(), NAMES()) \
PROBE(DSProxyPutHandle, GROUPS("DSProxyRequest", "DSProxy", "LWTrackStart"), TYPES(), NAMES()) \
PROBE(DSProxyPutBootstrapStart, GROUPS("DSProxy"), TYPES(), NAMES()) \
@@ -293,7 +295,7 @@ struct TEventTypeField {
PROBE(DSProxyPutReply, GROUPS("DSProxy"), TYPES(TString, TString, TString), NAMES("blobId", "status", "errorReason")) \
PROBE(DSProxyPutResumeBootstrap, GROUPS("DSProxy"), TYPES(), NAMES()) \
PROBE(DSProxyPutPauseBootstrap, GROUPS("DSProxy"), TYPES(), NAMES()) \
- PROBE(DSProxyScheduleAccelerate, GROUPS("DSProxy"), TYPES(double), NAMES("timeBeforeAccelerationMs")) \
+ PROBE(DSProxyScheduleAccelerate, GROUPS("DSProxy"), TYPES(double, TString), NAMES("timeBeforeAccelerationMs", "reqType")) \
PROBE(DSProxyStartTransfer, GROUPS("DSProxy"), TYPES(), NAMES()) \
PROBE(VDiskStartProcessing, GROUPS("DSProxy"), TYPES(), NAMES()) \
PROBE(VDiskReply, GROUPS("DSProxy"), TYPES(), NAMES()) \
diff --git a/ydb/core/blobstorage/nodewarden/distconf_binding.cpp b/ydb/core/blobstorage/nodewarden/distconf_binding.cpp
index bea763fc0e..45b75246ff 100644
--- a/ydb/core/blobstorage/nodewarden/distconf_binding.cpp
+++ b/ydb/core/blobstorage/nodewarden/distconf_binding.cpp
@@ -386,6 +386,12 @@ namespace NKikimr::NStorage {
STLOG(PRI_DEBUG, BS_NODE, NWDC02, "TEvNodeConfigPush", (NodeId, senderNodeId), (Cookie, ev->Cookie),
(SessionId, ev->InterconnectSession), (Binding, Binding), (Record, record));
+ if (!NodeIdsSet.contains(senderNodeId)) {
+            // node has already been deleted from the config, but a new subscription is coming through -- ignoring it
+ SendEvent(*ev, TEvNodeConfigReversePush::MakeRejected());
+ return;
+ }
+
// check if we can't accept this message (or else it would make a cycle)
if (record.GetInitial() && senderNodeId == GetRootNodeId()) {
STLOG(PRI_DEBUG, BS_NODE, NWDC28, "TEvNodeConfigPush rejected", (NodeId, senderNodeId),
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk.h
index d20f192a3e..4319877d28 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk.h
@@ -38,7 +38,7 @@ struct TCommitRecord {
TChunkIdx lastIdx = chunks[0];
for (size_t i = 1; i < chunks.size(); ) {
if (chunks[i] == lastIdx) {
- //Y_ABORT_UNLESS(false);
+ //Y_VERIFY(false);
chunks.erase(chunks.begin() + i);
} else {
lastIdx = chunks[i];
@@ -181,7 +181,7 @@ struct TEvYardInitResult : TEventLocal<TEvYardInitResult, TEvBlobStorage::EvYard
, PDiskParams(new TPDiskParams(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, DEVICE_TYPE_ROT))
, ErrorReason(std::move(errorReason))
{
- Y_ABORT_UNLESS(status != NKikimrProto::OK, "Single-parameter constructor is for error responses only");
+ Y_VERIFY(status != NKikimrProto::OK, "Single-parameter constructor is for error responses only");
}
TEvYardInitResult(NKikimrProto::EReplyStatus status, ui64 seekTimeUs, ui64 readSpeedBps,
@@ -263,7 +263,7 @@ struct TEvLog : TEventLocal<TEvLog, TEvBlobStorage::EvLog> {
, Cookie(cookie)
, LogCallback(std::move(cb))
{
- Y_ABORT_UNLESS(Owner);
+ Y_VERIFY(Owner);
REQUEST_VALGRIND_CHECK_MEM_IS_DEFINED(&owner, sizeof(owner));
REQUEST_VALGRIND_CHECK_MEM_IS_DEFINED(&ownerRound, sizeof(ownerRound));
REQUEST_VALGRIND_CHECK_MEM_IS_DEFINED(&signature, sizeof(signature));
@@ -338,8 +338,8 @@ struct TEvMultiLog : TEventLocal<TEvMultiLog, TEvBlobStorage::EvMultiLog> {
if (Logs.size() == 1) {
LsnSeg = TLsnSeg(log.LsnSegmentStart, log.Lsn);
} else {
- Y_VERIFY_S(LsnSeg.Last + 1 == log.LsnSegmentStart, "LastLsn# " << LsnSeg.Last <<
- " NewLsnStart# " << log.LsnSegmentStart);
+ Y_VERIFY_S(LsnSeg.Last + 1 == log.LsnSegmentStart,
+ "LastLsn# " << LsnSeg.Last << " NewLsnStart# " << log.LsnSegmentStart);
LsnSeg.Last = log.Lsn;
}
}
@@ -537,7 +537,7 @@ struct TEvChunkLock : TEventLocal<TEvChunkLock, TEvBlobStorage::EvChunkLock> {
, Count(count)
, Color(color)
{
- Y_DEBUG_ABORT_UNLESS(from != ELockFrom::PERSONAL_QUOTA);
+ Y_VERIFY_DEBUG(from != ELockFrom::PERSONAL_QUOTA);
}
TEvChunkLock(ELockFrom from, TOwner owner, ui32 count, NKikimrBlobStorage::TPDiskSpaceColor::E color)
@@ -547,7 +547,7 @@ struct TEvChunkLock : TEventLocal<TEvChunkLock, TEvBlobStorage::EvChunkLock> {
, Count(count)
, Color(color)
{
- Y_DEBUG_ABORT_UNLESS(from == ELockFrom::PERSONAL_QUOTA);
+ Y_VERIFY_DEBUG(from == ELockFrom::PERSONAL_QUOTA);
}
TEvChunkLock(ELockFrom from, TVDiskID vdiskId, bool isGenerationSet, ui32 count, NKikimrBlobStorage::TPDiskSpaceColor::E color)
@@ -558,7 +558,7 @@ struct TEvChunkLock : TEventLocal<TEvChunkLock, TEvBlobStorage::EvChunkLock> {
, Count(count)
, Color(color)
{
- Y_DEBUG_ABORT_UNLESS(from == ELockFrom::PERSONAL_QUOTA);
+ Y_VERIFY_DEBUG(from == ELockFrom::PERSONAL_QUOTA);
}
TString ToString() const {
@@ -636,7 +636,7 @@ struct TEvChunkUnlock : TEventLocal<TEvChunkUnlock, TEvBlobStorage::EvChunkUnloc
TEvChunkUnlock(TEvChunkLock::ELockFrom lockFrom)
: LockFrom(lockFrom)
{
- Y_DEBUG_ABORT_UNLESS(LockFrom != TEvChunkLock::ELockFrom::PERSONAL_QUOTA);
+ Y_VERIFY_DEBUG(LockFrom != TEvChunkLock::ELockFrom::PERSONAL_QUOTA);
}
TEvChunkUnlock(TEvChunkLock::ELockFrom lockFrom, TOwner owner)
@@ -644,7 +644,7 @@ struct TEvChunkUnlock : TEventLocal<TEvChunkUnlock, TEvBlobStorage::EvChunkUnloc
, ByVDiskId(false)
, Owner(owner)
{
- Y_DEBUG_ABORT_UNLESS(LockFrom == TEvChunkLock::ELockFrom::PERSONAL_QUOTA);
+ Y_VERIFY_DEBUG(LockFrom == TEvChunkLock::ELockFrom::PERSONAL_QUOTA);
}
TEvChunkUnlock(TEvChunkLock::ELockFrom lockFrom, TVDiskID vdiskId, bool isGenerationSet)
@@ -653,7 +653,7 @@ struct TEvChunkUnlock : TEventLocal<TEvChunkUnlock, TEvBlobStorage::EvChunkUnloc
, VDiskId(vdiskId)
, IsGenerationSet(isGenerationSet)
{
- Y_DEBUG_ABORT_UNLESS(LockFrom == TEvChunkLock::ELockFrom::PERSONAL_QUOTA);
+ Y_VERIFY_DEBUG(LockFrom == TEvChunkLock::ELockFrom::PERSONAL_QUOTA);
}
TString ToString() const {
@@ -974,7 +974,7 @@ struct TEvChunkWrite : TEventLocal<TEvChunkWrite, TEvBlobStorage::EvChunkWrite>
}
TDataRef operator[] (ui32 index) const override {
- Y_ABORT_UNLESS(index < PartsNum);
+ Y_VERIFY(index < PartsNum);
const TPart &part = Parts[index];
return TDataRef(part.Data, part.Size);
}
@@ -992,7 +992,7 @@ struct TEvChunkWrite : TEventLocal<TEvChunkWrite, TEvBlobStorage::EvChunkWrite>
{}
virtual TDataRef operator[] (ui32 i) const override {
- Y_DEBUG_ABORT_UNLESS(i == 0);
+ Y_VERIFY_DEBUG(i == 0);
return TDataRef(Buffer.Data(), Buffer.Size());
}
@@ -1019,7 +1019,7 @@ struct TEvChunkWrite : TEventLocal<TEvChunkWrite, TEvBlobStorage::EvChunkWrite>
: Data(std::move(data))
, FullSize(fullSize)
{
- Y_DEBUG_ABORT_UNLESS(Data.size() <= FullSize);
+ Y_VERIFY_DEBUG(Data.size() <= FullSize);
}
virtual ui32 Size() const override {
@@ -1031,7 +1031,7 @@ struct TEvChunkWrite : TEventLocal<TEvChunkWrite, TEvBlobStorage::EvChunkWrite>
return std::make_pair(Data.data(), Data.size());
} else {
ui32 padding = FullSize - Data.size();
- Y_DEBUG_ABORT_UNLESS(padding);
+ Y_VERIFY_DEBUG(padding);
return std::make_pair(nullptr, padding);
}
}
@@ -1108,7 +1108,7 @@ struct TEvChunkWrite : TEventLocal<TEvChunkWrite, TEvBlobStorage::EvChunkWrite>
void Validate() const {
const ui32 count = PartsPtr ? PartsPtr->Size() : 0;
for (ui32 idx = 0; idx < count; ++idx) {
- Y_ABORT_UNLESS((*PartsPtr)[idx].second);
+ Y_VERIFY((*PartsPtr)[idx].second);
if ((*PartsPtr)[idx].first) {
REQUEST_VALGRIND_CHECK_MEM_IS_DEFINED((*PartsPtr)[idx].first, (*PartsPtr)[idx].second);
}
@@ -1697,6 +1697,7 @@ struct TPDiskCtx {
TActorSystem * const ActorSystem = nullptr;
const ui32 PDiskId = 0;
const TActorId PDiskActor;
+ const TString PDiskLogPrefix;
// TPDiskMon * const Mon = nullptr; TODO implement it
TPDiskCtx() = default;
@@ -1709,6 +1710,7 @@ struct TPDiskCtx {
: ActorSystem(actorSystem)
, PDiskId(pdiskId)
, PDiskActor(pdiskActor)
+ , PDiskLogPrefix(Sprintf("PDiskId# %" PRIu32 " ", PDiskId))
{}
};
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp
index 4d69aff3e9..6b97804612 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_actor.cpp
@@ -229,7 +229,7 @@ public:
->GetSubgroup("pdisk", Sprintf("%09" PRIu32, (ui32)cfg->PDiskId))
->GetSubgroup("media", to_lower(cfg->PDiskCategory.TypeStrShort())))
{
- Y_ABORT_UNLESS(MainKey.IsInitialized);
+ Y_VERIFY(MainKey.IsInitialized);
}
~TPDiskActor() {
@@ -276,14 +276,14 @@ public:
if (!MainKey) {
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId
- << " MainKey is invalid, ErrorReason# " << MainKey.ErrorReason;
+ str << PCtx->PDiskLogPrefix
+ << "MainKey is invalid, ErrorReason# " << MainKey.ErrorReason;
InitError(str.Str());
P_LOG(PRI_CRIT, BPD01, str.Str());
} else if (!isOk) {
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId
- << " bootstrapped to the StateError, reason# " << PDisk->ErrorStr
+ str << PCtx->PDiskLogPrefix
+ << "bootstrapped to the StateError, reason# " << PDisk->ErrorStr
<< " Can not be initialized";
InitError(str.Str());
str << " Config: " << Cfg->ToString();
@@ -363,7 +363,7 @@ public:
} else {
PDisk.Reset(new TPDisk(PCtx, Cfg, PDiskCounters));
PDisk->Initialize();
- Y_ABORT_UNLESS(PDisk->PDiskThread.Running());
+ Y_VERIFY_S(PDisk->PDiskThread.Running(), PCtx->PDiskLogPrefix);
*PDisk->Mon.PDiskState = NKikimrBlobStorage::TPDiskState::InitialFormatReadError;
*PDisk->Mon.PDiskBriefState = TPDiskMon::TPDisk::Error;
@@ -372,8 +372,8 @@ public:
PDisk->ErrorStr = ToString("Can not be formated! Reason# ") + ev->Get()->ErrorStr;
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId
- << " Can not be formated! Reason# " << ev->Get()->ErrorStr
+ str << PCtx->PDiskLogPrefix
+ << "Can not be formated! Reason# " << ev->Get()->ErrorStr
<< " Switching to StateError. Config: " << Cfg->ToString();
P_LOG(PRI_CRIT, BPD01, str.Str());
InitError(str.Str());
@@ -383,7 +383,7 @@ public:
void CheckMagicSector(ui8 *magicData, ui32 magicDataSize) {
bool isFormatMagicValid = PDisk->IsFormatMagicValid(magicData, magicDataSize, MainKey);
if (isFormatMagicValid) {
- auto format = PDisk->CheckMetadataFormatSector(magicData, magicDataSize, MainKey);
+ auto format = PDisk->CheckMetadataFormatSector(magicData, magicDataSize, MainKey, PCtx->PDiskLogPrefix);
PDisk->InputRequest(PDisk->ReqCreator.CreateFromArgs<TPushUnformattedMetadataSector>(format,
!Cfg->MetadataOnly));
if (Cfg->MetadataOnly) {
@@ -403,8 +403,8 @@ public:
PDisk->ErrorStr = "Format is incomplete. Magic sector is present and new format was written";
}
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId
- << " Can not be initialized! " << PDisk->ErrorStr;
+ str << PCtx->PDiskLogPrefix
+ << "Can not be initialized! " << PDisk->ErrorStr;
for (ui32 i = 0; i < Cfg->HashedMainKey.size(); ++i) {
str << " Hash(NewMainKey[" << i << "])# " << Cfg->HashedMainKey[i];
}
@@ -526,7 +526,7 @@ public:
} else {
PDisk.Reset(new TPDisk(PCtx, Cfg, PDiskCounters));
PDisk->Initialize();
- Y_ABORT_UNLESS(PDisk->PDiskThread.Running());
+ Y_VERIFY_S(PDisk->PDiskThread.Running(), PCtx->PDiskLogPrefix);
*PDisk->Mon.PDiskState = NKikimrBlobStorage::TPDiskState::InitialFormatReadError;
*PDisk->Mon.PDiskBriefState = TPDiskMon::TPDisk::Error;
@@ -535,8 +535,8 @@ public:
PDisk->ErrorStr = ToString("Format chunks cannot be reencrypted! Reason# ") + ev->Get()->ErrorReason;
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId
- << " Format chunks cannot be reencrypted! Reason# " << ev->Get()->ErrorReason
+ str << PCtx->PDiskLogPrefix
+ << "Format chunks cannot be reencrypted! Reason# " << ev->Get()->ErrorReason
<< " Switching to StateError. Config: " << Cfg->ToString();
P_LOG(PRI_CRIT, BPD01, str.Str());
InitError(str.Str());
@@ -570,7 +570,7 @@ public:
*PDisk->Mon.PDiskDetailedState = TPDiskMon::TPDisk::ErrorInitialFormatReadDueToGuid;
PDisk->ErrorStr = TStringBuilder() << "Can't start due to a guid error " << info;
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId << PDisk->ErrorStr;
+ str << PCtx->PDiskLogPrefix << PDisk->ErrorStr;
P_LOG(PRI_ERROR, BSP01, str.Str());
InitError(str.Str());
} else if (!PDisk->CheckFormatComplete()) {
@@ -579,7 +579,7 @@ public:
*PDisk->Mon.PDiskDetailedState = TPDiskMon::TPDisk::ErrorInitialFormatReadIncompleteFormat;
PDisk->ErrorStr = "Can't start due to incomplete format!";
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId << " " << PDisk->ErrorStr << " "
+ str << PCtx->PDiskLogPrefix << PDisk->ErrorStr << " "
<< "Please, do not turn off your server or remove your storage device while formatting. "
<< "We are sure you did this or something even more creative, like killing the formatter.";
P_LOG(PRI_ERROR, BSP01, str.Str());
@@ -607,8 +607,8 @@ public:
InitSuccess();
} else {
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId <<
- " Can't start due to a log processing error! ErrorStr# \"" << evLogInitResult.ErrorStr << "\"";
+ str << PCtx->PDiskLogPrefix
+            << "Can't start due to a log processing error! ErrorStr# \"" << evLogInitResult.ErrorStr << "\"";
P_LOG(PRI_ERROR, BSP01, str.Str());
InitError(str.Str());
}
@@ -663,7 +663,7 @@ public:
const NPDisk::TEvSlay &evSlay = *ev->Get();
PDisk->Mon.YardSlay.CountRequest();
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId << " is still initializing, please wait";
+ str << PCtx->PDiskLogPrefix << "is still initializing, please wait";
Send(ev->Sender, new NPDisk::TEvSlayResult(NKikimrProto::NOTREADY, 0,
evSlay.VDiskId, evSlay.SlayOwnerRound, evSlay.PDiskId, evSlay.VSlotId, str.Str()));
PDisk->Mon.YardSlay.CountResponse();
@@ -707,8 +707,8 @@ public:
void ErrorHandle(NPDisk::TEvLog::TPtr &ev) {
const NPDisk::TEvLog &evLog = *ev->Get();
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId;
- str << " TEvLog error because PDisk State# ";
+ str << PCtx->PDiskLogPrefix;
+ str << "TEvLog error because PDisk State# ";
if (CurrentStateFunc() == &TPDiskActor::StateInit) {
str << "Init, wait for PDisk to initialize. Did you ckeck EvYardInit result? Marker# BSY08";
} else if (CurrentStateFunc() == &TPDiskActor::StateError) {
@@ -731,8 +731,8 @@ public:
void ErrorHandle(NPDisk::TEvMultiLog::TPtr &ev) {
const NPDisk::TEvMultiLog &evMultiLog = *ev->Get();
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId;
- str << " TEvBatchedLogs error because PDisk State# ";
+ str << PCtx->PDiskLogPrefix;
+ str << "TEvBatchedLogs error because PDisk State# ";
if (CurrentStateFunc() == &TPDiskActor::StateInit) {
str << "Init, wait for PDisk to initialize. Did you ckeck EvYardInit result? Marker# BSY10";
} else if (CurrentStateFunc() == &TPDiskActor::StateError) {
@@ -753,8 +753,8 @@ public:
void ErrorHandle(NPDisk::TEvReadLog::TPtr &ev) {
const NPDisk::TEvReadLog &evReadLog = *ev->Get();
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId;
- str << " TEvReadLog error because PDisk State# ";
+ str << PCtx->PDiskLogPrefix;
+ str << "TEvReadLog error because PDisk State# ";
if (CurrentStateFunc() == &TPDiskActor::StateInit) {
str << "Init, wait for PDisk to initialize. Did you ckeck EvYardInit result? Marker# BSY05";
} else if (CurrentStateFunc() == &TPDiskActor::StateError) {
@@ -801,7 +801,7 @@ public:
const NPDisk::TEvSlay &evSlay = *ev->Get();
PDisk->Mon.YardSlay.CountRequest();
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId << " is in error state.";
+ str << PCtx->PDiskLogPrefix << "is in error state.";
Send(ev->Sender, new NPDisk::TEvSlayResult(NKikimrProto::CORRUPTED, 0,
evSlay.VDiskId, evSlay.SlayOwnerRound, evSlay.PDiskId, evSlay.VSlotId, str.Str()));
PDisk->Mon.YardSlay.CountResponse();
@@ -821,7 +821,7 @@ public:
void ErrorHandle(NPDisk::TEvYardControl::TPtr &ev) {
const NPDisk::TEvYardControl &evControl = *ev->Get();
- Y_ABORT_UNLESS(PDisk);
+ Y_VERIFY_S(PDisk, PCtx->PDiskLogPrefix);
PDisk->Mon.YardControl.CountRequest();
@@ -829,7 +829,7 @@ public:
case TEvYardControl::PDiskStart:
{
auto *mainKey = static_cast<const NPDisk::TMainKey*>(evControl.Cookie);
- Y_ABORT_UNLESS(mainKey);
+ Y_VERIFY_S(mainKey, PCtx->PDiskLogPrefix);
MainKey = *mainKey;
StartPDiskThread();
ControledStartResult = MakeHolder<IEventHandle>(ev->Sender, SelfId(),
@@ -976,7 +976,7 @@ public:
Send(ev->Sender, new NPDisk::TEvYardControlResult(NKikimrProto::OK, evControl.Cookie, {}));
break;
case TEvYardControl::GetPDiskPointer:
- Y_ABORT_UNLESS(!evControl.Cookie);
+ Y_VERIFY_S(!evControl.Cookie, PCtx->PDiskLogPrefix);
Send(ev->Sender, new NPDisk::TEvYardControlResult(NKikimrProto::OK, PDisk.Get(), {}));
break;
case TEvYardControl::PDiskStart:
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_async.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_async.cpp
index f43c67ff09..2f78bd4ec6 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_async.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_blockdevice_async.cpp
@@ -160,7 +160,7 @@ class TRealBlockDevice : public IBlockDevice {
continue;
}
- Y_ABORT_UNLESS(min_it != Threads.end());
+ Y_VERIFY(min_it != Threads.end());
if (action->CanBeExecutedInAdditionalCompletionThread) {
(*min_it)->Schedule(action);
} else {
@@ -371,7 +371,7 @@ class TRealBlockDevice : public IBlockDevice {
}
prevCycleEnd = cycleEnd;
}
- Y_ABORT_UNLESS(OperationsToBeSubmit.GetWaitingSize() == 0);
+ Y_VERIFY_S(OperationsToBeSubmit.GetWaitingSize() == 0, PCtx->PDiskLogPrefix);
}
};
@@ -476,10 +476,11 @@ class TRealBlockDevice : public IBlockDevice {
Device.DecrementMonInFlight(op->GetType(), opSize);
if (opSize == 0) { // Special case for flush operation, which is a read operation with 0 bytes size
if (op->GetType() == IAsyncIoOperation::EType::PRead) {
- Y_ABORT_UNLESS(WaitingNoops[completionAction->OperationIdx % MaxWaitingNoops] == nullptr);
+ Y_VERIFY_S(WaitingNoops[completionAction->OperationIdx % MaxWaitingNoops] == nullptr,
+ PCtx->PDiskLogPrefix);
WaitingNoops[completionAction->OperationIdx % MaxWaitingNoops] = completionAction;
} else {
- Y_DEBUG_ABORT("Threre must not be writes of size 0 in TRealBlockDevice");
+            Y_DEBUG_ABORT_S(PCtx->PDiskLogPrefix << "There must not be writes of size 0 in TRealBlockDevice");
}
} else {
if ((ui64)op->GetOffset() != EndOffset) {
@@ -501,7 +502,7 @@ class TRealBlockDevice : public IBlockDevice {
(Offset, op->GetOffset()), (Size, opSize));
if (completionAction->FlushAction) {
ui64 idx = completionAction->FlushAction->OperationIdx;
- Y_ABORT_UNLESS(WaitingNoops[idx % MaxWaitingNoops] == nullptr);
+ Y_VERIFY_S(WaitingNoops[idx % MaxWaitingNoops] == nullptr, PCtx->PDiskLogPrefix);
WaitingNoops[idx % MaxWaitingNoops] = completionAction->FlushAction;
completionAction->FlushAction = nullptr;
}
@@ -719,11 +720,11 @@ class TRealBlockDevice : public IBlockDevice {
<< (EIoResult)-ret);
}
inFlight -= ret;
- Y_VERIFY_S(inFlight >= 0, "Error in inFlight# " << inFlight);
+ Y_VERIFY_S(inFlight >= 0, PCtx->PDiskLogPrefix << "Error in inFlight# " << inFlight);
} while (inFlight == (i64)Device.DeviceInFlight || isExiting && inFlight > 0);
}
- Y_ABORT_UNLESS(OperationsToBeSubmit.GetWaitingSize() == 0);
+ Y_VERIFY_S(OperationsToBeSubmit.GetWaitingSize() == 0, PCtx->PDiskLogPrefix);
}
};
@@ -757,7 +758,7 @@ class TRealBlockDevice : public IBlockDevice {
if (op == nullptr) {
return;
}
- Y_ABORT_UNLESS(op->GetType() == IAsyncIoOperation::EType::PTrim);
+ Y_VERIFY_S(op->GetType() == IAsyncIoOperation::EType::PTrim, PCtx->PDiskLogPrefix);
auto *completion = static_cast<TCompletionAction*>(op->GetCookie());
if (Device.IsTrimEnabled) {
Device.IdleCounter.Increment();
@@ -879,15 +880,16 @@ public:
protected:
void Initialize(std::shared_ptr<TPDiskCtx> pCtx) override {
PCtx = std::move(pCtx);
- Y_ABORT_UNLESS(PCtx);
+ Y_VERIFY(PCtx);
+ FlightControl.Initialize(PCtx->PDiskLogPrefix);
TString errStr = TDeviceMode::Validate(Flags);
if (errStr) {
Y_FAIL_S(IoContext->GetPDiskInfo() << " Error in device flags: " << errStr);
}
- Y_ABORT_UNLESS(PCtx->ActorSystem->AppData<TAppData>());
- Y_ABORT_UNLESS(PCtx->ActorSystem->AppData<TAppData>()->IoContextFactory);
+ Y_VERIFY_S(PCtx->ActorSystem->AppData<TAppData>(), PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(PCtx->ActorSystem->AppData<TAppData>()->IoContextFactory, PCtx->PDiskLogPrefix);
auto *factory = PCtx->ActorSystem->AppData<TAppData>()->IoContextFactory;
IoContext = factory->CreateAsyncIoContext(Path, PCtx->PDiskId, Flags, SectorMap);
if (Flags & TDeviceMode::UseSpdk) {
@@ -1040,7 +1042,7 @@ protected:
}
void TrimSync(ui32 size, ui64 offset) override {
- Y_ABORT_UNLESS(!ReadOnly);
+ Y_VERIFY_S(!ReadOnly, PCtx->PDiskLogPrefix);
IAsyncIoOperation* op = IoContext->CreateAsyncIoOperation(nullptr, {}, nullptr);
IoContext->PreparePTrim(op, size, offset);
IsTrimEnabled = IoContext->DoTrim(op);
@@ -1049,13 +1051,13 @@ protected:
void PreadAsync(void *data, ui32 size, ui64 offset, TCompletionAction *completionAction, TReqId reqId,
NWilson::TTraceId *traceId) override {
- Y_ABORT_UNLESS(completionAction);
+ Y_VERIFY_S(completionAction, PCtx->PDiskLogPrefix);
if (!IsInitialized) {
completionAction->Release(PCtx->ActorSystem);
return;
}
if (data && size) {
- Y_ABORT_UNLESS(intptr_t(data) % 512 == 0);
+ Y_VERIFY_S(intptr_t(data) % 512 == 0, PCtx->PDiskLogPrefix);
REQUEST_VALGRIND_CHECK_MEM_IS_ADDRESSABLE(data, size);
}
@@ -1066,14 +1068,14 @@ protected:
void PwriteAsync(const void *data, ui64 size, ui64 offset, TCompletionAction *completionAction, TReqId reqId,
NWilson::TTraceId *traceId) override {
- Y_ABORT_UNLESS(completionAction);
- Y_ABORT_UNLESS(!ReadOnly);
+ Y_VERIFY_S(completionAction, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(!ReadOnly, PCtx->PDiskLogPrefix);
if (!IsInitialized) {
completionAction->Release(PCtx->ActorSystem);
return;
}
if (data && size) {
- Y_ABORT_UNLESS(intptr_t(data) % 512 == 0);
+ Y_VERIFY_S(intptr_t(data) % 512 == 0, PCtx->PDiskLogPrefix);
REQUEST_VALGRIND_CHECK_MEM_IS_DEFINED(data, size);
}
@@ -1083,8 +1085,8 @@ protected:
}
void FlushAsync(TCompletionAction *completionAction, TReqId reqId) override {
- Y_ABORT_UNLESS(completionAction);
- Y_ABORT_UNLESS(!ReadOnly);
+ Y_VERIFY_S(completionAction, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(!ReadOnly, PCtx->PDiskLogPrefix);
if (!IsInitialized) {
completionAction->Release(PCtx->ActorSystem);
return;
@@ -1096,7 +1098,7 @@ protected:
}
void NoopAsync(TCompletionAction *completionAction, TReqId /*reqId*/) override {
- Y_ABORT_UNLESS(completionAction);
+ Y_VERIFY_S(completionAction, PCtx->PDiskLogPrefix);
if (!IsInitialized) {
completionAction->Release(PCtx->ActorSystem);
return;
@@ -1111,7 +1113,7 @@ protected:
}
void NoopAsyncHackForLogReader(TCompletionAction *completionAction, TReqId /*reqId*/) override {
- Y_ABORT_UNLESS(completionAction);
+ Y_VERIFY_S(completionAction, PCtx->PDiskLogPrefix);
if (!IsInitialized) {
completionAction->Release(PCtx->ActorSystem);
return;
@@ -1126,7 +1128,7 @@ protected:
}
void TrimAsync(ui32 size, ui64 offset, TCompletionAction *completionAction, TReqId reqId) override {
- Y_ABORT_UNLESS(completionAction);
+ Y_VERIFY_S(completionAction, PCtx->PDiskLogPrefix);
if (!IsInitialized || QuitCounter.IsBlocked()) {
return;
}
@@ -1192,20 +1194,20 @@ protected:
QuitCounter.BlockA(res);
if (res.PrevA ^ res.A) { // res.ToggledA()
if (IsInitialized) {
- Y_ABORT_UNLESS(TrimThread);
- Y_ABORT_UNLESS(CompletionThreads);
+ Y_VERIFY_S(TrimThread, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(CompletionThreads, PCtx->PDiskLogPrefix);
TrimThread->Schedule(nullptr); // Stop the Trim thread
if (Flags & TDeviceMode::UseSpdk) {
- Y_ABORT_UNLESS(SpdkSubmitGetThread);
+ Y_VERIFY_S(SpdkSubmitGetThread, PCtx->PDiskLogPrefix);
SpdkSubmitGetThread->Schedule(nullptr); // Stop the SpdkSubmitGetEvents thread
SpdkState->WaitAllThreads();
} else {
- Y_ABORT_UNLESS(SubmitThread);
+ Y_VERIFY_S(SubmitThread, PCtx->PDiskLogPrefix);
SubmitThread->Schedule(nullptr); // Stop the SubminEvents thread
SubmitThread->Join();
if (!(Flags & TDeviceMode::UseSubmitGetThread)) {
- Y_ABORT_UNLESS(GetEventsThread);
+ Y_VERIFY_S(GetEventsThread, PCtx->PDiskLogPrefix);
GetEventsThread->Join();
}
}
@@ -1214,10 +1216,10 @@ protected:
CompletionThreads->Join();
IsInitialized = false;
} else {
- Y_ABORT_UNLESS(SubmitThread.Get() == nullptr);
- Y_ABORT_UNLESS(GetEventsThread.Get() == nullptr);
- Y_ABORT_UNLESS(TrimThread.Get() == nullptr);
- Y_ABORT_UNLESS(CompletionThreads.Get() == nullptr);
+ Y_VERIFY_S(SubmitThread.Get() == nullptr, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(GetEventsThread.Get() == nullptr, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(TrimThread.Get() == nullptr, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(CompletionThreads.Get() == nullptr, PCtx->PDiskLogPrefix);
}
if (IsFileOpened) {
EIoResult ret = IoContext->Destroy();
@@ -1322,7 +1324,7 @@ class TCachedBlockDevice : public TRealBlockDevice {
if (read.Size <= cached->Data.Size()) {
memcpy(read.Data, cached->Data.GetData(), read.Size);
Mon.DeviceReadCacheHits->Inc();
- Y_ABORT_UNLESS(read.CompletionAction);
+ Y_VERIFY_S(read.CompletionAction, PCtx->PDiskLogPrefix);
for (size_t i = 0; i < cached->BadOffsets.size(); ++i) {
read.CompletionAction->RegisterBadOffset(cached->BadOffsets[i]);
}
@@ -1367,11 +1369,11 @@ public:
TGuard<TMutex> guard(CacheMutex);
ui64 offset = completion->GetOffset();
auto currentReadIt = CurrentReads.find(offset);
- Y_ABORT_UNLESS(currentReadIt != CurrentReads.end());
+ Y_VERIFY_S(currentReadIt != CurrentReads.end(), PCtx->PDiskLogPrefix);
auto range = ReadsForOffset.equal_range(offset);
ui64 chunkIdx = offset / PDisk->Format.ChunkSize;
- Y_ABORT_UNLESS(chunkIdx < PDisk->ChunkState.size());
+ Y_VERIFY_S(chunkIdx < PDisk->ChunkState.size(), PCtx->PDiskLogPrefix);
if (TChunkState::DATA_COMMITTED == PDisk->ChunkState[chunkIdx].CommitState) {
if ((offset % PDisk->Format.ChunkSize) + completion->GetSize() > PDisk->Format.ChunkSize) {
// TODO: split buffer if crossing chunk boundary instead of completely discarding it
@@ -1400,7 +1402,7 @@ public:
} else {
Mon.DeviceReadCacheMisses->Inc();
}
- Y_ABORT_UNLESS(read.CompletionAction);
+ Y_VERIFY_S(read.CompletionAction, PCtx->PDiskLogPrefix);
for (ui64 badOffset : completion->GetBadOffsets()) {
read.CompletionAction->RegisterBadOffset(badOffset);
}
@@ -1438,7 +1440,7 @@ public:
for (auto it = range.first; it != range.second; ++it) {
TRead &read = it->second;
- Y_ABORT_UNLESS(read.CompletionAction);
+ Y_VERIFY_S(read.CompletionAction, PCtx->PDiskLogPrefix);
read.CompletionAction->SetResult(completion->Result);
read.CompletionAction->SetErrorReason(completion->ErrorReason);
@@ -1446,7 +1448,7 @@ public:
}
auto it = CurrentReads.find(completion->GetOffset());
- Y_ABORT_UNLESS(it != CurrentReads.end());
+ Y_VERIFY_S(it != CurrentReads.end(), PCtx->PDiskLogPrefix);
CurrentReads.erase(it);
ReadsInFly--;
}
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_chunk_tracker.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_chunk_tracker.h
index 0229261f57..e883ecfa85 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_chunk_tracker.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_chunk_tracker.h
@@ -55,7 +55,7 @@ public:
}
i64 ForceHardLimit(TOwner ownerId, i64 limit) {
- Y_ABORT_UNLESS(limit >= 0);
+ Y_VERIFY(limit >= 0);
return QuotaForOwner[ownerId].ForceHardLimit(limit, ColorLimits);
}
@@ -73,8 +73,8 @@ public:
void AddOwner(TOwner id, TVDiskID vdiskId) {
TQuotaRecord &record = QuotaForOwner[id];
- Y_ABORT_UNLESS(record.GetHardLimit() == 0);
- Y_ABORT_UNLESS(record.GetFree() == 0);
+ Y_VERIFY(record.GetHardLimit() == 0);
+ Y_VERIFY(record.GetFree() == 0);
record.SetName(TStringBuilder() << "Owner# " << id);
record.SetVDiskId(vdiskId);
@@ -94,14 +94,14 @@ public:
break;
}
}
- Y_ABORT_UNLESS(isFound);
+ Y_VERIFY(isFound);
ForceHardLimit(id, 0);
}
i64 AddSystemOwner(TOwner id, i64 quota, TString name) {
TQuotaRecord &record = QuotaForOwner[id];
- Y_ABORT_UNLESS(record.GetHardLimit() == 0);
- Y_ABORT_UNLESS(record.GetFree() == 0);
+ Y_VERIFY(record.GetHardLimit() == 0);
+ Y_VERIFY(record.GetFree() == 0);
record.SetName(name);
i64 inc = ForceHardLimit(id, quota);
ActiveOwnerIds.push_back(id);
@@ -134,7 +134,7 @@ public:
}
bool InitialAllocate(TOwner id, i64 count) {
- Y_ABORT_UNLESS(count >= 0);
+ Y_VERIFY(count >= 0);
return QuotaForOwner[id].ForceAllocate(count);
}
@@ -345,12 +345,12 @@ public:
}
void AddOwner(TOwner owner, TVDiskID vdiskId) {
- Y_ABORT_UNLESS(IsOwnerUser(owner));
+ Y_VERIFY(IsOwnerUser(owner));
OwnerQuota->AddOwner(owner, vdiskId);
}
void RemoveOwner(TOwner owner) {
- Y_ABORT_UNLESS(IsOwnerUser(owner));
+ Y_VERIFY(IsOwnerUser(owner));
OwnerQuota->RemoveOwner(owner);
}
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.cpp
index 93398e0a93..0052778b1f 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.cpp
@@ -24,7 +24,7 @@ void TCompletionLogWrite::Exec(TActorSystem *actorSystem) {
}
for (auto it = Commits.begin(); it != Commits.end(); ++it) {
TLogWrite *evLog = *it;
- Y_ABORT_UNLESS(evLog);
+ Y_VERIFY(evLog);
if (evLog->Result->Status == NKikimrProto::OK) {
TRequestBase *req = PDisk->ReqCreator.CreateFromArgs<TLogCommitDone>(*evLog);
PDisk->InputRequest(req);
@@ -32,7 +32,7 @@ void TCompletionLogWrite::Exec(TActorSystem *actorSystem) {
}
auto sendResponse = [&] (TLogWrite *evLog) {
- Y_DEBUG_ABORT_UNLESS(evLog->Result);
+ Y_VERIFY_DEBUG(evLog->Result);
ui32 results = evLog->Result->Results.size();
actorSystem->Send(evLog->Sender, evLog->Result.Release());
PDisk->Mon.WriteLog.CountMultipleResponses(results);
@@ -152,8 +152,8 @@ TBuffer *TCompletionChunkReadPart::GetBuffer() {
void TCompletionChunkReadPart::Exec(TActorSystem *actorSystem) {
auto execSpan = Span.CreateChild(TWilson::PDiskDetailed, "PDisk.CompletionChunkReadPart.Exec");
- Y_ABORT_UNLESS(actorSystem);
- Y_ABORT_UNLESS(CumulativeCompletion);
+ Y_VERIFY(actorSystem);
+ Y_VERIFY(CumulativeCompletion);
if (TCompletionAction::Result != EIoResult::Ok) {
Release(actorSystem);
return;
@@ -165,8 +165,9 @@ void TCompletionChunkReadPart::Exec(TActorSystem *actorSystem) {
ui64 lastSector;
ui64 sectorOffset;
bool isOk = ParseSectorOffset(PDisk->Format, actorSystem, PDisk->PCtx->PDiskId,
- Read->Offset + CommonBufferOffset, PayloadReadSize, firstSector, lastSector, sectorOffset);
- Y_ABORT_UNLESS(isOk);
+ Read->Offset + CommonBufferOffset, PayloadReadSize, firstSector, lastSector, sectorOffset,
+ PDisk->PCtx->PDiskLogPrefix);
+ Y_VERIFY(isOk);
ui8* source = Buffer->Data();
@@ -215,7 +216,7 @@ void TCompletionChunkReadPart::Exec(TActorSystem *actorSystem) {
}
}
- Y_ABORT_UNLESS(sectorIdx >= firstSector);
+ Y_VERIFY(sectorIdx >= firstSector);
// Decrypt data
if (beginBadUserOffset != 0xffffffff) {
@@ -302,8 +303,8 @@ void TCompletionChunkReadPart::Release(TActorSystem *actorSystem) {
TCompletionChunkRead::~TCompletionChunkRead() {
OnDestroy();
- Y_ABORT_UNLESS(CommonBuffer.Empty());
- Y_ABORT_UNLESS(DoubleFreeCanary == ReferenceCanary, "DoubleFreeCanary in TCompletionChunkRead is dead!");
+ Y_VERIFY(CommonBuffer.Empty());
+ Y_VERIFY(DoubleFreeCanary == ReferenceCanary, "DoubleFreeCanary in TCompletionChunkRead is dead!");
// Set DoubleFreeCanary to 0 and make sure compiler will not eliminate that action
SecureWipeBuffer((ui8*)&DoubleFreeCanary, sizeof(DoubleFreeCanary));
}
@@ -314,11 +315,11 @@ void TCompletionChunkRead::Exec(TActorSystem *actorSystem) {
Read->ChunkIdx, Read->Offset, Read->Cookie, PDisk->GetStatusFlags(Read->Owner, Read->OwnerGroupType), "");
result->Data = std::move(CommonBuffer);
CommonBuffer.Clear();
- //Y_ABORT_UNLESS(result->Data.IsDetached());
+ //Y_VERIFY(result->Data.IsDetached());
result->Data.Commit();
- Y_ABORT_UNLESS(Read);
+ Y_VERIFY(Read);
LOG_DEBUG_S(*actorSystem, NKikimrServices::BS_PDISK, "Reply from TCompletionChunkRead, PDiskId# " << PDisk->PCtx->PDiskId << " ReqId# " << Read->ReqId.Id
<< " " << result->ToString() << " To# " << Read->Sender.LocalId());
@@ -337,7 +338,7 @@ void TCompletionChunkRead::Exec(TActorSystem *actorSystem) {
}
void TCompletionChunkRead::ReplyError(TActorSystem *actorSystem, TString reason) {
- Y_ABORT_UNLESS(!Read->IsReplied);
+ Y_VERIFY(!Read->IsReplied);
CommonBuffer.Clear();
TStringStream error;
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.h
index 5659eb5f76..7d089895d9 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_completion_impl.h
@@ -270,7 +270,7 @@ public:
void SetCompletionAction(TCompletionAction *completionAction) {
AtomicSet(CompletionActionPtr, (TAtomicBase)completionAction);
- Y_ABORT_UNLESS(AtomicGet(PartsPending) > 0);
+ Y_VERIFY(AtomicGet(PartsPending) > 0);
}
void Ref() {
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_config.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_config.h
index f9844b2c23..1675c05ad2 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_config.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_config.h
@@ -229,8 +229,8 @@ struct TPDiskConfig : public TThrRefBase {
MaxQueuedCompletionActions = BufferPoolBufferCount / 2;
UseSpdkNvmeDriver = Path.StartsWith("PCIe:");
- Y_ABORT_UNLESS(!UseSpdkNvmeDriver || deviceType == NPDisk::DEVICE_TYPE_NVME,
- "SPDK NVMe driver can be used only with NVMe devices!");
+ Y_VERIFY_S(!UseSpdkNvmeDriver || deviceType == NPDisk::DEVICE_TYPE_NVME,
+ "PDiskId# " << PDiskId << " SPDK NVMe driver can be used only with NVMe devices!");
}
TString GetDevicePath() {
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_data.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_data.h
index 26a7971610..508784e48b 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_data.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_data.h
@@ -512,17 +512,17 @@ struct TChunkTrimInfo {
{}
void SetChunkTrimmed(ui8 idx) {
- Y_ABORT_UNLESS(idx < ChunksPerRecord);
+ Y_VERIFY(idx < ChunksPerRecord);
TrimMask |= (1 << idx);
}
void SetChunkUntrimmed(ui8 idx) {
- Y_ABORT_UNLESS(idx < ChunksPerRecord);
+ Y_VERIFY(idx < ChunksPerRecord);
TrimMask &= ~(1 << idx);
}
bool IsChunkTrimmed(ui8 idx) {
- Y_ABORT_UNLESS(idx < ChunksPerRecord);
+ Y_VERIFY(idx < ChunksPerRecord);
return TrimMask & (1 << idx);
}
};
@@ -767,7 +767,7 @@ struct TDiskFormat {
}
ui64 RoundUpToSectorSize(ui64 size) const { // assuming SectorSize is a power of 2
- Y_DEBUG_ABORT_UNLESS(IsPowerOf2(SectorSize));
+ Y_VERIFY_DEBUG(IsPowerOf2(SectorSize));
return (size + SectorSize - 1) & ~ui64(SectorSize - 1);
}
@@ -805,7 +805,7 @@ struct TDiskFormat {
// Set Hash
{
NPDisk::TPDiskHashCalculator hashCalculator;
- Y_ABORT_UNLESS(DiskFormatSize > sizeof(THash));
+ Y_VERIFY(DiskFormatSize > sizeof(THash));
ui64 size = DiskFormatSize - sizeof(THash);
hashCalculator.Hash(this, size);
Hash = hashCalculator.GetHashResult();
@@ -853,8 +853,8 @@ struct TDiskFormat {
FormatFlagErasureEncodeNextChunkReference |
FormatFlagEncryptFormat |
FormatFlagEncryptData;
- Y_ABORT_UNLESS(format.Version <= Version);
- Y_ABORT_UNLESS(format.GetUsedSize() <= sizeof(TDiskFormat));
+ Y_VERIFY(format.Version <= Version);
+ Y_VERIFY(format.GetUsedSize() <= sizeof(TDiskFormat));
memcpy(this, &format, format.GetUsedSize());
}
@@ -886,4 +886,3 @@ struct TPDiskFormatBigChunkException : public yexception {
} // NPDisk
} // NKikimr
-
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_driveestimator.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_driveestimator.cpp
index 9b945098e2..c37a7e0b68 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_driveestimator.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_driveestimator.cpp
@@ -244,7 +244,7 @@ TDriveEstimator::TDriveEstimator(const TString filename)
memset(Buffer->Data(), 7, Buffer->Size()); // Initialize the buffer so that Valgrind does not complain
bool isBlockDevice = false;
ActorSystem->AppData<TAppData>()->IoContextFactory->DetectFileParameters(filename, DriveSize, isBlockDevice);
- Y_ABORT_UNLESS(Buffer->Size() * Repeats < DriveSize);
+ Y_VERIFY(Buffer->Size() * Repeats < DriveSize);
Device->Initialize(std::make_shared<TPDiskCtx>(ActorSystem));
Y_VERIFY_S(Device->IsGood(), "Cannot Initialize TBlockDevice");
}
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_free_chunks.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_free_chunks.h
index ee51f096ed..2f7cb20716 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_free_chunks.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_free_chunks.h
@@ -38,7 +38,7 @@ public:
TChunkIdx Pop() {
if (FreeChunks.empty()) {
- Y_ABORT_UNLESS(AtomicGet(FreeChunkCount) == 0);
+ Y_VERIFY(AtomicGet(FreeChunkCount) == 0);
return 0;
}
if (OutOfOrderCount > SortFreeChunksPerItems) {
@@ -47,7 +47,7 @@ public:
}
TChunkIdx idx = FreeChunks.front();
FreeChunks.pop_front();
- Y_ABORT_UNLESS(AtomicGet(FreeChunkCount) > 0);
+ Y_VERIFY(AtomicGet(FreeChunkCount) > 0);
AtomicDecrement(FreeChunkCount);
MonFreeChunks->Dec();
return idx;
@@ -83,4 +83,3 @@ public:
} // NPDisk
} // NKikimr
-
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp
index 29db8aa30d..dc9ea37cc6 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.cpp
@@ -146,14 +146,14 @@ TCheckDiskFormatResult TPDisk::ReadChunk0Format(ui8* formatSectors, const NPDisk
P_LOG(PRI_ERROR, BPD80, "Read from disk Format has FormatFlagErasureEncodeUserChunks set,"
" but current version of PDisk can't work with it",
(Format, Format.ToString()));
- Y_FAIL_S("PDiskId# " << PCtx->PDiskId
+ Y_FAIL_S(PCtx->PDiskLogPrefix
<< "Unable to run PDisk on disk with FormatFlagErasureEncodeUserChunks set");
}
if (Format.IsErasureEncodeUserLog()) {
P_LOG(PRI_ERROR, BPD801, "Read from disk Format has FormatFlagErasureEncodeUserLog set,"
" but current version of PDisk can't work with it",
(Format, Format.ToString()));
- Y_FAIL_S("PDiskId# " << PCtx->PDiskId
+ Y_FAIL_S(PCtx->PDiskLogPrefix
<< "Unable to run PDisk on disk with FormatFlagErasureEncodeUserLog set");
}
lastGoodIdx = i;
@@ -171,7 +171,7 @@ TCheckDiskFormatResult TPDisk::ReadChunk0Format(ui8* formatSectors, const NPDisk
for (ui32 i = 0; i < ReplicationFactor; ++i) {
if (isBad[i]) {
TBuffer* buffer = BufferPool->Pop();
- Y_ABORT_UNLESS(FormatSectorSize <= buffer->Size());
+ Y_VERIFY_S(FormatSectorSize <= buffer->Size(), PCtx->PDiskLogPrefix);
memcpy(buffer->Data(), formatSector, FormatSectorSize);
ui64 targetOffset = i * FormatSectorSize;
P_LOG(PRI_INFO, BPD46, "PWriteAsync for format restoration",
@@ -191,10 +191,10 @@ TCheckDiskFormatResult TPDisk::ReadChunk0Format(ui8* formatSectors, const NPDisk
}
bool TPDisk::IsFormatMagicValid(ui8 *magicData8, ui32 magicDataSize, const TMainKey& mainKey) {
- Y_VERIFY_S(magicDataSize % sizeof(ui64) == 0, "Magic data size# "<< magicDataSize
- << " must be a multiple of sizeof(ui64)");
- Y_VERIFY_S(magicDataSize >= FormatSectorSize, "Magic data size# "<< magicDataSize
- << " must greater or equals to FormatSectorSize# " << FormatSectorSize);
+ Y_VERIFY_S(magicDataSize % sizeof(ui64) == 0, PCtx->PDiskLogPrefix
+ << "Magic data size# "<< magicDataSize << " must be a multiple of sizeof(ui64)");
+ Y_VERIFY_S(magicDataSize >= FormatSectorSize, PCtx->PDiskLogPrefix
+ << "Magic data size# "<< magicDataSize << " must greater or equals to FormatSectorSize# " << FormatSectorSize);
ui64 magicOr = 0ull;
ui64 isIncompleteFormatMagicPresent = true;
ui64 *magicData64 = reinterpret_cast<ui64 *>(magicData8);
@@ -207,7 +207,7 @@ bool TPDisk::IsFormatMagicValid(ui8 *magicData8, ui32 magicDataSize, const TMain
if (magicOr == 0ull || isIncompleteFormatMagicPresent) {
return true;
}
- auto format = CheckMetadataFormatSector(magicData8, magicDataSize, mainKey);
+ auto format = CheckMetadataFormatSector(magicData8, magicDataSize, mainKey, PCtx->PDiskLogPrefix);
return format.has_value();
}
@@ -280,7 +280,7 @@ TString TPDisk::StartupOwnerInfo() {
TPDisk::~TPDisk() {
TPDisk::Stop();
- Y_ABORT_UNLESS(InputQueue.GetWaitingSize() == 0);
+ Y_VERIFY_S(InputQueue.GetWaitingSize() == 0, PCtx->PDiskLogPrefix);
}
void TPDisk::Stop() {
@@ -324,14 +324,14 @@ void TPDisk::Stop() {
for (; JointChunkReads.size(); JointChunkReads.pop()) {
auto& req = JointChunkReads.front();
Y_VERIFY_DEBUG_S(req->GetType() == ERequestType::RequestChunkReadPiece,
- "Unexpected request type# " << TypeName(*req));
+ PCtx->PDiskLogPrefix << "Unexpected request type# " << TypeName(*req));
TRequestBase::AbortDelete(req.Get(), PCtx->ActorSystem);
}
for (; JointChunkWrites.size(); JointChunkWrites.pop()) {
auto* req = JointChunkWrites.front();
Y_VERIFY_DEBUG_S(req->GetType() == ERequestType::RequestChunkWritePiece,
- "Unexpected request type# " << TypeName(req));
+ PCtx->PDiskLogPrefix << "Unexpected request type# " << TypeName(req));
TRequestBase::AbortDelete(req, PCtx->ActorSystem);
}
@@ -388,19 +388,19 @@ ui32 TPDisk::SystemChunkSize(const TDiskFormat& format, ui32 userAccessibleChunk
}
void ParsePayloadFromSectorOffset(const TDiskFormat& format, ui64 firstSector, ui64 lastSector, ui64 currentSector,
- ui64 *outPayloadBytes, ui64 *outPayloadOffset) {
- Y_VERIFY_S(firstSector <= currentSector && currentSector <= lastSector, firstSector << " <= " << currentSector
- << " <= " << lastSector);
+ ui64 *outPayloadBytes, ui64 *outPayloadOffset, const TString& logPrefix) {
+ Y_VERIFY_S(firstSector <= currentSector && currentSector <= lastSector, logPrefix
+ << firstSector << " <= " << currentSector << " <= " << lastSector);
*outPayloadBytes = (lastSector + 1 - currentSector) * format.SectorPayloadSize();
*outPayloadOffset = (currentSector - firstSector) * format.SectorPayloadSize();
}
bool ParseSectorOffset(const TDiskFormat& format, TActorSystem *actorSystem, ui32 pDiskId, ui64 offset, ui64 size,
- ui64 &outSectorIdx, ui64 &outLastSectorIdx, ui64 &outSectorOffset) {
+ ui64 &outSectorIdx, ui64 &outLastSectorIdx, ui64 &outSectorOffset, const TString& logPrefix) {
const ui64 chunkSizeUsableSectors = format.ChunkSize / format.SectorSize;
const ui64 sectorPayloadSize = format.SectorPayloadSize();
- Y_ABORT_UNLESS(sectorPayloadSize > 0);
+ Y_VERIFY_S(sectorPayloadSize > 0, logPrefix);
ui64 lastSectorIdx = (offset + size + sectorPayloadSize - 1) / sectorPayloadSize - 1;
outLastSectorIdx = lastSectorIdx;
@@ -436,7 +436,7 @@ void TPDisk::CheckLogCanary(ui8* sectorData, ui32 chunkIdx, ui64 sectorIdx) cons
sectorData + Format.SectorSize - CanarySize - sizeof(TDataSectorFooter));
if (readCanary != Canary) {
TStringStream ss;
- ss << "PDiskId# " << PCtx->PDiskId << " Failed log canary at chunkIdx# " << chunkIdx
+ ss << PCtx->PDiskLogPrefix << "Failed log canary at chunkIdx# " << chunkIdx
<< " sectorIdx# " << sectorIdx << " sectorOffset# " << Format.Offset(chunkIdx, sectorIdx)
<< " read canary# " << readCanary << " expected canary# " << Canary;
P_LOG(PRI_ERROR, BPD01, ss.Str());
@@ -447,7 +447,7 @@ void TPDisk::CheckLogCanary(ui8* sectorData, ui32 chunkIdx, ui64 sectorIdx) cons
TLogPosition TPDisk::LogPosition(TChunkIdx chunkIdx, ui64 sectorIdx, ui64 offsetInSector) const {
ui64 offsetBytes = sectorIdx * Format.SectorSize + offsetInSector;
- Y_ABORT_UNLESS(offsetBytes <= Max<ui32>());
+ Y_VERIFY_S(offsetBytes <= Max<ui32>(), PCtx->PDiskLogPrefix);
return {chunkIdx, static_cast<ui32>(offsetBytes)};
}
@@ -492,8 +492,8 @@ bool TPDisk::ReleaseUnusedLogChunks(TCompletionEventSender *completion) {
(ChunkIdx, chunkIdx),
(OwnerId, ui32(state.OwnerId)),
(NewOwnerId, ui32(OwnerUnallocated)));
- Y_VERIFY_S(state.OwnerId == OwnerSystem, "PDiskId# " << PCtx->PDiskId
- << " Unexpected ownerId# " << ui32(state.OwnerId));
+ Y_VERIFY_S(state.OwnerId == OwnerSystem, PCtx->PDiskLogPrefix
+ << "Unexpected ownerId# " << ui32(state.OwnerId));
state.CommitState = TChunkState::FREE;
state.OwnerId = OwnerUnallocated;
Mon.LogChunks->Dec();
@@ -531,6 +531,7 @@ bool TPDisk::ReleaseUnusedLogChunks(TCompletionEventSender *completion) {
return true;
} else {
TStringStream ss;
+ ss << PCtx->PDiskLogPrefix;
ss << "Impossible situation - we have non empty chunksToRelease vector and cannot release them";
ss << " gapStart# ";
if (gapStart) {
@@ -708,8 +709,7 @@ ui32 TPDisk::AskVDisksToCutLogs(TOwner ownerFilter, bool doForce) {
(To, data.CutLogId.ToString()),
(OwnerId, (chunkOwner)),
(Event, cutLog->ToString()));
- Y_VERIFY_S(cutLog->FreeUpToLsn, "Error! Should not ask to cut log at 0 lsn."
- "PDiskId# " << PCtx->PDiskId
+ Y_VERIFY_S(cutLog->FreeUpToLsn, PCtx->PDiskLogPrefix << "Error! Should not ask to cut log at 0 lsn."
<< " Send CutLog to# " << data.CutLogId.ToString().data()
<< " ownerId#" << ui32(chunkOwner)
<< " cutLog# " << cutLog->ToString());
@@ -764,8 +764,7 @@ ui32 TPDisk::AskVDisksToCutLogs(TOwner ownerFilter, bool doForce) {
str << " " << chunkIt->ToString() << " ";
}
str << "}";
- Y_VERIFY_S(cutLog->FreeUpToLsn, "Error! Should not ask to cut log at 0 lsn."
- "PDiskId# " << PCtx->PDiskId
+ Y_VERIFY_S(cutLog->FreeUpToLsn, PCtx->PDiskLogPrefix << "Error! Should not ask to cut log at 0 lsn."
<< " Send CutLog to# " << data.CutLogId.ToString().data()
<< " ownerId#" << ui32(ownerFilter)
<< " cutLog# " << cutLog->ToString()
@@ -797,19 +796,20 @@ bool TPDisk::ChunkWritePiece(TChunkWrite *evChunkWrite, ui32 pieceShift, ui32 pi
return true;
}
TGuard<TMutex> guard(StateMutex);
- Y_ABORT_UNLESS(pieceShift % Format.SectorPayloadSize() == 0);
+ Y_VERIFY_S(pieceShift % Format.SectorPayloadSize() == 0, PCtx->PDiskLogPrefix);
Y_VERIFY_S(pieceSize % Format.SectorPayloadSize() == 0 || pieceShift + pieceSize == evChunkWrite->TotalSize,
- "pieceShift# " << pieceShift << " pieceSize# " << pieceSize
+ PCtx->PDiskLogPrefix << "pieceShift# " << pieceShift << " pieceSize# " << pieceSize
<< " evChunkWrite->TotalSize# " << evChunkWrite->TotalSize);
ui32 chunkIdx = evChunkWrite->ChunkIdx;
- Y_ABORT_UNLESS(chunkIdx != 0);
+ Y_VERIFY_S(chunkIdx != 0, PCtx->PDiskLogPrefix);
ui64 desiredSectorIdx = 0;
ui64 sectorOffset = 0;
ui64 lastSectorIdx;
if (!ParseSectorOffset(Format, PCtx->ActorSystem, PCtx->PDiskId, evChunkWrite->Offset + evChunkWrite->BytesWritten,
- evChunkWrite->TotalSize - evChunkWrite->BytesWritten, desiredSectorIdx, lastSectorIdx, sectorOffset)) {
+ evChunkWrite->TotalSize - evChunkWrite->BytesWritten, desiredSectorIdx, lastSectorIdx, sectorOffset,
+ PCtx->PDiskLogPrefix)) {
guard.Release();
TString err = Sprintf("PDiskId# %" PRIu32 " Can't write chunk: incorrect offset/size offset# %" PRIu32
" size# %" PRIu32 " chunkIdx# %" PRIu32 " ownerId# %" PRIu32, PCtx->PDiskId, (ui32)evChunkWrite->Offset,
@@ -829,7 +829,7 @@ bool TPDisk::ChunkWritePiece(TChunkWrite *evChunkWrite, ui32 pieceShift, ui32 pi
guard.Release();
ui32 bytesAvailable = pieceSize;
- Y_ABORT_UNLESS(evChunkWrite->BytesWritten == pieceShift);
+ Y_VERIFY_S(evChunkWrite->BytesWritten == pieceShift, PCtx->PDiskLogPrefix);
const ui32 count = evChunkWrite->PartsPtr->Size();
for (ui32 partIdx = evChunkWrite->CurrentPart; partIdx < count; ++partIdx) {
ui32 remainingPartSize = (*evChunkWrite->PartsPtr)[partIdx].second - evChunkWrite->CurrentPartOffset;
@@ -854,7 +854,7 @@ bool TPDisk::ChunkWritePiece(TChunkWrite *evChunkWrite, ui32 pieceShift, ui32 pi
evChunkWrite->CurrentPart = partIdx;
return false;
} else {
- Y_ABORT_UNLESS(remainingPartSize);
+ Y_VERIFY_S(remainingPartSize, PCtx->PDiskLogPrefix);
ui32 sizeToWrite = remainingPartSize;
bytesAvailable -= remainingPartSize;
ui8 *data = (ui8*)(*evChunkWrite->PartsPtr)[partIdx].first;
@@ -871,7 +871,7 @@ bool TPDisk::ChunkWritePiece(TChunkWrite *evChunkWrite, ui32 pieceShift, ui32 pi
evChunkWrite->BytesWritten += sizeToWrite;
}
}
- Y_ABORT_UNLESS(evChunkWrite->RemainingSize == 0);
+ Y_VERIFY_S(evChunkWrite->RemainingSize == 0, PCtx->PDiskLogPrefix);
P_LOG(PRI_DEBUG, BPD79, "ChunkWrite",
(ChunkIdx, chunkIdx),
@@ -898,10 +898,10 @@ bool TPDisk::ChunkWritePiece(TChunkWrite *evChunkWrite, ui32 pieceShift, ui32 pi
void TPDisk::SendChunkWriteError(TChunkWrite &chunkWrite, const TString &errorReason,
NKikimrProto::EReplyStatus status) {
- Y_DEBUG_ABORT_UNLESS(errorReason);
- Y_DEBUG_ABORT_UNLESS(status != NKikimrProto::OK);
+ Y_VERIFY_DEBUG_S(errorReason, PCtx->PDiskLogPrefix);
+ Y_VERIFY_DEBUG_S(status != NKikimrProto::OK, PCtx->PDiskLogPrefix);
P_LOG(PRI_ERROR, PBD23, errorReason);
- Y_ABORT_UNLESS(!chunkWrite.IsReplied);
+ Y_VERIFY_S(!chunkWrite.IsReplied, PCtx->PDiskLogPrefix);
NPDisk::TStatusFlags flags = status == NKikimrProto::OUT_OF_SPACE
? NotEnoughDiskSpaceStatusFlags(chunkWrite.Owner, chunkWrite.OwnerGroupType)
: GetStatusFlags(chunkWrite.Owner, chunkWrite.OwnerGroupType);
@@ -918,7 +918,7 @@ void TPDisk::SendChunkWriteError(TChunkWrite &chunkWrite, const TString &errorRe
void TPDisk::SendChunkReadError(const TIntrusivePtr<TChunkRead>& read, TStringStream& error, NKikimrProto::EReplyStatus status) {
error << " for ownerId# " << read->Owner << " can't read chunkIdx# " << read->ChunkIdx;
- Y_ABORT_UNLESS(status != NKikimrProto::OK);
+ Y_VERIFY_S(status != NKikimrProto::OK, PCtx->PDiskLogPrefix);
P_LOG(PRI_ERROR, BPD01, "SendChunkReadError" + error.Str(), (ReqId, read->ReqId));
THolder<NPDisk::TEvChunkReadResult> result = MakeHolder<NPDisk::TEvChunkReadResult>(status,
@@ -934,7 +934,8 @@ TPDisk::EChunkReadPieceResult TPDisk::ChunkReadPiece(TIntrusivePtr<TChunkRead> &
return ReadPieceResultOk;
}
- Y_VERIFY_S(pieceCurrentSector == read->CurrentSector, pieceCurrentSector << " != " << read->CurrentSector);
+ Y_VERIFY_S(pieceCurrentSector == read->CurrentSector,
+ PCtx->PDiskLogPrefix << pieceCurrentSector << " != " << read->CurrentSector);
ui64 sectorsCount = read->LastSector - read->FirstSector + 1;
ui64 sectorsToRead = sectorsCount - read->CurrentSector;
ui64 bytesToRead = sectorsToRead * Format.SectorSize;
@@ -942,18 +943,21 @@ TPDisk::EChunkReadPieceResult TPDisk::ChunkReadPiece(TIntrusivePtr<TChunkRead> &
sectorsToRead = pieceSizeLimit / Format.SectorSize;
bytesToRead = sectorsToRead * Format.SectorSize;
}
- Y_VERIFY_S(bytesToRead == pieceSizeLimit, bytesToRead << " " << pieceSizeLimit);
- Y_VERIFY_S(sectorsToRead == pieceSizeLimit / Format.SectorSize, sectorsToRead << " " << pieceSizeLimit);
- Y_VERIFY_S(pieceSizeLimit % Format.SectorSize == 0, pieceSizeLimit);
+ Y_VERIFY_S(bytesToRead == pieceSizeLimit,
+ PCtx->PDiskLogPrefix << bytesToRead << " " << pieceSizeLimit);
+ Y_VERIFY_S(sectorsToRead == pieceSizeLimit / Format.SectorSize,
+ PCtx->PDiskLogPrefix << sectorsToRead << " " << pieceSizeLimit);
+ Y_VERIFY_S(pieceSizeLimit % Format.SectorSize == 0,
+ PCtx->PDiskLogPrefix << pieceSizeLimit);
- Y_ABORT_UNLESS(sectorsToRead);
+ Y_VERIFY_S(sectorsToRead, PCtx->PDiskLogPrefix);
ui64 firstSector;
ui64 lastSector;
ui64 sectorOffset;
bool isOk = ParseSectorOffset(Format, PCtx->ActorSystem, PCtx->PDiskId,
- read->Offset, read->Size, firstSector, lastSector, sectorOffset);
- Y_ABORT_UNLESS(isOk);
+ read->Offset, read->Size, firstSector, lastSector, sectorOffset, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(isOk, PCtx->PDiskLogPrefix);
ui64 currentSectorOffset = (ui64)read->CurrentSector * (ui64)Format.SectorSize;
bool isTheFirstPart = read->CurrentSector == 0;
@@ -962,7 +966,7 @@ TPDisk::EChunkReadPieceResult TPDisk::ChunkReadPiece(TIntrusivePtr<TChunkRead> &
ui64 payloadBytesToRead;
ui64 payloadOffset;
ParsePayloadFromSectorOffset(Format, read->FirstSector, read->FirstSector + read->CurrentSector + sectorsToRead - 1,
- read->FirstSector + read->CurrentSector, &payloadBytesToRead, &payloadOffset);
+ read->FirstSector + read->CurrentSector, &payloadBytesToRead, &payloadOffset, PCtx->PDiskLogPrefix);
if (!isTheFirstPart) {
payloadOffset -= sectorOffset;
@@ -983,7 +987,7 @@ TPDisk::EChunkReadPieceResult TPDisk::ChunkReadPiece(TIntrusivePtr<TChunkRead> &
AtomicAdd(InFlightChunkRead, (ui64)bytesToRead);
if (isTheLastPart) {
- Y_ABORT_UNLESS(read->RemainingSize == 0);
+ Y_VERIFY_S(read->RemainingSize == 0, PCtx->PDiskLogPrefix);
}
ui64 footerTotalSize = sectorsToRead * sizeof(TDataSectorFooter);
@@ -999,7 +1003,7 @@ TPDisk::EChunkReadPieceResult TPDisk::ChunkReadPiece(TIntrusivePtr<TChunkRead> &
completion->CostNs = DriveModel.TimeForSizeNs(bytesToRead, read->ChunkIdx, TDriveModel::OP_TYPE_READ);
LWTRACK(PDiskChunkReadPiecesSendToDevice, orbit, PCtx->PDiskId);
completion->Orbit = std::move(orbit);
- Y_ABORT_UNLESS(bytesToRead <= completion->GetBuffer()->Size());
+ Y_VERIFY_S(bytesToRead <= completion->GetBuffer()->Size(), PCtx->PDiskLogPrefix);
ui8 *data = completion->GetBuffer()->Data();
BlockDevice->PreadAsync(data, bytesToRead, readOffset, completion.Release(),
read->ReqId, &traceId);
@@ -1023,8 +1027,8 @@ TVector<TChunkIdx> TPDisk::LockChunksForOwner(TOwner owner, const ui32 count, TS
auto makeError = [&](TString info) {
guard.Release();
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId
- << " Can't lock " << count << " chunks"
+ str << PCtx->PDiskLogPrefix
+ << "Can't lock " << count << " chunks"
<< " for ownerId# " << owner
<< " sharedFree# " << sharedFree
<< " ownerFree# " << ownerFree
@@ -1057,8 +1061,8 @@ TVector<TChunkIdx> TPDisk::LockChunksForOwner(TOwner owner, const ui32 count, TS
Y_VERIFY_S(state.OwnerId == OwnerUnallocated
|| state.OwnerId == OwnerUnallocatedTrimmed
|| state.CommitState == TChunkState::FREE,
- "PDiskId# " << PCtx->PDiskId << " chunkIdx# " << chunkIdx << " desired ownerId# " << owner
- << " state# " << state.ToString());
+ PCtx->PDiskLogPrefix << "chunkIdx# " << chunkIdx << " desired ownerId# " << owner
+ << " state# " << state.ToString());
P_LOG(PRI_INFO, BPD01, "locked chunk for owner",
(ChunkIdx, chunkIdx),
(OldOwnerId, ui32(state.OwnerId)),
@@ -1255,7 +1259,7 @@ void TPDisk::ChunkUnlock(TChunkUnlock &evChunkUnlock) {
TVector<TChunkIdx> TPDisk::AllocateChunkForOwner(const TRequestBase *req, const ui32 count, TString &errorReason) {
// chunkIdx = 0 is deprecated and will not be soon removed
TGuard<TMutex> guard(StateMutex);
- Y_DEBUG_ABORT_UNLESS(IsOwnerUser(req->Owner));
+ Y_VERIFY_DEBUG_S(IsOwnerUser(req->Owner), PCtx->PDiskLogPrefix);
const ui32 sharedFree = Keeper.GetFreeChunkCount() - 1;
i64 ownerFree = Keeper.GetOwnerFree(req->Owner);
@@ -1265,8 +1269,8 @@ TVector<TChunkIdx> TPDisk::AllocateChunkForOwner(const TRequestBase *req, const
auto makeError = [&](TString info) {
guard.Release();
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId
- << " Can't reserve " << count << " chunks"
+ str << PCtx->PDiskLogPrefix
+ << "Can't reserve " << count << " chunks"
<< " for ownerId# " << req->Owner
<< " sharedFree# " << sharedFree
<< " ownerFree# " << ownerFree
@@ -1300,8 +1304,8 @@ TVector<TChunkIdx> TPDisk::AllocateChunkForOwner(const TRequestBase *req, const
Y_VERIFY_S(state.OwnerId == OwnerUnallocated
|| state.OwnerId == OwnerUnallocatedTrimmed
|| state.CommitState == TChunkState::FREE,
- "PDiskId# " << PCtx->PDiskId << " chunkIdx# " << chunkIdx << " desired ownerId# " << req->Owner
- << " state# " << state.ToString());
+ PCtx->PDiskLogPrefix << "chunkIdx# " << chunkIdx << " desired ownerId# " << req->Owner
+ << " state# " << state.ToString());
state.Nonce = chunkNonce;
state.CurrentNonce = chunkNonce;
P_LOG(PRI_INFO, BPD01, "chunk is allocated",
@@ -1342,8 +1346,8 @@ void TPDisk::ChunkReserve(TChunkReserve &evChunkReserve) {
bool TPDisk::ValidateForgetChunk(ui32 chunkIdx, TOwner owner, TStringStream& outErrorReason) {
TGuard<TMutex> guard(StateMutex);
if (chunkIdx >= ChunkState.size()) {
- outErrorReason << "PDiskId# " << PCtx->PDiskId
- << " Can't forget chunkIdx# " << chunkIdx
+ outErrorReason << PCtx->PDiskLogPrefix
+ << "Can't forget chunkIdx# " << chunkIdx
<< " > total# " << ChunkState.size()
<< " ownerId# " << owner
<< " Marker# BPD89";
@@ -1351,8 +1355,8 @@ bool TPDisk::ValidateForgetChunk(ui32 chunkIdx, TOwner owner, TStringStream& out
return false;
}
if (ChunkState[chunkIdx].OwnerId != owner) {
- outErrorReason << "PDiskId# " << PCtx->PDiskId
- << " Can't forget chunkIdx# " << chunkIdx
+ outErrorReason << PCtx->PDiskLogPrefix
+ << "Can't forget chunkIdx# " << chunkIdx
<< ", ownerId# " << owner
<< " != real ownerId# " << ChunkState[chunkIdx].OwnerId
<< " Marker# BPD90";
@@ -1362,8 +1366,8 @@ bool TPDisk::ValidateForgetChunk(ui32 chunkIdx, TOwner owner, TStringStream& out
if (ChunkState[chunkIdx].CommitState != TChunkState::DATA_RESERVED_DECOMMIT_IN_PROGRESS
&& ChunkState[chunkIdx].CommitState != TChunkState::DATA_COMMITTED_DECOMMIT_IN_PROGRESS
&& ChunkState[chunkIdx].CommitState != TChunkState::DATA_DECOMMITTED) {
- outErrorReason << "PDiskId# " << PCtx->PDiskId
- << " Can't forget chunkIdx# " << chunkIdx
+ outErrorReason << PCtx->PDiskLogPrefix
+ << "Can't forget chunkIdx# " << chunkIdx
<< " in CommitState# " << ChunkState[chunkIdx].CommitState
<< " ownerId# " << owner << " Marker# BPD91";
P_LOG(PRI_ERROR, BPD91, outErrorReason.Str());
@@ -1410,8 +1414,8 @@ void TPDisk::ChunkForget(TChunkForget &evChunkForget) {
QuarantineChunks.push_back(chunkIdx);
break;
default:
- Y_FAIL_S("PDiskId# " << PCtx->PDiskId
- << " ChunkForget with in flight, ownerId# " << (ui32)evChunkForget.Owner
+ Y_FAIL_S(PCtx->PDiskLogPrefix
+ << "ChunkForget with in flight, ownerId# " << (ui32)evChunkForget.Owner
<< " chunkIdx# " << chunkIdx << " unexpected commitState# " << state.CommitState);
}
} else {
@@ -1425,21 +1429,21 @@ void TPDisk::ChunkForget(TChunkForget &evChunkForget) {
state.CommitState = TChunkState::DATA_COMMITTED_DELETE_IN_PROGRESS;
break;
case TChunkState::DATA_DECOMMITTED:
- Y_VERIFY_S(state.CommitsInProgress == 0,
- "PDiskId# " << PCtx->PDiskId << " chunkIdx# " << chunkIdx << " state# " << state.ToString());
+ Y_VERIFY_S(state.CommitsInProgress == 0, PCtx->PDiskLogPrefix
+ << "chunkIdx# " << chunkIdx << " state# " << state.ToString());
P_LOG(PRI_INFO, BPD01, "chunk was forgotten",
(ChunkIdx, chunkIdx),
(OldOwner, (ui32)state.OwnerId),
(NewOwner, (ui32)OwnerUnallocated));
- Y_ABORT_UNLESS(state.OwnerId == evChunkForget.Owner);
+ Y_VERIFY_S(state.OwnerId == evChunkForget.Owner, PCtx->PDiskLogPrefix);
Mon.UncommitedDataChunks->Dec();
state.OwnerId = OwnerUnallocated;
state.CommitState = TChunkState::FREE;
Keeper.PushFreeOwnerChunk(evChunkForget.Owner, chunkIdx);
break;
default:
- Y_FAIL_S("PDiskId# " << PCtx->PDiskId
- << " ChunkForget, ownerId# " << (ui32)evChunkForget.Owner
+ Y_FAIL_S(PCtx->PDiskLogPrefix
+ << "ChunkForget, ownerId# " << (ui32)evChunkForget.Owner
<< " chunkIdx# " << chunkIdx << " unexpected commitState# " << state.CommitState);
}
}
@@ -1578,9 +1582,9 @@ void TPDisk::EventUndelivered(TUndelivered &req) {
void TPDisk::CommitLogChunks(TCommitLogChunks &req) {
TGuard<TMutex> guard(StateMutex);
for (auto it = req.CommitedLogChunks.begin(); it != req.CommitedLogChunks.end(); ++it) {
- Y_VERIFY_S(ChunkState[*it].OwnerId == OwnerSystem, "Unexpected chunkIdx# " << *it << " ownerId# "
- << (ui32)ChunkState[*it].OwnerId << " in CommitLogChunks PDiskId# " << PCtx->PDiskId);
- Y_DEBUG_ABORT_UNLESS(ChunkState[*it].CommitState == TChunkState::LOG_RESERVED);
+ Y_VERIFY_S(ChunkState[*it].OwnerId == OwnerSystem, PCtx->PDiskLogPrefix << "Unexpected chunkIdx# " << *it << " ownerId# "
+ << (ui32)ChunkState[*it].OwnerId << " in CommitLogChunks");
+ Y_VERIFY_DEBUG_S(ChunkState[*it].CommitState == TChunkState::LOG_RESERVED, PCtx->PDiskLogPrefix);
ChunkState[*it].CommitState = TChunkState::LOG_COMMITTED;
}
}
@@ -1610,7 +1614,7 @@ void TPDisk::WriteApplyFormatRecord(TDiskFormat format, const TKey &mainKey) {
// Fill first bytes with magic pattern
ui64 *formatBegin = reinterpret_cast<ui64*>(&format);
ui64 *formatMagicEnd = reinterpret_cast<ui64*>((ui8*)&format + MagicIncompleteFormatSize);
- Y_ABORT_UNLESS((ui8*)formatMagicEnd - (ui8*)formatBegin <= (intptr_t)sizeof(format));
+ Y_VERIFY_S((ui8*)formatMagicEnd - (ui8*)formatBegin <= (intptr_t)sizeof(format), PCtx->PDiskLogPrefix);
Fill(formatBegin, formatMagicEnd, MagicIncompleteFormat);
}
formatWriter.Write(&format, sizeof(TDiskFormat), TReqId(TReqId::WriteApplyFormatRecordWrite, 0), {});
@@ -1731,7 +1735,7 @@ void TPDisk::WriteDiskFormat(ui64 diskSizeBytes, ui32 sectorSizeBytes, ui32 user
void TPDisk::ReplyErrorYardInitResult(TYardInit &evYardInit, const TString &str, NKikimrProto::EReplyStatus status) {
TStringStream error;
- error << "PDiskId# " << PCtx->PDiskId << " YardInit error for VDiskId# " << evYardInit.VDisk.ToStringWOGeneration()
+ error << PCtx->PDiskLogPrefix << "YardInit error for VDiskId# " << evYardInit.VDisk.ToStringWOGeneration()
<< " reason# " << str;
P_LOG(PRI_ERROR, BPD01, error.Str());
ui64 writeBlockSize = ForsetiOpPieceSizeCached;
@@ -1863,7 +1867,7 @@ bool TPDisk::YardInitStart(TYardInit &evYardInit) {
}
// TODO REPLY ERROR
TOwnerData &data = OwnerData[owner];
- Y_VERIFY_S(!data.HaveRequestsInFlight(), "owner# " << owner);
+ Y_VERIFY_S(!data.HaveRequestsInFlight(), PCtx->PDiskLogPrefix << "owner# " << owner);
}
evYardInit.Owner = owner;
@@ -1919,7 +1923,8 @@ void TPDisk::YardInitFinish(TYardInit &evYardInit) {
AtomicIncrement(TotalOwners);
ownerData.VDiskId = vDiskId;
- Y_ABORT_UNLESS(SysLogFirstNoncesToKeep.FirstNonceToKeep[owner] <= SysLogRecord.Nonces.Value[NonceLog]);
+ Y_VERIFY_S(SysLogFirstNoncesToKeep.FirstNonceToKeep[owner] <= SysLogRecord.Nonces.Value[NonceLog],
+ PCtx->PDiskLogPrefix);
SysLogFirstNoncesToKeep.FirstNonceToKeep[owner] = SysLogRecord.Nonces.Value[NonceLog];
ownerData.CutLogId = evYardInit.CutLogId;
ownerData.WhiteboardProxyId = evYardInit.WhiteboardProxyId;
@@ -2017,8 +2022,10 @@ void TPDisk::ForceDeleteChunk(TChunkIdx chunkIdx) {
TGuard<TMutex> guard(StateMutex);
TChunkState &state = ChunkState[chunkIdx];
TOwner owner = state.OwnerId;
- Y_VERIFY_S(!state.HasAnyOperationsInProgress(), "PDiskId# " << PCtx->PDiskId << " ForceDeleteChunk, ownerId# " << owner
- << " chunkIdx# " << chunkIdx << " has operationsInProgress, state# " << state.ToString());
+ Y_VERIFY_S(!state.HasAnyOperationsInProgress(), PCtx->PDiskLogPrefix
+ << "ForceDeleteChunk, ownerId# " << owner
+ << " chunkIdx# " << chunkIdx
+ << " has operationsInProgress, state# " << state.ToString());
switch (state.CommitState) {
case TChunkState::DATA_ON_QUARANTINE:
@@ -2043,7 +2050,7 @@ void TPDisk::ForceDeleteChunk(TChunkIdx chunkIdx) {
// Chunk will be freed in TPDisk::DeleteChunk()
break;
default:
- Y_FAIL_S("PDiskId# " << PCtx->PDiskId << " ForceDeleteChunk, ownerId# " << owner
+ Y_FAIL_S(PCtx->PDiskLogPrefix << "ForceDeleteChunk, ownerId# " << owner
<< " chunkIdx# " << chunkIdx << " unexpected commitState# " << state.CommitState);
break;
}
@@ -2141,7 +2148,7 @@ void TPDisk::KillOwner(TOwner owner, TOwnerRound killOwnerRound, TCompletionEven
lastSeenLsn = Max(it->OwnerLsnRange[owner].LastLsn, lastSeenLsn);
if (!readingLog) {
- Y_ABORT_UNLESS(it->CurrentUserCount > 0);
+ Y_VERIFY_S(it->CurrentUserCount > 0, PCtx->PDiskLogPrefix);
it->CurrentUserCount--;
it->OwnerLsnRange[owner].IsPresent = false;
it->OwnerLsnRange[owner].FirstLsn = 0;
@@ -2164,7 +2171,7 @@ void TPDisk::KillOwner(TOwner owner, TOwnerRound killOwnerRound, TCompletionEven
// TODO(cthulhu): Replace with VERIFY.
SysLogRecord.OwnerVDisks[owner] = TVDiskID::InvalidId;
- Y_ABORT_UNLESS(AtomicGet(TotalOwners) > 0);
+ Y_VERIFY_S(AtomicGet(TotalOwners) > 0, PCtx->PDiskLogPrefix);
AtomicDecrement(TotalOwners);
TOwnerRound ownerRound = OwnerData[owner].OwnerRound;
@@ -2199,7 +2206,7 @@ void TPDisk::Slay(TSlay &evSlay) {
auto it = VDiskOwners.find(vDiskId);
if (it == VDiskOwners.end()) {
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId << " Can't slay VDiskId# " << evSlay.VDiskId;
+ str << PCtx->PDiskLogPrefix << "Can't slay VDiskId# " << evSlay.VDiskId;
str << " as it is not created yet or is already slain"
<< " Marker# BPD31";
P_LOG(PRI_ERROR, BPD31, str.Str());
@@ -2215,7 +2222,7 @@ void TPDisk::Slay(TSlay &evSlay) {
TOwnerRound ownerRound = OwnerData[owner].OwnerRound;
if (evSlay.SlayOwnerRound <= ownerRound) {
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId << " Can't slay VDiskId# " << evSlay.VDiskId;
+ str << PCtx->PDiskLogPrefix << "Can't slay VDiskId# " << evSlay.VDiskId;
str << " as SlayOwnerRound# " << evSlay.SlayOwnerRound << " <= ownerRound# " << ownerRound
<< " Marker# BPD32";
P_LOG(PRI_ERROR, BPD32, str.Str());
@@ -2256,7 +2263,8 @@ void TPDisk::ProcessChunkWriteQueue() {
req->SpanStack.PopOk();
req->SpanStack.Push(TWilson::PDiskDetailed, "PDisk.InBlockDevice", NWilson::EFlags::AUTO_END);
- Y_VERIFY_S(req->GetType() == ERequestType::RequestChunkWritePiece, "Unexpected request type# " << ui64(req->GetType())
+ Y_VERIFY_S(req->GetType() == ERequestType::RequestChunkWritePiece, PCtx->PDiskLogPrefix
+ << "Unexpected request type# " << ui64(req->GetType())
<< " TypeName# " << TypeName(*req) << " in JointChunkWrites");
TChunkWritePiece *piece = static_cast<TChunkWritePiece*>(req);
processed++;
@@ -2300,18 +2308,19 @@ void TPDisk::ProcessChunkReadQueue() {
req->SpanStack.PopOk();
req->SpanStack.Push(TWilson::PDiskDetailed, "PDisk.InBlockDevice", NWilson::EFlags::AUTO_END);
- Y_VERIFY_S(req->GetType() == ERequestType::RequestChunkReadPiece, "Unexpected request type# " << ui64(req->GetType()) << " in JointChunkReads");
+ Y_VERIFY_S(req->GetType() == ERequestType::RequestChunkReadPiece, PCtx->PDiskLogPrefix
+ << "Unexpected request type# " << ui64(req->GetType()) << " in JointChunkReads");
TChunkReadPiece *piece = static_cast<TChunkReadPiece*>(req.Get());
processed++;
processedBytes += piece->PieceSizeLimit;
processedCostMs += piece->GetCostMs();
- Y_ABORT_UNLESS(!piece->SelfPointer);
+ Y_VERIFY_S(!piece->SelfPointer, PCtx->PDiskLogPrefix);
TIntrusivePtr<TChunkRead> &read = piece->ChunkRead;
TReqId reqId = read->ReqId;
ui32 chunkIdx = read->ChunkIdx;
ui8 priorityClass = read->PriorityClass;
NHPTimer::STime creationTime = read->CreationTime;
- Y_VERIFY_S(!read->IsReplied, "read's reqId# " << read->ReqId);
+ Y_VERIFY_S(!read->IsReplied, PCtx->PDiskLogPrefix << "read's reqId# " << read->ReqId);
P_LOG(PRI_DEBUG, BPD36, "Performing TChunkReadPiece", (ReqId, reqId), (chunkIdx, chunkIdx),
(PieceCurrentSector, piece->PieceCurrentSector),
(PieceSizeLimit, piece->PieceSizeLimit),
@@ -2323,7 +2332,8 @@ void TPDisk::ProcessChunkReadQueue() {
EChunkReadPieceResult result = ChunkReadPiece(read, piece->PieceCurrentSector, currentLimit,
piece->SpanStack.GetTraceId(), std::move(piece->Orbit));
bool isComplete = (result != ReadPieceResultInProgress);
- Y_VERIFY_S(isComplete || currentLimit >= piece->PieceSizeLimit, isComplete << " " << currentLimit << " " << piece->PieceSizeLimit);
+ Y_VERIFY_S(isComplete || currentLimit >= piece->PieceSizeLimit, PCtx->PDiskLogPrefix
+ << isComplete << " " << currentLimit << " " << piece->PieceSizeLimit);
piece->OnSuccessfulDestroy(PCtx->ActorSystem);
if (isComplete) {
//
@@ -2353,14 +2363,14 @@ void TPDisk::TrimAllUntrimmedChunks() {
while (ui32 idx = Keeper.PopUntrimmedFreeChunk()) {
BlockDevice->TrimSync(Format.ChunkSize, idx * Format.ChunkSize);
Y_VERIFY_S(ChunkState[idx].OwnerId == OwnerUnallocated || ChunkState[idx].OwnerId == OwnerUnallocatedTrimmed,
- "PDiskId# " << PCtx->PDiskId << " Unexpected ownerId# " << ui32(ChunkState[idx].OwnerId));
+ PCtx->PDiskLogPrefix << "Unexpected ownerId# " << ui32(ChunkState[idx].OwnerId));
ChunkState[idx].OwnerId = OwnerUnallocatedTrimmed;
Keeper.PushTrimmedFreeChunk(idx);
}
}
void TPDisk::ProcessChunkTrimQueue() {
- Y_ABORT_UNLESS(JointChunkTrims.size() <= 1);
+ Y_VERIFY_S(JointChunkTrims.size() <= 1, PCtx->PDiskLogPrefix);
for (auto it = JointChunkTrims.begin(); it != JointChunkTrims.end(); ++it) {
TChunkTrim *trim = (*it);
trim->SpanStack.PopOk();
@@ -2425,7 +2435,7 @@ void TPDisk::ClearQuarantineChunks() {
auto it = LogChunks.begin();
while (it != LogChunks.end()) {
if (it->OwnerLsnRange.size() > owner && it->OwnerLsnRange[owner].IsPresent) {
- Y_ABORT_UNLESS(it->CurrentUserCount > 0);
+ Y_VERIFY_S(it->CurrentUserCount > 0, PCtx->PDiskLogPrefix);
ui32 userCount = --it->CurrentUserCount;
it->OwnerLsnRange[owner].IsPresent = false;
it->OwnerLsnRange[owner].FirstLsn = 0;
@@ -2474,21 +2484,21 @@ void TPDisk::TryTrimChunk(bool prevDone, ui64 trimmedSize, const NWilson::TSpan&
ChunkBeingTrimmed = Keeper.PopUntrimmedFreeChunk();
if (ChunkBeingTrimmed) {
Y_VERIFY_S(ChunkState[ChunkBeingTrimmed].OwnerId == OwnerUnallocated
- || ChunkState[ChunkBeingTrimmed].OwnerId == OwnerUnallocatedTrimmed, "PDiskId# " << PCtx->PDiskId
- << " Unexpected ownerId# " << ui32(ChunkState[ChunkBeingTrimmed].OwnerId));
+ || ChunkState[ChunkBeingTrimmed].OwnerId == OwnerUnallocatedTrimmed, PCtx->PDiskLogPrefix
+ << "Unexpected ownerId# " << ui32(ChunkState[ChunkBeingTrimmed].OwnerId));
}
TrimOffset = 0;
} else if (TrimOffset >= Format.ChunkSize) { // Previous chunk entirely trimmed
Y_VERIFY_S(ChunkState[ChunkBeingTrimmed].OwnerId == OwnerUnallocated
- || ChunkState[ChunkBeingTrimmed].OwnerId == OwnerUnallocatedTrimmed, "PDiskId# " << PCtx->PDiskId
- << " Unexpected ownerId# " << ui32(ChunkState[ChunkBeingTrimmed].OwnerId));
+ || ChunkState[ChunkBeingTrimmed].OwnerId == OwnerUnallocatedTrimmed, PCtx->PDiskLogPrefix
+ << "Unexpected ownerId# " << ui32(ChunkState[ChunkBeingTrimmed].OwnerId));
ChunkState[ChunkBeingTrimmed].OwnerId = OwnerUnallocatedTrimmed;
Keeper.PushTrimmedFreeChunk(ChunkBeingTrimmed);
ChunkBeingTrimmed = Keeper.PopUntrimmedFreeChunk();
if (ChunkBeingTrimmed) {
Y_VERIFY_S(ChunkState[ChunkBeingTrimmed].OwnerId == OwnerUnallocated
- || ChunkState[ChunkBeingTrimmed].OwnerId == OwnerUnallocatedTrimmed, "PDiskId# " << PCtx->PDiskId
- << " Unexpected ownerId# " << ui32(ChunkState[ChunkBeingTrimmed].OwnerId));
+ || ChunkState[ChunkBeingTrimmed].OwnerId == OwnerUnallocatedTrimmed, PCtx->PDiskLogPrefix
+ << "Unexpected ownerId# " << ui32(ChunkState[ChunkBeingTrimmed].OwnerId));
}
TrimOffset = 0;
}
@@ -2591,7 +2601,7 @@ void TPDisk::ProcessFastOperationsQueue() {
ProcessContinueShred(static_cast<TContinueShred&>(*req));
break;
default:
- Y_FAIL_S("Unexpected request type# " << TypeName(*req));
+ Y_FAIL_S(PCtx->PDiskLogPrefix << "Unexpected request type# " << TypeName(*req));
break;
}
}
@@ -2707,7 +2717,7 @@ bool TPDisk::Initialize() {
}
}
- Y_ABORT_UNLESS(BlockDevice);
+ Y_VERIFY_S(BlockDevice, PCtx->PDiskLogPrefix);
BlockDevice->Initialize(PCtx);
IsStarted = true;
@@ -2815,7 +2825,7 @@ NKikimrProto::EReplyStatus TPDisk::ValidateRequest(TLogWrite *logWrite, TStringS
}
void TPDisk::PrepareLogError(TLogWrite *logWrite, TStringStream& err, NKikimrProto::EReplyStatus status) {
- Y_DEBUG_ABORT_UNLESS(status != NKikimrProto::OK);
+ Y_VERIFY_DEBUG_S(status != NKikimrProto::OK, PCtx->PDiskLogPrefix);
if (logWrite->Result && logWrite->Result->Status != NKikimrProto::OK) {
return;
}
@@ -2860,7 +2870,7 @@ NKikimrProto::EReplyStatus TPDisk::CheckOwnerAndRound(TRequestBase* req, TString
// Returns is request valid and should be processed further
bool TPDisk::PreprocessRequest(TRequestBase *request) {
TStringStream err;
- err << "PDiskId# " << PCtx->PDiskId << " ";
+ err << PCtx->PDiskLogPrefix;
// Advisory check, further code may ignore results
NKikimrProto::EReplyStatus errStatus = CheckOwnerAndRound(request, err);
@@ -2937,7 +2947,7 @@ bool TPDisk::PreprocessRequest(TRequestBase *request) {
}
ui64 offset = 0;
if (!ParseSectorOffset(Format, PCtx->ActorSystem, PCtx->PDiskId, read->Offset, read->Size,
- read->FirstSector, read->LastSector, offset)) {
+ read->FirstSector, read->LastSector, offset, PCtx->PDiskLogPrefix)) {
err << "invalid size# " << read->Size << " and offset# " << read->Offset;
SendChunkReadError(read, err, NKikimrProto::ERROR);
return false;
@@ -2947,7 +2957,7 @@ bool TPDisk::PreprocessRequest(TRequestBase *request) {
ownerData.ReadThroughput.Increment(read->Size, PCtx->ActorSystem->Timestamp());
request->JobKind = NSchLab::JobKindRead;
- Y_ABORT_UNLESS(read->FinalCompletion == nullptr);
+ Y_VERIFY_S(read->FinalCompletion == nullptr, PCtx->PDiskLogPrefix);
++state.OperationsInProgress;
@@ -3007,7 +3017,7 @@ bool TPDisk::PreprocessRequest(TRequestBase *request) {
delete request;
return false;
} else {
- Y_DEBUG_ABORT_UNLESS(chunks.size() == 1);
+ Y_VERIFY_DEBUG_S(chunks.size() == 1, PCtx->PDiskLogPrefix);
ev.ChunkIdx = chunks.front();
}
}
@@ -3212,7 +3222,7 @@ bool TPDisk::PreprocessRequest(TRequestBase *request) {
case ERequestType::RequestChunkReadPiece:
case ERequestType::RequestChunkWritePiece:
case ERequestType::RequestNop:
- Y_ABORT();
+ Y_ABORT_S(PCtx->PDiskLogPrefix);
}
return true;
}
@@ -3244,12 +3254,12 @@ void TPDisk::PushRequestToScheduler(TRequestBase *request) {
AddJobToScheduler(piece, request->JobKind);
remainingSize -= jobSize;
}
- Y_VERIFY_S(remainingSize == 0, remainingSize);
+ Y_VERIFY_S(remainingSize == 0, PCtx->PDiskLogPrefix << "remainingSize# " << remainingSize);
} else if (request->GetType() == ERequestType::RequestChunkRead) {
TIntrusivePtr<TChunkRead> read = std::move(static_cast<TChunkRead*>(request)->SelfPointer);
ui32 totalSectors = read->LastSector - read->FirstSector + 1;
- Y_DEBUG_ABORT_UNLESS(ForsetiOpPieceSizeCached % Format.SectorSize == 0);
+ Y_VERIFY_DEBUG_S(ForsetiOpPieceSizeCached % Format.SectorSize == 0, PCtx->PDiskLogPrefix);
const ui32 jobSizeLimit = ForsetiOpPieceSizeCached / Format.SectorSize;
const ui32 jobCount = (totalSectors + jobSizeLimit - 1) / jobSizeLimit;
for (ui32 idx = 0; idx < jobCount; ++idx) {
@@ -3269,7 +3279,7 @@ void TPDisk::PushRequestToScheduler(TRequestBase *request) {
AddJobToScheduler(piece, request->JobKind);
totalSectors -= jobSize;
}
- Y_VERIFY_S(totalSectors == 0, totalSectors);
+ Y_VERIFY_S(totalSectors == 0, PCtx->PDiskLogPrefix << "totalSectors# " << totalSectors);
} else {
AddJobToScheduler(request, request->JobKind);
}
@@ -3302,8 +3312,8 @@ void TPDisk::AddJobToScheduler(TRequestBase *request, NSchLab::EJobKind jobKind)
cbs = ForsetiScheduler.GetCbs(OwnerSystem, request->GateId);
if (!cbs) {
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId
- << " ReqId# " << request->ReqId
+ str << PCtx->PDiskLogPrefix
+ << "ReqId# " << request->ReqId
<< " Cost# " << request->Cost
<< " JobKind# " << (ui64)request->JobKind
<< " ownerId# " << request->Owner
@@ -3439,7 +3449,7 @@ void TPDisk::ProcessPausedQueue() {
if (ev->Action == NPDisk::TEvYardControl::ActionPause) {
if (PreprocessRequest(ev)) {
- Y_ABORT();
+ Y_ABORT_S(PCtx->PDiskLogPrefix);
}
break;
}
@@ -3945,7 +3955,7 @@ TChunkIdx TPDisk::GetUnshreddedFreeChunk() {
if (state.CommitState == TChunkState::FREE && state.IsDirty && state.ShredGeneration < ShredGeneration) {
// Found an unshredded free chunk
TChunkIdx unshreddedChunkIdx = freeChunks->PopAt(it);
- Y_VERIFY(unshreddedChunkIdx == chunkIdx);
+ Y_VERIFY_S(unshreddedChunkIdx == chunkIdx, PCtx->PDiskLogPrefix);
// Mark it as being shredded and update its generation
LOG_DEBUG_S(*PCtx->ActorSystem, NKikimrServices::BS_PDISK_SHRED,
"PDisk# " << PCtx->PDiskId
@@ -4053,7 +4063,7 @@ void TPDisk::ProgressShredState() {
state.OperationsInProgress--;
state.IsDirty = false;
state.ShredGeneration = ShredGeneration;
- Y_VERIFY(ChunkState[ChunkBeingShredded].OperationsInProgress == 0);
+ Y_VERIFY_S(ChunkState[ChunkBeingShredded].OperationsInProgress == 0, PCtx->PDiskLogPrefix);
Keeper.UntrimmedFreeChunks.PushFront(ChunkBeingShredded);
ChunkBeingShredded = GetUnshreddedFreeChunk();
ChunkBeingShreddedIteration = 0;
@@ -4063,7 +4073,7 @@ void TPDisk::ProgressShredState() {
}
if (ChunkBeingShredded) {
if (ChunkBeingShreddedIteration == 0 && ChunkBeingShreddedNextSectorIdx == 0) {
- Y_VERIFY(ChunkState[ChunkBeingShredded].OperationsInProgress == 0);
+ Y_VERIFY_S(ChunkState[ChunkBeingShredded].OperationsInProgress == 0, PCtx->PDiskLogPrefix);
ChunkState[ChunkBeingShredded].OperationsInProgress++;
}
// Continue shredding the chunk: send a write request to the device using the iteration-specific pattern
@@ -4419,7 +4429,7 @@ void TPDisk::ProcessChunkShredResult(TChunkShredResult& request) {
"ProcessChunkShredResult at PDisk# " << PCtx->PDiskId
<< " ShredGeneration# " << ShredGeneration
<< " request# " << request.ToString());
- Y_ABORT_UNLESS(ChunkBeingShreddedInFlight > 0);
+ Y_VERIFY_S(ChunkBeingShreddedInFlight > 0, PCtx->PDiskLogPrefix);
--ChunkBeingShreddedInFlight;
ProgressShredState();
}
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.h
index 455ec8cd08..553923f875 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl.h
@@ -428,7 +428,8 @@ public:
const TKey& key, ui64 sequenceNumber, ui32 recordIndex, ui32 totalRecords);
bool WriteMetadataSync(TRcBuf&& metadata, const TDiskFormat& format);
- static std::optional<TMetadataFormatSector> CheckMetadataFormatSector(const ui8 *data, size_t len, const TMainKey& mainKey);
+ static std::optional<TMetadataFormatSector> CheckMetadataFormatSector(const ui8 *data, size_t len,
+ const TMainKey& mainKey, const TString& logPrefix);
static void MakeMetadataFormatSector(ui8 *data, const TMainKey& mainKey, const TMetadataFormatSector& format);
NMeta::TFormatted& GetFormattedMeta();
@@ -470,10 +471,10 @@ private:
};
void ParsePayloadFromSectorOffset(const TDiskFormat& format, ui64 firstSector, ui64 lastSector, ui64 currentSector,
- ui64 *outPayloadBytes, ui64 *outPayloadOffset);
+ ui64 *outPayloadBytes, ui64 *outPayloadOffset, const TString& logPrefix);
bool ParseSectorOffset(const TDiskFormat& format, TActorSystem *actorSystem, ui32 pDiskId, ui64 offset, ui64 size,
- ui64 &outSectorIdx, ui64 &outLastSectorIdx, ui64 &outSectorOffset);
+ ui64 &outSectorIdx, ui64 &outLastSectorIdx, ui64 &outSectorOffset, const TString& logPrefix);
} // NPDisk
} // NKikimr
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp
index 8b8dca1084..5dd7c1fda0 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_log.cpp
@@ -107,7 +107,7 @@ void TPDisk::InitLogChunksInfo() {
if (!keep && it->OwnerLsnRange.size() > owner && it->OwnerLsnRange[owner].IsPresent) {
TLogChunkInfo::TLsnRange &range = it->OwnerLsnRange[owner];
range.IsPresent = false;
- Y_ABORT_UNLESS(it->CurrentUserCount > 0);
+ Y_VERIFY_S(it->CurrentUserCount > 0, PCtx->PDiskLogPrefix);
it->CurrentUserCount--;
P_LOG(PRI_INFO, BPD01, "InitLogChunksInfo, chunk is dereferenced by owner",
(ChunkIdx, it->ChunkIdx),
@@ -160,9 +160,9 @@ void TPDisk::PrintLogChunksInfo(const TString& msg) {
bool TPDisk::LogNonceJump(ui64 previousNonce) {
bool isWhole = CommonLogger->SectorBytesFree >= sizeof(TNonceJumpLogPageHeader2);
- Y_ABORT_UNLESS(isWhole);
+ Y_VERIFY_S(isWhole, PCtx->PDiskLogPrefix);
- Y_ABORT_UNLESS(CommonLogger->NextChunks.size() == 0);
+ Y_VERIFY_S(CommonLogger->NextChunks.size() == 0, PCtx->PDiskLogPrefix);
if (!PreallocateLogChunks(CommonLogger->SectorBytesFree, OwnerSystem, 0, EOwnerGroupType::Static, true)) {
return false;
}
@@ -255,7 +255,7 @@ bool TPDisk::ProcessChunk0(const NPDisk::TEvReadLogResult &readLogResult, TStrin
// Parse VDiskOwners
ui32 ownerCount = sizeof(sysLogRecord->OwnerVDisks) / sizeof(TVDiskID);
- Y_ABORT_UNLESS(ownerCount <= 256);
+ Y_VERIFY_S(ownerCount <= 256, PCtx->PDiskLogPrefix);
for (ui32 i = 0; i < ownerCount; ++i) {
TVDiskID &id = sysLogRecord->OwnerVDisks[i];
id.GroupGeneration = -1; // Clear GroupGeneration in sys log record (for compatibility)
@@ -273,9 +273,9 @@ bool TPDisk::ProcessChunk0(const NPDisk::TEvReadLogResult &readLogResult, TStrin
// Set initial chunk owners
// Use actual format info to set busy chunks mask
ui32 chunkCount = (ui32)(Format.DiskSize / (ui64)Format.ChunkSize);
- Y_DEBUG_ABORT_UNLESS(ChunkState.size() == 0);
+ Y_VERIFY_DEBUG_S(ChunkState.size() == 0, PCtx->PDiskLogPrefix);
ChunkState = TVector<TChunkState>(chunkCount);
- Y_ABORT_UNLESS(ChunkState.size() >= Format.SystemChunkCount);
+ Y_VERIFY_S(ChunkState.size() >= Format.SystemChunkCount, PCtx->PDiskLogPrefix);
for (ui32 i = 0; i < Format.SystemChunkCount; ++i) {
ChunkState[i].OwnerId = OwnerSystem;
}
@@ -407,27 +407,27 @@ bool TPDisk::ProcessChunk0(const NPDisk::TEvReadLogResult &readLogResult, TStrin
SysLogFirstNoncesToKeep.Clear();
} else {
ui64 minSize = noneSize + sizeof(TSysLogFirstNoncesToKeep);
- Y_VERIFY_S(lastSysLogRecord.size() >= minSize,
- "SysLogRecord is too small, minSize# " << minSize << " size# " << lastSysLogRecord.size());
+ Y_VERIFY_S(lastSysLogRecord.size() >= minSize, PCtx->PDiskLogPrefix
+ << "SysLogRecord is too small, minSize# " << minSize << " size# " << lastSysLogRecord.size());
memcpy(&SysLogFirstNoncesToKeep, firstNoncesToKeep, sizeof(TSysLogFirstNoncesToKeep));
}
}
TChunkTrimInfo *trimStateEnd = nullptr;
if (sysLogRecord->Version >= PDISK_SYS_LOG_RECORD_VERSION_4) {
- Y_ABORT_UNLESS(firstNoncesToKeep);
+ Y_VERIFY_S(firstNoncesToKeep, PCtx->PDiskLogPrefix);
ui64 *trimInfoBytesPtr = (ui64*)(firstNoncesToKeep + 1);
ui64 minSize = (ui64)((char*)(trimInfoBytesPtr + 1) - (char*)sysLogRecord);
- Y_VERIFY_S(lastSysLogRecord.size() >= minSize,
- "SysLogRecord is too small, minSize# " << minSize << " size# " << lastSysLogRecord.size());
+ Y_VERIFY_S(lastSysLogRecord.size() >= minSize, PCtx->PDiskLogPrefix
+ << "SysLogRecord is too small, minSize# " << minSize << " size# " << lastSysLogRecord.size());
ui64 trimInfoBytes = ReadUnaligned<ui64>(trimInfoBytesPtr);
TChunkTrimInfo *trimState = (TChunkTrimInfo*)(trimInfoBytesPtr + 1);
trimStateEnd = trimState + trimInfoBytes / sizeof(TChunkTrimInfo);
minSize = (ui64)((char*)trimStateEnd - (char*)sysLogRecord);
- Y_VERIFY_S(lastSysLogRecord.size() >= minSize,
- "SysLogRecord is too small, minSize# " << minSize << " size# " << lastSysLogRecord.size());
+ Y_VERIFY_S(lastSysLogRecord.size() >= minSize, PCtx->PDiskLogPrefix
+ << "SysLogRecord is too small, minSize# " << minSize << " size# " << lastSysLogRecord.size());
Y_VERIFY_S(trimInfoBytes == 0 || trimInfoBytes == TChunkTrimInfo::SizeForChunkCount(chunkCount),
- "SysLogRecord's ChunkTrimInfo has size# " << trimInfoBytes
+ PCtx->PDiskLogPrefix << "SysLogRecord's ChunkTrimInfo has size# " << trimInfoBytes
<< " different from expected# " << TChunkTrimInfo::SizeForChunkCount(chunkCount));
for (ui32 i = 0; i < chunkCount; i++) {
if (trimState[i / 8].IsChunkTrimmed(i % 8) && ChunkState[i].OwnerId == OwnerUnallocated) {
@@ -441,12 +441,12 @@ bool TPDisk::ProcessChunk0(const NPDisk::TEvReadLogResult &readLogResult, TStrin
ui32 *firstChunkEnd = nullptr;
if (sysLogRecord->Version >= PDISK_SYS_LOG_RECORD_VERSION_6) {
- Y_ABORT_UNLESS(trimStateEnd);
+ Y_VERIFY_S(trimStateEnd, PCtx->PDiskLogPrefix);
ui32 *firstChunk = reinterpret_cast<ui32*>(trimStateEnd);
firstChunkEnd = firstChunk + 1;
ui64 minSize = (ui64)((char*)firstChunkEnd - (char*)sysLogRecord);
- Y_VERIFY_S(lastSysLogRecord.size() >= minSize,
- "SysLogRecord is too small, minSize# " << minSize << " size# " << lastSysLogRecord.size());
+ Y_VERIFY_S(lastSysLogRecord.size() >= minSize, PCtx->PDiskLogPrefix
+ << "SysLogRecord is too small, minSize# " << minSize << " size# " << lastSysLogRecord.size());
FirstLogChunkToParseCommits = ReadUnaligned<ui32>(firstChunk);
}
@@ -454,29 +454,29 @@ bool TPDisk::ProcessChunk0(const NPDisk::TEvReadLogResult &readLogResult, TStrin
char *compatibilityInfoEnd = nullptr;
if (sysLogRecord->Version >= PDISK_SYS_LOG_RECORD_VERSION_7) {
- Y_ABORT_UNLESS(firstChunkEnd);
+ Y_VERIFY_S(firstChunkEnd, PCtx->PDiskLogPrefix);
ui32 *protoSizePtr = reinterpret_cast<ui32*>(firstChunkEnd);
ui32 *protoSizePtrEnd = protoSizePtr + 1;
ui64 minSize = (ui64)((char*)protoSizePtrEnd - (char*)sysLogRecord);
- Y_VERIFY_S(lastSysLogRecord.size() >= minSize,
- "SysLogRecord is too small, minSize# " << minSize << " size# " << lastSysLogRecord.size());
+ Y_VERIFY_S(lastSysLogRecord.size() >= minSize, PCtx->PDiskLogPrefix
+ << "SysLogRecord is too small, minSize# " << minSize << " size# " << lastSysLogRecord.size());
ui32 protoSize = ReadUnaligned<ui32>(protoSizePtr);
- Y_ABORT_UNLESS(protoSize > 0);
+ Y_VERIFY_S(protoSize > 0, PCtx->PDiskLogPrefix);
char *compatibilityInfo = reinterpret_cast<char*>(protoSizePtrEnd);
compatibilityInfoEnd = compatibilityInfo + protoSize;
minSize += protoSize;
- Y_VERIFY_S(lastSysLogRecord.size() >= minSize,
- "SysLogRecord is too small, minSize# " << minSize << " size# " << lastSysLogRecord.size());
+ Y_VERIFY_S(lastSysLogRecord.size() >= minSize, PCtx->PDiskLogPrefix
+ << "SysLogRecord is too small, minSize# " << minSize << " size# " << lastSysLogRecord.size());
if (!suppressCompatibilityCheck) {
auto storedCompatibilityInfo = NKikimrConfig::TStoredCompatibilityInfo();
bool success = storedCompatibilityInfo.ParseFromArray(compatibilityInfo, protoSize);
- Y_ABORT_UNLESS(success);
+ Y_VERIFY_S(success, PCtx->PDiskLogPrefix);
bool isCompatible = CompatibilityInfo.CheckCompatibility(&storedCompatibilityInfo,
NKikimrConfig::TCompatibilityRule::PDisk, errorReason);
@@ -558,7 +558,7 @@ TRcBuf TPDisk::ProcessReadSysLogResult(ui64 &outWritePosition, ui64 &outLsn,
outWritePosition = (firstSysLogSectorIdx + sectorGroup % Format.SysLogSectorCount * ReplicationFactor)
* Format.SectorSize;
- Y_ABORT_UNLESS(outWritePosition > 0);
+ Y_VERIFY_S(outWritePosition > 0, PCtx->PDiskLogPrefix);
if (!readLogResult.Results.size()) {
P_LOG(PRI_ERROR, BPD54, "ProcessReadSysLogResult Results.size() == 0");
@@ -741,7 +741,7 @@ void TPDisk::WriteSysLogRestorePoint(TCompletionAction *action, TReqId reqId, NW
SerializedCompatibilityInfo.emplace(TString());
auto stored = CompatibilityInfo.MakeStored(NKikimrConfig::TCompatibilityRule::PDisk);
bool success = stored.SerializeToString(&*SerializedCompatibilityInfo);
- Y_ABORT_UNLESS(success);
+ Y_VERIFY_S(success, PCtx->PDiskLogPrefix);
}
ui32 compatibilityInfoSize = SerializedCompatibilityInfo->size();
@@ -823,7 +823,7 @@ void TPDisk::ProcessLogWriteBatch(TVector<TLogWrite*> logWrites, TVector<TLogWri
size_t logOperationSizeBytes = 0;
TVector<ui32> logChunksToCommit;
for (TLogWrite *logWrite : logWrites) {
- Y_DEBUG_ABORT_UNLESS(logWrite);
+ Y_VERIFY_DEBUG_S(logWrite, PCtx->PDiskLogPrefix);
Mon.LogQueueTime.Increment(logWrite->LifeDurationMs(now));
logWrite->SpanStack.PopOk();
logOperationSizeBytes += logWrite->Data.size();
@@ -900,7 +900,7 @@ bool TPDisk::AllocateLogChunks(ui32 chunksNeeded, ui32 chunksContainingPayload,
if (IsOwnerUser(owner)) {
Y_VERIFY_S(LogChunks.empty() || chunksNeeded > 0 || LogChunks.back().ChunkIdx == CommonLogger->ChunkIdx,
- "PDiskId# " << PCtx->PDiskId << " Chunk idx mismatch! back# " << LogChunks.back().ChunkIdx
+ PCtx->PDiskLogPrefix << "Chunk idx mismatch! back# " << LogChunks.back().ChunkIdx
<< " pre-back# " << (LogChunks.rbegin()->ChunkIdx == LogChunks.begin()->ChunkIdx ?
0 : (++LogChunks.rbegin())->ChunkIdx)
<< " logger# " << CommonLogger->ChunkIdx);
@@ -922,10 +922,10 @@ bool TPDisk::AllocateLogChunks(ui32 chunksNeeded, ui32 chunksContainingPayload,
bool isDirtyMarked = false;
for (ui32 i = 0; i < chunksNeeded; ++i) {
ui32 chunkIdx = Keeper.PopOwnerFreeChunk(keeperOwner, errorReason);
- Y_VERIFY_S(chunkIdx, "errorReason# " << errorReason);
+ Y_VERIFY_S(chunkIdx, PCtx->PDiskLogPrefix << "errorReason# " << errorReason);
Y_VERIFY_S(ChunkState[chunkIdx].OwnerId == OwnerUnallocated ||
- ChunkState[chunkIdx].OwnerId == OwnerUnallocatedTrimmed, "PDiskId# " << PCtx->PDiskId <<
- " Unexpected ownerId# " << ui32(ChunkState[chunkIdx].OwnerId));
+ ChunkState[chunkIdx].OwnerId == OwnerUnallocatedTrimmed, PCtx->PDiskLogPrefix <<
+ "Unexpected ownerId# " << ui32(ChunkState[chunkIdx].OwnerId));
ChunkState[chunkIdx].CommitState = TChunkState::LOG_RESERVED;
if (TPDisk::IS_SHRED_ENABLED && !ChunkState[chunkIdx].IsDirty) {
ChunkState[chunkIdx].IsDirty = true;
@@ -953,7 +953,7 @@ bool TPDisk::AllocateLogChunks(ui32 chunksNeeded, ui32 chunksContainingPayload,
}
void TPDisk::LogWrite(TLogWrite &evLog, TVector<ui32> &logChunksToCommit) {
- Y_DEBUG_ABORT_UNLESS(!evLog.Result);
+ Y_VERIFY_DEBUG_S(!evLog.Result, PCtx->PDiskLogPrefix);
OwnerData[evLog.Owner].Status = TOwnerData::VDISK_STATUS_LOGGED;
bool isCommitRecord = evLog.Signature.HasCommitRecord();
@@ -974,13 +974,13 @@ void TPDisk::LogWrite(TLogWrite &evLog, TVector<ui32> &logChunksToCommit) {
if (!PreallocateLogChunks(headedRecordSize, evLog.Owner, evLog.Lsn, evLog.OwnerGroupType, isAllowedForSpaceRed)) {
// TODO: make sure that commit records that delete chunks are applied atomically even if this error occurs.
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId << " Can't preallocate log chunks!"
+ str << PCtx->PDiskLogPrefix << "Can't preallocate log chunks!"
<< " Marker# BPD70";
P_LOG(PRI_ERROR, BPD70, str.Str());
evLog.Result.Reset(new NPDisk::TEvLogResult(NKikimrProto::OUT_OF_SPACE,
NotEnoughDiskSpaceStatusFlags(evLog.Owner, evLog.OwnerGroupType), str.Str(),
Keeper.GetLogChunkCount()));
- Y_ABORT_UNLESS(evLog.Result.Get());
+ Y_VERIFY_S(evLog.Result.Get(), PCtx->PDiskLogPrefix);
evLog.Result->Results.push_back(NPDisk::TEvLogResult::TRecord(evLog.Lsn, evLog.Cookie));
return;
}
@@ -1044,11 +1044,11 @@ void TPDisk::LogWrite(TLogWrite &evLog, TVector<ui32> &logChunksToCommit) {
}
}
}
- Y_ABORT_UNLESS(CommonLogger->NextChunks.empty());
+ Y_VERIFY_S(CommonLogger->NextChunks.empty(), PCtx->PDiskLogPrefix);
evLog.Result.Reset(new NPDisk::TEvLogResult(NKikimrProto::OK,
GetStatusFlags(OwnerSystem, evLog.OwnerGroupType), "", Keeper.GetLogChunkCount()));
- Y_ABORT_UNLESS(evLog.Result.Get());
+ Y_VERIFY_S(evLog.Result.Get(), PCtx->PDiskLogPrefix);
evLog.Result->Results.push_back(NPDisk::TEvLogResult::TRecord(evLog.Lsn, evLog.Cookie));
}
@@ -1058,7 +1058,7 @@ void TPDisk::LogFlush(TCompletionAction *action, TVector<ui32> *logChunksToCommi
if (!CommonLogger->IsEmptySector()) {
size_t prevPreallocatedSize = CommonLogger->NextChunks.size();
if (!PreallocateLogChunks(CommonLogger->SectorBytesFree, OwnerSystem, 0, EOwnerGroupType::Static, true)) {
- Y_ABORT("Last chunk is over, how did you do that?!");
+ Y_ABORT_S(PCtx->PDiskLogPrefix << "Last chunk is over, how did you do that?!");
}
size_t nextPreallocatedSize = CommonLogger->NextChunks.size();
if (nextPreallocatedSize != prevPreallocatedSize && logChunksToCommit) {
@@ -1121,7 +1121,7 @@ NKikimrProto::EReplyStatus TPDisk::BeforeLoggingCommitRecord(const TLogWrite &lo
if (!isLogged) {
isLogged = true;
LOG_CRIT_S(*PCtx->ActorSystem, NKikimrServices::BS_PDISK_SHRED,
- "Commit DirtyChunk contains invalid chunkIdx# " << chunkIdx << " for PDisk# " << PCtx->PDiskId
+ PCtx->PDiskLogPrefix << "Commit DirtyChunk contains invalid chunkIdx# " << chunkIdx
<< " ShredGeneration# " << ShredGeneration);
}
} else {
@@ -1129,7 +1129,7 @@ NKikimrProto::EReplyStatus TPDisk::BeforeLoggingCommitRecord(const TLogWrite &lo
ChunkState[chunkIdx].IsDirty = true;
isDirtyMarked = true;
LOG_DEBUG_S(*PCtx->ActorSystem, NKikimrServices::BS_PDISK_SHRED,
- "PDisk# " << PCtx->PDiskId << " marked chunkIdx# " << chunkIdx << " as dirty"
+ PCtx->PDiskLogPrefix << "marked chunkIdx# " << chunkIdx << " as dirty"
<< " chunk.ShredGeneration# " << ChunkState[chunkIdx].ShredGeneration
<< " ShredGeneration# " << ShredGeneration);
}
@@ -1153,7 +1153,7 @@ NKikimrProto::EReplyStatus TPDisk::BeforeLoggingCommitRecord(const TLogWrite &lo
state.CommitState = TChunkState::DATA_COMMITTED_DECOMMIT_IN_PROGRESS;
break;
default:
- Y_FAIL_S("PDiskID# " << PCtx->PDiskId << " can't delete to decomitted chunkIdx# " << chunkIdx
- Y_FAIL_S(PCtx->PDiskLogPrefix << "can't delete to decommitted chunkIdx# " << chunkIdx
<< " request ownerId# " << logWrite.Owner
<< " as it is in unexpected CommitState# " << state.ToString());
break;
@@ -1180,7 +1180,7 @@ NKikimrProto::EReplyStatus TPDisk::BeforeLoggingCommitRecord(const TLogWrite &lo
state.CommitState = TChunkState::DATA_COMMITTED_DELETE_ON_QUARANTINE;
break;
default:
- Y_FAIL_S("PDiskID# " << PCtx->PDiskId << " can't delete chunkIdx# " << chunkIdx
+ Y_FAIL_S(PCtx->PDiskLogPrefix << "can't delete chunkIdx# " << chunkIdx
<< " request ownerId# " << logWrite.Owner
<< " with operations in progress as it is in unexpected CommitState# " << state.ToString());
break;
@@ -1197,7 +1197,7 @@ NKikimrProto::EReplyStatus TPDisk::BeforeLoggingCommitRecord(const TLogWrite &lo
Mon.CommitedDataChunks->Dec();
state.CommitState = TChunkState::DATA_COMMITTED_DELETE_IN_PROGRESS;
} else {
- Y_FAIL_S("PDiskID# " << PCtx->PDiskId << " can't delete chunkIdx# " << chunkIdx
+ Y_FAIL_S(PCtx->PDiskLogPrefix << "can't delete chunkIdx# " << chunkIdx
<< " request ownerId# " << logWrite.Owner
<< " as it is in unexpected CommitState# " << state.ToString());
}
@@ -1213,8 +1213,8 @@ NKikimrProto::EReplyStatus TPDisk::BeforeLoggingCommitRecord(const TLogWrite &lo
bool TPDisk::ValidateCommitChunk(ui32 chunkIdx, TOwner owner, TStringStream& outErrorReason) {
TGuard<TMutex> guard(StateMutex);
if (chunkIdx >= ChunkState.size()) {
- outErrorReason << "PDiskId# " << PCtx->PDiskId
- << " Can't commit chunkIdx# " << chunkIdx
+ outErrorReason << PCtx->PDiskLogPrefix
+ << "Can't commit chunkIdx# " << chunkIdx
<< " > total# " << ChunkState.size()
<< " ownerId# " << owner
<< " Marker# BPD74";
@@ -1222,8 +1222,8 @@ bool TPDisk::ValidateCommitChunk(ui32 chunkIdx, TOwner owner, TStringStream& out
return false;
}
if (ChunkState[chunkIdx].OwnerId != owner) {
- outErrorReason << "PDiskId# " << PCtx->PDiskId
- << " Can't commit chunkIdx# " << chunkIdx
+ outErrorReason << PCtx->PDiskLogPrefix
+ << "Can't commit chunkIdx# " << chunkIdx
<< ", ownerId# " << owner
<< " != real ownerId# " << ChunkState[chunkIdx].OwnerId
<< " Marker# BPD75";
@@ -1232,8 +1232,8 @@ bool TPDisk::ValidateCommitChunk(ui32 chunkIdx, TOwner owner, TStringStream& out
}
if (ChunkState[chunkIdx].CommitState != TChunkState::DATA_RESERVED
&& ChunkState[chunkIdx].CommitState != TChunkState::DATA_COMMITTED) {
- outErrorReason << "PDiskId# " << PCtx->PDiskId
- << " Can't commit chunkIdx# " << chunkIdx
+ outErrorReason << PCtx->PDiskLogPrefix
+ << "Can't commit chunkIdx# " << chunkIdx
<< " in CommitState# " << ChunkState[chunkIdx].CommitState
<< " ownerId# " << owner << " Marker# BPD83";
P_LOG(PRI_ERROR, BPD01, outErrorReason.Str());
@@ -1246,7 +1246,7 @@ bool TPDisk::ValidateCommitChunk(ui32 chunkIdx, TOwner owner, TStringStream& out
void TPDisk::CommitChunk(ui32 chunkIdx) {
TGuard<TMutex> guard(StateMutex);
TChunkState &state = ChunkState[chunkIdx];
- Y_ABORT_UNLESS(state.CommitsInProgress > 0);
+ Y_VERIFY_S(state.CommitsInProgress > 0, PCtx->PDiskLogPrefix);
--state.CommitsInProgress;
switch (state.CommitState) {
@@ -1269,7 +1269,7 @@ void TPDisk::CommitChunk(ui32 chunkIdx) {
// Do nothing
break;
default:
- Y_FAIL_S("PDiskID# " << PCtx->PDiskId << " can't commit chunkIdx# " << chunkIdx
+ Y_FAIL_S(PCtx->PDiskLogPrefix << "can't commit chunkIdx# " << chunkIdx
<< " as it is in unexpected CommitState# " << state.ToString());
break;
}
@@ -1278,8 +1278,8 @@ void TPDisk::CommitChunk(ui32 chunkIdx) {
bool TPDisk::ValidateDeleteChunk(ui32 chunkIdx, TOwner owner, TStringStream& outErrorReason) {
TGuard<TMutex> guard(StateMutex);
if (chunkIdx >= ChunkState.size()) {
- outErrorReason << "PDiskId# " << PCtx->PDiskId
- << " Can't delete chunkIdx# " << (ui32)chunkIdx
+ outErrorReason << PCtx->PDiskLogPrefix
+ << "Can't delete chunkIdx# " << (ui32)chunkIdx
<< " > total# " << (ui32)ChunkState.size()
<< " ownerId# " << (ui32)owner << "!"
<< " Marker# BPD76";
@@ -1287,8 +1287,8 @@ bool TPDisk::ValidateDeleteChunk(ui32 chunkIdx, TOwner owner, TStringStream& out
return false;
}
if (ChunkState[chunkIdx].OwnerId != owner) {
- outErrorReason << "PDiskId# " << PCtx->PDiskId
- << " Can't delete chunkIdx# " << (ui32)chunkIdx
+ outErrorReason << PCtx->PDiskLogPrefix
+ << "Can't delete chunkIdx# " << (ui32)chunkIdx
<< " ownerId# " << (ui32)owner
<< " != trueOwnerId# " << (ui32)ChunkState[chunkIdx].OwnerId << "!"
<< " Marker# BPD77";
@@ -1297,8 +1297,8 @@ bool TPDisk::ValidateDeleteChunk(ui32 chunkIdx, TOwner owner, TStringStream& out
}
if (ChunkState[chunkIdx].CommitState != TChunkState::DATA_RESERVED
&& ChunkState[chunkIdx].CommitState != TChunkState::DATA_COMMITTED) {
- outErrorReason << "PDiskId# " << PCtx->PDiskId
- << " Can't delete chunkIdx# " << (ui32)chunkIdx
+ outErrorReason << PCtx->PDiskLogPrefix
+ << "Can't delete chunkIdx# " << (ui32)chunkIdx
<< " in CommitState# " << ChunkState[chunkIdx].CommitState
<< " ownerId# " << (ui32)owner << " Marker# BPD82";
P_LOG(PRI_ERROR, BPD01, outErrorReason.Str());
@@ -1322,25 +1322,25 @@ void TPDisk::DeleteChunk(ui32 chunkIdx, TOwner owner) {
case TChunkState::DATA_RESERVED_DELETE_IN_PROGRESS:
[[fallthrough]];
case TChunkState::DATA_COMMITTED_DELETE_IN_PROGRESS:
- Y_VERIFY_S(state.CommitsInProgress == 0,
- "PDiskId# " << PCtx->PDiskId << " chunkIdx# " << chunkIdx << " state# " << state.ToString());
+ Y_VERIFY_S(state.CommitsInProgress == 0, PCtx->PDiskLogPrefix
<< "chunkIdx# " << chunkIdx << " state# " << state.ToString());
P_LOG(PRI_INFO, BPD01, "Chunk is deleted",
(ChunkIdx, chunkIdx),
(OldOwner, (ui32)state.OwnerId),
(NewOwner, (ui32)OwnerUnallocated));
- Y_ABORT_UNLESS(state.OwnerId == owner); // TODO DELETE
+ Y_VERIFY_S(state.OwnerId == owner, PCtx->PDiskLogPrefix); // TODO DELETE
state.OwnerId = OwnerUnallocated;
state.CommitState = TChunkState::FREE;
Keeper.PushFreeOwnerChunk(owner, chunkIdx);
break;
case TChunkState::DATA_COMMITTED_DELETE_ON_QUARANTINE:
// Mark chunk as quarantine, so it will be released through default quarantine way
- Y_ABORT_UNLESS(state.OwnerId == owner); // TODO DELETE
+ Y_VERIFY_S(state.OwnerId == owner, PCtx->PDiskLogPrefix); // TODO DELETE
state.CommitState = TChunkState::DATA_ON_QUARANTINE;
break;
case TChunkState::DATA_RESERVED_DELETE_ON_QUARANTINE:
// Mark chunk as quarantine, so it will be released through default quarantine way
- Y_ABORT_UNLESS(state.OwnerId == owner); // TODO DELETE
+ Y_VERIFY_S(state.OwnerId == owner, PCtx->PDiskLogPrefix); // TODO DELETE
state.CommitState = TChunkState::DATA_ON_QUARANTINE;
break;
case TChunkState::DATA_COMMITTED_DECOMMIT_IN_PROGRESS:
@@ -1350,7 +1350,7 @@ void TPDisk::DeleteChunk(ui32 chunkIdx, TOwner owner) {
break;
default:
- Y_FAIL_S("PDiskID# " << PCtx->PDiskId << " can't delete chunkIdx# " << chunkIdx
+ Y_FAIL_S(PCtx->PDiskLogPrefix << "can't delete chunkIdx# " << chunkIdx
<< " requesting ownerId# " << owner
<< " as it is in unexpected CommitState# " << state.ToString());
}
@@ -1383,7 +1383,7 @@ void TPDisk::OnLogCommitDone(TLogCommitDone &req) {
while (it != LogChunks.end() && it->OwnerLsnRange.size() > req.OwnerId) {
TLogChunkInfo::TLsnRange &range = it->OwnerLsnRange[req.OwnerId];
if (range.IsPresent && range.LastLsn < currentFirstLsnToKeep) {
- //Y_ABORT_UNLESS(range.FirstLsn != range.LastLsn);
+ //Y_VERIFY_S(range.FirstLsn != range.LastLsn, PCtx->PDiskLogPrefix);
P_LOG(PRI_INFO, BPD27, "Log chunk is dereferenced by owner",
(ChunkIdx, it->ChunkIdx),
(LsnRange, TString(TStringBuilder() << "[" << range.FirstLsn << ", " << range.LastLsn << "]")),
@@ -1392,7 +1392,7 @@ void TPDisk::OnLogCommitDone(TLogCommitDone &req) {
(CausedbyLsn, req.Lsn),
(PreviousCurrentUserCount, it->CurrentUserCount));
range.IsPresent = false;
- Y_ABORT_UNLESS(it->CurrentUserCount > 0);
+ Y_VERIFY_S(it->CurrentUserCount > 0, PCtx->PDiskLogPrefix);
it->CurrentUserCount--;
if (it->CurrentUserCount == 0) {
isChunkReleased = true;
@@ -1453,7 +1453,7 @@ void TPDisk::MarkChunksAsReleased(TReleaseChunks& req) {
dataChunkSizeSectors, Format.MagicLogChunk, req.GapStart->ChunkIdx, nullptr, desiredSectorIdx,
nullptr, PCtx, &DriveModel, Cfg->EnableSectorEncryption);
- Y_VERIFY_S(req.GapEnd->DesiredPrevChunkLastNonce, "PDiskId# " << PCtx->PDiskId
+ Y_VERIFY_S(req.GapEnd->DesiredPrevChunkLastNonce, PCtx->PDiskLogPrefix
<< "Zero GapEnd->DesiredPrevChunkLastNonce, chunkInfo# " << *req.GapEnd);
// +1 stands for -1 in logreader in old versions of pdisk
ui64 expectedNonce = req.GapEnd->DesiredPrevChunkLastNonce + 1;
@@ -1474,9 +1474,9 @@ void TPDisk::MarkChunksAsReleased(TReleaseChunks& req) {
// Schedules EvReadLogResult event for the system log
void TPDisk::InitiateReadSysLog(const TActorId &pDiskActor) {
- Y_VERIFY_S(PDiskThread.Running(), "expect PDiskThread to be running");
- Y_VERIFY_S(InitPhase == EInitPhase::Uninitialized, "expect InitPhase to be Uninitialized, but InitPhase# "
- << InitPhase.load());
+ Y_VERIFY_S(PDiskThread.Running(), PCtx->PDiskLogPrefix << "expect PDiskThread to be running");
+ Y_VERIFY_S(InitPhase == EInitPhase::Uninitialized, PCtx->PDiskLogPrefix
+ << "expect InitPhase to be Uninitialized, but InitPhase# " << InitPhase.load());
ui32 formatSectorsSize = FormatSectorSize * ReplicationFactor;
THolder<TEvReadFormatResult> evReadFormatResult(new TEvReadFormatResult(formatSectorsSize, UseHugePages));
ui8 *formatSectors = evReadFormatResult->FormatSectors.Get();
@@ -1572,11 +1572,11 @@ void TPDisk::ProcessReadLogResult(const NPDisk::TEvReadLogResult &evReadLogResul
}
for (auto it = chunkOwners.begin(); it != chunkOwners.end(); ++it) {
TOwnerData &data = OwnerData[*it];
- Y_ABORT_UNLESS(data.VDiskId != TVDiskID::InvalidId);
+ Y_VERIFY_S(data.VDiskId != TVDiskID::InvalidId, PCtx->PDiskLogPrefix);
if (data.StartingPoints.empty()) {
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId
- << " ownerId# " << (ui32)*it
+ str << PCtx->PDiskLogPrefix
+ << "ownerId# " << (ui32)*it
<< " Owns chunks, but has no starting points! ownedChunks# [";
for (size_t chunkIdx = 0; chunkIdx < ChunkState.size(); ++chunkIdx) {
TChunkState &state = ChunkState[chunkIdx];
@@ -1684,7 +1684,7 @@ void TPDisk::ProcessReadLogResult(const NPDisk::TEvReadLogResult &evReadLogResul
return;
}
default:
- Y_FAIL_S("Unexpected InitPhase# " << InitPhase.load());
+ Y_FAIL_S(PCtx->PDiskLogPrefix << "Unexpected InitPhase# " << InitPhase.load());
}
}
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_metadata.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_metadata.cpp
index 925d68e9f2..f10e61dfdb 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_metadata.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_impl_metadata.cpp
@@ -101,7 +101,7 @@ namespace NKikimr::NPDisk {
}
void IssueQuery(TActorSystem *actorSystem) {
- Y_ABORT_UNLESS(!WriteQueue.empty());
+ Y_VERIFY_S(!WriteQueue.empty(), PDisk->PCtx->PDiskLogPrefix);
auto& [key, buffer] = WriteQueue.front();
const ui64 writeOffset = PDisk->Format.Offset(key.ChunkIdx, key.OffsetInSectors);
STLOGX(*actorSystem, PRI_DEBUG, BS_PDISK, BPD01, "TCompletionWriteMetadata::IssueQuery",
@@ -115,7 +115,7 @@ namespace NKikimr::NPDisk {
void Exec(TActorSystem *actorSystem) override {
STLOGX(*actorSystem, PRI_DEBUG, BS_PDISK, BPD01, "TCompletionWriteMetadata::Exec",
(Result, Result));
- Y_ABORT_UNLESS(!WriteQueue.empty());
+ Y_VERIFY_S(!WriteQueue.empty(), PDisk->PCtx->PDiskLogPrefix);
WriteQueue.pop_front();
if (Result != EIoResult::Ok) {
PDisk->InputRequest(PDisk->ReqCreator.CreateFromArgs<TWriteMetadataResult>(false, Sender));
@@ -221,7 +221,7 @@ namespace NKikimr::NPDisk {
: FormatIndex * FormatSectorSize;
if (FormatIndex != -1) {
- Y_ABORT_UNLESS(static_cast<ui32>(FormatIndex) < ReplicationFactor);
+ Y_VERIFY_S(static_cast<ui32>(FormatIndex) < ReplicationFactor, PDisk->PCtx->PDiskLogPrefix);
Payload = TRcBuf::UninitializedPageAligned(FormatSectorSize);
TPDisk::MakeMetadataFormatSector(reinterpret_cast<ui8*>(Payload.GetDataMut()), MainKey, Format);
}
@@ -270,7 +270,7 @@ namespace NKikimr::NPDisk {
} // anonymous
void TPDisk::InitFormattedMetadata() {
- Y_ABORT_UNLESS(std::holds_alternative<std::monostate>(Meta.State));
+ Y_VERIFY_S(std::holds_alternative<std::monostate>(Meta.State), PCtx->PDiskLogPrefix);
auto& formatted = Meta.State.emplace<NMeta::TFormatted>();
std::vector<TChunkIdx> metadataChunks;
@@ -316,17 +316,17 @@ namespace NKikimr::NPDisk {
}
void TPDisk::ReadFormattedMetadataIfNeeded() {
- Y_ABORT_UNLESS(std::holds_alternative<NMeta::TScanInProgress>(Meta.StoredMetadata));
+ Y_VERIFY_S(std::holds_alternative<NMeta::TScanInProgress>(Meta.StoredMetadata), PCtx->PDiskLogPrefix);
auto& formatted = GetFormattedMeta();
- Y_ABORT_UNLESS(formatted.NumReadsInFlight < formatted.MaxReadsInFlight);
+ Y_VERIFY_S(formatted.NumReadsInFlight < formatted.MaxReadsInFlight, PCtx->PDiskLogPrefix);
while (!formatted.ReadPending.empty()) {
// find the slot we have to read
const NMeta::TSlotKey& key = formatted.ReadPending.front();
const auto it = formatted.Slots.find(key);
- Y_ABORT_UNLESS(it != formatted.Slots.end());
- Y_ABORT_UNLESS(it->second == NMeta::ESlotState::READ_PENDING);
+ Y_VERIFY_S(it != formatted.Slots.end(), PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(it->second == NMeta::ESlotState::READ_PENDING, PCtx->PDiskLogPrefix);
// make completion object and the request that will be pushed back to PDisk thread when the request is complete
const size_t bytesToRead = Format.RoundUpToSectorSize(sizeof(TMetadataHeader));
@@ -378,9 +378,9 @@ namespace NKikimr::NPDisk {
},
[&](NMeta::TFormatted& formatted) {
const auto it = formatted.Slots.find(request.Key);
- Y_ABORT_UNLESS(it != formatted.Slots.end());
- Y_ABORT_UNLESS(it->second == NMeta::ESlotState::READ_IN_PROGRESS);
- Y_ABORT_UNLESS(formatted.NumReadsInFlight);
+ Y_VERIFY_S(it != formatted.Slots.end(), PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(it->second == NMeta::ESlotState::READ_IN_PROGRESS, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(formatted.NumReadsInFlight, PCtx->PDiskLogPrefix);
--formatted.NumReadsInFlight;
P_LOG(PRI_DEBUG, BPD01, "ProcessInitialReadMetadataResult (formatted)",
@@ -404,9 +404,9 @@ namespace NKikimr::NPDisk {
void TPDisk::FinishReadingFormattedMetadata() {
auto& formatted = GetFormattedMeta();
- Y_ABORT_UNLESS(formatted.ReadPending.empty());
+ Y_VERIFY_S(formatted.ReadPending.empty(), PCtx->PDiskLogPrefix);
for (auto& [_, state] : formatted.Slots) {
- Y_ABORT_UNLESS(state == NMeta::ESlotState::FREE || state == NMeta::ESlotState::PROCESSED);
+ Y_VERIFY_S(state == NMeta::ESlotState::FREE || state == NMeta::ESlotState::PROCESSED, PCtx->PDiskLogPrefix);
}
std::sort(formatted.Parts.begin(), formatted.Parts.end());
@@ -416,7 +416,7 @@ namespace NKikimr::NPDisk {
auto markSlots = [&](auto begin, auto end, NMeta::ESlotState newState) {
for (auto it = begin; it != end; ++it) {
const NMeta::ESlotState prev = std::exchange(formatted.Slots[it->Key], newState);
- Y_ABORT_UNLESS(prev == NMeta::ESlotState::PROCESSED);
+ Y_VERIFY_S(prev == NMeta::ESlotState::PROCESSED, PCtx->PDiskLogPrefix);
}
};
@@ -434,7 +434,7 @@ namespace NKikimr::NPDisk {
ui32 expectedRecordIndex = totalParts - 1;
bool success = std::distance(it, endIt) == totalParts;
for (auto temp = it; temp != endIt; ++temp) {
- Y_ABORT_UNLESS(temp->Header.SequenceNumber == sequenceNumber);
+ Y_VERIFY_S(temp->Header.SequenceNumber == sequenceNumber, PCtx->PDiskLogPrefix);
if (success && temp->Header.TotalRecords == totalParts && temp->Header.RecordIndex == expectedRecordIndex) {
--expectedRecordIndex;
buffer.Insert(buffer.Begin(), std::move(temp->Payload));
@@ -454,14 +454,14 @@ namespace NKikimr::NPDisk {
formatted.Parts.clear();
// start processing any pending metadata requests
- Y_ABORT_UNLESS(!Meta.WriteInFlight);
+ Y_VERIFY_S(!Meta.WriteInFlight, PCtx->PDiskLogPrefix);
ProcessMetadataRequestQueue();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void TPDisk::ProcessPushUnformattedMetadataSector(TPushUnformattedMetadataSector& request) {
- Y_ABORT_UNLESS(std::holds_alternative<std::monostate>(Meta.State));
+ Y_VERIFY_S(std::holds_alternative<std::monostate>(Meta.State), PCtx->PDiskLogPrefix);
auto& unformatted = Meta.State.emplace<NMeta::TUnformatted>();
unformatted.Format = request.Format;
if (unformatted.Format) {
@@ -480,7 +480,7 @@ namespace NKikimr::NPDisk {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void TPDisk::ProcessMetadataRequestQueue() {
- Y_ABORT_UNLESS(!std::holds_alternative<NMeta::TScanInProgress>(Meta.StoredMetadata));
+ Y_VERIFY_S(!std::holds_alternative<NMeta::TScanInProgress>(Meta.StoredMetadata), PCtx->PDiskLogPrefix);
while (!Meta.Requests.empty() && !Meta.WriteInFlight) {
const size_t sizeBefore = Meta.Requests.size();
switch (auto& front = Meta.Requests.front(); front->GetType()) {
@@ -495,7 +495,7 @@ namespace NKikimr::NPDisk {
default:
Y_ABORT();
}
- Y_ABORT_UNLESS(Meta.Requests.size() < sizeBefore || Meta.WriteInFlight);
+ Y_VERIFY_S(Meta.Requests.size() < sizeBefore || Meta.WriteInFlight, PCtx->PDiskLogPrefix);
}
}
@@ -513,8 +513,8 @@ namespace NKikimr::NPDisk {
void TPDisk::HandleNextReadMetadata() {
auto& front = Meta.Requests.front();
- Y_ABORT_UNLESS(front->GetType() == ERequestType::RequestReadMetadata);
- Y_ABORT_UNLESS(!Meta.WriteInFlight);
+ Y_VERIFY_S(front->GetType() == ERequestType::RequestReadMetadata, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(!Meta.WriteInFlight, PCtx->PDiskLogPrefix);
auto guid = std::visit<std::optional<ui64>>(TOverloaded{
[](std::monostate&) -> std::nullopt_t { Y_ABORT("incorrect case"); },
[&](NMeta::TFormatted&) { return Format.Guid; },
@@ -547,11 +547,11 @@ namespace NKikimr::NPDisk {
}
void TPDisk::HandleNextWriteMetadata() {
- Y_ABORT_UNLESS(!Meta.Requests.empty());
+ Y_VERIFY_S(!Meta.Requests.empty(), PCtx->PDiskLogPrefix);
const auto& front = Meta.Requests.front();
- Y_ABORT_UNLESS(front->GetType() == ERequestType::RequestWriteMetadata);
+ Y_VERIFY_S(front->GetType() == ERequestType::RequestWriteMetadata, PCtx->PDiskLogPrefix);
auto& write = static_cast<TWriteMetadata&>(*front);
- Y_ABORT_UNLESS(!Meta.WriteInFlight);
+ Y_VERIFY_S(!Meta.WriteInFlight, PCtx->PDiskLogPrefix);
P_LOG(PRI_DEBUG, BPD01, "HandleNextWriteMetadata",
(Metadata.size, write.Metadata.size()));
@@ -599,8 +599,8 @@ namespace NKikimr::NPDisk {
completion->CostNs += DriveModel.TimeForSizeNs(payload.size(), key.ChunkIdx, TDriveModel::OP_TYPE_WRITE);
const auto it = formatted.Slots.find(key);
- Y_ABORT_UNLESS(it != formatted.Slots.end());
- Y_ABORT_UNLESS(it->second == NMeta::ESlotState::FREE);
+ Y_VERIFY_S(it != formatted.Slots.end(), PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(it->second == NMeta::ESlotState::FREE, PCtx->PDiskLogPrefix);
it->second = NMeta::ESlotState::BEING_WRITTEN;
}
@@ -664,18 +664,18 @@ namespace NKikimr::NPDisk {
}
void TPDisk::ProcessWriteMetadataResult(TWriteMetadataResult& request) {
- Y_ABORT_UNLESS(Meta.WriteInFlight);
+ Y_VERIFY_S(Meta.WriteInFlight, PCtx->PDiskLogPrefix);
Meta.WriteInFlight = false;
- Y_ABORT_UNLESS(!Meta.Requests.empty());
+ Y_VERIFY_S(!Meta.Requests.empty(), PCtx->PDiskLogPrefix);
auto& front = Meta.Requests.front();
- Y_ABORT_UNLESS(front->GetType() == ERequestType::RequestWriteMetadata);
+ Y_VERIFY_S(front->GetType() == ERequestType::RequestWriteMetadata, PCtx->PDiskLogPrefix);
auto& write = static_cast<TWriteMetadata&>(*front);
std::optional<ui64> guid;
std::visit(TOverloaded{
- [](std::monostate&) { Y_ABORT_UNLESS("incorrect case"); },
+ [](std::monostate&) { Y_ABORT("incorrect case"); },
[&](NMeta::TFormatted& formatted) {
for (auto& [_, state] : formatted.Slots) {
if (request.Success) {
@@ -719,9 +719,9 @@ namespace NKikimr::NPDisk {
TRcBuf TPDisk::CreateMetadataPayload(TRcBuf& metadata, size_t offset, size_t payloadSize, ui32 sectorSize,
bool encryption, const TKey& key, ui64 sequenceNumber, ui32 recordIndex, ui32 totalRecords) {
- Y_ABORT_UNLESS(offset + payloadSize <= metadata.size());
+ Y_VERIFY_S(offset + payloadSize <= metadata.size(), PCtx->PDiskLogPrefix);
- Y_DEBUG_ABORT_UNLESS(IsPowerOf2(sectorSize));
+ Y_VERIFY_DEBUG_S(IsPowerOf2(sectorSize), PCtx->PDiskLogPrefix);
const size_t dataSize = sizeof(TMetadataHeader) + payloadSize;
const size_t bytesToWrite = (dataSize + sectorSize - 1) & ~size_t(sectorSize - 1);
@@ -730,9 +730,9 @@ namespace NKikimr::NPDisk {
auto buffer = TRcBuf::UninitializedPageAligned(bytesToWrite);
- Y_ABORT_UNLESS(recordIndex <= Max<ui16>());
- Y_ABORT_UNLESS(totalRecords <= Max<ui16>());
- Y_ABORT_UNLESS(payloadSize <= Max<ui32>());
+ Y_VERIFY_S(recordIndex <= Max<ui16>(), PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(totalRecords <= Max<ui16>(), PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(payloadSize <= Max<ui32>(), PCtx->PDiskLogPrefix);
auto *header = reinterpret_cast<TMetadataHeader*>(buffer.GetDataMut());
void *data = header + 1;
@@ -798,9 +798,11 @@ namespace NKikimr::NPDisk {
return true;
}
- std::optional<TMetadataFormatSector> TPDisk::CheckMetadataFormatSector(const ui8 *data, size_t len, const TMainKey& mainKey) {
+ std::optional<TMetadataFormatSector> TPDisk::CheckMetadataFormatSector(const ui8 *data, size_t len,
+ const TMainKey& mainKey, const TString& logPrefix) {
if (len != FormatSectorSize * ReplicationFactor) {
- Y_DEBUG_ABORT("unexpected metadata format sector size");
+ Y_UNUSED(logPrefix);
+ Y_DEBUG_ABORT_S(logPrefix << "unexpected metadata format sector size");
return {}; // definitely not correct
}
@@ -859,7 +861,7 @@ namespace NKikimr::NPDisk {
}
NMeta::TFormatted& TPDisk::GetFormattedMeta() {
- Y_ABORT_UNLESS(std::holds_alternative<NMeta::TFormatted>(Meta.State));
+ Y_VERIFY_S(std::holds_alternative<NMeta::TFormatted>(Meta.State), PCtx->PDiskLogPrefix);
return std::get<NMeta::TFormatted>(Meta.State);
}
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_keeper.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_keeper.h
index 54dfa6e207..83f08faf4a 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_keeper.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_keeper.h
@@ -125,7 +125,7 @@ public:
}
void PushFreeOwnerChunk(TOwner owner, TChunkIdx chunkIdx) {
- Y_ABORT_UNLESS(chunkIdx != 0);
+ Y_VERIFY(chunkIdx != 0);
UntrimmedFreeChunks.Push(chunkIdx);
ChunkTracker.Release(owner, 1);
}
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_log_cache.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_log_cache.cpp
index dd97e316e6..cb80937ffb 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_log_cache.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_log_cache.cpp
@@ -66,7 +66,7 @@ size_t TLogCache::Erase(ui64 offset) {
}
size_t TLogCache::EraseRange(ui64 begin, ui64 end) {
- Y_DEBUG_ABORT_UNLESS(begin <= end);
+ Y_VERIFY_DEBUG(begin <= end);
auto beginIt = Index.lower_bound(begin);
auto endIt = Index.lower_bound(end);
size_t dist = std::distance(beginIt, endIt);
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_logreader.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_logreader.cpp
index 8cc9b869d6..08342c5c22 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_logreader.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_logreader.cpp
@@ -24,7 +24,7 @@ void TPDisk::ProcessChunkOwnerMap(TMap<ui32, TChunkState> &chunkOwnerMap) {
}
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId << " ProcessChunkOwnerMap; ";
+ str << PCtx->PDiskLogPrefix << "ProcessChunkOwnerMap; ";
for (auto& [owner, chunks] : ownerToChunks) {
std::sort(chunks.begin(), chunks.end());
str << " Owner# " << owner << " [";
@@ -51,14 +51,15 @@ void TPDisk::ProcessChunkOwnerMap(TMap<ui32, TChunkState> &chunkOwnerMap) {
// OwnerMap states that the chunk is used by some user
// Make sure the chunk is not really a part of syslog/format
- Y_VERIFY_S(chunkIdx > Format.SystemChunkCount, "PDiskId# " << PCtx->PDiskId << " chunkIdx# " << chunkIdx
+ Y_VERIFY_S(chunkIdx > Format.SystemChunkCount, PCtx->PDiskLogPrefix
+ << "chunkIdx# " << chunkIdx
<< " SystemChunkCount# " << Format.SystemChunkCount);
// Make sure the chunk is not really a part of the log
for (const auto& logChunk : LogChunks) {
if (logChunk.ChunkIdx == chunkIdx) {
TStringStream out;
- out << "PDiskId# " << PCtx->PDiskId << " chunkIdx# " << chunkIdx;
+ out << PCtx->PDiskLogPrefix << "chunkIdx# " << chunkIdx;
out << " is a part of the log and is owned by user, ownerIdx# " << ownerId;
out << " LogChunks# {";
for (const auto& chunk : LogChunks) {
@@ -177,7 +178,7 @@ void TPDisk::ProcessReadLogRecord(TLogRecordHeader &header, TString &data, NPDis
if (ownerData.VDiskId != TVDiskID::InvalidId) {
if (!ownerData.IsNextLsnOk(header.OwnerLsn)) {
TStringStream str;
- str << "Lsn reversal! PDiskId# " << PCtx->PDiskId
+ str << PCtx->PDiskLogPrefix << "Lsn reversal!"
<< " ownerId# " << (ui32)owner
<< " LogStartPosition# " << ownerData.LogStartPosition
<< " LastSeenLsn# " << ownerData.LastSeenLsn
@@ -346,10 +347,10 @@ TLogReader::TLogReader(bool isInitial,TPDisk *pDisk, TActorSystem * const actorS
, CurrentChunkToRead(ChunksToRead.end())
, ParseCommits(false) // Actual only if IsInitial
{
- Y_DEBUG_ABORT_UNLESS(PCtx);
- Y_DEBUG_ABORT_UNLESS(PCtx->ActorSystem == actorSystem);
- Y_ABORT_UNLESS(PDisk->PDiskThread.Id() == TThread::CurrentThreadId(), "Constructor of TLogReader must be called"
- " from PDiskThread");
+ Y_VERIFY_DEBUG(PCtx);
+ Y_VERIFY_DEBUG_S(PCtx->ActorSystem == actorSystem, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(PDisk->PDiskThread.Id() == TThread::CurrentThreadId(),
+ PCtx->PDiskLogPrefix << "Constructor of TLogReader must be called from PDiskThread");
Cypher.SetKey(PDisk->Format.LogKey);
AtomicIncrement(PDisk->InFlightLogRead);
@@ -516,7 +517,7 @@ void TLogReader::Exec(ui64 offsetRead, TVector<ui64> &badOffsets, TActorSystem *
{
ui64 sizeToProcess = (ui64)format.SectorSize;
TSectorData *data = Sector->DataByIdx(idxRead);
- Y_ABORT_UNLESS(data->IsAvailable(sizeToProcess));
+ Y_VERIFY_S(data->IsAvailable(sizeToProcess), PCtx->PDiskLogPrefix);
bool isEndOfLog = ProcessSectorSet(data);
data->SetOffset(data->Offset + sizeToProcess);
if (isEndOfLog) {
@@ -544,7 +545,7 @@ void TLogReader::Exec(ui64 offsetRead, TVector<ui64> &badOffsets, TActorSystem *
break;
}
default:
- Y_ABORT();
+ Y_ABORT("unexpected case");
break;
}
}// while (true)
@@ -565,8 +566,8 @@ void TLogReader::NotifyError(ui64 offsetRead, TString& errorReason) {
TString TLogReader::SelfInfo() {
TStringStream ss;
- ss << "PDiskId# " << PCtx->PDiskId
- << " LogReader"
+ ss << PCtx->PDiskLogPrefix
+ << "LogReader"
<< " IsInitial# " << IsInitial;
if (!IsInitial) {
ss << " Owner# " << ui32(Owner)
@@ -581,9 +582,7 @@ TString TLogReader::SelfInfo() {
bool TLogReader::PrepareToRead() {
TDiskFormat &format = PDisk->Format;
if (Position == TLogPosition::Invalid()) {
- if (IsInitial) {
- Y_ABORT();
- }
+ Y_VERIFY_S(!IsInitial, PCtx->PDiskLogPrefix);
ReplyOk();
return true;
}
@@ -597,7 +596,7 @@ bool TLogReader::PrepareToRead() {
}
if (OwnerLogStartPosition != TLogPosition{0, 0}) {
ui32 startChunkIdx = OwnerLogStartPosition.ChunkIdx;
- Y_ABORT_UNLESS(startChunkIdx == ChunksToRead[0].ChunkIdx);
+ Y_VERIFY_S(startChunkIdx == ChunksToRead[0].ChunkIdx, PCtx->PDiskLogPrefix);
Position = OwnerLogStartPosition;
} else {
Position = PDisk->LogPosition(ChunksToRead[0].ChunkIdx, 0, 0);
@@ -691,7 +690,7 @@ void TLogReader::ProcessLogPageTerminator(ui8 *data, ui32 sectorPayloadSize) {
// The rest of the sector contains no data.
auto *firstPageHeader = reinterpret_cast<TFirstLogPageHeader*>(data);
ui32 sizeLeft = sectorPayloadSize - OffsetInSector;
- Y_ABORT_UNLESS(firstPageHeader->Size + sizeof(TFirstLogPageHeader) == sizeLeft);
+ Y_VERIFY_S(firstPageHeader->Size + sizeof(TFirstLogPageHeader) == sizeLeft, PCtx->PDiskLogPrefix);
OffsetInSector += sizeLeft;
SetLastGoodToWritePosition = true;
}
@@ -735,7 +734,7 @@ void TLogReader::ProcessLogPageNonceJump2(ui8 *data, const ui64 previousNonce, c
} else if (previousNonce < nonceJumpLogPageHeader2->PreviousNonce &&
previousDataNonce < nonceJumpLogPageHeader2->PreviousNonce) {
TStringStream str;
- str << "PDiskId# " << PCtx->PDiskId
+ str << PCtx->PDiskLogPrefix
<< "previousNonce# " << previousNonce
<< " and previousDataNonce# " << previousDataNonce
<< " != header->PreviousNonce# " << nonceJumpLogPageHeader2->PreviousNonce
@@ -776,13 +775,13 @@ void TLogReader::ProcessLogPageNonceJump1(ui8 *data, const ui64 previousNonce) {
ReplyOk();
return;
}
- Y_ABORT_UNLESS(previousNonce == nonceJumpLogPageHeader1->PreviousNonce,
- "previousNonce# %" PRIu64 " != header->PreviousNonce# %" PRIu64
- " OffsetInSector# %" PRIu64 " sizeof(TNonceJumpLogPageHeader1)# %" PRIu64
- " chunkIdx# %" PRIu64 " sectorIdx# %" PRIu64, // " header->Flags# %" PRIu64,
- (ui64)previousNonce, (ui64)nonceJumpLogPageHeader1->PreviousNonce,
- (ui64)OffsetInSector, (ui64)sizeof(TNonceJumpLogPageHeader1),
- (ui64)ChunkIdx, (ui64)SectorIdx); //, (ui64)pageHeader->Flags);
+ Y_VERIFY_S(previousNonce == nonceJumpLogPageHeader1->PreviousNonce, PCtx->PDiskLogPrefix
+ << "previousNonce# " << (ui64)previousNonce
+ << " != header->PreviousNonce# " << (ui64)nonceJumpLogPageHeader1->PreviousNonce
+ << " OffsetInSector# " << (ui64)OffsetInSector
+ << " sizeof(TNonceJumpLogPageHeader1)# " << (ui64)sizeof(TNonceJumpLogPageHeader1)
+ << " chunkIdx# " << (ui64)ChunkIdx
+ << " sectorIdx# " << (ui64)SectorIdx);
}
if (!IsInitial && ChunkIdx == LogEndChunkIdx && SectorIdx >= LogEndSectorIdx) {
@@ -887,12 +886,12 @@ bool TLogReader::ProcessSectorSet(TSectorData *sector) {
ui32 maxOffsetInSector = format.SectorPayloadSize() - ui32(sizeof(TFirstLogPageHeader));
while (OffsetInSector <= maxOffsetInSector) {
TLogPageHeader *pageHeader = (TLogPageHeader*)(data + OffsetInSector);
- Y_ABORT_UNLESS(pageHeader->Version == PDISK_DATA_VERSION, "PDiskId# %" PRIu32
- " incompatible log page header version: %" PRIu32
- " (expected: %" PRIu32 ") at chunk %" PRIu32 " SectorSet: %" PRIu32 " Sector: %" PRIu32
- " Offset in sector: %" PRIu32 " A: %" PRIu32 " B: %" PRIu32, PCtx->PDiskId,
- (ui32)pageHeader->Version, (ui32)PDISK_DATA_VERSION, (ui32)ChunkIdx, (ui32)SectorIdx,
- (ui32)0, (ui32)OffsetInSector, (ui32)pageHeader->A, (ui32)pageHeader->B);
+ Y_VERIFY_S(pageHeader->Version == PDISK_DATA_VERSION, PCtx->PDiskLogPrefix
+ << "incompatible log page header version: " << (ui32)pageHeader->Version
+ << " (expected: " << (ui32)PDISK_DATA_VERSION << ") at chunk " << (ui32)ChunkIdx
+ << " SectorIdx: " << (ui32)SectorIdx << " Sector: 0"
+ << " Offset in sector: " << (ui32)OffsetInSector
+ << " A: %" << (ui32)pageHeader->A << " B: %" << (ui32)pageHeader->B);
if (pageHeader->Flags & LogPageTerminator) {
ProcessLogPageTerminator(data + OffsetInSector, format.SectorPayloadSize());
@@ -1025,7 +1024,7 @@ bool TLogReader::ProcessSectorSet(TSectorData *sector) {
LastRecordHeaderNonce = sectorFooter->Nonce;
IsLastRecordHeaderValid = true;
LastRecordData = TString::Uninitialized(firstPageHeader->DataSize);
- Y_ABORT_UNLESS(firstPageHeader->Size <= LastRecordData.size());
+ Y_VERIFY_S(firstPageHeader->Size <= LastRecordData.size(), PCtx->PDiskLogPrefix);
memcpy((void*)LastRecordData.data(), data + OffsetInSector, firstPageHeader->Size);
LastRecordDataWritePosition = firstPageHeader->Size;
} else {
@@ -1126,7 +1125,8 @@ void TLogReader::ReplyOk() {
ownerData.LogRecordsInitiallyRead &&
!ownerData.LogRecordsConsequentlyRead) {
TStringStream str;
- str << "LogRecordsConsequentlyRead# " << ownerData.LogRecordsConsequentlyRead
+ str << PCtx->PDiskLogPrefix
+ << "LogRecordsConsequentlyRead# " << ownerData.LogRecordsConsequentlyRead
<< " LogRecordsInitiallyRead# " << ownerData.LogRecordsInitiallyRead;
Y_FAIL_S(str.Str());
}
@@ -1163,7 +1163,7 @@ void TLogReader::ReplyError() {
}
void TLogReader::Reply() {
- Y_ABORT_UNLESS(!IsReplied.load());
+ Y_VERIFY_S(!IsReplied.load(), PCtx->PDiskLogPrefix);
if (IsInitial) {
PDisk->ProcessChunkOwnerMap(*ChunkOwnerMap.Get());
ChunkOwnerMap.Destroy();
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_params.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_params.cpp
index 4e7078f9a8..931193fb55 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_params.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_params.cpp
@@ -27,7 +27,7 @@ namespace NKikimr {
, GlueRequestDistanceBytes(CalculateGlueRequestDistanceBytes(seekTimeUs, readSpeedBps))
, TrueMediaType(trueMediaType)
{
- Y_DEBUG_ABORT_UNLESS(AppendBlockSize <= ChunkSize);
+ Y_VERIFY_DEBUG(AppendBlockSize <= ChunkSize);
}
// Read size that allows pdisk to spend at least 50% actually reading the data (not seeking)
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_quota_record.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_quota_record.h
index d08ec76538..289fd96d87 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_quota_record.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_quota_record.h
@@ -103,7 +103,7 @@ public:
// Called only from the main thread
bool TryAllocate(i64 count, TString &outErrorReason) {
- Y_ABORT_UNLESS(count > 0);
+ Y_VERIFY(count > 0);
if (AtomicSub(Free, count) > AtomicGet(Black)) {
return true;
}
@@ -118,7 +118,7 @@ public:
}
bool InitialAllocate(i64 count) {
- Y_ABORT_UNLESS(count >= 0);
+ Y_VERIFY(count >= 0);
if (AtomicSub(Free, count) >= 0) {
return true;
} else {
@@ -128,7 +128,7 @@ public:
}
void Release(i64 count) {
- Y_ABORT_UNLESS(count > 0);
+ Y_VERIFY(count > 0);
TAtomicBase newFree = AtomicAdd(Free, count);
Y_VERIFY_S(newFree <= AtomicGet(HardLimit), Print());
}
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_req_creator.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_req_creator.h
index c86c81f0ee..c392649c6a 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_req_creator.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_req_creator.h
@@ -170,7 +170,7 @@ private:
template<typename TEv>
static TString ToString(const TAutoPtr<NActors::TEventHandle<TEv>> &ev) {
- Y_ABORT_UNLESS(ev && ev->Get());
+ Y_VERIFY(ev && ev->Get());
return ev->Get()->ToString();
}
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.cpp
index a21c596811..4564a5ec37 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.cpp
@@ -52,7 +52,7 @@ void TChunkRead::Abort(TActorSystem* actorSystem) {
if (FinalCompletion) {
FinalCompletion->PartDeleted(actorSystem);
} else {
- Y_ABORT_UNLESS(!IsReplied);
+ Y_VERIFY(!IsReplied);
TStringStream error;
error << "ReqId# " << ReqId << " ChunkRead is deleted because of PDisk stoppage";
THolder<NPDisk::TEvChunkReadResult> result = MakeHolder
@@ -78,7 +78,7 @@ TChunkReadPiece::TChunkReadPiece(TIntrusivePtr<TChunkRead> &read, ui64 pieceCurr
, PieceSizeLimit(pieceSizeLimit)
, IsTheLastPiece(isTheLastPiece)
{
- Y_ABORT_UNLESS(ChunkRead->FinalCompletion);
+ Y_VERIFY(ChunkRead->FinalCompletion);
if (!IsTheLastPiece) {
ChunkRead->FinalCompletion->AddPart();
}
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.h
index 328bb27b8a..56a7f296f1 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_requestimpl.h
@@ -317,7 +317,7 @@ public:
}
virtual ~TLogWrite() {
- Y_DEBUG_ABORT_UNLESS(Replied);
+ Y_VERIFY_DEBUG(Replied);
if (OnDestroy) {
OnDestroy();
}
@@ -333,8 +333,8 @@ public:
}
void AddToBatch(TLogWrite *req) {
- Y_ABORT_UNLESS(BatchTail->NextInBatch == nullptr);
- Y_ABORT_UNLESS(req->NextInBatch == nullptr);
+ Y_VERIFY(BatchTail->NextInBatch == nullptr);
+ Y_VERIFY(req->NextInBatch == nullptr);
BatchTail->NextInBatch = req;
BatchTail = req;
}
@@ -414,11 +414,11 @@ public:
}
virtual ~TChunkRead() {
- Y_ABORT_UNLESS(DoubleFreeCanary == ReferenceCanary, "DoubleFreeCanary in TChunkRead is dead");
+ Y_VERIFY(DoubleFreeCanary == ReferenceCanary, "DoubleFreeCanary in TChunkRead is dead");
// Set DoubleFreeCanary to 0 and make sure compiler will not eliminate that action
SecureWipeBuffer((ui8*)&DoubleFreeCanary, sizeof(DoubleFreeCanary));
- Y_ABORT_UNLESS(!SelfPointer);
- Y_ABORT_UNLESS(IsReplied, "Unreplied read request, chunkIdx# %" PRIu32 " Offset# %" PRIu32 " Size# %" PRIu32
+ Y_VERIFY(!SelfPointer);
+ Y_VERIFY(IsReplied, "Unreplied read request, chunkIdx# %" PRIu32 " Offset# %" PRIu32 " Size# %" PRIu32
" CurrentSector# %" PRIu32 " RemainingSize# %" PRIu32,
(ui32)ChunkIdx, (ui32)Offset, (ui32)Size, (ui32)CurrentSector, (ui32)RemainingSize);
}
@@ -468,7 +468,7 @@ public:
TChunkReadPiece(TIntrusivePtr<TChunkRead> &read, ui64 pieceCurrentSector, ui64 pieceSizeLimit, bool isTheLastPiece, NWilson::TSpan span);
virtual ~TChunkReadPiece() {
- Y_ABORT_UNLESS(!SelfPointer);
+ Y_VERIFY(!SelfPointer);
}
void OnSuccessfulDestroy(TActorSystem* actorSystem);
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_sectorrestorator.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_sectorrestorator.cpp
index 1cc6ad5888..6d5142d45c 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_sectorrestorator.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_sectorrestorator.cpp
@@ -180,7 +180,7 @@ void TSectorRestorator::Restore(ui8 *source, const ui64 offset, const ui64 magic
void TSectorRestorator::WriteSector(ui8 *sectorData, ui64 writeOffset) {
if (PCtx && PCtx->ActorSystem && BufferPool) {
TBuffer *buffer = BufferPool->Pop();
- Y_ABORT_UNLESS(Format.SectorSize <= buffer->Size());
+ Y_VERIFY_S(Format.SectorSize <= buffer->Size(), PCtx->PDiskLogPrefix);
memcpy(buffer->Data(), sectorData, (size_t)Format.SectorSize);
REQUEST_VALGRIND_CHECK_MEM_IS_DEFINED(buffer->Data(), Format.SectorSize);
PCtx->ActorSystem->Send(PCtx->PDiskActor, new TEvLogSectorRestore(buffer->Data(), Format.SectorSize, writeOffset, buffer));
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_syslogreader.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_syslogreader.cpp
index b38efc6985..1e9909f343 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_syslogreader.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_syslogreader.cpp
@@ -71,8 +71,8 @@ TSysLogReader::TSysLogReader(TPDisk *pDisk, TActorSystem *const actorSystem, con
, SizeToRead(PDisk->Format.SysLogSectorCount * ReplicationFactor * PDisk->Format.SectorSize)
, Data(SizeToRead)
{
- Y_ABORT_UNLESS(actorSystem == PCtx->ActorSystem);
- Y_ABORT_UNLESS(replyTo == PCtx->PDiskActor);
+ Y_VERIFY_S(actorSystem == PCtx->ActorSystem, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(replyTo == PCtx->PDiskActor, PCtx->PDiskLogPrefix);
Cypher.SetKey(PDisk->Format.SysLogKey);
AtomicIncrement(PDisk->InFlightLogRead);
@@ -95,7 +95,7 @@ void TSysLogReader::Start() {
finalCompletion->CostNs = PDisk->DriveModel.TimeForSizeNs(SizeToRead, 0, TDriveModel::EOperationType::OP_TYPE_READ);
const ui32 bufferSize = PDisk->BufferPool->GetBufferSize();
const ui32 partsToRead = (SizeToRead + bufferSize - 1) / bufferSize;
- Y_ABORT_UNLESS(partsToRead > 0);
+ Y_VERIFY_S(partsToRead > 0, PCtx->PDiskLogPrefix);
TVector<TCompletionAction *> completionParts;
TVector<TBuffer *> bufferParts;
completionParts.reserve(partsToRead);
@@ -416,8 +416,8 @@ bool TSysLogReader::VerboseCheck(bool condition, const char *desctiption) {
void TSysLogReader::DumpDebugInfo(TStringStream &str, bool isSingleLine) {
const char *nl = (isSingleLine ? "; " : "\n(B) ");
- str << "PDiskId# " << PCtx->PDiskId;
- str << " SysLog";
+ str << PCtx->PDiskLogPrefix;
+ str << "SysLog";
str << " BeginSectorIdx# " << BeginSectorIdx;
str << " EndSectorIdx# " << EndSectorIdx;
str << " LoopOffset# " << LoopOffset;
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_tools.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_tools.cpp
index 6345ef92b2..5a6e12fe1c 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_tools.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_tools.cpp
@@ -151,7 +151,7 @@ bool ReadPDiskFormatInfo(const TString &path, const NPDisk::TMainKey &mainKey, T
THolder<NPDisk::TBufferPool> bufferPool(NPDisk::CreateBufferPool(512 << 10, 2, useSdpkNvmeDriver, {}));
NPDisk::TBuffer::TPtr formatRaw(bufferPool->Pop());
- Y_ABORT_UNLESS(formatRaw->Size() >= formatSectorsSize);
+ Y_VERIFY(formatRaw->Size() >= formatSectorsSize);
blockDevice->PreadSync(formatRaw->Data(), formatSectorsSize, 0,
NPDisk::TReqId(NPDisk::TReqId::ReadFormatInfo, 0), {});
@@ -203,7 +203,7 @@ bool ReadPDiskFormatInfo(const TString &path, const NPDisk::TMainKey &mainKey, T
const ui32 sysLogRawParts = (sysLogSize + bufferSize - 1) / bufferSize;
for (ui32 i = 0; i < sysLogRawParts; i++) {
const ui32 sysLogPartSize = Min(bufferSize, sysLogSize - i * bufferSize);
- Y_ABORT_UNLESS(buffer->Size() >= sysLogPartSize);
+ Y_VERIFY(buffer->Size() >= sysLogPartSize);
blockDevice->PreadSync(buffer->Data(), sysLogPartSize, sysLogOffset + i * bufferSize,
NPDisk::TReqId(NPDisk::TReqId::ReadSysLogData, 0), {});
memcpy(sysLogRaw.Get() + i * bufferSize, buffer->Data(), sysLogPartSize);
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_actions.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_actions.h
index d8532e4941..a81f0630dc 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_actions.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_actions.h
@@ -1716,7 +1716,7 @@ private:
if (DeletedChunks < ChunksToReserve / 2) {
NPDisk::TCommitRecord commitRecord;
commitRecord.FirstLsnToKeep = 1 + ReleaseLsnStepSize * (DeletedChunks + 1);
- Y_ABORT_UNLESS(commitRecord.FirstLsnToKeep <= LogRecordsToWrite + 1);
+ Y_VERIFY(commitRecord.FirstLsnToKeep <= LogRecordsToWrite + 1);
TLogRecAboutChunks log;
log.Type = EDeleteChunk;
log.Data.DeletedChunk = CommittedChunks.back();
@@ -1749,7 +1749,7 @@ public:
Garbage = PrepareData(LogRecordSize);
TLogRecAboutChunks log;
log.Type = EGarbage;
- Y_ABORT_UNLESS(LogRecordSize >= sizeof(log));
+ Y_VERIFY(LogRecordSize >= sizeof(log));
memcpy(Garbage.Detach(), &log, sizeof(log));
}
};
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_helpers.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_helpers.cpp
index efdf0932f0..62b6f2354b 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_helpers.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_helpers.cpp
@@ -207,7 +207,7 @@ void FillDeviceWithPattern(TTestContext *tc, ui64 chunkSize, ui64 pattern) {
const ui32 formatSectorsSize = NPDisk::FormatSectorSize * NPDisk::ReplicationFactor;
NPDisk::TAlignedData data(formatSectorsSize);
- Y_ABORT_UNLESS(data.Size() % sizeof(ui64) == 0);
+ Y_VERIFY(data.Size() % sizeof(ui64) == 0);
Fill((ui64*)data.Get(), (ui64*)(data.Get() + data.Size()), pattern);
{
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp
index fb0894ee28..2d525ae72d 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_ut_run.cpp
@@ -76,7 +76,7 @@ void Run(TVector<IActor*> tests, TTestRunConfig runCfg) {
runCfg.IsErasureEncodeUserLog, runCfg.TestContext->SectorMap);
}
} else {
- Y_ABORT_UNLESS(!runCfg.IsBad);
+ Y_VERIFY(!runCfg.IsBad);
}
pDiskId = MakeBlobStoragePDiskID(1, 1);
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_flightcontrol.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_flightcontrol.cpp
index 5c6d6c728b..eed176138d 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_flightcontrol.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_flightcontrol.cpp
@@ -14,7 +14,11 @@ TFlightControl::TFlightControl(ui64 bits)
, Mask(~((~0ull) << bits))
, IsCompleteLoop(1ull << bits)
{
- Y_ABORT_UNLESS(bits > 0 && bits < 16);
+ Y_VERIFY(bits > 0 && bits < 16);
+}
+
+void TFlightControl::Initialize(const TString& logPrefix) {
+ PDiskLogPrefix = logPrefix;
}
// Returns 0 in case of scheduling error
@@ -67,8 +71,8 @@ void TFlightControl::WakeUp() {
void TFlightControl::MarkComplete(ui64 idx) {
ui64 beginIdx = AtomicGet(BeginIdx);
- Y_ABORT_UNLESS(idx >= beginIdx);
- Y_ABORT_UNLESS(idx < beginIdx + MaxSize);
+ Y_VERIFY_S(idx >= beginIdx, PDiskLogPrefix);
+ Y_VERIFY_S(idx < beginIdx + MaxSize, PDiskLogPrefix);
if (idx == beginIdx) {
// It's the first item we are waiting for
if (beginIdx == EndIdx) {
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_flightcontrol.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_flightcontrol.h
index 8fbb72459a..029eacfcb7 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_flightcontrol.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_flightcontrol.h
@@ -22,12 +22,15 @@ class TFlightControl {
TVector<bool> IsCompleteLoop;
TMutex ScheduleMutex;
TCondVar ScheduleCondVar;
+ TString PDiskLogPrefix;
void WakeUp();
public:
TFlightControl(ui64 bits);
+ void Initialize(const TString& logPrefix);
+
// Returns 0 in case of scheduling error
// Operation Idx otherwise
// May sometimes return 0 when it already can schedule
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_sector.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_sector.h
index 9de0416fa7..22244c17ef 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_sector.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_sector.h
@@ -49,17 +49,17 @@ public:
}
TDataSectorFooter *GetDataFooter() {
- Y_DEBUG_ABORT_UNLESS(Size() >= sizeof(TDataSectorFooter));
+ Y_VERIFY_DEBUG(Size() >= sizeof(TDataSectorFooter));
return (TDataSectorFooter*) (End() - sizeof(TDataSectorFooter));
}
ui64 GetCanary() const {
- Y_DEBUG_ABORT_UNLESS(Size() >= sizeof(TDataSectorFooter) + CanarySize);
+ Y_VERIFY_DEBUG(Size() >= sizeof(TDataSectorFooter) + CanarySize);
return ReadUnaligned<ui64>(End() - sizeof(TDataSectorFooter) - CanarySize);
}
void SetCanary(ui64 canary = NPDisk::Canary) {
- Y_DEBUG_ABORT_UNLESS(Size() >= sizeof(TDataSectorFooter) + CanarySize);
+ Y_VERIFY_DEBUG(Size() >= sizeof(TDataSectorFooter) + CanarySize);
WriteUnaligned<ui64>(End() - sizeof(TDataSectorFooter) - CanarySize, canary);
}
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_ut.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_ut.cpp
index 243f2b8a09..de8c4be7df 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_ut.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_ut.cpp
@@ -255,7 +255,7 @@ void TestOffset(ui64 offset, ui64 size, ui64 expectedFirstSector, ui64 expectedL
ui64 firstSector;
ui64 lastSector;
ui64 sectorOffset;
- bool isOk = ParseSectorOffset(format, nullptr, 0, offset, size, firstSector, lastSector, sectorOffset);
+ bool isOk = ParseSectorOffset(format, nullptr, 0, offset, size, firstSector, lastSector, sectorOffset, "");
UNIT_ASSERT_C(isOk && firstSector == expectedFirstSector && lastSector == expectedLastSector &&
sectorOffset == expectedSectorOffset,
"isOk# " << isOk << "\n"
@@ -298,7 +298,7 @@ void TestPayloadOffset(ui64 firstSector, ui64 lastSector, ui64 currentSector, ui
ui64 payloadSize;
ui64 payloadOffset;
- ParsePayloadFromSectorOffset(format, firstSector, lastSector, currentSector, &payloadSize, &payloadOffset);
+ ParsePayloadFromSectorOffset(format, firstSector, lastSector, currentSector, &payloadSize, &payloadOffset, "");
UNIT_ASSERT_C(payloadSize == expectedPayloadSize && payloadOffset == expectedPayloadOffset,
"firstSector# " << firstSector << " lastSector# " << lastSector << " currentSector# " << currentSector << "\n"
"payloadSize# " << payloadSize << " expectedPayloadSize# " << expectedPayloadSize << "\n"
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.cpp b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.cpp
index e1664e8090..41cbd2ce35 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.cpp
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.cpp
@@ -19,7 +19,7 @@ void TBufferedWriter::WriteBufferWithFlush(TReqId reqId, NWilson::TTraceId *trac
REQUEST_VALGRIND_CHECK_MEM_IS_DEFINED(source, sizeToWrite);
CurrentBuffer->FlushAction = flushAction;
CurrentBuffer->CostNs = DriveModel->TimeForSizeNs(sizeToWrite, chunkIdx, TDriveModel::OP_TYPE_WRITE);
- Y_DEBUG_ABORT_UNLESS(sizeToWrite <= CurrentBuffer->Size());
+ Y_VERIFY_DEBUG_S(sizeToWrite <= CurrentBuffer->Size(), PCtx->PDiskLogPrefix);
BlockDevice.PwriteAsync(source, sizeToWrite, DirtyFrom, CurrentBuffer.Release(), reqId, traceId);
CurrentBuffer = TBuffer::TPtr(Pool->Pop());
CurrentSector = CurrentBuffer->Data();
@@ -32,7 +32,7 @@ void TBufferedWriter::WriteBufferWithFlush(TReqId reqId, NWilson::TTraceId *trac
}
TBufferedWriter::TBufferedWriter(ui64 sectorSize, IBlockDevice &blockDevice, TDiskFormat &format, TBufferPool *pool,
- TActorSystem *actorSystem, TDriveModel *driveModel)
+ TActorSystem *actorSystem, TDriveModel *driveModel, std::shared_ptr<TPDiskCtx> pCtx)
: SectorSize(sectorSize)
, BlockDevice(blockDevice)
, Format(format)
@@ -46,6 +46,7 @@ TBufferedWriter::TBufferedWriter(ui64 sectorSize, IBlockDevice &blockDevice, TDi
, ActorSystem(actorSystem)
, LastReqId(TReqId::InitialTSectorWriterReqId, 0)
, DriveModel(driveModel)
+ , PCtx(std::move(pCtx))
{
}
@@ -64,8 +65,8 @@ void TBufferedWriter::SetupWithBuffer(ui64 startOffset, ui64 currentOffset, TBuf
ui8* TBufferedWriter::Seek(ui64 offset, ui32 count, ui32 reserve, TReqId reqId, NWilson::TTraceId *traceId,
ui32 chunkIdx) {
- Y_ABORT_UNLESS(count > 0);
- Y_ABORT_UNLESS(count <= 16);
+ Y_VERIFY_S(count > 0, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(count <= 16, PCtx->PDiskLogPrefix);
if (NextOffset != offset || NextOffset + SectorSize * reserve - StartOffset > CurrentBuffer->Size()) {
WriteBufferWithFlush(LastReqId, traceId, nullptr, chunkIdx);
StartOffset = offset;
@@ -79,7 +80,7 @@ ui8* TBufferedWriter::Seek(ui64 offset, ui32 count, ui32 reserve, TReqId reqId,
}
ui8* TBufferedWriter::Get() const {
- Y_ABORT_UNLESS(CurrentSector);
+ Y_VERIFY_S(CurrentSector, PCtx->PDiskLogPrefix);
return CurrentSector;
}
@@ -106,4 +107,3 @@ TBufferedWriter::~TBufferedWriter() {
} // NPDisk
} // NKikimr
-
diff --git a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.h b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.h
index 58f65878a3..a635914f58 100644
--- a/ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.h
+++ b/ydb/core/blobstorage/pdisk/blobstorage_pdisk_writer.h
@@ -43,11 +43,13 @@ protected:
TReqId LastReqId;
TDriveModel *DriveModel;
+ std::shared_ptr<TPDiskCtx> PCtx;
+
void WriteBufferWithFlush(TReqId reqId, NWilson::TTraceId *traceId,
TCompletionAction *flushAction, ui32 chunkIdx);
public:
TBufferedWriter(ui64 sectorSize, IBlockDevice &blockDevice, TDiskFormat &format, TBufferPool *pool,
- TActorSystem *actorSystem, TDriveModel *driveModel);
+ TActorSystem *actorSystem, TDriveModel *driveModel, std::shared_ptr<TPDiskCtx> pCtx);
void SetupWithBuffer(ui64 startOffset, ui64 currentOffset, TBuffer *buffer, ui32 count, TReqId reqId);
ui8* Seek(ui64 offset, ui32 count, ui32 reserve, TReqId reqId, NWilson::TTraceId *traceId, ui32 chunkIdx);
ui8* Get() const;
@@ -130,17 +132,17 @@ public:
, DriveModel(driveModel)
, OnNewChunk(true)
{
- Y_ABORT_UNLESS(!LogChunkInfo || LogChunkInfo->ChunkIdx == ChunkIdx);
- BufferedWriter.Reset(new TBufferedWriter(Format.SectorSize, BlockDevice, Format, pool, PCtx->ActorSystem,
- DriveModel));
+ Y_VERIFY_S(!LogChunkInfo || LogChunkInfo->ChunkIdx == ChunkIdx, PCtx->PDiskLogPrefix);
+ BufferedWriter.Reset(new TBufferedWriter(Format.SectorSize, BlockDevice, Format, pool,
+ PCtx->ActorSystem, DriveModel, PCtx));
Cypher.SetKey(key);
Cypher.StartMessage(Nonce);
ui64 sectorOffset = Format.Offset(ChunkIdx, SectorIdx);
if (buffer) {
- Y_ABORT_UNLESS(IsLog);
- Y_ABORT_UNLESS(!IsSysLog);
+ Y_VERIFY_S(IsLog, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(!IsSysLog, PCtx->PDiskLogPrefix);
ui64 startOffset = Format.Offset(ChunkIdx, SectorIdx);
BufferedWriter->SetupWithBuffer(startOffset, sectorOffset, buffer, 1,
TReqId(TReqId::CreateTSectorWriterWithBuffer, 0));
@@ -210,8 +212,8 @@ public:
void SwitchToNewChunk(TReqId reqId, NWilson::TTraceId *traceId) {
// Allocate next log chunk, write next log chunk pointer sectors, switch to that log chunk.
- Y_ABORT_UNLESS(IsLog);
- Y_ABORT_UNLESS(!NextChunks.empty());
+ Y_VERIFY_S(IsLog, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(!NextChunks.empty(), PCtx->PDiskLogPrefix);
ui32 nextChunk = NextChunks.front().Idx;
TLogChunkInfo *nextLogChunkInfo = NextChunks.front().Info;
NextChunks.pop_front();
@@ -252,8 +254,8 @@ public:
memcpy(sectorData, BufferedWriter->Get(), Format.SectorSize);
// Check sector CRC
const ui64 sectorHash = *(ui64*)(void*)(sectorData + Format.SectorSize - sizeof(ui64));
- Y_ABORT_UNLESS(Hash.CheckSectorHash(sectorOffset, dataMagic, sectorData, Format.SectorSize, sectorHash),
- "Sector hash corruption detected!");
+ Y_VERIFY_S(Hash.CheckSectorHash(sectorOffset, dataMagic, sectorData, Format.SectorSize, sectorHash),
+ PCtx->PDiskLogPrefix << "Sector hash corruption detected!");
}
BufferedWriter->MarkDirty();
reserve = ReplicationFactor;
@@ -340,7 +342,7 @@ public:
}
void Write(const void* data, ui64 size, TReqId reqId, NWilson::TTraceId *traceId) {
- Y_ABORT_UNLESS(data != nullptr);
+ Y_VERIFY_S(data != nullptr, PCtx->PDiskLogPrefix);
Cypher.Encrypt(BufferedWriter->Get() + CurrentPosition, data, (ui32)size);
FinalizeWrite(size, reqId, traceId);
}
@@ -358,7 +360,7 @@ public:
}
void TerminateLog(TReqId reqId, NWilson::TTraceId *traceId) {
- Y_ABORT_UNLESS(IsLog);
+ Y_VERIFY_S(IsLog, PCtx->PDiskLogPrefix);
if (SectorBytesFree == 0 || SectorBytesFree == Format.SectorPayloadSize()) {
P_LOG(PRI_DEBUG, BPD63, SelfInfo() << " TerminateLog Sector is full or free",
(SectorBytesFree, SectorBytesFree),
@@ -399,8 +401,8 @@ public:
void LogHeader(TOwner owner, TLogSignature signature, ui64 ownerLsn, ui64 dataSize, TReqId reqId,
NWilson::TTraceId *traceId) {
- Y_ABORT_UNLESS(IsLog);
- Y_ABORT_UNLESS(SectorBytesFree >= sizeof(TFirstLogPageHeader));
+ Y_VERIFY_S(IsLog, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(SectorBytesFree >= sizeof(TFirstLogPageHeader), PCtx->PDiskLogPrefix);
ui64 availableSize = SectorBytesFree - sizeof(TFirstLogPageHeader);
bool isWhole = availableSize >= dataSize;
bool isTornOffHeader = false;
@@ -433,10 +435,10 @@ public:
}
void LogDataPart(const void* data, ui64 size, TReqId reqId, NWilson::TTraceId *traceId) {
- Y_ABORT_UNLESS(IsLog);
+ Y_VERIFY_S(IsLog, PCtx->PDiskLogPrefix);
REQUEST_VALGRIND_CHECK_MEM_IS_DEFINED(data, size);
- Y_ABORT_UNLESS(data);
- Y_ABORT_UNLESS(size > 0);
+ Y_VERIFY_S(data, PCtx->PDiskLogPrefix);
+ Y_VERIFY_S(size > 0, PCtx->PDiskLogPrefix);
while (RecordBytesLeft > SectorBytesFree && size >= SectorBytesFree) {
const ui64 bytesToWrite = SectorBytesFree;
Write(data, bytesToWrite, reqId, traceId);
@@ -467,7 +469,7 @@ public:
protected:
void FinalizeWrite(ui64 size, TReqId reqId, NWilson::TTraceId *traceId) {
CurrentPosition += size;
- Y_ABORT_UNLESS(SectorBytesFree >= size);
+ Y_VERIFY_S(SectorBytesFree >= size, PCtx->PDiskLogPrefix);
SectorBytesFree -= size;
RecordBytesLeft -= size;
if (size) {
diff --git a/ydb/core/blobstorage/pdisk/mock/pdisk_mock.cpp b/ydb/core/blobstorage/pdisk/mock/pdisk_mock.cpp
index a945a4e13f..06d2da56e6 100644
--- a/ydb/core/blobstorage/pdisk/mock/pdisk_mock.cpp
+++ b/ydb/core/blobstorage/pdisk/mock/pdisk_mock.cpp
@@ -88,7 +88,7 @@ struct TPDiskMockState::TImpl {
to.ReservedChunks.insert(FreeChunks.extract(it));
}
- Y_ABORT_UNLESS(chunkIdx != TotalChunks);
+ Y_VERIFY(chunkIdx != TotalChunks);
return chunkIdx;
}
@@ -97,7 +97,7 @@ struct TPDiskMockState::TImpl {
for (auto& [chunkIdx, chunk] : owner.ChunkData) {
for (auto& [blockIdx, ref] : chunk.Blocks) {
const auto it = Blocks.find(*ref);
- Y_ABORT_UNLESS(it != Blocks.end());
+ Y_VERIFY(it != Blocks.end());
ref = &it->first;
}
}
@@ -144,7 +144,7 @@ struct TPDiskMockState::TImpl {
for (auto& [ownerId, owner] : Owners) {
if (slotIsValid) {
if (slotId == owner.SlotId) {
- Y_ABORT_UNLESS(owner.VDiskId.SameExceptGeneration(vdiskId));
+ Y_VERIFY(owner.VDiskId.SameExceptGeneration(vdiskId));
*created = false;
return std::make_tuple(ownerId, &owner);
}
@@ -160,7 +160,7 @@ struct TPDiskMockState::TImpl {
std::map<ui8, TOwner>::iterator it;
for (it = Owners.begin(); it != Owners.end() && it->first == ownerId; ++it, ++ownerId)
{}
- Y_ABORT_UNLESS(ownerId);
+ Y_VERIFY(ownerId);
it = Owners.emplace_hint(it, ownerId, TOwner());
it->second.VDiskId = vdiskId;
it->second.SlotId = slotId;
@@ -178,17 +178,17 @@ struct TPDiskMockState::TImpl {
void CommitChunk(TOwner& owner, TChunkIdx chunkIdx) {
const ui32 num = owner.ReservedChunks.erase(chunkIdx) + owner.CommittedChunks.erase(chunkIdx);
- Y_ABORT_UNLESS(num);
+ Y_VERIFY(num);
const bool inserted = owner.CommittedChunks.insert(chunkIdx).second;
- Y_ABORT_UNLESS(inserted);
+ Y_VERIFY(inserted);
}
void DeleteChunk(TOwner& owner, TChunkIdx chunkIdx) {
const ui32 num = owner.ReservedChunks.erase(chunkIdx) + owner.CommittedChunks.erase(chunkIdx);
- Y_ABORT_UNLESS(num);
+ Y_VERIFY(num);
owner.ChunkData.erase(chunkIdx);
const bool inserted = FreeChunks.insert(chunkIdx).second;
- Y_ABORT_UNLESS(inserted);
+ Y_VERIFY(inserted);
AdjustFreeChunks();
}
@@ -222,7 +222,7 @@ struct TPDiskMockState::TImpl {
for (auto& [ownerId, owner] : Owners) {
for (auto& [chunkIdx, data] : owner.ChunkData) {
const bool inserted = res.insert(chunkIdx).second;
- Y_ABORT_UNLESS(inserted);
+ Y_VERIFY(inserted);
}
}
return res;
@@ -388,7 +388,7 @@ public:
for (const auto& [ownerId, owner] : Impl.Owners) {
usedChunks += owner.CommittedChunks.size() + owner.ReservedChunks.size();
}
- Y_ABORT_UNLESS(usedChunks <= Impl.TotalChunks);
+ Y_VERIFY(usedChunks <= Impl.TotalChunks);
auto ev = std::make_unique<TEvBlobStorage::TEvControllerUpdateDiskStatus>();
auto& record = ev->Record;
@@ -406,7 +406,7 @@ public:
// report message and validate PDisk guid
auto *msg = ev->Get();
PDISK_MOCK_LOG(NOTICE, PDM01, "received TEvYardInit", (Msg, msg->ToString()));
- Y_ABORT_UNLESS(msg->PDiskGuid == Impl.PDiskGuid, "PDiskGuid mismatch");
+ Y_VERIFY(msg->PDiskGuid == Impl.PDiskGuid, "PDiskGuid mismatch");
// find matching owner or create a new one
ui8 ownerId;
@@ -500,7 +500,7 @@ public:
std::deque<std::tuple<TActorId, THolder<NPDisk::TEvLog>>> LogQ;
void Handle(NPDisk::TEvLog::TPtr ev) {
- Y_ABORT_UNLESS(!Impl.CheckIsReadOnlyOwner(ev->Get()));
+ Y_VERIFY(!Impl.CheckIsReadOnlyOwner(ev->Get()));
if (LogQ.empty()) {
TActivationContext::Send(new IEventHandle(EvResume, 0, SelfId(), TActorId(), nullptr, 0));
}
@@ -512,7 +512,7 @@ public:
TActivationContext::Send(new IEventHandle(EvResume, 0, SelfId(), TActorId(), nullptr, 0));
}
for (auto& [msg, _] : ev->Get()->Logs) {
- Y_ABORT_UNLESS(!Impl.CheckIsReadOnlyOwner(msg.Get()));
+ Y_VERIFY(!Impl.CheckIsReadOnlyOwner(msg.Get()));
LogQ.emplace_back(ev->Sender, std::move(msg));
}
}
@@ -539,7 +539,7 @@ public:
TImpl::TOwner& owner = it->second;
PDISK_MOCK_LOG(DEBUG, PDM11, "received TEvLog", (Msg, msg->ToString()), (VDiskId, owner.VDiskId));
- Y_ABORT_UNLESS(msg->Lsn > std::exchange(owner.LastLsn, msg->Lsn));
+ Y_VERIFY(msg->Lsn > std::exchange(owner.LastLsn, msg->Lsn));
// add successful result to the actor's result queue if there is no such last one
if (!results.empty() && results.back()->Recipient == recipient) {
@@ -586,7 +586,7 @@ public:
owner.StartingPoints[msg->Signature.GetUnmasked()] = owner.Log.back();
}
}
- Y_ABORT_UNLESS(res);
+ Y_VERIFY(res);
if (auto&& cb = std::move(msg->LogCallback)) { // register callback in the queue if there is one
callbacks.emplace_back(std::move(cb), res);
}
@@ -646,7 +646,7 @@ public:
if (TImpl::TOwner *owner = Impl.FindOwner(msg, res)) {
PDISK_MOCK_LOG(INFO, PDM05, "received TEvReadLog", (Msg, msg->ToString()), (VDiskId, owner->VDiskId));
ui64 size = 0;
- Y_ABORT_UNLESS(msg->Position.OffsetInChunk <= owner->Log.size());
+ Y_VERIFY(msg->Position.OffsetInChunk <= owner->Log.size());
for (auto it = owner->Log.begin() + msg->Position.OffsetInChunk; it != owner->Log.end(); ++it) {
res->Results.push_back(*it);
res->IsEndOfLog = ++res->NextPosition.OffsetInChunk == owner->Log.size();
@@ -664,7 +664,7 @@ public:
void Handle(NPDisk::TEvChunkReserve::TPtr ev) {
auto *msg = ev->Get();
- Y_ABORT_UNLESS(!Impl.CheckIsReadOnlyOwner(msg));
+ Y_VERIFY(!Impl.CheckIsReadOnlyOwner(msg));
auto res = std::make_unique<NPDisk::TEvChunkReserveResult>(NKikimrProto::OK, GetStatusFlags());
if (TImpl::TOwner *owner = Impl.FindOwner(msg, res)) {
if (Impl.GetNumFreeChunks() < msg->SizeChunks) {
@@ -692,7 +692,7 @@ public:
"VDiskId# " << owner->VDiskId << " ChunkIdx# " << msg->ChunkIdx);
ui32 offset = msg->Offset;
ui32 size = msg->Size;
- Y_ABORT_UNLESS(offset < Impl.ChunkSize && offset + size <= Impl.ChunkSize && size);
+ Y_VERIFY(offset < Impl.ChunkSize && offset + size <= Impl.ChunkSize && size);
auto data = TRcBuf::Uninitialized(size);
const auto chunkIt = owner->ChunkData.find(msg->ChunkIdx);
@@ -733,7 +733,7 @@ public:
}
void Handle(NPDisk::TEvChunkWrite::TPtr ev) {
- Y_ABORT_UNLESS(!Impl.CheckIsReadOnlyOwner(ev->Get()));
+ Y_VERIFY(!Impl.CheckIsReadOnlyOwner(ev->Get()));
auto *msg = ev->Get();
auto res = std::make_unique<NPDisk::TEvChunkWriteResult>(NKikimrProto::OK, msg->ChunkIdx, msg->Cookie,
GetStatusFlags(), TString());
@@ -749,12 +749,12 @@ public:
}
if (msg->ChunkIdx) {
// allow reads only from owned chunks
- Y_ABORT_UNLESS(owner->ReservedChunks.count(msg->ChunkIdx) || owner->CommittedChunks.count(msg->ChunkIdx));
+ Y_VERIFY(owner->ReservedChunks.count(msg->ChunkIdx) || owner->CommittedChunks.count(msg->ChunkIdx));
// ensure offset and write sizes are granular
- Y_ABORT_UNLESS(msg->Offset % Impl.AppendBlockSize == 0);
- Y_ABORT_UNLESS(msg->PartsPtr);
- Y_ABORT_UNLESS(msg->PartsPtr->ByteSize() % Impl.AppendBlockSize == 0);
- Y_ABORT_UNLESS(msg->Offset + msg->PartsPtr->ByteSize() <= Impl.ChunkSize);
+ Y_VERIFY(msg->Offset % Impl.AppendBlockSize == 0);
+ Y_VERIFY(msg->PartsPtr);
+ Y_VERIFY(msg->PartsPtr->ByteSize() % Impl.AppendBlockSize == 0);
+ Y_VERIFY(msg->Offset + msg->PartsPtr->ByteSize() <= Impl.ChunkSize);
// issue write
const ui32 offset = msg->Offset;
TImpl::TChunkData& chunk = owner->ChunkData[msg->ChunkIdx];
@@ -788,7 +788,7 @@ public:
++it->second;
if (const TString *prev = std::exchange(chunk.Blocks[blockIdx++], &it->first)) {
const auto it = Impl.Blocks.find(*prev);
- Y_ABORT_UNLESS(it != Impl.Blocks.end());
+ Y_VERIFY(it != Impl.Blocks.end());
if (!--it->second) {
Impl.Blocks.erase(it);
}
diff --git a/ydb/core/blobstorage/vdisk/common/vdisk_context.cpp b/ydb/core/blobstorage/vdisk/common/vdisk_context.cpp
index 230a0d745b..12a0c41b0a 100644
--- a/ydb/core/blobstorage/vdisk/common/vdisk_context.cpp
+++ b/ydb/core/blobstorage/vdisk/common/vdisk_context.cpp
@@ -28,6 +28,7 @@ namespace NKikimr {
const TVDiskID &selfVDisk,
TActorSystem *as, // as can be nullptr for tests
NPDisk::EDeviceType type,
+ ui32 pDiskId,
bool donorMode,
TReplQuoter::TPtr replPDiskReadQuoter,
TReplQuoter::TPtr replPDiskWriteQuoter,
@@ -42,7 +43,7 @@ namespace NKikimr {
, IFaceMonGroup(std::make_shared<NMonGroup::TVDiskIFaceGroup>(VDiskCounters, "subsystem", "interface"))
, GroupId(selfVDisk.GroupID)
, ShortSelfVDisk(selfVDisk)
- , VDiskLogPrefix(GenerateVDiskLogPrefix(selfVDisk, donorMode))
+ , VDiskLogPrefix(GenerateVDiskLogPrefix(pDiskId, selfVDisk, donorMode))
, NodeId(as ? as->NodeId : 0)
, FreshIndex(VDiskMemCounters->GetCounter("MemTotal:FreshIndex"))
, FreshData(VDiskMemCounters->GetCounter("MemTotal:FreshData"))
diff --git a/ydb/core/blobstorage/vdisk/common/vdisk_context.h b/ydb/core/blobstorage/vdisk/common/vdisk_context.h
index c12bf3347c..2af82628a5 100644
--- a/ydb/core/blobstorage/vdisk/common/vdisk_context.h
+++ b/ydb/core/blobstorage/vdisk/common/vdisk_context.h
@@ -100,6 +100,7 @@ namespace NKikimr {
const TVDiskID &selfVDisk,
TActorSystem *as, // can be nullptr for tests
NPDisk::EDeviceType type,
+ ui32 pDiskId = 0,
bool donorMode = false,
TReplQuoter::TPtr replPDiskReadQuoter = nullptr,
TReplQuoter::TPtr replPDiskWriteQuoter = nullptr,
diff --git a/ydb/core/blobstorage/vdisk/common/vdisk_log.cpp b/ydb/core/blobstorage/vdisk/common/vdisk_log.cpp
index f6648c96d9..a8d81ea6d4 100644
--- a/ydb/core/blobstorage/vdisk/common/vdisk_log.cpp
+++ b/ydb/core/blobstorage/vdisk/common/vdisk_log.cpp
@@ -25,10 +25,16 @@ namespace NKikimr {
return prefix + formatted;
}
- TString GenerateVDiskLogPrefix(const TVDiskID &vdisk, bool donorMode) {
+ TString GenerateVDiskMonitoringName(const TVDiskID &vDiskId, bool donorMode) {
return donorMode
- ? Sprintf("VDISK%s(DONOR): ", vdisk.ToString().data())
- : Sprintf("VDISK%s: ", vdisk.ToStringWOGeneration().data());
+ ? Sprintf("VDISK%s(DONOR): ", vDiskId.ToString().data())
+ : Sprintf("VDISK%s: ", vDiskId.ToStringWOGeneration().data());
+ }
+
+ TString GenerateVDiskLogPrefix(ui32 pDiskId, const TVDiskID &vDiskId, bool donorMode) {
+ auto monName = GenerateVDiskMonitoringName(vDiskId, donorMode);
+ return Sprintf("PDiskId# %" PRIu32 " %s(%" PRIu32 ") ",
+ pDiskId, monName.data(), vDiskId.GroupID.GetRawId());
}
////////////////////////////////////////////////////////////////////////////
diff --git a/ydb/core/blobstorage/vdisk/common/vdisk_log.h b/ydb/core/blobstorage/vdisk/common/vdisk_log.h
index 69fa585dac..57c89a5bc4 100644
--- a/ydb/core/blobstorage/vdisk/common/vdisk_log.h
+++ b/ydb/core/blobstorage/vdisk/common/vdisk_log.h
@@ -20,7 +20,8 @@ namespace NKikimr {
TString AppendVDiskLogPrefix(const TString &prefix, const char *c, ...);
struct TVDiskID;
- TString GenerateVDiskLogPrefix(const TVDiskID &vdisk, bool donorMode);
+ TString GenerateVDiskMonitoringName(const TVDiskID &vDiskId, bool donorMode);
+ TString GenerateVDiskLogPrefix(ui32 pDiskId, const TVDiskID &vDiskId, bool donorMode);
// logger
diff --git a/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeletonfront.cpp b/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeletonfront.cpp
index 17eddf7d5a..86c01e205d 100644
--- a/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeletonfront.cpp
+++ b/ydb/core/blobstorage/vdisk/skeleton/blobstorage_skeletonfront.cpp
@@ -752,7 +752,8 @@ namespace NKikimr {
const auto &bi = Config->BaseInfo;
TString path = Sprintf("vdisk%09" PRIu32 "_%09" PRIu32, bi.PDiskId, bi.VDiskSlotId);
TString name = Sprintf("%s VDisk%09" PRIu32 "_%09" PRIu32 " (%" PRIu32 ")",
- VCtx->VDiskLogPrefix.data(), bi.PDiskId, bi.VDiskSlotId, GInfo->GroupID.GetRawId());
+ GenerateVDiskMonitoringName(SelfVDiskId, bi.DonorMode).data(),
+ bi.PDiskId, bi.VDiskSlotId, GInfo->GroupID.GetRawId());
mon->RegisterActorPage(vdisksMonPage, path, name, false, TActivationContext::ActorSystem(), ctx.SelfID);
}
}
@@ -760,7 +761,7 @@ namespace NKikimr {
void Bootstrap(const TActorContext &ctx) {
const auto& baseInfo = Config->BaseInfo;
VCtx = MakeIntrusive<TVDiskContext>(ctx.SelfID, GInfo->PickTopology(), VDiskCounters, SelfVDiskId,
- TActivationContext::ActorSystem(), baseInfo.DeviceType, baseInfo.DonorMode,
+ TActivationContext::ActorSystem(), baseInfo.DeviceType, baseInfo.PDiskId, baseInfo.DonorMode,
baseInfo.ReplPDiskReadQuoter, baseInfo.ReplPDiskWriteQuoter, baseInfo.ReplNodeRequestQuoter,
baseInfo.ReplNodeResponseQuoter);
diff --git a/ydb/core/cms/console/console_configs_manager.cpp b/ydb/core/cms/console/console_configs_manager.cpp
index 80bbe99140..77fa7fb019 100644
--- a/ydb/core/cms/console/console_configs_manager.cpp
+++ b/ydb/core/cms/console/console_configs_manager.cpp
@@ -174,15 +174,20 @@ void TConfigsManager::ValidateDatabaseConfig(TUpdateDatabaseConfigOpContext& opC
auto resolved = NYamlConfig::ResolveAll(tree);
errors.clear();
+
+ auto* csk = AppData()->ConfigSwissKnife;
+
for (auto& [_, config] : resolved.Configs) {
auto cfg = NYamlConfig::YamlToProto(
config.second,
true,
true,
unknownFieldsCollector);
- NKikimr::NConfig::EValidationResult result = NKikimr::NConfig::ValidateConfig(cfg, errors);
- if (result == NKikimr::NConfig::EValidationResult::Error) {
- ythrow yexception() << errors.front();
+ if (csk) {
+ auto result = csk->ValidateConfig(cfg, errors);
+ if (result == NYamlConfig::EValidationResult::Error) {
+ ythrow yexception() << errors.front();
+ }
}
}
diff --git a/ydb/core/config/init/init_impl.h b/ydb/core/config/init/init_impl.h
index 190dcce300..792272c8d0 100644
--- a/ydb/core/config/init/init_impl.h
+++ b/ydb/core/config/init/init_impl.h
@@ -1158,15 +1158,14 @@ public:
TenantName = FillTenantPoolConfig(CommonAppOptions);
+ FillData(CommonAppOptions);
+
std::vector<TString> errors;
- EValidationResult result = ValidateConfig(AppConfig, errors);
- if (result == EValidationResult::Error) {
+ if (csk && csk->ValidateConfig(AppConfig, errors) == NYamlConfig::EValidationResult::Error) {
ythrow yexception() << errors.front();
}
Logger.Out() << "configured" << Endl;
-
- FillData(CommonAppOptions);
}
void FillData(const NConfig::TCommonAppOptions& cf) {
diff --git a/ydb/core/driver_lib/run/factories.h b/ydb/core/driver_lib/run/factories.h
index e87e4e8cda..492f3b6a92 100644
--- a/ydb/core/driver_lib/run/factories.h
+++ b/ydb/core/driver_lib/run/factories.h
@@ -2,6 +2,7 @@
#include <ydb/core/blobstorage/pdisk/blobstorage_pdisk_util_devicemode.h>
#include <ydb/core/kqp/common/kqp.h>
#include <ydb/core/tx/datashard/export_iface.h>
+#include <ydb/core/tx/replication/service/transfer_writer_factory.h>
#include <ydb/core/tx/schemeshard/schemeshard_operation_factory.h>
#include <ydb/core/persqueue/actor_persqueue_client_iface.h>
#include <ydb/core/protos/auth.pb.h>
@@ -53,6 +54,7 @@ struct TModuleFactories {
TGrpcServiceFactory GrpcServiceFactory;
std::shared_ptr<NPQ::IPersQueueMirrorReaderFactory> PersQueueMirrorReaderFactory;
+ std::shared_ptr<NReplication::NService::ITransferWriterFactory> TransferWriterFactory;
/// Factory for pdisk's aio engines
std::shared_ptr<NPDisk::IIoContextFactory> IoContextFactory;
diff --git a/ydb/core/driver_lib/run/kikimr_services_initializers.cpp b/ydb/core/driver_lib/run/kikimr_services_initializers.cpp
index 7090097cbe..ad58b78ab0 100644
--- a/ydb/core/driver_lib/run/kikimr_services_initializers.cpp
+++ b/ydb/core/driver_lib/run/kikimr_services_initializers.cpp
@@ -70,8 +70,10 @@
#include <ydb/core/health_check/health_check.h>
#include <ydb/core/kafka_proxy/actors/kafka_metrics_actor.h>
+#include <ydb/core/kafka_proxy/actors/kafka_metadata_actor.h>
#include <ydb/core/kafka_proxy/kafka_metrics.h>
#include <ydb/core/kafka_proxy/kafka_proxy.h>
+#include <ydb/core/kafka_proxy/kafka_transactions_coordinator.h>
#include <ydb/core/kqp/common/kqp.h>
#include <ydb/core/kqp/proxy_service/kqp_proxy_service.h>
@@ -2764,10 +2766,16 @@ void TKafkaProxyServiceInitializer::InitializeServices(NActors::TActorSystemSetu
TActorSetupCmd(CreateDiscoveryCache(NGRpcService::KafkaEndpointId),
TMailboxType::HTSwap, appData->UserPoolId)
);
+
+ setup->LocalServices.emplace_back(
+ NKafka::MakeKafkaTransactionsServiceID(),
+ TActorSetupCmd(NKafka::CreateKafkaTransactionsCoordinator(),
+ TMailboxType::HTSwap, appData->UserPoolId
+ )
+ );
setup->LocalServices.emplace_back(
TActorId(),
- TActorSetupCmd(NKafka::CreateKafkaListener(MakePollerActorId(), settings, Config.GetKafkaProxyConfig(),
- NKafka::MakeKafkaDiscoveryCacheID()),
+ TActorSetupCmd(NKafka::CreateKafkaListener(MakePollerActorId(), settings, Config.GetKafkaProxyConfig()),
TMailboxType::HTSwap, appData->UserPoolId)
);
diff --git a/ydb/core/driver_lib/run/run.cpp b/ydb/core/driver_lib/run/run.cpp
index 70eb9987ca..9b28f65eab 100644
--- a/ydb/core/driver_lib/run/run.cpp
+++ b/ydb/core/driver_lib/run/run.cpp
@@ -947,6 +947,40 @@ void TKikimrRunner::InitializeGRpc(const TKikimrRunConfig& runConfig) {
opts.SetMaxMessageSize(grpcConfig.HasMaxMessageSize() ? grpcConfig.GetMaxMessageSize() : NYdbGrpc::DEFAULT_GRPC_MESSAGE_SIZE_LIMIT);
opts.SetMaxGlobalRequestInFlight(grpcConfig.GetMaxInFlight());
opts.SetLogger(NYdbGrpc::CreateActorSystemLogger(*ActorSystem.Get(), NKikimrServices::GRPC_SERVER));
+ switch(grpcConfig.GetDefaultCompressionAlgorithm()) {
+ case NKikimrConfig::TGRpcConfig::YDB_GRPC_COMPRESS_NONE: {
+ opts.SetDefaultCompressionAlgorithm(GRPC_COMPRESS_NONE);
+ break;
+ }
+ case NKikimrConfig::TGRpcConfig::YDB_GRPC_COMPRESS_DEFLATE: {
+ opts.SetDefaultCompressionAlgorithm(GRPC_COMPRESS_DEFLATE);
+ break;
+ }
+ case NKikimrConfig::TGRpcConfig::YDB_GRPC_COMPRESS_GZIP: {
+ opts.SetDefaultCompressionAlgorithm(GRPC_COMPRESS_GZIP);
+ break;
+ }
+ }
+
+ switch(grpcConfig.GetDefaultCompressionLevel()) {
+ case NKikimrConfig::TGRpcConfig::YDB_GRPC_COMPRESS_LEVEL_NONE: {
+ opts.SetDefaultCompressionLevel(GRPC_COMPRESS_LEVEL_NONE);
+ break;
+ }
+
+ case NKikimrConfig::TGRpcConfig::YDB_GRPC_COMPRESS_LEVEL_LOW: {
+ opts.SetDefaultCompressionLevel(GRPC_COMPRESS_LEVEL_LOW);
+ break;
+ }
+ case NKikimrConfig::TGRpcConfig::YDB_GRPC_COMPRESS_LEVEL_MED: {
+ opts.SetDefaultCompressionLevel(GRPC_COMPRESS_LEVEL_MED);
+ break;
+ }
+ case NKikimrConfig::TGRpcConfig::YDB_GRPC_COMPRESS_LEVEL_HIGH: {
+ opts.SetDefaultCompressionLevel(GRPC_COMPRESS_LEVEL_HIGH);
+ break;
+ }
+ }
if (appConfig.HasDomainsConfig() &&
appConfig.GetDomainsConfig().HasSecurityConfig() &&
@@ -1093,6 +1127,9 @@ void TKikimrRunner::InitializeAppData(const TKikimrRunConfig& runConfig)
AppData->IoContextFactory = ModuleFactories ? ModuleFactories->IoContextFactory.get() : nullptr;
AppData->SchemeOperationFactory = ModuleFactories ? ModuleFactories->SchemeOperationFactory.get() : nullptr;
AppData->ConfigSwissKnife = ModuleFactories ? ModuleFactories->ConfigSwissKnife.get() : nullptr;
+ if (ModuleFactories) {
+ AppData->TransferWriterFactory = ModuleFactories->TransferWriterFactory;
+ }
AppData->SqsAuthFactory = ModuleFactories
? ModuleFactories->SqsAuthFactory.get()
diff --git a/ydb/core/formats/arrow/accessor/abstract/accessor.cpp b/ydb/core/formats/arrow/accessor/abstract/accessor.cpp
index b1221c1bf8..cca692de70 100644
--- a/ydb/core/formats/arrow/accessor/abstract/accessor.cpp
+++ b/ydb/core/formats/arrow/accessor/abstract/accessor.cpp
@@ -62,10 +62,10 @@ IChunkedArray::TFullDataAddress IChunkedArray::GetChunk(const std::optional<TAdd
return TFullDataAddress(localAddress.GetArray(), std::move(addressChain));
} else {
auto chunkedArrayAddress = GetArray(chunkCurrent, position, nullptr);
- if (chunkCurrent) {
- AFL_VERIFY(chunkCurrent->GetSize() == 1 + chunkedArrayAddress.GetAddress().GetSize())("current", chunkCurrent->GetSize())(
- "chunked", chunkedArrayAddress.GetAddress().GetSize());
- }
+// if (chunkCurrent) {
+// AFL_VERIFY(chunkCurrent->GetSize() == chunkedArrayAddress.GetAddress().GetSize())("current", chunkCurrent->GetSize())(
+// "chunked", chunkedArrayAddress.GetAddress().GetSize());
+// }
auto localAddress = chunkedArrayAddress.GetArray()->GetLocalData(address, chunkedArrayAddress.GetAddress().GetLocalIndex(position));
auto fullAddress = std::move(chunkedArrayAddress.MutableAddress());
fullAddress.Add(localAddress.GetAddress());
@@ -112,7 +112,7 @@ std::shared_ptr<IChunkedArray> IChunkedArray::DoApplyFilter(const TColumnFilter&
auto schema = std::make_shared<arrow::Schema>(fields);
auto table = arrow::Table::Make(schema, { arr }, GetRecordsCount());
AFL_VERIFY(table->num_columns() == 1);
- AFL_VERIFY(filter.Apply(table));
+ filter.Apply(table);
if (table->column(0)->num_chunks() == 1) {
return std::make_shared<TTrivialArray>(table->column(0)->chunk(0));
} else {
diff --git a/ydb/core/formats/arrow/accessor/abstract/constructor.h b/ydb/core/formats/arrow/accessor/abstract/constructor.h
index 4dd908a822..56b981e08e 100644
--- a/ydb/core/formats/arrow/accessor/abstract/constructor.h
+++ b/ydb/core/formats/arrow/accessor/abstract/constructor.h
@@ -33,6 +33,10 @@ private:
const std::shared_ptr<NArrow::NAccessor::IChunkedArray>& originalArray, const TChunkConstructionData& externalInfo) const = 0;
public:
+ virtual bool HasInternalConversion() const {
+ return false;
+ }
+
IConstructor(const IChunkedArray::EType type)
: Type(type) {
}
diff --git a/ydb/core/formats/arrow/accessor/plain/accessor.h b/ydb/core/formats/arrow/accessor/plain/accessor.h
index 3b9cbad85e..29f11ca68b 100644
--- a/ydb/core/formats/arrow/accessor/plain/accessor.h
+++ b/ydb/core/formats/arrow/accessor/plain/accessor.h
@@ -93,6 +93,16 @@ public:
AFL_VERIFY(NArrow::Append<TArrowDataType>(*Builder, arrow::util::string_view(value.data(), value.size())));
}
+ void AddNull(const ui32 recordIndex) {
+ if (LastRecordIndex) {
+ AFL_VERIFY(*LastRecordIndex < recordIndex)("last", LastRecordIndex)("index", recordIndex);
+ TStatusValidator::Validate(Builder->AppendNulls(recordIndex - *LastRecordIndex));
+ } else {
+ TStatusValidator::Validate(Builder->AppendNulls(recordIndex + 1));
+ }
+ LastRecordIndex = recordIndex;
+ }
+
std::shared_ptr<IChunkedArray> Finish(const ui32 recordsCount) {
if (LastRecordIndex) {
AFL_VERIFY(*LastRecordIndex < recordsCount)("last", LastRecordIndex)("count", recordsCount);
diff --git a/ydb/core/formats/arrow/accessor/plain/constructor.cpp b/ydb/core/formats/arrow/accessor/plain/constructor.cpp
index 757b4c1298..1f34cb7962 100644
--- a/ydb/core/formats/arrow/accessor/plain/constructor.cpp
+++ b/ydb/core/formats/arrow/accessor/plain/constructor.cpp
@@ -56,6 +56,10 @@ TString TConstructor::DoSerializeToString(const std::shared_ptr<IChunkedArray>&
TConclusion<std::shared_ptr<IChunkedArray>> TConstructor::DoConstruct(
const std::shared_ptr<IChunkedArray>& originalArray, const TChunkConstructionData& externalInfo) const {
+ if (!originalArray->GetDataType()->Equals(externalInfo.GetColumnType())) {
+ return TConclusionStatus::Fail("plain accessor cannot convert types for transfer: " + originalArray->GetDataType()->ToString() + " to " +
+ externalInfo.GetColumnType()->ToString());
+ }
auto schema = std::make_shared<arrow::Schema>(arrow::FieldVector({ std::make_shared<arrow::Field>("val", externalInfo.GetColumnType()) }));
auto chunked = originalArray->GetChunkedArray();
auto table = arrow::Table::Make(schema, { chunked }, originalArray->GetRecordsCount());
diff --git a/ydb/core/formats/arrow/accessor/sparsed/accessor.h b/ydb/core/formats/arrow/accessor/sparsed/accessor.h
index d125fba073..bda8ef5034 100644
--- a/ydb/core/formats/arrow/accessor/sparsed/accessor.h
+++ b/ydb/core/formats/arrow/accessor/sparsed/accessor.h
@@ -58,6 +58,11 @@ private:
: TSparsedArrayChunk(original) {
AFL_VERIFY(!original.GetNotDefaultRecordsCount());
RecordsCount = recordsCount;
+ AFL_VERIFY(RemapExternalToInternal.size() == 1);
+ AFL_VERIFY(RemapExternalToInternal[0].GetStartExt() == 0);
+ AFL_VERIFY(RemapExternalToInternal[0].GetStartInt() == 0);
+ AFL_VERIFY(RemapExternalToInternal[0].GetIsDefault());
+ RemapExternalToInternal[0] = TInternalChunkInfo(0, 0, recordsCount, true);
}
public:
@@ -224,7 +229,7 @@ public:
std::unique_ptr<arrow::ArrayBuilder> ValueBuilder;
ui32 RecordsCount = 0;
const std::shared_ptr<arrow::Scalar> DefaultValue;
-
+ std::optional<ui32> LastRecordIndex;
public:
TSparsedBuilder(const std::shared_ptr<arrow::Scalar>& defaultValue, const ui32 reserveItems, const ui32 reserveData)
: DefaultValue(defaultValue) {
@@ -233,11 +238,26 @@ public:
}
void AddRecord(const ui32 recordIndex, const std::string_view value) {
+ if (!!LastRecordIndex) {
+ AFL_VERIFY(*LastRecordIndex < recordIndex);
+ }
+ LastRecordIndex = recordIndex;
AFL_VERIFY(NArrow::Append<arrow::UInt32Type>(*IndexBuilder, recordIndex));
AFL_VERIFY(NArrow::Append<TDataType>(*ValueBuilder, arrow::util::string_view(value.data(), value.size())));
++RecordsCount;
}
+ void AddNull(const ui32 recordIndex) {
+ if (!!LastRecordIndex) {
+ AFL_VERIFY(*LastRecordIndex < recordIndex);
+ }
+ LastRecordIndex = recordIndex;
+ if (!!DefaultValue && DefaultValue->type->id() != arrow::null()->id()) {
+ AFL_VERIFY(NArrow::Append<arrow::UInt32Type>(*IndexBuilder, recordIndex));
+ TStatusValidator::Validate(ValueBuilder->AppendNull());
+ }
+ }
+
std::shared_ptr<IChunkedArray> Finish(const ui32 recordsCount) {
TSparsedArray::TBuilder builder(DefaultValue, arrow::TypeTraits<TDataType>::type_singleton());
std::vector<std::unique_ptr<arrow::ArrayBuilder>> builders;
diff --git a/ydb/core/formats/arrow/accessor/sparsed/constructor.cpp b/ydb/core/formats/arrow/accessor/sparsed/constructor.cpp
index 3d4a3574af..ab7d309fc7 100644
--- a/ydb/core/formats/arrow/accessor/sparsed/constructor.cpp
+++ b/ydb/core/formats/arrow/accessor/sparsed/constructor.cpp
@@ -42,7 +42,10 @@ TString TConstructor::DoSerializeToString(const std::shared_ptr<IChunkedArray>&
TConclusion<std::shared_ptr<IChunkedArray>> TConstructor::DoConstruct(
const std::shared_ptr<IChunkedArray>& originalArray, const TChunkConstructionData& externalInfo) const {
- AFL_VERIFY(originalArray);
+ if (!externalInfo.GetColumnType()->Equals(originalArray->GetDataType())) {
+ return TConclusionStatus::Fail("sparsed accessor cannot convert types for transfer: " + originalArray->GetDataType()->ToString() + " to " +
+ externalInfo.GetColumnType()->ToString());
+ }
return TSparsedArray::Make(*originalArray, externalInfo.GetDefaultValue());
}
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/accessor.cpp b/ydb/core/formats/arrow/accessor/sub_columns/accessor.cpp
index 327cf5bb27..242b6664bf 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/accessor.cpp
+++ b/ydb/core/formats/arrow/accessor/sub_columns/accessor.cpp
@@ -1,4 +1,5 @@
#include "accessor.h"
+#include "direct_builder.h"
#include <ydb/core/formats/arrow/accessor/composite_serial/accessor.h>
#include <ydb/core/formats/arrow/accessor/plain/constructor.h>
@@ -14,9 +15,8 @@
namespace NKikimr::NArrow::NAccessor {
-TConclusion<std::shared_ptr<TSubColumnsArray>> TSubColumnsArray::Make(const std::shared_ptr<IChunkedArray>& sourceArray,
- const std::shared_ptr<NSubColumns::IDataAdapter>& adapter, const NSubColumns::TSettings& settings) {
- AFL_VERIFY(adapter);
+TConclusion<std::shared_ptr<TSubColumnsArray>> TSubColumnsArray::Make(
+ const std::shared_ptr<IChunkedArray>& sourceArray, const NSubColumns::TSettings& settings) {
AFL_VERIFY(sourceArray);
NSubColumns::TDataBuilder builder(sourceArray->GetDataType(), settings);
IChunkedArray::TReader reader(sourceArray);
@@ -24,7 +24,7 @@ TConclusion<std::shared_ptr<TSubColumnsArray>> TSubColumnsArray::Make(const std:
for (ui32 i = 0; i < reader.GetRecordsCount();) {
auto address = reader.GetReadChunk(i);
storage.emplace_back(address.GetArray());
- auto conclusion = adapter->AddDataToBuilders(address.GetArray(), builder);
+ auto conclusion = settings.GetDataExtractor()->AddDataToBuilders(address.GetArray(), builder);
if (conclusion.IsFail()) {
return conclusion;
}
@@ -69,8 +69,7 @@ TString TSubColumnsArray::SerializeToString(const TChunkConstructionData& extern
ui32 columnIdx = 0;
for (auto&& i : ColumnsData.GetRecords()->GetColumns()) {
TChunkConstructionData cData(GetRecordsCount(), nullptr, arrow::utf8(), externalInfo.GetDefaultSerializer());
- blobRanges.emplace_back(
- ColumnsData.GetStats().GetAccessorConstructor(columnIdx).SerializeToString(i, cData));
+ blobRanges.emplace_back(ColumnsData.GetStats().GetAccessorConstructor(columnIdx).SerializeToString(i, cData));
auto* cInfo = proto.AddKeyColumns();
cInfo->SetSize(blobRanges.back().size());
++columnIdx;
@@ -105,37 +104,132 @@ TString TSubColumnsArray::SerializeToString(const TChunkConstructionData& extern
return result;
}
+class TJsonRestorer {
+private:
+ NJson::TJsonValue Result;
+
+public:
+ bool IsNull() const {
+ return !Result.IsDefined();
+ }
+
+ TConclusion<NBinaryJson::TBinaryJson> Finish() {
+ auto str = Result.GetStringRobust();
+ auto bJson = NBinaryJson::SerializeToBinaryJson(Result.GetStringRobust());
+ if (const TString* val = std::get_if<TString>(&bJson)) {
+ return TConclusionStatus::Fail(*val);
+ } else if (const NBinaryJson::TBinaryJson* val = std::get_if<NBinaryJson::TBinaryJson>(&bJson)) {
+ return std::move(*val);
+ } else {
+ return TConclusionStatus::Fail("undefined case for binary json construction");
+ }
+ }
+
+ void SetValueByPath(const TString& path, const TString& valueStr) {
+ ui32 start = 0;
+ bool enqueue = false;
+ bool wasEnqueue = false;
+ NJson::TJsonValue* current = &Result;
+ for (ui32 i = 0; i < path.size(); ++i) {
+ if (path[i] == '\\') {
+ ++i;
+ continue;
+ }
+ if (path[i] == '\'' || path[i] == '\"') {
+ wasEnqueue = true;
+ enqueue = !enqueue;
+ continue;
+ }
+ if (enqueue) {
+ continue;
+ }
+ if (path[i] == '.') {
+ if (wasEnqueue) {
+ AFL_VERIFY(i > start + 2);
+ TStringBuf key(path.data() + start + 1, (i - 1) - start - 1);
+ NJson::TJsonValue* currentNext = nullptr;
+ if (current->GetValuePointer(key, &currentNext)) {
+ current = currentNext;
+ } else {
+ current = &current->InsertValue(key, NJson::JSON_MAP);
+ }
+ } else {
+ AFL_VERIFY(i > start);
+ TStringBuf key(path.data() + start, i - start);
+ NJson::TJsonValue* currentNext = nullptr;
+ if (current->GetValuePointer(key, &currentNext)) {
+ current = currentNext;
+ } else {
+ ui32 keyIndex;
+ if (key.StartsWith("[") && key.EndsWith("]") && TryFromString<ui32>(key.data() + 1, key.size() - 2, keyIndex)) {
+ AFL_VERIFY(!current->IsDefined() || current->IsArray() || (current->IsMap() && current->GetMapSafe().empty()));
+ current->SetType(NJson::JSON_ARRAY);
+ if (current->GetArraySafe().size() <= keyIndex) {
+ current->GetArraySafe().resize(keyIndex + 1);
+ }
+ current = &current->GetArraySafe()[keyIndex];
+ } else {
+ AFL_VERIFY(!current->IsArray())("current_type", current->GetType())("current", current->GetStringRobust());
+ current = &current->InsertValue(key, NJson::JSON_MAP);
+ }
+ }
+ }
+ wasEnqueue = false;
+ start = i + 1;
+ }
+ }
+ if (wasEnqueue) {
+ AFL_VERIFY(path.size() > start + 2)("path", path)("start", start);
+ TStringBuf key(path.data() + start + 1, (path.size() - 1) - start - 1);
+ current->InsertValue(key, valueStr);
+ } else {
+ AFL_VERIFY(path.size() > start);
+ TStringBuf key(path.data() + start, (path.size()) - start);
+ ui32 keyIndex;
+ if (key.StartsWith("[") && key.EndsWith("]") && TryFromString<ui32>(key.data() + 1, key.size() - 2, keyIndex)) {
+ AFL_VERIFY(!current->IsDefined() || current->IsArray() || (current->IsMap() && current->GetMapSafe().empty()));
+ current->SetType(NJson::JSON_ARRAY);
+
+ if (current->GetArraySafe().size() <= keyIndex) {
+ current->GetArraySafe().resize(keyIndex + 1);
+ }
+ current->GetArraySafe()[keyIndex] = valueStr;
+ } else {
+ AFL_VERIFY(!current->IsArray())("key", key)("current", current->GetStringRobust())("full", Result.GetStringRobust())(
+ "current_type", current->GetType());
+ current->InsertValue(key, valueStr);
+ }
+ }
+ }
+};
+
IChunkedArray::TLocalDataAddress TSubColumnsArray::DoGetLocalData(
const std::optional<TCommonChunkAddress>& /*chunkCurrent*/, const ui64 /*position*/) const {
auto it = BuildUnorderedIterator();
auto builder = NArrow::MakeBuilder(GetDataType());
for (ui32 recordIndex = 0; recordIndex < GetRecordsCount(); ++recordIndex) {
- NJson::TJsonValue value;
+ TJsonRestorer value;
auto onStartRecord = [&](const ui32 index) {
AFL_VERIFY(recordIndex == index)("count", recordIndex)("index", index);
};
auto onFinishRecord = [&]() {
- auto str = value.GetStringRobust();
- // NArrow::Append<arrow::BinaryType>(*builder, arrow::util::string_view(str.data(), str.size()));
- //
- auto bJson = NBinaryJson::SerializeToBinaryJson(value.GetStringRobust());
- if (const TString* val = std::get_if<TString>(&bJson)) {
- AFL_VERIFY(false)("error", *val);
- } else if (const NBinaryJson::TBinaryJson* val = std::get_if<NBinaryJson::TBinaryJson>(&bJson)) {
- if (value.IsNull() || !value.IsDefined()) {
- TStatusValidator::Validate(builder->AppendNull());
- } else {
- NArrow::Append<arrow::BinaryType>(*builder, arrow::util::string_view(val->data(), val->size()));
- }
+ if (value.IsNull()) {
+ TStatusValidator::Validate(builder->AppendNull());
} else {
- AFL_VERIFY(false);
+ const TConclusion<NBinaryJson::TBinaryJson> bJson = value.Finish();
+ NArrow::Append<arrow::BinaryType>(*builder, arrow::util::string_view(bJson->data(), bJson->size()));
}
};
+
+ const auto addValueToJson = [&](const TString& path, const TString& valueStr) {
+ value.SetValueByPath(path, valueStr);
+ };
+
auto onRecordKV = [&](const ui32 index, const std::string_view valueView, const bool isColumn) {
if (isColumn) {
- value.InsertValue(ColumnsData.GetStats().GetColumnNameString(index), TString(valueView.data(), valueView.size()));
+ addValueToJson(ColumnsData.GetStats().GetColumnNameString(index), TString(valueView.data(), valueView.size()));
} else {
- value.InsertValue(OthersData.GetStats().GetColumnNameString(index), TString(valueView.data(), valueView.size()));
+ addValueToJson(OthersData.GetStats().GetColumnNameString(index), TString(valueView.data(), valueView.size()));
}
};
it.ReadRecord(recordIndex, onStartRecord, onRecordKV, onFinishRecord);
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/accessor.h b/ydb/core/formats/arrow/accessor/sub_columns/accessor.h
index c1abe02519..e7b9ac45ba 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/accessor.h
+++ b/ydb/core/formats/arrow/accessor/sub_columns/accessor.h
@@ -97,8 +97,7 @@ public:
TSubColumnsArray(NSubColumns::TColumnsData&& columns, NSubColumns::TOthersData&& others, const std::shared_ptr<arrow::DataType>& type,
const ui32 recordsCount, const NSubColumns::TSettings& settings);
- static TConclusion<std::shared_ptr<TSubColumnsArray>> Make(const std::shared_ptr<IChunkedArray>& sourceArray,
- const std::shared_ptr<NSubColumns::IDataAdapter>& adapter, const NSubColumns::TSettings& settings);
+ static TConclusion<std::shared_ptr<TSubColumnsArray>> Make(const std::shared_ptr<IChunkedArray>& sourceArray, const NSubColumns::TSettings& settings);
TSubColumnsArray(const std::shared_ptr<arrow::DataType>& type, const ui32 recordsCount, const NSubColumns::TSettings& settings);
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/columns_storage.cpp b/ydb/core/formats/arrow/accessor/sub_columns/columns_storage.cpp
index 86838382a2..cf0894631b 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/columns_storage.cpp
+++ b/ydb/core/formats/arrow/accessor/sub_columns/columns_storage.cpp
@@ -31,7 +31,7 @@ TColumnsData TColumnsData::ApplyFilter(const TColumnFilter& filter) const {
return *this;
}
auto records = Records;
- AFL_VERIFY(filter.Apply(records));
+ filter.Apply(records);
if (records->GetRecordsCount()) {
TDictStats::TBuilder builder;
ui32 idx = 0;
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/constructor.cpp b/ydb/core/formats/arrow/accessor/sub_columns/constructor.cpp
index 73e4cbe8ce..c0173e826b 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/constructor.cpp
+++ b/ydb/core/formats/arrow/accessor/sub_columns/constructor.cpp
@@ -67,7 +67,11 @@ bool TConstructor::DoDeserializeFromProto(const NKikimrArrowAccessorProto::TCons
TConclusion<std::shared_ptr<IChunkedArray>> TConstructor::DoConstruct(
const std::shared_ptr<IChunkedArray>& originalData, const TChunkConstructionData& /*externalInfo*/) const {
- return NAccessor::TSubColumnsArray::Make(originalData, DataExtractor, Settings).DetachResult();
+ auto conclusion = NAccessor::TSubColumnsArray::Make(originalData, Settings);
+ if (conclusion.IsFail()) {
+ return conclusion;
+ }
+ return conclusion.DetachResult();
}
TString TConstructor::DoSerializeToString(const std::shared_ptr<IChunkedArray>& columnData, const TChunkConstructionData& externalInfo) const {
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/constructor.h b/ydb/core/formats/arrow/accessor/sub_columns/constructor.h
index c9ef9bd1a3..dca602a38b 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/constructor.h
+++ b/ydb/core/formats/arrow/accessor/sub_columns/constructor.h
@@ -10,7 +10,6 @@ namespace NKikimr::NArrow::NAccessor::NSubColumns {
class TConstructor: public IConstructor {
private:
using TBase = IConstructor;
- std::shared_ptr<IDataAdapter> DataExtractor = std::make_shared<TFirstLevelSchemaData>();
TSettings Settings;
public:
@@ -43,6 +42,10 @@ public:
: TBase(IChunkedArray::EType::SubColumnsArray) {
}
+ virtual bool HasInternalConversion() const override {
+ return Settings.GetDataExtractor()->HasInternalConversion();
+ }
+
static TConclusion<std::shared_ptr<TGeneralContainer>> BuildOthersContainer(
const TStringBuf data, const NKikimrArrowAccessorProto::TSubColumnsAccessor& proto, const TChunkConstructionData& externalInfo, const bool deserialize);
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/data_extractor.cpp b/ydb/core/formats/arrow/accessor/sub_columns/data_extractor.cpp
index 7fe91d7468..0bbf7abb27 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/data_extractor.cpp
+++ b/ydb/core/formats/arrow/accessor/sub_columns/data_extractor.cpp
@@ -1,4 +1,6 @@
#include "data_extractor.h"
+#include "direct_builder.h"
+#include "json_extractors.h"
#include <util/string/split.h>
#include <util/string/vector.h>
@@ -8,53 +10,103 @@
namespace NKikimr::NArrow::NAccessor::NSubColumns {
-TConclusionStatus TFirstLevelSchemaData::DoAddDataToBuilders(
- const std::shared_ptr<arrow::Array>& sourceArray, TDataBuilder& dataBuilder) const noexcept {
- if (sourceArray->type()->id() != arrow::binary()->id()) {
- return TConclusionStatus::Fail("incorrect base type for subcolumns schema usage");
+class TSimdBuffers: public TDataBuilder::IBuffers {
+private:
+ std::vector<simdjson::padded_string> PaddedStrings;
+ std::vector<TString> Strings;
+
+public:
+ TSimdBuffers(std::vector<simdjson::padded_string>&& paddedStrings, std::vector<TString>&& strings)
+ : PaddedStrings(std::move(paddedStrings))
+ , Strings(std::move(strings)) {
}
+};
- auto arr = std::static_pointer_cast<arrow::StringArray>(sourceArray);
+TConclusionStatus TJsonScanExtractor::DoAddDataToBuilders(const std::shared_ptr<arrow::Array>& sourceArray, TDataBuilder& dataBuilder) const {
+ auto arr = std::static_pointer_cast<arrow::BinaryArray>(sourceArray);
+ std::optional<bool> isBinaryJson;
+ if (arr->type()->id() == arrow::utf8()->id()) {
+ isBinaryJson = false;
+ }
+ if (!arr->length()) {
+ return TConclusionStatus::Success();
+ }
+ simdjson::ondemand::parser simdParser;
+ std::vector<simdjson::padded_string> paddedStrings;
+ std::vector<TString> forceSIMDStrings;
+ ui32 sumBuf = 0;
+ ui32 paddedBorder = 0;
+ for (i32 i = arr->length() - 1; i >= 1; --i) {
+ sumBuf += arr->GetView(i).size();
+ if (sumBuf > simdjson::SIMDJSON_PADDING) {
+ paddedBorder = i;
+ break;
+ }
+ }
for (ui32 i = 0; i < arr->length(); ++i) {
const auto view = arr->GetView(i);
if (view.size() && !arr->IsNull(i)) {
- // NBinaryJson::TBinaryJson bJson(view.data(), view.size());
- // auto bJson = NBinaryJson::SerializeToBinaryJson(TStringBuf(view.data(), view.size()));
- // const NBinaryJson::TBinaryJson* bJsonParsed = std::get_if<NBinaryJson::TBinaryJson>(&bJson);
- // AFL_VERIFY(bJsonParsed)("error", *std::get_if<TString>(&bJson))("json", TStringBuf(view.data(), view.size()));
- // const NBinaryJson::TBinaryJson* bJsonParsed = &bJson;
- auto reader = NBinaryJson::TBinaryJsonReader::Make(TStringBuf(view.data(), view.size()));
- auto cursor = reader->GetRootCursor();
- if (cursor.GetType() == NBinaryJson::EContainerType::Object) {
- auto it = cursor.GetObjectIterator();
- while (it.HasNext()) {
- auto [key, value] = it.Next();
- if (key.GetType() != NBinaryJson::EEntryType::String) {
- continue;
- }
- if (value.GetType() == NBinaryJson::EEntryType::String) {
- dataBuilder.AddKV(key.GetString(), value.GetString());
- } else if (value.GetType() == NBinaryJson::EEntryType::Number) {
- dataBuilder.AddKVOwn(key.GetString(), ::ToString(value.GetNumber()));
- } else if (value.GetType() == NBinaryJson::EEntryType::BoolFalse) {
- dataBuilder.AddKVOwn(key.GetString(), "0");
- } else if (value.GetType() == NBinaryJson::EEntryType::BoolTrue) {
- dataBuilder.AddKVOwn(key.GetString(), "1");
- } else {
- continue;
+ TStringBuf sbJson(view.data(), view.size());
+ if (!isBinaryJson) {
+ isBinaryJson = NBinaryJson::IsValidBinaryJson(sbJson);
+ }
+ TString json;
+ if (*isBinaryJson && ForceSIMDJsonParsing) {
+ json = NBinaryJson::SerializeToJson(sbJson);
+ forceSIMDStrings.emplace_back(json);
+ sbJson = TStringBuf(json.data(), json.size());
+ }
+ if (!json && *isBinaryJson) {
+ auto reader = NBinaryJson::TBinaryJsonReader::Make(sbJson);
+ auto cursor = reader->GetRootCursor();
+ std::deque<std::unique_ptr<IJsonObjectExtractor>> iterators;
+ if (cursor.GetType() == NBinaryJson::EContainerType::Object) {
+ iterators.push_back(std::make_unique<TKVExtractor>(cursor.GetObjectIterator(), TStringBuf(), FirstLevelOnly));
+ } else if (cursor.GetType() == NBinaryJson::EContainerType::Array) {
+ iterators.push_back(std::make_unique<TArrayExtractor>(cursor.GetArrayIterator(), TStringBuf(), FirstLevelOnly));
+ }
+ while (iterators.size()) {
+ const auto conclusion = iterators.front()->Fill(dataBuilder, iterators);
+ if (conclusion.IsFail()) {
+ return conclusion;
}
+ iterators.pop_front();
}
} else {
- // return TConclusionStatus::Fail("incorrect json data: " + ::ToString((int)cursor.GetType()));
+ std::deque<std::unique_ptr<IJsonObjectExtractor>> iterators;
+ simdjson::simdjson_result<simdjson::ondemand::document> doc;
+ if (i < paddedBorder) {
+ doc = simdParser.iterate(
+ simdjson::padded_string_view(sbJson.data(), sbJson.size(), sbJson.size() + simdjson::SIMDJSON_PADDING));
+ } else {
+ paddedStrings.emplace_back(simdjson::padded_string(sbJson.data(), sbJson.size()));
+ doc = simdParser.iterate(paddedStrings.back());
+ }
+ auto conclusion = TSIMDExtractor(doc, FirstLevelOnly).Fill(dataBuilder, iterators);
+ if (conclusion.IsFail()) {
+ return conclusion;
+ }
}
}
dataBuilder.StartNextRecord();
}
+ if (paddedStrings.size()) {
+ dataBuilder.StoreBuffer(std::make_shared<TSimdBuffers>(std::move(paddedStrings), std::move(forceSIMDStrings)));
+ }
return TConclusionStatus::Success();
}
TConclusionStatus IDataAdapter::AddDataToBuilders(const std::shared_ptr<arrow::Array>& sourceArray, TDataBuilder& dataBuilder) const noexcept {
- return DoAddDataToBuilders(sourceArray, dataBuilder);
+ try {
+ return DoAddDataToBuilders(sourceArray, dataBuilder);
+ } catch (...) {
+ return TConclusionStatus::Fail("exception on data extraction: " + CurrentExceptionMessage());
+ }
+}
+
+TDataAdapterContainer TDataAdapterContainer::GetDefault() {
+ static TDataAdapterContainer result(std::make_shared<NSubColumns::TJsonScanExtractor>());
+ return result;
}
} // namespace NKikimr::NArrow::NAccessor::NSubColumns
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/data_extractor.h b/ydb/core/formats/arrow/accessor/sub_columns/data_extractor.h
index 631728c7ac..aff5f957ad 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/data_extractor.h
+++ b/ydb/core/formats/arrow/accessor/sub_columns/data_extractor.h
@@ -1,30 +1,121 @@
#pragma once
-#include "direct_builder.h"
#include <ydb/core/formats/arrow/accessor/abstract/accessor.h>
#include <ydb/core/formats/arrow/arrow_helpers.h>
+#include <ydb/library/formats/arrow/protos/accessor.pb.h>
+#include <ydb/services/bg_tasks/abstract/interface.h>
+#include <ydb/services/metadata/abstract/request_features.h>
+
#include <contrib/libs/apache/arrow/cpp/src/arrow/array/builder_base.h>
+#include <library/cpp/object_factory/object_factory.h>
namespace NKikimr::NArrow::NAccessor::NSubColumns {
+class TDataBuilder;
+
class IDataAdapter {
+public:
+ using TProto = NKikimrArrowAccessorProto::TDataExtractor;
+ using TFactory = NObjectFactory::TObjectFactory<IDataAdapter, TString>;
+
private:
virtual TConclusionStatus DoAddDataToBuilders(
- const std::shared_ptr<arrow::Array>& sourceArray, TDataBuilder& dataBuilder) const noexcept = 0;
+ const std::shared_ptr<arrow::Array>& sourceArray, TDataBuilder& dataBuilder) const = 0;
+ virtual bool DoDeserializeFromProto(const TProto& proto) = 0;
+ virtual void DoSerializeToProto(TProto& proto) const = 0;
+ virtual NJson::TJsonValue DoDebugJson() const {
+ return NJson::JSON_MAP;
+ }
+ virtual TConclusionStatus DoDeserializeFromRequest(NYql::TFeaturesExtractor& features) = 0;
public:
+ virtual bool HasInternalConversion() const = 0;
+ virtual TString GetClassName() const = 0;
+ bool DeserializeFromProto(const TProto& proto) {
+ return DoDeserializeFromProto(proto);
+ }
+
+ TConclusionStatus DeserializeFromRequest(NYql::TFeaturesExtractor& features) {
+ return DoDeserializeFromRequest(features);
+ }
+
+ NJson::TJsonValue DebugJson() const {
+ NJson::TJsonValue result = NJson::JSON_MAP;
+ result.InsertValue("class_name", GetClassName());
+ result.InsertValue("details", DoDebugJson());
+ return result;
+ }
+ void SerializeToProto(TProto& proto) const {
+ DoSerializeToProto(proto);
+ }
+
virtual ~IDataAdapter() = default;
- [[nodiscard]] TConclusionStatus AddDataToBuilders(const std::shared_ptr<arrow::Array>& sourceArray, TDataBuilder& dataBuilder) const noexcept;
+ [[nodiscard]] TConclusionStatus AddDataToBuilders(
+ const std::shared_ptr<arrow::Array>& sourceArray, TDataBuilder& dataBuilder) const noexcept;
};
-class TFirstLevelSchemaData: public IDataAdapter {
+class TJsonScanExtractor: public IDataAdapter {
+public:
+ static TString GetClassNameStatic() {
+ return "JSON_SCANNER";
+ }
+
private:
+ virtual bool HasInternalConversion() const override {
+ return true;
+ }
+
+ bool FirstLevelOnly = false;
+ bool ForceSIMDJsonParsing = false;
+ virtual TConclusionStatus DoDeserializeFromRequest(NYql::TFeaturesExtractor& features) override {
+ if (auto scanFlag = features.Extract<bool>("SCAN_FIRST_LEVEL_ONLY")) {
+ FirstLevelOnly = *scanFlag;
+ }
+ if (auto scanFlag = features.Extract<bool>("FORCE_SIMD_PARSING")) {
+ ForceSIMDJsonParsing = *scanFlag;
+ }
+ return TConclusionStatus::Success();
+ }
+
virtual TConclusionStatus DoAddDataToBuilders(
- const std::shared_ptr<arrow::Array>& sourceArray, TDataBuilder& dataBuilder) const noexcept override;
+ const std::shared_ptr<arrow::Array>& sourceArray, TDataBuilder& dataBuilder) const override;
+ virtual bool DoDeserializeFromProto(const TProto& proto) override {
+ if (!proto.HasJsonScanner() && !proto.HasSIMDJsonScanner()) {
+ return true;
+ }
+ FirstLevelOnly = proto.GetJsonScanner().GetFirstLevelOnly() || proto.GetSIMDJsonScanner().GetFirstLevelOnly();
+ ForceSIMDJsonParsing = proto.GetJsonScanner().GetForceSIMDJsonParsing();
+ return true;
+ }
+ virtual void DoSerializeToProto(TProto& proto) const override {
+ proto.MutableJsonScanner()->SetFirstLevelOnly(FirstLevelOnly);
+ proto.MutableJsonScanner()->SetForceSIMDJsonParsing(ForceSIMDJsonParsing);
+ }
+ virtual TString GetClassName() const override {
+ return GetClassNameStatic();
+ }
+
+ static const inline auto Registrator = TFactory::TRegistrator<TJsonScanExtractor>(GetClassNameStatic());
+ static const inline auto Registrator1 = TFactory::TRegistrator<TJsonScanExtractor>("BINARY_JSON_SCANNER");
+ static const inline auto Registrator2 = TFactory::TRegistrator<TJsonScanExtractor>("SIMD_JSON_SCANNER");
public:
+ TJsonScanExtractor() = default;
+ TJsonScanExtractor(const bool firstLevelOnly)
+ : FirstLevelOnly(firstLevelOnly) {
+ }
+};
+
+class TDataAdapterContainer: public NBackgroundTasks::TInterfaceProtoContainer<IDataAdapter> {
+private:
+ using TBase = NBackgroundTasks::TInterfaceProtoContainer<IDataAdapter>;
+
+public:
+ static TDataAdapterContainer GetDefault();
+
+ using TBase::TBase;
};
} // namespace NKikimr::NArrow::NAccessor::NSubColumns
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/direct_builder.cpp b/ydb/core/formats/arrow/accessor/sub_columns/direct_builder.cpp
index e5126a64dc..5b858f8787 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/direct_builder.cpp
+++ b/ydb/core/formats/arrow/accessor/sub_columns/direct_builder.cpp
@@ -5,6 +5,13 @@
#include <ydb/core/formats/arrow/accessor/plain/accessor.h>
#include <ydb/core/formats/arrow/accessor/sparsed/accessor.h>
+#include <contrib/libs/simdjson/include/simdjson/dom/array-inl.h>
+#include <contrib/libs/simdjson/include/simdjson/dom/document-inl.h>
+#include <contrib/libs/simdjson/include/simdjson/dom/element-inl.h>
+#include <contrib/libs/simdjson/include/simdjson/dom/object-inl.h>
+#include <contrib/libs/simdjson/include/simdjson/dom/parser-inl.h>
+#include <contrib/libs/simdjson/include/simdjson/ondemand.h>
+
namespace NKikimr::NArrow::NAccessor::NSubColumns {
void TColumnElements::BuildSparsedAccessor(const ui32 recordsCount) {
@@ -37,7 +44,7 @@ std::shared_ptr<TSubColumnsArray> TDataBuilder::Finish() {
TSettings::TColumnsDistributor distributor = Settings.BuildDistributor(sumSize, CurrentRecordIndex);
for (auto rIt = elementsBySize.rbegin(); rIt != elementsBySize.rend(); ++rIt) {
for (auto&& i : rIt->second) {
- switch (distributor.TakeAndDetect(rIt->first, i->GetRecordIndexes().size())) {
+ switch (distributor.TakeAndDetect(rIt->first, i->GetRecordIndexes().size())) {
case TSettings::TColumnsDistributor::EColumnType::Separated:
columnElements.emplace_back(i);
break;
@@ -97,7 +104,7 @@ TOthersData TDataBuilder::MergeOthers(const std::vector<TColumnElements*>& other
auto othersBuilder = TOthersData::MakeMergedBuilder();
while (heap.size()) {
std::pop_heap(heap.begin(), heap.end());
- othersBuilder->Add(heap.back().GetRecordIndex(), heap.back().GetKeyIndex(), heap.back().GetValue());
+ othersBuilder->AddImpl(heap.back().GetRecordIndex(), heap.back().GetKeyIndex(), heap.back().GetValuePointer());
if (!heap.back().Next()) {
heap.pop_back();
} else {
@@ -107,4 +114,44 @@ TOthersData TDataBuilder::MergeOthers(const std::vector<TColumnElements*>& other
return othersBuilder->Finish(TOthersData::TFinishContext(BuildStats(otherKeys, Settings, recordsCount)));
}
+std::string BuildString(const TStringBuf currentPrefix, const TStringBuf key) {
+    // Compose "prefix.key"; a key that itself contains '.' is quoted so the dotted path stays unambiguous.
+    if (key.find(".") != std::string::npos) {
+        if (currentPrefix.size()) {
+            return Sprintf("%.*s.\"%.*s\"", (int)currentPrefix.size(), currentPrefix.data(), (int)key.size(), key.data());
+        } else {
+            return Sprintf("\"%.*s\"", (int)key.size(), key.data());
+        }
+    } else {
+        if (currentPrefix.size()) {
+            return Sprintf("%.*s.%.*s", (int)currentPrefix.size(), currentPrefix.data(), (int)key.size(), key.data());
+        } else {
+            return std::string(key.data(), key.size());
+        }
+    }
+}
+
+TStringBuf TDataBuilder::AddKeyOwn(const TStringBuf currentPrefix, std::string&& key) {
+    auto hit = StorageHash.find(TStorageAddress(currentPrefix, TStringBuf(key.data(), key.size())));
+    if (hit == StorageHash.end()) {
+        Storage.emplace_back(std::move(key));   // take ownership of the bytes; deque never relocates elements
+        const TStringBuf ownedKey(Storage.back().data(), Storage.back().size());
+        hit = StorageHash.emplace(TStorageAddress(currentPrefix, ownedKey), BuildString(currentPrefix, ownedKey)).first;
+    }
+    return TStringBuf(hit->second.data(), hit->second.size());
+}
+
+TStringBuf TDataBuilder::AddKey(const TStringBuf currentPrefix, const TStringBuf key) {
+    const TStorageAddress address(currentPrefix, key);   // cache lookup key for (prefix, key)
+    auto existing = StorageHash.find(address);
+    if (existing == StorageHash.end()) {
+        existing = StorageHash.emplace(address, BuildString(currentPrefix, key)).first;
+    }
+    return TStringBuf(existing->second.data(), existing->second.size());
+}
+
+TDataBuilder::TDataBuilder(const std::shared_ptr<arrow::DataType>& type, const TSettings& settings)
+ : Type(type)
+ , Settings(settings) {
+}
+
} // namespace NKikimr::NArrow::NAccessor::NSubColumns
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/direct_builder.h b/ydb/core/formats/arrow/accessor/sub_columns/direct_builder.h
index 5e7365b271..59a1d9ea93 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/direct_builder.h
+++ b/ydb/core/formats/arrow/accessor/sub_columns/direct_builder.h
@@ -7,6 +7,8 @@
#include <ydb/core/formats/arrow/arrow_helpers.h>
#include <contrib/libs/apache/arrow/cpp/src/arrow/array/builder_base.h>
+#include <contrib/libs/xxhash/xxhash.h>
+#include <util/string/join.h>
namespace NKikimr::NArrow::NAccessor {
class TSubColumnsArray;
@@ -18,7 +20,6 @@ class TColumnElements {
private:
YDB_READONLY_DEF(TStringBuf, KeyName);
YDB_READONLY_DEF(std::deque<TStringBuf>, Values);
- std::vector<TString> ValuesStorage;
YDB_READONLY_DEF(std::vector<ui32>, RecordIndexes);
YDB_READONLY(ui32, DataSize, 0);
std::shared_ptr<IChunkedArray> Accessor;
@@ -38,34 +39,82 @@ public:
void AddData(const TStringBuf sb, const ui32 index) {
Values.emplace_back(sb);
+ AFL_VERIFY(RecordIndexes.empty() || RecordIndexes.back() < index);
RecordIndexes.emplace_back(index);
DataSize += sb.size();
}
-
- void AddDataToOwn(const TString& value, const ui32 index) {
- ValuesStorage.emplace_back(value);
- AddData(TStringBuf(value.data(), value.size()), index);
- }
};
class TDataBuilder {
+public:
+ class IBuffers {
+ public:
+ virtual ~IBuffers() = default;
+ };
+
private:
+ class TStorageAddress {
+ private:
+ const TStringBuf Prefix;
+ const TStringBuf Key;
+ const size_t Hash;
+
+ public:
+ TStorageAddress(const TStringBuf prefix, const TStringBuf key)
+ : Prefix(prefix)
+ , Key(key)
+ , Hash(XXH3_64bits(Prefix.data(), Prefix.size()) ^ XXH3_64bits(Key.data(), Key.size())) {
+ }
+
+ operator size_t() const {
+ return Hash;
+ }
+
+ bool operator==(const TStorageAddress& item) const {
+ return Hash == item.Hash && Prefix == item.Prefix && Key == item.Key;
+ }
+ };
+
ui32 CurrentRecordIndex = 0;
THashMap<TStringBuf, TColumnElements> Elements;
- std::deque<TString> Storage;
+ THashMap<TStorageAddress, std::string> StorageHash;
+ std::deque<std::string> Storage;
+ std::deque<TString> StorageStrings;
const std::shared_ptr<arrow::DataType> Type;
const TSettings Settings;
+ std::vector<std::shared_ptr<IBuffers>> Buffers;
public:
- TDataBuilder(const std::shared_ptr<arrow::DataType>& type, const TSettings& settings)
- : Type(type)
- , Settings(settings) {
+ TDataBuilder(const std::shared_ptr<arrow::DataType>& type, const TSettings& settings);
+
+ void StoreBuffer(const std::shared_ptr<IBuffers>& data) {
+ Buffers.emplace_back(data);
}
void StartNextRecord() {
++CurrentRecordIndex;
}
+ TStringBuf AddKeyOwn(const TStringBuf currentPrefix, std::string&& key);
+ TStringBuf AddKey(const TStringBuf currentPrefix, const TStringBuf key);
+
+ void AddKVNull(const TStringBuf key) {
+ auto itElements = Elements.find(key);
+ if (itElements == Elements.end()) {
+ itElements = Elements.emplace(key, key).first;
+ }
+ itElements->second.AddData(GetNullString(), CurrentRecordIndex);
+ }
+
+ static const TString& GetNullString() {
+ const static TString nullString = "NULL";
+ return nullString;
+ }
+
+ static std::string_view GetNullStringView() {
+ return std::string_view(GetNullString().data(), GetNullString().size());
+ }
+
void AddKV(const TStringBuf key, const TStringBuf value) {
auto itElements = Elements.find(key);
if (itElements == Elements.end()) {
@@ -74,13 +123,22 @@ public:
itElements->second.AddData(value, CurrentRecordIndex);
}
- void AddKVOwn(const TStringBuf key, const TString& value) {
- Storage.emplace_back(value);
+ void AddKVOwn(const TStringBuf key, std::string&& value) {
+ Storage.emplace_back(std::move(value));
auto itElements = Elements.find(key);
if (itElements == Elements.end()) {
itElements = Elements.emplace(key, key).first;
}
- itElements->second.AddData(value, CurrentRecordIndex);
+ itElements->second.AddData(Storage.back(), CurrentRecordIndex);
+ }
+
+ void AddKVOwn(const TStringBuf key, TString&& value) {
+ StorageStrings.emplace_back(std::move(value));
+ auto itElements = Elements.find(key);
+ if (itElements == Elements.end()) {
+ itElements = Elements.emplace(key, key).first;
+ }
+ itElements->second.AddData(StorageStrings.back(), CurrentRecordIndex);
}
class THeapElements {
@@ -108,8 +166,8 @@ public:
return KeyIndex;
}
- TStringBuf GetValue() const {
- return Elements->GetValues()[Index];
+ const TStringBuf* GetValuePointer() const {
+ return &Elements->GetValues()[Index];
}
bool operator<(const THeapElements& item) const {
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/json_extractors.cpp b/ydb/core/formats/arrow/accessor/sub_columns/json_extractors.cpp
new file mode 100644
index 0000000000..3483ae1369
--- /dev/null
+++ b/ydb/core/formats/arrow/accessor/sub_columns/json_extractors.cpp
@@ -0,0 +1,79 @@
+#include "json_extractors.h"
+
+#include <util/string/split.h>
+#include <util/string/vector.h>
+#include <yql/essentials/types/binary_json/format.h>
+#include <yql/essentials/types/binary_json/read.h>
+#include <yql/essentials/types/binary_json/write.h>
+
+#include <math.h>
+
+namespace NKikimr::NArrow::NAccessor::NSubColumns {
+
+TConclusionStatus TArrayExtractor::DoFill(TDataBuilder& dataBuilder, std::deque<std::unique_ptr<IJsonObjectExtractor>>& iterators) {
+    // Emit every array element under a synthetic "[i]" key; stop at the first failure.
+    for (ui32 elementIndex = 0; Iterator.HasNext(); ++elementIndex) {
+        auto element = Iterator.Next();
+        const TStringBuf elementKey = dataBuilder.AddKeyOwn(GetPrefix(), "[" + std::to_string(elementIndex) + "]");
+        auto status = AddDataToBuilder(dataBuilder, iterators, elementKey, element);
+        if (status.IsFail()) {
+            return status;
+        }
+    }
+    return TConclusionStatus::Success();
+}
+
+TConclusionStatus TKVExtractor::DoFill(TDataBuilder& dataBuilder, std::deque<std::unique_ptr<IJsonObjectExtractor>>& iterators) {
+    while (Iterator.HasNext()) {
+        auto [memberKey, memberValue] = Iterator.Next();
+        if (memberKey.GetType() != NBinaryJson::EEntryType::String) {
+            continue;   // only string keys can form a column path
+        }
+        const TStringBuf fullKey = dataBuilder.AddKey(GetPrefix(), memberKey.GetString());
+        auto status = AddDataToBuilder(dataBuilder, iterators, fullKey, memberValue);
+        if (status.IsFail()) {
+            return status;
+        }
+    }
+    return TConclusionStatus::Success();
+}
+
+TConclusionStatus IJsonObjectExtractor::AddDataToBuilder(TDataBuilder& dataBuilder,
+    std::deque<std::unique_ptr<IJsonObjectExtractor>>& iterators, const TStringBuf key, NBinaryJson::TEntryCursor& value) const {
+    if (value.GetType() == NBinaryJson::EEntryType::String) {
+        dataBuilder.AddKV(key, value.GetString());
+    } else if (value.GetType() == NBinaryJson::EEntryType::Number) {
+        const double val = value.GetNumber();
+        double integer;
+        if (modf(val, &integer) != 0.0) {
+            dataBuilder.AddKVOwn(key, std::to_string(val));
+        } else {
+            dataBuilder.AddKVOwn(key, std::to_string(static_cast<i64>(integer)));
+        }
+    } else if (value.GetType() == NBinaryJson::EEntryType::BoolFalse) {
+        static const TString zeroString = "0";
+        dataBuilder.AddKV(key, TStringBuf(zeroString.data(), zeroString.size()));
+    } else if (value.GetType() == NBinaryJson::EEntryType::BoolTrue) {
+        static const TString oneString = "1";
+        dataBuilder.AddKV(key, TStringBuf(oneString.data(), oneString.size()));
+    } else if (value.GetType() == NBinaryJson::EEntryType::Container) {
+        auto container = value.GetContainer();
+        if (FirstLevelOnly) {
+            dataBuilder.AddKVOwn(key, NBinaryJson::SerializeToJson(container));
+        } else if (container.GetType() == NBinaryJson::EContainerType::Array) {
+            iterators.emplace_back(std::make_unique<TArrayExtractor>(container.GetArrayIterator(), key));
+        } else if (container.GetType() == NBinaryJson::EContainerType::Object) {
+            iterators.emplace_back(std::make_unique<TKVExtractor>(container.GetObjectIterator(), key));
+        } else {
+            return TConclusionStatus::Fail("unexpected top value scalar in container iterator");
+        }
+        // nested container contents are produced later by the extractors queued above
+    } else if (value.GetType() == NBinaryJson::EEntryType::Null) {
+        dataBuilder.AddKVNull(key);
+    } else {
+        return TConclusionStatus::Fail("unexpected json value type: " + ::ToString((int)value.GetType()));
+    }
+    return TConclusionStatus::Success();
+}
+
+} // namespace NKikimr::NArrow::NAccessor::NSubColumns
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/json_extractors.h b/ydb/core/formats/arrow/accessor/sub_columns/json_extractors.h
new file mode 100644
index 0000000000..c0e61456c1
--- /dev/null
+++ b/ydb/core/formats/arrow/accessor/sub_columns/json_extractors.h
@@ -0,0 +1,188 @@
+#pragma once
+#include "direct_builder.h"
+
+#include <contrib/libs/apache/arrow/cpp/src/arrow/array/builder_base.h>
+#include <contrib/libs/simdjson/include/simdjson/dom/array-inl.h>
+#include <contrib/libs/simdjson/include/simdjson/dom/document-inl.h>
+#include <contrib/libs/simdjson/include/simdjson/dom/element-inl.h>
+#include <contrib/libs/simdjson/include/simdjson/dom/object-inl.h>
+#include <contrib/libs/simdjson/include/simdjson/dom/parser-inl.h>
+#include <contrib/libs/simdjson/include/simdjson/ondemand.h>
+#include <yql/essentials/types/binary_json/read.h>
+
+namespace NKikimr::NArrow::NAccessor::NSubColumns {
+
+class IJsonObjectExtractor {
+private:
+ const TStringBuf Prefix;
+ virtual TConclusionStatus DoFill(TDataBuilder& dataBuilder, std::deque<std::unique_ptr<IJsonObjectExtractor>>& iterators) = 0;
+
+protected:
+ const bool FirstLevelOnly = false;
+ TStringBuf GetPrefix() const {
+ return Prefix;
+ }
+
+ [[nodiscard]] TConclusionStatus AddDataToBuilder(TDataBuilder& dataBuilder, std::deque<std::unique_ptr<IJsonObjectExtractor>>& iterators,
+ const TStringBuf key, NBinaryJson::TEntryCursor& value) const;
+
+public:
+ virtual ~IJsonObjectExtractor() = default;
+
+ IJsonObjectExtractor(const TStringBuf prefix, const bool firstLevelOnly)
+ : Prefix(prefix)
+ , FirstLevelOnly(firstLevelOnly) {
+ }
+
+ [[nodiscard]] TConclusionStatus Fill(TDataBuilder& dataBuilder, std::deque<std::unique_ptr<IJsonObjectExtractor>>& iterators) {
+ return DoFill(dataBuilder, iterators);
+ }
+};
+
+class TKVExtractor: public IJsonObjectExtractor {
+private:
+ using TBase = IJsonObjectExtractor;
+ NBinaryJson::TObjectIterator Iterator;
+ virtual TConclusionStatus DoFill(TDataBuilder& dataBuilder, std::deque<std::unique_ptr<IJsonObjectExtractor>>& iterators) override;
+
+public:
+ TKVExtractor(const NBinaryJson::TObjectIterator& iterator, const TStringBuf prefix, const bool firstLevelOnly = false)
+ : TBase(prefix, firstLevelOnly)
+ , Iterator(iterator) {
+ }
+};
+
+class TArrayExtractor: public IJsonObjectExtractor {
+private:
+ using TBase = IJsonObjectExtractor;
+ NBinaryJson::TArrayIterator Iterator;
+ virtual TConclusionStatus DoFill(TDataBuilder& dataBuilder, std::deque<std::unique_ptr<IJsonObjectExtractor>>& iterators) override;
+
+public:
+ TArrayExtractor(const NBinaryJson::TArrayIterator& iterator, const TStringBuf prefix, const bool firstLevelOnly = false)
+ : TBase(prefix, firstLevelOnly)
+ , Iterator(iterator) {
+ }
+};
+
+class TSIMDExtractor: public IJsonObjectExtractor {
+private:
+    using TBase = IJsonObjectExtractor;
+    simdjson::simdjson_result<simdjson::ondemand::document>& Document;
+
+#define RETURN_IF_NOT_SUCCESS(expr)                                                                       \
+    if (const auto& status = expr; Y_UNLIKELY(status != simdjson::SUCCESS)) {                             \
+        return TConclusionStatus::Fail("json parsing error: " + TString(simdjson::error_message(status))); \
+    }
+
+    // Renders a whole value as the flat string stored in FirstLevelOnly mode.
+    TConclusion<std::string_view> PrintObject(simdjson::ondemand::value& value) const {
+        switch (value.type()) {
+            case simdjson::ondemand::json_type::string: {
+                auto sv = (std::string_view)value.raw_json_token();
+                AFL_VERIFY(sv.size() >= 2);
+                return std::string_view(sv.data() + 1, sv.size() - 2);
+            }
+            case simdjson::ondemand::json_type::null:
+                return TDataBuilder::GetNullStringView();
+            case simdjson::ondemand::json_type::number:
+            case simdjson::ondemand::json_type::boolean:
+                return (std::string_view)value.raw_json_token();
+            case simdjson::ondemand::json_type::object: {
+                simdjson::ondemand::object v;
+                RETURN_IF_NOT_SUCCESS(value.get(v));
+                return v.raw_json();
+            }
+            case simdjson::ondemand::json_type::array: {
+                simdjson::ondemand::array v;
+                RETURN_IF_NOT_SUCCESS(value.get(v));
+                return v.raw_json();
+            }
+        }
+        return TConclusionStatus::Fail("unexpected json value type");
+    }
+
+    template <typename TOnDemandValue>
+        requires std::is_same_v<TOnDemandValue, simdjson::ondemand::value> || std::is_same_v<TOnDemandValue, simdjson::ondemand::document>
+    [[nodiscard]] TConclusionStatus ProcessValue(TDataBuilder& dataBuilder, TOnDemandValue& value, const TStringBuf currentKey) {
+        switch (value.type()) {
+            case simdjson::ondemand::json_type::string: {
+                auto sv = (std::string_view)value.raw_json_token();
+                AFL_VERIFY(sv.size() >= 2);
+                dataBuilder.AddKV(currentKey, TStringBuf(sv.data() + 1, sv.size() - 2));
+                break;
+            }
+            case simdjson::ondemand::json_type::null: {
+                dataBuilder.AddKVNull(currentKey);
+                break;
+            }
+            case simdjson::ondemand::json_type::number:
+            case simdjson::ondemand::json_type::boolean: {
+                dataBuilder.AddKV(currentKey, (std::string_view)value.raw_json_token());
+                break;
+            }
+            case simdjson::ondemand::json_type::array: {
+                simdjson::ondemand::array v;
+                RETURN_IF_NOT_SUCCESS(value.get(v));
+                ui32 idx = 0;
+                for (auto item : v) {
+                    RETURN_IF_NOT_SUCCESS(item.error());
+                    const TStringBuf sbKey = dataBuilder.AddKeyOwn(currentKey, "[" + std::to_string(idx++) + "]");
+                    if (FirstLevelOnly) {
+                        auto conclusion = PrintObject(item.value_unsafe());
+                        if (conclusion.IsFail()) {
+                            return conclusion;
+                        }
+                        dataBuilder.AddKV(sbKey, conclusion.DetachResult());
+                    } else {
+                        // recurse into the nested value under the composed key
+                        auto conclusion = ProcessValue(dataBuilder, item.value_unsafe(), sbKey);
+                        if (conclusion.IsFail()) {
+                            return conclusion;
+                        }
+                    }
+                }
+                break;
+            }
+            case simdjson::ondemand::json_type::object: {
+                simdjson::ondemand::object v;
+                RETURN_IF_NOT_SUCCESS(value.get(v));
+                for (auto item : v) {
+                    RETURN_IF_NOT_SUCCESS(item.error());
+                    auto& keyValue = item.value_unsafe();
+                    const auto key = keyValue.escaped_key();
+                    const auto sbKey = dataBuilder.AddKey(currentKey, key);
+                    if (FirstLevelOnly) {
+                        auto conclusion = PrintObject(keyValue.value());
+                        if (conclusion.IsFail()) {
+                            return conclusion;
+                        }
+                        dataBuilder.AddKV(sbKey, conclusion.DetachResult());
+                    } else {
+                        auto conclusion = ProcessValue(dataBuilder, keyValue.value(), sbKey);
+                        if (conclusion.IsFail()) {
+                            return conclusion;
+                        }
+                    }
+                }
+                break;
+            }
+        }
+
+        return TConclusionStatus::Success();
+    }
+
+    virtual TConclusionStatus DoFill(TDataBuilder& dataBuilder, std::deque<std::unique_ptr<IJsonObjectExtractor>>& /*iterators*/) override {
+        RETURN_IF_NOT_SUCCESS(Document.error());
+        return ProcessValue(dataBuilder, Document.value_unsafe(), TStringBuf());
+    }
+
+public:
+#undef RETURN_IF_NOT_SUCCESS
+    TSIMDExtractor(simdjson::simdjson_result<simdjson::ondemand::document>& document, const bool firstLevelOnly = false)
+        : TBase(TStringBuf(), firstLevelOnly)
+        , Document(document) {
+    }
+};
+
+} // namespace NKikimr::NArrow::NAccessor::NSubColumns
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/others_storage.cpp b/ydb/core/formats/arrow/accessor/sub_columns/others_storage.cpp
index 58d1877d64..36c8a7fe3e 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/others_storage.cpp
+++ b/ydb/core/formats/arrow/accessor/sub_columns/others_storage.cpp
@@ -21,12 +21,14 @@ TOthersData::TBuilderWithStats::TBuilderWithStats() {
Values = static_cast<arrow::StringBuilder*>(Builders[2].get());
}
-void TOthersData::TBuilderWithStats::Add(const ui32 recordIndex, const ui32 keyIndex, const std::string_view value) {
+void TOthersData::TBuilderWithStats::AddImpl(const ui32 recordIndex, const ui32 keyIndex, const std::string_view* value) {
AFL_VERIFY(Builders.size());
if (StatsByKeyIndex.size() <= keyIndex) {
StatsByKeyIndex.resize((keyIndex + 1) * 2);
}
- StatsByKeyIndex[keyIndex].AddValue(value);
+ if (value) {
+ StatsByKeyIndex[keyIndex].AddValue(*value);
+ }
if (!LastRecordIndex) {
LastRecordIndex = recordIndex;
LastKeyIndex = keyIndex;
@@ -35,7 +37,11 @@ void TOthersData::TBuilderWithStats::Add(const ui32 recordIndex, const ui32 keyI
}
TStatusValidator::Validate(RecordIndex->Append(recordIndex));
RTKeyIndexes.emplace_back(keyIndex);
- TStatusValidator::Validate(Values->Append(value.data(), value.size()));
+ if (value) {
+ TStatusValidator::Validate(Values->Append(value->data(), value->size()));
+ } else {
+ TStatusValidator::Validate(Values->AppendNull());
+ }
++RecordsCount;
}
@@ -247,7 +253,7 @@ std::shared_ptr<IChunkedArray> TOthersData::GetPathAccessor(const std::string_vi
filter.Add(it.GetKeyIndex() == *idx);
}
auto recordsFiltered = Records;
- AFL_VERIFY(filter.Apply(recordsFiltered));
+ filter.Apply(recordsFiltered);
auto table = recordsFiltered->BuildTableVerified(std::set<std::string>({ "record_idx", "value" }));
TSparsedArray::TBuilder builder(nullptr, arrow::utf8());
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/others_storage.h b/ydb/core/formats/arrow/accessor/sub_columns/others_storage.h
index 84ba8071f1..b172dd5b72 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/others_storage.h
+++ b/ydb/core/formats/arrow/accessor/sub_columns/others_storage.h
@@ -173,11 +173,16 @@ public:
std::optional<ui32> LastKeyIndex;
ui32 RecordsCount = 0;
YDB_READONLY_DEF(std::vector<TDictStats::TRTStatsValue>, StatsByKeyIndex);
-
public:
TBuilderWithStats();
- void Add(const ui32 recordIndex, const ui32 keyIndex, const std::string_view value);
+ void AddImpl(const ui32 recordIndex, const ui32 keyIndex, const std::string_view* value);
+    void Add(const ui32 recordIndex, const ui32 keyIndex, const std::string_view value) {
+        AddImpl(recordIndex, keyIndex, &value);   // present value: stats + append
+    }
+    void AddNull(const ui32 recordIndex, const ui32 keyIndex) {
+        AddImpl(recordIndex, keyIndex, nullptr);   // null marker: no stats, AppendNull
+    }
TOthersData Finish(const TFinishContext& finishContext);
};
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/request.cpp b/ydb/core/formats/arrow/accessor/sub_columns/request.cpp
index 82315e8973..e0483ec58e 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/request.cpp
+++ b/ydb/core/formats/arrow/accessor/sub_columns/request.cpp
@@ -1,5 +1,6 @@
-#include "request.h"
#include "constructor.h"
+#include "data_extractor.h"
+#include "request.h"
namespace NKikimr::NArrow::NAccessor::NSubColumns {
@@ -20,6 +21,20 @@ TConclusionStatus TRequestedConstuctor::DoDeserializeFromRequest(NYql::TFeatures
if (auto kff = features.Extract<ui32>("SPARSED_DETECTOR_KFF")) {
Settings.SetSparsedDetectorKff(*kff);
}
+ THolder<IDataAdapter> extractor;
+ if (auto dataExtractorClassName = features.Extract<TString>("DATA_EXTRACTOR_CLASS_NAME")) {
+ extractor = IDataAdapter::TFactory::MakeHolder(*dataExtractorClassName);
+ if (!extractor) {
+ return TConclusionStatus::Fail("incorrect data extractor class name");
+ }
+ } else {
+ extractor = MakeHolder<TJsonScanExtractor>(false);
+ }
+ auto parseConclusion = extractor->DeserializeFromRequest(features);
+ if (parseConclusion.IsFail()) {
+ return parseConclusion;
+ }
+ Settings.SetDataExtractor(std::shared_ptr<IDataAdapter>(extractor.Release()));
if (auto memLimit = features.Extract<ui32>("MEM_LIMIT_CHUNK")) {
Settings.SetChunkMemoryLimit(*memLimit);
}
@@ -36,4 +51,4 @@ NKikimr::TConclusion<TConstructorContainer> TRequestedConstuctor::DoBuildConstru
return std::make_shared<TConstructor>(Settings);
}
-}
+} // namespace NKikimr::NArrow::NAccessor::NSubColumns
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/settings.h b/ydb/core/formats/arrow/accessor/sub_columns/settings.h
index 45dda39ae0..0a6c38d5cb 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/settings.h
+++ b/ydb/core/formats/arrow/accessor/sub_columns/settings.h
@@ -1,4 +1,6 @@
#pragma once
+#include "data_extractor.h"
+
#include <ydb/core/formats/arrow/accessor/abstract/accessor.h>
#include <ydb/core/formats/arrow/arrow_helpers.h>
@@ -14,6 +16,7 @@ private:
YDB_ACCESSOR(ui32, ColumnsLimit, 1024);
YDB_ACCESSOR(ui32, ChunkMemoryLimit, 50 * 1024 * 1024);
YDB_READONLY(double, OthersAllowedFraction, 0.05);
+ YDB_ACCESSOR_DEF(TDataAdapterContainer, DataExtractor);
public:
class TColumnsDistributor {
@@ -45,11 +48,14 @@ public:
}
TSettings() = default;
- TSettings(const ui32 sparsedDetectorKff, const ui32 columnsLimit, const ui32 chunkMemoryLimit, const double othersAllowedFraction)
+ TSettings(const ui32 sparsedDetectorKff, const ui32 columnsLimit, const ui32 chunkMemoryLimit, const double othersAllowedFraction,
+ const TDataAdapterContainer& dataExtractor)
: SparsedDetectorKff(sparsedDetectorKff)
, ColumnsLimit(columnsLimit)
, ChunkMemoryLimit(chunkMemoryLimit)
- , OthersAllowedFraction(othersAllowedFraction) {
+ , OthersAllowedFraction(othersAllowedFraction)
+ , DataExtractor(dataExtractor) {
+ AFL_VERIFY(!!DataExtractor);
AFL_VERIFY(OthersAllowedFraction >= 0 && OthersAllowedFraction <= 1)("others_fraction", OthersAllowedFraction);
}
@@ -65,6 +71,7 @@ public:
result.InsertValue("columns_limit", ColumnsLimit);
result.InsertValue("memory_limit", ChunkMemoryLimit);
result.InsertValue("others_allowed_fraction", OthersAllowedFraction);
+ result.InsertValue("data_extractor", DataExtractor->DebugJson());
return result;
}
@@ -79,6 +86,7 @@ public:
result.SetColumnsLimit(ColumnsLimit);
result.SetChunkMemoryLimit(ChunkMemoryLimit);
result.SetOthersAllowedFraction(OthersAllowedFraction);
+ DataExtractor.SerializeToProto(*result.MutableDataExtractor());
}
template <class TProto>
@@ -87,6 +95,11 @@ public:
ColumnsLimit = proto.GetColumnsLimit();
ChunkMemoryLimit = proto.GetChunkMemoryLimit();
OthersAllowedFraction = proto.GetOthersAllowedFraction();
+ if (!proto.HasDataExtractor()) {
+ AFL_VERIFY(DataExtractor.Initialize(TJsonScanExtractor::GetClassNameStatic()));
+ } else if (!DataExtractor.DeserializeFromProto(proto.GetDataExtractor())) {
+ return false;
+ }
return true;
}
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/ut/ut_sub_columns.cpp b/ydb/core/formats/arrow/accessor/sub_columns/ut/ut_sub_columns.cpp
index 40513d5223..edeef71ee9 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/ut/ut_sub_columns.cpp
+++ b/ydb/core/formats/arrow/accessor/sub_columns/ut/ut_sub_columns.cpp
@@ -49,7 +49,7 @@ Y_UNIT_TEST_SUITE(SubColumnsArrayAccessor) {
Y_UNIT_TEST(SlicesDef) {
for (ui32 colsCount = 0; colsCount < 5; ++colsCount) {
- NSubColumns::TSettings settings(4, colsCount, 0, 0);
+ NSubColumns::TSettings settings(4, colsCount, 0, 0, NKikimr::NArrow::NAccessor::NSubColumns::TDataAdapterContainer::GetDefault());
const std::vector<TString> jsons = {
R"({"a" : 1, "b" : 1, "c" : "111"})",
@@ -71,7 +71,7 @@ Y_UNIT_TEST_SUITE(SubColumnsArrayAccessor) {
++idx;
}
auto bJsonArr = arrBuilder.Finish(jsons.size());
- auto arrData = TSubColumnsArray::Make(bJsonArr, std::make_shared<NSubColumns::TFirstLevelSchemaData>(), settings).DetachResult();
+ auto arrData = TSubColumnsArray::Make(bJsonArr, settings).DetachResult();
Cerr << arrData->DebugJson() << Endl;
AFL_VERIFY(PrintBinaryJsons(arrData->GetChunkedArray()) == R"([[{"a":"1","b":"1","c":"111"},null,{"a1":"2","b":"2","c":"222"},{"a":"3","b":"3","c":"333"},null,{"a":"5","b1":"5"}]])")(
"string", PrintBinaryJsons(arrData->GetChunkedArray()));
@@ -141,7 +141,7 @@ Y_UNIT_TEST_SUITE(SubColumnsArrayAccessor) {
Y_UNIT_TEST(FiltersDef) {
for (ui32 colsCount = 0; colsCount < 5; ++colsCount) {
- NSubColumns::TSettings settings(4, colsCount, 0, 0);
+ NSubColumns::TSettings settings(4, colsCount, 0, 0, NKikimr::NArrow::NAccessor::NSubColumns::TDataAdapterContainer::GetDefault());
const std::vector<TString> jsons = {
R"({"a" : 1, "b" : 1, "c" : "111"})",
@@ -163,7 +163,7 @@ Y_UNIT_TEST_SUITE(SubColumnsArrayAccessor) {
++idx;
}
auto bJsonArr = arrBuilder.Finish(jsons.size());
- auto arrData = TSubColumnsArray::Make(bJsonArr, std::make_shared<NSubColumns::TFirstLevelSchemaData>(), settings).DetachResult();
+ auto arrData = TSubColumnsArray::Make(bJsonArr, settings).DetachResult();
Cerr << arrData->DebugJson() << Endl;
AFL_VERIFY(PrintBinaryJsons(arrData->GetChunkedArray()) == R"([[{"a":"1","b":"1","c":"111"},null,{"a1":"2","b":"2","c":"222"},{"a":"3","b":"3","c":"333"},null,{"a":"5","b1":"5"}]])")(
"string", PrintBinaryJsons(arrData->GetChunkedArray()));
diff --git a/ydb/core/formats/arrow/accessor/sub_columns/ya.make b/ydb/core/formats/arrow/accessor/sub_columns/ya.make
index 9b2f1d2202..0ce5e597be 100644
--- a/ydb/core/formats/arrow/accessor/sub_columns/ya.make
+++ b/ydb/core/formats/arrow/accessor/sub_columns/ya.make
@@ -18,6 +18,7 @@ SRCS(
header.cpp
partial.cpp
data_extractor.cpp
+ json_extractors.cpp
accessor.cpp
direct_builder.cpp
settings.cpp
@@ -29,6 +30,10 @@ SRCS(
YQL_LAST_ABI_VERSION()
+CFLAGS(
+ -Wno-assume
+)
+
END()
RECURSE_FOR_TESTS(
diff --git a/ydb/core/formats/arrow/arrow_filter.cpp b/ydb/core/formats/arrow/arrow_filter.cpp
index d0e127dc91..8858998841 100644
--- a/ydb/core/formats/arrow/arrow_filter.cpp
+++ b/ydb/core/formats/arrow/arrow_filter.cpp
@@ -363,9 +363,9 @@ NKikimr::NArrow::TColumnFilter TColumnFilter::MakePredicateFilter(
}
template <class TData>
-bool ApplyImpl(const TColumnFilter& filter, std::shared_ptr<TData>& batch, const TColumnFilter::TApplyContext& context) {
+void ApplyImpl(const TColumnFilter& filter, std::shared_ptr<TData>& batch, const TColumnFilter::TApplyContext& context) {
if (!batch || !batch->num_rows()) {
- return false;
+ return;
}
if (!filter.IsEmpty()) {
if (context.HasSlice()) {
@@ -380,10 +380,10 @@ bool ApplyImpl(const TColumnFilter& filter, std::shared_ptr<TData>& batch, const
}
if (filter.IsTotalDenyFilter()) {
batch = NAdapter::TDataBuilderPolicy<TData>::GetEmptySame(batch);
- return true;
+ return;
}
if (filter.IsTotalAllowFilter()) {
- return true;
+ return;
}
if (context.GetTrySlices() && filter.GetFilter().size() * 10 < filter.GetRecordsCountVerified() &&
filter.GetRecordsCountVerified() < filter.GetFilteredCountVerified() * 50) {
@@ -394,18 +394,17 @@ bool ApplyImpl(const TColumnFilter& filter, std::shared_ptr<TData>& batch, const
} else {
batch = NAdapter::TDataBuilderPolicy<TData>::ApplyArrowFilter(batch, filter);
}
- return batch->num_rows();
}
-bool TColumnFilter::Apply(std::shared_ptr<TGeneralContainer>& batch, const TApplyContext& context) const {
+void TColumnFilter::Apply(std::shared_ptr<TGeneralContainer>& batch, const TApplyContext& context) const {
return ApplyImpl(*this, batch, context);
}
-bool TColumnFilter::Apply(std::shared_ptr<arrow::Table>& batch, const TApplyContext& context) const {
+void TColumnFilter::Apply(std::shared_ptr<arrow::Table>& batch, const TApplyContext& context) const {
return ApplyImpl(*this, batch, context);
}
-bool TColumnFilter::Apply(std::shared_ptr<arrow::RecordBatch>& batch, const TApplyContext& context) const {
+void TColumnFilter::Apply(std::shared_ptr<arrow::RecordBatch>& batch, const TApplyContext& context) const {
return ApplyImpl(*this, batch, context);
}
diff --git a/ydb/core/formats/arrow/arrow_filter.h b/ydb/core/formats/arrow/arrow_filter.h
index c93b086080..8d4b1afad4 100644
--- a/ydb/core/formats/arrow/arrow_filter.h
+++ b/ydb/core/formats/arrow/arrow_filter.h
@@ -289,9 +289,9 @@ public:
TApplyContext& Slice(const ui32 start, const ui32 count);
};
- [[nodiscard]] bool Apply(std::shared_ptr<TGeneralContainer>& batch, const TApplyContext& context = Default<TApplyContext>()) const;
- [[nodiscard]] bool Apply(std::shared_ptr<arrow::Table>& batch, const TApplyContext& context = Default<TApplyContext>()) const;
- [[nodiscard]] bool Apply(std::shared_ptr<arrow::RecordBatch>& batch, const TApplyContext& context = Default<TApplyContext>()) const;
+ void Apply(std::shared_ptr<TGeneralContainer>& batch, const TApplyContext& context = Default<TApplyContext>()) const;
+ void Apply(std::shared_ptr<arrow::Table>& batch, const TApplyContext& context = Default<TApplyContext>()) const;
+ void Apply(std::shared_ptr<arrow::RecordBatch>& batch, const TApplyContext& context = Default<TApplyContext>()) const;
void Apply(const ui32 expectedRecordsCount, std::vector<arrow::Datum*>& datums) const;
[[nodiscard]] std::shared_ptr<NAccessor::IChunkedArray> Apply(
const std::shared_ptr<NAccessor::IChunkedArray>& source, const TApplyContext& context = Default<TApplyContext>()) const;
diff --git a/ydb/core/formats/arrow/program/abstract.h b/ydb/core/formats/arrow/program/abstract.h
index 01724d31ae..9ca8a96f90 100644
--- a/ydb/core/formats/arrow/program/abstract.h
+++ b/ydb/core/formats/arrow/program/abstract.h
@@ -13,6 +13,44 @@ class TAccessorsCollection;
namespace NKikimr::NArrow::NSSA {
+class TIndexCheckOperation {
+public:
+ enum class EOperation : ui32 {
+ Equals,
+ StartsWith,
+ EndsWith,
+ Contains
+ };
+
+private:
+ const EOperation Operation;
+ YDB_READONLY(bool, CaseSensitive, true);
+
+public:
+ TString GetSignalId() const {
+ return TStringBuilder() << Operation << "::" << (CaseSensitive ? 1 : 0);
+ }
+
+ TString DebugString() const {
+ return TStringBuilder() << "{" << Operation << "," << CaseSensitive << "}";
+ }
+
+ EOperation GetOperation() const {
+ return Operation;
+ }
+
+ TIndexCheckOperation(const EOperation op, const bool caseSensitive)
+ : Operation(op)
+ , CaseSensitive(caseSensitive) {
+ }
+
+ explicit operator size_t() const {
+ return (size_t)Operation;
+ }
+
+ bool operator==(const TIndexCheckOperation& op) const = default;
+};
+
using IChunkedArray = NAccessor::IChunkedArray;
using TAccessorsCollection = NAccessor::TAccessorsCollection;
diff --git a/ydb/core/formats/arrow/program/assign_internal.cpp b/ydb/core/formats/arrow/program/assign_internal.cpp
index a1432806e6..cc51fef22d 100644
--- a/ydb/core/formats/arrow/program/assign_internal.cpp
+++ b/ydb/core/formats/arrow/program/assign_internal.cpp
@@ -40,40 +40,16 @@ TConclusion<std::shared_ptr<TCalculationProcessor>> TCalculationProcessor::Build
NJson::TJsonValue TCalculationProcessor::DoDebugJson() const {
NJson::TJsonValue result = NJson::JSON_MAP;
- if (!!YqlOperationId) {
- result.InsertValue("yql_op", ::ToString((NYql::TKernelRequestBuilder::EBinaryOp)*YqlOperationId));
- }
- if (!!KernelLogic) {
- result.InsertValue("kernel", KernelLogic->GetClassName());
- }
+ result.InsertValue("kernel", KernelLogic->GetClassName());
return result;
}
ui64 TCalculationProcessor::DoGetWeight() const {
- if (KernelLogic) {
- return 0;
- }
- if (!YqlOperationId) {
- return 10;
- } else if ((NYql::TKernelRequestBuilder::EBinaryOp)*YqlOperationId == NYql::TKernelRequestBuilder::EBinaryOp::StartsWith ||
- (NYql::TKernelRequestBuilder::EBinaryOp)*YqlOperationId == NYql::TKernelRequestBuilder::EBinaryOp::EndsWith) {
- return 7;
- } else if ((NYql::TKernelRequestBuilder::EBinaryOp)*YqlOperationId == NYql::TKernelRequestBuilder::EBinaryOp::StringContains) {
- return 10;
- } else if ((NYql::TKernelRequestBuilder::EBinaryOp)*YqlOperationId == NYql::TKernelRequestBuilder::EBinaryOp::Equals) {
- return 5;
- }
- return 0;
+ return (ui64)KernelLogic->GetWeight();
}
TString TCalculationProcessor::DoGetSignalCategoryName() const {
- if (KernelLogic) {
- return ::ToString(GetProcessorType()) + "::" + KernelLogic->GetClassName();
- } else if (YqlOperationId) {
- return ::ToString(GetProcessorType()) + "::" + ::ToString((NYql::TKernelRequestBuilder::EBinaryOp)*YqlOperationId);
- } else {
- return ::ToString(GetProcessorType());
- }
+ return ::ToString(GetProcessorType()) + "::" + KernelLogic->SignalDescription();
}
} // namespace NKikimr::NArrow::NSSA
diff --git a/ydb/core/formats/arrow/program/assign_internal.h b/ydb/core/formats/arrow/program/assign_internal.h
index fb67c3c7de..213a7299b0 100644
--- a/ydb/core/formats/arrow/program/assign_internal.h
+++ b/ydb/core/formats/arrow/program/assign_internal.h
@@ -11,7 +11,6 @@ class TCalculationProcessor: public IResourceProcessor {
private:
using TBase = IResourceProcessor;
- YDB_ACCESSOR_DEF(std::optional<ui32>, YqlOperationId);
YDB_ACCESSOR_DEF(std::shared_ptr<IKernelLogic>, KernelLogic);
std::shared_ptr<IStepFunction> Function;
@@ -27,6 +26,7 @@ private:
: TBase(std::move(input), std::move(output), EProcessorType::Calculation)
, KernelLogic(kernelLogic)
, Function(function) {
+ AFL_VERIFY(KernelLogic);
}
virtual bool IsAggregation() const override {
@@ -37,7 +37,7 @@ private:
public:
static TConclusion<std::shared_ptr<TCalculationProcessor>> Build(std::vector<TColumnChainInfo>&& input, const TColumnChainInfo& output,
- const std::shared_ptr<IStepFunction>& function, const std::shared_ptr<IKernelLogic>& kernelLogic = nullptr);
+ const std::shared_ptr<IStepFunction>& function, const std::shared_ptr<IKernelLogic>& kernelLogic);
};
} // namespace NKikimr::NArrow::NSSA
diff --git a/ydb/core/formats/arrow/program/collection.cpp b/ydb/core/formats/arrow/program/collection.cpp
index 194fd984b5..728a7a1693 100644
--- a/ydb/core/formats/arrow/program/collection.cpp
+++ b/ydb/core/formats/arrow/program/collection.cpp
@@ -9,6 +9,11 @@
namespace NKikimr::NArrow::NAccessor {
+void TAccessorsCollection::Upsert(const ui32 columnId, const std::shared_ptr<IChunkedArray>& data, const bool withFilter) {
+ Remove(columnId, true);
+ AddVerified(columnId, data, withFilter);
+}
+
void TAccessorsCollection::AddVerified(const ui32 columnId, const arrow::Datum& data, const bool withFilter) {
AddVerified(columnId, TAccessorCollectedContainer(data), withFilter);
}
diff --git a/ydb/core/formats/arrow/program/collection.h b/ydb/core/formats/arrow/program/collection.h
index 046f7a4c51..8822f13072 100644
--- a/ydb/core/formats/arrow/program/collection.h
+++ b/ydb/core/formats/arrow/program/collection.h
@@ -149,6 +149,7 @@ public:
void AddVerified(const ui32 columnId, const arrow::Datum& data, const bool withFilter);
void AddVerified(const ui32 columnId, const std::shared_ptr<IChunkedArray>& data, const bool withFilter);
void AddVerified(const ui32 columnId, const TAccessorCollectedContainer& data, const bool withFilter);
+ void Upsert(const ui32 columnId, const std::shared_ptr<IChunkedArray>& data, const bool withFilter);
void AddConstantVerified(const ui32 columnId, const std::shared_ptr<arrow::Scalar>& scalar) {
AFL_VERIFY(columnId);
diff --git a/ydb/core/formats/arrow/program/execution.h b/ydb/core/formats/arrow/program/execution.h
index 348dea253f..4be60989c8 100644
--- a/ydb/core/formats/arrow/program/execution.h
+++ b/ydb/core/formats/arrow/program/execution.h
@@ -10,13 +10,6 @@
namespace NKikimr::NArrow::NSSA {
-enum class EIndexCheckOperation {
- Equals,
- StartsWith,
- EndsWith,
- Contains
-};
-
class TProcessorContext;
class IFetchLogic {
@@ -153,15 +146,15 @@ public:
class TFetchIndexContext {
public:
- using EOperation = EIndexCheckOperation;
+ using TOperation = TIndexCheckOperation;
class TOperationsBySubColumn {
private:
std::optional<bool> FullColumnOperations;
- THashMap<TString, THashSet<EOperation>> Data;
+ THashMap<TString, THashSet<TOperation>> Data;
public:
- const THashMap<TString, THashSet<EOperation>>& GetData() const {
+ const THashMap<TString, THashSet<TOperation>>& GetData() const {
return Data;
}
@@ -170,7 +163,7 @@ public:
return !*FullColumnOperations;
}
- TOperationsBySubColumn& Add(const TString& subColumn, const EOperation operation, const bool strict = true) {
+ TOperationsBySubColumn& Add(const TString& subColumn, const TOperation operation, const bool strict = true) {
if (FullColumnOperations) {
AFL_VERIFY(*FullColumnOperations == !subColumn);
} else {
@@ -196,7 +189,7 @@ public:
for (auto&& i : OperationsBySubColumn.GetData()) {
auto& subColumnJson = result.InsertValue(i.first, NJson::JSON_ARRAY);
for (auto&& op : i.second) {
- subColumnJson.AppendValue(::ToString(op));
+ subColumnJson.AppendValue(op.DebugString());
}
}
return result;
@@ -231,15 +224,19 @@ public:
private:
YDB_READONLY(ui32, ColumnId, 0);
YDB_READONLY_DEF(TString, SubColumnName);
- YDB_READONLY(EIndexCheckOperation, Operation, EIndexCheckOperation::Equals);
+ TIndexCheckOperation Operation;
public:
- TCheckIndexContext(const ui32 columnId, const TString& subColumnName, const EIndexCheckOperation operation)
+ TCheckIndexContext(const ui32 columnId, const TString& subColumnName, const TIndexCheckOperation& operation)
: ColumnId(columnId)
, SubColumnName(subColumnName)
, Operation(operation) {
}
+ const TIndexCheckOperation& GetOperation() const {
+ return Operation;
+ }
+
bool operator==(const TCheckIndexContext& item) const {
return std::tie(ColumnId, SubColumnName, Operation) == std::tie(item.ColumnId, item.SubColumnName, item.Operation);
}
diff --git a/ydb/core/formats/arrow/program/graph_optimization.cpp b/ydb/core/formats/arrow/program/graph_optimization.cpp
index e541b74442..56f9cbbb51 100644
--- a/ydb/core/formats/arrow/program/graph_optimization.cpp
+++ b/ydb/core/formats/arrow/program/graph_optimization.cpp
@@ -10,7 +10,9 @@
#include <ydb/library/arrow_kernels/operations.h>
#include <ydb/library/formats/arrow/switch/switch_type.h>
+#include <library/cpp/string_utils/quote/quote.h>
#include <util/string/builder.h>
+#include <util/string/escape.h>
#include <yql/essentials/core/arrow_kernels/request/request.h>
namespace NKikimr::NArrow::NSSA::NGraph::NOptimization {
@@ -199,9 +201,12 @@ TConclusion<bool> TGraph::OptimizeMergeFetching(TGraphNode* baseNode) {
if (!i.second->Is(EProcessorType::FetchOriginalData)) {
continue;
}
- if (i.second->GetProcessorAs<TOriginalColumnDataProcessor>()->GetDataAddresses().size() +
- i.second->GetProcessorAs<TOriginalColumnDataProcessor>()->GetIndexContext().size() +
- i.second->GetProcessorAs<TOriginalColumnDataProcessor>()->GetHeaderContext().size() > 1) {
+ if (!i.second->AddOptimizerMarker(EOptimizerMarkers::FetchMerged)) {
+ continue;
+ }
+ if (i.second->GetProcessorAs<TOriginalColumnDataProcessor>()->GetDataAddresses().size() +
+ i.second->GetProcessorAs<TOriginalColumnDataProcessor>()->GetIndexContext().size() +
+ i.second->GetProcessorAs<TOriginalColumnDataProcessor>()->GetHeaderContext().size() > 1) {
continue;
}
if (i.second->GetProcessorAs<TOriginalColumnDataProcessor>()->GetDataAddresses().size()) {
@@ -220,8 +225,7 @@ TConclusion<bool> TGraph::OptimizeMergeFetching(TGraphNode* baseNode) {
for (auto&& i : dataAddresses) {
columnIds.emplace(i->GetProcessorAs<TOriginalColumnDataProcessor>()->GetOutputColumnIdOnce());
}
- auto proc =
- std::make_shared<TOriginalColumnDataProcessor>(std::vector<ui32>(columnIds.begin(), columnIds.end()));
+ auto proc = std::make_shared<TOriginalColumnDataProcessor>(std::vector<ui32>(columnIds.begin(), columnIds.end()));
for (auto&& i : dataAddresses) {
for (auto&& addr : i->GetProcessorAs<TOriginalColumnDataProcessor>()->GetDataAddresses()) {
proc->Add(addr.second);
@@ -230,7 +234,7 @@ TConclusion<bool> TGraph::OptimizeMergeFetching(TGraphNode* baseNode) {
auto nodeFetch = AddNode(proc);
FetchersMerged.emplace(nodeFetch->GetIdentifier());
for (auto&& i : dataAddresses) {
- for (auto&& to: i->GetOutputEdges()) {
+ for (auto&& to : i->GetOutputEdges()) {
AddEdge(nodeFetch.get(), to.second, to.first.GetResourceId());
}
RemoveNode(i->GetIdentifier());
@@ -245,8 +249,7 @@ TConclusion<bool> TGraph::OptimizeMergeFetching(TGraphNode* baseNode) {
for (auto&& i : headers) {
columnIds.emplace(i->GetProcessorAs<TOriginalColumnDataProcessor>()->GetOutputColumnIdOnce());
}
- auto proc =
- std::make_shared<TOriginalColumnDataProcessor>(std::vector<ui32>(columnIds.begin(), columnIds.end()));
+ auto proc = std::make_shared<TOriginalColumnDataProcessor>(std::vector<ui32>(columnIds.begin(), columnIds.end()));
for (auto&& i : indexes) {
for (auto&& addr : i->GetProcessorAs<TOriginalColumnDataProcessor>()->GetIndexContext()) {
proc->Add(addr.second);
@@ -345,12 +348,6 @@ std::optional<TResourceAddress> TGraph::GetOriginalAddress(TGraphNode* condNode)
if (path.StartsWith("$.")) {
path = path.substr(2);
}
- if (path.StartsWith("\"") && path.EndsWith("\"")) {
- if (path.size() < 2) {
- return std::nullopt;
- }
- path = path.substr(1, path.size() - 2);
- }
if (!path) {
return std::nullopt;
}
@@ -367,11 +364,11 @@ TConclusion<bool> TGraph::OptimizeConditionsForIndexes(TGraphNode* condNode) {
if (condNode->GetProcessor()->GetProcessorType() != EProcessorType::Calculation) {
return false;
}
- if (condNode->GetProcessor()->GetInput().size() != 2) {
+ auto calc = condNode->GetProcessorAs<TCalculationProcessor>();
+ if (!calc->GetKernelLogic()) {
return false;
}
- auto calc = condNode->GetProcessorAs<TCalculationProcessor>();
- if (!calc->GetYqlOperationId()) {
+ if (condNode->GetProcessor()->GetInput().size() != 2) {
return false;
}
if (condNode->GetOutputEdges().size() != 1) {
@@ -382,17 +379,7 @@ TConclusion<bool> TGraph::OptimizeConditionsForIndexes(TGraphNode* condNode) {
if (constNode->GetProcessor()->GetProcessorType() != EProcessorType::Const) {
return false;
}
- if (!!calc->GetKernelLogic()) {
- if (!calc->GetKernelLogic()->IsBoolInResult()) {
- return false;
- }
- }
- if (calc->GetYqlOperationId()) {
- if (!IsBoolResultYqlOperator((NYql::TKernelRequestBuilder::EBinaryOp)*calc->GetYqlOperationId())) {
- return false;
- }
- }
- if (!calc->GetYqlOperationId() && !calc->GetKernelLogic()) {
+ if (!calc->GetKernelLogic()->IsBoolInResult()) {
return false;
}
std::optional<TResourceAddress> dataAddr = GetOriginalAddress(dataNode);
@@ -401,91 +388,44 @@ TConclusion<bool> TGraph::OptimizeConditionsForIndexes(TGraphNode* condNode) {
}
auto* dest = condNode->GetOutputEdges().begin()->second;
const ui32 destResourceId = condNode->GetOutputEdges().begin()->first.GetResourceId();
- if ((NYql::TKernelRequestBuilder::EBinaryOp)*calc->GetYqlOperationId() == NYql::TKernelRequestBuilder::EBinaryOp::Equals ||
- (NYql::TKernelRequestBuilder::EBinaryOp)*calc->GetYqlOperationId() == NYql::TKernelRequestBuilder::EBinaryOp::StartsWith ||
- (NYql::TKernelRequestBuilder::EBinaryOp)*calc->GetYqlOperationId() == NYql::TKernelRequestBuilder::EBinaryOp::EndsWith ||
- (NYql::TKernelRequestBuilder::EBinaryOp)*calc->GetYqlOperationId() == NYql::TKernelRequestBuilder::EBinaryOp::StringContains) {
- if (!IndexesConstructed.emplace(condNode->GetIdentifier()).second) {
- return false;
- }
- RemoveEdge(condNode, dest, destResourceId);
-
- const EIndexCheckOperation indexOperation = [&]() {
- if ((NYql::TKernelRequestBuilder::EBinaryOp)*calc->GetYqlOperationId() == NYql::TKernelRequestBuilder::EBinaryOp::Equals) {
- return EIndexCheckOperation::Equals;
- }
- if ((NYql::TKernelRequestBuilder::EBinaryOp)*calc->GetYqlOperationId() == NYql::TKernelRequestBuilder::EBinaryOp::StartsWith) {
- return EIndexCheckOperation::StartsWith;
- }
- if ((NYql::TKernelRequestBuilder::EBinaryOp)*calc->GetYqlOperationId() == NYql::TKernelRequestBuilder::EBinaryOp::EndsWith) {
- return EIndexCheckOperation::EndsWith;
- }
- if ((NYql::TKernelRequestBuilder::EBinaryOp)*calc->GetYqlOperationId() == NYql::TKernelRequestBuilder::EBinaryOp::StringContains) {
- return EIndexCheckOperation::Contains;
- }
- return EIndexCheckOperation::Contains;
- AFL_VERIFY(false);
- }();
-
- const ui32 resourceIdxFetch = BuildNextResourceId();
- IDataSource::TFetchIndexContext indexContext(dataAddr->GetColumnId(),
- IDataSource::TFetchIndexContext::TOperationsBySubColumn().Add(dataAddr->GetSubColumnName(), indexOperation));
- auto indexFetchProc = std::make_shared<TOriginalColumnDataProcessor>(resourceIdxFetch, indexContext);
- auto indexFetchNode = AddNode(indexFetchProc);
- RegisterProducer(resourceIdxFetch, indexFetchNode.get());
-
- const ui32 resourceIdIndexToAnd = BuildNextResourceId();
- IDataSource::TCheckIndexContext checkIndexContext(dataAddr->GetColumnId(), dataAddr->GetSubColumnName(), indexOperation);
- auto indexCheckProc = std::make_shared<TIndexCheckerProcessor>(
- resourceIdxFetch, constNode->GetProcessor()->GetOutputColumnIdOnce(), checkIndexContext, resourceIdIndexToAnd);
- auto indexProcNode = AddNode(indexCheckProc);
- RegisterProducer(resourceIdIndexToAnd, indexProcNode.get());
- AddEdge(indexFetchNode.get(), indexProcNode.get(), resourceIdxFetch);
- AddEdge(constNode, indexProcNode.get(), constNode->GetProcessor()->GetOutputColumnIdOnce());
-
- const ui32 resourceIdEqToAnd = BuildNextResourceId();
- RegisterProducer(resourceIdEqToAnd, condNode);
- calc->SetOutputResourceIdOnce(resourceIdEqToAnd);
-
- auto andProcessor = std::make_shared<TStreamLogicProcessor>(TColumnChainInfo::BuildVector({ resourceIdEqToAnd, resourceIdIndexToAnd }),
- TColumnChainInfo(destResourceId), NKernels::EOperation::And);
- auto andNode = AddNode(andProcessor);
- AddEdge(andNode.get(), dest, destResourceId);
-
- AddEdge(indexProcNode.get(), andNode.get(), resourceIdIndexToAnd);
- AddEdge(condNode, andNode.get(), resourceIdEqToAnd);
- ResetProducer(destResourceId, andNode.get());
- return true;
+ auto indexChecker = calc->GetKernelLogic()->GetIndexCheckerOperation();
+ if (!indexChecker) {
+ return false;
}
- return false;
-}
+ if (!IndexesConstructed.emplace(condNode->GetIdentifier()).second) {
+ return false;
+ }
+ RemoveEdge(condNode, dest, destResourceId);
-bool TGraph::IsBoolResultYqlOperator(const NYql::TKernelRequestBuilder::EBinaryOp op) const {
- switch (op) {
- case NYql::TKernelRequestBuilder::EBinaryOp::And:
- case NYql::TKernelRequestBuilder::EBinaryOp::Or:
- case NYql::TKernelRequestBuilder::EBinaryOp::Xor:
- return true;
- case NYql::TKernelRequestBuilder::EBinaryOp::Add:
- case NYql::TKernelRequestBuilder::EBinaryOp::Sub:
- case NYql::TKernelRequestBuilder::EBinaryOp::Mul:
- case NYql::TKernelRequestBuilder::EBinaryOp::Div:
- case NYql::TKernelRequestBuilder::EBinaryOp::Mod:
- case NYql::TKernelRequestBuilder::EBinaryOp::Coalesce:
- return false;
+ const ui32 resourceIdxFetch = BuildNextResourceId();
+ IDataSource::TFetchIndexContext indexContext(
+ dataAddr->GetColumnId(), IDataSource::TFetchIndexContext::TOperationsBySubColumn().Add(dataAddr->GetSubColumnName(), *indexChecker));
+ auto indexFetchProc = std::make_shared<TOriginalColumnDataProcessor>(resourceIdxFetch, indexContext);
+ auto indexFetchNode = AddNode(indexFetchProc);
+ RegisterProducer(resourceIdxFetch, indexFetchNode.get());
- case NYql::TKernelRequestBuilder::EBinaryOp::StartsWith:
- case NYql::TKernelRequestBuilder::EBinaryOp::EndsWith:
- case NYql::TKernelRequestBuilder::EBinaryOp::StringContains:
+ const ui32 resourceIdIndexToAnd = BuildNextResourceId();
+ IDataSource::TCheckIndexContext checkIndexContext(dataAddr->GetColumnId(), dataAddr->GetSubColumnName(), *indexChecker);
+ auto indexCheckProc = std::make_shared<TIndexCheckerProcessor>(
+ resourceIdxFetch, constNode->GetProcessor()->GetOutputColumnIdOnce(), checkIndexContext, resourceIdIndexToAnd);
+ auto indexProcNode = AddNode(indexCheckProc);
+ RegisterProducer(resourceIdIndexToAnd, indexProcNode.get());
+ AddEdge(indexFetchNode.get(), indexProcNode.get(), resourceIdxFetch);
+ AddEdge(constNode, indexProcNode.get(), constNode->GetProcessor()->GetOutputColumnIdOnce());
- case NYql::TKernelRequestBuilder::EBinaryOp::Equals:
- case NYql::TKernelRequestBuilder::EBinaryOp::NotEquals:
- case NYql::TKernelRequestBuilder::EBinaryOp::Less:
- case NYql::TKernelRequestBuilder::EBinaryOp::LessOrEqual:
- case NYql::TKernelRequestBuilder::EBinaryOp::Greater:
- case NYql::TKernelRequestBuilder::EBinaryOp::GreaterOrEqual:
- return true;
- }
+ const ui32 resourceIdEqToAnd = BuildNextResourceId();
+ RegisterProducer(resourceIdEqToAnd, condNode);
+ calc->SetOutputResourceIdOnce(resourceIdEqToAnd);
+
+ auto andProcessor = std::make_shared<TStreamLogicProcessor>(
+ TColumnChainInfo::BuildVector({ resourceIdEqToAnd, resourceIdIndexToAnd }), TColumnChainInfo(destResourceId), NKernels::EOperation::And);
+ auto andNode = AddNode(andProcessor);
+ AddEdge(andNode.get(), dest, destResourceId);
+
+ AddEdge(indexProcNode.get(), andNode.get(), resourceIdIndexToAnd);
+ AddEdge(condNode, andNode.get(), resourceIdEqToAnd);
+ ResetProducer(destResourceId, andNode.get());
+ return true;
}
TConclusion<bool> TGraph::OptimizeConditionsForHeadersCheck(TGraphNode* condNode) {
@@ -501,17 +441,7 @@ TConclusion<bool> TGraph::OptimizeConditionsForHeadersCheck(TGraphNode* condNode
}
auto* dest = condNode->GetOutputEdges().begin()->second;
const ui32 destResourceId = condNode->GetOutputEdges().begin()->first.GetResourceId();
- if (!!calc->GetKernelLogic()) {
- if (!calc->GetKernelLogic()->IsBoolInResult()) {
- return false;
- }
- }
- if (calc->GetYqlOperationId()) {
- if (!IsBoolResultYqlOperator((NYql::TKernelRequestBuilder::EBinaryOp)*calc->GetYqlOperationId())) {
- return false;
- }
- }
- if (!calc->GetYqlOperationId() && !calc->GetKernelLogic()) {
+ if (!calc->GetKernelLogic() || !calc->GetKernelLogic()->IsBoolInResult()) {
return false;
}
auto* node = GetProducerVerified(condNode->GetProcessor()->GetInput()[0].GetColumnId());
@@ -561,10 +491,11 @@ TConclusion<bool> TGraph::OptimizeFilterWithCoalesce(TGraphNode* cNode) {
return false;
}
const auto calc = cNode->GetProcessorAs<TCalculationProcessor>();
- if (!calc->GetYqlOperationId()) {
+ if (!calc->GetKernelLogic()->GetYqlOperationId()) {
return false;
}
- if ((NYql::TKernelRequestBuilder::EBinaryOp)*calc->GetYqlOperationId() != NYql::TKernelRequestBuilder::EBinaryOp::Coalesce) {
+ if ((NYql::TKernelRequestBuilder::EBinaryOp)*calc->GetKernelLogic()->GetYqlOperationId() !=
+ NYql::TKernelRequestBuilder::EBinaryOp::Coalesce) {
return false;
}
if (cNode->GetOutputEdges().size() != 1) {
@@ -585,30 +516,14 @@ TConclusion<bool> TGraph::OptimizeFilterWithCoalesce(TGraphNode* cNode) {
auto* nextNode = cNode->GetOutputEdges().begin()->second;
if (nextNode->GetProcessor()->GetProcessorType() != EProcessorType::Filter) {
- if (nextNode->GetProcessor()->GetProcessorType() == EProcessorType::Calculation) {
- const auto outputCalc = nextNode->GetProcessorAs<TCalculationProcessor>();
- if (!outputCalc->GetYqlOperationId()) {
- return false;
- }
- if ((NYql::TKernelRequestBuilder::EBinaryOp)*outputCalc->GetYqlOperationId() != NYql::TKernelRequestBuilder::EBinaryOp::And) {
- return false;
- }
- } else if (nextNode->GetProcessor()->GetProcessorType() == EProcessorType::StreamLogic) {
- const auto outputCalc = nextNode->GetProcessorAs<TStreamLogicProcessor>();
- if (outputCalc->GetOperation() != NKernels::EOperation::And) {
- return false;
- }
+ if (nextNode->GetProcessor()->GetProcessorType() != EProcessorType::StreamLogic) {
+ return false;
}
- if (nextNode->GetOutputEdges().size() != 1) {
+ const auto outputCalc = nextNode->GetProcessorAs<TStreamLogicProcessor>();
+ if (outputCalc->GetOperation() != NKernels::EOperation::And) {
return false;
}
- if (nextNode->GetOutputEdges().begin()->second->GetProcessor()->GetProcessorType() == EProcessorType::StreamLogic) {
- const auto outputCalc = nextNode->GetOutputEdges().begin()->second->GetProcessorAs<TStreamLogicProcessor>();
- if (outputCalc->GetOperation() != NKernels::EOperation::And) {
- return false;
- }
- } else if (nextNode->GetOutputEdges().begin()->second->GetProcessor()->GetProcessorType() == EProcessorType::Filter) {
- } else {
+ if (nextNode->GetOutputEdges().size() != 1) {
return false;
}
}
@@ -693,16 +608,16 @@ TConclusionStatus TGraph::Collapse() {
}
}
-// {
-// auto conclusion = OptimizeConditionsForHeadersCheck(n.get());
-// if (conclusion.IsFail()) {
-// return conclusion;
-// }
-// if (*conclusion) {
-// hasChanges = true;
-// break;
-// }
-// }
+ // {
+ // auto conclusion = OptimizeConditionsForHeadersCheck(n.get());
+ // if (conclusion.IsFail()) {
+ // return conclusion;
+ // }
+ // if (*conclusion) {
+ // hasChanges = true;
+ // break;
+ // }
+ // }
{
auto conclusion = OptimizeConditionsForStream(n.get());
diff --git a/ydb/core/formats/arrow/program/graph_optimization.h b/ydb/core/formats/arrow/program/graph_optimization.h
index 5fcf22495e..ec5b97a138 100644
--- a/ydb/core/formats/arrow/program/graph_optimization.h
+++ b/ydb/core/formats/arrow/program/graph_optimization.h
@@ -47,8 +47,13 @@ public:
TString DebugString() const;
};
+enum class EOptimizerMarkers {
+ FetchMerged
+};
+
class TGraphNode {
private:
+ std::set<EOptimizerMarkers> OptimizerMarkers;
YDB_READONLY(i64, Identifier, 0);
YDB_READONLY_DEF(std::shared_ptr<IResourceProcessor>, Processor);
class TAddress {
@@ -81,6 +86,15 @@ private:
std::map<TAddress, TGraphNode*> OutputEdges;
public:
+
+ bool AddOptimizerMarker(const EOptimizerMarkers marker) {
+ return OptimizerMarkers.emplace(marker).second;
+ }
+
+ bool HasOptimizerMarker(const EOptimizerMarkers marker) {
+ return OptimizerMarkers.contains(marker);
+ }
+
void AddEdgeTo(TGraphNode* to, const ui32 resourceId);
void AddEdgeFrom(TGraphNode* from, const ui32 resourceId);
void RemoveEdgeTo(const ui32 identifier, const ui32 resourceId);
@@ -147,7 +161,6 @@ private:
std::optional<TResourceAddress> GetOriginalAddress(TGraphNode* condNode) const;
TConclusion<bool> OptimizeForFetchSubColumns(TGraphNode* condNode);
TConclusion<bool> OptimizeConditionsForHeadersCheck(TGraphNode* condNode);
- bool IsBoolResultYqlOperator(const NYql::TKernelRequestBuilder::EBinaryOp op) const;
TConclusion<bool> OptimizeConditionsForStream(TGraphNode* condNode);
TConclusion<bool> OptimizeConditionsForIndexes(TGraphNode* condNode);
diff --git a/ydb/core/formats/arrow/program/index.h b/ydb/core/formats/arrow/program/index.h
index 3a54dd9c5b..1ff82c8016 100644
--- a/ydb/core/formats/arrow/program/index.h
+++ b/ydb/core/formats/arrow/program/index.h
@@ -28,7 +28,7 @@ private:
bool ApplyToFilterFlag = false;
virtual TString DoGetSignalCategoryName() const override {
- return ::ToString(GetProcessorType()) + "::" + ::ToString(IndexContext.GetOperation());
+ return ::ToString(GetProcessorType()) + "::" + IndexContext.GetOperation().GetSignalId();
}
public:
diff --git a/ydb/core/formats/arrow/program/kernel_logic.cpp b/ydb/core/formats/arrow/program/kernel_logic.cpp
index f20d22d9f5..ebd9ec2c9b 100644
--- a/ydb/core/formats/arrow/program/kernel_logic.cpp
+++ b/ydb/core/formats/arrow/program/kernel_logic.cpp
@@ -5,6 +5,8 @@
#include <ydb/core/formats/arrow/accessor/sub_columns/accessor.h>
#include <ydb/core/formats/arrow/accessor/sub_columns/partial.h>
+#include <yql/essentials/core/arrow_kernels/request/request.h>
+
namespace NKikimr::NArrow::NSSA {
TConclusion<bool> TGetJsonPath::DoExecute(const std::vector<TColumnChainInfo>& input, const std::vector<TColumnChainInfo>& output,
@@ -47,37 +49,6 @@ std::shared_ptr<IChunkedArray> TGetJsonPath::ExtractArray(const std::shared_ptr<
}
}
-std::optional<TFetchingInfo> TGetJsonPath::BuildFetchTask(const ui32 columnId, const NAccessor::IChunkedArray::EType arrType,
- const std::vector<TColumnChainInfo>& input, const std::shared_ptr<TAccessorsCollection>& resources) const {
- if (arrType != NAccessor::IChunkedArray::EType::SubColumnsArray) {
- return TFetchingInfo::BuildFullRestore(false);
- }
- AFL_VERIFY(input.size() == 2 && input.front().GetColumnId() == columnId);
- auto description = BuildDescription(input, resources).DetachResult();
- const std::vector<TString> subColumns = { TString(description.GetJsonPath().data(), description.GetJsonPath().size()) };
- if (!description.GetInputAccessor()) {
- return TFetchingInfo::BuildSubColumnsRestore(subColumns);
- }
-
- std::optional<bool> hasSubColumns;
- return NAccessor::TCompositeChunkedArray::VisitDataOwners<TFetchingInfo>(
- description.GetInputAccessor(), [&](const std::shared_ptr<NAccessor::IChunkedArray>& arr) {
- if (arr->GetType() == NAccessor::IChunkedArray::EType::SubColumnsPartialArray) {
- AFL_VERIFY(!hasSubColumns || *hasSubColumns);
- hasSubColumns = true;
- auto scArr = std::static_pointer_cast<NAccessor::TSubColumnsPartialArray>(arr);
- if (scArr->NeedFetch(description.GetJsonPath())) {
- return std::optional<TFetchingInfo>(TFetchingInfo::BuildSubColumnsRestore(subColumns));
- }
- } else {
- AFL_VERIFY(arr->GetType() == NAccessor::IChunkedArray::EType::SubColumnsArray);
- AFL_VERIFY(!hasSubColumns || !*hasSubColumns);
- hasSubColumns = false;
- }
- return std::optional<TFetchingInfo>();
- });
-}
-
NAccessor::TCompositeChunkedArray::TBuilder TGetJsonPath::MakeCompositeBuilder() const {
return NAccessor::TCompositeChunkedArray::TBuilder(arrow::utf8());
}
@@ -99,4 +70,44 @@ NAccessor::TCompositeChunkedArray::TBuilder TExistsJsonPath::MakeCompositeBuilde
return NAccessor::TCompositeChunkedArray::TBuilder(arrow::uint8());
}
+TString TSimpleKernelLogic::SignalDescription() const {
+ if (YqlOperationId) {
+ return ::ToString((NYql::TKernelRequestBuilder::EBinaryOp)*YqlOperationId);
+ } else {
+ return "UNKNOWN";
+ }
+}
+
+bool TSimpleKernelLogic::IsBoolInResult() const {
+ if (YqlOperationId) {
+ switch ((NYql::TKernelRequestBuilder::EBinaryOp)*YqlOperationId) {
+ case NYql::TKernelRequestBuilder::EBinaryOp::And:
+ case NYql::TKernelRequestBuilder::EBinaryOp::Or:
+ case NYql::TKernelRequestBuilder::EBinaryOp::Xor:
+ return true;
+ case NYql::TKernelRequestBuilder::EBinaryOp::Add:
+ case NYql::TKernelRequestBuilder::EBinaryOp::Sub:
+ case NYql::TKernelRequestBuilder::EBinaryOp::Mul:
+ case NYql::TKernelRequestBuilder::EBinaryOp::Div:
+ case NYql::TKernelRequestBuilder::EBinaryOp::Mod:
+ case NYql::TKernelRequestBuilder::EBinaryOp::Coalesce:
+ return false;
+
+ case NYql::TKernelRequestBuilder::EBinaryOp::StartsWith:
+ case NYql::TKernelRequestBuilder::EBinaryOp::EndsWith:
+ case NYql::TKernelRequestBuilder::EBinaryOp::StringContains:
+
+ case NYql::TKernelRequestBuilder::EBinaryOp::Equals:
+ case NYql::TKernelRequestBuilder::EBinaryOp::NotEquals:
+ case NYql::TKernelRequestBuilder::EBinaryOp::Less:
+ case NYql::TKernelRequestBuilder::EBinaryOp::LessOrEqual:
+ case NYql::TKernelRequestBuilder::EBinaryOp::Greater:
+ case NYql::TKernelRequestBuilder::EBinaryOp::GreaterOrEqual:
+ return true;
+ }
+ } else {
+ return false;
+ }
+}
+
} // namespace NKikimr::NArrow::NSSA
diff --git a/ydb/core/formats/arrow/program/kernel_logic.h b/ydb/core/formats/arrow/program/kernel_logic.h
index c6488f691c..9e3d795bd4 100644
--- a/ydb/core/formats/arrow/program/kernel_logic.h
+++ b/ydb/core/formats/arrow/program/kernel_logic.h
@@ -8,21 +8,40 @@
namespace NKikimr::NArrow::NSSA {
+enum class ECalculationHardness {
+ JustAccessorUsage = 1,
+ NotSpecified = 3,
+ Equals = 5,
+ StringMatching = 10,
+ Unknown = 20
+};
+
class IKernelLogic {
private:
virtual TConclusion<bool> DoExecute(const std::vector<TColumnChainInfo>& input, const std::vector<TColumnChainInfo>& output,
const std::shared_ptr<TAccessorsCollection>& resources) const = 0;
+ virtual std::optional<TIndexCheckOperation> DoGetIndexCheckerOperation() const = 0;
+ YDB_ACCESSOR_DEF(std::optional<ui32>, YqlOperationId);
+
public:
+ IKernelLogic() = default;
+
+ IKernelLogic(const ui32 yqlOperationId)
+ : YqlOperationId(yqlOperationId) {
+ }
+
virtual ~IKernelLogic() = default;
+ virtual TString SignalDescription() const {
+ return GetClassName();
+ }
+ virtual ECalculationHardness GetWeight() const = 0;
+
using TFactory = NObjectFactory::TObjectFactory<IKernelLogic, TString>;
virtual TString GetClassName() const = 0;
- virtual std::optional<TFetchingInfo> BuildFetchTask(const ui32 columnId, const NAccessor::IChunkedArray::EType arrType,
- const std::vector<TColumnChainInfo>& input, const std::shared_ptr<TAccessorsCollection>& resources) const = 0;
-
TConclusion<bool> Execute(const std::vector<TColumnChainInfo>& input, const std::vector<TColumnChainInfo>& output,
const std::shared_ptr<TAccessorsCollection>& resources) const {
if (!resources) {
@@ -32,6 +51,114 @@ public:
}
virtual bool IsBoolInResult() const = 0;
+ std::optional<TIndexCheckOperation> GetIndexCheckerOperation() const {
+ return DoGetIndexCheckerOperation();
+ }
+};
+
+class TSimpleKernelLogic: public IKernelLogic {
+private:
+ using TBase = IKernelLogic;
+ YDB_READONLY_DEF(std::optional<ui32>, YqlOperationId);
+
+ virtual TConclusion<bool> DoExecute(const std::vector<TColumnChainInfo>& /*input*/, const std::vector<TColumnChainInfo>& /*output*/,
+ const std::shared_ptr<TAccessorsCollection>& /*resources*/) const override {
+ return false;
+ }
+
+ virtual std::optional<TIndexCheckOperation> DoGetIndexCheckerOperation() const override {
+ return std::nullopt;
+ }
+
+public:
+ TSimpleKernelLogic() = default;
+ TSimpleKernelLogic(const ui32 yqlOperationId)
+ : TBase(yqlOperationId)
+ , YqlOperationId(yqlOperationId) {
+ }
+
+ virtual TString SignalDescription() const override;
+
+ virtual ECalculationHardness GetWeight() const override {
+ if (!YqlOperationId) {
+ return ECalculationHardness::Unknown;
+ }
+ return ECalculationHardness::NotSpecified;
+ }
+
+ virtual TString GetClassName() const override {
+ return "SIMPLE";
+ }
+
+ virtual bool IsBoolInResult() const override;
+};
+
+class TLogicMatchString: public IKernelLogic {
+private:
+ using TBase = IKernelLogic;
+ virtual TConclusion<bool> DoExecute(const std::vector<TColumnChainInfo>& /*input*/, const std::vector<TColumnChainInfo>& /*output*/,
+ const std::shared_ptr<TAccessorsCollection>& /*resources*/) const override {
+ return false;
+ }
+ virtual std::optional<TIndexCheckOperation> DoGetIndexCheckerOperation() const override {
+ return TIndexCheckOperation(Operation, CaseSensitive);
+ }
+ virtual ECalculationHardness GetWeight() const override {
+ return ECalculationHardness::StringMatching;
+ }
+
+ const TIndexCheckOperation::EOperation Operation;
+ const bool CaseSensitive;
+ const bool IsSimpleFunction;
+
+public:
+ TLogicMatchString(const TIndexCheckOperation::EOperation operation, const bool caseSensitive, const bool isSimpleFunction)
+ : Operation(operation)
+ , CaseSensitive(caseSensitive)
+ , IsSimpleFunction(isSimpleFunction) {
+ }
+
+ virtual TString SignalDescription() const override {
+ return "MATCH_STRING::" + ::ToString(Operation) + "::" + ::ToString(CaseSensitive);
+ }
+
+ virtual TString GetClassName() const override {
+ return "MATCH_STRING";
+ }
+
+ virtual bool IsBoolInResult() const override {
+ return !IsSimpleFunction;
+ }
+};
+
+class TLogicEquals: public IKernelLogic {
+private:
+ using TBase = IKernelLogic;
+ virtual TConclusion<bool> DoExecute(const std::vector<TColumnChainInfo>& /*input*/, const std::vector<TColumnChainInfo>& /*output*/,
+ const std::shared_ptr<TAccessorsCollection>& /*resources*/) const override {
+ return false;
+ }
+ virtual std::optional<TIndexCheckOperation> DoGetIndexCheckerOperation() const override {
+ return TIndexCheckOperation(TIndexCheckOperation::EOperation::Equals, true);
+ }
+ const bool IsSimpleFunction;
+
+ virtual ECalculationHardness GetWeight() const override {
+ return ECalculationHardness::Equals;
+ }
+
+public:
+ TLogicEquals(const bool isSimpleFunction)
+ : IsSimpleFunction(isSimpleFunction) {
+ }
+
+ virtual TString GetClassName() const override {
+ return "EQUALS";
+ }
+
+ virtual bool IsBoolInResult() const override {
+ return !IsSimpleFunction;
+ }
};
class TGetJsonPath: public IKernelLogic {
@@ -39,6 +166,13 @@ public:
static TString GetClassNameStatic() {
return "JsonValue";
}
+ virtual std::optional<TIndexCheckOperation> DoGetIndexCheckerOperation() const override {
+ return std::nullopt;
+ }
+
+ virtual ECalculationHardness GetWeight() const override {
+ return ECalculationHardness::JustAccessorUsage;
+ }
private:
virtual bool IsBoolInResult() const override {
@@ -81,9 +215,6 @@ private:
return TConclusionStatus::Fail("incorrect path format: have to be as '$.**...**'");
}
svPath = svPath.substr(2);
- if (svPath.starts_with("\"") && svPath.ends_with("\"") && svPath.size() > 2) {
- svPath = svPath.substr(1, svPath.size() - 2);
- }
return TDescription(resources->GetAccessorOptional(input.front().GetColumnId()), svPath);
}
@@ -94,9 +225,6 @@ private:
static const inline TFactory::TRegistrator<TGetJsonPath> Registrator = TFactory::TRegistrator<TGetJsonPath>(GetClassNameStatic());
- virtual std::optional<TFetchingInfo> BuildFetchTask(const ui32 columnId, const NAccessor::IChunkedArray::EType arrType,
- const std::vector<TColumnChainInfo>& input, const std::shared_ptr<TAccessorsCollection>& resources) const override;
-
virtual TConclusion<bool> DoExecute(const std::vector<TColumnChainInfo>& input, const std::vector<TColumnChainInfo>& output,
const std::shared_ptr<TAccessorsCollection>& resources) const override;
diff --git a/ydb/core/formats/arrow/program/original.h b/ydb/core/formats/arrow/program/original.h
index 3d3ab3d1fc..0ba877b54f 100644
--- a/ydb/core/formats/arrow/program/original.h
+++ b/ydb/core/formats/arrow/program/original.h
@@ -2,6 +2,7 @@
#include "abstract.h"
#include "functions.h"
#include "kernel_logic.h"
+#include "execution.h"
namespace NKikimr::NArrow::NSSA {
diff --git a/ydb/core/formats/arrow/program/stream_logic.cpp b/ydb/core/formats/arrow/program/stream_logic.cpp
index 58079b6d98..69425620a1 100644
--- a/ydb/core/formats/arrow/program/stream_logic.cpp
+++ b/ydb/core/formats/arrow/program/stream_logic.cpp
@@ -29,72 +29,59 @@ TConclusion<bool> TStreamLogicProcessor::OnInputReady(
const ui32 inputId, const TProcessorContext& context, const TExecutionNodeContext& /*nodeContext*/) const {
auto accInput = context.GetResources()->GetAccessorVerified(inputId);
- std::shared_ptr<arrow::Scalar> monoValue;
AFL_VERIFY(!context.GetResources()->HasMarker(FinishMarker));
const auto accResult = context.GetResources()->GetAccessorOptional(GetOutputColumnIdOnce());
- const auto isMonoValue = accInput->CheckOneValueAccessor(monoValue);
- if (isMonoValue && *isMonoValue) {
- const auto isFalseConclusion = ScalarIsFalse(monoValue);
- if (isFalseConclusion.IsFail()) {
- return isFalseConclusion;
- }
- const auto isTrueConclusion = ScalarIsTrue(monoValue);
- if (isTrueConclusion.IsFail()) {
- return isTrueConclusion;
- }
- AFL_VERIFY(*isFalseConclusion || *isTrueConclusion);
+ TConclusion<std::optional<bool>> isMonoInput = GetMonoInput(accInput);
+ if (isMonoInput.IsFail()) {
+ return isMonoInput;
+ }
+
+ if (isMonoInput.GetResult()) {
+ const bool monoValue = *isMonoInput.GetResult();
if (Operation == NKernels::EOperation::And) {
- if (*isTrueConclusion) {
+ if (monoValue) {
if (!accResult) {
context.GetResources()->AddVerified(GetOutputColumnIdOnce(),
- std::make_shared<NAccessor::TSparsedArray>(
- std::make_shared<arrow::UInt8Scalar>(1), arrow::uint8(), context.GetResources()->GetRecordsCountActualVerified()),
- false);
+ NAccessor::TSparsedArray::BuildTrueArrayUI8(context.GetResources()->GetRecordsCountActualVerified()), false);
}
return false;
} else {
- if (accResult) {
- context.GetResources()->Remove(GetOutputColumnIdOnce(), true);
- }
- context.GetResources()->AddVerified(GetOutputColumnIdOnce(),
- std::make_shared<NAccessor::TSparsedArray>(
- std::make_shared<arrow::UInt8Scalar>(0), arrow::uint8(), context.GetResources()->GetRecordsCountActualVerified()),
- false);
+ context.GetResources()->Upsert(GetOutputColumnIdOnce(),
+ NAccessor::TSparsedArray::BuildFalseArrayUI8(context.GetResources()->GetRecordsCountActualVerified()), false);
return true;
}
} else if (Operation == NKernels::EOperation::Or) {
- if (*isFalseConclusion) {
+ if (!monoValue) {
if (!accResult) {
context.GetResources()->AddVerified(GetOutputColumnIdOnce(),
- std::make_shared<NAccessor::TSparsedArray>(
- std::make_shared<arrow::UInt8Scalar>(0), arrow::uint8(), context.GetResources()->GetRecordsCountActualVerified()),
- false);
+ NAccessor::TSparsedArray::BuildFalseArrayUI8(context.GetResources()->GetRecordsCountActualVerified()), false);
}
return false;
} else {
- if (accResult) {
- context.GetResources()->Remove(GetOutputColumnIdOnce(), true);
- }
- context.GetResources()->AddVerified(GetOutputColumnIdOnce(),
- std::make_shared<NAccessor::TSparsedArray>(
- std::make_shared<arrow::UInt8Scalar>(1), arrow::uint8(), context.GetResources()->GetRecordsCountActualVerified()),
- false);
+ context.GetResources()->Upsert(GetOutputColumnIdOnce(),
+ NAccessor::TSparsedArray::BuildTrueArrayUI8(context.GetResources()->GetRecordsCountActualVerified()), false);
return true;
}
}
}
if (!accResult) {
+ AFL_VERIFY(accInput->GetDataType()->id() == arrow::uint8()->id())("type", accInput->GetDataType()->ToString());
context.GetResources()->AddVerified(GetOutputColumnIdOnce(), accInput, false);
} else {
auto result = Function->Call(TColumnChainInfo::BuildVector({ GetOutputColumnIdOnce(), inputId }), context.GetResources());
if (result.IsFail()) {
return result;
}
+ auto datum = result.DetachResult();
context.GetResources()->Remove(GetOutputColumnIdOnce());
- context.GetResources()->AddVerified(GetOutputColumnIdOnce(), std::move(*result), false);
+ context.GetResources()->AddVerified(GetOutputColumnIdOnce(), datum, false);
+ if (IsFinishDatum(datum)) {
+ return true;
+ }
}
+
return false;
}
@@ -162,4 +149,57 @@ NJson::TJsonValue TStreamLogicProcessor::DoDebugJson() const {
return result;
}
+bool TStreamLogicProcessor::IsFinishDatum(const arrow::Datum& datum) const {
+ const auto arrChecker = [&](const arrow::Array& arr) {
+ AFL_VERIFY(arr.type()->id() == arrow::uint8()->id());
+ const arrow::UInt8Array& ui8Arr = static_cast<const arrow::UInt8Array&>(arr);
+ const ui8* values = ui8Arr.raw_values();
+ if (Operation == NKernels::EOperation::And) {
+ for (ui32 i = 0; i < ui8Arr.length(); ++i) {
+ if (values[i] != 0) {
+ return false;
+ }
+ }
+ } else if (Operation == NKernels::EOperation::Or) {
+ for (ui32 i = 0; i < ui8Arr.length(); ++i) {
+ if (values[i] == 0) {
+ return false;
+ }
+ }
+ } else {
+ AFL_VERIFY(false)("op", Operation);
+ }
+ return true;
+ };
+ if (datum.is_array()) {
+ auto arr = datum.make_array();
+ return arrChecker(*arr);
+ } else if (datum.is_arraylike()) {
+ auto arr = datum.chunked_array();
+ AFL_VERIFY(arr->type()->id() == arrow::uint8()->id());
+ for (auto&& chunk : arr->chunks()) {
+ if (!arrChecker(*chunk)) {
+ return false;
+ }
+ }
+ return true;
+ } else {
+ AFL_VERIFY(false)("kind", (ui32)datum.kind());
+ return false;
+ }
+}
+
+TConclusion<std::optional<bool>> TStreamLogicProcessor::GetMonoInput(const std::shared_ptr<IChunkedArray>& inputArray) const {
+ std::shared_ptr<arrow::Scalar> monoValue;
+ const auto isMonoValue = inputArray->CheckOneValueAccessor(monoValue);
+ if (!isMonoValue || !*isMonoValue) {
+ return std::optional<bool>();
+ }
+ const auto isFalseConclusion = ScalarIsFalse(monoValue);
+ if (isFalseConclusion.IsFail()) {
+ return isFalseConclusion;
+ }
+ return !*isFalseConclusion;
+}
+
} // namespace NKikimr::NArrow::NSSA
diff --git a/ydb/core/formats/arrow/program/stream_logic.h b/ydb/core/formats/arrow/program/stream_logic.h
index f78619b8cf..3855cedf54 100644
--- a/ydb/core/formats/arrow/program/stream_logic.h
+++ b/ydb/core/formats/arrow/program/stream_logic.h
@@ -21,15 +21,16 @@ private:
return false;
}
+ TConclusion<std::optional<bool>> GetMonoInput(const std::shared_ptr<IChunkedArray>& inputArray) const;
+
+ bool IsFinishDatum(const arrow::Datum& datum) const;
virtual ui64 DoGetWeight() const override;
public:
NKernels::EOperation GetOperation() const {
return Operation;
}
-
TConclusion<bool> OnInputReady(const ui32 inputId, const TProcessorContext& context, const TExecutionNodeContext& nodeContext) const;
-
TStreamLogicProcessor(std::vector<TColumnChainInfo>&& input, const TColumnChainInfo& output, const NKernels::EOperation op);
};
diff --git a/ydb/core/formats/arrow/reader/merger.cpp b/ydb/core/formats/arrow/reader/merger.cpp
index b6c56ba231..06b5d2be4b 100644
--- a/ydb/core/formats/arrow/reader/merger.cpp
+++ b/ydb/core/formats/arrow/reader/merger.cpp
@@ -105,7 +105,7 @@ std::shared_ptr<arrow::Table> TMergePartialStream::SingleSourceDrain(const TSort
*lastResultPosition = TCursor(keys, 0, SortSchema->field_names());
}
if (SortHeap.Current().GetFilter()) {
- AFL_VERIFY(SortHeap.Current().GetFilter()->Apply(result, TColumnFilter::TApplyContext(pos.GetPosition() + (include ? 0 : 1), resultSize)));
+ SortHeap.Current().GetFilter()->Apply(result, TColumnFilter::TApplyContext(pos.GetPosition() + (include ? 0 : 1), resultSize));
}
} else {
result = SortHeap.Current().GetKeyColumns().SliceData(startPos, resultSize);
@@ -114,7 +114,7 @@ std::shared_ptr<arrow::Table> TMergePartialStream::SingleSourceDrain(const TSort
*lastResultPosition = TCursor(keys, keys->num_rows() - 1, SortSchema->field_names());
}
if (SortHeap.Current().GetFilter()) {
- AFL_VERIFY(SortHeap.Current().GetFilter()->Apply(result, TColumnFilter::TApplyContext(startPos, resultSize)));
+ SortHeap.Current().GetFilter()->Apply(result, TColumnFilter::TApplyContext(startPos, resultSize));
}
}
if (!result || !result->num_rows()) {
diff --git a/ydb/core/formats/arrow/ut/ut_program_step.cpp b/ydb/core/formats/arrow/ut/ut_program_step.cpp
index 687777f331..951cc57bfb 100644
--- a/ydb/core/formats/arrow/ut/ut_program_step.cpp
+++ b/ydb/core/formats/arrow/ut/ut_program_step.cpp
@@ -43,8 +43,8 @@ size_t FilterTest(const std::vector<std::shared_ptr<arrow::Array>>& args, const
std::make_shared<arrow::Field>("y", args.at(1)->type()), std::make_shared<arrow::Field>("z", args.at(2)->type()) });
TSchemaColumnResolver resolver(schema);
NOptimization::TGraph::TBuilder builder(resolver);
- builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1, 2}), TColumnChainInfo(4), std::make_shared<TSimpleFunction>(op1)).DetachResult());
- builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({4, 3}), TColumnChainInfo(5), std::make_shared<TSimpleFunction>(op2)).DetachResult());
+ builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1, 2}), TColumnChainInfo(4), std::make_shared<TSimpleFunction>(op1), std::make_shared<TSimpleKernelLogic>()).DetachResult());
+ builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({4, 3}), TColumnChainInfo(5), std::make_shared<TSimpleFunction>(op2), std::make_shared<TSimpleKernelLogic>()).DetachResult());
builder.Add(std::make_shared<TFilterProcessor>(TColumnChainInfo(5)));
builder.Add(std::make_shared<TProjectionProcessor>(TColumnChainInfo::BuildVector({ 4, 5 })));
auto chain = builder.Finish().DetachResult();
@@ -74,8 +74,8 @@ size_t FilterTestUnary(std::vector<std::shared_ptr<arrow::Array>> args, const EO
}
NOptimization::TGraph::TBuilder builder(resolver);
- builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1}), TColumnChainInfo(4), std::make_shared<TSimpleFunction>(op1)).DetachResult());
- builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({2, 4}), TColumnChainInfo(5), std::make_shared<TSimpleFunction>(op2)).DetachResult());
+ builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1}), TColumnChainInfo(4), std::make_shared<TSimpleFunction>(op1), std::make_shared<TSimpleKernelLogic>()).DetachResult());
+ builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({2, 4}), TColumnChainInfo(5), std::make_shared<TSimpleFunction>(op2), std::make_shared<TSimpleKernelLogic>()).DetachResult());
builder.Add(std::make_shared<TFilterProcessor>(TColumnChainInfo(5)));
builder.Add(std::make_shared<TProjectionProcessor>(TColumnChainInfo::BuildVector({ 4, 5 })));
auto chain = builder.Finish().DetachResult();
@@ -103,7 +103,7 @@ std::vector<bool> LikeTest(const std::vector<std::string>& data, EOperation op,
NOptimization::TGraph::TBuilder builder(resolver);
builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1}), TColumnChainInfo(2),
- std::make_shared<TSimpleFunction>(op, std::make_shared<arrow::compute::MatchSubstringOptions>(pattern, ignoreCase))).DetachResult());
+ std::make_shared<TSimpleFunction>(op, std::make_shared<arrow::compute::MatchSubstringOptions>(pattern, ignoreCase)), std::make_shared<TSimpleKernelLogic>()).DetachResult());
builder.Add(std::make_shared<TProjectionProcessor>(TColumnChainInfo::BuildVector({ 2 })));
auto chain = builder.Finish().DetachResult();
@@ -505,7 +505,7 @@ Y_UNIT_TEST_SUITE(ProgramStep) {
TSchemaColumnResolver resolver(schema);
NOptimization::TGraph::TBuilder builder(resolver);
builder.Add(std::make_shared<TConstProcessor>(std::make_shared<arrow::Int64Scalar>(56), 3));
- builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1, 3}), TColumnChainInfo(4), std::make_shared<TSimpleFunction>(EOperation::Add)).DetachResult());
+ builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1, 3}), TColumnChainInfo(4), std::make_shared<TSimpleFunction>(EOperation::Add), std::make_shared<TSimpleKernelLogic>()).DetachResult());
builder.Add(std::make_shared<TFilterProcessor>(TColumnChainInfo(2)));
builder.Add(std::make_shared<TProjectionProcessor>(TColumnChainInfo::BuildVector({ 2, 4 })));
auto chain = builder.Finish().DetachResult();
@@ -533,6 +533,7 @@ Y_UNIT_TEST_SUITE(ProgramStep) {
std::vector<std::string> data = { "aa", "aaa", "aaaa", "bbbbb" };
arrow::StringBuilder sb;
sb.AppendValues(data).ok();
+ using namespace NKikimr::NArrow::NSSA;
auto schema = std::make_shared<arrow::Schema>(
std::vector{ std::make_shared<arrow::Field>("int", arrow::int64()), std::make_shared<arrow::Field>("string", arrow::utf8()) });
@@ -551,33 +552,33 @@ Y_UNIT_TEST_SUITE(ProgramStep) {
builder.Add(proc);
}
{
- auto proc = TCalculationProcessor::Build(TColumnChainInfo::BuildVector({10001}), TColumnChainInfo(1001), std::make_shared<TSimpleFunction>(EOperation::MatchSubstring)).DetachResult();
- proc->SetYqlOperationId((ui32)NYql::TKernelRequestBuilder::EBinaryOp::StringContains);
+ auto proc = TCalculationProcessor::Build(TColumnChainInfo::BuildVector({10001}), TColumnChainInfo(1001), std::make_shared<TSimpleFunction>(EOperation::MatchSubstring),
+ std::make_shared<NKikimr::NArrow::NSSA::TLogicMatchString>(TIndexCheckOperation::EOperation::Contains, true, false)).DetachResult();
builder.Add(proc);
}
{
- auto proc = TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1001, 4}), TColumnChainInfo(1101), std::make_shared<TSimpleFunction>(EOperation::Add)).DetachResult();
- proc->SetYqlOperationId((ui32)NYql::TKernelRequestBuilder::EBinaryOp::Coalesce);
+ auto proc = TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1001, 4}), TColumnChainInfo(1101), std::make_shared<TSimpleFunction>(EOperation::Add),
+ std::make_shared<TSimpleKernelLogic>((ui32)NYql::TKernelRequestBuilder::EBinaryOp::Coalesce)).DetachResult();
builder.Add(proc);
}
{
- auto proc = TCalculationProcessor::Build(TColumnChainInfo::BuildVector({2}), TColumnChainInfo(1002), std::make_shared<TSimpleFunction>(EOperation::StartsWith)).DetachResult();
- proc->SetYqlOperationId((ui32)NYql::TKernelRequestBuilder::EBinaryOp::StartsWith);
+ auto proc = TCalculationProcessor::Build(TColumnChainInfo::BuildVector({2}), TColumnChainInfo(1002), std::make_shared<TSimpleFunction>(EOperation::StartsWith), std::make_shared<NKikimr::NArrow::NSSA::TLogicMatchString>(
+ NKikimr::NArrow::NSSA::TIndexCheckOperation::EOperation::StartsWith, true, false)).DetachResult();
builder.Add(proc);
}
{
- auto proc = TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1002, 4}), TColumnChainInfo(1102), std::make_shared<TSimpleFunction>(EOperation::Add)).DetachResult();
- proc->SetYqlOperationId((ui32)NYql::TKernelRequestBuilder::EBinaryOp::Coalesce);
+ auto proc = TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1002, 4}), TColumnChainInfo(1102), std::make_shared<TSimpleFunction>(EOperation::Add),
+ std::make_shared<TSimpleKernelLogic>((ui32)NYql::TKernelRequestBuilder::EBinaryOp::Coalesce)).DetachResult();
builder.Add(proc);
}
{
- auto proc = TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1, 3}), TColumnChainInfo(1003), std::make_shared<TSimpleFunction>(EOperation::Equal)).DetachResult();
- proc->SetYqlOperationId((ui32)NYql::TKernelRequestBuilder::EBinaryOp::Equals);
+ auto proc = TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1, 3}), TColumnChainInfo(1003), std::make_shared<TSimpleFunction>(EOperation::Equal),
+ std::make_shared<TLogicEquals>(false)).DetachResult();
builder.Add(proc);
}
{
- auto proc = TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1003, 4}), TColumnChainInfo(1103), std::make_shared<TSimpleFunction>(EOperation::Add)).DetachResult();
- proc->SetYqlOperationId((ui32)NYql::TKernelRequestBuilder::EBinaryOp::Coalesce);
+ auto proc = TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1003, 4}), TColumnChainInfo(1103), std::make_shared<TSimpleFunction>(EOperation::Add),
+ std::make_shared<TSimpleKernelLogic>((ui32)NYql::TKernelRequestBuilder::EBinaryOp::Coalesce)).DetachResult();
builder.Add(proc);
}
@@ -626,8 +627,8 @@ Y_UNIT_TEST_SUITE(ProgramStep) {
TSchemaColumnResolver resolver(schema);
NOptimization::TGraph::TBuilder builder(resolver);
NAggregation::TWithKeysAggregationProcessor::TBuilder aggrBuilder;
- builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1}), TColumnChainInfo(3), std::make_shared<NAggregation::TAggregateFunction>(EAggregate::Min)).DetachResult());
- builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({2}), TColumnChainInfo(4), std::make_shared<NAggregation::TAggregateFunction>(EAggregate::Max)).DetachResult());
+ builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1}), TColumnChainInfo(3), std::make_shared<NAggregation::TAggregateFunction>(EAggregate::Min), std::make_shared<TSimpleKernelLogic>()).DetachResult());
+ builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({2}), TColumnChainInfo(4), std::make_shared<NAggregation::TAggregateFunction>(EAggregate::Max), std::make_shared<TSimpleKernelLogic>()).DetachResult());
builder.Add(std::make_shared<TProjectionProcessor>(TColumnChainInfo::BuildVector({ 3, 4 })));
auto chain = builder.Finish().DetachResult();
auto sds = std::make_shared<TSimpleDataSource>();
@@ -655,8 +656,8 @@ Y_UNIT_TEST_SUITE(ProgramStep) {
TSchemaColumnResolver resolver(schema);
NOptimization::TGraph::TBuilder builder(resolver);
- builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1}), TColumnChainInfo(3), std::make_shared<NAggregation::TAggregateFunction>(EAggregate::Sum)).DetachResult());
- builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({2}), TColumnChainInfo(4), std::make_shared<NAggregation::TAggregateFunction>(EAggregate::Sum)).DetachResult());
+ builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({1}), TColumnChainInfo(3), std::make_shared<NAggregation::TAggregateFunction>(EAggregate::Sum), std::make_shared<TSimpleKernelLogic>()).DetachResult());
+ builder.Add(TCalculationProcessor::Build(TColumnChainInfo::BuildVector({2}), TColumnChainInfo(4), std::make_shared<NAggregation::TAggregateFunction>(EAggregate::Sum), std::make_shared<TSimpleKernelLogic>()).DetachResult());
builder.Add(std::make_shared<TProjectionProcessor>(TColumnChainInfo::BuildVector({ 3, 4 })));
auto chain = builder.Finish().DetachResult();
diff --git a/ydb/core/fq/libs/compute/ydb/control_plane/cms_grpc_client_actor.cpp b/ydb/core/fq/libs/compute/ydb/control_plane/cms_grpc_client_actor.cpp
index 57eff7fdf1..335917fde3 100644
--- a/ydb/core/fq/libs/compute/ydb/control_plane/cms_grpc_client_actor.cpp
+++ b/ydb/core/fq/libs/compute/ydb/control_plane/cms_grpc_client_actor.cpp
@@ -80,6 +80,7 @@ public:
void Handle(TEvYdbCompute::TEvCreateDatabaseRequest::TPtr& ev) {
const auto& request = *ev.Get()->Get();
auto forwardRequest = std::make_unique<TEvPrivate::TEvCreateDatabaseRequest>();
+ forwardRequest->Request.mutable_operation_params()->set_operation_mode(Ydb::Operations::OperationParams::SYNC);
forwardRequest->Request.mutable_serverless_resources()->set_shared_database_path(request.BasePath);
forwardRequest->Request.set_path(request.Path);
SetYdbRequestToken(*forwardRequest, CredentialsProvider->GetAuthInfo());
diff --git a/ydb/core/graph/shard/backends.cpp b/ydb/core/graph/shard/backends.cpp
index 0cd5af7134..809974e9a7 100644
--- a/ydb/core/graph/shard/backends.cpp
+++ b/ydb/core/graph/shard/backends.cpp
@@ -192,7 +192,7 @@ void TMemoryBackend::GetMetrics(const NKikimrGraph::TEvGetMetrics& get, NKikimrG
if (!get.GetSkipBorders()) {
if (get.HasTimeTo()) {
TInstant to(TInstant::Seconds(get.GetTimeTo()));
- if (metricValues.Timestamps.empty() || std::prev(itRight)->Timestamp < to) {
+ if (metricValues.Timestamps.empty() || (itLeft != itRight && std::prev(itRight)->Timestamp < to)) {
metricValues.Timestamps.push_back(to);
for (size_t num = 0; num < indexes.size(); ++num) {
metricValues.Values[num].push_back(NAN);
diff --git a/ydb/core/graph/shard/tx_monitoring.cpp b/ydb/core/graph/shard/tx_monitoring.cpp
index ea21c325f9..4c89ba8fd4 100644
--- a/ydb/core/graph/shard/tx_monitoring.cpp
+++ b/ydb/core/graph/shard/tx_monitoring.cpp
@@ -1,5 +1,6 @@
#include "shard_impl.h"
#include "log.h"
+#include <library/cpp/json/json_writer.h>
namespace NKikimr {
namespace NGraph {
@@ -102,7 +103,7 @@ public:
if (wasLine) {
html << "<br>";
}
- html << "arithmetic " << name << " " << value.ValueA << " " << value.Op << " " << value.ValueB;
+ html << "arithmetic " << name << " " << value.ValueA << " " << value.Op << " " << value.ValueB;
wasLine = true;
}
html << "</td></tr>";
@@ -113,6 +114,45 @@ public:
}
};
+class TTxMonitoringGetSettings : public TTransactionBase<TGraphShard> {
+private:
+ NMon::TEvRemoteHttpInfo::TPtr Event;
+
+public:
+ TTxMonitoringGetSettings(TGraphShard* shard, NMon::TEvRemoteHttpInfo::TPtr ev)
+ : TBase(shard)
+ , Event(std::move(ev))
+ {}
+
+ TTxType GetTxType() const override { return NGraphShard::TXTYPE_MONITORING; }
+
+ bool Execute(TTransactionContext&, const TActorContext&) override {
+ BLOG_D("TTxMonitoringGetSettings::Execute");
+ return true;
+ }
+
+ void Complete(const TActorContext& ctx) override {
+ BLOG_D("TTxMonitoringGetSettings::Complete");
+ NJson::TJsonValue json;
+ switch (Self->BackendType) {
+ case EBackendType::Memory:
+ json["backend"] = "Memory";
+ json["metrics_size"] = Self->MemoryBackend.MetricsIndex.size();
+ json["records_size"] = Self->MemoryBackend.MetricsValues.size();
+ break;
+ case EBackendType::Local:
+ json["backend"] = "Local";
+ json["metrics_size"] = Self->LocalBackend.MetricsIndex.size();
+ break;
+ case EBackendType::External:
+ json["backend"] = "External";
+ break;
+ }
+ ctx.Send(Event->Sender, new NMon::TEvRemoteJsonInfoRes(NJson::WriteJson(json, false)));
+ }
+};
+
+
void TGraphShard::ExecuteTxMonitoring(NMon::TEvRemoteHttpInfo::TPtr ev) {
if (ev->Get()->Cgi().Has("action")) {
if (ev->Get()->Cgi().Get("action") == "change_backend") {
@@ -123,6 +163,10 @@ void TGraphShard::ExecuteTxMonitoring(NMon::TEvRemoteHttpInfo::TPtr ev) {
return;
}
}
+ if (ev->Get()->Cgi().Get("action") == "get_settings") {
+ Execute(new TTxMonitoringGetSettings(this, std::move(ev)));
+ return;
+ }
Send(ev->Sender, new NMon::TEvRemoteHttpInfoRes("<html><p>bad parameters</p></html>"));
return;
}
diff --git a/ydb/core/graph/shard/ya.make b/ydb/core/graph/shard/ya.make
index e62c9b4926..de133aadc0 100644
--- a/ydb/core/graph/shard/ya.make
+++ b/ydb/core/graph/shard/ya.make
@@ -17,6 +17,7 @@ SRCS(
)
PEERDIR(
+ library/cpp/json
ydb/library/actors/core
ydb/core/base
ydb/core/cms/console
diff --git a/ydb/core/grpc_services/grpc_request_check_actor.h b/ydb/core/grpc_services/grpc_request_check_actor.h
index cafe74529c..079ba24d41 100644
--- a/ydb/core/grpc_services/grpc_request_check_actor.h
+++ b/ydb/core/grpc_services/grpc_request_check_actor.h
@@ -40,17 +40,36 @@ bool TGRpcRequestProxyHandleMethods::ValidateAndReplyOnError(TCtx* ctx) {
}
inline const TVector<TEvTicketParser::TEvAuthorizeTicket::TEntry>& GetEntriesForAuthAndCheckRequest(TEvRequestAuthAndCheck::TPtr& ev) {
- if (ev->Get()->YdbToken && ev->Get()->YdbToken->StartsWith("Bearer")) {
- if (AppData()->AuthConfig.GetUseAccessService()
- && (AppData()->DomainsConfig.GetSecurityConfig().ViewerAllowedSIDsSize() > 0 || AppData()->DomainsConfig.GetSecurityConfig().MonitoringAllowedSIDsSize() > 0)) {
- static TVector<NKikimr::TEvTicketParser::TEvAuthorizeTicket::TEntry> entries = {
- {NKikimr::TEvTicketParser::TEvAuthorizeTicket::ToPermissions({"ydb.developerApi.get", "ydb.developerApi.update"}), {{"gizmo_id", "gizmo"}}}
- };
- return entries;
+ const bool isBearerToken = ev->Get()->YdbToken && ev->Get()->YdbToken->StartsWith("Bearer");
+ const bool useAccessService = AppData()->AuthConfig.GetUseAccessService();
+ const bool hasClusterAccessResourceId = !AppData()->AuthConfig.GetClusterAccessResourceId().empty();
+ const bool needClusterAccessResourceCheck = AppData()->DomainsConfig.GetSecurityConfig().ViewerAllowedSIDsSize() > 0 ||
+ AppData()->DomainsConfig.GetSecurityConfig().MonitoringAllowedSIDsSize() > 0;
+
+ if (!isBearerToken || !useAccessService || !hasClusterAccessResourceId || !needClusterAccessResourceCheck) {
+ static const TVector<NKikimr::TEvTicketParser::TEvAuthorizeTicket::TEntry> emptyEntries = {};
+ return emptyEntries;
+ }
+
+ auto makeEntries = []() -> TVector<NKikimr::TEvTicketParser::TEvAuthorizeTicket::TEntry> {
+ const TString& accessServiceType = AppData()->AuthConfig.GetAccessServiceType();
+ TVector<TString> permissions;
+ if (accessServiceType == "Yandex_v2") {
+ permissions = {"ydb.developerApi.get", "ydb.developerApi.update"};
+ } else if (accessServiceType == "Nebius_v1") {
+ permissions = {"ydb.clusters.get", "ydb.clusters.monitor", "ydb.clusters.manage"};
+ } else {
+ return {};
}
- }
- static TVector<NKikimr::TEvTicketParser::TEvAuthorizeTicket::TEntry> emptyEntries = {};
- return emptyEntries;
+ const TString& clusterAccessResourceId = AppData()->AuthConfig.GetClusterAccessResourceId();
+ TVector<NKikimr::TEvTicketParser::TEvAuthorizeTicket::TEntry> entries = {
+ {NKikimr::TEvTicketParser::TEvAuthorizeTicket::ToPermissions(permissions), {{"gizmo_id", clusterAccessResourceId}}}
+ };
+ return entries;
+ };
+
+ static TVector<NKikimr::TEvTicketParser::TEvAuthorizeTicket::TEntry> entries = makeEntries();
+ return entries;
}
template <typename TEvent>
diff --git a/ydb/core/grpc_services/query/rpc_execute_query.cpp b/ydb/core/grpc_services/query/rpc_execute_query.cpp
index 8e0fe72525..489e606d28 100644
--- a/ydb/core/grpc_services/query/rpc_execute_query.cpp
+++ b/ydb/core/grpc_services/query/rpc_execute_query.cpp
@@ -386,7 +386,7 @@ private:
if (NeedReportStats(*Request_->GetProtoRequest())) {
if (record.HasQueryStats()) {
FillQueryStats(*response.mutable_exec_stats(), record.GetQueryStats());
- response.mutable_exec_stats()->set_query_plan(NKqp::SerializeAnalyzePlan(record.GetQueryStats()));
+ response.mutable_exec_stats()->set_query_plan(record.GetQueryPlan());
}
}
diff --git a/ydb/core/grpc_services/rpc_load_rows.cpp b/ydb/core/grpc_services/rpc_load_rows.cpp
index ffe6527fed..108594bb57 100644
--- a/ydb/core/grpc_services/rpc_load_rows.cpp
+++ b/ydb/core/grpc_services/rpc_load_rows.cpp
@@ -208,9 +208,7 @@ private:
return true;
}
- TVector<std::pair<TString, Ydb::Type>> GetRequestColumns(TString& errorMessage) const override {
- Y_UNUSED(errorMessage);
-
+ TConclusion<TVector<std::pair<TString, Ydb::Type>>> GetRequestColumns() const override {
const auto& type = GetProtoRequest(Request.get())->Getrows().Gettype();
const auto& rowType = type.Getlist_type();
const auto& rowFields = rowType.Getitem().Getstruct_type().Getmembers();
@@ -398,19 +396,18 @@ private:
return true;
}
- TVector<std::pair<TString, Ydb::Type>> GetRequestColumns(TString& errorMessage) const override {
+ TConclusion<TVector<std::pair<TString, Ydb::Type>>> GetRequestColumns() const override {
+ TVector<std::pair<TString, Ydb::Type>> out;
if (GetSourceType() == EUploadSource::CSV) {
// TODO: for CSV with header we have to extract columns from data (from first batch in file stream)
- return {};
+ return out;
}
auto schema = NArrow::DeserializeSchema(GetSourceSchema());
if (!schema) {
- errorMessage = TString("Wrong schema in bulk upsert data");
- return {};
+ return TConclusionStatus::Fail("Wrong schema in bulk upsert data");
}
- TVector<std::pair<TString, Ydb::Type>> out;
out.reserve(schema->num_fields());
for (auto& field : schema->fields()) {
@@ -419,8 +416,7 @@ private:
Ydb::Type ydbType;
if (!ConvertArrowToYdbPrimitive(*type, ydbType)) {
- errorMessage = TString("Cannot convert arrow type to ydb one: " + type->ToString());
- return {};
+ return TConclusionStatus::Fail("Cannot convert arrow type to ydb one: " + type->ToString());
}
out.emplace_back(name, std::move(ydbType));
}
diff --git a/ydb/core/grpc_services/rpc_read_rows.cpp b/ydb/core/grpc_services/rpc_read_rows.cpp
index 8dbebc6339..bc6b1e642c 100644
--- a/ydb/core/grpc_services/rpc_read_rows.cpp
+++ b/ydb/core/grpc_services/rpc_read_rows.cpp
@@ -607,7 +607,11 @@ public:
for (const auto& colMeta : RequestedColumnsMeta) {
const auto type = getTypeFromColMeta(colMeta);
auto* col = resultSet->Addcolumns();
- *col->mutable_type() = NYdb::TProtoAccessor::GetProto(type);
+ if (colMeta.IsNotNullColumn || colMeta.Type.GetTypeId() == NScheme::NTypeIds::Pg) { // pg type in nullable itself
+ *col->mutable_type() = NYdb::TProtoAccessor::GetProto(type);
+ } else {
+ *col->mutable_type()->mutable_optional_type()->mutable_item() = NYdb::TProtoAccessor::GetProto(type);
+ }
*col->mutable_name() = colMeta.Name;
}
@@ -637,18 +641,41 @@ public:
}
case NScheme::NTypeIds::Decimal: {
using namespace NYql::NDecimal;
-
- const auto loHi = cell.AsValue<std::pair<ui64, i64>>();
- Ydb::Value valueProto;
- valueProto.set_low_128(loHi.first);
- valueProto.set_high_128(loHi.second);
- const NYdb::TDecimalValue decimal(valueProto,
- {static_cast<ui8>(colMeta.Type.GetDecimalType().GetPrecision()), static_cast<ui8>(colMeta.Type.GetDecimalType().GetScale())});
- vb.Decimal(decimal);
+
+ NYdb::TDecimalType decimalType{
+ static_cast<ui8>(colMeta.Type.GetDecimalType().GetPrecision()),
+ static_cast<ui8>(colMeta.Type.GetDecimalType().GetScale())
+ };
+
+ if (cell.IsNull()) {
+ vb.EmptyOptional(NYdb::TTypeBuilder().Decimal(decimalType).Build());
+ } else {
+ const auto loHi = cell.AsValue<std::pair<ui64, i64>>();
+ Ydb::Value valueProto;
+ valueProto.set_low_128(loHi.first);
+ valueProto.set_high_128(loHi.second);
+ if (colMeta.IsNotNullColumn) {
+ vb.Decimal({valueProto, decimalType});
+ } else {
+ vb.BeginOptional();
+ vb.Decimal({valueProto, decimalType});
+ vb.EndOptional();
+ }
+ }
break;
}
default: {
- ProtoValueFromCell(vb, colMeta.Type, cell);
+ if (cell.IsNull()) {
+ vb.EmptyOptional((NYdb::EPrimitiveType)colMeta.Type.GetTypeId());
+ } else {
+ if (colMeta.IsNotNullColumn) {
+ ProtoValueFromCell(vb, colMeta.Type, cell);
+ } else {
+ vb.BeginOptional();
+ ProtoValueFromCell(vb, colMeta.Type, cell);
+ vb.EndOptional();
+ }
+ }
break;
}
}
@@ -744,6 +771,7 @@ private:
, Name(colInfo.Name)
, Type(colInfo.PType)
, PTypeMod(colInfo.PTypeMod)
+ , IsNotNullColumn(colInfo.IsNotNullColumn)
{
}
@@ -751,6 +779,7 @@ private:
TString Name;
NScheme::TTypeInfo Type;
TString PTypeMod;
+ bool IsNotNullColumn;
};
TVector<TColumnMeta> RequestedColumnsMeta;
diff --git a/ydb/core/kafka_proxy/actors/actors.h b/ydb/core/kafka_proxy/actors/actors.h
index 1fa2a9cd9e..727ab4ad2e 100644
--- a/ydb/core/kafka_proxy/actors/actors.h
+++ b/ydb/core/kafka_proxy/actors/actors.h
@@ -56,8 +56,6 @@ struct TContext {
return !RequireAuthentication || AuthenticationStep == SUCCESS;
}
-
- TActorId DiscoveryCacheActor;
};
template<std::derived_from<TApiMessage> T>
diff --git a/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.cpp b/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.cpp
index bbc7c83ec8..412b49d125 100644
--- a/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.cpp
+++ b/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.cpp
@@ -1,7 +1,8 @@
#include "kafka_init_producer_id_actor.h"
#include "kafka_init_producer_id_actor_sql.cpp"
-#include "../kafka_transactional_producers_initializers.h"
-#include "../kqp_helper.h"
+#include <ydb/core/kafka_proxy/kafka_transactional_producers_initializers.h>
+#include <ydb/core/kafka_proxy/kafka_transactions_coordinator.h>
+#include <ydb/core/kafka_proxy/kqp_helper.h>
#include <util/random/random.h>
#include <ydb/public/sdk/cpp/src/client/params/impl.h>
@@ -50,6 +51,7 @@ namespace NKafka {
void TKafkaInitProducerIdActor::Bootstrap(const NActors::TActorContext& ctx) {
if (IsTransactionalProducerInitialization()) {
Kqp = std::make_unique<TKqpTxHelper>(Context->DatabasePath);
+ KAFKA_LOG_D("Bootstrapping actor for transactional producer. Sending init table request to KQP.");
Kqp->SendInitTableRequest(ctx, NKikimr::NGRpcProxy::V1::TTransactionalProducersInitManager::GetInstant());
Become(&TKafkaInitProducerIdActor::StateWork);
} else {
@@ -71,6 +73,7 @@ namespace NKafka {
}
void TKafkaInitProducerIdActor::Handle(NMetadata::NProvider::TEvManagerPrepared::TPtr&, const TActorContext& ctx) {
+ KAFKA_LOG_D("Received TEvManagerPrepared. Sending create session request to KQP.");
Kqp->SendCreateSessionRequest(ctx);
}
@@ -119,6 +122,14 @@ namespace NKafka {
HandleQueryResponseFromKqp(ev, ctx);
}
+ void TKafkaInitProducerIdActor::Handle(NKafka::TEvKafka::TEvSaveTxnProducerResponse::TPtr& ev, const TActorContext& ctx) {
+ if (ev->Get()->Status == NKafka::TEvKafka::TEvSaveTxnProducerResponse::EStatus::PRODUCER_FENCED) {
+ SendResponseFail(EKafkaErrors::PRODUCER_FENCED, TStringBuilder() << "Failed to save producer state. Reason: " << ev->Get()->Message << ".");
+ } else {
+ SendSuccessfullResponseForTxProducer(PersistedProducerState, ctx);
+ }
+ }
+
void TKafkaInitProducerIdActor::RequestFullRetry(const TActorContext& ctx) {
CurrentTxAbortRetryNumber++;
Kqp->ResetTxId();
@@ -134,6 +145,7 @@ namespace NKafka {
}
void TKafkaInitProducerIdActor::StartTxProducerInitCycle(const TActorContext& ctx) {
+ KAFKA_LOG_D("Beginning transaction");
Kqp->BeginTransaction(++KqpReqCookie, ctx);
LastSentToKqpRequest = EInitProducerIdKqpRequests::BEGIN_TRANSACTION;
}
@@ -153,7 +165,7 @@ namespace NKafka {
break;
case INSERT:
case UPDATE:
- OnSuccessfullProducerStateUpdate(ev, ctx);
+ OnSuccessfullProducerStateUpdate(ev);
break;
case DELETE_REQ:
SendInsertRequest(ctx);
@@ -186,10 +198,12 @@ namespace NKafka {
}
}
- void TKafkaInitProducerIdActor::OnSuccessfullProducerStateUpdate(NKqp::TEvKqp::TEvQueryResponse::TPtr ev, const TActorContext& ctx) {
+ void TKafkaInitProducerIdActor::OnSuccessfullProducerStateUpdate(NKqp::TEvKqp::TEvQueryResponse::TPtr ev) {
auto producerState = ParseProducerState(ev).value();
- SendSuccessfullResponseForTxProducer(producerState, ctx);
+ PersistedProducerState = std::move(producerState);
+
+ SendSaveTxnProducerStateRequest(producerState);
}
// requests to producer_state table
@@ -265,6 +279,16 @@ namespace NKafka {
Die(ctx);
}
+ void TKafkaInitProducerIdActor::SendSaveTxnProducerStateRequest(const TProducerState& producerState) {
+ KAFKA_LOG_D("Sending save txn producer state request");
+
+ Send(NKafka::MakeKafkaTransactionsServiceID(), new TEvKafka::TEvSaveTxnProducerRequest(
+ producerState.TransactionalId,
+ producerState.ProducerId,
+ producerState.ProducerEpoch
+ ));
+ }
+
// helper methods
bool TKafkaInitProducerIdActor::IsTransactionalProducerInitialization() {
return NKikimr::AppData()->FeatureFlags.GetEnableKafkaTransactions() && !TransactionalId.empty();
diff --git a/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.h b/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.h
index f046001345..403c1ce9ea 100644
--- a/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.h
+++ b/ydb/core/kafka_proxy/actors/kafka_init_producer_id_actor.h
@@ -1,5 +1,6 @@
#include "actors.h"
-#include "../kqp_helper.h"
+#include <ydb/core/kafka_proxy/kqp_helper.h>
+#include <ydb/core/kafka_proxy/kafka_events.h>
#include <ydb/library/actors/core/actor_bootstrapped.h>
#include <ydb/services/metadata/abstract/initialization.h>
@@ -34,6 +35,8 @@ namespace NKafka {
private:
const TContext::TPtr Context;
+ // This field is used to temporarily save producer state when we send to KafkaTransactionCoordinator and await its response
+ TProducerState PersistedProducerState;
// Kafka related fields
const ui64 CorrelationId;
const TString TransactionalId;
@@ -54,21 +57,27 @@ namespace NKafka {
HFunc(NMetadata::NProvider::TEvManagerPrepared, Handle);
HFunc(NKqp::TEvKqp::TEvCreateSessionResponse, Handle);
HFunc(NKqp::TEvKqp::TEvQueryResponse, Handle);
+ HFunc(NKafka::TEvKafka::TEvSaveTxnProducerResponse, Handle);
SFunc(TEvents::TEvPoison, Die);
}
}
+ // events for KQP interaction
void Handle(NMetadata::NProvider::TEvManagerPrepared::TPtr&, const TActorContext& ctx);
void Handle(NKqp::TEvKqp::TEvCreateSessionResponse::TPtr& ev, const TActorContext& ctx);
void Handle(NKqp::TEvKqp::TEvQueryResponse::TPtr& ev, const TActorContext& ctx);
void RequestFullRetry(const TActorContext& ctx);
+ // event from KafkaTransactionCoordinator actor about successful or unsuccessful save of producer new state
+ void Handle(NKafka::TEvKafka::TEvSaveTxnProducerResponse::TPtr& ev, const TActorContext& ctx);
+
+
void Die(const TActorContext& ctx);
// methods with main logic
void StartTxProducerInitCycle(const TActorContext& ctx);
void HandleQueryResponseFromKqp(NKqp::TEvKqp::TEvQueryResponse::TPtr ev, const TActorContext& ctx);
void OnTxProducerStateReceived(NKqp::TEvKqp::TEvQueryResponse::TPtr ev, const TActorContext& ctx);
- void OnSuccessfullProducerStateUpdate(NKqp::TEvKqp::TEvQueryResponse::TPtr ev, const TActorContext& ctx);
+ void OnSuccessfullProducerStateUpdate(NKqp::TEvKqp::TEvQueryResponse::TPtr ev);
// requests to producer_state table
void SendSelectRequest(const TActorContext& ctx);
@@ -84,6 +93,7 @@ namespace NKafka {
// send responses methods
void SendResponseFail(EKafkaErrors error, const TString& message);
void SendSuccessfullResponseForTxProducer(const TProducerState& producerState, const TActorContext& ctx);
+ void SendSaveTxnProducerStateRequest(const TProducerState& producerState);
// helper methods
bool IsTransactionalProducerInitialization();
diff --git a/ydb/core/kafka_proxy/actors/kafka_metadata_actor.h b/ydb/core/kafka_proxy/actors/kafka_metadata_actor.h
index b2bf208b12..5566531915 100644
--- a/ydb/core/kafka_proxy/actors/kafka_metadata_actor.h
+++ b/ydb/core/kafka_proxy/actors/kafka_metadata_actor.h
@@ -12,6 +12,8 @@
namespace NKafka {
+TActorId MakeKafkaDiscoveryCacheID();
+
class TKafkaMetadataActor: public NActors::TActorBootstrapped<TKafkaMetadataActor> {
public:
TKafkaMetadataActor(const TContext::TPtr context, const ui64 correlationId, const TMessagePtr<TMetadataRequestData>& message,
diff --git a/ydb/core/kafka_proxy/actors/kafka_transaction_actor.h b/ydb/core/kafka_proxy/actors/kafka_transaction_actor.h
new file mode 100644
index 0000000000..fc1b11647e
--- /dev/null
+++ b/ydb/core/kafka_proxy/actors/kafka_transaction_actor.h
@@ -0,0 +1,32 @@
+#pragma once
+
+#include <ydb/library/actors/core/actor_bootstrapped.h>
+
+namespace NKafka {
+ /*
+ This class is responsible for one kafka transaction.
+
+ It accumulates transaction state (partitions in tx, offsets) and on commit submits transaction to KQP
+ */
+ class TKafkaTransactionActor : public NActors::TActorBootstrapped<TKafkaTransactionActor> {
+
+ using TBase = NActors::TActorBootstrapped<TKafkaTransactionActor>;
+
+ public:
+ void Bootstrap(const NActors::TActorContext&) {
+ TBase::Become(&TKafkaTransactionActor::StateWork);
+ }
+
+ TStringBuilder LogPrefix() const {
+ return TStringBuilder() << "KafkaTransactionActor";
+ }
+
+ private:
+ STFUNC(StateWork) {
+ switch (ev->GetTypeRewrite()) {
+ // will be implemented in a future PR
+ // ToDo: add poison pill handler
+ }
+ }
+ };
+} // namespace NKafka \ No newline at end of file
diff --git a/ydb/core/kafka_proxy/kafka_connection.cpp b/ydb/core/kafka_proxy/kafka_connection.cpp
index 7459487a99..eee2872f10 100644
--- a/ydb/core/kafka_proxy/kafka_connection.cpp
+++ b/ydb/core/kafka_proxy/kafka_connection.cpp
@@ -2,7 +2,9 @@
#include <ydb/core/base/appdata.h>
#include <ydb/core/raw_socket/sock_config.h>
#include <ydb/core/util/address_classifier.h>
+#include <ydb/core/kafka_proxy/kafka_transactions_coordinator.h>
#include <ydb/core/kafka_proxy/actors/kafka_balancer_actor.h>
+#include <ydb/core/kafka_proxy/actors/kafka_metadata_actor.h>
#include "actors/actors.h"
@@ -49,6 +51,7 @@ public:
TEvPollerReady* InactivityEvent = nullptr;
const TActorId ListenerActorId;
+ const TActorId KafkaTxnCoordinatorActorId = NKafka::MakeKafkaTransactionsServiceID();
TIntrusivePtr<TSocketDescriptor> Socket;
TSocketAddressType Address;
@@ -85,8 +88,7 @@ public:
TKafkaConnection(const TActorId& listenerActorId,
TIntrusivePtr<TSocketDescriptor> socket,
TNetworkConfig::TSocketAddressType address,
- const NKikimrConfig::TKafkaProxyConfig& config,
- const TActorId& discoveryCacheActorId)
+ const NKikimrConfig::TKafkaProxyConfig& config)
: ListenerActorId(listenerActorId)
, Socket(std::move(socket))
, Address(address)
@@ -98,7 +100,6 @@ public:
{
SetNonBlock();
IsSslRequired = Socket->IsSslSupported();
- Context->DiscoveryCacheActor = discoveryCacheActorId;
}
void Bootstrap() {
@@ -292,7 +293,7 @@ protected:
}
void HandleMessage(TRequestHeaderData* header, const TMessagePtr<TMetadataRequestData>& message) {
- Register(CreateKafkaMetadataActor(Context, header->CorrelationId, message, Context->DiscoveryCacheActor));
+ Register(CreateKafkaMetadataActor(Context, header->CorrelationId, message, NKafka::MakeKafkaDiscoveryCacheID()));
}
void HandleMessage(const TRequestHeaderData* header, const TMessagePtr<TSaslAuthenticateRequestData>& message) {
@@ -805,9 +806,8 @@ protected:
NActors::IActor* CreateKafkaConnection(const TActorId& listenerActorId,
TIntrusivePtr<TSocketDescriptor> socket,
TNetworkConfig::TSocketAddressType address,
- const NKikimrConfig::TKafkaProxyConfig& config,
- const TActorId& discoveryCacheActorId) {
- return new TKafkaConnection(listenerActorId, std::move(socket), std::move(address), config, discoveryCacheActorId);
+ const NKikimrConfig::TKafkaProxyConfig& config) {
+ return new TKafkaConnection(listenerActorId, std::move(socket), std::move(address), config);
}
} // namespace NKafka
diff --git a/ydb/core/kafka_proxy/kafka_connection.h b/ydb/core/kafka_proxy/kafka_connection.h
index 344b6f1654..68b9e237ef 100644
--- a/ydb/core/kafka_proxy/kafka_connection.h
+++ b/ydb/core/kafka_proxy/kafka_connection.h
@@ -12,7 +12,6 @@ using namespace NKikimr::NRawSocket;
NActors::IActor* CreateKafkaConnection(const TActorId& listenerActorId,
TIntrusivePtr<TSocketDescriptor> socket,
TNetworkConfig::TSocketAddressType address,
- const NKikimrConfig::TKafkaProxyConfig& config,
- const TActorId& discoveryCacheActorId);
+ const NKikimrConfig::TKafkaProxyConfig& config);
} // namespace NKafka
diff --git a/ydb/core/kafka_proxy/kafka_events.h b/ydb/core/kafka_proxy/kafka_events.h
index 14855d40c1..1d15cddc6e 100644
--- a/ydb/core/kafka_proxy/kafka_events.h
+++ b/ydb/core/kafka_proxy/kafka_events.h
@@ -30,6 +30,12 @@ struct TEvKafka {
EvCommitedOffsetsResponse,
EvCreateTopicsResponse,
EvReadSessionInfo,
+ EvSaveTxnProducerRequest,
+ EvSaveTxnProducerResponse,
+ EvAddPartitionsToTxnRequest,
+ EvAddOffsetsToTxnRequest,
+ EvTxnOffsetCommitRequest,
+ EvEndTxnRequest,
EvResponse = EvRequest + 256,
EvInternalEvents = EvResponse + 256,
EvEnd
@@ -246,6 +252,91 @@ struct TEvTopicModificationResponse : public NActors::TEventLocal<TEvTopicModifi
EKafkaErrors Status;
TString Message;
};
+
+struct TEvAddPartitionsToTxnRequest : public TEventLocal<TEvAddPartitionsToTxnRequest, EvAddPartitionsToTxnRequest> {
+ TEvAddPartitionsToTxnRequest(const ui64 correlationId, const TMessagePtr<TAddPartitionsToTxnRequestData>& request, const TActorId connectionId)
+ : CorrelationId(correlationId)
+ , Request(request)
+ , ConnectionId(connectionId)
+ {}
+
+ ui64 CorrelationId;
+ const TMessagePtr<TAddPartitionsToTxnRequestData> Request;
+ TActorId ConnectionId;
+};
+
+struct TEvAddOffsetsToTxnRequest : public TEventLocal<TEvAddOffsetsToTxnRequest, EvAddOffsetsToTxnRequest> {
+ TEvAddOffsetsToTxnRequest(const ui64 correlationId, const TMessagePtr<TAddOffsetsToTxnRequestData>& request, const TActorId connectionId)
+ : CorrelationId(correlationId)
+ , Request(request)
+ , ConnectionId(connectionId)
+ {}
+
+ ui64 CorrelationId;
+ const TMessagePtr<TAddOffsetsToTxnRequestData> Request;
+ TActorId ConnectionId;
+};
+
+struct TEvTxnOffsetCommitRequest : public TEventLocal<TEvTxnOffsetCommitRequest, EvTxnOffsetCommitRequest> {
+ TEvTxnOffsetCommitRequest(const ui64 correlationId, const TMessagePtr<TTxnOffsetCommitRequestData>& request, const TActorId connectionId)
+ : CorrelationId(correlationId)
+ , Request(request)
+ , ConnectionId(connectionId)
+ {}
+
+ ui64 CorrelationId;
+ const TMessagePtr<TTxnOffsetCommitRequestData> Request;
+ TActorId ConnectionId;
+};
+
+struct TEvEndTxnRequest : public TEventLocal<TEvEndTxnRequest, EvEndTxnRequest> {
+ TEvEndTxnRequest(const ui64 correlationId, const TMessagePtr<TEndTxnRequestData>& request, const TActorId connectionId)
+ : CorrelationId(correlationId)
+ , Request(request)
+ , ConnectionId(connectionId)
+ {}
+
+ ui64 CorrelationId;
+ const TMessagePtr<TEndTxnRequestData> Request;
+ TActorId ConnectionId;
+};
+
+/*
+Event sent from TInitProducerIdActor to TKafkaTransactionRouter to notify that producer id will be obtained by client
+ */
+struct TEvSaveTxnProducerRequest : public NActors::TEventLocal<TEvSaveTxnProducerRequest, EvSaveTxnProducerRequest> {
+ TEvSaveTxnProducerRequest(const TString& transactionalId, const i64 producerId, const i16 producerEpoch) :
+ TransactionalId(std::move(transactionalId)),
+ ProducerId(producerId),
+ ProducerEpoch(producerEpoch)
+ {}
+
+ const TString TransactionalId;
+ const i64 ProducerId;
+ const i16 ProducerEpoch;
+};
+
+/*
+Event sent from TKafkaTransactionRouter to TInitProducerIdActor to notify that new transactional id was successfully saved
+
+OK if this transactional producer was not found or older version was found
+PRODUCER_FENCED if newer version of this transactional producer was found
+ */
+struct TEvSaveTxnProducerResponse : public NActors::TEventLocal<TEvSaveTxnProducerResponse, EvSaveTxnProducerResponse> {
+
+ enum EStatus {
+ OK,
+ PRODUCER_FENCED,
+ };
+
+ TEvSaveTxnProducerResponse(EStatus status, const TString& message) :
+ Status(status),
+ Message(std::move(message))
+ {}
+
+ EStatus Status;
+ TString Message;
};
+}; // struct TEvKafka
} // namespace NKafka
diff --git a/ydb/core/kafka_proxy/kafka_listener.h b/ydb/core/kafka_proxy/kafka_listener.h
index c8ef8ef069..e6a988710b 100644
--- a/ydb/core/kafka_proxy/kafka_listener.h
+++ b/ydb/core/kafka_proxy/kafka_listener.h
@@ -7,17 +7,13 @@ namespace NKafka {
using namespace NKikimr::NRawSocket;
-
-TActorId MakeKafkaDiscoveryCacheID();
-
inline NActors::IActor* CreateKafkaListener(
- const NActors::TActorId& poller, const TListenerSettings& settings, const NKikimrConfig::TKafkaProxyConfig& config,
- const TActorId& discoveryCacheActorId
+ const NActors::TActorId& poller, const TListenerSettings& settings, const NKikimrConfig::TKafkaProxyConfig& config
) {
return CreateSocketListener(
poller, settings,
[=](const TActorId& listenerActorId, TIntrusivePtr<TSocketDescriptor> socket, TNetworkConfig::TSocketAddressType address) {
- return CreateKafkaConnection(listenerActorId, socket, address, config, discoveryCacheActorId);
+ return CreateKafkaConnection(listenerActorId, socket, address, config);
},
NKikimrServices::EServiceKikimr::KAFKA_PROXY, EErrorAction::Abort);
}
diff --git a/ydb/core/kafka_proxy/kafka_messages.cpp b/ydb/core/kafka_proxy/kafka_messages.cpp
index ea50786a15..69e830f8c5 100644
--- a/ydb/core/kafka_proxy/kafka_messages.cpp
+++ b/ydb/core/kafka_proxy/kafka_messages.cpp
@@ -23,6 +23,11 @@ const std::unordered_map<EApiKey, TString> EApiKeyNames = {
{EApiKey::API_VERSIONS, "API_VERSIONS"},
{EApiKey::CREATE_TOPICS, "CREATE_TOPICS"},
{EApiKey::INIT_PRODUCER_ID, "INIT_PRODUCER_ID"},
+ {EApiKey::ADD_PARTITIONS_TO_TXN, "ADD_PARTITIONS_TO_TXN"},
+ {EApiKey::ADD_OFFSETS_TO_TXN, "ADD_OFFSETS_TO_TXN"},
+ {EApiKey::END_TXN, "END_TXN"},
+ {EApiKey::TXN_OFFSET_COMMIT, "TXN_OFFSET_COMMIT"},
+ {EApiKey::DESCRIBE_CONFIGS, "DESCRIBE_CONFIGS"},
{EApiKey::ALTER_CONFIGS, "ALTER_CONFIGS"},
{EApiKey::SASL_AUTHENTICATE, "SASL_AUTHENTICATE"},
{EApiKey::CREATE_PARTITIONS, "CREATE_PARTITIONS"},
@@ -61,6 +66,16 @@ std::unique_ptr<TApiMessage> CreateRequest(i16 apiKey) {
return std::make_unique<TCreateTopicsRequestData>();
case INIT_PRODUCER_ID:
return std::make_unique<TInitProducerIdRequestData>();
+ case ADD_PARTITIONS_TO_TXN:
+ return std::make_unique<TAddPartitionsToTxnRequestData>();
+ case ADD_OFFSETS_TO_TXN:
+ return std::make_unique<TAddOffsetsToTxnRequestData>();
+ case END_TXN:
+ return std::make_unique<TEndTxnRequestData>();
+ case TXN_OFFSET_COMMIT:
+ return std::make_unique<TTxnOffsetCommitRequestData>();
+ case DESCRIBE_CONFIGS:
+ return std::make_unique<TDescribeConfigsRequestData>();
case ALTER_CONFIGS:
return std::make_unique<TAlterConfigsRequestData>();
case SASL_AUTHENTICATE:
@@ -104,6 +119,16 @@ std::unique_ptr<TApiMessage> CreateResponse(i16 apiKey) {
return std::make_unique<TCreateTopicsResponseData>();
case INIT_PRODUCER_ID:
return std::make_unique<TInitProducerIdResponseData>();
+ case ADD_PARTITIONS_TO_TXN:
+ return std::make_unique<TAddPartitionsToTxnResponseData>();
+ case ADD_OFFSETS_TO_TXN:
+ return std::make_unique<TAddOffsetsToTxnResponseData>();
+ case END_TXN:
+ return std::make_unique<TEndTxnResponseData>();
+ case TXN_OFFSET_COMMIT:
+ return std::make_unique<TTxnOffsetCommitResponseData>();
+ case DESCRIBE_CONFIGS:
+ return std::make_unique<TDescribeConfigsResponseData>();
case ALTER_CONFIGS:
return std::make_unique<TAlterConfigsResponseData>();
case SASL_AUTHENTICATE:
@@ -203,6 +228,36 @@ TKafkaVersion RequestHeaderVersion(i16 apiKey, TKafkaVersion _version) {
} else {
return 1;
}
+ case ADD_PARTITIONS_TO_TXN:
+ if (_version >= 3) {
+ return 2;
+ } else {
+ return 1;
+ }
+ case ADD_OFFSETS_TO_TXN:
+ if (_version >= 3) {
+ return 2;
+ } else {
+ return 1;
+ }
+ case END_TXN:
+ if (_version >= 3) {
+ return 2;
+ } else {
+ return 1;
+ }
+ case TXN_OFFSET_COMMIT:
+ if (_version >= 3) {
+ return 2;
+ } else {
+ return 1;
+ }
+ case DESCRIBE_CONFIGS:
+ if (_version >= 4) {
+ return 2;
+ } else {
+ return 1;
+ }
case ALTER_CONFIGS:
if (_version >= 2) {
return 2;
@@ -313,6 +368,36 @@ TKafkaVersion ResponseHeaderVersion(i16 apiKey, TKafkaVersion _version) {
} else {
return 0;
}
+ case ADD_PARTITIONS_TO_TXN:
+ if (_version >= 3) {
+ return 1;
+ } else {
+ return 0;
+ }
+ case ADD_OFFSETS_TO_TXN:
+ if (_version >= 3) {
+ return 1;
+ } else {
+ return 0;
+ }
+ case END_TXN:
+ if (_version >= 3) {
+ return 1;
+ } else {
+ return 0;
+ }
+ case TXN_OFFSET_COMMIT:
+ if (_version >= 3) {
+ return 1;
+ } else {
+ return 0;
+ }
+ case DESCRIBE_CONFIGS:
+ if (_version >= 4) {
+ return 1;
+ } else {
+ return 0;
+ }
case ALTER_CONFIGS:
if (_version >= 2) {
return 1;
@@ -349,7 +434,7 @@ const TRequestHeaderData::RequestApiVersionMeta::Type TRequestHeaderData::Reques
const TRequestHeaderData::CorrelationIdMeta::Type TRequestHeaderData::CorrelationIdMeta::Default = 0;
const TRequestHeaderData::ClientIdMeta::Type TRequestHeaderData::ClientIdMeta::Default = {""};
-TRequestHeaderData::TRequestHeaderData()
+TRequestHeaderData::TRequestHeaderData()
: RequestApiKey(RequestApiKeyMeta::Default)
, RequestApiVersion(RequestApiVersionMeta::Default)
, CorrelationId(CorrelationIdMeta::Default)
@@ -364,7 +449,7 @@ void TRequestHeaderData::Read(TKafkaReadable& _readable, TKafkaVersion _version)
NPrivate::Read<RequestApiVersionMeta>(_readable, _version, RequestApiVersion);
NPrivate::Read<CorrelationIdMeta>(_readable, _version, CorrelationId);
NPrivate::Read<ClientIdMeta>(_readable, _version, ClientId);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -388,10 +473,10 @@ void TRequestHeaderData::Write(TKafkaWritable& _writable, TKafkaVersion _version
NPrivate::Write<RequestApiVersionMeta>(_collector, _writable, _version, RequestApiVersion);
NPrivate::Write<CorrelationIdMeta>(_collector, _writable, _version, CorrelationId);
NPrivate::Write<ClientIdMeta>(_collector, _writable, _version, ClientId);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -401,7 +486,7 @@ i32 TRequestHeaderData::Size(TKafkaVersion _version) const {
NPrivate::Size<RequestApiVersionMeta>(_collector, _version, RequestApiVersion);
NPrivate::Size<CorrelationIdMeta>(_collector, _version, CorrelationId);
NPrivate::Size<ClientIdMeta>(_collector, _version, ClientId);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -414,7 +499,7 @@ i32 TRequestHeaderData::Size(TKafkaVersion _version) const {
//
const TResponseHeaderData::CorrelationIdMeta::Type TResponseHeaderData::CorrelationIdMeta::Default = 0;
-TResponseHeaderData::TResponseHeaderData()
+TResponseHeaderData::TResponseHeaderData()
: CorrelationId(CorrelationIdMeta::Default)
{}
@@ -423,7 +508,7 @@ void TResponseHeaderData::Read(TKafkaReadable& _readable, TKafkaVersion _version
ythrow yexception() << "Can't read version " << _version << " of TResponseHeaderData";
}
NPrivate::Read<CorrelationIdMeta>(_readable, _version, CorrelationId);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -444,17 +529,17 @@ void TResponseHeaderData::Write(TKafkaWritable& _writable, TKafkaVersion _versio
}
NPrivate::TWriteCollector _collector;
NPrivate::Write<CorrelationIdMeta>(_collector, _writable, _version, CorrelationId);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
i32 TResponseHeaderData::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<CorrelationIdMeta>(_collector, _version, CorrelationId);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -469,7 +554,7 @@ const TProduceRequestData::TransactionalIdMeta::Type TProduceRequestData::Transa
const TProduceRequestData::AcksMeta::Type TProduceRequestData::AcksMeta::Default = 0;
const TProduceRequestData::TimeoutMsMeta::Type TProduceRequestData::TimeoutMsMeta::Default = 0;
-TProduceRequestData::TProduceRequestData()
+TProduceRequestData::TProduceRequestData()
: TransactionalId(TransactionalIdMeta::Default)
, Acks(AcksMeta::Default)
, TimeoutMs(TimeoutMsMeta::Default)
@@ -483,7 +568,7 @@ void TProduceRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _version
NPrivate::Read<AcksMeta>(_readable, _version, Acks);
NPrivate::Read<TimeoutMsMeta>(_readable, _version, TimeoutMs);
NPrivate::Read<TopicDataMeta>(_readable, _version, TopicData);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -507,10 +592,10 @@ void TProduceRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _versio
NPrivate::Write<AcksMeta>(_collector, _writable, _version, Acks);
NPrivate::Write<TimeoutMsMeta>(_collector, _writable, _version, TimeoutMs);
NPrivate::Write<TopicDataMeta>(_collector, _writable, _version, TopicData);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -520,7 +605,7 @@ i32 TProduceRequestData::Size(TKafkaVersion _version) const {
NPrivate::Size<AcksMeta>(_collector, _version, Acks);
NPrivate::Size<TimeoutMsMeta>(_collector, _version, TimeoutMs);
NPrivate::Size<TopicDataMeta>(_collector, _version, TopicData);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -533,7 +618,7 @@ i32 TProduceRequestData::Size(TKafkaVersion _version) const {
//
const TProduceRequestData::TTopicProduceData::NameMeta::Type TProduceRequestData::TTopicProduceData::NameMeta::Default = {""};
-TProduceRequestData::TTopicProduceData::TTopicProduceData()
+TProduceRequestData::TTopicProduceData::TTopicProduceData()
: Name(NameMeta::Default)
{}
@@ -543,7 +628,7 @@ void TProduceRequestData::TTopicProduceData::Read(TKafkaReadable& _readable, TKa
}
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<PartitionDataMeta>(_readable, _version, PartitionData);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -565,10 +650,10 @@ void TProduceRequestData::TTopicProduceData::Write(TKafkaWritable& _writable, TK
NPrivate::TWriteCollector _collector;
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<PartitionDataMeta>(_collector, _writable, _version, PartitionData);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -576,7 +661,7 @@ i32 TProduceRequestData::TTopicProduceData::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<PartitionDataMeta>(_collector, _version, PartitionData);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -589,7 +674,7 @@ i32 TProduceRequestData::TTopicProduceData::Size(TKafkaVersion _version) const {
//
const TProduceRequestData::TTopicProduceData::TPartitionProduceData::IndexMeta::Type TProduceRequestData::TTopicProduceData::TPartitionProduceData::IndexMeta::Default = 0;
-TProduceRequestData::TTopicProduceData::TPartitionProduceData::TPartitionProduceData()
+TProduceRequestData::TTopicProduceData::TPartitionProduceData::TPartitionProduceData()
: Index(IndexMeta::Default)
{}
@@ -599,7 +684,7 @@ void TProduceRequestData::TTopicProduceData::TPartitionProduceData::Read(TKafkaR
}
NPrivate::Read<IndexMeta>(_readable, _version, Index);
NPrivate::Read<RecordsMeta>(_readable, _version, Records);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -621,10 +706,10 @@ void TProduceRequestData::TTopicProduceData::TPartitionProduceData::Write(TKafka
NPrivate::TWriteCollector _collector;
NPrivate::Write<IndexMeta>(_collector, _writable, _version, Index);
NPrivate::Write<RecordsMeta>(_collector, _writable, _version, Records);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -632,7 +717,7 @@ i32 TProduceRequestData::TTopicProduceData::TPartitionProduceData::Size(TKafkaVe
NPrivate::TSizeCollector _collector;
NPrivate::Size<IndexMeta>(_collector, _version, Index);
NPrivate::Size<RecordsMeta>(_collector, _version, Records);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -645,7 +730,7 @@ i32 TProduceRequestData::TTopicProduceData::TPartitionProduceData::Size(TKafkaVe
//
const TProduceResponseData::ThrottleTimeMsMeta::Type TProduceResponseData::ThrottleTimeMsMeta::Default = 0;
-TProduceResponseData::TProduceResponseData()
+TProduceResponseData::TProduceResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
{}
@@ -655,7 +740,7 @@ void TProduceResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _versio
}
NPrivate::Read<ResponsesMeta>(_readable, _version, Responses);
NPrivate::Read<ThrottleTimeMsMeta>(_readable, _version, ThrottleTimeMs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -677,10 +762,10 @@ void TProduceResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _versi
NPrivate::TWriteCollector _collector;
NPrivate::Write<ResponsesMeta>(_collector, _writable, _version, Responses);
NPrivate::Write<ThrottleTimeMsMeta>(_collector, _writable, _version, ThrottleTimeMs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -688,7 +773,7 @@ i32 TProduceResponseData::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<ResponsesMeta>(_collector, _version, Responses);
NPrivate::Size<ThrottleTimeMsMeta>(_collector, _version, ThrottleTimeMs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -701,7 +786,7 @@ i32 TProduceResponseData::Size(TKafkaVersion _version) const {
//
const TProduceResponseData::TTopicProduceResponse::NameMeta::Type TProduceResponseData::TTopicProduceResponse::NameMeta::Default = {""};
-TProduceResponseData::TTopicProduceResponse::TTopicProduceResponse()
+TProduceResponseData::TTopicProduceResponse::TTopicProduceResponse()
: Name(NameMeta::Default)
{}
@@ -711,7 +796,7 @@ void TProduceResponseData::TTopicProduceResponse::Read(TKafkaReadable& _readable
}
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<PartitionResponsesMeta>(_readable, _version, PartitionResponses);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -733,10 +818,10 @@ void TProduceResponseData::TTopicProduceResponse::Write(TKafkaWritable& _writabl
NPrivate::TWriteCollector _collector;
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<PartitionResponsesMeta>(_collector, _writable, _version, PartitionResponses);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -744,7 +829,7 @@ i32 TProduceResponseData::TTopicProduceResponse::Size(TKafkaVersion _version) co
NPrivate::TSizeCollector _collector;
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<PartitionResponsesMeta>(_collector, _version, PartitionResponses);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -762,7 +847,7 @@ const TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::Lo
const TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::LogStartOffsetMeta::Type TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::LogStartOffsetMeta::Default = -1;
const TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::ErrorMessageMeta::Type TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::ErrorMessageMeta::Default = std::nullopt;
-TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::TPartitionProduceResponse()
+TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::TPartitionProduceResponse()
: Index(IndexMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
, BaseOffset(BaseOffsetMeta::Default)
@@ -782,7 +867,7 @@ void TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::Rea
NPrivate::Read<LogStartOffsetMeta>(_readable, _version, LogStartOffset);
NPrivate::Read<RecordErrorsMeta>(_readable, _version, RecordErrors);
NPrivate::Read<ErrorMessageMeta>(_readable, _version, ErrorMessage);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -809,10 +894,10 @@ void TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::Wri
NPrivate::Write<LogStartOffsetMeta>(_collector, _writable, _version, LogStartOffset);
NPrivate::Write<RecordErrorsMeta>(_collector, _writable, _version, RecordErrors);
NPrivate::Write<ErrorMessageMeta>(_collector, _writable, _version, ErrorMessage);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -825,7 +910,7 @@ i32 TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::Size
NPrivate::Size<LogStartOffsetMeta>(_collector, _version, LogStartOffset);
NPrivate::Size<RecordErrorsMeta>(_collector, _version, RecordErrors);
NPrivate::Size<ErrorMessageMeta>(_collector, _version, ErrorMessage);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -839,7 +924,7 @@ i32 TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::Size
const TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::TBatchIndexAndErrorMessage::BatchIndexMeta::Type TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::TBatchIndexAndErrorMessage::BatchIndexMeta::Default = 0;
const TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::TBatchIndexAndErrorMessage::BatchIndexErrorMessageMeta::Type TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::TBatchIndexAndErrorMessage::BatchIndexErrorMessageMeta::Default = std::nullopt;
-TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::TBatchIndexAndErrorMessage::TBatchIndexAndErrorMessage()
+TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::TBatchIndexAndErrorMessage::TBatchIndexAndErrorMessage()
: BatchIndex(BatchIndexMeta::Default)
, BatchIndexErrorMessage(BatchIndexErrorMessageMeta::Default)
{}
@@ -850,7 +935,7 @@ void TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::TBa
}
NPrivate::Read<BatchIndexMeta>(_readable, _version, BatchIndex);
NPrivate::Read<BatchIndexErrorMessageMeta>(_readable, _version, BatchIndexErrorMessage);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -872,10 +957,10 @@ void TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::TBa
NPrivate::TWriteCollector _collector;
NPrivate::Write<BatchIndexMeta>(_collector, _writable, _version, BatchIndex);
NPrivate::Write<BatchIndexErrorMessageMeta>(_collector, _writable, _version, BatchIndexErrorMessage);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -883,7 +968,7 @@ i32 TProduceResponseData::TTopicProduceResponse::TPartitionProduceResponse::TBat
NPrivate::TSizeCollector _collector;
NPrivate::Size<BatchIndexMeta>(_collector, _version, BatchIndex);
NPrivate::Size<BatchIndexErrorMessageMeta>(_collector, _version, BatchIndexErrorMessage);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -904,7 +989,7 @@ const TFetchRequestData::SessionIdMeta::Type TFetchRequestData::SessionIdMeta::D
const TFetchRequestData::SessionEpochMeta::Type TFetchRequestData::SessionEpochMeta::Default = -1;
const TFetchRequestData::RackIdMeta::Type TFetchRequestData::RackIdMeta::Default = {""};
-TFetchRequestData::TFetchRequestData()
+TFetchRequestData::TFetchRequestData()
: ClusterId(ClusterIdMeta::Default)
, ReplicaId(ReplicaIdMeta::Default)
, MaxWaitMs(MaxWaitMsMeta::Default)
@@ -931,7 +1016,7 @@ void TFetchRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _version)
NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
NPrivate::Read<ForgottenTopicsDataMeta>(_readable, _version, ForgottenTopicsData);
NPrivate::Read<RackIdMeta>(_readable, _version, RackId);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -965,10 +1050,10 @@ void TFetchRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _version)
NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
NPrivate::Write<ForgottenTopicsDataMeta>(_collector, _writable, _version, ForgottenTopicsData);
NPrivate::Write<RackIdMeta>(_collector, _writable, _version, RackId);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
NPrivate::WriteTag<ClusterIdMeta>(_writable, _version, ClusterId);
}
}
@@ -986,7 +1071,7 @@ i32 TFetchRequestData::Size(TKafkaVersion _version) const {
NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
NPrivate::Size<ForgottenTopicsDataMeta>(_collector, _version, ForgottenTopicsData);
NPrivate::Size<RackIdMeta>(_collector, _version, RackId);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1000,7 +1085,7 @@ i32 TFetchRequestData::Size(TKafkaVersion _version) const {
const TFetchRequestData::TFetchTopic::TopicMeta::Type TFetchRequestData::TFetchTopic::TopicMeta::Default = {""};
const TFetchRequestData::TFetchTopic::TopicIdMeta::Type TFetchRequestData::TFetchTopic::TopicIdMeta::Default = TKafkaUuid(0, 0);
-TFetchRequestData::TFetchTopic::TFetchTopic()
+TFetchRequestData::TFetchTopic::TFetchTopic()
: Topic(TopicMeta::Default)
, TopicId(TopicIdMeta::Default)
{}
@@ -1012,7 +1097,7 @@ void TFetchRequestData::TFetchTopic::Read(TKafkaReadable& _readable, TKafkaVersi
NPrivate::Read<TopicMeta>(_readable, _version, Topic);
NPrivate::Read<TopicIdMeta>(_readable, _version, TopicId);
NPrivate::Read<PartitionsMeta>(_readable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1035,10 +1120,10 @@ void TFetchRequestData::TFetchTopic::Write(TKafkaWritable& _writable, TKafkaVers
NPrivate::Write<TopicMeta>(_collector, _writable, _version, Topic);
NPrivate::Write<TopicIdMeta>(_collector, _writable, _version, TopicId);
NPrivate::Write<PartitionsMeta>(_collector, _writable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -1047,7 +1132,7 @@ i32 TFetchRequestData::TFetchTopic::Size(TKafkaVersion _version) const {
NPrivate::Size<TopicMeta>(_collector, _version, Topic);
NPrivate::Size<TopicIdMeta>(_collector, _version, TopicId);
NPrivate::Size<PartitionsMeta>(_collector, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1065,7 +1150,7 @@ const TFetchRequestData::TFetchTopic::TFetchPartition::LastFetchedEpochMeta::Typ
const TFetchRequestData::TFetchTopic::TFetchPartition::LogStartOffsetMeta::Type TFetchRequestData::TFetchTopic::TFetchPartition::LogStartOffsetMeta::Default = -1;
const TFetchRequestData::TFetchTopic::TFetchPartition::PartitionMaxBytesMeta::Type TFetchRequestData::TFetchTopic::TFetchPartition::PartitionMaxBytesMeta::Default = 0;
-TFetchRequestData::TFetchTopic::TFetchPartition::TFetchPartition()
+TFetchRequestData::TFetchTopic::TFetchPartition::TFetchPartition()
: Partition(PartitionMeta::Default)
, CurrentLeaderEpoch(CurrentLeaderEpochMeta::Default)
, FetchOffset(FetchOffsetMeta::Default)
@@ -1084,7 +1169,7 @@ void TFetchRequestData::TFetchTopic::TFetchPartition::Read(TKafkaReadable& _read
NPrivate::Read<LastFetchedEpochMeta>(_readable, _version, LastFetchedEpoch);
NPrivate::Read<LogStartOffsetMeta>(_readable, _version, LogStartOffset);
NPrivate::Read<PartitionMaxBytesMeta>(_readable, _version, PartitionMaxBytes);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1110,10 +1195,10 @@ void TFetchRequestData::TFetchTopic::TFetchPartition::Write(TKafkaWritable& _wri
NPrivate::Write<LastFetchedEpochMeta>(_collector, _writable, _version, LastFetchedEpoch);
NPrivate::Write<LogStartOffsetMeta>(_collector, _writable, _version, LogStartOffset);
NPrivate::Write<PartitionMaxBytesMeta>(_collector, _writable, _version, PartitionMaxBytes);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -1125,7 +1210,7 @@ i32 TFetchRequestData::TFetchTopic::TFetchPartition::Size(TKafkaVersion _version
NPrivate::Size<LastFetchedEpochMeta>(_collector, _version, LastFetchedEpoch);
NPrivate::Size<LogStartOffsetMeta>(_collector, _version, LogStartOffset);
NPrivate::Size<PartitionMaxBytesMeta>(_collector, _version, PartitionMaxBytes);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1139,7 +1224,7 @@ i32 TFetchRequestData::TFetchTopic::TFetchPartition::Size(TKafkaVersion _version
const TFetchRequestData::TForgottenTopic::TopicMeta::Type TFetchRequestData::TForgottenTopic::TopicMeta::Default = {""};
const TFetchRequestData::TForgottenTopic::TopicIdMeta::Type TFetchRequestData::TForgottenTopic::TopicIdMeta::Default = TKafkaUuid(0, 0);
-TFetchRequestData::TForgottenTopic::TForgottenTopic()
+TFetchRequestData::TForgottenTopic::TForgottenTopic()
: Topic(TopicMeta::Default)
, TopicId(TopicIdMeta::Default)
{}
@@ -1151,7 +1236,7 @@ void TFetchRequestData::TForgottenTopic::Read(TKafkaReadable& _readable, TKafkaV
NPrivate::Read<TopicMeta>(_readable, _version, Topic);
NPrivate::Read<TopicIdMeta>(_readable, _version, TopicId);
NPrivate::Read<PartitionsMeta>(_readable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1174,10 +1259,10 @@ void TFetchRequestData::TForgottenTopic::Write(TKafkaWritable& _writable, TKafka
NPrivate::Write<TopicMeta>(_collector, _writable, _version, Topic);
NPrivate::Write<TopicIdMeta>(_collector, _writable, _version, TopicId);
NPrivate::Write<PartitionsMeta>(_collector, _writable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -1186,7 +1271,7 @@ i32 TFetchRequestData::TForgottenTopic::Size(TKafkaVersion _version) const {
NPrivate::Size<TopicMeta>(_collector, _version, Topic);
NPrivate::Size<TopicIdMeta>(_collector, _version, TopicId);
NPrivate::Size<PartitionsMeta>(_collector, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1201,7 +1286,7 @@ const TFetchResponseData::ThrottleTimeMsMeta::Type TFetchResponseData::ThrottleT
const TFetchResponseData::ErrorCodeMeta::Type TFetchResponseData::ErrorCodeMeta::Default = 0;
const TFetchResponseData::SessionIdMeta::Type TFetchResponseData::SessionIdMeta::Default = 0;
-TFetchResponseData::TFetchResponseData()
+TFetchResponseData::TFetchResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
, SessionId(SessionIdMeta::Default)
@@ -1215,7 +1300,7 @@ void TFetchResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _version)
NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
NPrivate::Read<SessionIdMeta>(_readable, _version, SessionId);
NPrivate::Read<ResponsesMeta>(_readable, _version, Responses);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1239,10 +1324,10 @@ void TFetchResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _version
NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
NPrivate::Write<SessionIdMeta>(_collector, _writable, _version, SessionId);
NPrivate::Write<ResponsesMeta>(_collector, _writable, _version, Responses);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -1252,7 +1337,7 @@ i32 TFetchResponseData::Size(TKafkaVersion _version) const {
NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
NPrivate::Size<SessionIdMeta>(_collector, _version, SessionId);
NPrivate::Size<ResponsesMeta>(_collector, _version, Responses);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1266,7 +1351,7 @@ i32 TFetchResponseData::Size(TKafkaVersion _version) const {
const TFetchResponseData::TFetchableTopicResponse::TopicMeta::Type TFetchResponseData::TFetchableTopicResponse::TopicMeta::Default = {""};
const TFetchResponseData::TFetchableTopicResponse::TopicIdMeta::Type TFetchResponseData::TFetchableTopicResponse::TopicIdMeta::Default = TKafkaUuid(0, 0);
-TFetchResponseData::TFetchableTopicResponse::TFetchableTopicResponse()
+TFetchResponseData::TFetchableTopicResponse::TFetchableTopicResponse()
: Topic(TopicMeta::Default)
, TopicId(TopicIdMeta::Default)
{}
@@ -1278,7 +1363,7 @@ void TFetchResponseData::TFetchableTopicResponse::Read(TKafkaReadable& _readable
NPrivate::Read<TopicMeta>(_readable, _version, Topic);
NPrivate::Read<TopicIdMeta>(_readable, _version, TopicId);
NPrivate::Read<PartitionsMeta>(_readable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1301,10 +1386,10 @@ void TFetchResponseData::TFetchableTopicResponse::Write(TKafkaWritable& _writabl
NPrivate::Write<TopicMeta>(_collector, _writable, _version, Topic);
NPrivate::Write<TopicIdMeta>(_collector, _writable, _version, TopicId);
NPrivate::Write<PartitionsMeta>(_collector, _writable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -1313,7 +1398,7 @@ i32 TFetchResponseData::TFetchableTopicResponse::Size(TKafkaVersion _version) co
NPrivate::Size<TopicMeta>(_collector, _version, Topic);
NPrivate::Size<TopicIdMeta>(_collector, _version, TopicId);
NPrivate::Size<PartitionsMeta>(_collector, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1331,7 +1416,7 @@ const TFetchResponseData::TFetchableTopicResponse::TPartitionData::LastStableOff
const TFetchResponseData::TFetchableTopicResponse::TPartitionData::LogStartOffsetMeta::Type TFetchResponseData::TFetchableTopicResponse::TPartitionData::LogStartOffsetMeta::Default = -1;
const TFetchResponseData::TFetchableTopicResponse::TPartitionData::PreferredReadReplicaMeta::Type TFetchResponseData::TFetchableTopicResponse::TPartitionData::PreferredReadReplicaMeta::Default = -1;
-TFetchResponseData::TFetchableTopicResponse::TPartitionData::TPartitionData()
+TFetchResponseData::TFetchableTopicResponse::TPartitionData::TPartitionData()
: PartitionIndex(PartitionIndexMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
, HighWatermark(HighWatermarkMeta::Default)
@@ -1355,7 +1440,7 @@ void TFetchResponseData::TFetchableTopicResponse::TPartitionData::Read(TKafkaRea
NPrivate::Read<AbortedTransactionsMeta>(_readable, _version, AbortedTransactions);
NPrivate::Read<PreferredReadReplicaMeta>(_readable, _version, PreferredReadReplica);
NPrivate::Read<RecordsMeta>(_readable, _version, Records);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1395,10 +1480,10 @@ void TFetchResponseData::TFetchableTopicResponse::TPartitionData::Write(TKafkaWr
NPrivate::Write<AbortedTransactionsMeta>(_collector, _writable, _version, AbortedTransactions);
NPrivate::Write<PreferredReadReplicaMeta>(_collector, _writable, _version, PreferredReadReplica);
NPrivate::Write<RecordsMeta>(_collector, _writable, _version, Records);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
NPrivate::WriteTag<DivergingEpochMeta>(_writable, _version, DivergingEpoch);
NPrivate::WriteTag<CurrentLeaderMeta>(_writable, _version, CurrentLeader);
NPrivate::WriteTag<SnapshotIdMeta>(_writable, _version, SnapshotId);
@@ -1418,7 +1503,7 @@ i32 TFetchResponseData::TFetchableTopicResponse::TPartitionData::Size(TKafkaVers
NPrivate::Size<AbortedTransactionsMeta>(_collector, _version, AbortedTransactions);
NPrivate::Size<PreferredReadReplicaMeta>(_collector, _version, PreferredReadReplica);
NPrivate::Size<RecordsMeta>(_collector, _version, Records);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1432,7 +1517,7 @@ i32 TFetchResponseData::TFetchableTopicResponse::TPartitionData::Size(TKafkaVers
const TFetchResponseData::TFetchableTopicResponse::TPartitionData::TEpochEndOffset::EpochMeta::Type TFetchResponseData::TFetchableTopicResponse::TPartitionData::TEpochEndOffset::EpochMeta::Default = -1;
const TFetchResponseData::TFetchableTopicResponse::TPartitionData::TEpochEndOffset::EndOffsetMeta::Type TFetchResponseData::TFetchableTopicResponse::TPartitionData::TEpochEndOffset::EndOffsetMeta::Default = -1;
-TFetchResponseData::TFetchableTopicResponse::TPartitionData::TEpochEndOffset::TEpochEndOffset()
+TFetchResponseData::TFetchableTopicResponse::TPartitionData::TEpochEndOffset::TEpochEndOffset()
: Epoch(EpochMeta::Default)
, EndOffset(EndOffsetMeta::Default)
{}
@@ -1443,7 +1528,7 @@ void TFetchResponseData::TFetchableTopicResponse::TPartitionData::TEpochEndOffse
}
NPrivate::Read<EpochMeta>(_readable, _version, Epoch);
NPrivate::Read<EndOffsetMeta>(_readable, _version, EndOffset);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1465,10 +1550,10 @@ void TFetchResponseData::TFetchableTopicResponse::TPartitionData::TEpochEndOffse
NPrivate::TWriteCollector _collector;
NPrivate::Write<EpochMeta>(_collector, _writable, _version, Epoch);
NPrivate::Write<EndOffsetMeta>(_collector, _writable, _version, EndOffset);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -1476,7 +1561,7 @@ i32 TFetchResponseData::TFetchableTopicResponse::TPartitionData::TEpochEndOffset
NPrivate::TSizeCollector _collector;
NPrivate::Size<EpochMeta>(_collector, _version, Epoch);
NPrivate::Size<EndOffsetMeta>(_collector, _version, EndOffset);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1490,7 +1575,7 @@ i32 TFetchResponseData::TFetchableTopicResponse::TPartitionData::TEpochEndOffset
const TFetchResponseData::TFetchableTopicResponse::TPartitionData::TLeaderIdAndEpoch::LeaderIdMeta::Type TFetchResponseData::TFetchableTopicResponse::TPartitionData::TLeaderIdAndEpoch::LeaderIdMeta::Default = -1;
const TFetchResponseData::TFetchableTopicResponse::TPartitionData::TLeaderIdAndEpoch::LeaderEpochMeta::Type TFetchResponseData::TFetchableTopicResponse::TPartitionData::TLeaderIdAndEpoch::LeaderEpochMeta::Default = -1;
-TFetchResponseData::TFetchableTopicResponse::TPartitionData::TLeaderIdAndEpoch::TLeaderIdAndEpoch()
+TFetchResponseData::TFetchableTopicResponse::TPartitionData::TLeaderIdAndEpoch::TLeaderIdAndEpoch()
: LeaderId(LeaderIdMeta::Default)
, LeaderEpoch(LeaderEpochMeta::Default)
{}
@@ -1501,7 +1586,7 @@ void TFetchResponseData::TFetchableTopicResponse::TPartitionData::TLeaderIdAndEp
}
NPrivate::Read<LeaderIdMeta>(_readable, _version, LeaderId);
NPrivate::Read<LeaderEpochMeta>(_readable, _version, LeaderEpoch);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1523,10 +1608,10 @@ void TFetchResponseData::TFetchableTopicResponse::TPartitionData::TLeaderIdAndEp
NPrivate::TWriteCollector _collector;
NPrivate::Write<LeaderIdMeta>(_collector, _writable, _version, LeaderId);
NPrivate::Write<LeaderEpochMeta>(_collector, _writable, _version, LeaderEpoch);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -1534,7 +1619,7 @@ i32 TFetchResponseData::TFetchableTopicResponse::TPartitionData::TLeaderIdAndEpo
NPrivate::TSizeCollector _collector;
NPrivate::Size<LeaderIdMeta>(_collector, _version, LeaderId);
NPrivate::Size<LeaderEpochMeta>(_collector, _version, LeaderEpoch);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1548,7 +1633,7 @@ i32 TFetchResponseData::TFetchableTopicResponse::TPartitionData::TLeaderIdAndEpo
const TFetchResponseData::TFetchableTopicResponse::TPartitionData::TSnapshotId::EndOffsetMeta::Type TFetchResponseData::TFetchableTopicResponse::TPartitionData::TSnapshotId::EndOffsetMeta::Default = -1;
const TFetchResponseData::TFetchableTopicResponse::TPartitionData::TSnapshotId::EpochMeta::Type TFetchResponseData::TFetchableTopicResponse::TPartitionData::TSnapshotId::EpochMeta::Default = -1;
-TFetchResponseData::TFetchableTopicResponse::TPartitionData::TSnapshotId::TSnapshotId()
+TFetchResponseData::TFetchableTopicResponse::TPartitionData::TSnapshotId::TSnapshotId()
: EndOffset(EndOffsetMeta::Default)
, Epoch(EpochMeta::Default)
{}
@@ -1559,7 +1644,7 @@ void TFetchResponseData::TFetchableTopicResponse::TPartitionData::TSnapshotId::R
}
NPrivate::Read<EndOffsetMeta>(_readable, _version, EndOffset);
NPrivate::Read<EpochMeta>(_readable, _version, Epoch);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1581,10 +1666,10 @@ void TFetchResponseData::TFetchableTopicResponse::TPartitionData::TSnapshotId::W
NPrivate::TWriteCollector _collector;
NPrivate::Write<EndOffsetMeta>(_collector, _writable, _version, EndOffset);
NPrivate::Write<EpochMeta>(_collector, _writable, _version, Epoch);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -1592,7 +1677,7 @@ i32 TFetchResponseData::TFetchableTopicResponse::TPartitionData::TSnapshotId::Si
NPrivate::TSizeCollector _collector;
NPrivate::Size<EndOffsetMeta>(_collector, _version, EndOffset);
NPrivate::Size<EpochMeta>(_collector, _version, Epoch);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1606,7 +1691,7 @@ i32 TFetchResponseData::TFetchableTopicResponse::TPartitionData::TSnapshotId::Si
const TFetchResponseData::TFetchableTopicResponse::TPartitionData::TAbortedTransaction::ProducerIdMeta::Type TFetchResponseData::TFetchableTopicResponse::TPartitionData::TAbortedTransaction::ProducerIdMeta::Default = 0;
const TFetchResponseData::TFetchableTopicResponse::TPartitionData::TAbortedTransaction::FirstOffsetMeta::Type TFetchResponseData::TFetchableTopicResponse::TPartitionData::TAbortedTransaction::FirstOffsetMeta::Default = 0;
-TFetchResponseData::TFetchableTopicResponse::TPartitionData::TAbortedTransaction::TAbortedTransaction()
+TFetchResponseData::TFetchableTopicResponse::TPartitionData::TAbortedTransaction::TAbortedTransaction()
: ProducerId(ProducerIdMeta::Default)
, FirstOffset(FirstOffsetMeta::Default)
{}
@@ -1617,7 +1702,7 @@ void TFetchResponseData::TFetchableTopicResponse::TPartitionData::TAbortedTransa
}
NPrivate::Read<ProducerIdMeta>(_readable, _version, ProducerId);
NPrivate::Read<FirstOffsetMeta>(_readable, _version, FirstOffset);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1639,10 +1724,10 @@ void TFetchResponseData::TFetchableTopicResponse::TPartitionData::TAbortedTransa
NPrivate::TWriteCollector _collector;
NPrivate::Write<ProducerIdMeta>(_collector, _writable, _version, ProducerId);
NPrivate::Write<FirstOffsetMeta>(_collector, _writable, _version, FirstOffset);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -1650,7 +1735,7 @@ i32 TFetchResponseData::TFetchableTopicResponse::TPartitionData::TAbortedTransac
NPrivate::TSizeCollector _collector;
NPrivate::Size<ProducerIdMeta>(_collector, _version, ProducerId);
NPrivate::Size<FirstOffsetMeta>(_collector, _version, FirstOffset);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1664,7 +1749,7 @@ i32 TFetchResponseData::TFetchableTopicResponse::TPartitionData::TAbortedTransac
const TListOffsetsRequestData::ReplicaIdMeta::Type TListOffsetsRequestData::ReplicaIdMeta::Default = 0;
const TListOffsetsRequestData::IsolationLevelMeta::Type TListOffsetsRequestData::IsolationLevelMeta::Default = 0;
-TListOffsetsRequestData::TListOffsetsRequestData()
+TListOffsetsRequestData::TListOffsetsRequestData()
: ReplicaId(ReplicaIdMeta::Default)
, IsolationLevel(IsolationLevelMeta::Default)
{}
@@ -1676,7 +1761,7 @@ void TListOffsetsRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _ver
NPrivate::Read<ReplicaIdMeta>(_readable, _version, ReplicaId);
NPrivate::Read<IsolationLevelMeta>(_readable, _version, IsolationLevel);
NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1699,10 +1784,10 @@ void TListOffsetsRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _ve
NPrivate::Write<ReplicaIdMeta>(_collector, _writable, _version, ReplicaId);
NPrivate::Write<IsolationLevelMeta>(_collector, _writable, _version, IsolationLevel);
NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -1711,7 +1796,7 @@ i32 TListOffsetsRequestData::Size(TKafkaVersion _version) const {
NPrivate::Size<ReplicaIdMeta>(_collector, _version, ReplicaId);
NPrivate::Size<IsolationLevelMeta>(_collector, _version, IsolationLevel);
NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1724,7 +1809,7 @@ i32 TListOffsetsRequestData::Size(TKafkaVersion _version) const {
//
const TListOffsetsRequestData::TListOffsetsTopic::NameMeta::Type TListOffsetsRequestData::TListOffsetsTopic::NameMeta::Default = {""};
-TListOffsetsRequestData::TListOffsetsTopic::TListOffsetsTopic()
+TListOffsetsRequestData::TListOffsetsTopic::TListOffsetsTopic()
: Name(NameMeta::Default)
{}
@@ -1734,7 +1819,7 @@ void TListOffsetsRequestData::TListOffsetsTopic::Read(TKafkaReadable& _readable,
}
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<PartitionsMeta>(_readable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1756,10 +1841,10 @@ void TListOffsetsRequestData::TListOffsetsTopic::Write(TKafkaWritable& _writable
NPrivate::TWriteCollector _collector;
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<PartitionsMeta>(_collector, _writable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -1767,7 +1852,7 @@ i32 TListOffsetsRequestData::TListOffsetsTopic::Size(TKafkaVersion _version) con
NPrivate::TSizeCollector _collector;
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<PartitionsMeta>(_collector, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1783,7 +1868,7 @@ const TListOffsetsRequestData::TListOffsetsTopic::TListOffsetsPartition::Current
const TListOffsetsRequestData::TListOffsetsTopic::TListOffsetsPartition::TimestampMeta::Type TListOffsetsRequestData::TListOffsetsTopic::TListOffsetsPartition::TimestampMeta::Default = 0;
const TListOffsetsRequestData::TListOffsetsTopic::TListOffsetsPartition::MaxNumOffsetsMeta::Type TListOffsetsRequestData::TListOffsetsTopic::TListOffsetsPartition::MaxNumOffsetsMeta::Default = 1;
-TListOffsetsRequestData::TListOffsetsTopic::TListOffsetsPartition::TListOffsetsPartition()
+TListOffsetsRequestData::TListOffsetsTopic::TListOffsetsPartition::TListOffsetsPartition()
: PartitionIndex(PartitionIndexMeta::Default)
, CurrentLeaderEpoch(CurrentLeaderEpochMeta::Default)
, Timestamp(TimestampMeta::Default)
@@ -1798,7 +1883,7 @@ void TListOffsetsRequestData::TListOffsetsTopic::TListOffsetsPartition::Read(TKa
NPrivate::Read<CurrentLeaderEpochMeta>(_readable, _version, CurrentLeaderEpoch);
NPrivate::Read<TimestampMeta>(_readable, _version, Timestamp);
NPrivate::Read<MaxNumOffsetsMeta>(_readable, _version, MaxNumOffsets);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1822,10 +1907,10 @@ void TListOffsetsRequestData::TListOffsetsTopic::TListOffsetsPartition::Write(TK
NPrivate::Write<CurrentLeaderEpochMeta>(_collector, _writable, _version, CurrentLeaderEpoch);
NPrivate::Write<TimestampMeta>(_collector, _writable, _version, Timestamp);
NPrivate::Write<MaxNumOffsetsMeta>(_collector, _writable, _version, MaxNumOffsets);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -1835,7 +1920,7 @@ i32 TListOffsetsRequestData::TListOffsetsTopic::TListOffsetsPartition::Size(TKaf
NPrivate::Size<CurrentLeaderEpochMeta>(_collector, _version, CurrentLeaderEpoch);
NPrivate::Size<TimestampMeta>(_collector, _version, Timestamp);
NPrivate::Size<MaxNumOffsetsMeta>(_collector, _version, MaxNumOffsets);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1848,7 +1933,7 @@ i32 TListOffsetsRequestData::TListOffsetsTopic::TListOffsetsPartition::Size(TKaf
//
const TListOffsetsResponseData::ThrottleTimeMsMeta::Type TListOffsetsResponseData::ThrottleTimeMsMeta::Default = 0;
-TListOffsetsResponseData::TListOffsetsResponseData()
+TListOffsetsResponseData::TListOffsetsResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
{}
@@ -1858,7 +1943,7 @@ void TListOffsetsResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _ve
}
NPrivate::Read<ThrottleTimeMsMeta>(_readable, _version, ThrottleTimeMs);
NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1880,10 +1965,10 @@ void TListOffsetsResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _v
NPrivate::TWriteCollector _collector;
NPrivate::Write<ThrottleTimeMsMeta>(_collector, _writable, _version, ThrottleTimeMs);
NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -1891,7 +1976,7 @@ i32 TListOffsetsResponseData::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<ThrottleTimeMsMeta>(_collector, _version, ThrottleTimeMs);
NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1904,7 +1989,7 @@ i32 TListOffsetsResponseData::Size(TKafkaVersion _version) const {
//
const TListOffsetsResponseData::TListOffsetsTopicResponse::NameMeta::Type TListOffsetsResponseData::TListOffsetsTopicResponse::NameMeta::Default = {""};
-TListOffsetsResponseData::TListOffsetsTopicResponse::TListOffsetsTopicResponse()
+TListOffsetsResponseData::TListOffsetsTopicResponse::TListOffsetsTopicResponse()
: Name(NameMeta::Default)
{}
@@ -1914,7 +1999,7 @@ void TListOffsetsResponseData::TListOffsetsTopicResponse::Read(TKafkaReadable& _
}
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<PartitionsMeta>(_readable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -1936,10 +2021,10 @@ void TListOffsetsResponseData::TListOffsetsTopicResponse::Write(TKafkaWritable&
NPrivate::TWriteCollector _collector;
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<PartitionsMeta>(_collector, _writable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -1947,7 +2032,7 @@ i32 TListOffsetsResponseData::TListOffsetsTopicResponse::Size(TKafkaVersion _ver
NPrivate::TSizeCollector _collector;
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<PartitionsMeta>(_collector, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -1964,7 +2049,7 @@ const TListOffsetsResponseData::TListOffsetsTopicResponse::TListOffsetsPartition
const TListOffsetsResponseData::TListOffsetsTopicResponse::TListOffsetsPartitionResponse::OffsetMeta::Type TListOffsetsResponseData::TListOffsetsTopicResponse::TListOffsetsPartitionResponse::OffsetMeta::Default = -1;
const TListOffsetsResponseData::TListOffsetsTopicResponse::TListOffsetsPartitionResponse::LeaderEpochMeta::Type TListOffsetsResponseData::TListOffsetsTopicResponse::TListOffsetsPartitionResponse::LeaderEpochMeta::Default = -1;
-TListOffsetsResponseData::TListOffsetsTopicResponse::TListOffsetsPartitionResponse::TListOffsetsPartitionResponse()
+TListOffsetsResponseData::TListOffsetsTopicResponse::TListOffsetsPartitionResponse::TListOffsetsPartitionResponse()
: PartitionIndex(PartitionIndexMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
, Timestamp(TimestampMeta::Default)
@@ -1982,7 +2067,7 @@ void TListOffsetsResponseData::TListOffsetsTopicResponse::TListOffsetsPartitionR
NPrivate::Read<TimestampMeta>(_readable, _version, Timestamp);
NPrivate::Read<OffsetMeta>(_readable, _version, Offset);
NPrivate::Read<LeaderEpochMeta>(_readable, _version, LeaderEpoch);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2008,10 +2093,10 @@ void TListOffsetsResponseData::TListOffsetsTopicResponse::TListOffsetsPartitionR
NPrivate::Write<TimestampMeta>(_collector, _writable, _version, Timestamp);
NPrivate::Write<OffsetMeta>(_collector, _writable, _version, Offset);
NPrivate::Write<LeaderEpochMeta>(_collector, _writable, _version, LeaderEpoch);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2023,7 +2108,7 @@ i32 TListOffsetsResponseData::TListOffsetsTopicResponse::TListOffsetsPartitionRe
NPrivate::Size<TimestampMeta>(_collector, _version, Timestamp);
NPrivate::Size<OffsetMeta>(_collector, _version, Offset);
NPrivate::Size<LeaderEpochMeta>(_collector, _version, LeaderEpoch);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2038,7 +2123,7 @@ const TMetadataRequestData::AllowAutoTopicCreationMeta::Type TMetadataRequestDat
const TMetadataRequestData::IncludeClusterAuthorizedOperationsMeta::Type TMetadataRequestData::IncludeClusterAuthorizedOperationsMeta::Default = false;
const TMetadataRequestData::IncludeTopicAuthorizedOperationsMeta::Type TMetadataRequestData::IncludeTopicAuthorizedOperationsMeta::Default = false;
-TMetadataRequestData::TMetadataRequestData()
+TMetadataRequestData::TMetadataRequestData()
: AllowAutoTopicCreation(AllowAutoTopicCreationMeta::Default)
, IncludeClusterAuthorizedOperations(IncludeClusterAuthorizedOperationsMeta::Default)
, IncludeTopicAuthorizedOperations(IncludeTopicAuthorizedOperationsMeta::Default)
@@ -2052,7 +2137,7 @@ void TMetadataRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _versio
NPrivate::Read<AllowAutoTopicCreationMeta>(_readable, _version, AllowAutoTopicCreation);
NPrivate::Read<IncludeClusterAuthorizedOperationsMeta>(_readable, _version, IncludeClusterAuthorizedOperations);
NPrivate::Read<IncludeTopicAuthorizedOperationsMeta>(_readable, _version, IncludeTopicAuthorizedOperations);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2076,10 +2161,10 @@ void TMetadataRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _versi
NPrivate::Write<AllowAutoTopicCreationMeta>(_collector, _writable, _version, AllowAutoTopicCreation);
NPrivate::Write<IncludeClusterAuthorizedOperationsMeta>(_collector, _writable, _version, IncludeClusterAuthorizedOperations);
NPrivate::Write<IncludeTopicAuthorizedOperationsMeta>(_collector, _writable, _version, IncludeTopicAuthorizedOperations);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2089,7 +2174,7 @@ i32 TMetadataRequestData::Size(TKafkaVersion _version) const {
NPrivate::Size<AllowAutoTopicCreationMeta>(_collector, _version, AllowAutoTopicCreation);
NPrivate::Size<IncludeClusterAuthorizedOperationsMeta>(_collector, _version, IncludeClusterAuthorizedOperations);
NPrivate::Size<IncludeTopicAuthorizedOperationsMeta>(_collector, _version, IncludeTopicAuthorizedOperations);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2103,7 +2188,7 @@ i32 TMetadataRequestData::Size(TKafkaVersion _version) const {
const TMetadataRequestData::TMetadataRequestTopic::TopicIdMeta::Type TMetadataRequestData::TMetadataRequestTopic::TopicIdMeta::Default = TKafkaUuid(0, 0);
const TMetadataRequestData::TMetadataRequestTopic::NameMeta::Type TMetadataRequestData::TMetadataRequestTopic::NameMeta::Default = {""};
-TMetadataRequestData::TMetadataRequestTopic::TMetadataRequestTopic()
+TMetadataRequestData::TMetadataRequestTopic::TMetadataRequestTopic()
: TopicId(TopicIdMeta::Default)
, Name(NameMeta::Default)
{}
@@ -2114,7 +2199,7 @@ void TMetadataRequestData::TMetadataRequestTopic::Read(TKafkaReadable& _readable
}
NPrivate::Read<TopicIdMeta>(_readable, _version, TopicId);
NPrivate::Read<NameMeta>(_readable, _version, Name);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2136,10 +2221,10 @@ void TMetadataRequestData::TMetadataRequestTopic::Write(TKafkaWritable& _writabl
NPrivate::TWriteCollector _collector;
NPrivate::Write<TopicIdMeta>(_collector, _writable, _version, TopicId);
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2147,7 +2232,7 @@ i32 TMetadataRequestData::TMetadataRequestTopic::Size(TKafkaVersion _version) co
NPrivate::TSizeCollector _collector;
NPrivate::Size<TopicIdMeta>(_collector, _version, TopicId);
NPrivate::Size<NameMeta>(_collector, _version, Name);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2163,7 +2248,7 @@ const TMetadataResponseData::ClusterIdMeta::Type TMetadataResponseData::ClusterI
const TMetadataResponseData::ControllerIdMeta::Type TMetadataResponseData::ControllerIdMeta::Default = -1;
const TMetadataResponseData::ClusterAuthorizedOperationsMeta::Type TMetadataResponseData::ClusterAuthorizedOperationsMeta::Default = -2147483648;
-TMetadataResponseData::TMetadataResponseData()
+TMetadataResponseData::TMetadataResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
, ClusterId(ClusterIdMeta::Default)
, ControllerId(ControllerIdMeta::Default)
@@ -2180,7 +2265,7 @@ void TMetadataResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _versi
NPrivate::Read<ControllerIdMeta>(_readable, _version, ControllerId);
NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
NPrivate::Read<ClusterAuthorizedOperationsMeta>(_readable, _version, ClusterAuthorizedOperations);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2206,10 +2291,10 @@ void TMetadataResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _vers
NPrivate::Write<ControllerIdMeta>(_collector, _writable, _version, ControllerId);
NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
NPrivate::Write<ClusterAuthorizedOperationsMeta>(_collector, _writable, _version, ClusterAuthorizedOperations);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2221,7 +2306,7 @@ i32 TMetadataResponseData::Size(TKafkaVersion _version) const {
NPrivate::Size<ControllerIdMeta>(_collector, _version, ControllerId);
NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
NPrivate::Size<ClusterAuthorizedOperationsMeta>(_collector, _version, ClusterAuthorizedOperations);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2237,7 +2322,7 @@ const TMetadataResponseData::TMetadataResponseBroker::HostMeta::Type TMetadataRe
const TMetadataResponseData::TMetadataResponseBroker::PortMeta::Type TMetadataResponseData::TMetadataResponseBroker::PortMeta::Default = 0;
const TMetadataResponseData::TMetadataResponseBroker::RackMeta::Type TMetadataResponseData::TMetadataResponseBroker::RackMeta::Default = std::nullopt;
-TMetadataResponseData::TMetadataResponseBroker::TMetadataResponseBroker()
+TMetadataResponseData::TMetadataResponseBroker::TMetadataResponseBroker()
: NodeId(NodeIdMeta::Default)
, Host(HostMeta::Default)
, Port(PortMeta::Default)
@@ -2252,7 +2337,7 @@ void TMetadataResponseData::TMetadataResponseBroker::Read(TKafkaReadable& _reada
NPrivate::Read<HostMeta>(_readable, _version, Host);
NPrivate::Read<PortMeta>(_readable, _version, Port);
NPrivate::Read<RackMeta>(_readable, _version, Rack);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2276,10 +2361,10 @@ void TMetadataResponseData::TMetadataResponseBroker::Write(TKafkaWritable& _writ
NPrivate::Write<HostMeta>(_collector, _writable, _version, Host);
NPrivate::Write<PortMeta>(_collector, _writable, _version, Port);
NPrivate::Write<RackMeta>(_collector, _writable, _version, Rack);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2289,7 +2374,7 @@ i32 TMetadataResponseData::TMetadataResponseBroker::Size(TKafkaVersion _version)
NPrivate::Size<HostMeta>(_collector, _version, Host);
NPrivate::Size<PortMeta>(_collector, _version, Port);
NPrivate::Size<RackMeta>(_collector, _version, Rack);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2306,7 +2391,7 @@ const TMetadataResponseData::TMetadataResponseTopic::TopicIdMeta::Type TMetadata
const TMetadataResponseData::TMetadataResponseTopic::IsInternalMeta::Type TMetadataResponseData::TMetadataResponseTopic::IsInternalMeta::Default = false;
const TMetadataResponseData::TMetadataResponseTopic::TopicAuthorizedOperationsMeta::Type TMetadataResponseData::TMetadataResponseTopic::TopicAuthorizedOperationsMeta::Default = -2147483648;
-TMetadataResponseData::TMetadataResponseTopic::TMetadataResponseTopic()
+TMetadataResponseData::TMetadataResponseTopic::TMetadataResponseTopic()
: ErrorCode(ErrorCodeMeta::Default)
, Name(NameMeta::Default)
, TopicId(TopicIdMeta::Default)
@@ -2324,7 +2409,7 @@ void TMetadataResponseData::TMetadataResponseTopic::Read(TKafkaReadable& _readab
NPrivate::Read<IsInternalMeta>(_readable, _version, IsInternal);
NPrivate::Read<PartitionsMeta>(_readable, _version, Partitions);
NPrivate::Read<TopicAuthorizedOperationsMeta>(_readable, _version, TopicAuthorizedOperations);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2350,10 +2435,10 @@ void TMetadataResponseData::TMetadataResponseTopic::Write(TKafkaWritable& _writa
NPrivate::Write<IsInternalMeta>(_collector, _writable, _version, IsInternal);
NPrivate::Write<PartitionsMeta>(_collector, _writable, _version, Partitions);
NPrivate::Write<TopicAuthorizedOperationsMeta>(_collector, _writable, _version, TopicAuthorizedOperations);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2365,7 +2450,7 @@ i32 TMetadataResponseData::TMetadataResponseTopic::Size(TKafkaVersion _version)
NPrivate::Size<IsInternalMeta>(_collector, _version, IsInternal);
NPrivate::Size<PartitionsMeta>(_collector, _version, Partitions);
NPrivate::Size<TopicAuthorizedOperationsMeta>(_collector, _version, TopicAuthorizedOperations);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2381,7 +2466,7 @@ const TMetadataResponseData::TMetadataResponseTopic::TMetadataResponsePartition:
const TMetadataResponseData::TMetadataResponseTopic::TMetadataResponsePartition::LeaderIdMeta::Type TMetadataResponseData::TMetadataResponseTopic::TMetadataResponsePartition::LeaderIdMeta::Default = 0;
const TMetadataResponseData::TMetadataResponseTopic::TMetadataResponsePartition::LeaderEpochMeta::Type TMetadataResponseData::TMetadataResponseTopic::TMetadataResponsePartition::LeaderEpochMeta::Default = -1;
-TMetadataResponseData::TMetadataResponseTopic::TMetadataResponsePartition::TMetadataResponsePartition()
+TMetadataResponseData::TMetadataResponseTopic::TMetadataResponsePartition::TMetadataResponsePartition()
: ErrorCode(ErrorCodeMeta::Default)
, PartitionIndex(PartitionIndexMeta::Default)
, LeaderId(LeaderIdMeta::Default)
@@ -2399,7 +2484,7 @@ void TMetadataResponseData::TMetadataResponseTopic::TMetadataResponsePartition::
NPrivate::Read<ReplicaNodesMeta>(_readable, _version, ReplicaNodes);
NPrivate::Read<IsrNodesMeta>(_readable, _version, IsrNodes);
NPrivate::Read<OfflineReplicasMeta>(_readable, _version, OfflineReplicas);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2426,10 +2511,10 @@ void TMetadataResponseData::TMetadataResponseTopic::TMetadataResponsePartition::
NPrivate::Write<ReplicaNodesMeta>(_collector, _writable, _version, ReplicaNodes);
NPrivate::Write<IsrNodesMeta>(_collector, _writable, _version, IsrNodes);
NPrivate::Write<OfflineReplicasMeta>(_collector, _writable, _version, OfflineReplicas);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2442,7 +2527,7 @@ i32 TMetadataResponseData::TMetadataResponseTopic::TMetadataResponsePartition::S
NPrivate::Size<ReplicaNodesMeta>(_collector, _version, ReplicaNodes);
NPrivate::Size<IsrNodesMeta>(_collector, _version, IsrNodes);
NPrivate::Size<OfflineReplicasMeta>(_collector, _version, OfflineReplicas);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2459,7 +2544,7 @@ const TOffsetCommitRequestData::MemberIdMeta::Type TOffsetCommitRequestData::Mem
const TOffsetCommitRequestData::GroupInstanceIdMeta::Type TOffsetCommitRequestData::GroupInstanceIdMeta::Default = std::nullopt;
const TOffsetCommitRequestData::RetentionTimeMsMeta::Type TOffsetCommitRequestData::RetentionTimeMsMeta::Default = -1;
-TOffsetCommitRequestData::TOffsetCommitRequestData()
+TOffsetCommitRequestData::TOffsetCommitRequestData()
: GroupId(GroupIdMeta::Default)
, GenerationId(GenerationIdMeta::Default)
, MemberId(MemberIdMeta::Default)
@@ -2477,7 +2562,7 @@ void TOffsetCommitRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _ve
NPrivate::Read<GroupInstanceIdMeta>(_readable, _version, GroupInstanceId);
NPrivate::Read<RetentionTimeMsMeta>(_readable, _version, RetentionTimeMs);
NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2503,10 +2588,10 @@ void TOffsetCommitRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _v
NPrivate::Write<GroupInstanceIdMeta>(_collector, _writable, _version, GroupInstanceId);
NPrivate::Write<RetentionTimeMsMeta>(_collector, _writable, _version, RetentionTimeMs);
NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2518,7 +2603,7 @@ i32 TOffsetCommitRequestData::Size(TKafkaVersion _version) const {
NPrivate::Size<GroupInstanceIdMeta>(_collector, _version, GroupInstanceId);
NPrivate::Size<RetentionTimeMsMeta>(_collector, _version, RetentionTimeMs);
NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2531,7 +2616,7 @@ i32 TOffsetCommitRequestData::Size(TKafkaVersion _version) const {
//
const TOffsetCommitRequestData::TOffsetCommitRequestTopic::NameMeta::Type TOffsetCommitRequestData::TOffsetCommitRequestTopic::NameMeta::Default = {""};
-TOffsetCommitRequestData::TOffsetCommitRequestTopic::TOffsetCommitRequestTopic()
+TOffsetCommitRequestData::TOffsetCommitRequestTopic::TOffsetCommitRequestTopic()
: Name(NameMeta::Default)
{}
@@ -2541,7 +2626,7 @@ void TOffsetCommitRequestData::TOffsetCommitRequestTopic::Read(TKafkaReadable& _
}
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<PartitionsMeta>(_readable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2563,10 +2648,10 @@ void TOffsetCommitRequestData::TOffsetCommitRequestTopic::Write(TKafkaWritable&
NPrivate::TWriteCollector _collector;
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<PartitionsMeta>(_collector, _writable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2574,7 +2659,7 @@ i32 TOffsetCommitRequestData::TOffsetCommitRequestTopic::Size(TKafkaVersion _ver
NPrivate::TSizeCollector _collector;
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<PartitionsMeta>(_collector, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2591,7 +2676,7 @@ const TOffsetCommitRequestData::TOffsetCommitRequestTopic::TOffsetCommitRequestP
const TOffsetCommitRequestData::TOffsetCommitRequestTopic::TOffsetCommitRequestPartition::CommitTimestampMeta::Type TOffsetCommitRequestData::TOffsetCommitRequestTopic::TOffsetCommitRequestPartition::CommitTimestampMeta::Default = -1;
const TOffsetCommitRequestData::TOffsetCommitRequestTopic::TOffsetCommitRequestPartition::CommittedMetadataMeta::Type TOffsetCommitRequestData::TOffsetCommitRequestTopic::TOffsetCommitRequestPartition::CommittedMetadataMeta::Default = {""};
-TOffsetCommitRequestData::TOffsetCommitRequestTopic::TOffsetCommitRequestPartition::TOffsetCommitRequestPartition()
+TOffsetCommitRequestData::TOffsetCommitRequestTopic::TOffsetCommitRequestPartition::TOffsetCommitRequestPartition()
: PartitionIndex(PartitionIndexMeta::Default)
, CommittedOffset(CommittedOffsetMeta::Default)
, CommittedLeaderEpoch(CommittedLeaderEpochMeta::Default)
@@ -2608,7 +2693,7 @@ void TOffsetCommitRequestData::TOffsetCommitRequestTopic::TOffsetCommitRequestPa
NPrivate::Read<CommittedLeaderEpochMeta>(_readable, _version, CommittedLeaderEpoch);
NPrivate::Read<CommitTimestampMeta>(_readable, _version, CommitTimestamp);
NPrivate::Read<CommittedMetadataMeta>(_readable, _version, CommittedMetadata);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2633,10 +2718,10 @@ void TOffsetCommitRequestData::TOffsetCommitRequestTopic::TOffsetCommitRequestPa
NPrivate::Write<CommittedLeaderEpochMeta>(_collector, _writable, _version, CommittedLeaderEpoch);
NPrivate::Write<CommitTimestampMeta>(_collector, _writable, _version, CommitTimestamp);
NPrivate::Write<CommittedMetadataMeta>(_collector, _writable, _version, CommittedMetadata);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2647,7 +2732,7 @@ i32 TOffsetCommitRequestData::TOffsetCommitRequestTopic::TOffsetCommitRequestPar
NPrivate::Size<CommittedLeaderEpochMeta>(_collector, _version, CommittedLeaderEpoch);
NPrivate::Size<CommitTimestampMeta>(_collector, _version, CommitTimestamp);
NPrivate::Size<CommittedMetadataMeta>(_collector, _version, CommittedMetadata);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2660,7 +2745,7 @@ i32 TOffsetCommitRequestData::TOffsetCommitRequestTopic::TOffsetCommitRequestPar
//
const TOffsetCommitResponseData::ThrottleTimeMsMeta::Type TOffsetCommitResponseData::ThrottleTimeMsMeta::Default = 0;
-TOffsetCommitResponseData::TOffsetCommitResponseData()
+TOffsetCommitResponseData::TOffsetCommitResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
{}
@@ -2670,7 +2755,7 @@ void TOffsetCommitResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _v
}
NPrivate::Read<ThrottleTimeMsMeta>(_readable, _version, ThrottleTimeMs);
NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2692,10 +2777,10 @@ void TOffsetCommitResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _
NPrivate::TWriteCollector _collector;
NPrivate::Write<ThrottleTimeMsMeta>(_collector, _writable, _version, ThrottleTimeMs);
NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2703,7 +2788,7 @@ i32 TOffsetCommitResponseData::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<ThrottleTimeMsMeta>(_collector, _version, ThrottleTimeMs);
NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2716,7 +2801,7 @@ i32 TOffsetCommitResponseData::Size(TKafkaVersion _version) const {
//
const TOffsetCommitResponseData::TOffsetCommitResponseTopic::NameMeta::Type TOffsetCommitResponseData::TOffsetCommitResponseTopic::NameMeta::Default = {""};
-TOffsetCommitResponseData::TOffsetCommitResponseTopic::TOffsetCommitResponseTopic()
+TOffsetCommitResponseData::TOffsetCommitResponseTopic::TOffsetCommitResponseTopic()
: Name(NameMeta::Default)
{}
@@ -2726,7 +2811,7 @@ void TOffsetCommitResponseData::TOffsetCommitResponseTopic::Read(TKafkaReadable&
}
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<PartitionsMeta>(_readable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2748,10 +2833,10 @@ void TOffsetCommitResponseData::TOffsetCommitResponseTopic::Write(TKafkaWritable
NPrivate::TWriteCollector _collector;
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<PartitionsMeta>(_collector, _writable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2759,7 +2844,7 @@ i32 TOffsetCommitResponseData::TOffsetCommitResponseTopic::Size(TKafkaVersion _v
NPrivate::TSizeCollector _collector;
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<PartitionsMeta>(_collector, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2773,7 +2858,7 @@ i32 TOffsetCommitResponseData::TOffsetCommitResponseTopic::Size(TKafkaVersion _v
const TOffsetCommitResponseData::TOffsetCommitResponseTopic::TOffsetCommitResponsePartition::PartitionIndexMeta::Type TOffsetCommitResponseData::TOffsetCommitResponseTopic::TOffsetCommitResponsePartition::PartitionIndexMeta::Default = 0;
const TOffsetCommitResponseData::TOffsetCommitResponseTopic::TOffsetCommitResponsePartition::ErrorCodeMeta::Type TOffsetCommitResponseData::TOffsetCommitResponseTopic::TOffsetCommitResponsePartition::ErrorCodeMeta::Default = 0;
-TOffsetCommitResponseData::TOffsetCommitResponseTopic::TOffsetCommitResponsePartition::TOffsetCommitResponsePartition()
+TOffsetCommitResponseData::TOffsetCommitResponseTopic::TOffsetCommitResponsePartition::TOffsetCommitResponsePartition()
: PartitionIndex(PartitionIndexMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
{}
@@ -2784,7 +2869,7 @@ void TOffsetCommitResponseData::TOffsetCommitResponseTopic::TOffsetCommitRespons
}
NPrivate::Read<PartitionIndexMeta>(_readable, _version, PartitionIndex);
NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2806,10 +2891,10 @@ void TOffsetCommitResponseData::TOffsetCommitResponseTopic::TOffsetCommitRespons
NPrivate::TWriteCollector _collector;
NPrivate::Write<PartitionIndexMeta>(_collector, _writable, _version, PartitionIndex);
NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2817,7 +2902,7 @@ i32 TOffsetCommitResponseData::TOffsetCommitResponseTopic::TOffsetCommitResponse
NPrivate::TSizeCollector _collector;
NPrivate::Size<PartitionIndexMeta>(_collector, _version, PartitionIndex);
NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2831,7 +2916,7 @@ i32 TOffsetCommitResponseData::TOffsetCommitResponseTopic::TOffsetCommitResponse
const TOffsetFetchRequestData::GroupIdMeta::Type TOffsetFetchRequestData::GroupIdMeta::Default = {""};
const TOffsetFetchRequestData::RequireStableMeta::Type TOffsetFetchRequestData::RequireStableMeta::Default = false;
-TOffsetFetchRequestData::TOffsetFetchRequestData()
+TOffsetFetchRequestData::TOffsetFetchRequestData()
: GroupId(GroupIdMeta::Default)
, RequireStable(RequireStableMeta::Default)
{}
@@ -2844,7 +2929,7 @@ void TOffsetFetchRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _ver
NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
NPrivate::Read<GroupsMeta>(_readable, _version, Groups);
NPrivate::Read<RequireStableMeta>(_readable, _version, RequireStable);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2868,10 +2953,10 @@ void TOffsetFetchRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _ve
NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
NPrivate::Write<GroupsMeta>(_collector, _writable, _version, Groups);
NPrivate::Write<RequireStableMeta>(_collector, _writable, _version, RequireStable);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2881,7 +2966,7 @@ i32 TOffsetFetchRequestData::Size(TKafkaVersion _version) const {
NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
NPrivate::Size<GroupsMeta>(_collector, _version, Groups);
NPrivate::Size<RequireStableMeta>(_collector, _version, RequireStable);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2894,7 +2979,7 @@ i32 TOffsetFetchRequestData::Size(TKafkaVersion _version) const {
//
const TOffsetFetchRequestData::TOffsetFetchRequestTopic::NameMeta::Type TOffsetFetchRequestData::TOffsetFetchRequestTopic::NameMeta::Default = {""};
-TOffsetFetchRequestData::TOffsetFetchRequestTopic::TOffsetFetchRequestTopic()
+TOffsetFetchRequestData::TOffsetFetchRequestTopic::TOffsetFetchRequestTopic()
: Name(NameMeta::Default)
{}
@@ -2904,7 +2989,7 @@ void TOffsetFetchRequestData::TOffsetFetchRequestTopic::Read(TKafkaReadable& _re
}
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<PartitionIndexesMeta>(_readable, _version, PartitionIndexes);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2926,10 +3011,10 @@ void TOffsetFetchRequestData::TOffsetFetchRequestTopic::Write(TKafkaWritable& _w
NPrivate::TWriteCollector _collector;
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<PartitionIndexesMeta>(_collector, _writable, _version, PartitionIndexes);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2937,7 +3022,7 @@ i32 TOffsetFetchRequestData::TOffsetFetchRequestTopic::Size(TKafkaVersion _versi
NPrivate::TSizeCollector _collector;
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<PartitionIndexesMeta>(_collector, _version, PartitionIndexes);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -2950,7 +3035,7 @@ i32 TOffsetFetchRequestData::TOffsetFetchRequestTopic::Size(TKafkaVersion _versi
//
const TOffsetFetchRequestData::TOffsetFetchRequestGroup::GroupIdMeta::Type TOffsetFetchRequestData::TOffsetFetchRequestGroup::GroupIdMeta::Default = {""};
-TOffsetFetchRequestData::TOffsetFetchRequestGroup::TOffsetFetchRequestGroup()
+TOffsetFetchRequestData::TOffsetFetchRequestGroup::TOffsetFetchRequestGroup()
: GroupId(GroupIdMeta::Default)
{}
@@ -2960,7 +3045,7 @@ void TOffsetFetchRequestData::TOffsetFetchRequestGroup::Read(TKafkaReadable& _re
}
NPrivate::Read<GroupIdMeta>(_readable, _version, GroupId);
NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -2982,10 +3067,10 @@ void TOffsetFetchRequestData::TOffsetFetchRequestGroup::Write(TKafkaWritable& _w
NPrivate::TWriteCollector _collector;
NPrivate::Write<GroupIdMeta>(_collector, _writable, _version, GroupId);
NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -2993,7 +3078,7 @@ i32 TOffsetFetchRequestData::TOffsetFetchRequestGroup::Size(TKafkaVersion _versi
NPrivate::TSizeCollector _collector;
NPrivate::Size<GroupIdMeta>(_collector, _version, GroupId);
NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3006,7 +3091,7 @@ i32 TOffsetFetchRequestData::TOffsetFetchRequestGroup::Size(TKafkaVersion _versi
//
const TOffsetFetchRequestData::TOffsetFetchRequestGroup::TOffsetFetchRequestTopics::NameMeta::Type TOffsetFetchRequestData::TOffsetFetchRequestGroup::TOffsetFetchRequestTopics::NameMeta::Default = {""};
-TOffsetFetchRequestData::TOffsetFetchRequestGroup::TOffsetFetchRequestTopics::TOffsetFetchRequestTopics()
+TOffsetFetchRequestData::TOffsetFetchRequestGroup::TOffsetFetchRequestTopics::TOffsetFetchRequestTopics()
: Name(NameMeta::Default)
{}
@@ -3016,7 +3101,7 @@ void TOffsetFetchRequestData::TOffsetFetchRequestGroup::TOffsetFetchRequestTopic
}
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<PartitionIndexesMeta>(_readable, _version, PartitionIndexes);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -3038,10 +3123,10 @@ void TOffsetFetchRequestData::TOffsetFetchRequestGroup::TOffsetFetchRequestTopic
NPrivate::TWriteCollector _collector;
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<PartitionIndexesMeta>(_collector, _writable, _version, PartitionIndexes);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -3049,7 +3134,7 @@ i32 TOffsetFetchRequestData::TOffsetFetchRequestGroup::TOffsetFetchRequestTopics
NPrivate::TSizeCollector _collector;
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<PartitionIndexesMeta>(_collector, _version, PartitionIndexes);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3063,7 +3148,7 @@ i32 TOffsetFetchRequestData::TOffsetFetchRequestGroup::TOffsetFetchRequestTopics
const TOffsetFetchResponseData::ThrottleTimeMsMeta::Type TOffsetFetchResponseData::ThrottleTimeMsMeta::Default = 0;
const TOffsetFetchResponseData::ErrorCodeMeta::Type TOffsetFetchResponseData::ErrorCodeMeta::Default = 0;
-TOffsetFetchResponseData::TOffsetFetchResponseData()
+TOffsetFetchResponseData::TOffsetFetchResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
{}
@@ -3076,7 +3161,7 @@ void TOffsetFetchResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _ve
NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
NPrivate::Read<GroupsMeta>(_readable, _version, Groups);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -3100,10 +3185,10 @@ void TOffsetFetchResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _v
NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
NPrivate::Write<GroupsMeta>(_collector, _writable, _version, Groups);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -3113,7 +3198,7 @@ i32 TOffsetFetchResponseData::Size(TKafkaVersion _version) const {
NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
NPrivate::Size<GroupsMeta>(_collector, _version, Groups);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3126,7 +3211,7 @@ i32 TOffsetFetchResponseData::Size(TKafkaVersion _version) const {
//
const TOffsetFetchResponseData::TOffsetFetchResponseTopic::NameMeta::Type TOffsetFetchResponseData::TOffsetFetchResponseTopic::NameMeta::Default = {""};
-TOffsetFetchResponseData::TOffsetFetchResponseTopic::TOffsetFetchResponseTopic()
+TOffsetFetchResponseData::TOffsetFetchResponseTopic::TOffsetFetchResponseTopic()
: Name(NameMeta::Default)
{}
@@ -3136,7 +3221,7 @@ void TOffsetFetchResponseData::TOffsetFetchResponseTopic::Read(TKafkaReadable& _
}
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<PartitionsMeta>(_readable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -3158,10 +3243,10 @@ void TOffsetFetchResponseData::TOffsetFetchResponseTopic::Write(TKafkaWritable&
NPrivate::TWriteCollector _collector;
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<PartitionsMeta>(_collector, _writable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -3169,7 +3254,7 @@ i32 TOffsetFetchResponseData::TOffsetFetchResponseTopic::Size(TKafkaVersion _ver
NPrivate::TSizeCollector _collector;
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<PartitionsMeta>(_collector, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3186,7 +3271,7 @@ const TOffsetFetchResponseData::TOffsetFetchResponseTopic::TOffsetFetchResponseP
const TOffsetFetchResponseData::TOffsetFetchResponseTopic::TOffsetFetchResponsePartition::MetadataMeta::Type TOffsetFetchResponseData::TOffsetFetchResponseTopic::TOffsetFetchResponsePartition::MetadataMeta::Default = {""};
const TOffsetFetchResponseData::TOffsetFetchResponseTopic::TOffsetFetchResponsePartition::ErrorCodeMeta::Type TOffsetFetchResponseData::TOffsetFetchResponseTopic::TOffsetFetchResponsePartition::ErrorCodeMeta::Default = 0;
-TOffsetFetchResponseData::TOffsetFetchResponseTopic::TOffsetFetchResponsePartition::TOffsetFetchResponsePartition()
+TOffsetFetchResponseData::TOffsetFetchResponseTopic::TOffsetFetchResponsePartition::TOffsetFetchResponsePartition()
: PartitionIndex(PartitionIndexMeta::Default)
, CommittedOffset(CommittedOffsetMeta::Default)
, CommittedLeaderEpoch(CommittedLeaderEpochMeta::Default)
@@ -3203,7 +3288,7 @@ void TOffsetFetchResponseData::TOffsetFetchResponseTopic::TOffsetFetchResponsePa
NPrivate::Read<CommittedLeaderEpochMeta>(_readable, _version, CommittedLeaderEpoch);
NPrivate::Read<MetadataMeta>(_readable, _version, Metadata);
NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -3228,10 +3313,10 @@ void TOffsetFetchResponseData::TOffsetFetchResponseTopic::TOffsetFetchResponsePa
NPrivate::Write<CommittedLeaderEpochMeta>(_collector, _writable, _version, CommittedLeaderEpoch);
NPrivate::Write<MetadataMeta>(_collector, _writable, _version, Metadata);
NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -3242,7 +3327,7 @@ i32 TOffsetFetchResponseData::TOffsetFetchResponseTopic::TOffsetFetchResponsePar
NPrivate::Size<CommittedLeaderEpochMeta>(_collector, _version, CommittedLeaderEpoch);
NPrivate::Size<MetadataMeta>(_collector, _version, Metadata);
NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3256,7 +3341,7 @@ i32 TOffsetFetchResponseData::TOffsetFetchResponseTopic::TOffsetFetchResponsePar
const TOffsetFetchResponseData::TOffsetFetchResponseGroup::GroupIdMeta::Type TOffsetFetchResponseData::TOffsetFetchResponseGroup::GroupIdMeta::Default = {""};
const TOffsetFetchResponseData::TOffsetFetchResponseGroup::ErrorCodeMeta::Type TOffsetFetchResponseData::TOffsetFetchResponseGroup::ErrorCodeMeta::Default = 0;
-TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseGroup()
+TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseGroup()
: GroupId(GroupIdMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
{}
@@ -3268,7 +3353,7 @@ void TOffsetFetchResponseData::TOffsetFetchResponseGroup::Read(TKafkaReadable& _
NPrivate::Read<GroupIdMeta>(_readable, _version, GroupId);
NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -3291,10 +3376,10 @@ void TOffsetFetchResponseData::TOffsetFetchResponseGroup::Write(TKafkaWritable&
NPrivate::Write<GroupIdMeta>(_collector, _writable, _version, GroupId);
NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -3303,7 +3388,7 @@ i32 TOffsetFetchResponseData::TOffsetFetchResponseGroup::Size(TKafkaVersion _ver
NPrivate::Size<GroupIdMeta>(_collector, _version, GroupId);
NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3316,7 +3401,7 @@ i32 TOffsetFetchResponseData::TOffsetFetchResponseGroup::Size(TKafkaVersion _ver
//
const TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTopics::NameMeta::Type TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTopics::NameMeta::Default = {""};
-TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTopics::TOffsetFetchResponseTopics()
+TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTopics::TOffsetFetchResponseTopics()
: Name(NameMeta::Default)
{}
@@ -3326,7 +3411,7 @@ void TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTo
}
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<PartitionsMeta>(_readable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -3348,10 +3433,10 @@ void TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTo
NPrivate::TWriteCollector _collector;
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<PartitionsMeta>(_collector, _writable, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -3359,7 +3444,7 @@ i32 TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTop
NPrivate::TSizeCollector _collector;
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<PartitionsMeta>(_collector, _version, Partitions);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3376,7 +3461,7 @@ const TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseT
const TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTopics::TOffsetFetchResponsePartitions::MetadataMeta::Type TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTopics::TOffsetFetchResponsePartitions::MetadataMeta::Default = {""};
const TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTopics::TOffsetFetchResponsePartitions::ErrorCodeMeta::Type TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTopics::TOffsetFetchResponsePartitions::ErrorCodeMeta::Default = 0;
-TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTopics::TOffsetFetchResponsePartitions::TOffsetFetchResponsePartitions()
+TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTopics::TOffsetFetchResponsePartitions::TOffsetFetchResponsePartitions()
: PartitionIndex(PartitionIndexMeta::Default)
, CommittedOffset(CommittedOffsetMeta::Default)
, CommittedLeaderEpoch(CommittedLeaderEpochMeta::Default)
@@ -3393,7 +3478,7 @@ void TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTo
NPrivate::Read<CommittedLeaderEpochMeta>(_readable, _version, CommittedLeaderEpoch);
NPrivate::Read<MetadataMeta>(_readable, _version, Metadata);
NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -3418,10 +3503,10 @@ void TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTo
NPrivate::Write<CommittedLeaderEpochMeta>(_collector, _writable, _version, CommittedLeaderEpoch);
NPrivate::Write<MetadataMeta>(_collector, _writable, _version, Metadata);
NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -3432,7 +3517,7 @@ i32 TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTop
NPrivate::Size<CommittedLeaderEpochMeta>(_collector, _version, CommittedLeaderEpoch);
NPrivate::Size<MetadataMeta>(_collector, _version, Metadata);
NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3446,7 +3531,7 @@ i32 TOffsetFetchResponseData::TOffsetFetchResponseGroup::TOffsetFetchResponseTop
const TFindCoordinatorRequestData::KeyMeta::Type TFindCoordinatorRequestData::KeyMeta::Default = {""};
const TFindCoordinatorRequestData::KeyTypeMeta::Type TFindCoordinatorRequestData::KeyTypeMeta::Default = 0;
-TFindCoordinatorRequestData::TFindCoordinatorRequestData()
+TFindCoordinatorRequestData::TFindCoordinatorRequestData()
: Key(KeyMeta::Default)
, KeyType(KeyTypeMeta::Default)
{}
@@ -3458,7 +3543,7 @@ void TFindCoordinatorRequestData::Read(TKafkaReadable& _readable, TKafkaVersion
NPrivate::Read<KeyMeta>(_readable, _version, Key);
NPrivate::Read<KeyTypeMeta>(_readable, _version, KeyType);
NPrivate::Read<CoordinatorKeysMeta>(_readable, _version, CoordinatorKeys);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -3481,10 +3566,10 @@ void TFindCoordinatorRequestData::Write(TKafkaWritable& _writable, TKafkaVersion
NPrivate::Write<KeyMeta>(_collector, _writable, _version, Key);
NPrivate::Write<KeyTypeMeta>(_collector, _writable, _version, KeyType);
NPrivate::Write<CoordinatorKeysMeta>(_collector, _writable, _version, CoordinatorKeys);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -3493,7 +3578,7 @@ i32 TFindCoordinatorRequestData::Size(TKafkaVersion _version) const {
NPrivate::Size<KeyMeta>(_collector, _version, Key);
NPrivate::Size<KeyTypeMeta>(_collector, _version, KeyType);
NPrivate::Size<CoordinatorKeysMeta>(_collector, _version, CoordinatorKeys);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3511,7 +3596,7 @@ const TFindCoordinatorResponseData::NodeIdMeta::Type TFindCoordinatorResponseDat
const TFindCoordinatorResponseData::HostMeta::Type TFindCoordinatorResponseData::HostMeta::Default = {""};
const TFindCoordinatorResponseData::PortMeta::Type TFindCoordinatorResponseData::PortMeta::Default = 0;
-TFindCoordinatorResponseData::TFindCoordinatorResponseData()
+TFindCoordinatorResponseData::TFindCoordinatorResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
, ErrorMessage(ErrorMessageMeta::Default)
@@ -3531,7 +3616,7 @@ void TFindCoordinatorResponseData::Read(TKafkaReadable& _readable, TKafkaVersion
NPrivate::Read<HostMeta>(_readable, _version, Host);
NPrivate::Read<PortMeta>(_readable, _version, Port);
NPrivate::Read<CoordinatorsMeta>(_readable, _version, Coordinators);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -3558,10 +3643,10 @@ void TFindCoordinatorResponseData::Write(TKafkaWritable& _writable, TKafkaVersio
NPrivate::Write<HostMeta>(_collector, _writable, _version, Host);
NPrivate::Write<PortMeta>(_collector, _writable, _version, Port);
NPrivate::Write<CoordinatorsMeta>(_collector, _writable, _version, Coordinators);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -3574,7 +3659,7 @@ i32 TFindCoordinatorResponseData::Size(TKafkaVersion _version) const {
NPrivate::Size<HostMeta>(_collector, _version, Host);
NPrivate::Size<PortMeta>(_collector, _version, Port);
NPrivate::Size<CoordinatorsMeta>(_collector, _version, Coordinators);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3592,7 +3677,7 @@ const TFindCoordinatorResponseData::TCoordinator::PortMeta::Type TFindCoordinato
const TFindCoordinatorResponseData::TCoordinator::ErrorCodeMeta::Type TFindCoordinatorResponseData::TCoordinator::ErrorCodeMeta::Default = 0;
const TFindCoordinatorResponseData::TCoordinator::ErrorMessageMeta::Type TFindCoordinatorResponseData::TCoordinator::ErrorMessageMeta::Default = {""};
-TFindCoordinatorResponseData::TCoordinator::TCoordinator()
+TFindCoordinatorResponseData::TCoordinator::TCoordinator()
: Key(KeyMeta::Default)
, NodeId(NodeIdMeta::Default)
, Host(HostMeta::Default)
@@ -3611,7 +3696,7 @@ void TFindCoordinatorResponseData::TCoordinator::Read(TKafkaReadable& _readable,
NPrivate::Read<PortMeta>(_readable, _version, Port);
NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
NPrivate::Read<ErrorMessageMeta>(_readable, _version, ErrorMessage);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -3637,10 +3722,10 @@ void TFindCoordinatorResponseData::TCoordinator::Write(TKafkaWritable& _writable
NPrivate::Write<PortMeta>(_collector, _writable, _version, Port);
NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
NPrivate::Write<ErrorMessageMeta>(_collector, _writable, _version, ErrorMessage);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -3652,7 +3737,7 @@ i32 TFindCoordinatorResponseData::TCoordinator::Size(TKafkaVersion _version) con
NPrivate::Size<PortMeta>(_collector, _version, Port);
NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
NPrivate::Size<ErrorMessageMeta>(_collector, _version, ErrorMessage);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3671,7 +3756,7 @@ const TJoinGroupRequestData::GroupInstanceIdMeta::Type TJoinGroupRequestData::Gr
const TJoinGroupRequestData::ProtocolTypeMeta::Type TJoinGroupRequestData::ProtocolTypeMeta::Default = {""};
const TJoinGroupRequestData::ReasonMeta::Type TJoinGroupRequestData::ReasonMeta::Default = std::nullopt;
-TJoinGroupRequestData::TJoinGroupRequestData()
+TJoinGroupRequestData::TJoinGroupRequestData()
: GroupId(GroupIdMeta::Default)
, SessionTimeoutMs(SessionTimeoutMsMeta::Default)
, RebalanceTimeoutMs(RebalanceTimeoutMsMeta::Default)
@@ -3693,7 +3778,7 @@ void TJoinGroupRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _versi
NPrivate::Read<ProtocolTypeMeta>(_readable, _version, ProtocolType);
NPrivate::Read<ProtocolsMeta>(_readable, _version, Protocols);
NPrivate::Read<ReasonMeta>(_readable, _version, Reason);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -3721,10 +3806,10 @@ void TJoinGroupRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _vers
NPrivate::Write<ProtocolTypeMeta>(_collector, _writable, _version, ProtocolType);
NPrivate::Write<ProtocolsMeta>(_collector, _writable, _version, Protocols);
NPrivate::Write<ReasonMeta>(_collector, _writable, _version, Reason);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -3738,7 +3823,7 @@ i32 TJoinGroupRequestData::Size(TKafkaVersion _version) const {
NPrivate::Size<ProtocolTypeMeta>(_collector, _version, ProtocolType);
NPrivate::Size<ProtocolsMeta>(_collector, _version, Protocols);
NPrivate::Size<ReasonMeta>(_collector, _version, Reason);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3751,7 +3836,7 @@ i32 TJoinGroupRequestData::Size(TKafkaVersion _version) const {
//
const TJoinGroupRequestData::TJoinGroupRequestProtocol::NameMeta::Type TJoinGroupRequestData::TJoinGroupRequestProtocol::NameMeta::Default = {""};
-TJoinGroupRequestData::TJoinGroupRequestProtocol::TJoinGroupRequestProtocol()
+TJoinGroupRequestData::TJoinGroupRequestProtocol::TJoinGroupRequestProtocol()
: Name(NameMeta::Default)
{}
@@ -3761,7 +3846,7 @@ void TJoinGroupRequestData::TJoinGroupRequestProtocol::Read(TKafkaReadable& _rea
}
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<MetadataMeta>(_readable, _version, Metadata);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -3783,10 +3868,10 @@ void TJoinGroupRequestData::TJoinGroupRequestProtocol::Write(TKafkaWritable& _wr
NPrivate::TWriteCollector _collector;
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<MetadataMeta>(_collector, _writable, _version, Metadata);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -3794,7 +3879,7 @@ i32 TJoinGroupRequestData::TJoinGroupRequestProtocol::Size(TKafkaVersion _versio
NPrivate::TSizeCollector _collector;
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<MetadataMeta>(_collector, _version, Metadata);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3814,7 +3899,7 @@ const TJoinGroupResponseData::LeaderMeta::Type TJoinGroupResponseData::LeaderMet
const TJoinGroupResponseData::SkipAssignmentMeta::Type TJoinGroupResponseData::SkipAssignmentMeta::Default = false;
const TJoinGroupResponseData::MemberIdMeta::Type TJoinGroupResponseData::MemberIdMeta::Default = {""};
-TJoinGroupResponseData::TJoinGroupResponseData()
+TJoinGroupResponseData::TJoinGroupResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
, GenerationId(GenerationIdMeta::Default)
@@ -3838,7 +3923,7 @@ void TJoinGroupResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _vers
NPrivate::Read<SkipAssignmentMeta>(_readable, _version, SkipAssignment);
NPrivate::Read<MemberIdMeta>(_readable, _version, MemberId);
NPrivate::Read<MembersMeta>(_readable, _version, Members);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -3867,10 +3952,10 @@ void TJoinGroupResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _ver
NPrivate::Write<SkipAssignmentMeta>(_collector, _writable, _version, SkipAssignment);
NPrivate::Write<MemberIdMeta>(_collector, _writable, _version, MemberId);
NPrivate::Write<MembersMeta>(_collector, _writable, _version, Members);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -3885,7 +3970,7 @@ i32 TJoinGroupResponseData::Size(TKafkaVersion _version) const {
NPrivate::Size<SkipAssignmentMeta>(_collector, _version, SkipAssignment);
NPrivate::Size<MemberIdMeta>(_collector, _version, MemberId);
NPrivate::Size<MembersMeta>(_collector, _version, Members);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3899,7 +3984,7 @@ i32 TJoinGroupResponseData::Size(TKafkaVersion _version) const {
const TJoinGroupResponseData::TJoinGroupResponseMember::MemberIdMeta::Type TJoinGroupResponseData::TJoinGroupResponseMember::MemberIdMeta::Default = {""};
const TJoinGroupResponseData::TJoinGroupResponseMember::GroupInstanceIdMeta::Type TJoinGroupResponseData::TJoinGroupResponseMember::GroupInstanceIdMeta::Default = std::nullopt;
-TJoinGroupResponseData::TJoinGroupResponseMember::TJoinGroupResponseMember()
+TJoinGroupResponseData::TJoinGroupResponseMember::TJoinGroupResponseMember()
: MemberId(MemberIdMeta::Default)
, GroupInstanceId(GroupInstanceIdMeta::Default)
{}
@@ -3911,7 +3996,7 @@ void TJoinGroupResponseData::TJoinGroupResponseMember::Read(TKafkaReadable& _rea
NPrivate::Read<MemberIdMeta>(_readable, _version, MemberId);
NPrivate::Read<GroupInstanceIdMeta>(_readable, _version, GroupInstanceId);
NPrivate::Read<MetadataMeta>(_readable, _version, Metadata);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -3934,10 +4019,10 @@ void TJoinGroupResponseData::TJoinGroupResponseMember::Write(TKafkaWritable& _wr
NPrivate::Write<MemberIdMeta>(_collector, _writable, _version, MemberId);
NPrivate::Write<GroupInstanceIdMeta>(_collector, _writable, _version, GroupInstanceId);
NPrivate::Write<MetadataMeta>(_collector, _writable, _version, Metadata);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -3946,7 +4031,7 @@ i32 TJoinGroupResponseData::TJoinGroupResponseMember::Size(TKafkaVersion _versio
NPrivate::Size<MemberIdMeta>(_collector, _version, MemberId);
NPrivate::Size<GroupInstanceIdMeta>(_collector, _version, GroupInstanceId);
NPrivate::Size<MetadataMeta>(_collector, _version, Metadata);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -3962,7 +4047,7 @@ const THeartbeatRequestData::GenerationIdMeta::Type THeartbeatRequestData::Gener
const THeartbeatRequestData::MemberIdMeta::Type THeartbeatRequestData::MemberIdMeta::Default = {""};
const THeartbeatRequestData::GroupInstanceIdMeta::Type THeartbeatRequestData::GroupInstanceIdMeta::Default = std::nullopt;
-THeartbeatRequestData::THeartbeatRequestData()
+THeartbeatRequestData::THeartbeatRequestData()
: GroupId(GroupIdMeta::Default)
, GenerationId(GenerationIdMeta::Default)
, MemberId(MemberIdMeta::Default)
@@ -3977,7 +4062,7 @@ void THeartbeatRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _versi
NPrivate::Read<GenerationIdMeta>(_readable, _version, GenerationId);
NPrivate::Read<MemberIdMeta>(_readable, _version, MemberId);
NPrivate::Read<GroupInstanceIdMeta>(_readable, _version, GroupInstanceId);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4001,10 +4086,10 @@ void THeartbeatRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _vers
NPrivate::Write<GenerationIdMeta>(_collector, _writable, _version, GenerationId);
NPrivate::Write<MemberIdMeta>(_collector, _writable, _version, MemberId);
NPrivate::Write<GroupInstanceIdMeta>(_collector, _writable, _version, GroupInstanceId);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -4014,7 +4099,7 @@ i32 THeartbeatRequestData::Size(TKafkaVersion _version) const {
NPrivate::Size<GenerationIdMeta>(_collector, _version, GenerationId);
NPrivate::Size<MemberIdMeta>(_collector, _version, MemberId);
NPrivate::Size<GroupInstanceIdMeta>(_collector, _version, GroupInstanceId);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4028,7 +4113,7 @@ i32 THeartbeatRequestData::Size(TKafkaVersion _version) const {
const THeartbeatResponseData::ThrottleTimeMsMeta::Type THeartbeatResponseData::ThrottleTimeMsMeta::Default = 0;
const THeartbeatResponseData::ErrorCodeMeta::Type THeartbeatResponseData::ErrorCodeMeta::Default = 0;
-THeartbeatResponseData::THeartbeatResponseData()
+THeartbeatResponseData::THeartbeatResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
{}
@@ -4039,7 +4124,7 @@ void THeartbeatResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _vers
}
NPrivate::Read<ThrottleTimeMsMeta>(_readable, _version, ThrottleTimeMs);
NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4061,10 +4146,10 @@ void THeartbeatResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _ver
NPrivate::TWriteCollector _collector;
NPrivate::Write<ThrottleTimeMsMeta>(_collector, _writable, _version, ThrottleTimeMs);
NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -4072,7 +4157,7 @@ i32 THeartbeatResponseData::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<ThrottleTimeMsMeta>(_collector, _version, ThrottleTimeMs);
NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4086,7 +4171,7 @@ i32 THeartbeatResponseData::Size(TKafkaVersion _version) const {
const TLeaveGroupRequestData::GroupIdMeta::Type TLeaveGroupRequestData::GroupIdMeta::Default = {""};
const TLeaveGroupRequestData::MemberIdMeta::Type TLeaveGroupRequestData::MemberIdMeta::Default = {""};
-TLeaveGroupRequestData::TLeaveGroupRequestData()
+TLeaveGroupRequestData::TLeaveGroupRequestData()
: GroupId(GroupIdMeta::Default)
, MemberId(MemberIdMeta::Default)
{}
@@ -4098,7 +4183,7 @@ void TLeaveGroupRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _vers
NPrivate::Read<GroupIdMeta>(_readable, _version, GroupId);
NPrivate::Read<MemberIdMeta>(_readable, _version, MemberId);
NPrivate::Read<MembersMeta>(_readable, _version, Members);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4121,10 +4206,10 @@ void TLeaveGroupRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _ver
NPrivate::Write<GroupIdMeta>(_collector, _writable, _version, GroupId);
NPrivate::Write<MemberIdMeta>(_collector, _writable, _version, MemberId);
NPrivate::Write<MembersMeta>(_collector, _writable, _version, Members);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -4133,7 +4218,7 @@ i32 TLeaveGroupRequestData::Size(TKafkaVersion _version) const {
NPrivate::Size<GroupIdMeta>(_collector, _version, GroupId);
NPrivate::Size<MemberIdMeta>(_collector, _version, MemberId);
NPrivate::Size<MembersMeta>(_collector, _version, Members);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4148,7 +4233,7 @@ const TLeaveGroupRequestData::TMemberIdentity::MemberIdMeta::Type TLeaveGroupReq
const TLeaveGroupRequestData::TMemberIdentity::GroupInstanceIdMeta::Type TLeaveGroupRequestData::TMemberIdentity::GroupInstanceIdMeta::Default = std::nullopt;
const TLeaveGroupRequestData::TMemberIdentity::ReasonMeta::Type TLeaveGroupRequestData::TMemberIdentity::ReasonMeta::Default = std::nullopt;
-TLeaveGroupRequestData::TMemberIdentity::TMemberIdentity()
+TLeaveGroupRequestData::TMemberIdentity::TMemberIdentity()
: MemberId(MemberIdMeta::Default)
, GroupInstanceId(GroupInstanceIdMeta::Default)
, Reason(ReasonMeta::Default)
@@ -4161,7 +4246,7 @@ void TLeaveGroupRequestData::TMemberIdentity::Read(TKafkaReadable& _readable, TK
NPrivate::Read<MemberIdMeta>(_readable, _version, MemberId);
NPrivate::Read<GroupInstanceIdMeta>(_readable, _version, GroupInstanceId);
NPrivate::Read<ReasonMeta>(_readable, _version, Reason);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4184,10 +4269,10 @@ void TLeaveGroupRequestData::TMemberIdentity::Write(TKafkaWritable& _writable, T
NPrivate::Write<MemberIdMeta>(_collector, _writable, _version, MemberId);
NPrivate::Write<GroupInstanceIdMeta>(_collector, _writable, _version, GroupInstanceId);
NPrivate::Write<ReasonMeta>(_collector, _writable, _version, Reason);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -4196,7 +4281,7 @@ i32 TLeaveGroupRequestData::TMemberIdentity::Size(TKafkaVersion _version) const
NPrivate::Size<MemberIdMeta>(_collector, _version, MemberId);
NPrivate::Size<GroupInstanceIdMeta>(_collector, _version, GroupInstanceId);
NPrivate::Size<ReasonMeta>(_collector, _version, Reason);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4210,7 +4295,7 @@ i32 TLeaveGroupRequestData::TMemberIdentity::Size(TKafkaVersion _version) const
const TLeaveGroupResponseData::ThrottleTimeMsMeta::Type TLeaveGroupResponseData::ThrottleTimeMsMeta::Default = 0;
const TLeaveGroupResponseData::ErrorCodeMeta::Type TLeaveGroupResponseData::ErrorCodeMeta::Default = 0;
-TLeaveGroupResponseData::TLeaveGroupResponseData()
+TLeaveGroupResponseData::TLeaveGroupResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
{}
@@ -4222,7 +4307,7 @@ void TLeaveGroupResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _ver
NPrivate::Read<ThrottleTimeMsMeta>(_readable, _version, ThrottleTimeMs);
NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
NPrivate::Read<MembersMeta>(_readable, _version, Members);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4245,10 +4330,10 @@ void TLeaveGroupResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _ve
NPrivate::Write<ThrottleTimeMsMeta>(_collector, _writable, _version, ThrottleTimeMs);
NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
NPrivate::Write<MembersMeta>(_collector, _writable, _version, Members);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -4257,7 +4342,7 @@ i32 TLeaveGroupResponseData::Size(TKafkaVersion _version) const {
NPrivate::Size<ThrottleTimeMsMeta>(_collector, _version, ThrottleTimeMs);
NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
NPrivate::Size<MembersMeta>(_collector, _version, Members);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4272,7 +4357,7 @@ const TLeaveGroupResponseData::TMemberResponse::MemberIdMeta::Type TLeaveGroupRe
const TLeaveGroupResponseData::TMemberResponse::GroupInstanceIdMeta::Type TLeaveGroupResponseData::TMemberResponse::GroupInstanceIdMeta::Default = {""};
const TLeaveGroupResponseData::TMemberResponse::ErrorCodeMeta::Type TLeaveGroupResponseData::TMemberResponse::ErrorCodeMeta::Default = 0;
-TLeaveGroupResponseData::TMemberResponse::TMemberResponse()
+TLeaveGroupResponseData::TMemberResponse::TMemberResponse()
: MemberId(MemberIdMeta::Default)
, GroupInstanceId(GroupInstanceIdMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
@@ -4285,7 +4370,7 @@ void TLeaveGroupResponseData::TMemberResponse::Read(TKafkaReadable& _readable, T
NPrivate::Read<MemberIdMeta>(_readable, _version, MemberId);
NPrivate::Read<GroupInstanceIdMeta>(_readable, _version, GroupInstanceId);
NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4308,10 +4393,10 @@ void TLeaveGroupResponseData::TMemberResponse::Write(TKafkaWritable& _writable,
NPrivate::Write<MemberIdMeta>(_collector, _writable, _version, MemberId);
NPrivate::Write<GroupInstanceIdMeta>(_collector, _writable, _version, GroupInstanceId);
NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -4320,7 +4405,7 @@ i32 TLeaveGroupResponseData::TMemberResponse::Size(TKafkaVersion _version) const
NPrivate::Size<MemberIdMeta>(_collector, _version, MemberId);
NPrivate::Size<GroupInstanceIdMeta>(_collector, _version, GroupInstanceId);
NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4338,7 +4423,7 @@ const TSyncGroupRequestData::GroupInstanceIdMeta::Type TSyncGroupRequestData::Gr
const TSyncGroupRequestData::ProtocolTypeMeta::Type TSyncGroupRequestData::ProtocolTypeMeta::Default = std::nullopt;
const TSyncGroupRequestData::ProtocolNameMeta::Type TSyncGroupRequestData::ProtocolNameMeta::Default = std::nullopt;
-TSyncGroupRequestData::TSyncGroupRequestData()
+TSyncGroupRequestData::TSyncGroupRequestData()
: GroupId(GroupIdMeta::Default)
, GenerationId(GenerationIdMeta::Default)
, MemberId(MemberIdMeta::Default)
@@ -4358,7 +4443,7 @@ void TSyncGroupRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _versi
NPrivate::Read<ProtocolTypeMeta>(_readable, _version, ProtocolType);
NPrivate::Read<ProtocolNameMeta>(_readable, _version, ProtocolName);
NPrivate::Read<AssignmentsMeta>(_readable, _version, Assignments);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4385,10 +4470,10 @@ void TSyncGroupRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _vers
NPrivate::Write<ProtocolTypeMeta>(_collector, _writable, _version, ProtocolType);
NPrivate::Write<ProtocolNameMeta>(_collector, _writable, _version, ProtocolName);
NPrivate::Write<AssignmentsMeta>(_collector, _writable, _version, Assignments);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -4401,7 +4486,7 @@ i32 TSyncGroupRequestData::Size(TKafkaVersion _version) const {
NPrivate::Size<ProtocolTypeMeta>(_collector, _version, ProtocolType);
NPrivate::Size<ProtocolNameMeta>(_collector, _version, ProtocolName);
NPrivate::Size<AssignmentsMeta>(_collector, _version, Assignments);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4414,7 +4499,7 @@ i32 TSyncGroupRequestData::Size(TKafkaVersion _version) const {
//
const TSyncGroupRequestData::TSyncGroupRequestAssignment::MemberIdMeta::Type TSyncGroupRequestData::TSyncGroupRequestAssignment::MemberIdMeta::Default = {""};
-TSyncGroupRequestData::TSyncGroupRequestAssignment::TSyncGroupRequestAssignment()
+TSyncGroupRequestData::TSyncGroupRequestAssignment::TSyncGroupRequestAssignment()
: MemberId(MemberIdMeta::Default)
{}
@@ -4424,7 +4509,7 @@ void TSyncGroupRequestData::TSyncGroupRequestAssignment::Read(TKafkaReadable& _r
}
NPrivate::Read<MemberIdMeta>(_readable, _version, MemberId);
NPrivate::Read<AssignmentMeta>(_readable, _version, Assignment);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4446,10 +4531,10 @@ void TSyncGroupRequestData::TSyncGroupRequestAssignment::Write(TKafkaWritable& _
NPrivate::TWriteCollector _collector;
NPrivate::Write<MemberIdMeta>(_collector, _writable, _version, MemberId);
NPrivate::Write<AssignmentMeta>(_collector, _writable, _version, Assignment);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -4457,7 +4542,7 @@ i32 TSyncGroupRequestData::TSyncGroupRequestAssignment::Size(TKafkaVersion _vers
NPrivate::TSizeCollector _collector;
NPrivate::Size<MemberIdMeta>(_collector, _version, MemberId);
NPrivate::Size<AssignmentMeta>(_collector, _version, Assignment);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4473,7 +4558,7 @@ const TSyncGroupResponseData::ErrorCodeMeta::Type TSyncGroupResponseData::ErrorC
const TSyncGroupResponseData::ProtocolTypeMeta::Type TSyncGroupResponseData::ProtocolTypeMeta::Default = std::nullopt;
const TSyncGroupResponseData::ProtocolNameMeta::Type TSyncGroupResponseData::ProtocolNameMeta::Default = std::nullopt;
-TSyncGroupResponseData::TSyncGroupResponseData()
+TSyncGroupResponseData::TSyncGroupResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
, ProtocolType(ProtocolTypeMeta::Default)
@@ -4489,7 +4574,7 @@ void TSyncGroupResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _vers
NPrivate::Read<ProtocolTypeMeta>(_readable, _version, ProtocolType);
NPrivate::Read<ProtocolNameMeta>(_readable, _version, ProtocolName);
NPrivate::Read<AssignmentMeta>(_readable, _version, Assignment);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4514,10 +4599,10 @@ void TSyncGroupResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _ver
NPrivate::Write<ProtocolTypeMeta>(_collector, _writable, _version, ProtocolType);
NPrivate::Write<ProtocolNameMeta>(_collector, _writable, _version, ProtocolName);
NPrivate::Write<AssignmentMeta>(_collector, _writable, _version, Assignment);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -4528,19 +4613,20 @@ i32 TSyncGroupResponseData::Size(TKafkaVersion _version) const {
NPrivate::Size<ProtocolTypeMeta>(_collector, _version, ProtocolType);
NPrivate::Size<ProtocolNameMeta>(_collector, _version, ProtocolName);
NPrivate::Size<AssignmentMeta>(_collector, _version, Assignment);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
return _collector.Size;
}
+
//
// TSaslHandshakeRequestData
//
const TSaslHandshakeRequestData::MechanismMeta::Type TSaslHandshakeRequestData::MechanismMeta::Default = {""};
-TSaslHandshakeRequestData::TSaslHandshakeRequestData()
+TSaslHandshakeRequestData::TSaslHandshakeRequestData()
: Mechanism(MechanismMeta::Default)
{}
@@ -4549,7 +4635,7 @@ void TSaslHandshakeRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _v
ythrow yexception() << "Can't read version " << _version << " of TSaslHandshakeRequestData";
}
NPrivate::Read<MechanismMeta>(_readable, _version, Mechanism);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4570,17 +4656,17 @@ void TSaslHandshakeRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _
}
NPrivate::TWriteCollector _collector;
NPrivate::Write<MechanismMeta>(_collector, _writable, _version, Mechanism);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
i32 TSaslHandshakeRequestData::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<MechanismMeta>(_collector, _version, Mechanism);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4593,7 +4679,7 @@ i32 TSaslHandshakeRequestData::Size(TKafkaVersion _version) const {
//
const TSaslHandshakeResponseData::ErrorCodeMeta::Type TSaslHandshakeResponseData::ErrorCodeMeta::Default = 0;
-TSaslHandshakeResponseData::TSaslHandshakeResponseData()
+TSaslHandshakeResponseData::TSaslHandshakeResponseData()
: ErrorCode(ErrorCodeMeta::Default)
{}
@@ -4603,7 +4689,7 @@ void TSaslHandshakeResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _
}
NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
NPrivate::Read<MechanismsMeta>(_readable, _version, Mechanisms);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4625,10 +4711,10 @@ void TSaslHandshakeResponseData::Write(TKafkaWritable& _writable, TKafkaVersion
NPrivate::TWriteCollector _collector;
NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
NPrivate::Write<MechanismsMeta>(_collector, _writable, _version, Mechanisms);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -4636,7 +4722,7 @@ i32 TSaslHandshakeResponseData::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
NPrivate::Size<MechanismsMeta>(_collector, _version, Mechanisms);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4650,7 +4736,7 @@ i32 TSaslHandshakeResponseData::Size(TKafkaVersion _version) const {
const TApiVersionsRequestData::ClientSoftwareNameMeta::Type TApiVersionsRequestData::ClientSoftwareNameMeta::Default = {""};
const TApiVersionsRequestData::ClientSoftwareVersionMeta::Type TApiVersionsRequestData::ClientSoftwareVersionMeta::Default = {""};
-TApiVersionsRequestData::TApiVersionsRequestData()
+TApiVersionsRequestData::TApiVersionsRequestData()
: ClientSoftwareName(ClientSoftwareNameMeta::Default)
, ClientSoftwareVersion(ClientSoftwareVersionMeta::Default)
{}
@@ -4661,7 +4747,7 @@ void TApiVersionsRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _ver
}
NPrivate::Read<ClientSoftwareNameMeta>(_readable, _version, ClientSoftwareName);
NPrivate::Read<ClientSoftwareVersionMeta>(_readable, _version, ClientSoftwareVersion);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4683,10 +4769,10 @@ void TApiVersionsRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _ve
NPrivate::TWriteCollector _collector;
NPrivate::Write<ClientSoftwareNameMeta>(_collector, _writable, _version, ClientSoftwareName);
NPrivate::Write<ClientSoftwareVersionMeta>(_collector, _writable, _version, ClientSoftwareVersion);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -4694,7 +4780,7 @@ i32 TApiVersionsRequestData::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<ClientSoftwareNameMeta>(_collector, _version, ClientSoftwareName);
NPrivate::Size<ClientSoftwareVersionMeta>(_collector, _version, ClientSoftwareVersion);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4710,7 +4796,7 @@ const TApiVersionsResponseData::ThrottleTimeMsMeta::Type TApiVersionsResponseDat
const TApiVersionsResponseData::FinalizedFeaturesEpochMeta::Type TApiVersionsResponseData::FinalizedFeaturesEpochMeta::Default = -1;
const TApiVersionsResponseData::ZkMigrationReadyMeta::Type TApiVersionsResponseData::ZkMigrationReadyMeta::Default = false;
-TApiVersionsResponseData::TApiVersionsResponseData()
+TApiVersionsResponseData::TApiVersionsResponseData()
: ErrorCode(ErrorCodeMeta::Default)
, ThrottleTimeMs(ThrottleTimeMsMeta::Default)
, FinalizedFeaturesEpoch(FinalizedFeaturesEpochMeta::Default)
@@ -4728,7 +4814,7 @@ void TApiVersionsResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _ve
NPrivate::Read<FinalizedFeaturesEpochMeta>(_readable, _version, FinalizedFeaturesEpoch);
NPrivate::Read<FinalizedFeaturesMeta>(_readable, _version, FinalizedFeatures);
NPrivate::Read<ZkMigrationReadyMeta>(_readable, _version, ZkMigrationReady);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4767,10 +4853,10 @@ void TApiVersionsResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _v
NPrivate::Write<FinalizedFeaturesEpochMeta>(_collector, _writable, _version, FinalizedFeaturesEpoch);
NPrivate::Write<FinalizedFeaturesMeta>(_collector, _writable, _version, FinalizedFeatures);
NPrivate::Write<ZkMigrationReadyMeta>(_collector, _writable, _version, ZkMigrationReady);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
NPrivate::WriteTag<SupportedFeaturesMeta>(_writable, _version, SupportedFeatures);
NPrivate::WriteTag<FinalizedFeaturesEpochMeta>(_writable, _version, FinalizedFeaturesEpoch);
NPrivate::WriteTag<FinalizedFeaturesMeta>(_writable, _version, FinalizedFeatures);
@@ -4787,7 +4873,7 @@ i32 TApiVersionsResponseData::Size(TKafkaVersion _version) const {
NPrivate::Size<FinalizedFeaturesEpochMeta>(_collector, _version, FinalizedFeaturesEpoch);
NPrivate::Size<FinalizedFeaturesMeta>(_collector, _version, FinalizedFeatures);
NPrivate::Size<ZkMigrationReadyMeta>(_collector, _version, ZkMigrationReady);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4802,7 +4888,7 @@ const TApiVersionsResponseData::TApiVersion::ApiKeyMeta::Type TApiVersionsRespon
const TApiVersionsResponseData::TApiVersion::MinVersionMeta::Type TApiVersionsResponseData::TApiVersion::MinVersionMeta::Default = 0;
const TApiVersionsResponseData::TApiVersion::MaxVersionMeta::Type TApiVersionsResponseData::TApiVersion::MaxVersionMeta::Default = 0;
-TApiVersionsResponseData::TApiVersion::TApiVersion()
+TApiVersionsResponseData::TApiVersion::TApiVersion()
: ApiKey(ApiKeyMeta::Default)
, MinVersion(MinVersionMeta::Default)
, MaxVersion(MaxVersionMeta::Default)
@@ -4815,7 +4901,7 @@ void TApiVersionsResponseData::TApiVersion::Read(TKafkaReadable& _readable, TKaf
NPrivate::Read<ApiKeyMeta>(_readable, _version, ApiKey);
NPrivate::Read<MinVersionMeta>(_readable, _version, MinVersion);
NPrivate::Read<MaxVersionMeta>(_readable, _version, MaxVersion);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4838,10 +4924,10 @@ void TApiVersionsResponseData::TApiVersion::Write(TKafkaWritable& _writable, TKa
NPrivate::Write<ApiKeyMeta>(_collector, _writable, _version, ApiKey);
NPrivate::Write<MinVersionMeta>(_collector, _writable, _version, MinVersion);
NPrivate::Write<MaxVersionMeta>(_collector, _writable, _version, MaxVersion);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -4850,7 +4936,7 @@ i32 TApiVersionsResponseData::TApiVersion::Size(TKafkaVersion _version) const {
NPrivate::Size<ApiKeyMeta>(_collector, _version, ApiKey);
NPrivate::Size<MinVersionMeta>(_collector, _version, MinVersion);
NPrivate::Size<MaxVersionMeta>(_collector, _version, MaxVersion);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4865,7 +4951,7 @@ const TApiVersionsResponseData::TSupportedFeatureKey::NameMeta::Type TApiVersion
const TApiVersionsResponseData::TSupportedFeatureKey::MinVersionMeta::Type TApiVersionsResponseData::TSupportedFeatureKey::MinVersionMeta::Default = 0;
const TApiVersionsResponseData::TSupportedFeatureKey::MaxVersionMeta::Type TApiVersionsResponseData::TSupportedFeatureKey::MaxVersionMeta::Default = 0;
-TApiVersionsResponseData::TSupportedFeatureKey::TSupportedFeatureKey()
+TApiVersionsResponseData::TSupportedFeatureKey::TSupportedFeatureKey()
: Name(NameMeta::Default)
, MinVersion(MinVersionMeta::Default)
, MaxVersion(MaxVersionMeta::Default)
@@ -4878,7 +4964,7 @@ void TApiVersionsResponseData::TSupportedFeatureKey::Read(TKafkaReadable& _reada
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<MinVersionMeta>(_readable, _version, MinVersion);
NPrivate::Read<MaxVersionMeta>(_readable, _version, MaxVersion);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4901,10 +4987,10 @@ void TApiVersionsResponseData::TSupportedFeatureKey::Write(TKafkaWritable& _writ
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<MinVersionMeta>(_collector, _writable, _version, MinVersion);
NPrivate::Write<MaxVersionMeta>(_collector, _writable, _version, MaxVersion);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -4913,7 +4999,7 @@ i32 TApiVersionsResponseData::TSupportedFeatureKey::Size(TKafkaVersion _version)
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<MinVersionMeta>(_collector, _version, MinVersion);
NPrivate::Size<MaxVersionMeta>(_collector, _version, MaxVersion);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4928,7 +5014,7 @@ const TApiVersionsResponseData::TFinalizedFeatureKey::NameMeta::Type TApiVersion
const TApiVersionsResponseData::TFinalizedFeatureKey::MaxVersionLevelMeta::Type TApiVersionsResponseData::TFinalizedFeatureKey::MaxVersionLevelMeta::Default = 0;
const TApiVersionsResponseData::TFinalizedFeatureKey::MinVersionLevelMeta::Type TApiVersionsResponseData::TFinalizedFeatureKey::MinVersionLevelMeta::Default = 0;
-TApiVersionsResponseData::TFinalizedFeatureKey::TFinalizedFeatureKey()
+TApiVersionsResponseData::TFinalizedFeatureKey::TFinalizedFeatureKey()
: Name(NameMeta::Default)
, MaxVersionLevel(MaxVersionLevelMeta::Default)
, MinVersionLevel(MinVersionLevelMeta::Default)
@@ -4941,7 +5027,7 @@ void TApiVersionsResponseData::TFinalizedFeatureKey::Read(TKafkaReadable& _reada
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<MaxVersionLevelMeta>(_readable, _version, MaxVersionLevel);
NPrivate::Read<MinVersionLevelMeta>(_readable, _version, MinVersionLevel);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -4964,10 +5050,10 @@ void TApiVersionsResponseData::TFinalizedFeatureKey::Write(TKafkaWritable& _writ
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<MaxVersionLevelMeta>(_collector, _writable, _version, MaxVersionLevel);
NPrivate::Write<MinVersionLevelMeta>(_collector, _writable, _version, MinVersionLevel);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -4976,7 +5062,7 @@ i32 TApiVersionsResponseData::TFinalizedFeatureKey::Size(TKafkaVersion _version)
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<MaxVersionLevelMeta>(_collector, _version, MaxVersionLevel);
NPrivate::Size<MinVersionLevelMeta>(_collector, _version, MinVersionLevel);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -4990,7 +5076,7 @@ i32 TApiVersionsResponseData::TFinalizedFeatureKey::Size(TKafkaVersion _version)
const TCreateTopicsRequestData::TimeoutMsMeta::Type TCreateTopicsRequestData::TimeoutMsMeta::Default = 60000;
const TCreateTopicsRequestData::ValidateOnlyMeta::Type TCreateTopicsRequestData::ValidateOnlyMeta::Default = false;
-TCreateTopicsRequestData::TCreateTopicsRequestData()
+TCreateTopicsRequestData::TCreateTopicsRequestData()
: TimeoutMs(TimeoutMsMeta::Default)
, ValidateOnly(ValidateOnlyMeta::Default)
{}
@@ -5002,7 +5088,7 @@ void TCreateTopicsRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _ve
NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
NPrivate::Read<TimeoutMsMeta>(_readable, _version, TimeoutMs);
NPrivate::Read<ValidateOnlyMeta>(_readable, _version, ValidateOnly);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5025,10 +5111,10 @@ void TCreateTopicsRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _v
NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
NPrivate::Write<TimeoutMsMeta>(_collector, _writable, _version, TimeoutMs);
NPrivate::Write<ValidateOnlyMeta>(_collector, _writable, _version, ValidateOnly);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -5037,7 +5123,7 @@ i32 TCreateTopicsRequestData::Size(TKafkaVersion _version) const {
NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
NPrivate::Size<TimeoutMsMeta>(_collector, _version, TimeoutMs);
NPrivate::Size<ValidateOnlyMeta>(_collector, _version, ValidateOnly);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5052,7 +5138,7 @@ const TCreateTopicsRequestData::TCreatableTopic::NameMeta::Type TCreateTopicsReq
const TCreateTopicsRequestData::TCreatableTopic::NumPartitionsMeta::Type TCreateTopicsRequestData::TCreatableTopic::NumPartitionsMeta::Default = 0;
const TCreateTopicsRequestData::TCreatableTopic::ReplicationFactorMeta::Type TCreateTopicsRequestData::TCreatableTopic::ReplicationFactorMeta::Default = 0;
-TCreateTopicsRequestData::TCreatableTopic::TCreatableTopic()
+TCreateTopicsRequestData::TCreatableTopic::TCreatableTopic()
: Name(NameMeta::Default)
, NumPartitions(NumPartitionsMeta::Default)
, ReplicationFactor(ReplicationFactorMeta::Default)
@@ -5067,7 +5153,7 @@ void TCreateTopicsRequestData::TCreatableTopic::Read(TKafkaReadable& _readable,
NPrivate::Read<ReplicationFactorMeta>(_readable, _version, ReplicationFactor);
NPrivate::Read<AssignmentsMeta>(_readable, _version, Assignments);
NPrivate::Read<ConfigsMeta>(_readable, _version, Configs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5092,10 +5178,10 @@ void TCreateTopicsRequestData::TCreatableTopic::Write(TKafkaWritable& _writable,
NPrivate::Write<ReplicationFactorMeta>(_collector, _writable, _version, ReplicationFactor);
NPrivate::Write<AssignmentsMeta>(_collector, _writable, _version, Assignments);
NPrivate::Write<ConfigsMeta>(_collector, _writable, _version, Configs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -5106,7 +5192,7 @@ i32 TCreateTopicsRequestData::TCreatableTopic::Size(TKafkaVersion _version) cons
NPrivate::Size<ReplicationFactorMeta>(_collector, _version, ReplicationFactor);
NPrivate::Size<AssignmentsMeta>(_collector, _version, Assignments);
NPrivate::Size<ConfigsMeta>(_collector, _version, Configs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5119,7 +5205,7 @@ i32 TCreateTopicsRequestData::TCreatableTopic::Size(TKafkaVersion _version) cons
//
const TCreateTopicsRequestData::TCreatableTopic::TCreatableReplicaAssignment::PartitionIndexMeta::Type TCreateTopicsRequestData::TCreatableTopic::TCreatableReplicaAssignment::PartitionIndexMeta::Default = 0;
-TCreateTopicsRequestData::TCreatableTopic::TCreatableReplicaAssignment::TCreatableReplicaAssignment()
+TCreateTopicsRequestData::TCreatableTopic::TCreatableReplicaAssignment::TCreatableReplicaAssignment()
: PartitionIndex(PartitionIndexMeta::Default)
{}
@@ -5129,7 +5215,7 @@ void TCreateTopicsRequestData::TCreatableTopic::TCreatableReplicaAssignment::Rea
}
NPrivate::Read<PartitionIndexMeta>(_readable, _version, PartitionIndex);
NPrivate::Read<BrokerIdsMeta>(_readable, _version, BrokerIds);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5151,10 +5237,10 @@ void TCreateTopicsRequestData::TCreatableTopic::TCreatableReplicaAssignment::Wri
NPrivate::TWriteCollector _collector;
NPrivate::Write<PartitionIndexMeta>(_collector, _writable, _version, PartitionIndex);
NPrivate::Write<BrokerIdsMeta>(_collector, _writable, _version, BrokerIds);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -5162,7 +5248,7 @@ i32 TCreateTopicsRequestData::TCreatableTopic::TCreatableReplicaAssignment::Size
NPrivate::TSizeCollector _collector;
NPrivate::Size<PartitionIndexMeta>(_collector, _version, PartitionIndex);
NPrivate::Size<BrokerIdsMeta>(_collector, _version, BrokerIds);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5176,7 +5262,7 @@ i32 TCreateTopicsRequestData::TCreatableTopic::TCreatableReplicaAssignment::Size
const TCreateTopicsRequestData::TCreatableTopic::TCreateableTopicConfig::NameMeta::Type TCreateTopicsRequestData::TCreatableTopic::TCreateableTopicConfig::NameMeta::Default = {""};
const TCreateTopicsRequestData::TCreatableTopic::TCreateableTopicConfig::ValueMeta::Type TCreateTopicsRequestData::TCreatableTopic::TCreateableTopicConfig::ValueMeta::Default = {""};
-TCreateTopicsRequestData::TCreatableTopic::TCreateableTopicConfig::TCreateableTopicConfig()
+TCreateTopicsRequestData::TCreatableTopic::TCreateableTopicConfig::TCreateableTopicConfig()
: Name(NameMeta::Default)
, Value(ValueMeta::Default)
{}
@@ -5187,7 +5273,7 @@ void TCreateTopicsRequestData::TCreatableTopic::TCreateableTopicConfig::Read(TKa
}
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<ValueMeta>(_readable, _version, Value);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5209,10 +5295,10 @@ void TCreateTopicsRequestData::TCreatableTopic::TCreateableTopicConfig::Write(TK
NPrivate::TWriteCollector _collector;
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<ValueMeta>(_collector, _writable, _version, Value);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -5220,7 +5306,7 @@ i32 TCreateTopicsRequestData::TCreatableTopic::TCreateableTopicConfig::Size(TKaf
NPrivate::TSizeCollector _collector;
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<ValueMeta>(_collector, _version, Value);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5233,7 +5319,7 @@ i32 TCreateTopicsRequestData::TCreatableTopic::TCreateableTopicConfig::Size(TKaf
//
const TCreateTopicsResponseData::ThrottleTimeMsMeta::Type TCreateTopicsResponseData::ThrottleTimeMsMeta::Default = 0;
-TCreateTopicsResponseData::TCreateTopicsResponseData()
+TCreateTopicsResponseData::TCreateTopicsResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
{}
@@ -5243,7 +5329,7 @@ void TCreateTopicsResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _v
}
NPrivate::Read<ThrottleTimeMsMeta>(_readable, _version, ThrottleTimeMs);
NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5265,10 +5351,10 @@ void TCreateTopicsResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _
NPrivate::TWriteCollector _collector;
NPrivate::Write<ThrottleTimeMsMeta>(_collector, _writable, _version, ThrottleTimeMs);
NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -5276,7 +5362,7 @@ i32 TCreateTopicsResponseData::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<ThrottleTimeMsMeta>(_collector, _version, ThrottleTimeMs);
NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5295,7 +5381,7 @@ const TCreateTopicsResponseData::TCreatableTopicResult::TopicConfigErrorCodeMeta
const TCreateTopicsResponseData::TCreatableTopicResult::NumPartitionsMeta::Type TCreateTopicsResponseData::TCreatableTopicResult::NumPartitionsMeta::Default = -1;
const TCreateTopicsResponseData::TCreatableTopicResult::ReplicationFactorMeta::Type TCreateTopicsResponseData::TCreatableTopicResult::ReplicationFactorMeta::Default = -1;
-TCreateTopicsResponseData::TCreatableTopicResult::TCreatableTopicResult()
+TCreateTopicsResponseData::TCreatableTopicResult::TCreatableTopicResult()
: Name(NameMeta::Default)
, TopicId(TopicIdMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
@@ -5317,7 +5403,7 @@ void TCreateTopicsResponseData::TCreatableTopicResult::Read(TKafkaReadable& _rea
NPrivate::Read<NumPartitionsMeta>(_readable, _version, NumPartitions);
NPrivate::Read<ReplicationFactorMeta>(_readable, _version, ReplicationFactor);
NPrivate::Read<ConfigsMeta>(_readable, _version, Configs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5348,10 +5434,10 @@ void TCreateTopicsResponseData::TCreatableTopicResult::Write(TKafkaWritable& _wr
NPrivate::Write<NumPartitionsMeta>(_collector, _writable, _version, NumPartitions);
NPrivate::Write<ReplicationFactorMeta>(_collector, _writable, _version, ReplicationFactor);
NPrivate::Write<ConfigsMeta>(_collector, _writable, _version, Configs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
NPrivate::WriteTag<TopicConfigErrorCodeMeta>(_writable, _version, TopicConfigErrorCode);
}
}
@@ -5366,7 +5452,7 @@ i32 TCreateTopicsResponseData::TCreatableTopicResult::Size(TKafkaVersion _versio
NPrivate::Size<NumPartitionsMeta>(_collector, _version, NumPartitions);
NPrivate::Size<ReplicationFactorMeta>(_collector, _version, ReplicationFactor);
NPrivate::Size<ConfigsMeta>(_collector, _version, Configs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5383,7 +5469,7 @@ const TCreateTopicsResponseData::TCreatableTopicResult::TCreatableTopicConfigs::
const TCreateTopicsResponseData::TCreatableTopicResult::TCreatableTopicConfigs::ConfigSourceMeta::Type TCreateTopicsResponseData::TCreatableTopicResult::TCreatableTopicConfigs::ConfigSourceMeta::Default = -1;
const TCreateTopicsResponseData::TCreatableTopicResult::TCreatableTopicConfigs::IsSensitiveMeta::Type TCreateTopicsResponseData::TCreatableTopicResult::TCreatableTopicConfigs::IsSensitiveMeta::Default = false;
-TCreateTopicsResponseData::TCreatableTopicResult::TCreatableTopicConfigs::TCreatableTopicConfigs()
+TCreateTopicsResponseData::TCreatableTopicResult::TCreatableTopicConfigs::TCreatableTopicConfigs()
: Name(NameMeta::Default)
, Value(ValueMeta::Default)
, ReadOnly(ReadOnlyMeta::Default)
@@ -5400,7 +5486,7 @@ void TCreateTopicsResponseData::TCreatableTopicResult::TCreatableTopicConfigs::R
NPrivate::Read<ReadOnlyMeta>(_readable, _version, ReadOnly);
NPrivate::Read<ConfigSourceMeta>(_readable, _version, ConfigSource);
NPrivate::Read<IsSensitiveMeta>(_readable, _version, IsSensitive);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5425,10 +5511,10 @@ void TCreateTopicsResponseData::TCreatableTopicResult::TCreatableTopicConfigs::W
NPrivate::Write<ReadOnlyMeta>(_collector, _writable, _version, ReadOnly);
NPrivate::Write<ConfigSourceMeta>(_collector, _writable, _version, ConfigSource);
NPrivate::Write<IsSensitiveMeta>(_collector, _writable, _version, IsSensitive);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -5439,7 +5525,7 @@ i32 TCreateTopicsResponseData::TCreatableTopicResult::TCreatableTopicConfigs::Si
NPrivate::Size<ReadOnlyMeta>(_collector, _version, ReadOnly);
NPrivate::Size<ConfigSourceMeta>(_collector, _version, ConfigSource);
NPrivate::Size<IsSensitiveMeta>(_collector, _version, IsSensitive);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5455,7 +5541,7 @@ const TInitProducerIdRequestData::TransactionTimeoutMsMeta::Type TInitProducerId
const TInitProducerIdRequestData::ProducerIdMeta::Type TInitProducerIdRequestData::ProducerIdMeta::Default = -1;
const TInitProducerIdRequestData::ProducerEpochMeta::Type TInitProducerIdRequestData::ProducerEpochMeta::Default = -1;
-TInitProducerIdRequestData::TInitProducerIdRequestData()
+TInitProducerIdRequestData::TInitProducerIdRequestData()
: TransactionalId(TransactionalIdMeta::Default)
, TransactionTimeoutMs(TransactionTimeoutMsMeta::Default)
, ProducerId(ProducerIdMeta::Default)
@@ -5470,7 +5556,7 @@ void TInitProducerIdRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _
NPrivate::Read<TransactionTimeoutMsMeta>(_readable, _version, TransactionTimeoutMs);
NPrivate::Read<ProducerIdMeta>(_readable, _version, ProducerId);
NPrivate::Read<ProducerEpochMeta>(_readable, _version, ProducerEpoch);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5494,10 +5580,10 @@ void TInitProducerIdRequestData::Write(TKafkaWritable& _writable, TKafkaVersion
NPrivate::Write<TransactionTimeoutMsMeta>(_collector, _writable, _version, TransactionTimeoutMs);
NPrivate::Write<ProducerIdMeta>(_collector, _writable, _version, ProducerId);
NPrivate::Write<ProducerEpochMeta>(_collector, _writable, _version, ProducerEpoch);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -5507,7 +5593,7 @@ i32 TInitProducerIdRequestData::Size(TKafkaVersion _version) const {
NPrivate::Size<TransactionTimeoutMsMeta>(_collector, _version, TransactionTimeoutMs);
NPrivate::Size<ProducerIdMeta>(_collector, _version, ProducerId);
NPrivate::Size<ProducerEpochMeta>(_collector, _version, ProducerEpoch);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5523,7 +5609,7 @@ const TInitProducerIdResponseData::ErrorCodeMeta::Type TInitProducerIdResponseDa
const TInitProducerIdResponseData::ProducerIdMeta::Type TInitProducerIdResponseData::ProducerIdMeta::Default = -1;
const TInitProducerIdResponseData::ProducerEpochMeta::Type TInitProducerIdResponseData::ProducerEpochMeta::Default = 0;
-TInitProducerIdResponseData::TInitProducerIdResponseData()
+TInitProducerIdResponseData::TInitProducerIdResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
, ProducerId(ProducerIdMeta::Default)
@@ -5538,7 +5624,7 @@ void TInitProducerIdResponseData::Read(TKafkaReadable& _readable, TKafkaVersion
NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
NPrivate::Read<ProducerIdMeta>(_readable, _version, ProducerId);
NPrivate::Read<ProducerEpochMeta>(_readable, _version, ProducerEpoch);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5562,20 +5648,1347 @@ void TInitProducerIdResponseData::Write(TKafkaWritable& _writable, TKafkaVersion
NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
NPrivate::Write<ProducerIdMeta>(_collector, _writable, _version, ProducerId);
NPrivate::Write<ProducerEpochMeta>(_collector, _writable, _version, ProducerEpoch);
+
+ if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+ _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+ }
+}
+// Size: accumulates the per-field serialized sizes of TInitProducerIdResponseData
+// for _version; flexible versions also count the tagged-field-count varint.
+// Returns the total size in bytes.
+i32 TInitProducerIdResponseData::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<ThrottleTimeMsMeta>(_collector, _version, ThrottleTimeMs);
+    NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
+    NPrivate::Size<ProducerIdMeta>(_collector, _version, ProducerId);
+    NPrivate::Size<ProducerEpochMeta>(_collector, _version, ProducerEpoch);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TAddPartitionsToTxnRequestData
+//
+// Schema-defined default values for each field.
+const TAddPartitionsToTxnRequestData::TransactionalIdMeta::Type TAddPartitionsToTxnRequestData::TransactionalIdMeta::Default = {""};
+const TAddPartitionsToTxnRequestData::ProducerIdMeta::Type TAddPartitionsToTxnRequestData::ProducerIdMeta::Default = 0;
+const TAddPartitionsToTxnRequestData::ProducerEpochMeta::Type TAddPartitionsToTxnRequestData::ProducerEpochMeta::Default = 0;
+
+// Default-constructs every field to its schema default.
+TAddPartitionsToTxnRequestData::TAddPartitionsToTxnRequestData()
+    : TransactionalId(TransactionalIdMeta::Default)
+    , ProducerId(ProducerIdMeta::Default)
+    , ProducerEpoch(ProducerEpochMeta::Default)
+{}
+
+// Read: rejects versions outside PresentVersions, reads fields in declaration
+// order, then for flexible versions consumes the tagged-fields section
+// (unknown tags are skipped by size).
+void TAddPartitionsToTxnRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TAddPartitionsToTxnRequestData";
+    }
+    NPrivate::Read<TransactionalIdMeta>(_readable, _version, TransactionalId);
+    NPrivate::Read<ProducerIdMeta>(_readable, _version, ProducerId);
+    NPrivate::Read<ProducerEpochMeta>(_readable, _version, ProducerEpoch);
+    NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Write: rejects versions outside PresentVersions, writes fields in declaration
+// order; for flexible versions appends the tagged-field count as an unsigned varint.
+void TAddPartitionsToTxnRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TAddPartitionsToTxnRequestData";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<TransactionalIdMeta>(_collector, _writable, _version, TransactionalId);
+    NPrivate::Write<ProducerIdMeta>(_collector, _writable, _version, ProducerId);
+    NPrivate::Write<ProducerEpochMeta>(_collector, _writable, _version, ProducerEpoch);
+    NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
+
    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+// Size: accumulates per-field serialized sizes for _version; flexible versions
+// also count the tagged-field-count varint.
+i32 TAddPartitionsToTxnRequestData::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<TransactionalIdMeta>(_collector, _version, TransactionalId);
+    NPrivate::Size<ProducerIdMeta>(_collector, _version, ProducerId);
+    NPrivate::Size<ProducerEpochMeta>(_collector, _version, ProducerEpoch);
+    NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
    }
+    return _collector.Size;
}
-i32 TInitProducerIdResponseData::Size(TKafkaVersion _version) const {
+
+//
+// TAddPartitionsToTxnRequestData::TAddPartitionsToTxnTopic
+//
+// Schema-defined default for the topic name.
+const TAddPartitionsToTxnRequestData::TAddPartitionsToTxnTopic::NameMeta::Type TAddPartitionsToTxnRequestData::TAddPartitionsToTxnTopic::NameMeta::Default = {""};
+
+// Default-constructs with the schema default name.
+TAddPartitionsToTxnRequestData::TAddPartitionsToTxnTopic::TAddPartitionsToTxnTopic()
+    : Name(NameMeta::Default)
+{}
+
+// Read: version check, then Name and Partitions in order; flexible versions
+// also consume the tagged-fields section, skipping unknown tags.
+void TAddPartitionsToTxnRequestData::TAddPartitionsToTxnTopic::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TAddPartitionsToTxnRequestData::TAddPartitionsToTxnTopic";
+    }
+    NPrivate::Read<NameMeta>(_readable, _version, Name);
+    NPrivate::Read<PartitionsMeta>(_readable, _version, Partitions);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Write: version check, then Name and Partitions; flexible versions append the
+// tagged-field count varint.
+void TAddPartitionsToTxnRequestData::TAddPartitionsToTxnTopic::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TAddPartitionsToTxnRequestData::TAddPartitionsToTxnTopic";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
+    NPrivate::Write<PartitionsMeta>(_collector, _writable, _version, Partitions);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Size: per-field sizes plus, for flexible versions, the tagged-field-count varint.
+i32 TAddPartitionsToTxnRequestData::TAddPartitionsToTxnTopic::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<NameMeta>(_collector, _version, Name);
+    NPrivate::Size<PartitionsMeta>(_collector, _version, Partitions);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TAddPartitionsToTxnResponseData
+//
+// Schema-defined default throttle time.
+const TAddPartitionsToTxnResponseData::ThrottleTimeMsMeta::Type TAddPartitionsToTxnResponseData::ThrottleTimeMsMeta::Default = 0;
+
+// Default-constructs with the schema default throttle time.
+TAddPartitionsToTxnResponseData::TAddPartitionsToTxnResponseData()
+    : ThrottleTimeMs(ThrottleTimeMsMeta::Default)
+{}
+
+// Read: version check, then ThrottleTimeMs and Results in order; flexible
+// versions also consume the tagged-fields section, skipping unknown tags.
+void TAddPartitionsToTxnResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TAddPartitionsToTxnResponseData";
+    }
+    NPrivate::Read<ThrottleTimeMsMeta>(_readable, _version, ThrottleTimeMs);
+    NPrivate::Read<ResultsMeta>(_readable, _version, Results);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Write: version check, then ThrottleTimeMs and Results; flexible versions
+// append the tagged-field count varint.
+void TAddPartitionsToTxnResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TAddPartitionsToTxnResponseData";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<ThrottleTimeMsMeta>(_collector, _writable, _version, ThrottleTimeMs);
+    NPrivate::Write<ResultsMeta>(_collector, _writable, _version, Results);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Size: per-field sizes plus, for flexible versions, the tagged-field-count varint.
+i32 TAddPartitionsToTxnResponseData::Size(TKafkaVersion _version) const {
    NPrivate::TSizeCollector _collector;
    NPrivate::Size<ThrottleTimeMsMeta>(_collector, _version, ThrottleTimeMs);
+    NPrivate::Size<ResultsMeta>(_collector, _version, Results);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult
+//
+// Schema-defined default for the topic name.
+const TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::NameMeta::Type TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::NameMeta::Default = {""};
+
+// Default-constructs with the schema default name.
+TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::TAddPartitionsToTxnTopicResult()
+    : Name(NameMeta::Default)
+{}
+
+// Read: version check, then Name and Results in order; flexible versions also
+// consume the tagged-fields section, skipping unknown tags.
+void TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult";
+    }
+    NPrivate::Read<NameMeta>(_readable, _version, Name);
+    NPrivate::Read<ResultsMeta>(_readable, _version, Results);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Write: version check, then Name and Results; flexible versions append the
+// tagged-field count varint.
+void TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
+    NPrivate::Write<ResultsMeta>(_collector, _writable, _version, Results);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Size: per-field sizes plus, for flexible versions, the tagged-field-count varint.
+i32 TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<NameMeta>(_collector, _version, Name);
+    NPrivate::Size<ResultsMeta>(_collector, _version, Results);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::TAddPartitionsToTxnPartitionResult
+//
+// Schema-defined defaults for the per-partition result fields.
+const TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::TAddPartitionsToTxnPartitionResult::PartitionIndexMeta::Type TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::TAddPartitionsToTxnPartitionResult::PartitionIndexMeta::Default = 0;
+const TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::TAddPartitionsToTxnPartitionResult::ErrorCodeMeta::Type TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::TAddPartitionsToTxnPartitionResult::ErrorCodeMeta::Default = 0;
+
+// Default-constructs every field to its schema default.
+TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::TAddPartitionsToTxnPartitionResult::TAddPartitionsToTxnPartitionResult()
+    : PartitionIndex(PartitionIndexMeta::Default)
+    , ErrorCode(ErrorCodeMeta::Default)
+{}
+
+// Read: version check, then PartitionIndex and ErrorCode in order; flexible
+// versions also consume the tagged-fields section, skipping unknown tags.
+void TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::TAddPartitionsToTxnPartitionResult::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::TAddPartitionsToTxnPartitionResult";
+    }
+    NPrivate::Read<PartitionIndexMeta>(_readable, _version, PartitionIndex);
+    NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Write: version check, then PartitionIndex and ErrorCode; flexible versions
+// append the tagged-field count varint.
+void TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::TAddPartitionsToTxnPartitionResult::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::TAddPartitionsToTxnPartitionResult";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<PartitionIndexMeta>(_collector, _writable, _version, PartitionIndex);
+    NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Size: per-field sizes plus, for flexible versions, the tagged-field-count varint.
+i32 TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::TAddPartitionsToTxnPartitionResult::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<PartitionIndexMeta>(_collector, _version, PartitionIndex);
    NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TAddOffsetsToTxnRequestData
+//
+// Schema-defined default values for each field.
+const TAddOffsetsToTxnRequestData::TransactionalIdMeta::Type TAddOffsetsToTxnRequestData::TransactionalIdMeta::Default = {""};
+const TAddOffsetsToTxnRequestData::ProducerIdMeta::Type TAddOffsetsToTxnRequestData::ProducerIdMeta::Default = 0;
+const TAddOffsetsToTxnRequestData::ProducerEpochMeta::Type TAddOffsetsToTxnRequestData::ProducerEpochMeta::Default = 0;
+const TAddOffsetsToTxnRequestData::GroupIdMeta::Type TAddOffsetsToTxnRequestData::GroupIdMeta::Default = {""};
+
+// Default-constructs every field to its schema default.
+TAddOffsetsToTxnRequestData::TAddOffsetsToTxnRequestData()
+    : TransactionalId(TransactionalIdMeta::Default)
+    , ProducerId(ProducerIdMeta::Default)
+    , ProducerEpoch(ProducerEpochMeta::Default)
+    , GroupId(GroupIdMeta::Default)
+{}
+
+// Read: version check, then fields in declaration order; flexible versions also
+// consume the tagged-fields section, skipping unknown tags.
+void TAddOffsetsToTxnRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TAddOffsetsToTxnRequestData";
+    }
+    NPrivate::Read<TransactionalIdMeta>(_readable, _version, TransactionalId);
+    NPrivate::Read<ProducerIdMeta>(_readable, _version, ProducerId);
+    NPrivate::Read<ProducerEpochMeta>(_readable, _version, ProducerEpoch);
+    NPrivate::Read<GroupIdMeta>(_readable, _version, GroupId);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Write: version check, then fields in declaration order; flexible versions
+// append the tagged-field count varint.
+void TAddOffsetsToTxnRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TAddOffsetsToTxnRequestData";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<TransactionalIdMeta>(_collector, _writable, _version, TransactionalId);
+    NPrivate::Write<ProducerIdMeta>(_collector, _writable, _version, ProducerId);
+    NPrivate::Write<ProducerEpochMeta>(_collector, _writable, _version, ProducerEpoch);
+    NPrivate::Write<GroupIdMeta>(_collector, _writable, _version, GroupId);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Size: per-field sizes plus, for flexible versions, the tagged-field-count varint.
+i32 TAddOffsetsToTxnRequestData::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<TransactionalIdMeta>(_collector, _version, TransactionalId);
    NPrivate::Size<ProducerIdMeta>(_collector, _version, ProducerId);
    NPrivate::Size<ProducerEpochMeta>(_collector, _version, ProducerEpoch);
+    NPrivate::Size<GroupIdMeta>(_collector, _version, GroupId);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TAddOffsetsToTxnResponseData
+//
+// Schema-defined default values for each field.
+const TAddOffsetsToTxnResponseData::ThrottleTimeMsMeta::Type TAddOffsetsToTxnResponseData::ThrottleTimeMsMeta::Default = 0;
+const TAddOffsetsToTxnResponseData::ErrorCodeMeta::Type TAddOffsetsToTxnResponseData::ErrorCodeMeta::Default = 0;
+
+// Default-constructs every field to its schema default.
+TAddOffsetsToTxnResponseData::TAddOffsetsToTxnResponseData()
+    : ThrottleTimeMs(ThrottleTimeMsMeta::Default)
+    , ErrorCode(ErrorCodeMeta::Default)
+{}
+
+// Read: version check, then ThrottleTimeMs and ErrorCode in order; flexible
+// versions also consume the tagged-fields section, skipping unknown tags.
+void TAddOffsetsToTxnResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TAddOffsetsToTxnResponseData";
+    }
+    NPrivate::Read<ThrottleTimeMsMeta>(_readable, _version, ThrottleTimeMs);
+    NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Write: version check, then ThrottleTimeMs and ErrorCode; flexible versions
+// append the tagged-field count varint.
+void TAddOffsetsToTxnResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TAddOffsetsToTxnResponseData";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<ThrottleTimeMsMeta>(_collector, _writable, _version, ThrottleTimeMs);
+    NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+// Size: per-field sizes plus, for flexible versions, the tagged-field-count varint.
+i32 TAddOffsetsToTxnResponseData::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<ThrottleTimeMsMeta>(_collector, _version, ThrottleTimeMs);
+    NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TEndTxnRequestData
+//
+// Schema-defined default values for each field.
+const TEndTxnRequestData::TransactionalIdMeta::Type TEndTxnRequestData::TransactionalIdMeta::Default = {""};
+const TEndTxnRequestData::ProducerIdMeta::Type TEndTxnRequestData::ProducerIdMeta::Default = 0;
+const TEndTxnRequestData::ProducerEpochMeta::Type TEndTxnRequestData::ProducerEpochMeta::Default = 0;
+const TEndTxnRequestData::CommittedMeta::Type TEndTxnRequestData::CommittedMeta::Default = false;
+
+// Default-constructs every field to its schema default.
+TEndTxnRequestData::TEndTxnRequestData()
+    : TransactionalId(TransactionalIdMeta::Default)
+    , ProducerId(ProducerIdMeta::Default)
+    , ProducerEpoch(ProducerEpochMeta::Default)
+    , Committed(CommittedMeta::Default)
+{}
+
+// Read: version check, then fields in declaration order; flexible versions also
+// consume the tagged-fields section, skipping unknown tags.
+void TEndTxnRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TEndTxnRequestData";
+    }
+    NPrivate::Read<TransactionalIdMeta>(_readable, _version, TransactionalId);
+    NPrivate::Read<ProducerIdMeta>(_readable, _version, ProducerId);
+    NPrivate::Read<ProducerEpochMeta>(_readable, _version, ProducerEpoch);
+    NPrivate::Read<CommittedMeta>(_readable, _version, Committed);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Write: version check, then fields in declaration order; flexible versions
+// append the tagged-field count varint.
+void TEndTxnRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TEndTxnRequestData";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<TransactionalIdMeta>(_collector, _writable, _version, TransactionalId);
+    NPrivate::Write<ProducerIdMeta>(_collector, _writable, _version, ProducerId);
+    NPrivate::Write<ProducerEpochMeta>(_collector, _writable, _version, ProducerEpoch);
+    NPrivate::Write<CommittedMeta>(_collector, _writable, _version, Committed);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Size: per-field sizes plus, for flexible versions, the tagged-field-count varint.
+i32 TEndTxnRequestData::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<TransactionalIdMeta>(_collector, _version, TransactionalId);
+    NPrivate::Size<ProducerIdMeta>(_collector, _version, ProducerId);
+    NPrivate::Size<ProducerEpochMeta>(_collector, _version, ProducerEpoch);
+    NPrivate::Size<CommittedMeta>(_collector, _version, Committed);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TEndTxnResponseData
+//
+// Schema-defined default values for each field.
+const TEndTxnResponseData::ThrottleTimeMsMeta::Type TEndTxnResponseData::ThrottleTimeMsMeta::Default = 0;
+const TEndTxnResponseData::ErrorCodeMeta::Type TEndTxnResponseData::ErrorCodeMeta::Default = 0;
+
+// Default-constructs every field to its schema default.
+TEndTxnResponseData::TEndTxnResponseData()
+    : ThrottleTimeMs(ThrottleTimeMsMeta::Default)
+    , ErrorCode(ErrorCodeMeta::Default)
+{}
+
+// Read: version check, then ThrottleTimeMs and ErrorCode in order; flexible
+// versions also consume the tagged-fields section, skipping unknown tags.
+void TEndTxnResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TEndTxnResponseData";
+    }
+    NPrivate::Read<ThrottleTimeMsMeta>(_readable, _version, ThrottleTimeMs);
+    NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Write: version check, then ThrottleTimeMs and ErrorCode; flexible versions
+// append the tagged-field count varint.
+void TEndTxnResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TEndTxnResponseData";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<ThrottleTimeMsMeta>(_collector, _writable, _version, ThrottleTimeMs);
+    NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Size: per-field sizes plus, for flexible versions, the tagged-field-count varint.
+i32 TEndTxnResponseData::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<ThrottleTimeMsMeta>(_collector, _version, ThrottleTimeMs);
+    NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TTxnOffsetCommitRequestData
+//
+// Schema-defined default values for each field.
+const TTxnOffsetCommitRequestData::TransactionalIdMeta::Type TTxnOffsetCommitRequestData::TransactionalIdMeta::Default = {""};
+const TTxnOffsetCommitRequestData::GroupIdMeta::Type TTxnOffsetCommitRequestData::GroupIdMeta::Default = {""};
+const TTxnOffsetCommitRequestData::ProducerIdMeta::Type TTxnOffsetCommitRequestData::ProducerIdMeta::Default = 0;
+const TTxnOffsetCommitRequestData::ProducerEpochMeta::Type TTxnOffsetCommitRequestData::ProducerEpochMeta::Default = 0;
+const TTxnOffsetCommitRequestData::GenerationIdMeta::Type TTxnOffsetCommitRequestData::GenerationIdMeta::Default = -1;
+const TTxnOffsetCommitRequestData::MemberIdMeta::Type TTxnOffsetCommitRequestData::MemberIdMeta::Default = {""};
+const TTxnOffsetCommitRequestData::GroupInstanceIdMeta::Type TTxnOffsetCommitRequestData::GroupInstanceIdMeta::Default = std::nullopt;
+
+// Default-constructs every field to its schema default.
+TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestData()
+    : TransactionalId(TransactionalIdMeta::Default)
+    , GroupId(GroupIdMeta::Default)
+    , ProducerId(ProducerIdMeta::Default)
+    , ProducerEpoch(ProducerEpochMeta::Default)
+    , GenerationId(GenerationIdMeta::Default)
+    , MemberId(MemberIdMeta::Default)
+    , GroupInstanceId(GroupInstanceIdMeta::Default)
+{}
+
+// Read: version check, then fields in declaration order; flexible versions also
+// consume the tagged-fields section, skipping unknown tags.
+void TTxnOffsetCommitRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TTxnOffsetCommitRequestData";
+    }
+    NPrivate::Read<TransactionalIdMeta>(_readable, _version, TransactionalId);
+    NPrivate::Read<GroupIdMeta>(_readable, _version, GroupId);
+    NPrivate::Read<ProducerIdMeta>(_readable, _version, ProducerId);
+    NPrivate::Read<ProducerEpochMeta>(_readable, _version, ProducerEpoch);
+    NPrivate::Read<GenerationIdMeta>(_readable, _version, GenerationId);
+    NPrivate::Read<MemberIdMeta>(_readable, _version, MemberId);
+    NPrivate::Read<GroupInstanceIdMeta>(_readable, _version, GroupInstanceId);
+    NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Write: version check, then fields in declaration order; flexible versions
+// append the tagged-field count varint.
+void TTxnOffsetCommitRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TTxnOffsetCommitRequestData";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<TransactionalIdMeta>(_collector, _writable, _version, TransactionalId);
+    NPrivate::Write<GroupIdMeta>(_collector, _writable, _version, GroupId);
+    NPrivate::Write<ProducerIdMeta>(_collector, _writable, _version, ProducerId);
+    NPrivate::Write<ProducerEpochMeta>(_collector, _writable, _version, ProducerEpoch);
+    NPrivate::Write<GenerationIdMeta>(_collector, _writable, _version, GenerationId);
+    NPrivate::Write<MemberIdMeta>(_collector, _writable, _version, MemberId);
+    NPrivate::Write<GroupInstanceIdMeta>(_collector, _writable, _version, GroupInstanceId);
+    NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Size: per-field sizes plus, for flexible versions, the tagged-field-count varint.
+i32 TTxnOffsetCommitRequestData::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<TransactionalIdMeta>(_collector, _version, TransactionalId);
+    NPrivate::Size<GroupIdMeta>(_collector, _version, GroupId);
+    NPrivate::Size<ProducerIdMeta>(_collector, _version, ProducerId);
+    NPrivate::Size<ProducerEpochMeta>(_collector, _version, ProducerEpoch);
+    NPrivate::Size<GenerationIdMeta>(_collector, _version, GenerationId);
+    NPrivate::Size<MemberIdMeta>(_collector, _version, MemberId);
+    NPrivate::Size<GroupInstanceIdMeta>(_collector, _version, GroupInstanceId);
+    NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic
+//
+// Schema-defined default for the topic name.
+const TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::NameMeta::Type TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::NameMeta::Default = {""};
+
+// Default-constructs with the schema default name.
+TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestTopic()
+    : Name(NameMeta::Default)
+{}
+
+// Read: version check, then Name and Partitions in order; flexible versions
+// also consume the tagged-fields section, skipping unknown tags.
+void TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic";
+    }
+    NPrivate::Read<NameMeta>(_readable, _version, Name);
+    NPrivate::Read<PartitionsMeta>(_readable, _version, Partitions);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Write: version check, then Name and Partitions; flexible versions append the
+// tagged-field count varint.
+void TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
+    NPrivate::Write<PartitionsMeta>(_collector, _writable, _version, Partitions);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Size: per-field sizes plus, for flexible versions, the tagged-field-count varint.
+i32 TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<NameMeta>(_collector, _version, Name);
+    NPrivate::Size<PartitionsMeta>(_collector, _version, Partitions);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition
+//
+// NOTE(review): machine-generated serialization code; regenerate rather than
+// hand-edit. CommittedLeaderEpoch defaults to -1 (Kafka's "unknown epoch").
+const TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition::PartitionIndexMeta::Type TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition::PartitionIndexMeta::Default = 0;
+const TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition::CommittedOffsetMeta::Type TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition::CommittedOffsetMeta::Default = 0;
+const TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition::CommittedLeaderEpochMeta::Type TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition::CommittedLeaderEpochMeta::Default = -1;
+const TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition::CommittedMetadataMeta::Type TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition::CommittedMetadataMeta::Default = {""};
+
+// Default-constructs the partition entry with the generated field defaults.
+TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition::TTxnOffsetCommitRequestPartition()
+    : PartitionIndex(PartitionIndexMeta::Default)
+    , CommittedOffset(CommittedOffsetMeta::Default)
+    , CommittedLeaderEpoch(CommittedLeaderEpochMeta::Default)
+    , CommittedMetadata(CommittedMetadataMeta::Default)
+{}
+
+// Deserializes this struct from _readable; throws on unsupported _version.
+void TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition";
+    }
+    NPrivate::Read<PartitionIndexMeta>(_readable, _version, PartitionIndex);
+    NPrivate::Read<CommittedOffsetMeta>(_readable, _version, CommittedOffset);
+    NPrivate::Read<CommittedLeaderEpochMeta>(_readable, _version, CommittedLeaderEpoch);
+    NPrivate::Read<CommittedMetadataMeta>(_readable, _version, CommittedMetadata);
+
+    // Flexible versions: consume the tagged-fields section, skipping all
+    // (unknown) tags by their declared size.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Serializes this struct to _writable; throws on unsupported _version.
+void TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<PartitionIndexMeta>(_collector, _writable, _version, PartitionIndex);
+    NPrivate::Write<CommittedOffsetMeta>(_collector, _writable, _version, CommittedOffset);
+    NPrivate::Write<CommittedLeaderEpochMeta>(_collector, _writable, _version, CommittedLeaderEpoch);
+    NPrivate::Write<CommittedMetadataMeta>(_collector, _writable, _version, CommittedMetadata);
+
+    // Flexible versions: emit the tagged-field count as an unsigned varint.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Returns the serialized size in bytes for _version; must agree with Write().
+i32 TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<PartitionIndexMeta>(_collector, _version, PartitionIndex);
+    NPrivate::Size<CommittedOffsetMeta>(_collector, _version, CommittedOffset);
+    NPrivate::Size<CommittedLeaderEpochMeta>(_collector, _version, CommittedLeaderEpoch);
+    NPrivate::Size<CommittedMetadataMeta>(_collector, _version, CommittedMetadata);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TTxnOffsetCommitResponseData
+//
+// NOTE(review): machine-generated serialization code; prefer regenerating.
+const TTxnOffsetCommitResponseData::ThrottleTimeMsMeta::Type TTxnOffsetCommitResponseData::ThrottleTimeMsMeta::Default = 0;
+
+// Default-constructs the response with the generated field defaults.
+TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseData()
+    : ThrottleTimeMs(ThrottleTimeMsMeta::Default)
+{}
+
+// Deserializes this message from _readable; throws on unsupported _version.
+void TTxnOffsetCommitResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TTxnOffsetCommitResponseData";
+    }
+    NPrivate::Read<ThrottleTimeMsMeta>(_readable, _version, ThrottleTimeMs);
+    NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
+
+    // Flexible versions: consume the tagged-fields section, skipping all
+    // (unknown) tags by their declared size.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Serializes this message to _writable; throws on unsupported _version.
+void TTxnOffsetCommitResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TTxnOffsetCommitResponseData";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<ThrottleTimeMsMeta>(_collector, _writable, _version, ThrottleTimeMs);
+    NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
+
+    // Flexible versions: emit the tagged-field count as an unsigned varint.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Returns the serialized size in bytes for _version; must agree with Write().
+i32 TTxnOffsetCommitResponseData::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<ThrottleTimeMsMeta>(_collector, _version, ThrottleTimeMs);
+    NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic
+//
+// NOTE(review): machine-generated serialization code; prefer regenerating.
+const TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::NameMeta::Type TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::NameMeta::Default = {""};
+
+// Default-constructs the topic entry with the generated field defaults.
+TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::TTxnOffsetCommitResponseTopic()
+    : Name(NameMeta::Default)
+{}
+
+// Deserializes this struct from _readable; throws on unsupported _version.
+void TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic";
+    }
+    NPrivate::Read<NameMeta>(_readable, _version, Name);
+    NPrivate::Read<PartitionsMeta>(_readable, _version, Partitions);
+
+    // Flexible versions: consume the tagged-fields section, skipping all
+    // (unknown) tags by their declared size.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Serializes this struct to _writable; throws on unsupported _version.
+void TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
+    NPrivate::Write<PartitionsMeta>(_collector, _writable, _version, Partitions);
+
+    // Flexible versions: emit the tagged-field count as an unsigned varint.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Returns the serialized size in bytes for _version; must agree with Write().
+i32 TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<NameMeta>(_collector, _version, Name);
+    NPrivate::Size<PartitionsMeta>(_collector, _version, Partitions);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::TTxnOffsetCommitResponsePartition
+//
+// NOTE(review): machine-generated serialization code; prefer regenerating.
+const TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::TTxnOffsetCommitResponsePartition::PartitionIndexMeta::Type TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::TTxnOffsetCommitResponsePartition::PartitionIndexMeta::Default = 0;
+const TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::TTxnOffsetCommitResponsePartition::ErrorCodeMeta::Type TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::TTxnOffsetCommitResponsePartition::ErrorCodeMeta::Default = 0;
+
+// Default-constructs the partition entry with the generated field defaults.
+TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::TTxnOffsetCommitResponsePartition::TTxnOffsetCommitResponsePartition()
+    : PartitionIndex(PartitionIndexMeta::Default)
+    , ErrorCode(ErrorCodeMeta::Default)
+{}
+
+// Deserializes this struct from _readable; throws on unsupported _version.
+void TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::TTxnOffsetCommitResponsePartition::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::TTxnOffsetCommitResponsePartition";
+    }
+    NPrivate::Read<PartitionIndexMeta>(_readable, _version, PartitionIndex);
+    NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
+
+    // Flexible versions: consume the tagged-fields section, skipping all
+    // (unknown) tags by their declared size.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Serializes this struct to _writable; throws on unsupported _version.
+void TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::TTxnOffsetCommitResponsePartition::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::TTxnOffsetCommitResponsePartition";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<PartitionIndexMeta>(_collector, _writable, _version, PartitionIndex);
+    NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
+
+    // Flexible versions: emit the tagged-field count as an unsigned varint.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Returns the serialized size in bytes for _version; must agree with Write().
+i32 TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::TTxnOffsetCommitResponsePartition::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<PartitionIndexMeta>(_collector, _version, PartitionIndex);
+    NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TDescribeConfigsRequestData
+//
+// NOTE(review): machine-generated serialization code; prefer regenerating.
+const TDescribeConfigsRequestData::IncludeSynonymsMeta::Type TDescribeConfigsRequestData::IncludeSynonymsMeta::Default = false;
+const TDescribeConfigsRequestData::IncludeDocumentationMeta::Type TDescribeConfigsRequestData::IncludeDocumentationMeta::Default = false;
+
+// Default-constructs the request with the generated field defaults.
+TDescribeConfigsRequestData::TDescribeConfigsRequestData()
+    : IncludeSynonyms(IncludeSynonymsMeta::Default)
+    , IncludeDocumentation(IncludeDocumentationMeta::Default)
+{}
+
+// Deserializes this message from _readable; throws on unsupported _version.
+void TDescribeConfigsRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TDescribeConfigsRequestData";
+    }
+    NPrivate::Read<ResourcesMeta>(_readable, _version, Resources);
+    NPrivate::Read<IncludeSynonymsMeta>(_readable, _version, IncludeSynonyms);
+    NPrivate::Read<IncludeDocumentationMeta>(_readable, _version, IncludeDocumentation);
+
+    // Flexible versions: consume the tagged-fields section, skipping all
+    // (unknown) tags by their declared size.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Serializes this message to _writable; throws on unsupported _version.
+void TDescribeConfigsRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TDescribeConfigsRequestData";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<ResourcesMeta>(_collector, _writable, _version, Resources);
+    NPrivate::Write<IncludeSynonymsMeta>(_collector, _writable, _version, IncludeSynonyms);
+    NPrivate::Write<IncludeDocumentationMeta>(_collector, _writable, _version, IncludeDocumentation);
+
+    // Flexible versions: emit the tagged-field count as an unsigned varint.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Returns the serialized size in bytes for _version; must agree with Write().
+i32 TDescribeConfigsRequestData::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<ResourcesMeta>(_collector, _version, Resources);
+    NPrivate::Size<IncludeSynonymsMeta>(_collector, _version, IncludeSynonyms);
+    NPrivate::Size<IncludeDocumentationMeta>(_collector, _version, IncludeDocumentation);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TDescribeConfigsRequestData::TDescribeConfigsResource
+//
+// NOTE(review): machine-generated serialization code; prefer regenerating.
+const TDescribeConfigsRequestData::TDescribeConfigsResource::ResourceTypeMeta::Type TDescribeConfigsRequestData::TDescribeConfigsResource::ResourceTypeMeta::Default = 0;
+const TDescribeConfigsRequestData::TDescribeConfigsResource::ResourceNameMeta::Type TDescribeConfigsRequestData::TDescribeConfigsResource::ResourceNameMeta::Default = {""};
+
+// Default-constructs the resource entry with the generated field defaults.
+TDescribeConfigsRequestData::TDescribeConfigsResource::TDescribeConfigsResource()
+    : ResourceType(ResourceTypeMeta::Default)
+    , ResourceName(ResourceNameMeta::Default)
+{}
+
+// Deserializes this struct from _readable; throws on unsupported _version.
+void TDescribeConfigsRequestData::TDescribeConfigsResource::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TDescribeConfigsRequestData::TDescribeConfigsResource";
+    }
+    NPrivate::Read<ResourceTypeMeta>(_readable, _version, ResourceType);
+    NPrivate::Read<ResourceNameMeta>(_readable, _version, ResourceName);
+    NPrivate::Read<ConfigurationKeysMeta>(_readable, _version, ConfigurationKeys);
+
+    // Flexible versions: consume the tagged-fields section, skipping all
+    // (unknown) tags by their declared size.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Serializes this struct to _writable; throws on unsupported _version.
+void TDescribeConfigsRequestData::TDescribeConfigsResource::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TDescribeConfigsRequestData::TDescribeConfigsResource";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<ResourceTypeMeta>(_collector, _writable, _version, ResourceType);
+    NPrivate::Write<ResourceNameMeta>(_collector, _writable, _version, ResourceName);
+    NPrivate::Write<ConfigurationKeysMeta>(_collector, _writable, _version, ConfigurationKeys);
+
+    // Flexible versions: emit the tagged-field count as an unsigned varint.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Returns the serialized size in bytes for _version; must agree with Write().
+i32 TDescribeConfigsRequestData::TDescribeConfigsResource::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<ResourceTypeMeta>(_collector, _version, ResourceType);
+    NPrivate::Size<ResourceNameMeta>(_collector, _version, ResourceName);
+    NPrivate::Size<ConfigurationKeysMeta>(_collector, _version, ConfigurationKeys);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TDescribeConfigsResponseData
+//
+// NOTE(review): machine-generated serialization code; prefer regenerating.
+const TDescribeConfigsResponseData::ThrottleTimeMsMeta::Type TDescribeConfigsResponseData::ThrottleTimeMsMeta::Default = 0;
+
+// Default-constructs the response with the generated field defaults.
+TDescribeConfigsResponseData::TDescribeConfigsResponseData()
+    : ThrottleTimeMs(ThrottleTimeMsMeta::Default)
+{}
+
+// Deserializes this message from _readable; throws on unsupported _version.
+void TDescribeConfigsResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TDescribeConfigsResponseData";
+    }
+    NPrivate::Read<ThrottleTimeMsMeta>(_readable, _version, ThrottleTimeMs);
+    NPrivate::Read<ResultsMeta>(_readable, _version, Results);
+
+    // Flexible versions: consume the tagged-fields section, skipping all
+    // (unknown) tags by their declared size.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Serializes this message to _writable; throws on unsupported _version.
+void TDescribeConfigsResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TDescribeConfigsResponseData";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<ThrottleTimeMsMeta>(_collector, _writable, _version, ThrottleTimeMs);
+    NPrivate::Write<ResultsMeta>(_collector, _writable, _version, Results);
+
+    // Flexible versions: emit the tagged-field count as an unsigned varint.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Returns the serialized size in bytes for _version; must agree with Write().
+i32 TDescribeConfigsResponseData::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<ThrottleTimeMsMeta>(_collector, _version, ThrottleTimeMs);
+    NPrivate::Size<ResultsMeta>(_collector, _version, Results);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TDescribeConfigsResponseData::TDescribeConfigsResult
+//
+// NOTE(review): machine-generated serialization code; prefer regenerating.
+const TDescribeConfigsResponseData::TDescribeConfigsResult::ErrorCodeMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::ErrorCodeMeta::Default = 0;
+const TDescribeConfigsResponseData::TDescribeConfigsResult::ErrorMessageMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::ErrorMessageMeta::Default = {""};
+const TDescribeConfigsResponseData::TDescribeConfigsResult::ResourceTypeMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::ResourceTypeMeta::Default = 0;
+const TDescribeConfigsResponseData::TDescribeConfigsResult::ResourceNameMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::ResourceNameMeta::Default = {""};
+
+// Default-constructs the per-resource result with the generated defaults.
+TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResult()
+    : ErrorCode(ErrorCodeMeta::Default)
+    , ErrorMessage(ErrorMessageMeta::Default)
+    , ResourceType(ResourceTypeMeta::Default)
+    , ResourceName(ResourceNameMeta::Default)
+{}
+
+// Deserializes this struct from _readable; throws on unsupported _version.
+void TDescribeConfigsResponseData::TDescribeConfigsResult::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TDescribeConfigsResponseData::TDescribeConfigsResult";
+    }
+    NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
+    NPrivate::Read<ErrorMessageMeta>(_readable, _version, ErrorMessage);
+    NPrivate::Read<ResourceTypeMeta>(_readable, _version, ResourceType);
+    NPrivate::Read<ResourceNameMeta>(_readable, _version, ResourceName);
+    NPrivate::Read<ConfigsMeta>(_readable, _version, Configs);
+
+    // Flexible versions: consume the tagged-fields section, skipping all
+    // (unknown) tags by their declared size.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Serializes this struct to _writable; throws on unsupported _version.
+void TDescribeConfigsResponseData::TDescribeConfigsResult::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TDescribeConfigsResponseData::TDescribeConfigsResult";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
+    NPrivate::Write<ErrorMessageMeta>(_collector, _writable, _version, ErrorMessage);
+    NPrivate::Write<ResourceTypeMeta>(_collector, _writable, _version, ResourceType);
+    NPrivate::Write<ResourceNameMeta>(_collector, _writable, _version, ResourceName);
+    NPrivate::Write<ConfigsMeta>(_collector, _writable, _version, Configs);
+
+    // Flexible versions: emit the tagged-field count as an unsigned varint.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Returns the serialized size in bytes for _version; must agree with Write().
+i32 TDescribeConfigsResponseData::TDescribeConfigsResult::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
+    NPrivate::Size<ErrorMessageMeta>(_collector, _version, ErrorMessage);
+    NPrivate::Size<ResourceTypeMeta>(_collector, _version, ResourceType);
+    NPrivate::Size<ResourceNameMeta>(_collector, _version, ResourceName);
+    NPrivate::Size<ConfigsMeta>(_collector, _version, Configs);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult
+//
+// NOTE(review): machine-generated serialization code; prefer regenerating.
+// ConfigSource defaults to -1 (presumably "unknown source" — confirm against
+// the Kafka DescribeConfigs spec).
+const TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::NameMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::NameMeta::Default = {""};
+const TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::ValueMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::ValueMeta::Default = {""};
+const TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::ReadOnlyMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::ReadOnlyMeta::Default = false;
+const TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::IsDefaultMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::IsDefaultMeta::Default = false;
+const TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::ConfigSourceMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::ConfigSourceMeta::Default = -1;
+const TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::IsSensitiveMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::IsSensitiveMeta::Default = false;
+const TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::ConfigTypeMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::ConfigTypeMeta::Default = 0;
+const TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::DocumentationMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::DocumentationMeta::Default = {""};
+
+// Default-constructs the per-config result with the generated defaults.
+// Synonyms (a collection) is intentionally absent from the init list.
+TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::TDescribeConfigsResourceResult()
+    : Name(NameMeta::Default)
+    , Value(ValueMeta::Default)
+    , ReadOnly(ReadOnlyMeta::Default)
+    , IsDefault(IsDefaultMeta::Default)
+    , ConfigSource(ConfigSourceMeta::Default)
+    , IsSensitive(IsSensitiveMeta::Default)
+    , ConfigType(ConfigTypeMeta::Default)
+    , Documentation(DocumentationMeta::Default)
+{}
+
+// Deserializes this struct from _readable; throws on unsupported _version.
+void TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't read version " << _version << " of TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult";
+    }
+    NPrivate::Read<NameMeta>(_readable, _version, Name);
+    NPrivate::Read<ValueMeta>(_readable, _version, Value);
+    NPrivate::Read<ReadOnlyMeta>(_readable, _version, ReadOnly);
+    NPrivate::Read<IsDefaultMeta>(_readable, _version, IsDefault);
+    NPrivate::Read<ConfigSourceMeta>(_readable, _version, ConfigSource);
+    NPrivate::Read<IsSensitiveMeta>(_readable, _version, IsSensitive);
+    NPrivate::Read<SynonymsMeta>(_readable, _version, Synonyms);
+    NPrivate::Read<ConfigTypeMeta>(_readable, _version, ConfigType);
+    NPrivate::Read<DocumentationMeta>(_readable, _version, Documentation);
+
+    // Flexible versions: consume the tagged-fields section, skipping all
+    // (unknown) tags by their declared size.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+        for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+            ui32 _tag = _readable.readUnsignedVarint<ui32>();
+            ui32 _size = _readable.readUnsignedVarint<ui32>();
+            switch (_tag) {
+                default:
+                    _readable.skip(_size); // skip unknown tag
+                    break;
+            }
+        }
+    }
+}
+
+// Serializes this struct to _writable; throws on unsupported _version.
+void TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+    if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+        ythrow yexception() << "Can't write version " << _version << " of TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult";
+    }
+    NPrivate::TWriteCollector _collector;
+    NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
+    NPrivate::Write<ValueMeta>(_collector, _writable, _version, Value);
+    NPrivate::Write<ReadOnlyMeta>(_collector, _writable, _version, ReadOnly);
+    NPrivate::Write<IsDefaultMeta>(_collector, _writable, _version, IsDefault);
+    NPrivate::Write<ConfigSourceMeta>(_collector, _writable, _version, ConfigSource);
+    NPrivate::Write<IsSensitiveMeta>(_collector, _writable, _version, IsSensitive);
+    NPrivate::Write<SynonymsMeta>(_collector, _writable, _version, Synonyms);
+    NPrivate::Write<ConfigTypeMeta>(_collector, _writable, _version, ConfigType);
+    NPrivate::Write<DocumentationMeta>(_collector, _writable, _version, Documentation);
+
+    // Flexible versions: emit the tagged-field count as an unsigned varint.
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+    }
+}
+
+// Returns the serialized size in bytes for _version; must agree with Write().
+i32 TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::Size(TKafkaVersion _version) const {
+    NPrivate::TSizeCollector _collector;
+    NPrivate::Size<NameMeta>(_collector, _version, Name);
+    NPrivate::Size<ValueMeta>(_collector, _version, Value);
+    NPrivate::Size<ReadOnlyMeta>(_collector, _version, ReadOnly);
+    NPrivate::Size<IsDefaultMeta>(_collector, _version, IsDefault);
+    NPrivate::Size<ConfigSourceMeta>(_collector, _version, ConfigSource);
+    NPrivate::Size<IsSensitiveMeta>(_collector, _version, IsSensitive);
+    NPrivate::Size<SynonymsMeta>(_collector, _version, Synonyms);
+    NPrivate::Size<ConfigTypeMeta>(_collector, _version, ConfigType);
+    NPrivate::Size<DocumentationMeta>(_collector, _version, Documentation);
+
+    if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+        _collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
+    }
+    return _collector.Size;
+}
+
+
+//
+// TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::TDescribeConfigsSynonym
+//
+const TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::TDescribeConfigsSynonym::NameMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::TDescribeConfigsSynonym::NameMeta::Default = {""};
+const TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::TDescribeConfigsSynonym::ValueMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::TDescribeConfigsSynonym::ValueMeta::Default = {""};
+const TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::TDescribeConfigsSynonym::SourceMeta::Type TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::TDescribeConfigsSynonym::SourceMeta::Default = 0;
+
+TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::TDescribeConfigsSynonym::TDescribeConfigsSynonym()
+ : Name(NameMeta::Default)
+ , Value(ValueMeta::Default)
+ , Source(SourceMeta::Default)
+{}
+
+void TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::TDescribeConfigsSynonym::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
+ if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+ ythrow yexception() << "Can't read version " << _version << " of TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::TDescribeConfigsSynonym";
+ }
+ NPrivate::Read<NameMeta>(_readable, _version, Name);
+ NPrivate::Read<ValueMeta>(_readable, _version, Value);
+ NPrivate::Read<SourceMeta>(_readable, _version, Source);
+
+ if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+ ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
+ for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
+ ui32 _tag = _readable.readUnsignedVarint<ui32>();
+ ui32 _size = _readable.readUnsignedVarint<ui32>();
+ switch (_tag) {
+ default:
+ _readable.skip(_size); // skip unknown tag
+ break;
+ }
+ }
+ }
+}
+
+void TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::TDescribeConfigsSynonym::Write(TKafkaWritable& _writable, TKafkaVersion _version) const {
+ if (!NPrivate::VersionCheck<MessageMeta::PresentVersions.Min, MessageMeta::PresentVersions.Max>(_version)) {
+ ythrow yexception() << "Can't write version " << _version << " of TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::TDescribeConfigsSynonym";
+ }
+ NPrivate::TWriteCollector _collector;
+ NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
+ NPrivate::Write<ValueMeta>(_collector, _writable, _version, Value);
+ NPrivate::Write<SourceMeta>(_collector, _writable, _version, Source);
+
+ if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
+ _writable.writeUnsignedVarint(_collector.NumTaggedFields);
+
+ }
+}
+
+i32 TDescribeConfigsResponseData::TDescribeConfigsResult::TDescribeConfigsResourceResult::TDescribeConfigsSynonym::Size(TKafkaVersion _version) const {
+ NPrivate::TSizeCollector _collector;
+ NPrivate::Size<NameMeta>(_collector, _version, Name);
+ NPrivate::Size<ValueMeta>(_collector, _version, Value);
+ NPrivate::Size<SourceMeta>(_collector, _version, Source);
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5588,7 +7001,7 @@ i32 TInitProducerIdResponseData::Size(TKafkaVersion _version) const {
//
const TAlterConfigsRequestData::ValidateOnlyMeta::Type TAlterConfigsRequestData::ValidateOnlyMeta::Default = false;
-TAlterConfigsRequestData::TAlterConfigsRequestData()
+TAlterConfigsRequestData::TAlterConfigsRequestData()
: ValidateOnly(ValidateOnlyMeta::Default)
{}
@@ -5598,7 +7011,7 @@ void TAlterConfigsRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _ve
}
NPrivate::Read<ResourcesMeta>(_readable, _version, Resources);
NPrivate::Read<ValidateOnlyMeta>(_readable, _version, ValidateOnly);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5620,10 +7033,10 @@ void TAlterConfigsRequestData::Write(TKafkaWritable& _writable, TKafkaVersion _v
NPrivate::TWriteCollector _collector;
NPrivate::Write<ResourcesMeta>(_collector, _writable, _version, Resources);
NPrivate::Write<ValidateOnlyMeta>(_collector, _writable, _version, ValidateOnly);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -5631,7 +7044,7 @@ i32 TAlterConfigsRequestData::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<ResourcesMeta>(_collector, _version, Resources);
NPrivate::Size<ValidateOnlyMeta>(_collector, _version, ValidateOnly);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5645,7 +7058,7 @@ i32 TAlterConfigsRequestData::Size(TKafkaVersion _version) const {
const TAlterConfigsRequestData::TAlterConfigsResource::ResourceTypeMeta::Type TAlterConfigsRequestData::TAlterConfigsResource::ResourceTypeMeta::Default = 0;
const TAlterConfigsRequestData::TAlterConfigsResource::ResourceNameMeta::Type TAlterConfigsRequestData::TAlterConfigsResource::ResourceNameMeta::Default = {""};
-TAlterConfigsRequestData::TAlterConfigsResource::TAlterConfigsResource()
+TAlterConfigsRequestData::TAlterConfigsResource::TAlterConfigsResource()
: ResourceType(ResourceTypeMeta::Default)
, ResourceName(ResourceNameMeta::Default)
{}
@@ -5657,7 +7070,7 @@ void TAlterConfigsRequestData::TAlterConfigsResource::Read(TKafkaReadable& _read
NPrivate::Read<ResourceTypeMeta>(_readable, _version, ResourceType);
NPrivate::Read<ResourceNameMeta>(_readable, _version, ResourceName);
NPrivate::Read<ConfigsMeta>(_readable, _version, Configs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5680,10 +7093,10 @@ void TAlterConfigsRequestData::TAlterConfigsResource::Write(TKafkaWritable& _wri
NPrivate::Write<ResourceTypeMeta>(_collector, _writable, _version, ResourceType);
NPrivate::Write<ResourceNameMeta>(_collector, _writable, _version, ResourceName);
NPrivate::Write<ConfigsMeta>(_collector, _writable, _version, Configs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -5692,7 +7105,7 @@ i32 TAlterConfigsRequestData::TAlterConfigsResource::Size(TKafkaVersion _version
NPrivate::Size<ResourceTypeMeta>(_collector, _version, ResourceType);
NPrivate::Size<ResourceNameMeta>(_collector, _version, ResourceName);
NPrivate::Size<ConfigsMeta>(_collector, _version, Configs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5706,7 +7119,7 @@ i32 TAlterConfigsRequestData::TAlterConfigsResource::Size(TKafkaVersion _version
const TAlterConfigsRequestData::TAlterConfigsResource::TAlterableConfig::NameMeta::Type TAlterConfigsRequestData::TAlterConfigsResource::TAlterableConfig::NameMeta::Default = {""};
const TAlterConfigsRequestData::TAlterConfigsResource::TAlterableConfig::ValueMeta::Type TAlterConfigsRequestData::TAlterConfigsResource::TAlterableConfig::ValueMeta::Default = {""};
-TAlterConfigsRequestData::TAlterConfigsResource::TAlterableConfig::TAlterableConfig()
+TAlterConfigsRequestData::TAlterConfigsResource::TAlterableConfig::TAlterableConfig()
: Name(NameMeta::Default)
, Value(ValueMeta::Default)
{}
@@ -5717,7 +7130,7 @@ void TAlterConfigsRequestData::TAlterConfigsResource::TAlterableConfig::Read(TKa
}
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<ValueMeta>(_readable, _version, Value);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5739,10 +7152,10 @@ void TAlterConfigsRequestData::TAlterConfigsResource::TAlterableConfig::Write(TK
NPrivate::TWriteCollector _collector;
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<ValueMeta>(_collector, _writable, _version, Value);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -5750,7 +7163,7 @@ i32 TAlterConfigsRequestData::TAlterConfigsResource::TAlterableConfig::Size(TKaf
NPrivate::TSizeCollector _collector;
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<ValueMeta>(_collector, _version, Value);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5763,7 +7176,7 @@ i32 TAlterConfigsRequestData::TAlterConfigsResource::TAlterableConfig::Size(TKaf
//
const TAlterConfigsResponseData::ThrottleTimeMsMeta::Type TAlterConfigsResponseData::ThrottleTimeMsMeta::Default = 0;
-TAlterConfigsResponseData::TAlterConfigsResponseData()
+TAlterConfigsResponseData::TAlterConfigsResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
{}
@@ -5773,7 +7186,7 @@ void TAlterConfigsResponseData::Read(TKafkaReadable& _readable, TKafkaVersion _v
}
NPrivate::Read<ThrottleTimeMsMeta>(_readable, _version, ThrottleTimeMs);
NPrivate::Read<ResponsesMeta>(_readable, _version, Responses);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5795,10 +7208,10 @@ void TAlterConfigsResponseData::Write(TKafkaWritable& _writable, TKafkaVersion _
NPrivate::TWriteCollector _collector;
NPrivate::Write<ThrottleTimeMsMeta>(_collector, _writable, _version, ThrottleTimeMs);
NPrivate::Write<ResponsesMeta>(_collector, _writable, _version, Responses);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -5806,7 +7219,7 @@ i32 TAlterConfigsResponseData::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<ThrottleTimeMsMeta>(_collector, _version, ThrottleTimeMs);
NPrivate::Size<ResponsesMeta>(_collector, _version, Responses);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5822,7 +7235,7 @@ const TAlterConfigsResponseData::TAlterConfigsResourceResponse::ErrorMessageMeta
const TAlterConfigsResponseData::TAlterConfigsResourceResponse::ResourceTypeMeta::Type TAlterConfigsResponseData::TAlterConfigsResourceResponse::ResourceTypeMeta::Default = 0;
const TAlterConfigsResponseData::TAlterConfigsResourceResponse::ResourceNameMeta::Type TAlterConfigsResponseData::TAlterConfigsResourceResponse::ResourceNameMeta::Default = {""};
-TAlterConfigsResponseData::TAlterConfigsResourceResponse::TAlterConfigsResourceResponse()
+TAlterConfigsResponseData::TAlterConfigsResourceResponse::TAlterConfigsResourceResponse()
: ErrorCode(ErrorCodeMeta::Default)
, ErrorMessage(ErrorMessageMeta::Default)
, ResourceType(ResourceTypeMeta::Default)
@@ -5837,7 +7250,7 @@ void TAlterConfigsResponseData::TAlterConfigsResourceResponse::Read(TKafkaReadab
NPrivate::Read<ErrorMessageMeta>(_readable, _version, ErrorMessage);
NPrivate::Read<ResourceTypeMeta>(_readable, _version, ResourceType);
NPrivate::Read<ResourceNameMeta>(_readable, _version, ResourceName);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5861,10 +7274,10 @@ void TAlterConfigsResponseData::TAlterConfigsResourceResponse::Write(TKafkaWrita
NPrivate::Write<ErrorMessageMeta>(_collector, _writable, _version, ErrorMessage);
NPrivate::Write<ResourceTypeMeta>(_collector, _writable, _version, ResourceType);
NPrivate::Write<ResourceNameMeta>(_collector, _writable, _version, ResourceName);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -5874,7 +7287,7 @@ i32 TAlterConfigsResponseData::TAlterConfigsResourceResponse::Size(TKafkaVersion
NPrivate::Size<ErrorMessageMeta>(_collector, _version, ErrorMessage);
NPrivate::Size<ResourceTypeMeta>(_collector, _version, ResourceType);
NPrivate::Size<ResourceNameMeta>(_collector, _version, ResourceName);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5886,7 +7299,7 @@ i32 TAlterConfigsResponseData::TAlterConfigsResourceResponse::Size(TKafkaVersion
// TSaslAuthenticateRequestData
//
-TSaslAuthenticateRequestData::TSaslAuthenticateRequestData()
+TSaslAuthenticateRequestData::TSaslAuthenticateRequestData()
{}
void TSaslAuthenticateRequestData::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
@@ -5894,7 +7307,7 @@ void TSaslAuthenticateRequestData::Read(TKafkaReadable& _readable, TKafkaVersion
ythrow yexception() << "Can't read version " << _version << " of TSaslAuthenticateRequestData";
}
NPrivate::Read<AuthBytesMeta>(_readable, _version, AuthBytes);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5915,17 +7328,17 @@ void TSaslAuthenticateRequestData::Write(TKafkaWritable& _writable, TKafkaVersio
}
NPrivate::TWriteCollector _collector;
NPrivate::Write<AuthBytesMeta>(_collector, _writable, _version, AuthBytes);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
i32 TSaslAuthenticateRequestData::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<AuthBytesMeta>(_collector, _version, AuthBytes);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -5940,7 +7353,7 @@ const TSaslAuthenticateResponseData::ErrorCodeMeta::Type TSaslAuthenticateRespon
const TSaslAuthenticateResponseData::ErrorMessageMeta::Type TSaslAuthenticateResponseData::ErrorMessageMeta::Default = {""};
const TSaslAuthenticateResponseData::SessionLifetimeMsMeta::Type TSaslAuthenticateResponseData::SessionLifetimeMsMeta::Default = 0;
-TSaslAuthenticateResponseData::TSaslAuthenticateResponseData()
+TSaslAuthenticateResponseData::TSaslAuthenticateResponseData()
: ErrorCode(ErrorCodeMeta::Default)
, ErrorMessage(ErrorMessageMeta::Default)
, SessionLifetimeMs(SessionLifetimeMsMeta::Default)
@@ -5954,7 +7367,7 @@ void TSaslAuthenticateResponseData::Read(TKafkaReadable& _readable, TKafkaVersio
NPrivate::Read<ErrorMessageMeta>(_readable, _version, ErrorMessage);
NPrivate::Read<AuthBytesMeta>(_readable, _version, AuthBytes);
NPrivate::Read<SessionLifetimeMsMeta>(_readable, _version, SessionLifetimeMs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -5978,10 +7391,10 @@ void TSaslAuthenticateResponseData::Write(TKafkaWritable& _writable, TKafkaVersi
NPrivate::Write<ErrorMessageMeta>(_collector, _writable, _version, ErrorMessage);
NPrivate::Write<AuthBytesMeta>(_collector, _writable, _version, AuthBytes);
NPrivate::Write<SessionLifetimeMsMeta>(_collector, _writable, _version, SessionLifetimeMs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -5991,7 +7404,7 @@ i32 TSaslAuthenticateResponseData::Size(TKafkaVersion _version) const {
NPrivate::Size<ErrorMessageMeta>(_collector, _version, ErrorMessage);
NPrivate::Size<AuthBytesMeta>(_collector, _version, AuthBytes);
NPrivate::Size<SessionLifetimeMsMeta>(_collector, _version, SessionLifetimeMs);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -6005,7 +7418,7 @@ i32 TSaslAuthenticateResponseData::Size(TKafkaVersion _version) const {
const TCreatePartitionsRequestData::TimeoutMsMeta::Type TCreatePartitionsRequestData::TimeoutMsMeta::Default = 0;
const TCreatePartitionsRequestData::ValidateOnlyMeta::Type TCreatePartitionsRequestData::ValidateOnlyMeta::Default = false;
-TCreatePartitionsRequestData::TCreatePartitionsRequestData()
+TCreatePartitionsRequestData::TCreatePartitionsRequestData()
: TimeoutMs(TimeoutMsMeta::Default)
, ValidateOnly(ValidateOnlyMeta::Default)
{}
@@ -6017,7 +7430,7 @@ void TCreatePartitionsRequestData::Read(TKafkaReadable& _readable, TKafkaVersion
NPrivate::Read<TopicsMeta>(_readable, _version, Topics);
NPrivate::Read<TimeoutMsMeta>(_readable, _version, TimeoutMs);
NPrivate::Read<ValidateOnlyMeta>(_readable, _version, ValidateOnly);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -6040,10 +7453,10 @@ void TCreatePartitionsRequestData::Write(TKafkaWritable& _writable, TKafkaVersio
NPrivate::Write<TopicsMeta>(_collector, _writable, _version, Topics);
NPrivate::Write<TimeoutMsMeta>(_collector, _writable, _version, TimeoutMs);
NPrivate::Write<ValidateOnlyMeta>(_collector, _writable, _version, ValidateOnly);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -6052,7 +7465,7 @@ i32 TCreatePartitionsRequestData::Size(TKafkaVersion _version) const {
NPrivate::Size<TopicsMeta>(_collector, _version, Topics);
NPrivate::Size<TimeoutMsMeta>(_collector, _version, TimeoutMs);
NPrivate::Size<ValidateOnlyMeta>(_collector, _version, ValidateOnly);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -6066,7 +7479,7 @@ i32 TCreatePartitionsRequestData::Size(TKafkaVersion _version) const {
const TCreatePartitionsRequestData::TCreatePartitionsTopic::NameMeta::Type TCreatePartitionsRequestData::TCreatePartitionsTopic::NameMeta::Default = {""};
const TCreatePartitionsRequestData::TCreatePartitionsTopic::CountMeta::Type TCreatePartitionsRequestData::TCreatePartitionsTopic::CountMeta::Default = 0;
-TCreatePartitionsRequestData::TCreatePartitionsTopic::TCreatePartitionsTopic()
+TCreatePartitionsRequestData::TCreatePartitionsTopic::TCreatePartitionsTopic()
: Name(NameMeta::Default)
, Count(CountMeta::Default)
{}
@@ -6078,7 +7491,7 @@ void TCreatePartitionsRequestData::TCreatePartitionsTopic::Read(TKafkaReadable&
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<CountMeta>(_readable, _version, Count);
NPrivate::Read<AssignmentsMeta>(_readable, _version, Assignments);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -6101,10 +7514,10 @@ void TCreatePartitionsRequestData::TCreatePartitionsTopic::Write(TKafkaWritable&
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<CountMeta>(_collector, _writable, _version, Count);
NPrivate::Write<AssignmentsMeta>(_collector, _writable, _version, Assignments);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -6113,7 +7526,7 @@ i32 TCreatePartitionsRequestData::TCreatePartitionsTopic::Size(TKafkaVersion _ve
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<CountMeta>(_collector, _version, Count);
NPrivate::Size<AssignmentsMeta>(_collector, _version, Assignments);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -6125,7 +7538,7 @@ i32 TCreatePartitionsRequestData::TCreatePartitionsTopic::Size(TKafkaVersion _ve
// TCreatePartitionsRequestData::TCreatePartitionsTopic::TCreatePartitionsAssignment
//
-TCreatePartitionsRequestData::TCreatePartitionsTopic::TCreatePartitionsAssignment::TCreatePartitionsAssignment()
+TCreatePartitionsRequestData::TCreatePartitionsTopic::TCreatePartitionsAssignment::TCreatePartitionsAssignment()
{}
void TCreatePartitionsRequestData::TCreatePartitionsTopic::TCreatePartitionsAssignment::Read(TKafkaReadable& _readable, TKafkaVersion _version) {
@@ -6133,7 +7546,7 @@ void TCreatePartitionsRequestData::TCreatePartitionsTopic::TCreatePartitionsAssi
ythrow yexception() << "Can't read version " << _version << " of TCreatePartitionsRequestData::TCreatePartitionsTopic::TCreatePartitionsAssignment";
}
NPrivate::Read<BrokerIdsMeta>(_readable, _version, BrokerIds);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -6154,17 +7567,17 @@ void TCreatePartitionsRequestData::TCreatePartitionsTopic::TCreatePartitionsAssi
}
NPrivate::TWriteCollector _collector;
NPrivate::Write<BrokerIdsMeta>(_collector, _writable, _version, BrokerIds);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
i32 TCreatePartitionsRequestData::TCreatePartitionsTopic::TCreatePartitionsAssignment::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<BrokerIdsMeta>(_collector, _version, BrokerIds);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -6177,7 +7590,7 @@ i32 TCreatePartitionsRequestData::TCreatePartitionsTopic::TCreatePartitionsAssig
//
const TCreatePartitionsResponseData::ThrottleTimeMsMeta::Type TCreatePartitionsResponseData::ThrottleTimeMsMeta::Default = 0;
-TCreatePartitionsResponseData::TCreatePartitionsResponseData()
+TCreatePartitionsResponseData::TCreatePartitionsResponseData()
: ThrottleTimeMs(ThrottleTimeMsMeta::Default)
{}
@@ -6187,7 +7600,7 @@ void TCreatePartitionsResponseData::Read(TKafkaReadable& _readable, TKafkaVersio
}
NPrivate::Read<ThrottleTimeMsMeta>(_readable, _version, ThrottleTimeMs);
NPrivate::Read<ResultsMeta>(_readable, _version, Results);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -6209,10 +7622,10 @@ void TCreatePartitionsResponseData::Write(TKafkaWritable& _writable, TKafkaVersi
NPrivate::TWriteCollector _collector;
NPrivate::Write<ThrottleTimeMsMeta>(_collector, _writable, _version, ThrottleTimeMs);
NPrivate::Write<ResultsMeta>(_collector, _writable, _version, Results);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -6220,7 +7633,7 @@ i32 TCreatePartitionsResponseData::Size(TKafkaVersion _version) const {
NPrivate::TSizeCollector _collector;
NPrivate::Size<ThrottleTimeMsMeta>(_collector, _version, ThrottleTimeMs);
NPrivate::Size<ResultsMeta>(_collector, _version, Results);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
@@ -6235,7 +7648,7 @@ const TCreatePartitionsResponseData::TCreatePartitionsTopicResult::NameMeta::Typ
const TCreatePartitionsResponseData::TCreatePartitionsTopicResult::ErrorCodeMeta::Type TCreatePartitionsResponseData::TCreatePartitionsTopicResult::ErrorCodeMeta::Default = 0;
const TCreatePartitionsResponseData::TCreatePartitionsTopicResult::ErrorMessageMeta::Type TCreatePartitionsResponseData::TCreatePartitionsTopicResult::ErrorMessageMeta::Default = std::nullopt;
-TCreatePartitionsResponseData::TCreatePartitionsTopicResult::TCreatePartitionsTopicResult()
+TCreatePartitionsResponseData::TCreatePartitionsTopicResult::TCreatePartitionsTopicResult()
: Name(NameMeta::Default)
, ErrorCode(ErrorCodeMeta::Default)
, ErrorMessage(ErrorMessageMeta::Default)
@@ -6248,7 +7661,7 @@ void TCreatePartitionsResponseData::TCreatePartitionsTopicResult::Read(TKafkaRea
NPrivate::Read<NameMeta>(_readable, _version, Name);
NPrivate::Read<ErrorCodeMeta>(_readable, _version, ErrorCode);
NPrivate::Read<ErrorMessageMeta>(_readable, _version, ErrorMessage);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
ui32 _numTaggedFields = _readable.readUnsignedVarint<ui32>();
for (ui32 _i = 0; _i < _numTaggedFields; ++_i) {
@@ -6271,10 +7684,10 @@ void TCreatePartitionsResponseData::TCreatePartitionsTopicResult::Write(TKafkaWr
NPrivate::Write<NameMeta>(_collector, _writable, _version, Name);
NPrivate::Write<ErrorCodeMeta>(_collector, _writable, _version, ErrorCode);
NPrivate::Write<ErrorMessageMeta>(_collector, _writable, _version, ErrorMessage);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_writable.writeUnsignedVarint(_collector.NumTaggedFields);
-
+
}
}
@@ -6283,7 +7696,7 @@ i32 TCreatePartitionsResponseData::TCreatePartitionsTopicResult::Size(TKafkaVers
NPrivate::Size<NameMeta>(_collector, _version, Name);
NPrivate::Size<ErrorCodeMeta>(_collector, _version, ErrorCode);
NPrivate::Size<ErrorMessageMeta>(_collector, _version, ErrorMessage);
-
+
if (NPrivate::VersionCheck<MessageMeta::FlexibleVersions.Min, MessageMeta::FlexibleVersions.Max>(_version)) {
_collector.Size += NPrivate::SizeOfUnsignedVarint(_collector.NumTaggedFields);
}
diff --git a/ydb/core/kafka_proxy/kafka_messages.h b/ydb/core/kafka_proxy/kafka_messages.h
index 72f55d30fc..dfb837f409 100644
--- a/ydb/core/kafka_proxy/kafka_messages.h
+++ b/ydb/core/kafka_proxy/kafka_messages.h
@@ -5,6 +5,7 @@
#pragma once
#include "kafka_messages_int.h"
+
namespace NKafka {
enum EListenerType {
@@ -14,25 +15,30 @@ enum EListenerType {
};
enum EApiKey {
- HEADER = -1, // []
- PRODUCE = 0, // [ZK_BROKER, BROKER]
- FETCH = 1, // [ZK_BROKER, BROKER, CONTROLLER]
- LIST_OFFSETS = 2, // [ZK_BROKER, BROKER]
- METADATA = 3, // [ZK_BROKER, BROKER]
- OFFSET_COMMIT = 8, // [ZK_BROKER, BROKER]
- OFFSET_FETCH = 9, // [ZK_BROKER, BROKER]
- FIND_COORDINATOR = 10, // [ZK_BROKER, BROKER]
- JOIN_GROUP = 11, // [ZK_BROKER, BROKER]
- HEARTBEAT = 12, // [ZK_BROKER, BROKER]
- LEAVE_GROUP = 13, // [ZK_BROKER, BROKER]
- SYNC_GROUP = 14, // [ZK_BROKER, BROKER]
- SASL_HANDSHAKE = 17, // [ZK_BROKER, BROKER, CONTROLLER]
- API_VERSIONS = 18, // [ZK_BROKER, BROKER, CONTROLLER]
- CREATE_TOPICS = 19, // [ZK_BROKER, BROKER, CONTROLLER]
- INIT_PRODUCER_ID = 22, // [ZK_BROKER, BROKER]
- ALTER_CONFIGS = 33, // [ZK_BROKER, BROKER, CONTROLLER]
- SASL_AUTHENTICATE = 36, // [ZK_BROKER, BROKER, CONTROLLER]
- CREATE_PARTITIONS = 37, // [ZK_BROKER, BROKER, CONTROLLER]
+ HEADER = -1, // []
+ PRODUCE = 0, // [ZK_BROKER, BROKER]
+ FETCH = 1, // [ZK_BROKER, BROKER, CONTROLLER]
+ LIST_OFFSETS = 2, // [ZK_BROKER, BROKER]
+ METADATA = 3, // [ZK_BROKER, BROKER]
+ OFFSET_COMMIT = 8, // [ZK_BROKER, BROKER]
+ OFFSET_FETCH = 9, // [ZK_BROKER, BROKER]
+ FIND_COORDINATOR = 10, // [ZK_BROKER, BROKER]
+ JOIN_GROUP = 11, // [ZK_BROKER, BROKER]
+ HEARTBEAT = 12, // [ZK_BROKER, BROKER]
+ LEAVE_GROUP = 13, // [ZK_BROKER, BROKER]
+ SYNC_GROUP = 14, // [ZK_BROKER, BROKER]
+ SASL_HANDSHAKE = 17, // [ZK_BROKER, BROKER, CONTROLLER]
+ API_VERSIONS = 18, // [ZK_BROKER, BROKER, CONTROLLER]
+ CREATE_TOPICS = 19, // [ZK_BROKER, BROKER, CONTROLLER]
+ INIT_PRODUCER_ID = 22, // [ZK_BROKER, BROKER]
+ ADD_PARTITIONS_TO_TXN = 24, // [ZK_BROKER, BROKER]
+ ADD_OFFSETS_TO_TXN = 25, // [ZK_BROKER, BROKER]
+ END_TXN = 26, // [ZK_BROKER, BROKER]
+ TXN_OFFSET_COMMIT = 28, // [ZK_BROKER, BROKER]
+ DESCRIBE_CONFIGS = 32, // [ZK_BROKER, BROKER]
+ ALTER_CONFIGS = 33, // [ZK_BROKER, BROKER, CONTROLLER]
+ SASL_AUTHENTICATE = 36, // [ZK_BROKER, BROKER, CONTROLLER]
+ CREATE_PARTITIONS = 37, // [ZK_BROKER, BROKER, CONTROLLER]
};
extern const std::unordered_map<EApiKey, TString> EApiKeyNames;
@@ -43,80 +49,80 @@ extern const std::unordered_map<EApiKey, TString> EApiKeyNames;
class TRequestHeaderData : public TApiMessage {
public:
typedef std::shared_ptr<TRequestHeaderData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 2};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TRequestHeaderData();
~TRequestHeaderData() = default;
-
+
struct RequestApiKeyMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "requestApiKey";
static constexpr const char* About = "The API key of this request.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
RequestApiKeyMeta::Type RequestApiKey;
-
+
struct RequestApiVersionMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "requestApiVersion";
static constexpr const char* About = "The API version of this request.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
RequestApiVersionMeta::Type RequestApiVersion;
-
+
struct CorrelationIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "correlationId";
static constexpr const char* About = "The correlation ID of this request.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
CorrelationIdMeta::Type CorrelationId;
-
+
struct ClientIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "clientId";
static constexpr const char* About = "The client ID string.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = VersionsNever;
};
ClientIdMeta::Type ClientId;
-
+
i16 ApiKey() const override { return HEADER; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TRequestHeaderData& other) const = default;
};
@@ -124,35 +130,35 @@ public:
class TResponseHeaderData : public TApiMessage {
public:
typedef std::shared_ptr<TResponseHeaderData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 1};
static constexpr TKafkaVersions FlexibleVersions = {1, Max<TKafkaVersion>()};
};
-
+
TResponseHeaderData();
~TResponseHeaderData() = default;
-
+
struct CorrelationIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "correlationId";
static constexpr const char* About = "The correlation ID of this response.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {1, Max<TKafkaVersion>()};
};
CorrelationIdMeta::Type CorrelationId;
-
+
i16 ApiKey() const override { return HEADER; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TResponseHeaderData& other) const = default;
};
@@ -160,175 +166,175 @@ public:
class TProduceRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TProduceRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 9};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
-
+
TProduceRequestData();
~TProduceRequestData() = default;
-
+
class TTopicProduceData : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 9};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
-
+
TTopicProduceData();
~TTopicProduceData() = default;
-
+
class TPartitionProduceData : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 9};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
-
+
TPartitionProduceData();
~TPartitionProduceData() = default;
-
+
struct IndexMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "index";
static constexpr const char* About = "The partition index.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
IndexMeta::Type Index;
-
+
struct RecordsMeta {
using Type = TKafkaRecords;
using TypeDesc = NPrivate::TKafkaRecordsDesc;
-
+
static constexpr const char* Name = "records";
static constexpr const char* About = "The record data to be produced.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
RecordsMeta::Type Records;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TPartitionProduceData& other) const = default;
};
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct PartitionDataMeta {
using ItemType = TPartitionProduceData;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TPartitionProduceData>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "partitionData";
static constexpr const char* About = "Each partition to produce to.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
PartitionDataMeta::Type PartitionData;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TTopicProduceData& other) const = default;
};
-
+
struct TransactionalIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "transactionalId";
static constexpr const char* About = "The transactional ID, or null if the producer is not transactional.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
TransactionalIdMeta::Type TransactionalId;
-
+
struct AcksMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "acks";
static constexpr const char* About = "The number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
AcksMeta::Type Acks;
-
+
struct TimeoutMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "timeoutMs";
static constexpr const char* About = "The timeout to await a response in milliseconds.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
TimeoutMsMeta::Type TimeoutMs;
-
+
struct TopicDataMeta {
using ItemType = TTopicProduceData;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TTopicProduceData>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topicData";
static constexpr const char* About = "Each topic to produce to.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
TopicDataMeta::Type TopicData;
-
+
i16 ApiKey() const override { return PRODUCE; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TProduceRequestData& other) const = default;
};
@@ -336,269 +342,269 @@ public:
class TProduceResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TProduceResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 9};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
-
+
TProduceResponseData();
~TProduceResponseData() = default;
-
+
class TTopicProduceResponse : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 9};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
-
+
TTopicProduceResponse();
~TTopicProduceResponse() = default;
-
+
class TPartitionProduceResponse : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 9};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
-
+
TPartitionProduceResponse();
~TPartitionProduceResponse() = default;
-
+
class TBatchIndexAndErrorMessage : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {8, 9};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
-
+
TBatchIndexAndErrorMessage();
~TBatchIndexAndErrorMessage() = default;
-
+
struct BatchIndexMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "batchIndex";
static constexpr const char* About = "The batch index of the record that cause the batch to be dropped";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
BatchIndexMeta::Type BatchIndex;
-
+
struct BatchIndexErrorMessageMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "batchIndexErrorMessage";
static constexpr const char* About = "The error message of the record that caused the batch to be dropped";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
BatchIndexErrorMessageMeta::Type BatchIndexErrorMessage;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TBatchIndexAndErrorMessage& other) const = default;
};
-
+
struct IndexMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "index";
static constexpr const char* About = "The partition index.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
IndexMeta::Type Index;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct BaseOffsetMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "baseOffset";
static constexpr const char* About = "The base offset.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
BaseOffsetMeta::Type BaseOffset;
-
+
struct LogAppendTimeMsMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "logAppendTimeMs";
static constexpr const char* About = "The timestamp returned by broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {2, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
LogAppendTimeMsMeta::Type LogAppendTimeMs;
-
+
struct LogStartOffsetMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "logStartOffset";
static constexpr const char* About = "The log start offset.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
LogStartOffsetMeta::Type LogStartOffset;
-
+
struct RecordErrorsMeta {
using ItemType = TBatchIndexAndErrorMessage;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TBatchIndexAndErrorMessage>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "recordErrors";
static constexpr const char* About = "The batch indices of records that caused the batch to be dropped";
-
+
static constexpr TKafkaVersions PresentVersions = {8, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
RecordErrorsMeta::Type RecordErrors;
-
+
struct ErrorMessageMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "errorMessage";
static constexpr const char* About = "The global error message summarizing the common root cause of the records that caused the batch to be dropped";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {8, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
ErrorMessageMeta::Type ErrorMessage;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TPartitionProduceResponse& other) const = default;
};
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct PartitionResponsesMeta {
using ItemType = TPartitionProduceResponse;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TPartitionProduceResponse>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "partitionResponses";
static constexpr const char* About = "Each partition that we produced to within the topic.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
PartitionResponsesMeta::Type PartitionResponses;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TTopicProduceResponse& other) const = default;
};
-
+
struct ResponsesMeta {
using ItemType = TTopicProduceResponse;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TTopicProduceResponse>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "responses";
static constexpr const char* About = "Each produce response";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
ResponsesMeta::Type Responses;
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
i16 ApiKey() const override { return PRODUCE; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TProduceResponseData& other) const = default;
};
@@ -606,421 +612,421 @@ public:
class TFetchRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TFetchRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 13};
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
-
+
TFetchRequestData();
~TFetchRequestData() = default;
-
+
class TFetchTopic : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 13};
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
-
+
TFetchTopic();
~TFetchTopic() = default;
-
+
class TFetchPartition : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 13};
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
-
+
TFetchPartition();
~TFetchPartition() = default;
-
+
struct PartitionMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "partition";
static constexpr const char* About = "The partition index.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
PartitionMeta::Type Partition;
-
+
struct CurrentLeaderEpochMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "currentLeaderEpoch";
static constexpr const char* About = "The current leader epoch of the partition.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {9, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
CurrentLeaderEpochMeta::Type CurrentLeaderEpoch;
-
+
struct FetchOffsetMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "fetchOffset";
static constexpr const char* About = "The message offset.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
FetchOffsetMeta::Type FetchOffset;
-
+
struct LastFetchedEpochMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "lastFetchedEpoch";
static constexpr const char* About = "The epoch of the last fetched record or -1 if there is none";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {12, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
LastFetchedEpochMeta::Type LastFetchedEpoch;
-
+
struct LogStartOffsetMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "logStartOffset";
static constexpr const char* About = "The earliest available offset of the follower replica. The field is only used when the request is sent by the follower.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
LogStartOffsetMeta::Type LogStartOffset;
-
+
struct PartitionMaxBytesMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "partitionMaxBytes";
static constexpr const char* About = "The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
PartitionMaxBytesMeta::Type PartitionMaxBytes;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TFetchPartition& other) const = default;
};
-
+
struct TopicMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "topic";
static constexpr const char* About = "The name of the topic to fetch.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = {0, 12};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
TopicMeta::Type Topic;
-
+
struct TopicIdMeta {
using Type = TKafkaUuid;
using TypeDesc = NPrivate::TKafkaUuidDesc;
-
+
static constexpr const char* Name = "topicId";
static constexpr const char* About = "The unique topic ID";
static const Type Default; // = TKafkaUuid(0, 0);
-
+
static constexpr TKafkaVersions PresentVersions = {13, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
TopicIdMeta::Type TopicId;
-
+
struct PartitionsMeta {
using ItemType = TFetchPartition;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TFetchPartition>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "partitions";
static constexpr const char* About = "The partitions to fetch.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
PartitionsMeta::Type Partitions;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TFetchTopic& other) const = default;
};
-
+
class TForgottenTopic : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {7, 13};
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
-
+
TForgottenTopic();
~TForgottenTopic() = default;
-
+
struct TopicMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "topic";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = {0, 12};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
TopicMeta::Type Topic;
-
+
struct TopicIdMeta {
using Type = TKafkaUuid;
using TypeDesc = NPrivate::TKafkaUuidDesc;
-
+
static constexpr const char* Name = "topicId";
static constexpr const char* About = "The unique topic ID";
static const Type Default; // = TKafkaUuid(0, 0);
-
+
static constexpr TKafkaVersions PresentVersions = {13, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
TopicIdMeta::Type TopicId;
-
+
struct PartitionsMeta {
using ItemType = TKafkaInt32;
using ItemTypeDesc = NPrivate::TKafkaIntDesc;
using Type = std::vector<TKafkaInt32>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "partitions";
static constexpr const char* About = "The partitions indexes to forget.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
PartitionsMeta::Type Partitions;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TForgottenTopic& other) const = default;
};
-
+
struct ClusterIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "clusterId";
static constexpr const char* About = "The clusterId if known. This is used to validate metadata fetches prior to broker registration.";
static constexpr const TKafkaInt32 Tag = 0;
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {12, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsAlways;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ClusterIdMeta::Type ClusterId;
-
+
struct ReplicaIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "replicaId";
static constexpr const char* About = "The broker ID of the follower, of -1 if this request is from a consumer.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
ReplicaIdMeta::Type ReplicaId;
-
+
struct MaxWaitMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "maxWaitMs";
static constexpr const char* About = "The maximum time in milliseconds to wait for the response.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
MaxWaitMsMeta::Type MaxWaitMs;
-
+
struct MinBytesMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "minBytes";
static constexpr const char* About = "The minimum bytes to accumulate in the response.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
MinBytesMeta::Type MinBytes;
-
+
struct MaxBytesMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "maxBytes";
static constexpr const char* About = "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored.";
static const Type Default; // = 0x7fffffff;
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
MaxBytesMeta::Type MaxBytes;
-
+
struct IsolationLevelMeta {
using Type = TKafkaInt8;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "isolationLevel";
static constexpr const char* About = "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {4, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
IsolationLevelMeta::Type IsolationLevel;
-
+
struct SessionIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "sessionId";
static constexpr const char* About = "The fetch session ID.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {7, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
SessionIdMeta::Type SessionId;
-
+
struct SessionEpochMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "sessionEpoch";
static constexpr const char* About = "The fetch session epoch, which is used for ordering requests in a session.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {7, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
SessionEpochMeta::Type SessionEpoch;
-
+
struct TopicsMeta {
using ItemType = TFetchTopic;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TFetchTopic>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topics";
static constexpr const char* About = "The topics to fetch.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
TopicsMeta::Type Topics;
-
+
struct ForgottenTopicsDataMeta {
using ItemType = TForgottenTopic;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TForgottenTopic>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "forgottenTopicsData";
static constexpr const char* About = "In an incremental fetch request, the partitions to remove.";
-
+
static constexpr TKafkaVersions PresentVersions = {7, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
ForgottenTopicsDataMeta::Type ForgottenTopicsData;
-
+
struct RackIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "rackId";
static constexpr const char* About = "Rack ID of the consumer making this request";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = {11, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
RackIdMeta::Type RackId;
-
+
i16 ApiKey() const override { return FETCH; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TFetchRequestData& other) const = default;
};
@@ -1028,514 +1034,514 @@ public:
class TFetchResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TFetchResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 13};
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
-
+
TFetchResponseData();
~TFetchResponseData() = default;
-
+
class TFetchableTopicResponse : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 13};
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
-
+
TFetchableTopicResponse();
~TFetchableTopicResponse() = default;
-
+
class TPartitionData : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 13};
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
-
+
TPartitionData();
~TPartitionData() = default;
-
+
class TEpochEndOffset : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {12, 13};
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
-
+
TEpochEndOffset();
~TEpochEndOffset() = default;
-
+
struct EpochMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "epoch";
static constexpr const char* About = "";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
EpochMeta::Type Epoch;
-
+
struct EndOffsetMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "endOffset";
static constexpr const char* About = "";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
EndOffsetMeta::Type EndOffset;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TEpochEndOffset& other) const = default;
};
-
+
class TLeaderIdAndEpoch : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {12, 13};
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
-
+
TLeaderIdAndEpoch();
~TLeaderIdAndEpoch() = default;
-
+
struct LeaderIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "leaderId";
static constexpr const char* About = "The ID of the current leader or -1 if the leader is unknown.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
LeaderIdMeta::Type LeaderId;
-
+
struct LeaderEpochMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "leaderEpoch";
static constexpr const char* About = "The latest known leader epoch";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
LeaderEpochMeta::Type LeaderEpoch;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TLeaderIdAndEpoch& other) const = default;
};
-
+
class TSnapshotId : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {12, 13};
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
-
+
TSnapshotId();
~TSnapshotId() = default;
-
+
struct EndOffsetMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "endOffset";
static constexpr const char* About = "";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
EndOffsetMeta::Type EndOffset;
-
+
struct EpochMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "epoch";
static constexpr const char* About = "";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
EpochMeta::Type Epoch;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TSnapshotId& other) const = default;
};
-
+
class TAbortedTransaction : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {4, 13};
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
-
+
TAbortedTransaction();
~TAbortedTransaction() = default;
-
+
struct ProducerIdMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "producerId";
static constexpr const char* About = "The producer id associated with the aborted transaction.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
ProducerIdMeta::Type ProducerId;
-
+
struct FirstOffsetMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "firstOffset";
static constexpr const char* About = "The first offset in the aborted transaction.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
FirstOffsetMeta::Type FirstOffset;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TAbortedTransaction& other) const = default;
};
-
+
struct PartitionIndexMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "partitionIndex";
static constexpr const char* About = "The partition index.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
PartitionIndexMeta::Type PartitionIndex;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no fetch error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct HighWatermarkMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "highWatermark";
static constexpr const char* About = "The current high water mark.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
HighWatermarkMeta::Type HighWatermark;
-
+
struct LastStableOffsetMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "lastStableOffset";
static constexpr const char* About = "The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED)";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {4, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
LastStableOffsetMeta::Type LastStableOffset;
-
+
struct LogStartOffsetMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "logStartOffset";
static constexpr const char* About = "The current log start offset.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
LogStartOffsetMeta::Type LogStartOffset;
-
+
struct DivergingEpochMeta {
using Type = TEpochEndOffset;
using TypeDesc = NPrivate::TKafkaStructDesc;
-
+
static constexpr const char* Name = "divergingEpoch";
static constexpr const char* About = "In case divergence is detected based on the `LastFetchedEpoch` and `FetchOffset` in the request, this field indicates the largest epoch and its end offset such that subsequent records are known to diverge";
static constexpr const TKafkaInt32 Tag = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {12, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsAlways;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
DivergingEpochMeta::Type DivergingEpoch;
-
+
struct CurrentLeaderMeta {
using Type = TLeaderIdAndEpoch;
using TypeDesc = NPrivate::TKafkaStructDesc;
-
+
static constexpr const char* Name = "currentLeader";
static constexpr const char* About = "";
static constexpr const TKafkaInt32 Tag = 1;
-
+
static constexpr TKafkaVersions PresentVersions = {12, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsAlways;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
CurrentLeaderMeta::Type CurrentLeader;
-
+
struct SnapshotIdMeta {
using Type = TSnapshotId;
using TypeDesc = NPrivate::TKafkaStructDesc;
-
+
static constexpr const char* Name = "snapshotId";
static constexpr const char* About = "In the case of fetching an offset less than the LogStartOffset, this is the end offset and epoch that should be used in the FetchSnapshot request.";
static constexpr const TKafkaInt32 Tag = 2;
-
+
static constexpr TKafkaVersions PresentVersions = {12, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsAlways;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
SnapshotIdMeta::Type SnapshotId;
-
+
struct AbortedTransactionsMeta {
using ItemType = TAbortedTransaction;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TAbortedTransaction>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "abortedTransactions";
static constexpr const char* About = "The aborted transactions.";
-
+
static constexpr TKafkaVersions PresentVersions = {4, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
AbortedTransactionsMeta::Type AbortedTransactions;
-
+
struct PreferredReadReplicaMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "preferredReadReplica";
static constexpr const char* About = "The preferred read replica for the consumer to use on its next fetch request";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {11, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
PreferredReadReplicaMeta::Type PreferredReadReplica;
-
+
struct RecordsMeta {
using Type = TKafkaRecords;
using TypeDesc = NPrivate::TKafkaRecordsDesc;
-
+
static constexpr const char* Name = "records";
static constexpr const char* About = "The record data.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
RecordsMeta::Type Records;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TPartitionData& other) const = default;
};
-
+
struct TopicMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "topic";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = {0, 12};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
TopicMeta::Type Topic;
-
+
struct TopicIdMeta {
using Type = TKafkaUuid;
using TypeDesc = NPrivate::TKafkaUuidDesc;
-
+
static constexpr const char* Name = "topicId";
static constexpr const char* About = "The unique topic ID";
static const Type Default; // = TKafkaUuid(0, 0);
-
+
static constexpr TKafkaVersions PresentVersions = {13, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
TopicIdMeta::Type TopicId;
-
+
struct PartitionsMeta {
using ItemType = TPartitionData;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TPartitionData>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "partitions";
static constexpr const char* About = "The topic partitions.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
PartitionsMeta::Type Partitions;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TFetchableTopicResponse& other) const = default;
};
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The top level response error code.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {7, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct SessionIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "sessionId";
static constexpr const char* About = "The fetch session ID, or 0 if this is not part of a fetch session.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {7, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
SessionIdMeta::Type SessionId;
-
+
struct ResponsesMeta {
using ItemType = TFetchableTopicResponse;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TFetchableTopicResponse>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "responses";
static constexpr const char* About = "The response topics.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {12, Max<TKafkaVersion>()};
};
ResponsesMeta::Type Responses;
-
+
i16 ApiKey() const override { return FETCH; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TFetchResponseData& other) const = default;
};
@@ -1543,191 +1549,191 @@ public:
class TListOffsetsRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TListOffsetsRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TListOffsetsRequestData();
~TListOffsetsRequestData() = default;
-
+
class TListOffsetsTopic : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TListOffsetsTopic();
~TListOffsetsTopic() = default;
-
+
class TListOffsetsPartition : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TListOffsetsPartition();
~TListOffsetsPartition() = default;
-
+
struct PartitionIndexMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "partitionIndex";
static constexpr const char* About = "The partition index.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
PartitionIndexMeta::Type PartitionIndex;
-
+
struct CurrentLeaderEpochMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "currentLeaderEpoch";
static constexpr const char* About = "The current leader epoch.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {4, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
CurrentLeaderEpochMeta::Type CurrentLeaderEpoch;
-
+
struct TimestampMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "timestamp";
static constexpr const char* About = "The current timestamp.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
TimestampMeta::Type Timestamp;
-
+
struct MaxNumOffsetsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "maxNumOffsets";
static constexpr const char* About = "The maximum number of offsets to report.";
static const Type Default; // = 1;
-
+
static constexpr TKafkaVersions PresentVersions = {0, 0};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
MaxNumOffsetsMeta::Type MaxNumOffsets;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TListOffsetsPartition& other) const = default;
};
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct PartitionsMeta {
using ItemType = TListOffsetsPartition;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TListOffsetsPartition>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "partitions";
static constexpr const char* About = "Each partition in the request.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
PartitionsMeta::Type Partitions;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TListOffsetsTopic& other) const = default;
};
-
+
struct ReplicaIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "replicaId";
static constexpr const char* About = "The broker ID of the requestor, or -1 if this request is being made by a normal consumer.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
ReplicaIdMeta::Type ReplicaId;
-
+
struct IsolationLevelMeta {
using Type = TKafkaInt8;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "isolationLevel";
static constexpr const char* About = "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {2, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
IsolationLevelMeta::Type IsolationLevel;
-
+
struct TopicsMeta {
using ItemType = TListOffsetsTopic;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TListOffsetsTopic>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topics";
static constexpr const char* About = "Each topic in the request.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
TopicsMeta::Type Topics;
-
+
i16 ApiKey() const override { return LIST_OFFSETS; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TListOffsetsRequestData& other) const = default;
};
@@ -1735,207 +1741,207 @@ public:
class TListOffsetsResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TListOffsetsResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TListOffsetsResponseData();
~TListOffsetsResponseData() = default;
-
+
class TListOffsetsTopicResponse : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TListOffsetsTopicResponse();
~TListOffsetsTopicResponse() = default;
-
+
class TListOffsetsPartitionResponse : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TListOffsetsPartitionResponse();
~TListOffsetsPartitionResponse() = default;
-
+
struct PartitionIndexMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "partitionIndex";
static constexpr const char* About = "The partition index.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
PartitionIndexMeta::Type PartitionIndex;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The partition error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct OldStyleOffsetsMeta {
using ItemType = TKafkaInt64;
using ItemTypeDesc = NPrivate::TKafkaIntDesc;
using Type = std::vector<TKafkaInt64>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "oldStyleOffsets";
static constexpr const char* About = "The result offsets.";
-
+
static constexpr TKafkaVersions PresentVersions = {0, 0};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
OldStyleOffsetsMeta::Type OldStyleOffsets;
-
+
struct TimestampMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "timestamp";
static constexpr const char* About = "The timestamp associated with the returned offset.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
TimestampMeta::Type Timestamp;
-
+
struct OffsetMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "offset";
static constexpr const char* About = "The returned offset.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
OffsetMeta::Type Offset;
-
+
struct LeaderEpochMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "leaderEpoch";
static constexpr const char* About = "";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {4, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
LeaderEpochMeta::Type LeaderEpoch;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TListOffsetsPartitionResponse& other) const = default;
};
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct PartitionsMeta {
using ItemType = TListOffsetsPartitionResponse;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TListOffsetsPartitionResponse>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "partitions";
static constexpr const char* About = "Each partition in the response.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
PartitionsMeta::Type Partitions;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TListOffsetsTopicResponse& other) const = default;
};
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {2, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct TopicsMeta {
using ItemType = TListOffsetsTopicResponse;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TListOffsetsTopicResponse>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topics";
static constexpr const char* About = "Each topic in the response.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
TopicsMeta::Type Topics;
-
+
i16 ApiKey() const override { return LIST_OFFSETS; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TListOffsetsResponseData& other) const = default;
};
@@ -1943,128 +1949,128 @@ public:
class TMetadataRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TMetadataRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 12};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
-
+
TMetadataRequestData();
~TMetadataRequestData() = default;
-
+
class TMetadataRequestTopic : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 12};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
-
+
TMetadataRequestTopic();
~TMetadataRequestTopic() = default;
-
+
struct TopicIdMeta {
using Type = TKafkaUuid;
using TypeDesc = NPrivate::TKafkaUuidDesc;
-
+
static constexpr const char* Name = "topicId";
static constexpr const char* About = "The topic id.";
static const Type Default; // = TKafkaUuid(0, 0);
-
+
static constexpr TKafkaVersions PresentVersions = {10, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
TopicIdMeta::Type TopicId;
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = {10, Max<TKafkaVersion>()};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TMetadataRequestTopic& other) const = default;
};
-
+
struct TopicsMeta {
using ItemType = TMetadataRequestTopic;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TMetadataRequestTopic>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topics";
static constexpr const char* About = "The topics to fetch metadata for.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
TopicsMeta::Type Topics;
-
+
struct AllowAutoTopicCreationMeta {
using Type = TKafkaBool;
using TypeDesc = NPrivate::TKafkaBoolDesc;
-
+
static constexpr const char* Name = "allowAutoTopicCreation";
static constexpr const char* About = "If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so.";
static const Type Default; // = true;
-
+
static constexpr TKafkaVersions PresentVersions = {4, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
AllowAutoTopicCreationMeta::Type AllowAutoTopicCreation;
-
+
struct IncludeClusterAuthorizedOperationsMeta {
using Type = TKafkaBool;
using TypeDesc = NPrivate::TKafkaBoolDesc;
-
+
static constexpr const char* Name = "includeClusterAuthorizedOperations";
static constexpr const char* About = "Whether to include cluster authorized operations.";
static const Type Default; // = false;
-
+
static constexpr TKafkaVersions PresentVersions = {8, 10};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
IncludeClusterAuthorizedOperationsMeta::Type IncludeClusterAuthorizedOperations;
-
+
struct IncludeTopicAuthorizedOperationsMeta {
using Type = TKafkaBool;
using TypeDesc = NPrivate::TKafkaBoolDesc;
-
+
static constexpr const char* Name = "includeTopicAuthorizedOperations";
static constexpr const char* About = "Whether to include topic authorized operations.";
static const Type Default; // = false;
-
+
static constexpr TKafkaVersions PresentVersions = {8, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
IncludeTopicAuthorizedOperationsMeta::Type IncludeTopicAuthorizedOperations;
-
+
i16 ApiKey() const override { return METADATA; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TMetadataRequestData& other) const = default;
};
@@ -2072,422 +2078,422 @@ public:
class TMetadataResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TMetadataResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 12};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
-
+
TMetadataResponseData();
~TMetadataResponseData() = default;
-
+
class TMetadataResponseBroker : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 12};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
-
+
TMetadataResponseBroker();
~TMetadataResponseBroker() = default;
-
+
struct NodeIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "nodeId";
static constexpr const char* About = "The broker ID.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
NodeIdMeta::Type NodeId;
-
+
struct HostMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "host";
static constexpr const char* About = "The broker hostname.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
HostMeta::Type Host;
-
+
struct PortMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "port";
static constexpr const char* About = "The broker port.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
PortMeta::Type Port;
-
+
struct RackMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "rack";
static constexpr const char* About = "The rack of the broker, or null if it has not been assigned to a rack.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
RackMeta::Type Rack;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TMetadataResponseBroker& other) const = default;
};
-
+
class TMetadataResponseTopic : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 12};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
-
+
TMetadataResponseTopic();
~TMetadataResponseTopic() = default;
-
+
class TMetadataResponsePartition : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 12};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
-
+
TMetadataResponsePartition();
~TMetadataResponsePartition() = default;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The partition error, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct PartitionIndexMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "partitionIndex";
static constexpr const char* About = "The partition index.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
PartitionIndexMeta::Type PartitionIndex;
-
+
struct LeaderIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "leaderId";
static constexpr const char* About = "The ID of the leader broker.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
LeaderIdMeta::Type LeaderId;
-
+
struct LeaderEpochMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "leaderEpoch";
static constexpr const char* About = "The leader epoch of this partition.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {7, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
LeaderEpochMeta::Type LeaderEpoch;
-
+
struct ReplicaNodesMeta {
using ItemType = TKafkaInt32;
using ItemTypeDesc = NPrivate::TKafkaIntDesc;
using Type = std::vector<TKafkaInt32>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "replicaNodes";
static constexpr const char* About = "The set of all nodes that host this partition.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
ReplicaNodesMeta::Type ReplicaNodes;
-
+
struct IsrNodesMeta {
using ItemType = TKafkaInt32;
using ItemTypeDesc = NPrivate::TKafkaIntDesc;
using Type = std::vector<TKafkaInt32>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "isrNodes";
static constexpr const char* About = "The set of nodes that are in sync with the leader for this partition.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
IsrNodesMeta::Type IsrNodes;
-
+
struct OfflineReplicasMeta {
using ItemType = TKafkaInt32;
using ItemTypeDesc = NPrivate::TKafkaIntDesc;
using Type = std::vector<TKafkaInt32>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "offlineReplicas";
static constexpr const char* About = "The set of offline replicas of this partition.";
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
OfflineReplicasMeta::Type OfflineReplicas;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TMetadataResponsePartition& other) const = default;
};
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The topic error, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = {12, Max<TKafkaVersion>()};
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct TopicIdMeta {
using Type = TKafkaUuid;
using TypeDesc = NPrivate::TKafkaUuidDesc;
-
+
static constexpr const char* Name = "topicId";
static constexpr const char* About = "The topic id.";
static const Type Default; // = TKafkaUuid(0, 0);
-
+
static constexpr TKafkaVersions PresentVersions = {10, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
TopicIdMeta::Type TopicId;
-
+
struct IsInternalMeta {
using Type = TKafkaBool;
using TypeDesc = NPrivate::TKafkaBoolDesc;
-
+
static constexpr const char* Name = "isInternal";
static constexpr const char* About = "True if the topic is internal.";
static const Type Default; // = false;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
IsInternalMeta::Type IsInternal;
-
+
struct PartitionsMeta {
using ItemType = TMetadataResponsePartition;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TMetadataResponsePartition>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "partitions";
static constexpr const char* About = "Each partition in the topic.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
PartitionsMeta::Type Partitions;
-
+
struct TopicAuthorizedOperationsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "topicAuthorizedOperations";
static constexpr const char* About = "32-bit bitfield to represent authorized operations for this topic.";
static const Type Default; // = -2147483648;
-
+
static constexpr TKafkaVersions PresentVersions = {8, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
TopicAuthorizedOperationsMeta::Type TopicAuthorizedOperations;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TMetadataResponseTopic& other) const = default;
};
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct BrokersMeta {
using ItemType = TMetadataResponseBroker;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TMetadataResponseBroker>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "brokers";
static constexpr const char* About = "Each broker in the response.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
BrokersMeta::Type Brokers;
-
+
struct ClusterIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "clusterId";
static constexpr const char* About = "The cluster ID that responding broker belongs to.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {2, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
ClusterIdMeta::Type ClusterId;
-
+
struct ControllerIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "controllerId";
static constexpr const char* About = "The ID of the controller broker.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
ControllerIdMeta::Type ControllerId;
-
+
struct TopicsMeta {
using ItemType = TMetadataResponseTopic;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TMetadataResponseTopic>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topics";
static constexpr const char* About = "Each topic in the response.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
TopicsMeta::Type Topics;
-
+
struct ClusterAuthorizedOperationsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "clusterAuthorizedOperations";
static constexpr const char* About = "32-bit bitfield to represent authorized operations for this cluster.";
static const Type Default; // = -2147483648;
-
+
static constexpr TKafkaVersions PresentVersions = {8, 10};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {9, Max<TKafkaVersion>()};
};
ClusterAuthorizedOperationsMeta::Type ClusterAuthorizedOperations;
-
+
i16 ApiKey() const override { return METADATA; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TMetadataResponseData& other) const = default;
};
@@ -2495,251 +2501,251 @@ public:
class TOffsetCommitRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TOffsetCommitRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 8};
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
-
+
TOffsetCommitRequestData();
~TOffsetCommitRequestData() = default;
-
+
class TOffsetCommitRequestTopic : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 8};
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
-
+
TOffsetCommitRequestTopic();
~TOffsetCommitRequestTopic() = default;
-
+
class TOffsetCommitRequestPartition : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 8};
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
-
+
TOffsetCommitRequestPartition();
~TOffsetCommitRequestPartition() = default;
-
+
struct PartitionIndexMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "partitionIndex";
static constexpr const char* About = "The partition index.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
PartitionIndexMeta::Type PartitionIndex;
-
+
struct CommittedOffsetMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "committedOffset";
static constexpr const char* About = "The message offset to be committed.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
CommittedOffsetMeta::Type CommittedOffset;
-
+
struct CommittedLeaderEpochMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "committedLeaderEpoch";
static constexpr const char* About = "The leader epoch of this partition.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {6, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
CommittedLeaderEpochMeta::Type CommittedLeaderEpoch;
-
+
struct CommitTimestampMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "commitTimestamp";
static constexpr const char* About = "The timestamp of the commit.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {1, 1};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
CommitTimestampMeta::Type CommitTimestamp;
-
+
struct CommittedMetadataMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "committedMetadata";
static constexpr const char* About = "Any associated metadata the client wants to keep.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
CommittedMetadataMeta::Type CommittedMetadata;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetCommitRequestPartition& other) const = default;
};
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct PartitionsMeta {
using ItemType = TOffsetCommitRequestPartition;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TOffsetCommitRequestPartition>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "partitions";
static constexpr const char* About = "Each partition to commit offsets for.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
PartitionsMeta::Type Partitions;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetCommitRequestTopic& other) const = default;
};
-
+
struct GroupIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupId";
static constexpr const char* About = "The unique group identifier.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
GroupIdMeta::Type GroupId;
-
+
struct GenerationIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "generationId";
static constexpr const char* About = "The generation of the group.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
GenerationIdMeta::Type GenerationId;
-
+
struct MemberIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "memberId";
static constexpr const char* About = "The member ID assigned by the group coordinator.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
MemberIdMeta::Type MemberId;
-
+
struct GroupInstanceIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupInstanceId";
static constexpr const char* About = "The unique identifier of the consumer instance provided by end user.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {7, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
GroupInstanceIdMeta::Type GroupInstanceId;
-
+
struct RetentionTimeMsMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "retentionTimeMs";
static constexpr const char* About = "The time period in ms to retain the offset.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {2, 4};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
RetentionTimeMsMeta::Type RetentionTimeMs;
-
+
struct TopicsMeta {
using ItemType = TOffsetCommitRequestTopic;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TOffsetCommitRequestTopic>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topics";
static constexpr const char* About = "The topics to commit offsets for.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
TopicsMeta::Type Topics;
-
+
i16 ApiKey() const override { return OFFSET_COMMIT; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetCommitRequestData& other) const = default;
};
@@ -2747,146 +2753,146 @@ public:
class TOffsetCommitResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TOffsetCommitResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 8};
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
-
+
TOffsetCommitResponseData();
~TOffsetCommitResponseData() = default;
-
+
class TOffsetCommitResponseTopic : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 8};
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
-
+
TOffsetCommitResponseTopic();
~TOffsetCommitResponseTopic() = default;
-
+
class TOffsetCommitResponsePartition : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 8};
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
-
+
TOffsetCommitResponsePartition();
~TOffsetCommitResponsePartition() = default;
-
+
struct PartitionIndexMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "partitionIndex";
static constexpr const char* About = "The partition index.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
PartitionIndexMeta::Type PartitionIndex;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetCommitResponsePartition& other) const = default;
};
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct PartitionsMeta {
using ItemType = TOffsetCommitResponsePartition;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TOffsetCommitResponsePartition>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "partitions";
static constexpr const char* About = "The responses for each partition in the topic.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
PartitionsMeta::Type Partitions;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetCommitResponseTopic& other) const = default;
};
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct TopicsMeta {
using ItemType = TOffsetCommitResponseTopic;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TOffsetCommitResponseTopic>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topics";
static constexpr const char* About = "The responses for each topic.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {8, Max<TKafkaVersion>()};
};
TopicsMeta::Type Topics;
-
+
i16 ApiKey() const override { return OFFSET_COMMIT; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetCommitResponseData& other) const = default;
};
@@ -2894,226 +2900,226 @@ public:
class TOffsetFetchRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TOffsetFetchRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 8};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TOffsetFetchRequestData();
~TOffsetFetchRequestData() = default;
-
+
class TOffsetFetchRequestTopic : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TOffsetFetchRequestTopic();
~TOffsetFetchRequestTopic() = default;
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct PartitionIndexesMeta {
using ItemType = TKafkaInt32;
using ItemTypeDesc = NPrivate::TKafkaIntDesc;
using Type = std::vector<TKafkaInt32>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "partitionIndexes";
static constexpr const char* About = "The partition indexes we would like to fetch offsets for.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
PartitionIndexesMeta::Type PartitionIndexes;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetFetchRequestTopic& other) const = default;
};
-
+
class TOffsetFetchRequestGroup : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {8, 8};
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
-
+
TOffsetFetchRequestGroup();
~TOffsetFetchRequestGroup() = default;
-
+
class TOffsetFetchRequestTopics : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {8, 8};
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
-
+
TOffsetFetchRequestTopics();
~TOffsetFetchRequestTopics() = default;
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
NameMeta::Type Name;
-
+
struct PartitionIndexesMeta {
using ItemType = TKafkaInt32;
using ItemTypeDesc = NPrivate::TKafkaIntDesc;
using Type = std::vector<TKafkaInt32>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "partitionIndexes";
static constexpr const char* About = "The partition indexes we would like to fetch offsets for.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
PartitionIndexesMeta::Type PartitionIndexes;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetFetchRequestTopics& other) const = default;
};
-
+
struct GroupIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupId";
static constexpr const char* About = "The group ID.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
GroupIdMeta::Type GroupId;
-
+
struct TopicsMeta {
using ItemType = TOffsetFetchRequestTopics;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TOffsetFetchRequestTopics>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topics";
static constexpr const char* About = "Each topic we would like to fetch offsets for, or null to fetch offsets for all topics.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
TopicsMeta::Type Topics;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetFetchRequestGroup& other) const = default;
};
-
+
struct GroupIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupId";
static constexpr const char* About = "The group to fetch offsets for.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
GroupIdMeta::Type GroupId;
-
+
struct TopicsMeta {
using ItemType = TOffsetFetchRequestTopic;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TOffsetFetchRequestTopic>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topics";
static constexpr const char* About = "Each topic we would like to fetch offsets for, or null to fetch offsets for all topics.";
-
+
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = {2, Max<TKafkaVersion>()};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
TopicsMeta::Type Topics;
-
+
struct GroupsMeta {
using ItemType = TOffsetFetchRequestGroup;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TOffsetFetchRequestGroup>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "groups";
static constexpr const char* About = "Each group we would like to fetch offsets for";
-
+
static constexpr TKafkaVersions PresentVersions = {8, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
GroupsMeta::Type Groups;
-
+
struct RequireStableMeta {
using Type = TKafkaBool;
using TypeDesc = NPrivate::TKafkaBoolDesc;
-
+
static constexpr const char* Name = "requireStable";
static constexpr const char* About = "Whether broker should hold on returning unstable offsets but set a retriable error code for the partitions.";
static const Type Default; // = false;
-
+
static constexpr TKafkaVersions PresentVersions = {7, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
RequireStableMeta::Type RequireStable;
-
+
i16 ApiKey() const override { return OFFSET_FETCH; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetFetchRequestData& other) const = default;
};
@@ -3121,425 +3127,425 @@ public:
class TOffsetFetchResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TOffsetFetchResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 8};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TOffsetFetchResponseData();
~TOffsetFetchResponseData() = default;
-
+
class TOffsetFetchResponseTopic : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TOffsetFetchResponseTopic();
~TOffsetFetchResponseTopic() = default;
-
+
class TOffsetFetchResponsePartition : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TOffsetFetchResponsePartition();
~TOffsetFetchResponsePartition() = default;
-
+
struct PartitionIndexMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "partitionIndex";
static constexpr const char* About = "The partition index.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
PartitionIndexMeta::Type PartitionIndex;
-
+
struct CommittedOffsetMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "committedOffset";
static constexpr const char* About = "The committed message offset.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
CommittedOffsetMeta::Type CommittedOffset;
-
+
struct CommittedLeaderEpochMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "committedLeaderEpoch";
static constexpr const char* About = "The leader epoch.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
CommittedLeaderEpochMeta::Type CommittedLeaderEpoch;
-
+
struct MetadataMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "metadata";
static constexpr const char* About = "The partition metadata.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
MetadataMeta::Type Metadata;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetFetchResponsePartition& other) const = default;
};
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct PartitionsMeta {
using ItemType = TOffsetFetchResponsePartition;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TOffsetFetchResponsePartition>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "partitions";
static constexpr const char* About = "The responses per partition";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
PartitionsMeta::Type Partitions;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetFetchResponseTopic& other) const = default;
};
-
+
class TOffsetFetchResponseGroup : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {8, 8};
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
-
+
TOffsetFetchResponseGroup();
~TOffsetFetchResponseGroup() = default;
-
+
class TOffsetFetchResponseTopics : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {8, 8};
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
-
+
TOffsetFetchResponseTopics();
~TOffsetFetchResponseTopics() = default;
-
+
class TOffsetFetchResponsePartitions : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {8, 8};
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
-
+
TOffsetFetchResponsePartitions();
~TOffsetFetchResponsePartitions() = default;
-
+
struct PartitionIndexMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "partitionIndex";
static constexpr const char* About = "The partition index.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
PartitionIndexMeta::Type PartitionIndex;
-
+
struct CommittedOffsetMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "committedOffset";
static constexpr const char* About = "The committed message offset.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
CommittedOffsetMeta::Type CommittedOffset;
-
+
struct CommittedLeaderEpochMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "committedLeaderEpoch";
static constexpr const char* About = "The leader epoch.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
CommittedLeaderEpochMeta::Type CommittedLeaderEpoch;
-
+
struct MetadataMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "metadata";
static constexpr const char* About = "The partition metadata.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
MetadataMeta::Type Metadata;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The partition-level error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ErrorCodeMeta::Type ErrorCode;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetFetchResponsePartitions& other) const = default;
};
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
NameMeta::Type Name;
-
+
struct PartitionsMeta {
using ItemType = TOffsetFetchResponsePartitions;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TOffsetFetchResponsePartitions>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "partitions";
static constexpr const char* About = "The responses per partition";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
PartitionsMeta::Type Partitions;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetFetchResponseTopics& other) const = default;
};
-
+
struct GroupIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupId";
static constexpr const char* About = "The group ID.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
GroupIdMeta::Type GroupId;
-
+
struct TopicsMeta {
using ItemType = TOffsetFetchResponseTopics;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TOffsetFetchResponseTopics>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topics";
static constexpr const char* About = "The responses per topic.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
TopicsMeta::Type Topics;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The group-level error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ErrorCodeMeta::Type ErrorCode;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetFetchResponseGroup& other) const = default;
};
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct TopicsMeta {
using ItemType = TOffsetFetchResponseTopic;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TOffsetFetchResponseTopic>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topics";
static constexpr const char* About = "The responses per topic.";
-
+
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
TopicsMeta::Type Topics;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The top-level error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {2, 7};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct GroupsMeta {
using ItemType = TOffsetFetchResponseGroup;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TOffsetFetchResponseGroup>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "groups";
static constexpr const char* About = "The responses per group id.";
-
+
static constexpr TKafkaVersions PresentVersions = {8, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
GroupsMeta::Type Groups;
-
+
i16 ApiKey() const override { return OFFSET_FETCH; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TOffsetFetchResponseData& other) const = default;
};
@@ -3547,66 +3553,66 @@ public:
class TFindCoordinatorRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TFindCoordinatorRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 4};
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
-
+
TFindCoordinatorRequestData();
~TFindCoordinatorRequestData() = default;
-
+
struct KeyMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "key";
static constexpr const char* About = "The coordinator key.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = {0, 3};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
KeyMeta::Type Key;
-
+
struct KeyTypeMeta {
using Type = TKafkaInt8;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "keyType";
static constexpr const char* About = "The coordinator key type. (Group, transaction, etc.)";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
KeyTypeMeta::Type KeyType;
-
+
struct CoordinatorKeysMeta {
using ItemType = TKafkaString;
using ItemTypeDesc = NPrivate::TKafkaStringDesc;
using Type = std::vector<TKafkaString>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "coordinatorKeys";
static constexpr const char* About = "The coordinator keys.";
-
+
static constexpr TKafkaVersions PresentVersions = {4, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
CoordinatorKeysMeta::Type CoordinatorKeys;
-
+
i16 ApiKey() const override { return FIND_COORDINATOR; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TFindCoordinatorRequestData& other) const = default;
};
@@ -3614,233 +3620,233 @@ public:
class TFindCoordinatorResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TFindCoordinatorResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 4};
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
-
+
TFindCoordinatorResponseData();
~TFindCoordinatorResponseData() = default;
-
+
class TCoordinator : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {4, 4};
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
-
+
TCoordinator();
~TCoordinator() = default;
-
+
struct KeyMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "key";
static constexpr const char* About = "The coordinator key.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
KeyMeta::Type Key;
-
+
struct NodeIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "nodeId";
static constexpr const char* About = "The node id.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
NodeIdMeta::Type NodeId;
-
+
struct HostMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "host";
static constexpr const char* About = "The host name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
HostMeta::Type Host;
-
+
struct PortMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "port";
static constexpr const char* About = "The port.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
PortMeta::Type Port;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct ErrorMessageMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "errorMessage";
static constexpr const char* About = "The error message, or null if there was no error.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ErrorMessageMeta::Type ErrorMessage;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TCoordinator& other) const = default;
};
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {0, 3};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct ErrorMessageMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "errorMessage";
static constexpr const char* About = "The error message, or null if there was no error.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = {1, 3};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
ErrorMessageMeta::Type ErrorMessage;
-
+
struct NodeIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "nodeId";
static constexpr const char* About = "The node id.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {0, 3};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
NodeIdMeta::Type NodeId;
-
+
struct HostMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "host";
static constexpr const char* About = "The host name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = {0, 3};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
HostMeta::Type Host;
-
+
struct PortMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "port";
static constexpr const char* About = "The port.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {0, 3};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
PortMeta::Type Port;
-
+
struct CoordinatorsMeta {
using ItemType = TCoordinator;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TCoordinator>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "coordinators";
static constexpr const char* About = "Each coordinator result in the response";
-
+
static constexpr TKafkaVersions PresentVersions = {4, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
CoordinatorsMeta::Type Coordinators;
-
+
i16 ApiKey() const override { return FIND_COORDINATOR; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TFindCoordinatorResponseData& other) const = default;
};
@@ -3848,187 +3854,187 @@ public:
class TJoinGroupRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TJoinGroupRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 9};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TJoinGroupRequestData();
~TJoinGroupRequestData() = default;
-
+
class TJoinGroupRequestProtocol : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 9};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TJoinGroupRequestProtocol();
~TJoinGroupRequestProtocol() = default;
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The protocol name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct MetadataMeta {
using Type = TKafkaBytes;
using TypeDesc = NPrivate::TKafkaBytesDesc;
-
+
static constexpr const char* Name = "metadata";
static constexpr const char* About = "The protocol metadata.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
MetadataMeta::Type Metadata;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TJoinGroupRequestProtocol& other) const = default;
};
-
+
struct GroupIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupId";
static constexpr const char* About = "The group identifier.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
GroupIdMeta::Type GroupId;
-
+
struct SessionTimeoutMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "sessionTimeoutMs";
static constexpr const char* About = "The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
SessionTimeoutMsMeta::Type SessionTimeoutMs;
-
+
struct RebalanceTimeoutMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "rebalanceTimeoutMs";
static constexpr const char* About = "The maximum time in milliseconds that the coordinator will wait for each member to rejoin when rebalancing the group.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
RebalanceTimeoutMsMeta::Type RebalanceTimeoutMs;
-
+
struct MemberIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "memberId";
static constexpr const char* About = "The member id assigned by the group coordinator.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
MemberIdMeta::Type MemberId;
-
+
struct GroupInstanceIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupInstanceId";
static constexpr const char* About = "The unique identifier of the consumer instance provided by end user.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
GroupInstanceIdMeta::Type GroupInstanceId;
-
+
struct ProtocolTypeMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "protocolType";
static constexpr const char* About = "The unique name the for class of protocols implemented by the group we want to join.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
ProtocolTypeMeta::Type ProtocolType;
-
+
struct ProtocolsMeta {
using ItemType = TJoinGroupRequestProtocol;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TJoinGroupRequestProtocol>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "protocols";
static constexpr const char* About = "The list of protocols that the member supports.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
ProtocolsMeta::Type Protocols;
-
+
struct ReasonMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "reason";
static constexpr const char* About = "The reason why the member (re-)joins the group.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {8, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ReasonMeta::Type Reason;
-
+
i16 ApiKey() const override { return JOIN_GROUP; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TJoinGroupRequestData& other) const = default;
};
@@ -4036,218 +4042,219 @@ public:
class TJoinGroupResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TJoinGroupResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 9};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TJoinGroupResponseData();
~TJoinGroupResponseData() = default;
-
+
class TJoinGroupResponseMember : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 9};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
-
+
TJoinGroupResponseMember();
~TJoinGroupResponseMember() = default;
-
+
struct MemberIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "memberId";
static constexpr const char* About = "The group member ID.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
MemberIdMeta::Type MemberId;
-
+
struct GroupInstanceIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupInstanceId";
static constexpr const char* About = "The unique identifier of the consumer instance provided by end user.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
GroupInstanceIdMeta::Type GroupInstanceId;
-
+
struct MetadataMeta {
using Type = TKafkaBytes;
using TypeDesc = NPrivate::TKafkaBytesDesc;
-
+
static constexpr const char* Name = "metadata";
static constexpr const char* About = "The group member metadata.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
MetadataMeta::Type Metadata;
+
TString MetaStr;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TJoinGroupResponseMember& other) const = default;
};
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {2, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct GenerationIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "generationId";
static constexpr const char* About = "The generation ID of the group.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
GenerationIdMeta::Type GenerationId;
-
+
struct ProtocolTypeMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "protocolType";
static constexpr const char* About = "The group protocol name.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {7, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ProtocolTypeMeta::Type ProtocolType;
-
+
struct ProtocolNameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "protocolName";
static constexpr const char* About = "The group protocol selected by the coordinator.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = {7, Max<TKafkaVersion>()};
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
ProtocolNameMeta::Type ProtocolName;
-
+
struct LeaderMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "leader";
static constexpr const char* About = "The leader of the group.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
LeaderMeta::Type Leader;
-
+
struct SkipAssignmentMeta {
using Type = TKafkaBool;
using TypeDesc = NPrivate::TKafkaBoolDesc;
-
+
static constexpr const char* Name = "skipAssignment";
static constexpr const char* About = "True if the leader must skip running the assignment.";
static const Type Default; // = false;
-
+
static constexpr TKafkaVersions PresentVersions = {9, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
SkipAssignmentMeta::Type SkipAssignment;
-
+
struct MemberIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "memberId";
static constexpr const char* About = "The member ID assigned by the group coordinator.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
MemberIdMeta::Type MemberId;
-
+
struct MembersMeta {
using ItemType = TJoinGroupResponseMember;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TJoinGroupResponseMember>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "members";
static constexpr const char* About = "";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {6, Max<TKafkaVersion>()};
};
MembersMeta::Type Members;
-
+
i16 ApiKey() const override { return JOIN_GROUP; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TJoinGroupResponseData& other) const = default;
};
@@ -4255,80 +4262,80 @@ public:
class THeartbeatRequestData : public TApiMessage {
public:
typedef std::shared_ptr<THeartbeatRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 4};
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
-
+
THeartbeatRequestData();
~THeartbeatRequestData() = default;
-
+
struct GroupIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupId";
static constexpr const char* About = "The group id.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
GroupIdMeta::Type GroupId;
-
+
struct GenerationIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "generationId";
static constexpr const char* About = "The generation of the group.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
GenerationIdMeta::Type GenerationId;
-
+
struct MemberIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "memberId";
static constexpr const char* About = "The member ID.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
MemberIdMeta::Type MemberId;
-
+
struct GroupInstanceIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupInstanceId";
static constexpr const char* About = "The unique identifier of the consumer instance provided by end user.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
GroupInstanceIdMeta::Type GroupInstanceId;
-
+
i16 ApiKey() const override { return HEARTBEAT; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const THeartbeatRequestData& other) const = default;
};
@@ -4336,50 +4343,50 @@ public:
class THeartbeatResponseData : public TApiMessage {
public:
typedef std::shared_ptr<THeartbeatResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 4};
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
-
+
THeartbeatResponseData();
~THeartbeatResponseData() = default;
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
i16 ApiKey() const override { return HEARTBEAT; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const THeartbeatResponseData& other) const = default;
};
@@ -4387,128 +4394,128 @@ public:
class TLeaveGroupRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TLeaveGroupRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 5};
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
-
+
TLeaveGroupRequestData();
~TLeaveGroupRequestData() = default;
-
+
class TMemberIdentity : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {3, 5};
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
-
+
TMemberIdentity();
~TMemberIdentity() = default;
-
+
struct MemberIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "memberId";
static constexpr const char* About = "The member ID to remove from the group.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
MemberIdMeta::Type MemberId;
-
+
struct GroupInstanceIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupInstanceId";
static constexpr const char* About = "The group instance ID to remove from the group.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
GroupInstanceIdMeta::Type GroupInstanceId;
-
+
struct ReasonMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "reason";
static constexpr const char* About = "The reason why the member left the group.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ReasonMeta::Type Reason;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TMemberIdentity& other) const = default;
};
-
+
struct GroupIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupId";
static constexpr const char* About = "The ID of the group to leave.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
GroupIdMeta::Type GroupId;
-
+
struct MemberIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "memberId";
static constexpr const char* About = "The member ID to remove from the group.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = {0, 2};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
MemberIdMeta::Type MemberId;
-
+
struct MembersMeta {
using ItemType = TMemberIdentity;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TMemberIdentity>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "members";
static constexpr const char* About = "List of leaving member identities.";
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
MembersMeta::Type Members;
-
+
i16 ApiKey() const override { return LEAVE_GROUP; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TLeaveGroupRequestData& other) const = default;
};
@@ -4516,128 +4523,128 @@ public:
class TLeaveGroupResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TLeaveGroupResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 5};
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
-
+
TLeaveGroupResponseData();
~TLeaveGroupResponseData() = default;
-
+
class TMemberResponse : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {3, 5};
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
-
+
TMemberResponse();
~TMemberResponse() = default;
-
+
struct MemberIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "memberId";
static constexpr const char* About = "The member ID to remove from the group.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
MemberIdMeta::Type MemberId;
-
+
struct GroupInstanceIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupInstanceId";
static constexpr const char* About = "The group instance ID to remove from the group.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
GroupInstanceIdMeta::Type GroupInstanceId;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TMemberResponse& other) const = default;
};
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct MembersMeta {
using ItemType = TMemberResponse;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TMemberResponse>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "members";
static constexpr const char* About = "List of leaving member responses.";
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
MembersMeta::Type Members;
-
+
i16 ApiKey() const override { return LEAVE_GROUP; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TLeaveGroupResponseData& other) const = default;
};
@@ -4645,173 +4652,174 @@ public:
class TSyncGroupRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TSyncGroupRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 5};
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
-
+
TSyncGroupRequestData();
~TSyncGroupRequestData() = default;
-
+
class TSyncGroupRequestAssignment : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 5};
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
-
+
TSyncGroupRequestAssignment();
~TSyncGroupRequestAssignment() = default;
-
+
struct MemberIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "memberId";
static constexpr const char* About = "The ID of the member to assign.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
MemberIdMeta::Type MemberId;
-
+
struct AssignmentMeta {
using Type = TKafkaBytes;
using TypeDesc = NPrivate::TKafkaBytesDesc;
-
+
static constexpr const char* Name = "assignment";
static constexpr const char* About = "The member assignment.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
AssignmentMeta::Type Assignment;
+
TString AssignmentStr;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TSyncGroupRequestAssignment& other) const = default;
};
-
+
struct GroupIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupId";
static constexpr const char* About = "The unique group identifier.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
GroupIdMeta::Type GroupId;
-
+
struct GenerationIdMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "generationId";
static constexpr const char* About = "The generation of the group.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
GenerationIdMeta::Type GenerationId;
-
+
struct MemberIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "memberId";
static constexpr const char* About = "The member ID assigned by the group.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
MemberIdMeta::Type MemberId;
-
+
struct GroupInstanceIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "groupInstanceId";
static constexpr const char* About = "The unique identifier of the consumer instance provided by end user.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
GroupInstanceIdMeta::Type GroupInstanceId;
-
+
struct ProtocolTypeMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "protocolType";
static constexpr const char* About = "The group protocol type.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ProtocolTypeMeta::Type ProtocolType;
-
+
struct ProtocolNameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "protocolName";
static constexpr const char* About = "The group protocol name.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ProtocolNameMeta::Type ProtocolName;
-
+
struct AssignmentsMeta {
using ItemType = TSyncGroupRequestAssignment;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TSyncGroupRequestAssignment>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "assignments";
static constexpr const char* About = "Each assignment.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
AssignmentsMeta::Type Assignments;
-
+
i16 ApiKey() const override { return SYNC_GROUP; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TSyncGroupRequestData& other) const = default;
};
@@ -4819,96 +4827,96 @@ public:
class TSyncGroupResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TSyncGroupResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 5};
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
-
+
TSyncGroupResponseData();
~TSyncGroupResponseData() = default;
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct ProtocolTypeMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "protocolType";
static constexpr const char* About = "The group protocol type.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ProtocolTypeMeta::Type ProtocolType;
-
+
struct ProtocolNameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "protocolName";
static constexpr const char* About = "The group protocol name.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ProtocolNameMeta::Type ProtocolName;
-
+
struct AssignmentMeta {
using Type = TKafkaBytes;
using TypeDesc = NPrivate::TKafkaBytesDesc;
-
+
static constexpr const char* Name = "assignment";
static constexpr const char* About = "The member assignment.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
};
AssignmentMeta::Type Assignment;
-
+
TString AssignmentStr;
-
+
i16 ApiKey() const override { return SYNC_GROUP; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TSyncGroupResponseData& other) const = default;
};
@@ -4916,35 +4924,35 @@ public:
class TSaslHandshakeRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TSaslHandshakeRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 1};
static constexpr TKafkaVersions FlexibleVersions = VersionsNever;
};
-
+
TSaslHandshakeRequestData();
~TSaslHandshakeRequestData() = default;
-
+
struct MechanismMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "mechanism";
static constexpr const char* About = "The SASL mechanism chosen by the client.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsNever;
};
MechanismMeta::Type Mechanism;
-
+
i16 ApiKey() const override { return SASL_HANDSHAKE; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TSaslHandshakeRequestData& other) const = default;
};
@@ -4952,51 +4960,51 @@ public:
class TSaslHandshakeResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TSaslHandshakeResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 1};
static constexpr TKafkaVersions FlexibleVersions = VersionsNever;
};
-
+
TSaslHandshakeResponseData();
~TSaslHandshakeResponseData() = default;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsNever;
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct MechanismsMeta {
using ItemType = TKafkaString;
using ItemTypeDesc = NPrivate::TKafkaStringDesc;
using Type = std::vector<TKafkaString>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "mechanisms";
static constexpr const char* About = "The mechanisms enabled in the server.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsNever;
};
MechanismsMeta::Type Mechanisms;
-
+
i16 ApiKey() const override { return SASL_HANDSHAKE; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TSaslHandshakeResponseData& other) const = default;
};
@@ -5004,50 +5012,50 @@ public:
class TApiVersionsRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TApiVersionsRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 3};
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
-
+
TApiVersionsRequestData();
~TApiVersionsRequestData() = default;
-
+
struct ClientSoftwareNameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "clientSoftwareName";
static constexpr const char* About = "The name of the client.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ClientSoftwareNameMeta::Type ClientSoftwareName;
-
+
struct ClientSoftwareVersionMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "clientSoftwareVersion";
static constexpr const char* About = "The version of the client.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ClientSoftwareVersionMeta::Type ClientSoftwareVersion;
-
+
i16 ApiKey() const override { return API_VERSIONS; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TApiVersionsRequestData& other) const = default;
};
@@ -5055,318 +5063,318 @@ public:
class TApiVersionsResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TApiVersionsResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 3};
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
-
+
TApiVersionsResponseData();
~TApiVersionsResponseData() = default;
-
+
class TApiVersion : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 3};
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
-
+
TApiVersion();
~TApiVersion() = default;
-
+
struct ApiKeyMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "apiKey";
static constexpr const char* About = "The API index.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
ApiKeyMeta::Type ApiKey;
-
+
struct MinVersionMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "minVersion";
static constexpr const char* About = "The minimum supported version, inclusive.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
MinVersionMeta::Type MinVersion;
-
+
struct MaxVersionMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "maxVersion";
static constexpr const char* About = "The maximum supported version, inclusive.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
MaxVersionMeta::Type MaxVersion;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TApiVersion& other) const = default;
};
-
+
class TSupportedFeatureKey : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {3, 3};
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
-
+
TSupportedFeatureKey();
~TSupportedFeatureKey() = default;
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The name of the feature.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
NameMeta::Type Name;
-
+
struct MinVersionMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "minVersion";
static constexpr const char* About = "The minimum supported version for the feature.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
MinVersionMeta::Type MinVersion;
-
+
struct MaxVersionMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "maxVersion";
static constexpr const char* About = "The maximum supported version for the feature.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
MaxVersionMeta::Type MaxVersion;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TSupportedFeatureKey& other) const = default;
};
-
+
class TFinalizedFeatureKey : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {3, 3};
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
-
+
TFinalizedFeatureKey();
~TFinalizedFeatureKey() = default;
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The name of the feature.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
NameMeta::Type Name;
-
+
struct MaxVersionLevelMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "maxVersionLevel";
static constexpr const char* About = "The cluster-wide finalized max version level for the feature.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
MaxVersionLevelMeta::Type MaxVersionLevel;
-
+
struct MinVersionLevelMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "minVersionLevel";
static constexpr const char* About = "The cluster-wide finalized min version level for the feature.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
MinVersionLevelMeta::Type MinVersionLevel;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TFinalizedFeatureKey& other) const = default;
};
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The top-level error code.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct ApiKeysMeta {
using ItemType = TApiVersion;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TApiVersion>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "apiKeys";
static constexpr const char* About = "The APIs supported by the broker.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
ApiKeysMeta::Type ApiKeys;
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct SupportedFeaturesMeta {
using ItemType = TSupportedFeatureKey;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TSupportedFeatureKey>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "supportedFeatures";
static constexpr const char* About = "Features supported by the broker.";
static constexpr const TKafkaInt32 Tag = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsAlways;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
SupportedFeaturesMeta::Type SupportedFeatures;
-
+
struct FinalizedFeaturesEpochMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "finalizedFeaturesEpoch";
static constexpr const char* About = "The monotonically increasing epoch for the finalized features information. Valid values are >= 0. A value of -1 is special and represents unknown epoch.";
static constexpr const TKafkaInt32 Tag = 1;
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsAlways;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
FinalizedFeaturesEpochMeta::Type FinalizedFeaturesEpoch;
-
+
struct FinalizedFeaturesMeta {
using ItemType = TFinalizedFeatureKey;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TFinalizedFeatureKey>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "finalizedFeatures";
static constexpr const char* About = "List of cluster-wide finalized features. The information is valid only if FinalizedFeaturesEpoch >= 0.";
static constexpr const TKafkaInt32 Tag = 2;
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsAlways;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
FinalizedFeaturesMeta::Type FinalizedFeatures;
-
+
struct ZkMigrationReadyMeta {
using Type = TKafkaBool;
using TypeDesc = NPrivate::TKafkaBoolDesc;
-
+
static constexpr const char* Name = "zkMigrationReady";
static constexpr const char* About = "Set by a KRaft controller if the required configurations for ZK migration are present";
static constexpr const TKafkaInt32 Tag = 3;
static const Type Default; // = false;
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsAlways;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ZkMigrationReadyMeta::Type ZkMigrationReady;
-
+
i16 ApiKey() const override { return API_VERSIONS; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TApiVersionsResponseData& other) const = default;
};
@@ -5374,255 +5382,255 @@ public:
class TCreateTopicsRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TCreateTopicsRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
-
+
TCreateTopicsRequestData();
~TCreateTopicsRequestData() = default;
-
+
class TCreatableTopic : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
-
+
TCreatableTopic();
~TCreatableTopic() = default;
-
+
class TCreatableReplicaAssignment : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
-
+
TCreatableReplicaAssignment();
~TCreatableReplicaAssignment() = default;
-
+
struct PartitionIndexMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "partitionIndex";
static constexpr const char* About = "The partition index.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
PartitionIndexMeta::Type PartitionIndex;
-
+
struct BrokerIdsMeta {
using ItemType = TKafkaInt32;
using ItemTypeDesc = NPrivate::TKafkaIntDesc;
using Type = std::vector<TKafkaInt32>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "brokerIds";
static constexpr const char* About = "The brokers to place the partition on.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
BrokerIdsMeta::Type BrokerIds;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TCreatableReplicaAssignment& other) const = default;
};
-
+
class TCreateableTopicConfig : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
-
+
TCreateableTopicConfig();
~TCreateableTopicConfig() = default;
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The configuration name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct ValueMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "value";
static constexpr const char* About = "The configuration value.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
ValueMeta::Type Value;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TCreateableTopicConfig& other) const = default;
};
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct NumPartitionsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "numPartitions";
static constexpr const char* About = "The number of partitions to create in the topic, or -1 if we are either specifying a manual partition assignment or using the default partitions.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
NumPartitionsMeta::Type NumPartitions;
-
+
struct ReplicationFactorMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "replicationFactor";
static constexpr const char* About = "The number of replicas to create for each partition in the topic, or -1 if we are either specifying a manual partition assignment or using the default replication factor.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
ReplicationFactorMeta::Type ReplicationFactor;
-
+
struct AssignmentsMeta {
using ItemType = TCreatableReplicaAssignment;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TCreatableReplicaAssignment>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "assignments";
static constexpr const char* About = "The manual partition assignment, or the empty array if we are using automatic assignment.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
AssignmentsMeta::Type Assignments;
-
+
struct ConfigsMeta {
using ItemType = TCreateableTopicConfig;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TCreateableTopicConfig>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "configs";
static constexpr const char* About = "The custom topic configurations to set.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
ConfigsMeta::Type Configs;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TCreatableTopic& other) const = default;
};
-
+
struct TopicsMeta {
using ItemType = TCreatableTopic;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TCreatableTopic>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topics";
static constexpr const char* About = "The topics to create.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
TopicsMeta::Type Topics;
-
+
struct TimeoutMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "timeoutMs";
static constexpr const char* About = "How long to wait in milliseconds before timing out the request.";
static const Type Default; // = 60000;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
TimeoutMsMeta::Type TimeoutMs;
-
+
struct ValidateOnlyMeta {
using Type = TKafkaBool;
using TypeDesc = NPrivate::TKafkaBoolDesc;
-
+
static constexpr const char* Name = "validateOnly";
static constexpr const char* About = "If true, check that the topics can be created as specified, but don't create anything.";
static const Type Default; // = false;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
ValidateOnlyMeta::Type ValidateOnly;
-
+
i16 ApiKey() const override { return CREATE_TOPICS; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TCreateTopicsRequestData& other) const = default;
};
@@ -5630,282 +5638,282 @@ public:
class TCreateTopicsResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TCreateTopicsResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
-
+
TCreateTopicsResponseData();
~TCreateTopicsResponseData() = default;
-
+
class TCreatableTopicResult : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 7};
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
-
+
TCreatableTopicResult();
~TCreatableTopicResult() = default;
-
+
class TCreatableTopicConfigs : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {5, 7};
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
-
+
TCreatableTopicConfigs();
~TCreatableTopicConfigs() = default;
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The configuration name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
NameMeta::Type Name;
-
+
struct ValueMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "value";
static constexpr const char* About = "The configuration value.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ValueMeta::Type Value;
-
+
struct ReadOnlyMeta {
using Type = TKafkaBool;
using TypeDesc = NPrivate::TKafkaBoolDesc;
-
+
static constexpr const char* Name = "readOnly";
static constexpr const char* About = "True if the configuration is read-only.";
static const Type Default; // = false;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ReadOnlyMeta::Type ReadOnly;
-
+
struct ConfigSourceMeta {
using Type = TKafkaInt8;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "configSource";
static constexpr const char* About = "The configuration source.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ConfigSourceMeta::Type ConfigSource;
-
+
struct IsSensitiveMeta {
using Type = TKafkaBool;
using TypeDesc = NPrivate::TKafkaBoolDesc;
-
+
static constexpr const char* Name = "isSensitive";
static constexpr const char* About = "True if this configuration is sensitive.";
static const Type Default; // = false;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
IsSensitiveMeta::Type IsSensitive;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TCreatableTopicConfigs& other) const = default;
};
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct TopicIdMeta {
using Type = TKafkaUuid;
using TypeDesc = NPrivate::TKafkaUuidDesc;
-
+
static constexpr const char* Name = "topicId";
static constexpr const char* About = "The unique topic ID";
static const Type Default; // = TKafkaUuid(0, 0);
-
+
static constexpr TKafkaVersions PresentVersions = {7, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
TopicIdMeta::Type TopicId;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct ErrorMessageMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "errorMessage";
static constexpr const char* About = "The error message, or null if there was no error.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
ErrorMessageMeta::Type ErrorMessage;
-
+
struct TopicConfigErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "topicConfigErrorCode";
static constexpr const char* About = "Optional topic config error returned if configs are not returned in the response.";
static constexpr const TKafkaInt32 Tag = 0;
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsAlways;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
TopicConfigErrorCodeMeta::Type TopicConfigErrorCode;
-
+
struct NumPartitionsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "numPartitions";
static constexpr const char* About = "Number of partitions of the topic.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
NumPartitionsMeta::Type NumPartitions;
-
+
struct ReplicationFactorMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "replicationFactor";
static constexpr const char* About = "Replication factor of the topic.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ReplicationFactorMeta::Type ReplicationFactor;
-
+
struct ConfigsMeta {
using ItemType = TCreatableTopicConfigs;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TCreatableTopicConfigs>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "configs";
static constexpr const char* About = "Configuration of the topic.";
-
+
static constexpr TKafkaVersions PresentVersions = {5, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ConfigsMeta::Type Configs;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TCreatableTopicResult& other) const = default;
};
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {2, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct TopicsMeta {
using ItemType = TCreatableTopicResult;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TCreatableTopicResult>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topics";
static constexpr const char* About = "Results for each topic we tried to create.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {5, Max<TKafkaVersion>()};
};
TopicsMeta::Type Topics;
-
+
i16 ApiKey() const override { return CREATE_TOPICS; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TCreateTopicsResponseData& other) const = default;
};
@@ -5913,80 +5921,80 @@ public:
class TInitProducerIdRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TInitProducerIdRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 4};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TInitProducerIdRequestData();
~TInitProducerIdRequestData() = default;
-
+
struct TransactionalIdMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "transactionalId";
static constexpr const char* About = "The transactional id, or null if the producer is not transactional.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
TransactionalIdMeta::Type TransactionalId;
-
+
struct TransactionTimeoutMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "transactionTimeoutMs";
static constexpr const char* About = "The time in ms to wait before aborting idle transactions sent by this producer. This is only relevant if a TransactionalId has been defined.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
TransactionTimeoutMsMeta::Type TransactionTimeoutMs;
-
+
struct ProducerIdMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "producerId";
static constexpr const char* About = "The producer id. This is used to disambiguate requests if a transactional id is reused following its expiration.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ProducerIdMeta::Type ProducerId;
-
+
struct ProducerEpochMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "producerEpoch";
static constexpr const char* About = "The producer's current epoch. This will be checked against the producer epoch on the broker, and the request will return an error if they do not match.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
};
ProducerEpochMeta::Type ProducerEpoch;
-
+
i16 ApiKey() const override { return INIT_PRODUCER_ID; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TInitProducerIdRequestData& other) const = default;
};
@@ -5994,242 +6002,1687 @@ public:
class TInitProducerIdResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TInitProducerIdResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 4};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TInitProducerIdResponseData();
~TInitProducerIdResponseData() = default;
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct ProducerIdMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "producerId";
static constexpr const char* About = "The current producer id.";
static const Type Default; // = -1;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ProducerIdMeta::Type ProducerId;
+
+ struct ProducerEpochMeta {
+ using Type = TKafkaInt16;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "producerEpoch";
+ static constexpr const char* About = "The current epoch associated with the producer id.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
+ };
+ ProducerEpochMeta::Type ProducerEpoch;
+
+ i16 ApiKey() const override { return INIT_PRODUCER_ID; };
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TInitProducerIdResponseData& other) const = default;
+};
+
+class TAddPartitionsToTxnRequestData : public TApiMessage {
+public:
+ typedef std::shared_ptr<TAddPartitionsToTxnRequestData> TPtr;
+
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TAddPartitionsToTxnRequestData();
+ ~TAddPartitionsToTxnRequestData() = default;
+
+ class TAddPartitionsToTxnTopic : public TMessage {
+ public:
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TAddPartitionsToTxnTopic();
+ ~TAddPartitionsToTxnTopic() = default;
+
+ struct NameMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "name";
+ static constexpr const char* About = "The name of the topic.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ NameMeta::Type Name;
+
+ struct PartitionsMeta {
+ using ItemType = TKafkaInt32;
+ using ItemTypeDesc = NPrivate::TKafkaIntDesc;
+ using Type = std::vector<TKafkaInt32>;
+ using TypeDesc = NPrivate::TKafkaArrayDesc;
+
+ static constexpr const char* Name = "partitions";
+ static constexpr const char* About = "The partition indexes to add to the transaction";
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ PartitionsMeta::Type Partitions;
+
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TAddPartitionsToTxnTopic& other) const = default;
+ };
+
+ struct TransactionalIdMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "transactionalId";
+ static constexpr const char* About = "The transactional id corresponding to the transaction.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ TransactionalIdMeta::Type TransactionalId;
+
+ struct ProducerIdMeta {
+ using Type = TKafkaInt64;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "producerId";
+ static constexpr const char* About = "Current producer id in use by the transactional id.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ProducerIdMeta::Type ProducerId;
+
struct ProducerEpochMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "producerEpoch";
+ static constexpr const char* About = "Current epoch associated with the producer id.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ProducerEpochMeta::Type ProducerEpoch;
+
+ struct TopicsMeta {
+ using ItemType = TAddPartitionsToTxnTopic;
+ using ItemTypeDesc = NPrivate::TKafkaStructDesc;
+ using Type = std::vector<TAddPartitionsToTxnTopic>;
+ using TypeDesc = NPrivate::TKafkaArrayDesc;
+
+ static constexpr const char* Name = "topics";
+ static constexpr const char* About = "The partitions to add to the transaction.";
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ TopicsMeta::Type Topics;
+
+ i16 ApiKey() const override { return ADD_PARTITIONS_TO_TXN; };
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TAddPartitionsToTxnRequestData& other) const = default;
+};
+
+
+class TAddPartitionsToTxnResponseData : public TApiMessage {
+public:
+ typedef std::shared_ptr<TAddPartitionsToTxnResponseData> TPtr;
+
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TAddPartitionsToTxnResponseData();
+ ~TAddPartitionsToTxnResponseData() = default;
+
+ class TAddPartitionsToTxnTopicResult : public TMessage {
+ public:
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TAddPartitionsToTxnTopicResult();
+ ~TAddPartitionsToTxnTopicResult() = default;
+
+ class TAddPartitionsToTxnPartitionResult : public TMessage {
+ public:
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TAddPartitionsToTxnPartitionResult();
+ ~TAddPartitionsToTxnPartitionResult() = default;
+
+ struct PartitionIndexMeta {
+ using Type = TKafkaInt32;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "partitionIndex";
+ static constexpr const char* About = "The partition indexes.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ PartitionIndexMeta::Type PartitionIndex;
+
+ struct ErrorCodeMeta {
+ using Type = TKafkaInt16;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "errorCode";
+ static constexpr const char* About = "The response error code.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ErrorCodeMeta::Type ErrorCode;
+
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TAddPartitionsToTxnPartitionResult& other) const = default;
+ };
+
+ struct NameMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "name";
+ static constexpr const char* About = "The topic name.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ NameMeta::Type Name;
+
+ struct ResultsMeta {
+ using ItemType = TAddPartitionsToTxnPartitionResult;
+ using ItemTypeDesc = NPrivate::TKafkaStructDesc;
+ using Type = std::vector<TAddPartitionsToTxnPartitionResult>;
+ using TypeDesc = NPrivate::TKafkaArrayDesc;
+
+ static constexpr const char* Name = "results";
+ static constexpr const char* About = "The results for each partition";
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ResultsMeta::Type Results;
+
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TAddPartitionsToTxnTopicResult& other) const = default;
+ };
+
+ struct ThrottleTimeMsMeta {
+ using Type = TKafkaInt32;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "throttleTimeMs";
+ static constexpr const char* About = "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ThrottleTimeMsMeta::Type ThrottleTimeMs;
+
+ struct ResultsMeta {
+ using ItemType = TAddPartitionsToTxnTopicResult;
+ using ItemTypeDesc = NPrivate::TKafkaStructDesc;
+ using Type = std::vector<TAddPartitionsToTxnTopicResult>;
+ using TypeDesc = NPrivate::TKafkaArrayDesc;
+
+ static constexpr const char* Name = "results";
+ static constexpr const char* About = "The results for each topic.";
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ResultsMeta::Type Results;
+
+ i16 ApiKey() const override { return ADD_PARTITIONS_TO_TXN; };
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TAddPartitionsToTxnResponseData& other) const = default;
+};
+
+class TAddOffsetsToTxnRequestData : public TApiMessage {
+public:
+ typedef std::shared_ptr<TAddOffsetsToTxnRequestData> TPtr;
+
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TAddOffsetsToTxnRequestData();
+ ~TAddOffsetsToTxnRequestData() = default;
+
+ struct TransactionalIdMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "transactionalId";
+ static constexpr const char* About = "The transactional id corresponding to the transaction.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ TransactionalIdMeta::Type TransactionalId;
+
+ struct ProducerIdMeta {
+ using Type = TKafkaInt64;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "producerId";
+ static constexpr const char* About = "Current producer id in use by the transactional id.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ProducerIdMeta::Type ProducerId;
+
+ struct ProducerEpochMeta {
+ using Type = TKafkaInt16;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
static constexpr const char* Name = "producerEpoch";
- static constexpr const char* About = "The current epoch associated with the producer id.";
+ static constexpr const char* About = "Current epoch associated with the producer id.";
static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ProducerEpochMeta::Type ProducerEpoch;
+
+ struct GroupIdMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "groupId";
+ static constexpr const char* About = "The unique group identifier.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ GroupIdMeta::Type GroupId;
+
+ i16 ApiKey() const override { return ADD_OFFSETS_TO_TXN; };
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TAddOffsetsToTxnRequestData& other) const = default;
+};
+
+class TAddOffsetsToTxnResponseData : public TApiMessage {
+public:
+ typedef std::shared_ptr<TAddOffsetsToTxnResponseData> TPtr;
+
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TAddOffsetsToTxnResponseData();
+ ~TAddOffsetsToTxnResponseData() = default;
+
+ struct ThrottleTimeMsMeta {
+ using Type = TKafkaInt32;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "throttleTimeMs";
+ static constexpr const char* About = "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
+ static const Type Default; // = 0;
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
- static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ThrottleTimeMsMeta::Type ThrottleTimeMs;
+
+ struct ErrorCodeMeta {
+ using Type = TKafkaInt16;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "errorCode";
+ static constexpr const char* About = "The response error code, or 0 if there was no error.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ErrorCodeMeta::Type ErrorCode;
+
+ i16 ApiKey() const override { return ADD_OFFSETS_TO_TXN; };
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TAddOffsetsToTxnResponseData& other) const = default;
+};
+
+
+class TEndTxnRequestData : public TApiMessage {
+public:
+ typedef std::shared_ptr<TEndTxnRequestData> TPtr;
+
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TEndTxnRequestData();
+ ~TEndTxnRequestData() = default;
+
+ struct TransactionalIdMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "transactionalId";
+ static constexpr const char* About = "The ID of the transaction to end.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ TransactionalIdMeta::Type TransactionalId;
+
+ struct ProducerIdMeta {
+ using Type = TKafkaInt64;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "producerId";
+ static constexpr const char* About = "The producer ID.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ProducerIdMeta::Type ProducerId;
+
+ struct ProducerEpochMeta {
+ using Type = TKafkaInt16;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "producerEpoch";
+ static constexpr const char* About = "The current epoch associated with the producer.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
};
ProducerEpochMeta::Type ProducerEpoch;
+
+ struct CommittedMeta {
+ using Type = TKafkaBool;
+ using TypeDesc = NPrivate::TKafkaBoolDesc;
+
+ static constexpr const char* Name = "committed";
+ static constexpr const char* About = "True if the transaction was committed, false if it was aborted.";
+ static const Type Default; // = false;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ CommittedMeta::Type Committed;
+
+ i16 ApiKey() const override { return END_TXN; };
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TEndTxnRequestData& other) const = default;
+};
- i16 ApiKey() const override { return INIT_PRODUCER_ID; };
+
+// EndTxn response message (API key END_TXN) of the Kafka wire protocol.
+// Declaration only: Size/Read/Write bodies and the per-field Default
+// constants are defined out of line (see the matching .cpp).
+// NOTE(review): the repetitive Meta-struct layout indicates this header is
+// machine-generated from the Kafka protocol JSON specs -- prefer changing
+// the generator over hand-editing these classes.
+class TEndTxnResponseData : public TApiMessage {
+public:
+ typedef std::shared_ptr<TEndTxnResponseData> TPtr;
+
+ // Protocol versions in which this message exists (0..3) and the versions
+ // that use the flexible (compact/tagged) encoding (3 and up).
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TEndTxnResponseData();
+ ~TEndTxnResponseData() = default;
+
+ // Per-field metadata: wire type + codec, spec name/description, default
+ // value, and the version ranges governing presence/tagging/nullability.
+ struct ThrottleTimeMsMeta {
+ using Type = TKafkaInt32;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "throttleTimeMs";
+ static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ThrottleTimeMsMeta::Type ThrottleTimeMs;
+
+ struct ErrorCodeMeta {
+ using Type = TKafkaInt16;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "errorCode";
+ static constexpr const char* About = "The error code, or 0 if there was no error.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ErrorCodeMeta::Type ErrorCode;
+
+ i16 ApiKey() const override { return END_TXN; };
 i32 Size(TKafkaVersion version) const override;
 void Read(TKafkaReadable& readable, TKafkaVersion version) override;
 void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TEndTxnResponseData& other) const = default;
+};
- bool operator==(const TInitProducerIdResponseData& other) const = default;
+
+// TxnOffsetCommit request message (API key TXN_OFFSET_COMMIT): commits
+// consumer offsets as part of a transaction. Declaration only --
+// Size/Read/Write bodies and the field Default constants are defined out
+// of line (see the matching .cpp).
+// NOTE(review): layout matches the other generated message classes in this
+// header; regenerate from the protocol spec rather than hand-editing.
+class TTxnOffsetCommitRequestData : public TApiMessage {
+public:
+ typedef std::shared_ptr<TTxnOffsetCommitRequestData> TPtr;
+
+ // Message exists in protocol versions 0..3; flexible encoding from v3.
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TTxnOffsetCommitRequestData();
+ ~TTxnOffsetCommitRequestData() = default;
+
+ // One topic entry of the request; groups the per-partition commits.
+ class TTxnOffsetCommitRequestTopic : public TMessage {
+ public:
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TTxnOffsetCommitRequestTopic();
+ ~TTxnOffsetCommitRequestTopic() = default;
+
+ // One partition's commit: partition index, offset, leader epoch
+ // (present from v2) and optional client-supplied metadata.
+ class TTxnOffsetCommitRequestPartition : public TMessage {
+ public:
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TTxnOffsetCommitRequestPartition();
+ ~TTxnOffsetCommitRequestPartition() = default;
+
+ struct PartitionIndexMeta {
+ using Type = TKafkaInt32;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "partitionIndex";
+ static constexpr const char* About = "The index of the partition within the topic.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ PartitionIndexMeta::Type PartitionIndex;
+
+ struct CommittedOffsetMeta {
+ using Type = TKafkaInt64;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "committedOffset";
+ static constexpr const char* About = "The message offset to be committed.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ CommittedOffsetMeta::Type CommittedOffset;
+
+ // Only serialized for protocol versions >= 2 (see PresentVersions).
+ struct CommittedLeaderEpochMeta {
+ using Type = TKafkaInt32;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "committedLeaderEpoch";
+ static constexpr const char* About = "The leader epoch of the last consumed record.";
+ static const Type Default; // = -1;
+
+ static constexpr TKafkaVersions PresentVersions = {2, Max<TKafkaVersion>()};
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ CommittedLeaderEpochMeta::Type CommittedLeaderEpoch;
+
+ struct CommittedMetadataMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "committedMetadata";
+ static constexpr const char* About = "Any associated metadata the client wants to keep.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsAlways;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ CommittedMetadataMeta::Type CommittedMetadata;
+
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TTxnOffsetCommitRequestPartition& other) const = default;
+ };
+
+ struct NameMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "name";
+ static constexpr const char* About = "The topic name.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ NameMeta::Type Name;
+
+ // NOTE(review): "committ" typo below is part of the spec-supplied About
+ // string (runtime text); confirm against the upstream Kafka JSON spec
+ // and the generator before changing it here.
+ struct PartitionsMeta {
+ using ItemType = TTxnOffsetCommitRequestPartition;
+ using ItemTypeDesc = NPrivate::TKafkaStructDesc;
+ using Type = std::vector<TTxnOffsetCommitRequestPartition>;
+ using TypeDesc = NPrivate::TKafkaArrayDesc;
+
+ static constexpr const char* Name = "partitions";
+ static constexpr const char* About = "The partitions inside the topic that we want to committ offsets for.";
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ PartitionsMeta::Type Partitions;
+
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TTxnOffsetCommitRequestTopic& other) const = default;
+ };
+
+ // --- Top-level request fields ---
+ struct TransactionalIdMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "transactionalId";
+ static constexpr const char* About = "The ID of the transaction.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ TransactionalIdMeta::Type TransactionalId;
+
+ struct GroupIdMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "groupId";
+ static constexpr const char* About = "The ID of the group.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ GroupIdMeta::Type GroupId;
+
+ struct ProducerIdMeta {
+ using Type = TKafkaInt64;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "producerId";
+ static constexpr const char* About = "The current producer ID in use by the transactional ID.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ProducerIdMeta::Type ProducerId;
+
+ struct ProducerEpochMeta {
+ using Type = TKafkaInt16;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "producerEpoch";
+ static constexpr const char* About = "The current epoch associated with the producer ID.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ProducerEpochMeta::Type ProducerEpoch;
+
+ // Consumer-group fields below only exist from protocol version 3 on
+ // (PresentVersions = {3, ...}), which is also the first flexible version.
+ struct GenerationIdMeta {
+ using Type = TKafkaInt32;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "generationId";
+ static constexpr const char* About = "The generation of the consumer.";
+ static const Type Default; // = -1;
+
+ static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
+ };
+ GenerationIdMeta::Type GenerationId;
+
+ struct MemberIdMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "memberId";
+ static constexpr const char* About = "The member ID assigned by the group coordinator.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
+ };
+ MemberIdMeta::Type MemberId;
+
+ struct GroupInstanceIdMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "groupInstanceId";
+ static constexpr const char* About = "The unique identifier of the consumer instance provided by end user.";
+ static const Type Default; // = std::nullopt;
+
+ static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsAlways;
+ static constexpr TKafkaVersions FlexibleVersions = VersionsAlways;
+ };
+ GroupInstanceIdMeta::Type GroupInstanceId;
+
+ struct TopicsMeta {
+ using ItemType = TTxnOffsetCommitRequestTopic;
+ using ItemTypeDesc = NPrivate::TKafkaStructDesc;
+ using Type = std::vector<TTxnOffsetCommitRequestTopic>;
+ using TypeDesc = NPrivate::TKafkaArrayDesc;
+
+ static constexpr const char* Name = "topics";
+ static constexpr const char* About = "Each topic that we want to commit offsets for.";
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ TopicsMeta::Type Topics;
+
+ i16 ApiKey() const override { return TXN_OFFSET_COMMIT; };
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TTxnOffsetCommitRequestData& other) const = default;
+};
+
+
+// TxnOffsetCommit response message (API key TXN_OFFSET_COMMIT): per-topic,
+// per-partition error codes for a transactional offset commit. Declaration
+// only -- Size/Read/Write bodies and field Default constants are defined
+// out of line (see the matching .cpp).
+// NOTE(review): same generated Meta-struct layout as the sibling classes;
+// regenerate from the protocol spec rather than hand-editing.
+class TTxnOffsetCommitResponseData : public TApiMessage {
+public:
+ typedef std::shared_ptr<TTxnOffsetCommitResponseData> TPtr;
+
+ // Message exists in protocol versions 0..3; flexible encoding from v3.
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TTxnOffsetCommitResponseData();
+ ~TTxnOffsetCommitResponseData() = default;
+
+ // One topic entry of the response; groups the per-partition results.
+ class TTxnOffsetCommitResponseTopic : public TMessage {
+ public:
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TTxnOffsetCommitResponseTopic();
+ ~TTxnOffsetCommitResponseTopic() = default;
+
+ // Result for a single partition: its index and error code.
+ class TTxnOffsetCommitResponsePartition : public TMessage {
+ public:
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 3};
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+
+ TTxnOffsetCommitResponsePartition();
+ ~TTxnOffsetCommitResponsePartition() = default;
+
+ struct PartitionIndexMeta {
+ using Type = TKafkaInt32;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "partitionIndex";
+ static constexpr const char* About = "The partition index.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ PartitionIndexMeta::Type PartitionIndex;
+
+ struct ErrorCodeMeta {
+ using Type = TKafkaInt16;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "errorCode";
+ static constexpr const char* About = "The error code, or 0 if there was no error.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ErrorCodeMeta::Type ErrorCode;
+
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TTxnOffsetCommitResponsePartition& other) const = default;
+ };
+
+ struct NameMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "name";
+ static constexpr const char* About = "The topic name.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ NameMeta::Type Name;
+
+ struct PartitionsMeta {
+ using ItemType = TTxnOffsetCommitResponsePartition;
+ using ItemTypeDesc = NPrivate::TKafkaStructDesc;
+ using Type = std::vector<TTxnOffsetCommitResponsePartition>;
+ using TypeDesc = NPrivate::TKafkaArrayDesc;
+
+ static constexpr const char* Name = "partitions";
+ static constexpr const char* About = "The responses for each partition in the topic.";
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ PartitionsMeta::Type Partitions;
+
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TTxnOffsetCommitResponseTopic& other) const = default;
+ };
+
+ struct ThrottleTimeMsMeta {
+ using Type = TKafkaInt32;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "throttleTimeMs";
+ static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ ThrottleTimeMsMeta::Type ThrottleTimeMs;
+
+ struct TopicsMeta {
+ using ItemType = TTxnOffsetCommitResponseTopic;
+ using ItemTypeDesc = NPrivate::TKafkaStructDesc;
+ using Type = std::vector<TTxnOffsetCommitResponseTopic>;
+ using TypeDesc = NPrivate::TKafkaArrayDesc;
+
+ static constexpr const char* Name = "topics";
+ static constexpr const char* About = "The responses for each topic.";
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {3, Max<TKafkaVersion>()};
+ };
+ TopicsMeta::Type Topics;
+
+ i16 ApiKey() const override { return TXN_OFFSET_COMMIT; };
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TTxnOffsetCommitResponseData& other) const = default;
+};
+
+
+// DescribeConfigs request message (API key DESCRIBE_CONFIGS): asks the
+// broker to describe configuration entries for a set of resources.
+// Declaration only -- Size/Read/Write bodies and field Default constants
+// are defined out of line (see the matching .cpp).
+// NOTE(review): same generated Meta-struct layout as the sibling classes;
+// note this message becomes flexible at v4 (not v3 like the Txn messages).
+class TDescribeConfigsRequestData : public TApiMessage {
+public:
+ typedef std::shared_ptr<TDescribeConfigsRequestData> TPtr;
+
+ // Message exists in protocol versions 0..4; flexible encoding from v4.
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 4};
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+
+ TDescribeConfigsRequestData();
+ ~TDescribeConfigsRequestData() = default;
+
+ // One resource to describe: its type, name, and (optionally) the subset
+ // of configuration keys requested (null list means "all keys").
+ class TDescribeConfigsResource : public TMessage {
+ public:
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 4};
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+
+ TDescribeConfigsResource();
+ ~TDescribeConfigsResource() = default;
+
+ struct ResourceTypeMeta {
+ using Type = TKafkaInt8;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "resourceType";
+ static constexpr const char* About = "The resource type.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ResourceTypeMeta::Type ResourceType;
+
+ struct ResourceNameMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "resourceName";
+ static constexpr const char* About = "The resource name.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ResourceNameMeta::Type ResourceName;
+
+ struct ConfigurationKeysMeta {
+ using ItemType = TKafkaString;
+ using ItemTypeDesc = NPrivate::TKafkaStringDesc;
+ using Type = std::vector<TKafkaString>;
+ using TypeDesc = NPrivate::TKafkaArrayDesc;
+
+ static constexpr const char* Name = "configurationKeys";
+ static constexpr const char* About = "The configuration keys to list, or null to list all configuration keys.";
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsAlways;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ConfigurationKeysMeta::Type ConfigurationKeys;
+
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TDescribeConfigsResource& other) const = default;
+ };
+
+ struct ResourcesMeta {
+ using ItemType = TDescribeConfigsResource;
+ using ItemTypeDesc = NPrivate::TKafkaStructDesc;
+ using Type = std::vector<TDescribeConfigsResource>;
+ using TypeDesc = NPrivate::TKafkaArrayDesc;
+
+ static constexpr const char* Name = "resources";
+ static constexpr const char* About = "The resources whose configurations we want to describe.";
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ResourcesMeta::Type Resources;
+
+ // Only serialized for protocol versions >= 1 (see PresentVersions).
+ struct IncludeSynonymsMeta {
+ using Type = TKafkaBool;
+ using TypeDesc = NPrivate::TKafkaBoolDesc;
+
+ static constexpr const char* Name = "includeSynonyms";
+ static constexpr const char* About = "True if we should include all synonyms.";
+ static const Type Default; // = false;
+
+ static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ IncludeSynonymsMeta::Type IncludeSynonyms;
+
+ // Only serialized for protocol versions >= 3 (see PresentVersions).
+ struct IncludeDocumentationMeta {
+ using Type = TKafkaBool;
+ using TypeDesc = NPrivate::TKafkaBoolDesc;
+
+ static constexpr const char* Name = "includeDocumentation";
+ static constexpr const char* About = "True if we should include configuration documentation.";
+ static const Type Default; // = false;
+
+ static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ IncludeDocumentationMeta::Type IncludeDocumentation;
+
+ i16 ApiKey() const override { return DESCRIBE_CONFIGS; };
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TDescribeConfigsRequestData& other) const = default;
+};
+
+
+class TDescribeConfigsResponseData : public TApiMessage {
+public:
+ typedef std::shared_ptr<TDescribeConfigsResponseData> TPtr;
+
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 4};
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+
+ TDescribeConfigsResponseData();
+ ~TDescribeConfigsResponseData() = default;
+
+ class TDescribeConfigsResult : public TMessage {
+ public:
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 4};
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+
+ TDescribeConfigsResult();
+ ~TDescribeConfigsResult() = default;
+
+ class TDescribeConfigsResourceResult : public TMessage {
+ public:
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {0, 4};
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+
+ TDescribeConfigsResourceResult();
+ ~TDescribeConfigsResourceResult() = default;
+
+ class TDescribeConfigsSynonym : public TMessage {
+ public:
+ struct MessageMeta {
+ static constexpr TKafkaVersions PresentVersions = {1, 4};
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+
+ TDescribeConfigsSynonym();
+ ~TDescribeConfigsSynonym() = default;
+
+ struct NameMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "name";
+ static constexpr const char* About = "The synonym name.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ NameMeta::Type Name;
+
+ struct ValueMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "value";
+ static constexpr const char* About = "The synonym value.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsAlways;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ValueMeta::Type Value;
+
+ struct SourceMeta {
+ using Type = TKafkaInt8;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "source";
+ static constexpr const char* About = "The synonym source.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ SourceMeta::Type Source;
+
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TDescribeConfigsSynonym& other) const = default;
+ };
+
+ struct NameMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "name";
+ static constexpr const char* About = "The configuration name.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ NameMeta::Type Name;
+
+ struct ValueMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "value";
+ static constexpr const char* About = "The configuration value.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsAlways;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ValueMeta::Type Value;
+
+ struct ReadOnlyMeta {
+ using Type = TKafkaBool;
+ using TypeDesc = NPrivate::TKafkaBoolDesc;
+
+ static constexpr const char* Name = "readOnly";
+ static constexpr const char* About = "True if the configuration is read-only.";
+ static const Type Default; // = false;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ReadOnlyMeta::Type ReadOnly;
+
+ struct IsDefaultMeta {
+ using Type = TKafkaBool;
+ using TypeDesc = NPrivate::TKafkaBoolDesc;
+
+ static constexpr const char* Name = "isDefault";
+ static constexpr const char* About = "True if the configuration is not set.";
+ static const Type Default; // = false;
+
+ static constexpr TKafkaVersions PresentVersions = {0, 0};
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ IsDefaultMeta::Type IsDefault;
+
+ struct ConfigSourceMeta {
+ using Type = TKafkaInt8;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "configSource";
+ static constexpr const char* About = "The configuration source.";
+ static const Type Default; // = -1;
+
+ static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ConfigSourceMeta::Type ConfigSource;
+
+ struct IsSensitiveMeta {
+ using Type = TKafkaBool;
+ using TypeDesc = NPrivate::TKafkaBoolDesc;
+
+ static constexpr const char* Name = "isSensitive";
+ static constexpr const char* About = "True if this configuration is sensitive.";
+ static const Type Default; // = false;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ IsSensitiveMeta::Type IsSensitive;
+
+ struct SynonymsMeta {
+ using ItemType = TDescribeConfigsSynonym;
+ using ItemTypeDesc = NPrivate::TKafkaStructDesc;
+ using Type = std::vector<TDescribeConfigsSynonym>;
+ using TypeDesc = NPrivate::TKafkaArrayDesc;
+
+ static constexpr const char* Name = "synonyms";
+ static constexpr const char* About = "The synonyms for this configuration key.";
+
+ static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ SynonymsMeta::Type Synonyms;
+
+ struct ConfigTypeMeta {
+ using Type = TKafkaInt8;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "configType";
+ static constexpr const char* About = "The configuration data type. Type can be one of the following values - BOOLEAN, STRING, INT, SHORT, LONG, DOUBLE, LIST, CLASS, PASSWORD";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ConfigTypeMeta::Type ConfigType;
+
+ struct DocumentationMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "documentation";
+ static constexpr const char* About = "The configuration documentation.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = {3, Max<TKafkaVersion>()};
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsAlways;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ DocumentationMeta::Type Documentation;
+
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TDescribeConfigsResourceResult& other) const = default;
+ };
+
+ struct ErrorCodeMeta {
+ using Type = TKafkaInt16;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "errorCode";
+ static constexpr const char* About = "The error code, or 0 if we were able to successfully describe the configurations.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ErrorCodeMeta::Type ErrorCode;
+
+ struct ErrorMessageMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "errorMessage";
+ static constexpr const char* About = "The error message, or null if we were able to successfully describe the configurations.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsAlways;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ErrorMessageMeta::Type ErrorMessage;
+
+ struct ResourceTypeMeta {
+ using Type = TKafkaInt8;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "resourceType";
+ static constexpr const char* About = "The resource type.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ResourceTypeMeta::Type ResourceType;
+
+ struct ResourceNameMeta {
+ using Type = TKafkaString;
+ using TypeDesc = NPrivate::TKafkaStringDesc;
+
+ static constexpr const char* Name = "resourceName";
+ static constexpr const char* About = "The resource name.";
+ static const Type Default; // = {""};
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ResourceNameMeta::Type ResourceName;
+
+ struct ConfigsMeta {
+ using ItemType = TDescribeConfigsResourceResult;
+ using ItemTypeDesc = NPrivate::TKafkaStructDesc;
+ using Type = std::vector<TDescribeConfigsResourceResult>;
+ using TypeDesc = NPrivate::TKafkaArrayDesc;
+
+ static constexpr const char* Name = "configs";
+ static constexpr const char* About = "Each listed configuration.";
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ConfigsMeta::Type Configs;
+
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TDescribeConfigsResult& other) const = default;
+ };
+
+ struct ThrottleTimeMsMeta {
+ using Type = TKafkaInt32;
+ using TypeDesc = NPrivate::TKafkaIntDesc;
+
+ static constexpr const char* Name = "throttleTimeMs";
+ static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
+ static const Type Default; // = 0;
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ThrottleTimeMsMeta::Type ThrottleTimeMs;
+
+ struct ResultsMeta {
+ using ItemType = TDescribeConfigsResult;
+ using ItemTypeDesc = NPrivate::TKafkaStructDesc;
+ using Type = std::vector<TDescribeConfigsResult>;
+ using TypeDesc = NPrivate::TKafkaArrayDesc;
+
+ static constexpr const char* Name = "results";
+ static constexpr const char* About = "The results for each resource.";
+
+ static constexpr TKafkaVersions PresentVersions = VersionsAlways;
+ static constexpr TKafkaVersions TaggedVersions = VersionsNever;
+ static constexpr TKafkaVersions NullableVersions = VersionsNever;
+ static constexpr TKafkaVersions FlexibleVersions = {4, Max<TKafkaVersion>()};
+ };
+ ResultsMeta::Type Results;
+
+ i16 ApiKey() const override { return DESCRIBE_CONFIGS; };
+ i32 Size(TKafkaVersion version) const override;
+ void Read(TKafkaReadable& readable, TKafkaVersion version) override;
+ void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
+
+ bool operator==(const TDescribeConfigsResponseData& other) const = default;
};
class TAlterConfigsRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TAlterConfigsRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 2};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TAlterConfigsRequestData();
~TAlterConfigsRequestData() = default;
-
+
class TAlterConfigsResource : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 2};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TAlterConfigsResource();
~TAlterConfigsResource() = default;
-
+
class TAlterableConfig : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 2};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TAlterableConfig();
~TAlterableConfig() = default;
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The configuration key name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct ValueMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "value";
static constexpr const char* About = "The value to set for the configuration key.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ValueMeta::Type Value;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TAlterableConfig& other) const = default;
};
-
+
struct ResourceTypeMeta {
using Type = TKafkaInt8;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "resourceType";
static constexpr const char* About = "The resource type.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ResourceTypeMeta::Type ResourceType;
-
+
struct ResourceNameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "resourceName";
static constexpr const char* About = "The resource name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ResourceNameMeta::Type ResourceName;
-
+
struct ConfigsMeta {
using ItemType = TAlterableConfig;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TAlterableConfig>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "configs";
static constexpr const char* About = "The configurations.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ConfigsMeta::Type Configs;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TAlterConfigsResource& other) const = default;
};
-
+
struct ResourcesMeta {
using ItemType = TAlterConfigsResource;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TAlterConfigsResource>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "resources";
static constexpr const char* About = "The updates for each resource.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ResourcesMeta::Type Resources;
-
+
struct ValidateOnlyMeta {
using Type = TKafkaBool;
using TypeDesc = NPrivate::TKafkaBoolDesc;
-
+
static constexpr const char* Name = "validateOnly";
static constexpr const char* About = "True if we should validate the request, but not change the configurations.";
static const Type Default; // = false;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ValidateOnlyMeta::Type ValidateOnly;
-
+
i16 ApiKey() const override { return ALTER_CONFIGS; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TAlterConfigsRequestData& other) const = default;
};
@@ -6237,128 +7690,128 @@ public:
class TAlterConfigsResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TAlterConfigsResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 2};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TAlterConfigsResponseData();
~TAlterConfigsResponseData() = default;
-
+
class TAlterConfigsResourceResponse : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 2};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TAlterConfigsResourceResponse();
~TAlterConfigsResourceResponse() = default;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The resource error code.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct ErrorMessageMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "errorMessage";
static constexpr const char* About = "The resource error message, or null if there was no error.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ErrorMessageMeta::Type ErrorMessage;
-
+
struct ResourceTypeMeta {
using Type = TKafkaInt8;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "resourceType";
static constexpr const char* About = "The resource type.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ResourceTypeMeta::Type ResourceType;
-
+
struct ResourceNameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "resourceName";
static constexpr const char* About = "The resource name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ResourceNameMeta::Type ResourceName;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TAlterConfigsResourceResponse& other) const = default;
};
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct ResponsesMeta {
using ItemType = TAlterConfigsResourceResponse;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TAlterConfigsResourceResponse>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "responses";
static constexpr const char* About = "The responses for each resource.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ResponsesMeta::Type Responses;
-
+
i16 ApiKey() const override { return ALTER_CONFIGS; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TAlterConfigsResponseData& other) const = default;
};
@@ -6366,34 +7819,34 @@ public:
class TSaslAuthenticateRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TSaslAuthenticateRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 2};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TSaslAuthenticateRequestData();
~TSaslAuthenticateRequestData() = default;
-
+
struct AuthBytesMeta {
using Type = TKafkaBytes;
using TypeDesc = NPrivate::TKafkaBytesDesc;
-
+
static constexpr const char* Name = "authBytes";
static constexpr const char* About = "The SASL authentication bytes from the client, as defined by the SASL mechanism.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
AuthBytesMeta::Type AuthBytes;
-
+
i16 ApiKey() const override { return SASL_AUTHENTICATE; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TSaslAuthenticateRequestData& other) const = default;
};
@@ -6401,79 +7854,79 @@ public:
class TSaslAuthenticateResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TSaslAuthenticateResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 2};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TSaslAuthenticateResponseData();
~TSaslAuthenticateResponseData() = default;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The error code, or 0 if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct ErrorMessageMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "errorMessage";
static constexpr const char* About = "The error message, or null if there was no error.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ErrorMessageMeta::Type ErrorMessage;
-
+
struct AuthBytesMeta {
using Type = TKafkaBytes;
using TypeDesc = NPrivate::TKafkaBytesDesc;
-
+
static constexpr const char* Name = "authBytes";
static constexpr const char* About = "The SASL authentication bytes from the server, as defined by the SASL mechanism.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
AuthBytesMeta::Type AuthBytes;
-
+
struct SessionLifetimeMsMeta {
using Type = TKafkaInt64;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "sessionLifetimeMs";
static constexpr const char* About = "The SASL authentication bytes from the server, as defined by the SASL mechanism.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = {1, Max<TKafkaVersion>()};
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
SessionLifetimeMsMeta::Type SessionLifetimeMs;
-
+
i16 ApiKey() const override { return SASL_AUTHENTICATE; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TSaslAuthenticateResponseData& other) const = default;
};
@@ -6481,162 +7934,162 @@ public:
class TCreatePartitionsRequestData : public TApiMessage {
public:
typedef std::shared_ptr<TCreatePartitionsRequestData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 3};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TCreatePartitionsRequestData();
~TCreatePartitionsRequestData() = default;
-
+
class TCreatePartitionsTopic : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 3};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TCreatePartitionsTopic();
~TCreatePartitionsTopic() = default;
-
+
class TCreatePartitionsAssignment : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 3};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TCreatePartitionsAssignment();
~TCreatePartitionsAssignment() = default;
-
+
struct BrokerIdsMeta {
using ItemType = TKafkaInt32;
using ItemTypeDesc = NPrivate::TKafkaIntDesc;
using Type = std::vector<TKafkaInt32>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "brokerIds";
static constexpr const char* About = "The assigned broker IDs.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
BrokerIdsMeta::Type BrokerIds;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TCreatePartitionsAssignment& other) const = default;
};
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct CountMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "count";
static constexpr const char* About = "The new partition count.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
CountMeta::Type Count;
-
+
struct AssignmentsMeta {
using ItemType = TCreatePartitionsAssignment;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TCreatePartitionsAssignment>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "assignments";
static constexpr const char* About = "The new partition assignments.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
AssignmentsMeta::Type Assignments;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TCreatePartitionsTopic& other) const = default;
};
-
+
struct TopicsMeta {
using ItemType = TCreatePartitionsTopic;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TCreatePartitionsTopic>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "topics";
static constexpr const char* About = "Each topic that we want to create new partitions inside.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
TopicsMeta::Type Topics;
-
+
struct TimeoutMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "timeoutMs";
static constexpr const char* About = "The time in ms to wait for the partitions to be created.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
TimeoutMsMeta::Type TimeoutMs;
-
+
struct ValidateOnlyMeta {
using Type = TKafkaBool;
using TypeDesc = NPrivate::TKafkaBoolDesc;
-
+
static constexpr const char* Name = "validateOnly";
static constexpr const char* About = "If true, then validate the request, but don't actually increase the number of partitions.";
static const Type Default; // = false;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ValidateOnlyMeta::Type ValidateOnly;
-
+
i16 ApiKey() const override { return CREATE_PARTITIONS; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TCreatePartitionsRequestData& other) const = default;
};
@@ -6644,114 +8097,114 @@ public:
class TCreatePartitionsResponseData : public TApiMessage {
public:
typedef std::shared_ptr<TCreatePartitionsResponseData> TPtr;
-
+
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 3};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TCreatePartitionsResponseData();
~TCreatePartitionsResponseData() = default;
-
+
class TCreatePartitionsTopicResult : public TMessage {
public:
struct MessageMeta {
static constexpr TKafkaVersions PresentVersions = {0, 3};
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
-
+
TCreatePartitionsTopicResult();
~TCreatePartitionsTopicResult() = default;
-
+
struct NameMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "name";
static constexpr const char* About = "The topic name.";
static const Type Default; // = {""};
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
NameMeta::Type Name;
-
+
struct ErrorCodeMeta {
using Type = TKafkaInt16;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "errorCode";
static constexpr const char* About = "The result error, or zero if there was no error.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ErrorCodeMeta::Type ErrorCode;
-
+
struct ErrorMessageMeta {
using Type = TKafkaString;
using TypeDesc = NPrivate::TKafkaStringDesc;
-
+
static constexpr const char* Name = "errorMessage";
static constexpr const char* About = "The result message, or null if there was no error.";
static const Type Default; // = std::nullopt;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsAlways;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ErrorMessageMeta::Type ErrorMessage;
-
+
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TCreatePartitionsTopicResult& other) const = default;
};
-
+
struct ThrottleTimeMsMeta {
using Type = TKafkaInt32;
using TypeDesc = NPrivate::TKafkaIntDesc;
-
+
static constexpr const char* Name = "throttleTimeMs";
static constexpr const char* About = "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.";
static const Type Default; // = 0;
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ThrottleTimeMsMeta::Type ThrottleTimeMs;
-
+
struct ResultsMeta {
using ItemType = TCreatePartitionsTopicResult;
using ItemTypeDesc = NPrivate::TKafkaStructDesc;
using Type = std::vector<TCreatePartitionsTopicResult>;
using TypeDesc = NPrivate::TKafkaArrayDesc;
-
+
static constexpr const char* Name = "results";
static constexpr const char* About = "The partition creation results for each topic.";
-
+
static constexpr TKafkaVersions PresentVersions = VersionsAlways;
static constexpr TKafkaVersions TaggedVersions = VersionsNever;
static constexpr TKafkaVersions NullableVersions = VersionsNever;
static constexpr TKafkaVersions FlexibleVersions = {2, Max<TKafkaVersion>()};
};
ResultsMeta::Type Results;
-
+
i16 ApiKey() const override { return CREATE_PARTITIONS; };
i32 Size(TKafkaVersion version) const override;
void Read(TKafkaReadable& readable, TKafkaVersion version) override;
void Write(TKafkaWritable& writable, TKafkaVersion version) const override;
-
+
bool operator==(const TCreatePartitionsResponseData& other) const = default;
};
-} // namespace NKafka
+} // namespace NKafka
diff --git a/ydb/core/kafka_proxy/kafka_transactions_coordinator.cpp b/ydb/core/kafka_proxy/kafka_transactions_coordinator.cpp
new file mode 100644
index 0000000000..001ca00d4d
--- /dev/null
+++ b/ydb/core/kafka_proxy/kafka_transactions_coordinator.cpp
@@ -0,0 +1,179 @@
+#include "kafka_transactions_coordinator.h"
+#include "actors/kafka_transaction_actor.h"
+
+namespace NKafka {
+ // Handles new transactional_id+producer_id+producer_epoch:
+ // 1. validates that producer is not a zombie (in case of parallel init_producer_requests)
+ // 2. saves transactional_id+producer_id+producer_epoch for validation of future transactional requests
+ void TKafkaTransactionsCoordinator::Handle(TEvKafka::TEvSaveTxnProducerRequest::TPtr& ev, const TActorContext& ctx){
+ TEvKafka::TEvSaveTxnProducerRequest* request = ev->Get();
+
+ if (ProducersByTransactionalId.contains(request->TransactionalId)) {
+ TProducerState& currentProducerState = ProducersByTransactionalId[request->TransactionalId];
+ TProducerState newProducerState = TProducerState(request->ProducerId, request->ProducerEpoch);
+
+ if (NewProducerStateIsOutdated(currentProducerState, newProducerState)) {
+ TString message = GetProducerIsOutdatedError(request->TransactionalId, currentProducerState, newProducerState);
+ ctx.Send(ev->Sender, new TEvKafka::TEvSaveTxnProducerResponse(TEvKafka::TEvSaveTxnProducerResponse::EStatus::PRODUCER_FENCED, message));
+ return;
+ }
+
+ currentProducerState.Id = request->ProducerId;
+ currentProducerState.Epoch = request->ProducerEpoch;
+ } else {
+ ProducersByTransactionalId[request->TransactionalId] = TProducerState(request->ProducerId, request->ProducerEpoch);
+ }
+
+ ctx.Send(ev->Sender, new TEvKafka::TEvSaveTxnProducerResponse(TEvKafka::TEvSaveTxnProducerResponse::EStatus::OK, ""));
+ };
+
+ void TKafkaTransactionsCoordinator::Handle(TEvKafka::TEvAddPartitionsToTxnRequest::TPtr& ev, const TActorContext& ctx){
+ HandleTransactionalRequest<TAddPartitionsToTxnResponseData>(ev, ctx);
+ };
+
+ void TKafkaTransactionsCoordinator::Handle(TEvKafka::TEvAddOffsetsToTxnRequest::TPtr& ev, const TActorContext& ctx){
+ HandleTransactionalRequest<TAddOffsetsToTxnResponseData>(ev, ctx);
+ };
+
+ void TKafkaTransactionsCoordinator::Handle(TEvKafka::TEvTxnOffsetCommitRequest::TPtr& ev, const TActorContext& ctx) {
+ HandleTransactionalRequest<TTxnOffsetCommitResponseData>(ev, ctx);
+ };
+
+ void TKafkaTransactionsCoordinator::Handle(TEvKafka::TEvEndTxnRequest::TPtr& ev, const TActorContext& ctx) {
+ HandleTransactionalRequest<TEndTxnResponseData>(ev, ctx);
+ };
+
+ void TKafkaTransactionsCoordinator::Handle(TEvents::TEvPoison::TPtr&, const TActorContext& ctx) {
+ KAFKA_LOG_D("Got poison pill, killing all transaction actors");
+ for (auto& [transactionalId, txnActorId]: TxnActorByTransactionalId) {
+ ctx.Send(txnActorId, new TEvents::TEvPoison());
+ KAFKA_LOG_D(TStringBuilder() << "Sent poison pill to transaction actor for transactionalId " << transactionalId);
+ }
+ PassAway();
+ };
+
+ void TKafkaTransactionsCoordinator::PassAway() {
+ KAFKA_LOG_D("Killing myself");
+ TBase::PassAway();
+ };
+
+ // Validates producer's id and epoch
+ // If valid: proxies requests to the relevant TKafkaTransactionActor
+ // If outdated or not initialized: returns PRODUCER_FENCED error
+ template<class ErrorResponseType, class EventType>
+ void TKafkaTransactionsCoordinator::HandleTransactionalRequest(TAutoPtr<TEventHandle<EventType>>& evHandle, const TActorContext& ctx) {
+ EventType* ev = evHandle->Get();
+ KAFKA_LOG_D(TStringBuilder() << "Received message for transactionalId " << ev->Request->TransactionalId->c_str() << " and ApiKey " << ev->Request->ApiKey());
+
+ // create helper struct to simplify methods interaction
+ auto txnRequest = TTransactionalRequest(
+ ev->Request->TransactionalId->c_str(),
+ TProducerState(ev->Request->ProducerId, ev->Request->ProducerEpoch),
+ ev->CorrelationId,
+ ev->ConnectionId
+ );
+ if (auto error = GetTxnRequestError(txnRequest)) {
+ SendProducerFencedResponse<ErrorResponseType>(ev->Request, error->data(), txnRequest);
+ } else {
+ ForwardToTransactionActor(evHandle, ctx);
+ }
+ };
+
+ template<class ErrorResponseType, class RequestType>
+ void TKafkaTransactionsCoordinator::SendProducerFencedResponse(TMessagePtr<RequestType> kafkaRequest, const TString& error, const TTransactionalRequest& txnRequestDetails) {
+ KAFKA_LOG_W(error);
+ std::shared_ptr<ErrorResponseType> response = BuildProducerFencedResponse<ErrorResponseType>(kafkaRequest);
+ Send(txnRequestDetails.ConnectionId, new TEvKafka::TEvResponse(txnRequestDetails.CorrelationId, response, EKafkaErrors::PRODUCER_FENCED));
+ };
+
+ template<class EventType>
+ void TKafkaTransactionsCoordinator::ForwardToTransactionActor(TAutoPtr<TEventHandle<EventType>>& evHandle, const TActorContext& ctx) {
+ EventType* ev = evHandle->Get();
+
+ TActorId txnActorId;
+ if (TxnActorByTransactionalId.contains(ev->Request->TransactionalId->c_str())) {
+ txnActorId = TxnActorByTransactionalId[ev->Request->TransactionalId->c_str()];
+ } else {
+ txnActorId = ctx.Register(new TKafkaTransactionActor());
+ TxnActorByTransactionalId[ev->Request->TransactionalId->c_str()] = txnActorId;
+ KAFKA_LOG_D(TStringBuilder() << "Registered TKafkaTransactionActor with id " << txnActorId << " for transactionalId " << ev->Request->TransactionalId->c_str() << " and ApiKey " << ev->Request->ApiKey());
+ }
+ TAutoPtr<IEventHandle> tmpPtr = evHandle.Release();
+ ctx.Forward(tmpPtr, txnActorId);
+ KAFKA_LOG_D(TStringBuilder() << "Forwarded message to TKafkaTransactionActor with id " << txnActorId << " for transactionalId " << ev->Request->TransactionalId->c_str() << " and ApiKey " << ev->Request->ApiKey());
+ };
+
+ template<class ResponseType, class RequestType>
+ std::shared_ptr<ResponseType> TKafkaTransactionsCoordinator::BuildProducerFencedResponse(TMessagePtr<RequestType> request) {
+ Y_UNUSED(request); // used in other template functions
+ auto response = std::make_shared<ResponseType>();
+ response->ErrorCode = EKafkaErrors::PRODUCER_FENCED;
+ return response;
+ };
+
+ template<>
+ std::shared_ptr<TAddPartitionsToTxnResponseData> TKafkaTransactionsCoordinator::BuildProducerFencedResponse<TAddPartitionsToTxnResponseData, TAddPartitionsToTxnRequestData>(TMessagePtr<TAddPartitionsToTxnRequestData> request) {
+ auto response = std::make_shared<TAddPartitionsToTxnResponseData>();
+ std::vector<TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult> topicsResponse;
+ topicsResponse.reserve(request->Topics.size());
+ for (const auto& requestTopic : request->Topics) {
+ TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult topicInResponse;
+ topicInResponse.Name = requestTopic.Name;
+ topicInResponse.Results.reserve(requestTopic.Partitions.size());
+ for (const auto& requestPartition : requestTopic.Partitions) {
+ TAddPartitionsToTxnResponseData::TAddPartitionsToTxnTopicResult::TAddPartitionsToTxnPartitionResult partitionInResponse;
+ partitionInResponse.PartitionIndex = requestPartition;
+ partitionInResponse.ErrorCode = EKafkaErrors::PRODUCER_FENCED;
+ topicInResponse.Results.push_back(partitionInResponse);
+ }
+ topicsResponse.push_back(topicInResponse);
+ }
+ response->Results = std::move(topicsResponse);
+ return response;
+ };
+
+ template<>
+ std::shared_ptr<TTxnOffsetCommitResponseData> TKafkaTransactionsCoordinator::BuildProducerFencedResponse<TTxnOffsetCommitResponseData, TTxnOffsetCommitRequestData>(TMessagePtr<TTxnOffsetCommitRequestData> request) {
+ auto response = std::make_shared<TTxnOffsetCommitResponseData>();
+ std::vector<TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic> topicsResponse;
+ topicsResponse.reserve(request->Topics.size());
+ for (const auto& requestTopic : request->Topics) {
+ TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic topicInResponse;
+ topicInResponse.Name = requestTopic.Name;
+ topicInResponse.Partitions.reserve(requestTopic.Partitions.size());
+ for (const auto& requestPartition : requestTopic.Partitions) {
+ TTxnOffsetCommitResponseData::TTxnOffsetCommitResponseTopic::TTxnOffsetCommitResponsePartition partitionInResponse;
+ partitionInResponse.PartitionIndex = requestPartition.PartitionIndex;
+ partitionInResponse.ErrorCode = EKafkaErrors::PRODUCER_FENCED;
+ topicInResponse.Partitions.push_back(partitionInResponse);
+ }
+ topicsResponse.push_back(topicInResponse);
+ }
+ response->Topics = std::move(topicsResponse);;
+ return response;
+ };
+
+ bool TKafkaTransactionsCoordinator::NewProducerStateIsOutdated(const TProducerState& currentProducerState, const TProducerState& newProducerState) {
+ bool producerIdAlreadyGreater = currentProducerState.Id > newProducerState.Id;
+ bool producerIdsAreEqual = currentProducerState.Id == newProducerState.Id;
+ bool epochAlreadyGreater = currentProducerState.Epoch > newProducerState.Epoch;
+ return producerIdAlreadyGreater || (producerIdsAreEqual && epochAlreadyGreater);
+ };
+
+ TMaybe<TString> TKafkaTransactionsCoordinator::GetTxnRequestError(const TTransactionalRequest& request) {
+ if (!ProducersByTransactionalId.contains(request.TransactionalId)) {
+ return TStringBuilder() << "Producer with transactional id " << request.TransactionalId << " was not yet initialized.";
+ } else if (NewProducerStateIsOutdated(ProducersByTransactionalId[request.TransactionalId], request.ProducerState)) {
+ return GetProducerIsOutdatedError(request.TransactionalId, ProducersByTransactionalId[request.TransactionalId], request.ProducerState);
+ } else {
+ return {};
+ }
+ };
+
+ TString TKafkaTransactionsCoordinator::GetProducerIsOutdatedError(const TString& transactionalId, const TProducerState& currentProducerState, const TProducerState& newProducerState) {
+ return TStringBuilder() << "Producer with transactional id " << transactionalId <<
+ " is outdated. Current producer id is " << currentProducerState.Id <<
+ " and producer epoch is " << currentProducerState.Epoch << ". Requested producer id is " << newProducerState.Id <<
+ " and producer epoch is " << newProducerState.Epoch << ".";
+ };
+} // namespace NKafka \ No newline at end of file
diff --git a/ydb/core/kafka_proxy/kafka_transactions_coordinator.h b/ydb/core/kafka_proxy/kafka_transactions_coordinator.h
new file mode 100644
index 0000000000..fd0551bbd2
--- /dev/null
+++ b/ydb/core/kafka_proxy/kafka_transactions_coordinator.h
@@ -0,0 +1,92 @@
+#pragma once
+
+#include "kafka_events.h"
+
+#include <ydb/library/actors/core/actor_bootstrapped.h>
+
+
+namespace NKafka {
+ /*
+ This class serves as a proxy between Kafka SDK and TKafkaTransactionActor
+
+ It validates that requester is not a zombie (by checking request's transactional_id+producer_id+producer_epoch)
+ It does so by maintaining a set of the most relevant for this node transactional_id+producer_id+producer_epoch.
+ Receives updates from init_producer_id_actors.
+ */
+ class TKafkaTransactionsCoordinator : public NActors::TActorBootstrapped<TKafkaTransactionsCoordinator> {
+
+ using TBase = NActors::TActorBootstrapped<TKafkaTransactionsCoordinator>;
+
+ struct TProducerState {
+ i64 Id;
+ i32 Epoch;
+ };
+
+ struct TTransactionalRequest {
+ TString TransactionalId;
+ TProducerState ProducerState;
+ ui64 CorrelationId;
+ TActorId ConnectionId;
+ };
+
+ public:
+ void Bootstrap(const TActorContext&) {
+ TBase::Become(&TKafkaTransactionsCoordinator::StateWork);
+ }
+
+ TStringBuilder LogPrefix() const {
+ return TStringBuilder() << "KafkaTransactionsCoordinator ";
+ }
+
+ void PassAway() override;
+ private:
+ STFUNC(StateWork) {
+ switch (ev->GetTypeRewrite()) {
+ HFunc(TEvKafka::TEvSaveTxnProducerRequest, Handle);
+ HFunc(TEvKafka::TEvAddPartitionsToTxnRequest, Handle);
+ HFunc(TEvKafka::TEvAddOffsetsToTxnRequest, Handle);
+ HFunc(TEvKafka::TEvTxnOffsetCommitRequest, Handle);
+ HFunc(TEvKafka::TEvEndTxnRequest, Handle);
+ HFunc(TEvents::TEvPoison, Handle);
+ }
+ }
+
+ // Handles new transactional_id+producer_id+producer_epoch: saves for validation of future requests
+ void Handle(TEvKafka::TEvSaveTxnProducerRequest::TPtr& ev, const TActorContext& ctx);
+
+ // Proxies requests to the relevant TKafkaTransactionActor
+ void Handle(TEvKafka::TEvAddPartitionsToTxnRequest::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvKafka::TEvAddOffsetsToTxnRequest::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvKafka::TEvTxnOffsetCommitRequest::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvKafka::TEvEndTxnRequest::TPtr& ev, const TActorContext& ctx);
+ // Will kill all txn actors
+ void Handle(TEvents::TEvPoison::TPtr& ev, const TActorContext& ctx);
+
+ template<class ErrorResponseType, class EventType>
+ void HandleTransactionalRequest(TAutoPtr<TEventHandle<EventType>>& evHandle, const TActorContext& ctx);
+ template<class ErrorResponseType, class RequestType>
+ void SendProducerFencedResponse(TMessagePtr<RequestType> kafkaRequest, const TString& error, const TTransactionalRequest& request);
+ template<class EventType>
+ void ForwardToTransactionActor(TAutoPtr<TEventHandle<EventType>>& evHandle, const TActorContext& ctx);
+
+ template<class ResponseType, class RequestType>
+ std::shared_ptr<ResponseType> BuildProducerFencedResponse(TMessagePtr<RequestType> request);
+
+ bool NewProducerStateIsOutdated(const TProducerState& currentProducerState, const TProducerState& newProducerState);
+ TMaybe<TString> GetTxnRequestError(const TTransactionalRequest& request);
+ TString GetProducerIsOutdatedError(const TString& transactionalId, const TProducerState& currentProducerState, const TProducerState& newProducerState);
+
+ std::unordered_map<TString, TProducerState> ProducersByTransactionalId;
+ std::unordered_map<TString, TActorId> TxnActorByTransactionalId;
+ };
+
+ inline NActors::IActor* CreateKafkaTransactionsCoordinator() {
+ return new TKafkaTransactionsCoordinator();
+ };
+
+ inline TActorId MakeKafkaTransactionsServiceID() {
+ static const char x[12] = "kafka_txns";
+ return TActorId(0, TStringBuf(x, 12));
+ };
+
+} // namespace NKafka \ No newline at end of file
diff --git a/ydb/core/kafka_proxy/ut/ut_protocol.cpp b/ydb/core/kafka_proxy/ut/ut_protocol.cpp
index 607dc9a9c2..6770f9b7be 100644
--- a/ydb/core/kafka_proxy/ut/ut_protocol.cpp
+++ b/ydb/core/kafka_proxy/ut/ut_protocol.cpp
@@ -2111,7 +2111,7 @@ Y_UNIT_TEST_SUITE(KafkaProtocol) {
UNIT_ASSERT_VALUES_UNEQUAL(resp1->ProducerId, resp2->ProducerId);
}
- Y_UNIT_TEST(InitProducerId_forNewTransactionalIdShouldReturnRandomInt) {
+ Y_UNIT_TEST(InitProducerId_forNewTransactionalIdShouldReturnIncrementingInt) {
TInsecureTestServer testServer;
TKafkaTestClient kafkaClient(testServer.Port);
@@ -2126,10 +2126,8 @@ Y_UNIT_TEST_SUITE(KafkaProtocol) {
UNIT_ASSERT_VALUES_EQUAL(resp1->ProducerEpoch, 0);
// validate second response
UNIT_ASSERT_VALUES_EQUAL(resp2->ErrorCode, EKafkaErrors::NONE_ERROR);
- UNIT_ASSERT_GT(resp2->ProducerId, 0);
+ UNIT_ASSERT_GT(resp2->ProducerId, resp1->ProducerId);
UNIT_ASSERT_VALUES_EQUAL(resp2->ProducerEpoch, 0);
- // validate different values for different responses
- UNIT_ASSERT_VALUES_UNEQUAL(resp1->ProducerId, resp2->ProducerId);
}
Y_UNIT_TEST(InitProducerId_forSqlInjectionShouldReturnWithoutDropingDatabase) {
diff --git a/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp b/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp
new file mode 100644
index 0000000000..3dbe77d737
--- /dev/null
+++ b/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp
@@ -0,0 +1,310 @@
+#include <ydb/core/kafka_proxy/kafka_transactions_coordinator.h>
+#include <ydb/core/kafka_proxy/kafka_events.h>
+
+#include <library/cpp/testing/unittest/registar.h>
+#include <util/generic/fwd.h>
+#include <ydb/core/persqueue/ut/common/pq_ut_common.h>
+
+namespace {
+ class TFixture : public NUnitTest::TBaseFixture {
+ struct TopicPartitions {
+ TString Topic;
+ TVector<ui32> Partitions;
+ };
+
+ public:
+ TMaybe<NKikimr::NPQ::TTestContext> Ctx;
+ TActorId ActorId;
+
+ void SetUp(NUnitTest::TTestContext&) override {
+ Ctx.ConstructInPlace();
+
+ Ctx->Prepare();
+ Ctx->Runtime->SetScheduledLimit(5'000);
+ Ctx->Runtime->SetLogPriority(NKikimrServices::KAFKA_PROXY, NLog::PRI_DEBUG);
+ ActorId = Ctx->Runtime->Register(new NKafka::TKafkaTransactionsCoordinator());
+ }
+
+ void TearDown(NUnitTest::TTestContext&) override {
+ Ctx->Finalize();
+ }
+
+ THolder<NKafka::TEvKafka::TEvSaveTxnProducerResponse> SaveTxnProducer(const TString& txnId, i64 producerId, i16 producerEpoch) {
+ auto request = MakeHolder<NKafka::TEvKafka::TEvSaveTxnProducerRequest>(txnId, producerId, producerEpoch);
+ Ctx->Runtime->SingleSys()->Send(new IEventHandle(ActorId, Ctx->Edge, request.Release()));
+ auto response = Ctx->Runtime->GrabEdgeEvent<NKafka::TEvKafka::TEvSaveTxnProducerResponse>();
+ UNIT_ASSERT(response != nullptr);
+ return response;
+ }
+
+ void SendAddPartitionsToTxnRequest(ui64 correlationId, const TString& txnId, i64 producerId, i16 producerEpoch, std::vector<TopicPartitions> topicPartitions) {
+ auto message = std::make_shared<NKafka::TAddPartitionsToTxnRequestData>();
+ message->TransactionalId = txnId;
+ message->ProducerId = producerId;
+ message->ProducerEpoch = producerEpoch;
+ for (const auto& tp : topicPartitions) {
+ NKafka::TAddPartitionsToTxnRequestData::TAddPartitionsToTxnTopic topic;
+ topic.Name = tp.Topic;
+ for (auto partitionIndex : tp.Partitions) {
+ topic.Partitions.push_back(partitionIndex);
+ }
+ message->Topics.push_back(topic);
+ }
+ auto event = MakeHolder<NKafka::TEvKafka::TEvAddPartitionsToTxnRequest>(correlationId, NKafka::TMessagePtr<NKafka::TAddPartitionsToTxnRequestData>({}, message), Ctx->Edge);
+ Ctx->Runtime->SingleSys()->Send(new IEventHandle(ActorId, Ctx->Edge, event.Release()));
+ }
+
+ void SendTxnOffsetCommitRequest(ui64 correlationId, const TString& txnId, i64 producerId, i16 producerEpoch, std::vector<TopicPartitions> topicPartitions) {
+ auto message = std::make_shared<NKafka::TTxnOffsetCommitRequestData>();
+ message->TransactionalId = txnId;
+ message->ProducerId = producerId;
+ message->ProducerEpoch = producerEpoch;
+ for (const auto& tp : topicPartitions) {
+ NKafka::TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic topic;
+ topic.Name = tp.Topic;
+ for (auto partitionIndex : tp.Partitions) {
+ NKafka::TTxnOffsetCommitRequestData::TTxnOffsetCommitRequestTopic::TTxnOffsetCommitRequestPartition partition;
+ partition.PartitionIndex = partitionIndex;
+ topic.Partitions.push_back(partition);
+ }
+ message->Topics.push_back(topic);
+ }
+ auto event = MakeHolder<NKafka::TEvKafka::TEvTxnOffsetCommitRequest>(correlationId, NKafka::TMessagePtr<NKafka::TTxnOffsetCommitRequestData>({}, message), Ctx->Edge);
+ Ctx->Runtime->SingleSys()->Send(new IEventHandle(ActorId, Ctx->Edge, event.Release()));
+ }
+
+ void SendEndTxnRequest(ui64 correlationId, const TString& txnId, i64 producerId, i16 producerEpoch) {
+ auto message = std::make_shared<NKafka::TEndTxnRequestData>();
+ message->TransactionalId = txnId;
+ message->ProducerId = producerId;
+ message->ProducerEpoch = producerEpoch;
+ auto event = MakeHolder<NKafka::TEvKafka::TEvEndTxnRequest>(correlationId, NKafka::TMessagePtr<NKafka::TEndTxnRequestData>({}, message), Ctx->Edge);
+ Ctx->Runtime->SingleSys()->Send(new IEventHandle(ActorId, Ctx->Edge, event.Release()));
+ }
+ };
+
+ Y_UNIT_TEST_SUITE_F(KafkaTransactionCoordinatorActor, TFixture) {
+ Y_UNIT_TEST(OnProducerInitializedEvent_ShouldRespondOkIfTxnProducerWasNotFound) {
+ auto response = SaveTxnProducer("my-tn-producer-1", 123, 0);
+
+ UNIT_ASSERT_EQUAL(response->Status, NKafka::TEvKafka::TEvSaveTxnProducerResponse::EStatus::OK);
+ }
+
+ Y_UNIT_TEST(OnProducerInitializedEvent_ShouldRespondOkIfTxnProducerWasFoundButEpochIsOlder) {
+ TString txnId = "my-tx-id";
+ i64 producerId = 123;
+
+ auto response1 = SaveTxnProducer(txnId, producerId, 0); // save old epoch
+ auto response2 = SaveTxnProducer(txnId, producerId, 1); // save newer epoch
+
+ UNIT_ASSERT_EQUAL(response1->Status, NKafka::TEvKafka::TEvSaveTxnProducerResponse::EStatus::OK);
+ UNIT_ASSERT_EQUAL(response2->Status, NKafka::TEvKafka::TEvSaveTxnProducerResponse::EStatus::OK);
+ }
+
+ // epoch overflown case
+ Y_UNIT_TEST(OnProducerInitializedEvent_ShouldRespondOkIfNewEpochIsLessButProducerIdIsNew) {
+ TString txnId = "my-tx-id";
+ i64 producerId = 123;
+
+ auto response1 = SaveTxnProducer(txnId, producerId, 10); // save old epoch
+ auto response2 = SaveTxnProducer(txnId, producerId + 1, 1);
+
+ UNIT_ASSERT_EQUAL(response1->Status, NKafka::TEvKafka::TEvSaveTxnProducerResponse::EStatus::OK);
+ UNIT_ASSERT_EQUAL(response2->Status, NKafka::TEvKafka::TEvSaveTxnProducerResponse::EStatus::OK);
+ }
+
+ // two concurrent clients sequentially inited producer id case
+ Y_UNIT_TEST(OnProducerInitializedEvent_ShouldRespondWithProducerFencedErrorIfNewEpochIsLessAndProducerIdIsTheSame) {
+ TString txnId = "my-tx-id";
+ i64 producerId = 123;
+
+ auto response1 = SaveTxnProducer(txnId, producerId, 10);
+ auto response2 = SaveTxnProducer(txnId, producerId, 9); // second request comes with stale epoch
+
+ UNIT_ASSERT_EQUAL(response1->Status, NKafka::TEvKafka::TEvSaveTxnProducerResponse::EStatus::OK);
+ UNIT_ASSERT_EQUAL(response2->Status, NKafka::TEvKafka::TEvSaveTxnProducerResponse::EStatus::PRODUCER_FENCED);
+ }
+
+ Y_UNIT_TEST(OnAnyTransactionalRequest_ShouldSendBack_PRODUCER_FENCED_ErrorIfThereIsNoTransactionalIdInState) {
+ ui64 correlationId = 123;
+ // no producer_initialized event was sent, thus actor knows nothing about any producer
+ SendEndTxnRequest(correlationId, "my-tx-id", 1, 0);
+
+ // will respond to edge, cause we provided edge actorId as a connectionId in SendEndTxnRequest
+ auto response = Ctx->Runtime->GrabEdgeEvent<NKafka::TEvKafka::TEvResponse>();
+
+ UNIT_ASSERT(response != nullptr);
+ UNIT_ASSERT_EQUAL(response->ErrorCode, NKafka::EKafkaErrors::PRODUCER_FENCED);
+ UNIT_ASSERT_VALUES_EQUAL(response->CorrelationId, correlationId);
+ }
+
+ Y_UNIT_TEST(OnAnyTransactionalRequest_ShouldSendBack_PRODUCER_FENCED_ErrorIfProducerEpochExpired) {
+ ui64 correlationId = 123;
+ TString txnId = "my-tx-id";
+ i64 producerId = 1;
+ i16 producerEpoch = 0;
+ auto saveResponse = SaveTxnProducer(txnId, producerId, producerEpoch + 1);
+ UNIT_ASSERT_EQUAL(saveResponse->Status, NKafka::TEvKafka::TEvSaveTxnProducerResponse::EStatus::OK);
+ SendEndTxnRequest(correlationId, txnId, producerId, producerEpoch);
+
+ // will respond to edge, cause we provided edge actorId as a connectionId in SendEndTxnRequest
+ auto response = Ctx->Runtime->GrabEdgeEvent<NKafka::TEvKafka::TEvResponse>();
+
+ UNIT_ASSERT(response != nullptr);
+ UNIT_ASSERT_EQUAL(response->ErrorCode, NKafka::EKafkaErrors::PRODUCER_FENCED);
+ UNIT_ASSERT_VALUES_EQUAL(response->CorrelationId, correlationId);
+ }
+
+ Y_UNIT_TEST(OnAnyTransactionalRequest_ShouldForwardItToTheRelevantTransactionalIdActorIfProducerIsValid) {
+ // send valid message
+ ui64 correlationId = 123;
+ TString txnId = "my-tx-id";
+ i64 producerId = 1;
+ i16 producerEpoch = 0;
+ auto saveResponse = SaveTxnProducer(txnId, producerId, producerEpoch);
+ UNIT_ASSERT_EQUAL(saveResponse->Status, NKafka::TEvKafka::TEvSaveTxnProducerResponse::EStatus::OK);
+ // observe TEvAddPartitionsToTxnRequest to a different actor
+ bool seenEvent = false;
+ ui32 eventCounter = 0;
+ auto observer = [&](TAutoPtr<IEventHandle>& input) {
+ if (auto* event = input->CastAsLocal<NKafka::TEvKafka::TEvEndTxnRequest>()) {
+ // there will be two events TEvEndTxnRequest:
+ // first: the one we dispatch in this test
+ // second: event forwarded by TKafkaTransactionCoordinatorActor
+ if (eventCounter == 1) {
+ UNIT_ASSERT_VALUES_EQUAL(event->Request->TransactionalId, txnId);
+ UNIT_ASSERT_VALUES_EQUAL(event->Request->ProducerId, producerId);
+ UNIT_ASSERT_VALUES_EQUAL(event->Request->ProducerEpoch, producerEpoch);
+ seenEvent = true;
+ } else {
+ eventCounter++;
+ }
+ }
+
+ return TTestActorRuntimeBase::EEventAction::PROCESS;
+ };
+ Ctx->Runtime->SetObserverFunc(observer);
+
+ SendEndTxnRequest(correlationId, txnId, producerId, producerEpoch);
+
+ TDispatchOptions options;
+ options.CustomFinalCondition = [&seenEvent]() {
+ return seenEvent;
+ };
+ UNIT_ASSERT(Ctx->Runtime->DispatchEvents(options));
+ }
+
+ Y_UNIT_TEST(OnAnyTransactionalRequest_ShouldForwardItToExistingTransactionActorIfProducerIsValid) {
+ // send valid message
+ ui64 correlationId = 123;
+ TString txnId = "my-tx-id";
+ i64 producerId = 1;
+ i16 producerEpoch = 0;
+ auto saveResponse = SaveTxnProducer(txnId, producerId, producerEpoch);
+ UNIT_ASSERT_EQUAL(saveResponse->Status, NKafka::TEvKafka::TEvSaveTxnProducerResponse::EStatus::OK);
+ // observe TEvAddPartitionsToTxnRequest to a different actor
+ bool seenEvent = false;
+ ui32 eventCounter = 0;
+ TActorId txnActorId;
+ auto observer = [&](TAutoPtr<IEventHandle>& input) {
+ if (auto* event = input->CastAsLocal<NKafka::TEvKafka::TEvEndTxnRequest>()) {
+ // There will be four events TEvEndTxnRequest. We need only two of them
+ // with recipient not equal to our TKafkaTransactionCoordinatorActor id.
+ // Those are event sent from TKafkaTransactionCoordinatorActor to TKafkaTransactionActor
+ if (input->Recipient != ActorId) {
+ if (eventCounter == 0) {
+ txnActorId = input->Recipient;
+ eventCounter++;
+ } else {
+ UNIT_ASSERT_VALUES_EQUAL(txnActorId, input->Recipient);
+ seenEvent = true;
+ }
+ }
+ }
+
+ return TTestActorRuntimeBase::EEventAction::PROCESS;
+ };
+ Ctx->Runtime->SetObserverFunc(observer);
+
+ SendEndTxnRequest(correlationId, txnId, producerId, producerEpoch);
+ SendEndTxnRequest(correlationId, txnId, producerId, producerEpoch);
+
+ TDispatchOptions options;
+ options.CustomFinalCondition = [&seenEvent]() {
+ return seenEvent;
+ };
+ UNIT_ASSERT(Ctx->Runtime->DispatchEvents(options));
+ }
+
+ Y_UNIT_TEST(OnAddPartitions_ShouldSendBack_PRODUCER_FENCED_ErrorIfProducerIsNotInitialized) {
+ ui64 correlationId = 123;
+ SendAddPartitionsToTxnRequest(correlationId, "my-tx-id", 1, 0, {{"topic1", {0, 1}}, {"topic2", {0}}});
+
+ // will respond to edge, cause we provided edge actorId as a connectionId in SendAddPartitionsToTxnRequest
+ auto response = Ctx->Runtime->GrabEdgeEvent<NKafka::TEvKafka::TEvResponse>();
+
+ UNIT_ASSERT(response != nullptr);
+ UNIT_ASSERT_EQUAL(response->ErrorCode, NKafka::EKafkaErrors::PRODUCER_FENCED);
+ UNIT_ASSERT_EQUAL(response->Response->ApiKey(), NKafka::EApiKey::ADD_PARTITIONS_TO_TXN);
+ const auto& message = static_cast<const NKafka::TAddPartitionsToTxnResponseData&>(*response->Response);
+ UNIT_ASSERT_VALUES_EQUAL(response->CorrelationId, correlationId);
+ UNIT_ASSERT_VALUES_EQUAL(message.Results.size(), 2);
+ UNIT_ASSERT_VALUES_EQUAL(message.Results[0].Name, "topic1");
+ UNIT_ASSERT_VALUES_EQUAL(message.Results[0].Results.size(), 2);
+ UNIT_ASSERT_VALUES_EQUAL(message.Results[0].Results[0].PartitionIndex, 0);
+ UNIT_ASSERT_EQUAL(message.Results[0].Results[0].ErrorCode, NKafka::EKafkaErrors::PRODUCER_FENCED);
+ UNIT_ASSERT_VALUES_EQUAL(message.Results[0].Results[1].PartitionIndex, 1);
+ UNIT_ASSERT_EQUAL(message.Results[0].Results[1].ErrorCode, NKafka::EKafkaErrors::PRODUCER_FENCED);
+ UNIT_ASSERT_VALUES_EQUAL(message.Results[1].Name, "topic2");
+ UNIT_ASSERT_VALUES_EQUAL(message.Results[1].Results.size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(message.Results[1].Results[0].PartitionIndex, 0);
+ UNIT_ASSERT_EQUAL(message.Results[1].Results[0].ErrorCode, NKafka::EKafkaErrors::PRODUCER_FENCED);
+ }
+
+ Y_UNIT_TEST(OnTxnOffsetCommit_ShouldSendBack_PRODUCER_FENCED_ErrorIfProducerIsNotInitialized) {
+ ui64 correlationId = 123;
+ SendTxnOffsetCommitRequest(correlationId, "my-tx-id", 1, 0, {{"topic1", {0, 1}}, {"topic2", {0}}});
+
+ // will respond to edge, cause we provided edge actorId as a connectionId in SendTxnOffsetCommitRequest
+ auto response = Ctx->Runtime->GrabEdgeEvent<NKafka::TEvKafka::TEvResponse>();
+
+ UNIT_ASSERT(response != nullptr);
+ UNIT_ASSERT_VALUES_EQUAL(response->CorrelationId, correlationId);
+ UNIT_ASSERT_EQUAL(response->Response->ApiKey(), NKafka::EApiKey::TXN_OFFSET_COMMIT);
+ const auto& message = static_cast<const NKafka::TTxnOffsetCommitResponseData&>(*response->Response);
+ UNIT_ASSERT_VALUES_EQUAL(message.Topics.size(), 2);
+ UNIT_ASSERT_VALUES_EQUAL(message.Topics[0].Name, "topic1");
+ UNIT_ASSERT_VALUES_EQUAL(message.Topics[0].Partitions.size(), 2);
+ UNIT_ASSERT_VALUES_EQUAL(message.Topics[0].Partitions[0].PartitionIndex, 0);
+ UNIT_ASSERT_EQUAL(message.Topics[0].Partitions[0].ErrorCode, NKafka::EKafkaErrors::PRODUCER_FENCED);
+ UNIT_ASSERT_VALUES_EQUAL(message.Topics[0].Partitions[1].PartitionIndex, 1);
+ UNIT_ASSERT_EQUAL(message.Topics[0].Partitions[1].ErrorCode, NKafka::EKafkaErrors::PRODUCER_FENCED);
+ UNIT_ASSERT_VALUES_EQUAL(message.Topics[1].Name, "topic2");
+ UNIT_ASSERT_VALUES_EQUAL(message.Topics[1].Partitions.size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(message.Topics[1].Partitions[0].PartitionIndex, 0);
+ UNIT_ASSERT_EQUAL(message.Topics[1].Partitions[0].ErrorCode, NKafka::EKafkaErrors::PRODUCER_FENCED);
+ }
+
+ Y_UNIT_TEST(AfterSecondInitializationOldTxnRequestsShouldBeFenced) {
+ ui64 correlationId = 123;
+ TString txnId = "my-tx-id";
+ i64 producerId = 1;
+ i16 producerEpoch = 0;
+ // first app initializes
+ auto saveResponse1 = SaveTxnProducer(txnId, producerId, producerEpoch);
+ UNIT_ASSERT_EQUAL(saveResponse1->Status, NKafka::TEvKafka::TEvSaveTxnProducerResponse::EStatus::OK);
+ // other app initializes with same transactional id
+ auto saveResponse2 = SaveTxnProducer(txnId, producerId, producerEpoch + 1);
+ UNIT_ASSERT_EQUAL(saveResponse2->Status, NKafka::TEvKafka::TEvSaveTxnProducerResponse::EStatus::OK);
+
+ // first app sends txn request
+ SendEndTxnRequest(correlationId, txnId, producerId, producerEpoch);
+ // will respond to edge, cause we provided edge actorId as a connectionId in SendEndTxnRequest
+ auto firstResponse = Ctx->Runtime->GrabEdgeEvent<NKafka::TEvKafka::TEvResponse>();
+
+ UNIT_ASSERT(firstResponse != nullptr);
+ UNIT_ASSERT_EQUAL(firstResponse->ErrorCode, NKafka::EKafkaErrors::PRODUCER_FENCED);
+ }
+ }
+
+} // namespace \ No newline at end of file
diff --git a/ydb/core/kafka_proxy/ut/ya.make b/ydb/core/kafka_proxy/ut/ya.make
index f07eeeb3e0..f16ee9f9bd 100644
--- a/ydb/core/kafka_proxy/ut/ya.make
+++ b/ydb/core/kafka_proxy/ut/ya.make
@@ -13,6 +13,7 @@ SRCS(
ut_serialization.cpp
metarequest_ut.cpp
port_discovery_ut.cpp
+ ut_transaction_coordinator.cpp
)
PEERDIR(
diff --git a/ydb/core/kafka_proxy/ya.make b/ydb/core/kafka_proxy/ya.make
index d14754db03..c84a57e542 100644
--- a/ydb/core/kafka_proxy/ya.make
+++ b/ydb/core/kafka_proxy/ya.make
@@ -40,6 +40,7 @@ SRCS(
kafka_consumer_groups_metadata_initializers.cpp
kafka_consumer_members_metadata_initializers.cpp
kafka_transactional_producers_initializers.cpp
+ kafka_transactions_coordinator.cpp
)
GENERATE_ENUM_SERIALIZATION(kafka.h)
diff --git a/ydb/core/kqp/common/kqp_yql.cpp b/ydb/core/kqp/common/kqp_yql.cpp
index 0ff2491c2a..050f527db9 100644
--- a/ydb/core/kqp/common/kqp_yql.cpp
+++ b/ydb/core/kqp/common/kqp_yql.cpp
@@ -158,10 +158,11 @@ TKqpReadTableSettings ParseInternal(const TCoNameValueTupleList& node) {
settings.ItemsLimit = tuple.Value().Cast().Ptr();
} else if (name == TKqpReadTableSettings::ReverseSettingName) {
YQL_ENSURE(tuple.Ref().ChildrenSize() == 1);
- settings.Reverse = true;
+ settings.SetSorting(ERequestSorting::DESC);
} else if (name == TKqpReadTableSettings::SortedSettingName) {
- YQL_ENSURE(tuple.Ref().ChildrenSize() == 1);
- settings.Sorted = true;
+ if (settings.GetSorting() == ERequestSorting::NONE) {
+ settings.SetSorting(ERequestSorting::ASC);
+ }
} else if (name == TKqpReadTableSettings::SequentialSettingName) {
YQL_ENSURE(tuple.Ref().ChildrenSize() == 2);
settings.SequentialInFlight = FromString<ui64>(tuple.Value().Cast<TCoAtom>().Value());
@@ -224,7 +225,7 @@ NNodes::TCoNameValueTupleList TKqpReadTableSettings::BuildNode(TExprContext& ctx
.Done());
}
- if (Reverse) {
+ if (IsReverse()) {
settings.emplace_back(
Build<TCoNameValueTuple>(ctx, pos)
.Name()
@@ -240,7 +241,7 @@ NNodes::TCoNameValueTupleList TKqpReadTableSettings::BuildNode(TExprContext& ctx
.Done());
}
- if (Sorted) {
+ if (IsSorted()) {
settings.emplace_back(
Build<TCoNameValueTuple>(ctx, pos)
.Name()
diff --git a/ydb/core/kqp/common/kqp_yql.h b/ydb/core/kqp/common/kqp_yql.h
index dcff52e18d..4372406e1c 100644
--- a/ydb/core/kqp/common/kqp_yql.h
+++ b/ydb/core/kqp/common/kqp_yql.h
@@ -72,7 +72,35 @@ struct TKqpStreamLookupSettings {
static TKqpStreamLookupSettings Parse(const NNodes::TCoNameValueTupleList& node);
};
-struct TKqpReadTableSettings {
+enum class ERequestSorting {
+ NONE = 0,
+ ASC,
+ DESC
+};
+
+template <ERequestSorting DefaultValue = ERequestSorting::NONE>
+class TSortingOperator {
+private:
+ ERequestSorting Sorting = DefaultValue;
+public:
+ void SetSorting(const ERequestSorting sorting) {
+ Sorting = sorting;
+ }
+ ERequestSorting GetSorting() const {
+ return Sorting;
+ }
+
+ bool IsSorted() const {
+ return Sorting != ERequestSorting::NONE;
+ }
+
+ bool IsReverse() const {
+ return Sorting == ERequestSorting::DESC;
+ }
+};
+
+struct TKqpReadTableSettings: public TSortingOperator<ERequestSorting::NONE> {
+public:
static constexpr TStringBuf SkipNullKeysSettingName = "SkipNullKeys";
static constexpr TStringBuf ItemsLimitSettingName = "ItemsLimit";
static constexpr TStringBuf ReverseSettingName = "Reverse";
@@ -84,16 +112,12 @@ struct TKqpReadTableSettings {
TVector<TString> SkipNullKeys;
TExprNode::TPtr ItemsLimit;
- bool Reverse = false;
- bool Sorted = false;
TMaybe<ui64> SequentialInFlight;
TMaybe<ui64> TabletId;
bool ForcePrimary = false;
void AddSkipNullKey(const TString& key);
void SetItemsLimit(const TExprNode::TPtr& expr) { ItemsLimit = expr; }
- void SetReverse() { Reverse = true; }
- void SetSorted() { Sorted = true; }
bool operator == (const TKqpReadTableSettings&) const = default;
diff --git a/ydb/core/kqp/common/simple/kqp_event_ids.h b/ydb/core/kqp/common/simple/kqp_event_ids.h
index a544080064..b9ad1addb1 100644
--- a/ydb/core/kqp/common/simple/kqp_event_ids.h
+++ b/ydb/core/kqp/common/simple/kqp_event_ids.h
@@ -94,6 +94,7 @@ struct TKqpComputeEvents {
EvScanInitActor,
EvRemoteScanData,
EvRemoteScanDataAck,
+ EvScanPing,
};
static_assert(Unused0 == EventSpaceBegin(TKikimrEvents::ES_KQP) + 200);
diff --git a/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.cpp b/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.cpp
index 7698856be9..0e52f9d5a4 100644
--- a/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.cpp
+++ b/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.cpp
@@ -170,6 +170,7 @@ public:
runtimeSettings.ExtraMemoryAllocationPool = args.MemoryPool;
runtimeSettings.UseSpilling = args.WithSpilling;
runtimeSettings.StatsMode = args.StatsMode;
+ runtimeSettings.WithProgressStats = args.WithProgressStats;
if (runtimeSettings.UseSpilling) {
args.Task->SetEnableSpilling(runtimeSettings.UseSpilling);
diff --git a/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.h b/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.h
index 362c9f0ee4..14ea909b22 100644
--- a/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.h
+++ b/ydb/core/kqp/compute_actor/kqp_compute_actor_factory.h
@@ -122,6 +122,7 @@ public:
const NKikimr::NKqp::NRm::EKqpMemoryPool MemoryPool;
const bool WithSpilling;
const NYql::NDqProto::EDqStatsMode StatsMode;
+ const bool WithProgressStats;
const TInstant& Deadline;
const bool ShareMailbox;
const TMaybe<NYql::NDqProto::TRlPath>& RlPath;
diff --git a/ydb/core/kqp/compute_actor/kqp_compute_events.h b/ydb/core/kqp/compute_actor/kqp_compute_events.h
index 4b9d3f2924..cdb0496fe4 100644
--- a/ydb/core/kqp/compute_actor/kqp_compute_events.h
+++ b/ydb/core/kqp/compute_actor/kqp_compute_events.h
@@ -250,16 +250,22 @@ struct TEvKqpCompute {
}
};
+ struct TEvScanPing : public NActors::TEventPB<TEvScanPing, NKikimrKqp::TEvScanPing,
+ TKqpComputeEvents::EvScanPing>
+ {
+ };
+
struct TEvScanInitActor : public NActors::TEventPB<TEvScanInitActor, NKikimrKqp::TEvScanInitActor,
TKqpComputeEvents::EvScanInitActor>
{
TEvScanInitActor() = default;
- TEvScanInitActor(ui64 scanId, const NActors::TActorId& scanActor, ui32 generation, const ui64 tabletId) {
+ TEvScanInitActor(ui64 scanId, const NActors::TActorId& scanActor, ui32 generation, const ui64 tabletId, bool allowPings = false) {
Record.SetScanId(scanId);
ActorIdToProto(scanActor, Record.MutableScanActorId());
Record.SetGeneration(generation);
Record.SetTabletId(tabletId);
+ Record.SetAllowPings(allowPings);
}
};
diff --git a/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.cpp b/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.cpp
index 7eab5ef781..1b408cdf6f 100644
--- a/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.cpp
+++ b/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.cpp
@@ -27,10 +27,9 @@ std::vector<std::unique_ptr<TComputeTaskData>> TShardScannerInfo::OnReceiveData(
AFL_ENSURE(data.Finished);
result.emplace_back(std::make_unique<TComputeTaskData>(selfPtr, std::make_unique<TEvScanExchange::TEvSendData>(TabletId, data.LocksInfo)));
} else if (data.SplittedBatches.size() > 1) {
- ui32 idx = 0;
AFL_ENSURE(data.ArrowBatch);
for (auto&& i : data.SplittedBatches) {
- result.emplace_back(std::make_unique<TComputeTaskData>(selfPtr, std::make_unique<TEvScanExchange::TEvSendData>(data.ArrowBatch, TabletId, std::move(i), data.LocksInfo), idx++));
+ result.emplace_back(std::make_unique<TComputeTaskData>(selfPtr, std::make_unique<TEvScanExchange::TEvSendData>(data.ArrowBatch, TabletId, std::move(i), data.LocksInfo)));
}
} else if (data.ArrowBatch) {
result.emplace_back(std::make_unique<TComputeTaskData>(selfPtr, std::make_unique<TEvScanExchange::TEvSendData>(data.ArrowBatch, TabletId, data.LocksInfo)));
diff --git a/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.h b/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.h
index 67b7ff64be..bcc0e28fe4 100644
--- a/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.h
+++ b/ydb/core/kqp/compute_actor/kqp_scan_compute_manager.h
@@ -32,6 +32,7 @@ private:
const ui64 FreeSpace = (ui64)8 << 20;
bool NeedAck = true;
bool Finished = false;
+ bool AllowPings = false;
void DoAck() {
if (Finished) {
@@ -105,10 +106,17 @@ public:
return !ActorId.has_value();
}
- void Start(const TActorId& actorId) {
+ void PingIfNeeded() {
+ if (AllowPings && !!ActorId) {
+ NActors::TActivationContext::AsActorContext().Send(*ActorId, new TEvKqpCompute::TEvScanPing());
+ }
+ }
+
+ void Start(const TActorId& actorId, bool allowPings) {
AFL_DEBUG(NKikimrServices::KQP_COMPUTE)("event", "start_scanner")("actor_id", actorId);
AFL_ENSURE(!ActorId);
ActorId = actorId;
+ AllowPings = allowPings;
DoAck();
}
@@ -284,6 +292,12 @@ public:
}
}
+ void PingAllScanners() {
+ for (auto&& itTablet : ShardScanners) {
+ itTablet.second->PingIfNeeded();
+ }
+ }
+
std::shared_ptr<TShardState> GetShardStateByActorId(const NActors::TActorId& actorId) const {
auto it = ShardsByActorId.find(actorId);
if (it == ShardsByActorId.end()) {
@@ -307,7 +321,7 @@ public:
}
}
- void RegisterScannerActor(const ui64 tabletId, const ui64 generation, const TActorId& scanActorId) {
+ void RegisterScannerActor(const ui64 tabletId, const ui64 generation, const TActorId& scanActorId, bool allowPings) {
auto state = GetShardState(tabletId);
if (!state || generation != state->Generation) {
AFL_DEBUG(NKikimrServices::KQP_COMPUTE)("event", "register_scanner_actor_dropped")
@@ -326,7 +340,7 @@ public:
state->ResetRetry();
AFL_ENSURE(ShardsByActorId.emplace(scanActorId, state).second);
- GetShardScannerVerified(tabletId)->Start(scanActorId);
+ GetShardScannerVerified(tabletId)->Start(scanActorId, allowPings);
}
void StartScanner(TShardState& state) {
diff --git a/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.cpp b/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.cpp
index 0a383b154a..605a7e2613 100644
--- a/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.cpp
+++ b/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.cpp
@@ -1,6 +1,7 @@
#include "kqp_scan_fetcher_actor.h"
#include <ydb/library/wilson_ids/wilson.h>
#include <ydb/core/kqp/common/kqp_resolve.h>
+#include <ydb/core/kqp/common/kqp_yql.h>
#include <ydb/core/tx/datashard/range_ops.h>
#include <ydb/core/actorlib_impl/long_timer.h>
#include <ydb/core/scheme/scheme_types_proto.h>
@@ -82,6 +83,7 @@ void TKqpScanFetcherActor::Bootstrap() {
AFL_DEBUG(NKikimrServices::KQP_COMPUTE)("event", "bootstrap")("compute", ComputeActorIds.size())("shards", PendingShards.size());
StartTableScan();
Become(&TKqpScanFetcherActor::StateFunc);
+ Schedule(TDuration::Seconds(30), new NActors::TEvents::TEvWakeup());
}
void TKqpScanFetcherActor::HandleExecute(TEvScanExchange::TEvAckData::TPtr& ev) {
@@ -110,7 +112,7 @@ void TKqpScanFetcherActor::HandleExecute(TEvKqpCompute::TEvScanInitActor::TPtr&
}
auto& msg = ev->Get()->Record;
auto scanActorId = ActorIdFromProto(msg.GetScanActorId());
- InFlightShards.RegisterScannerActor(msg.GetTabletId(), msg.GetGeneration(), scanActorId);
+ InFlightShards.RegisterScannerActor(msg.GetTabletId(), msg.GetGeneration(), scanActorId, msg.GetAllowPings());
}
void TKqpScanFetcherActor::HandleExecute(TEvKqpCompute::TEvScanData::TPtr& ev) {
@@ -461,7 +463,15 @@ std::unique_ptr<NKikimr::TEvDataShard::TEvKqpScan> TKqpScanFetcherActor::BuildEv
ev->Record.SetGeneration(gen);
- ev->Record.SetReverse(Meta.GetReverse());
+ if (Meta.HasOptionalSorting()) {
+ if (Meta.GetOptionalSorting() == (ui32)ERequestSorting::DESC) {
+ ev->Record.SetReverse(true);
+ } else if (Meta.GetOptionalSorting() == (ui32)ERequestSorting::ASC) {
+ ev->Record.SetReverse(false);
+ }
+ } else {
+ ev->Record.SetReverse(Meta.GetReverse());
+ }
ev->Record.SetItemsLimit(Meta.GetItemsLimit());
if (Meta.GroupByColumnNamesSize()) {
@@ -676,4 +686,9 @@ void TKqpScanFetcherActor::CheckFinish() {
}
}
+void TKqpScanFetcherActor::HandleExecute(NActors::TEvents::TEvWakeup::TPtr&) {
+ InFlightShards.PingAllScanners();
+ Schedule(TDuration::Seconds(30), new NActors::TEvents::TEvWakeup());
+}
+
}
diff --git a/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.h b/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.h
index 73ead0a5ef..18d6e05003 100644
--- a/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.h
+++ b/ydb/core/kqp/compute_actor/kqp_scan_fetcher_actor.h
@@ -82,6 +82,7 @@ public:
hFunc(TEvInterconnect::TEvNodeDisconnected, HandleExecute);
hFunc(TEvScanExchange::TEvTerminateFromCompute, HandleExecute);
hFunc(TEvScanExchange::TEvAckData, HandleExecute);
+ hFunc(NActors::TEvents::TEvWakeup, HandleExecute);
IgnoreFunc(TEvInterconnect::TEvNodeConnected);
IgnoreFunc(TEvTxProxySchemeCache::TEvInvalidateTableResult);
default:
@@ -97,6 +98,8 @@ public:
void HandleExecute(TEvScanExchange::TEvTerminateFromCompute::TPtr& ev);
+ void HandleExecute(NActors::TEvents::TEvWakeup::TPtr& ev);
+
private:
void CheckFinish();
diff --git a/ydb/core/kqp/executer_actor/kqp_executer_impl.h b/ydb/core/kqp/executer_actor/kqp_executer_impl.h
index f8f260b0f7..11e34f906a 100644
--- a/ydb/core/kqp/executer_actor/kqp_executer_impl.h
+++ b/ydb/core/kqp/executer_actor/kqp_executer_impl.h
@@ -576,8 +576,8 @@ protected:
ExecuterStateSpan = NWilson::TSpan(TWilsonKqp::ExecuterTableResolve, ExecuterSpan.GetTraceId(), "WaitForTableResolve", NWilson::EFlags::AUTO_END);
- auto kqpTableResolver = CreateKqpTableResolver(this->SelfId(), TxId, UserToken, Request.Transactions,
- TasksGraph);
+ FillKqpTasksGraphStages(TasksGraph, Request.Transactions);
+ auto kqpTableResolver = CreateKqpTableResolver(this->SelfId(), TxId, UserToken, TasksGraph);
KqpTableResolverId = this->RegisterWithSameMailbox(kqpTableResolver);
LOG_T("Got request, become WaitResolveState");
@@ -910,7 +910,7 @@ protected:
auto readSettings = ExtractReadSettings(op, stageInfo, HolderFactory(), TypeEnv());
task.Meta.Reads.ConstructInPlace();
task.Meta.Reads->emplace_back(std::move(readInfo));
- task.Meta.ReadInfo.Reverse = readSettings.Reverse;
+ task.Meta.ReadInfo.SetSorting(readSettings.GetSorting());
task.Meta.Type = TTaskMeta::TTaskType::Compute;
FillSecureParamsFromStage(task.Meta.SecureParams, stage);
@@ -1439,18 +1439,17 @@ protected:
}
}
- void FillReadInfo(TTaskMeta& taskMeta, ui64 itemsLimit, bool reverse, bool sorted) const
+ void FillReadInfo(TTaskMeta& taskMeta, ui64 itemsLimit, const NYql::ERequestSorting sorting) const
{
if (taskMeta.Reads && !taskMeta.Reads.GetRef().empty()) {
// Validate parameters
YQL_ENSURE(taskMeta.ReadInfo.ItemsLimit == itemsLimit);
- YQL_ENSURE(taskMeta.ReadInfo.Reverse == reverse);
+ YQL_ENSURE(taskMeta.ReadInfo.GetSorting() == sorting);
return;
}
taskMeta.ReadInfo.ItemsLimit = itemsLimit;
- taskMeta.ReadInfo.Reverse = reverse;
- taskMeta.ReadInfo.Sorted = sorted;
+ taskMeta.ReadInfo.SetSorting(sorting);
taskMeta.ReadInfo.ReadType = TTaskMeta::TReadInfo::EReadType::Rows;
}
@@ -1524,7 +1523,7 @@ protected:
readInfo.ShardId = shardId;
}
- FillReadInfo(meta, readSettings.ItemsLimit, readSettings.Reverse, readSettings.Sorted);
+ FillReadInfo(meta, readSettings.ItemsLimit, readSettings.GetSorting());
if (op.GetTypeCase() == NKqpProto::TKqpPhyTableOperation::kReadOlapRange) {
FillOlapReadInfo(meta, readSettings.ResultType, op.GetReadOlapRange());
}
@@ -1619,6 +1618,7 @@ protected:
.UserToken = UserToken,
.Deadline = Deadline.GetOrElse(TInstant::Zero()),
.StatsMode = Request.StatsMode,
+ .WithProgressStats = Request.ProgressStatsPeriod != TDuration::Zero(),
.RlPath = Request.RlPath,
.ExecuterSpan = ExecuterSpan,
.ResourcesSnapshot = std::move(ResourcesSnapshot),
@@ -1652,11 +1652,12 @@ protected:
auto& stage = stageInfo.Meta.GetStage(stageInfo.Id);
auto& columnShardHashV1Params = stageInfo.Meta.ColumnShardHashV1Params;
- if (enableShuffleElimination && stageInfo.Meta.ColumnTableInfoPtr) {
+ if (enableShuffleElimination && stage.GetIsShuffleEliminated() && stageInfo.Meta.ColumnTableInfoPtr) {
const auto& tableDesc = stageInfo.Meta.ColumnTableInfoPtr->Description;
columnShardHashV1Params.SourceShardCount = tableDesc.GetColumnShardCount();
columnShardHashV1Params.SourceTableKeyColumnTypes = std::make_shared<TVector<NScheme::TTypeInfo>>();
for (const auto& column: tableDesc.GetSharding().GetHashSharding().GetColumns()) {
+ Y_ENSURE(stageInfo.Meta.TableConstInfo->Columns.contains(column), TStringBuilder{} << "Table doesn't have column: " << column);
auto columnType = stageInfo.Meta.TableConstInfo->Columns.at(column).Type;
columnShardHashV1Params.SourceTableKeyColumnTypes->push_back(columnType);
}
@@ -1683,7 +1684,7 @@ protected:
stageInfo.Meta.SkipNullKeys.assign(op.GetReadRange().GetSkipNullKeys().begin(),
op.GetReadRange().GetSkipNullKeys().end());
// not supported for scan queries
- YQL_ENSURE(!readSettings.Reverse);
+ YQL_ENSURE(!readSettings.IsReverse());
}
for (auto&& i: partitions) {
@@ -1697,11 +1698,11 @@ protected:
}
}
- if (!AppData()->FeatureFlags.GetEnableSeparationComputeActorsFromRead() || (!isOlapScan && readSettings.Sorted)) {
+ if (!AppData()->FeatureFlags.GetEnableSeparationComputeActorsFromRead() || (!isOlapScan && readSettings.IsSorted())) {
for (auto&& pair : nodeShards) {
auto& shardsInfo = pair.second;
for (auto&& shardInfo : shardsInfo) {
- auto& task = AssignScanTaskToShard(stageInfo, shardInfo.ShardId, nodeTasks, assignedShardsCount, readSettings.Sorted, isOlapScan);
+ auto& task = AssignScanTaskToShard(stageInfo, shardInfo.ShardId, nodeTasks, assignedShardsCount, readSettings.IsSorted(), isOlapScan);
MergeReadInfoToTaskMeta(task.Meta, shardInfo.ShardId, shardInfo.KeyReadRanges, readSettings,
columns, op, /*isPersistentScan*/ true);
}
@@ -1710,7 +1711,7 @@ protected:
for (const auto& pair : nodeTasks) {
for (const auto& taskIdx : pair.second) {
auto& task = TasksGraph.GetTask(taskIdx);
- task.Meta.SetEnableShardsSequentialScan(readSettings.Sorted);
+ task.Meta.SetEnableShardsSequentialScan(readSettings.IsSorted());
PrepareScanMetaForUsage(task.Meta, keyTypes);
BuildSinks(stage, task);
}
diff --git a/ydb/core/kqp/executer_actor/kqp_executer_stats.cpp b/ydb/core/kqp/executer_actor/kqp_executer_stats.cpp
index a89aa90a1b..1fe57463f6 100644
--- a/ydb/core/kqp/executer_actor/kqp_executer_stats.cpp
+++ b/ydb/core/kqp/executer_actor/kqp_executer_stats.cpp
@@ -1326,91 +1326,116 @@ void TQueryExecutionStats::ExportAggAsyncBufferStats(TAsyncBufferStats& data, NY
void TQueryExecutionStats::ExportExecStats(NYql::NDqProto::TDqExecutionStats& stats) {
THashMap<ui32, NDqProto::TDqStageStats*> protoStages;
- for (auto& [stageId, stagetype] : TasksGraph->GetStagesInfo()) {
- protoStages.emplace(stageId.StageId, GetOrCreateStageStats(stageId, *TasksGraph, stats));
+
+ if (CollectFullStats(StatsMode)) {
+ for (auto& [stageId, stagetype] : TasksGraph->GetStagesInfo()) {
+ protoStages.emplace(stageId.StageId, GetOrCreateStageStats(stageId, *TasksGraph, stats));
+ }
}
- for (auto& [stageId, stageStat] : StageStats) {
- auto& stageStats = *protoStages[stageStat.StageId.StageId];
- stageStats.SetTotalTasksCount(stageStat.Task2Index.size());
- stageStats.SetFinishedTasksCount(stageStat.FinishedCount);
-
- stageStats.SetBaseTimeMs(BaseTimeMs);
- stageStat.CpuTimeUs.ExportAggStats(BaseTimeMs, *stageStats.MutableCpuTimeUs());
- ExportAggStats(stageStat.SourceCpuTimeUs, *stageStats.MutableSourceCpuTimeUs());
- stageStat.MaxMemoryUsage.ExportAggStats(BaseTimeMs, *stageStats.MutableMaxMemoryUsage());
-
- ExportAggStats(stageStat.InputRows, *stageStats.MutableInputRows());
- ExportAggStats(stageStat.InputBytes, *stageStats.MutableInputBytes());
- ExportAggStats(stageStat.OutputRows, *stageStats.MutableOutputRows());
- ExportAggStats(stageStat.OutputBytes, *stageStats.MutableOutputBytes());
- ExportAggStats(stageStat.ResultRows, *stageStats.MutableResultRows());
- ExportAggStats(stageStat.ResultBytes, *stageStats.MutableResultBytes());
- ExportAggStats(stageStat.IngressRows, *stageStats.MutableIngressRows());
- ExportAggStats(stageStat.IngressBytes, *stageStats.MutableIngressBytes());
- ExportAggStats(stageStat.IngressDecompressedBytes, *stageStats.MutableIngressDecompressedBytes());
- ExportAggStats(stageStat.EgressRows, *stageStats.MutableEgressRows());
- ExportAggStats(stageStat.EgressBytes, *stageStats.MutableEgressBytes());
-
- ExportOffsetAggStats(stageStat.StartTimeMs, *stageStats.MutableStartTimeMs(), BaseTimeMs);
- ExportOffsetAggStats(stageStat.FinishTimeMs, *stageStats.MutableFinishTimeMs(), BaseTimeMs);
- ExportAggStats(stageStat.DurationUs, *stageStats.MutableDurationUs());
- stageStat.WaitInputTimeUs.ExportAggStats(BaseTimeMs, *stageStats.MutableWaitInputTimeUs());
- stageStat.WaitOutputTimeUs.ExportAggStats(BaseTimeMs, *stageStats.MutableWaitOutputTimeUs());
-
- stageStat.SpillingComputeBytes.ExportAggStats(BaseTimeMs, *stageStats.MutableSpillingComputeBytes());
- stageStat.SpillingChannelBytes.ExportAggStats(BaseTimeMs, *stageStats.MutableSpillingChannelBytes());
- stageStat.SpillingComputeTimeUs.ExportAggStats(BaseTimeMs, *stageStats.MutableSpillingComputeTimeUs());
- stageStat.SpillingChannelTimeUs.ExportAggStats(BaseTimeMs, *stageStats.MutableSpillingChannelTimeUs());
-
- FillStageDurationUs(stageStats);
+ std::unordered_map<TString, NYql::NDqProto::TDqTableStats*> currentTableStats;
+ for (auto& [stageId, stageStat] : StageStats) {
for (auto& [path, t] : stageStat.Tables) {
- auto& table = *stageStats.AddTables();
- table.SetTablePath(path);
- ExportAggStats(t.ReadRows, *table.MutableReadRows());
- ExportAggStats(t.ReadBytes, *table.MutableReadBytes());
- ExportAggStats(t.WriteRows, *table.MutableWriteRows());
- ExportAggStats(t.WriteBytes, *table.MutableWriteBytes());
- ExportAggStats(t.EraseRows, *table.MutableEraseRows());
- ExportAggStats(t.EraseBytes, *table.MutableEraseBytes());
- table.SetAffectedPartitions(ExportAggStats(t.AffectedPartitions));
- }
- for (auto& [id, i] : stageStat.Ingress) {
- ExportAggAsyncBufferStats(i, (*stageStats.MutableIngress())[id]);
- }
- for (auto& [id, i] : stageStat.Input) {
- ExportAggAsyncBufferStats(i, (*stageStats.MutableInput())[id]);
- }
- for (auto& [id, o] : stageStat.Output) {
- ExportAggAsyncBufferStats(o, (*stageStats.MutableOutput())[id]);
- }
- for (auto& [id, e] : stageStat.Egress) {
- ExportAggAsyncBufferStats(e, (*stageStats.MutableEgress())[id]);
- }
- for (auto& [id, j] : stageStat.Joins) {
- auto& joinStat = (*stageStats.MutableOperatorJoin())[id];
- joinStat.SetOperatorId(id);
- ExportAggStats(j.Bytes, *joinStat.MutableBytes());
- ExportAggStats(j.Rows, *joinStat.MutableRows());
- }
- for (auto& [id, f] : stageStat.Filters) {
- auto& filterStat = (*stageStats.MutableOperatorFilter())[id];
- filterStat.SetOperatorId(id);
- ExportAggStats(f.Bytes, *filterStat.MutableBytes());
- ExportAggStats(f.Rows, *filterStat.MutableRows());
+ NYql::NDqProto::TDqTableStats* tableAggr = nullptr;
+ if (auto it = currentTableStats.find(path); it != currentTableStats.end()) {
+ tableAggr = it->second;
+ } else {
+ tableAggr = stats.AddTables();
+ tableAggr->SetTablePath(path);
+ currentTableStats.emplace(path, tableAggr);
+ }
+
+ tableAggr->SetReadRows(tableAggr->GetReadRows() + ExportAggStats(t.ReadRows));
+ tableAggr->SetReadBytes(tableAggr->GetReadBytes() + ExportAggStats(t.ReadBytes));
+ tableAggr->SetWriteRows(tableAggr->GetWriteRows() + ExportAggStats(t.WriteRows));
+ tableAggr->SetWriteBytes(tableAggr->GetWriteBytes() + ExportAggStats(t.WriteBytes));
+ tableAggr->SetEraseRows(tableAggr->GetEraseRows() + ExportAggStats(t.EraseRows));
+ tableAggr->SetAffectedPartitions(tableAggr->GetAffectedPartitions() + ExportAggStats(t.AffectedPartitions));
+
}
- for (auto& [id, a] : stageStat.Aggregations) {
- auto& aggrStat = (*stageStats.MutableOperatorAggregation())[id];
- aggrStat.SetOperatorId(id);
- ExportAggStats(a.Bytes, *aggrStat.MutableBytes());
- ExportAggStats(a.Rows, *aggrStat.MutableRows());
+
+
+ if (CollectFullStats(StatsMode)) {
+ auto& stageStats = *protoStages[stageStat.StageId.StageId];
+ stageStats.SetTotalTasksCount(stageStat.Task2Index.size());
+ stageStats.SetFinishedTasksCount(stageStat.FinishedCount);
+
+ stageStats.SetBaseTimeMs(BaseTimeMs);
+ stageStat.CpuTimeUs.ExportAggStats(BaseTimeMs, *stageStats.MutableCpuTimeUs());
+ ExportAggStats(stageStat.SourceCpuTimeUs, *stageStats.MutableSourceCpuTimeUs());
+ stageStat.MaxMemoryUsage.ExportAggStats(BaseTimeMs, *stageStats.MutableMaxMemoryUsage());
+
+ ExportAggStats(stageStat.InputRows, *stageStats.MutableInputRows());
+ ExportAggStats(stageStat.InputBytes, *stageStats.MutableInputBytes());
+ ExportAggStats(stageStat.OutputRows, *stageStats.MutableOutputRows());
+ ExportAggStats(stageStat.OutputBytes, *stageStats.MutableOutputBytes());
+ ExportAggStats(stageStat.ResultRows, *stageStats.MutableResultRows());
+ ExportAggStats(stageStat.ResultBytes, *stageStats.MutableResultBytes());
+ ExportAggStats(stageStat.IngressRows, *stageStats.MutableIngressRows());
+ ExportAggStats(stageStat.IngressBytes, *stageStats.MutableIngressBytes());
+ ExportAggStats(stageStat.IngressDecompressedBytes, *stageStats.MutableIngressDecompressedBytes());
+ ExportAggStats(stageStat.EgressRows, *stageStats.MutableEgressRows());
+ ExportAggStats(stageStat.EgressBytes, *stageStats.MutableEgressBytes());
+
+ ExportOffsetAggStats(stageStat.StartTimeMs, *stageStats.MutableStartTimeMs(), BaseTimeMs);
+ ExportOffsetAggStats(stageStat.FinishTimeMs, *stageStats.MutableFinishTimeMs(), BaseTimeMs);
+ ExportAggStats(stageStat.DurationUs, *stageStats.MutableDurationUs());
+ stageStat.WaitInputTimeUs.ExportAggStats(BaseTimeMs, *stageStats.MutableWaitInputTimeUs());
+ stageStat.WaitOutputTimeUs.ExportAggStats(BaseTimeMs, *stageStats.MutableWaitOutputTimeUs());
+
+ stageStat.SpillingComputeBytes.ExportAggStats(BaseTimeMs, *stageStats.MutableSpillingComputeBytes());
+ stageStat.SpillingChannelBytes.ExportAggStats(BaseTimeMs, *stageStats.MutableSpillingChannelBytes());
+ stageStat.SpillingComputeTimeUs.ExportAggStats(BaseTimeMs, *stageStats.MutableSpillingComputeTimeUs());
+ stageStat.SpillingChannelTimeUs.ExportAggStats(BaseTimeMs, *stageStats.MutableSpillingChannelTimeUs());
+
+ FillStageDurationUs(stageStats);
+
+ for (auto& [path, t] : stageStat.Tables) {
+ auto& table = *stageStats.AddTables();
+ table.SetTablePath(path);
+ ExportAggStats(t.ReadRows, *table.MutableReadRows());
+ ExportAggStats(t.ReadBytes, *table.MutableReadBytes());
+ ExportAggStats(t.WriteRows, *table.MutableWriteRows());
+ ExportAggStats(t.WriteBytes, *table.MutableWriteBytes());
+ ExportAggStats(t.EraseRows, *table.MutableEraseRows());
+ ExportAggStats(t.EraseBytes, *table.MutableEraseBytes());
+ table.SetAffectedPartitions(ExportAggStats(t.AffectedPartitions));
+ }
+ for (auto& [id, i] : stageStat.Ingress) {
+ ExportAggAsyncBufferStats(i, (*stageStats.MutableIngress())[id]);
+ }
+ for (auto& [id, i] : stageStat.Input) {
+ ExportAggAsyncBufferStats(i, (*stageStats.MutableInput())[id]);
+ }
+ for (auto& [id, o] : stageStat.Output) {
+ ExportAggAsyncBufferStats(o, (*stageStats.MutableOutput())[id]);
+ }
+ for (auto& [id, e] : stageStat.Egress) {
+ ExportAggAsyncBufferStats(e, (*stageStats.MutableEgress())[id]);
+ }
+ for (auto& [id, j] : stageStat.Joins) {
+ auto& joinStat = (*stageStats.MutableOperatorJoin())[id];
+ joinStat.SetOperatorId(id);
+ ExportAggStats(j.Bytes, *joinStat.MutableBytes());
+ ExportAggStats(j.Rows, *joinStat.MutableRows());
+ }
+ for (auto& [id, f] : stageStat.Filters) {
+ auto& filterStat = (*stageStats.MutableOperatorFilter())[id];
+ filterStat.SetOperatorId(id);
+ ExportAggStats(f.Bytes, *filterStat.MutableBytes());
+ ExportAggStats(f.Rows, *filterStat.MutableRows());
+ }
+ for (auto& [id, a] : stageStat.Aggregations) {
+ auto& aggrStat = (*stageStats.MutableOperatorAggregation())[id];
+ aggrStat.SetOperatorId(id);
+ ExportAggStats(a.Bytes, *aggrStat.MutableBytes());
+ ExportAggStats(a.Rows, *aggrStat.MutableRows());
+ }
}
}
- for (const auto& [_, tableStats] : TableStats) {
- stats.AddTables()->CopyFrom(*tableStats);
- }
+ stats.SetDurationUs(TInstant::Now().MicroSeconds() - StartTs.MicroSeconds());
}
void TQueryExecutionStats::AdjustExternalAggr(NYql::NDqProto::TDqExternalAggrStats& stats) {
diff --git a/ydb/core/kqp/executer_actor/kqp_partition_helper.cpp b/ydb/core/kqp/executer_actor/kqp_partition_helper.cpp
index d7bb8faf2f..e0acc8b98b 100644
--- a/ydb/core/kqp/executer_actor/kqp_partition_helper.cpp
+++ b/ydb/core/kqp/executer_actor/kqp_partition_helper.cpp
@@ -1017,19 +1017,28 @@ TPhysicalShardReadSettings ExtractReadSettings(const NKqpProto::TKqpPhyTableOper
switch(operation.GetTypeCase()){
case NKqpProto::TKqpPhyTableOperation::kReadRanges: {
readSettings.ItemsLimit = ExtractItemsLimit(stageInfo, operation.GetReadRanges().GetItemsLimit(), holderFactory, typeEnv);
- readSettings.Reverse = operation.GetReadRanges().GetReverse();
+ if (operation.GetReadRanges().GetReverse()) {
+ readSettings.SetSorting(ERequestSorting::DESC);
+ }
break;
}
case NKqpProto::TKqpPhyTableOperation::kReadRange: {
readSettings.ItemsLimit = ExtractItemsLimit(stageInfo, operation.GetReadRange().GetItemsLimit(), holderFactory, typeEnv);
- readSettings.Reverse = operation.GetReadRange().GetReverse();
+ if (operation.GetReadRange().GetReverse()) {
+ readSettings.SetSorting(ERequestSorting::DESC);
+ }
break;
}
case NKqpProto::TKqpPhyTableOperation::kReadOlapRange: {
- readSettings.Sorted = operation.GetReadOlapRange().GetSorted();
- readSettings.Reverse = operation.GetReadOlapRange().GetReverse();
+ if (operation.GetReadOlapRange().GetReverse()) {
+ readSettings.SetSorting(ERequestSorting::DESC);
+ } else if (operation.GetReadOlapRange().GetSorted()) {
+ readSettings.SetSorting(ERequestSorting::ASC);
+ } else {
+ readSettings.SetSorting(ERequestSorting::NONE);
+ }
readSettings.ItemsLimit = ExtractItemsLimit(stageInfo, operation.GetReadOlapRange().GetItemsLimit(), holderFactory, typeEnv);
NKikimrMiniKQL::TType minikqlProtoResultType;
ConvertYdbTypeToMiniKQLType(operation.GetReadOlapRange().GetResultType(), minikqlProtoResultType);
diff --git a/ydb/core/kqp/executer_actor/kqp_partition_helper.h b/ydb/core/kqp/executer_actor/kqp_partition_helper.h
index c3d54b17fa..03a1679436 100644
--- a/ydb/core/kqp/executer_actor/kqp_partition_helper.h
+++ b/ydb/core/kqp/executer_actor/kqp_partition_helper.h
@@ -2,7 +2,7 @@
#include "kqp_tasks_graph.h"
-
+#include <ydb/core/kqp/common/kqp_yql.h>
#include <yql/essentials/minikql/computation/mkql_computation_node_holders.h>
#include <util/generic/variant.h>
@@ -26,15 +26,12 @@ public:
ui64 ShardId;
TShardInfoWithId(const ui64 shardId, TShardInfo&& base)
: TShardInfo(std::move(base))
- , ShardId(shardId)
- {
+ , ShardId(shardId) {
}
};
-struct TPhysicalShardReadSettings {
- bool Sorted = true;
- bool Reverse = false;
+struct TPhysicalShardReadSettings: public NYql::TSortingOperator<NYql::ERequestSorting::ASC> {
ui64 ItemsLimit = 0;
NKikimr::NMiniKQL::TType* ResultType = nullptr;
};
diff --git a/ydb/core/kqp/executer_actor/kqp_planner.cpp b/ydb/core/kqp/executer_actor/kqp_planner.cpp
index b82ea21d06..94bba1c234 100644
--- a/ydb/core/kqp/executer_actor/kqp_planner.cpp
+++ b/ydb/core/kqp/executer_actor/kqp_planner.cpp
@@ -89,6 +89,7 @@ TKqpPlanner::TKqpPlanner(TKqpPlanner::TArgs&& args)
, UserToken(args.UserToken)
, Deadline(args.Deadline)
, StatsMode(args.StatsMode)
+ , WithProgressStats(args.WithProgressStats)
, RlPath(args.RlPath)
, ResourcesSnapshot(std::move(args.ResourcesSnapshot))
, ExecuterSpan(args.ExecuterSpan)
@@ -106,6 +107,7 @@ TKqpPlanner::TKqpPlanner(TKqpPlanner::TArgs&& args)
, ArrayBufferMinFillPercentage(args.ArrayBufferMinFillPercentage)
, VerboseMemoryLimitException(args.VerboseMemoryLimitException)
{
+ Y_UNUSED(MkqlMemoryLimit);
if (GUCSettings) {
SerializedGUCSettings = GUCSettings->SerializeToString();
}
@@ -223,6 +225,7 @@ std::unique_ptr<TEvKqpNode::TEvStartKqpTasksRequest> TKqpPlanner::SerializeReque
}
request.MutableRuntimeSettings()->SetStatsMode(GetDqStatsMode(StatsMode));
+ request.MutableRuntimeSettings()->SetWithProgressStats(WithProgressStats);
request.SetStartAllOrFail(true);
request.MutableRuntimeSettings()->SetExecType(NYql::NDqProto::TComputeRuntimeSettings::DATA);
request.MutableRuntimeSettings()->SetUseSpilling(TasksGraph.GetMeta().AllowWithSpilling);
@@ -498,6 +501,7 @@ TString TKqpPlanner::ExecuteDataComputeTask(ui64 taskId, ui32 computeTasksSize)
.MemoryPool = NRm::EKqpMemoryPool::DataQuery,
.WithSpilling = TasksGraph.GetMeta().AllowWithSpilling,
.StatsMode = GetDqStatsMode(StatsMode),
+ .WithProgressStats = WithProgressStats,
.Deadline = Deadline,
.ShareMailbox = (computeTasksSize <= 1),
.RlPath = Nothing(),
diff --git a/ydb/core/kqp/executer_actor/kqp_planner.h b/ydb/core/kqp/executer_actor/kqp_planner.h
index 6f20629122..68c1f1bbb4 100644
--- a/ydb/core/kqp/executer_actor/kqp_planner.h
+++ b/ydb/core/kqp/executer_actor/kqp_planner.h
@@ -48,6 +48,7 @@ public:
const TIntrusiveConstPtr<NACLib::TUserToken>& UserToken;
const TInstant Deadline;
const Ydb::Table::QueryStatsCollection::Mode& StatsMode;
+ const bool WithProgressStats;
const TMaybe<NKikimrKqp::TRlPath>& RlPath;
NWilson::TSpan& ExecuterSpan;
TVector<NKikimrKqp::TKqpNodeResources> ResourcesSnapshot;
@@ -110,6 +111,7 @@ private:
const TIntrusiveConstPtr<NACLib::TUserToken> UserToken;
const TInstant Deadline;
const Ydb::Table::QueryStatsCollection::Mode StatsMode;
+ const bool WithProgressStats;
const TMaybe<NKikimrKqp::TRlPath> RlPath;
THashSet<ui32> TrackingNodes;
TVector<NKikimrKqp::TKqpNodeResources> ResourcesSnapshot;
diff --git a/ydb/core/kqp/executer_actor/kqp_table_resolver.cpp b/ydb/core/kqp/executer_actor/kqp_table_resolver.cpp
index aa69778493..605eceba91 100644
--- a/ydb/core/kqp/executer_actor/kqp_table_resolver.cpp
+++ b/ydb/core/kqp/executer_actor/kqp_table_resolver.cpp
@@ -28,12 +28,10 @@ public:
TKqpTableResolver(const TActorId& owner, ui64 txId,
const TIntrusiveConstPtr<NACLib::TUserToken>& userToken,
- const TVector<IKqpGateway::TPhysicalTxData>& transactions,
TKqpTasksGraph& tasksGraph)
: Owner(owner)
, TxId(txId)
, UserToken(userToken)
- , Transactions(transactions)
, TasksGraph(tasksGraph)
, SystemViewRewrittenResolver(NSysView::CreateSystemViewRewrittenResolver()) {}
@@ -149,7 +147,6 @@ private:
private:
void ResolveKeys() {
- FillKqpTasksGraphStages(TasksGraph, Transactions);
auto requestNavigate = std::make_unique<NSchemeCache::TSchemeCacheNavigate>();
auto request = MakeHolder<NSchemeCache::TSchemeCacheRequest>();
@@ -268,7 +265,6 @@ private:
const TActorId Owner;
const ui64 TxId;
TIntrusiveConstPtr<NACLib::TUserToken> UserToken;
- const TVector<IKqpGateway::TPhysicalTxData>& Transactions;
THashMap<TTableId, TVector<TStageId>> TableRequestIds;
THashMap<TTableId, TString> TablePathsById;
bool NavigationFinished = false;
@@ -287,9 +283,8 @@ private:
} // anonymous namespace
NActors::IActor* CreateKqpTableResolver(const TActorId& owner, ui64 txId,
- const TIntrusiveConstPtr<NACLib::TUserToken>& userToken,
- const TVector<IKqpGateway::TPhysicalTxData>& transactions, TKqpTasksGraph& tasksGraph) {
- return new TKqpTableResolver(owner, txId, userToken, transactions, tasksGraph);
+ const TIntrusiveConstPtr<NACLib::TUserToken>& userToken, TKqpTasksGraph& tasksGraph) {
+ return new TKqpTableResolver(owner, txId, userToken, tasksGraph);
}
} // namespace NKikimr::NKqp
diff --git a/ydb/core/kqp/executer_actor/kqp_table_resolver.h b/ydb/core/kqp/executer_actor/kqp_table_resolver.h
index f914cf3430..13472f2e1f 100644
--- a/ydb/core/kqp/executer_actor/kqp_table_resolver.h
+++ b/ydb/core/kqp/executer_actor/kqp_table_resolver.h
@@ -5,7 +5,6 @@
namespace NKikimr::NKqp {
NActors::IActor* CreateKqpTableResolver(const TActorId& owner, ui64 txId,
- const TIntrusiveConstPtr<NACLib::TUserToken>& userToken,
- const TVector<IKqpGateway::TPhysicalTxData>& transactions, TKqpTasksGraph& tasksGraph);
+ const TIntrusiveConstPtr<NACLib::TUserToken>& userToken, TKqpTasksGraph& tasksGraph);
} // namespace NKikimr::NKqp
diff --git a/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp b/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp
index 606b6427a5..21c23995f0 100644
--- a/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp
+++ b/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp
@@ -545,7 +545,8 @@ void BuildKqpStageChannels(TKqpTasksGraph& tasksGraph, TStageInfo& stageInfo,
columnShardHashV1Params.SourceTableKeyColumnTypes->reserve(columnShardHashV1.KeyColumnTypesSize());
for (const auto& keyColumnType: columnShardHashV1.GetKeyColumnTypes()) {
auto typeId = static_cast<NScheme::TTypeId>(keyColumnType);
- auto typeInfo = NScheme::TTypeInfo{typeId};
+ auto typeInfo =
+ typeId == NScheme::NTypeIds::Decimal? NScheme::TTypeInfo(NKikimr::NScheme::TDecimalType::Default()): NScheme::TTypeInfo(typeId);
columnShardHashV1Params.SourceTableKeyColumnTypes->push_back(typeInfo);
}
break;
@@ -983,7 +984,7 @@ void FillTaskMeta(const TStageInfo& stageInfo, const TTask& task, NYql::NDqProto
protoColumn->SetName(column.Name);
}
protoReadMeta->SetItemsLimit(task.Meta.ReadInfo.ItemsLimit);
- protoReadMeta->SetReverse(task.Meta.ReadInfo.Reverse);
+ protoReadMeta->SetReverse(task.Meta.ReadInfo.IsReverse());
}
}
if (task.Meta.Writes) {
@@ -1054,7 +1055,8 @@ void FillTaskMeta(const TStageInfo& stageInfo, const TTask& task, NYql::NDqProto
YQL_ENSURE(!task.Meta.Writes);
if (!task.Meta.Reads->empty()) {
- protoTaskMeta.SetReverse(task.Meta.ReadInfo.Reverse);
+ protoTaskMeta.SetReverse(task.Meta.ReadInfo.IsReverse());
+ protoTaskMeta.SetOptionalSorting((ui32)task.Meta.ReadInfo.GetSorting());
protoTaskMeta.SetItemsLimit(task.Meta.ReadInfo.ItemsLimit);
if (task.Meta.HasEnableShardsSequentialScan()) {
protoTaskMeta.SetEnableShardsSequentialScan(task.Meta.GetEnableShardsSequentialScanUnsafe());
diff --git a/ydb/core/kqp/executer_actor/kqp_tasks_graph.h b/ydb/core/kqp/executer_actor/kqp_tasks_graph.h
index 22a6f31931..1c8c68f0ea 100644
--- a/ydb/core/kqp/executer_actor/kqp_tasks_graph.h
+++ b/ydb/core/kqp/executer_actor/kqp_tasks_graph.h
@@ -2,6 +2,7 @@
#include <ydb/core/kqp/common/kqp_resolve.h>
#include <ydb/core/kqp/common/kqp_user_request_context.h>
+#include <ydb/core/kqp/common/kqp_yql.h>
#include <ydb/core/kqp/gateway/kqp_gateway.h>
#include <ydb/core/scheme/scheme_tabledefs.h>
#include <ydb/core/tx/scheme_cache/scheme_cache.h>
@@ -289,14 +290,13 @@ public:
std::set<TString> ParameterNames;
};
- struct TReadInfo {
+ struct TReadInfo: public NYql::TSortingOperator<NYql::ERequestSorting::NONE> {
+ public:
enum class EReadType {
Rows,
Blocks
};
ui64 ItemsLimit = 0;
- bool Reverse = false;
- bool Sorted = false;
EReadType ReadType = EReadType::Rows;
TKqpOlapProgram OlapProgram;
TVector<NScheme::TTypeInfo> ResultColumnsTypes;
diff --git a/ydb/core/kqp/host/kqp_gateway_proxy.cpp b/ydb/core/kqp/host/kqp_gateway_proxy.cpp
index 90d42a5639..11cf4998af 100644
--- a/ydb/core/kqp/host/kqp_gateway_proxy.cpp
+++ b/ydb/core/kqp/host/kqp_gateway_proxy.cpp
@@ -2513,6 +2513,12 @@ public:
auto& state = *op.MutableState();
state.MutableDone()->SetFailoverMode(
static_cast<NKikimrReplication::TReplicationState::TDone::EFailoverMode>(done->FailoverMode));
+ } else if (const auto& paused = settings.Settings.StatePaused) {
+ auto& state = *op.MutableState();
+ state.MutablePaused();
+ } else if (const auto& standBy = settings.Settings.StateStandBy) {
+ auto& state = *op.MutableState();
+ state.MutableStandBy();
}
if (settings.Settings.ConnectionString || settings.Settings.Endpoint || settings.Settings.Database ||
diff --git a/ydb/core/kqp/node_service/kqp_node_service.cpp b/ydb/core/kqp/node_service/kqp_node_service.cpp
index 7774878700..22ecddfe75 100644
--- a/ydb/core/kqp/node_service/kqp_node_service.cpp
+++ b/ydb/core/kqp/node_service/kqp_node_service.cpp
@@ -274,6 +274,7 @@ private:
.MemoryPool = memoryPool,
.WithSpilling = msgRtSettings.GetUseSpilling(),
.StatsMode = msgRtSettings.GetStatsMode(),
+ .WithProgressStats = msgRtSettings.GetWithProgressStats(),
.Deadline = TInstant(),
.ShareMailbox = false,
.RlPath = rlPath,
diff --git a/ydb/core/kqp/opt/kqp_query_plan.cpp b/ydb/core/kqp/opt/kqp_query_plan.cpp
index aa9d7f25a6..38a73f5b72 100644
--- a/ydb/core/kqp/opt/kqp_query_plan.cpp
+++ b/ydb/core/kqp/opt/kqp_query_plan.cpp
@@ -65,13 +65,12 @@ std::string RemoveForbiddenChars(std::string s) {
return NYql::IsUtf8(s)? s: "Non-UTF8 string";
}
-struct TTableRead {
+struct TTableRead: public NYql::TSortingOperator<NYql::ERequestSorting::ASC> {
EPlanTableReadType Type = EPlanTableReadType::Unspecified;
TVector<TString> LookupBy;
TVector<TString> ScanBy;
TVector<TString> Columns;
TMaybe<TString> Limit;
- bool Reverse = false;
};
struct TTableWrite {
@@ -1914,9 +1913,11 @@ private:
readInfo.Limit = limit;
op.Properties["ReadLimit"] = limit;
}
- if (settings.Reverse) {
- readInfo.Reverse = true;
+ readInfo.SetSorting(settings.GetSorting());
+ if (settings.GetSorting() == ERequestSorting::DESC) {
op.Properties["Reverse"] = true;
+ } else if (settings.GetSorting() == ERequestSorting::ASC) {
+ op.Properties["Reverse"] = false;
}
if (settings.SequentialInFlight) {
@@ -2006,7 +2007,7 @@ void WriteCommonTablesInfo(NJsonWriter::TBuf& writer, TMap<TString, TTableInfo>&
if (read.Limit) {
writer.WriteKey("limit").WriteString(*read.Limit);
}
- if (read.Reverse) {
+ if (read.IsReverse()) {
writer.WriteKey("reverse").WriteBool(true);
}
diff --git a/ydb/core/kqp/opt/logical/kqp_opt_log.cpp b/ydb/core/kqp/opt/logical/kqp_opt_log.cpp
index 4315c567ae..15208b701d 100644
--- a/ydb/core/kqp/opt/logical/kqp_opt_log.cpp
+++ b/ydb/core/kqp/opt/logical/kqp_opt_log.cpp
@@ -163,7 +163,7 @@ protected:
TMaybeNode<TExprBase> OptimizeEquiJoinWithCosts(TExprBase node, TExprContext& ctx) {
auto maxDPhypDPTableSize = Config->MaxDPHypDPTableSize.Get().GetOrElse(TDqSettings::TDefault::MaxDPHypDPTableSize);
auto optLevel = Config->CostBasedOptimizationLevel.Get().GetOrElse(Config->DefaultCostBasedOptimizationLevel);
- bool enableShuffleElimination = KqpCtx.Config->OptShuffleElimination.Get().GetOrElse(Config->DefaultEnableShuffleElimination);
+ bool enableShuffleElimination = KqpCtx.Config->OptShuffleElimination.Get().GetOrElse(KqpCtx.Config->DefaultEnableShuffleElimination);
auto providerCtx = TKqpProviderContext(KqpCtx, optLevel);
auto opt = std::unique_ptr<IOptimizerNew>(MakeNativeOptimizerNew(providerCtx, maxDPhypDPTableSize, ctx, enableShuffleElimination));
TExprBase output = DqOptimizeEquiJoinWithCosts(node, ctx, TypesCtx, optLevel,
diff --git a/ydb/core/kqp/opt/logical/kqp_opt_log_extract.cpp b/ydb/core/kqp/opt/logical/kqp_opt_log_extract.cpp
index 2507d3b31c..f9c03d710d 100644
--- a/ydb/core/kqp/opt/logical/kqp_opt_log_extract.cpp
+++ b/ydb/core/kqp/opt/logical/kqp_opt_log_extract.cpp
@@ -157,6 +157,10 @@ TExprBase KqpApplyExtractMembersToReadOlapTable(TExprBase node, TExprContext& ct
auto read = node.Cast<TKqpReadOlapTableRangesBase>();
+ if (read.Columns().Size() == 1) {
+ return node;
+ }
+
auto usedColumns = GetUsedColumns(read, read.Columns(), parentsMap, allowMultiUsage, ctx);
if (!usedColumns) {
return node;
diff --git a/ydb/core/kqp/opt/physical/kqp_opt_phy.cpp b/ydb/core/kqp/opt/physical/kqp_opt_phy.cpp
index 0c4a7ac21a..11b1830199 100644
--- a/ydb/core/kqp/opt/physical/kqp_opt_phy.cpp
+++ b/ydb/core/kqp/opt/physical/kqp_opt_phy.cpp
@@ -124,7 +124,10 @@ public:
AddHandler(1, &TCoTake::Match, HNDL(PropagatePrecomuteTake<true>));
AddHandler(1, &TCoFlatMap::Match, HNDL(PropagatePrecomuteFlatmap<true>));
AddHandler(1, &TKqpWriteConstraint::Match, HNDL(BuildWriteConstraint<true>));
+ AddHandler(1, &TKqpWriteConstraint::Match, HNDL(BuildWriteConstraint<true>));
+ AddHandler(1, &TKqpReadOlapTableRanges::Match, HNDL(AddColumnForEmptyColumnsOlapRead));
+
AddHandler(2, &TDqStage::Match, HNDL(RewriteKqpReadTable));
AddHandler(2, &TDqStage::Match, HNDL(RewriteKqpLookupTable));
AddHandler(2, &TKqlUpsertRows::Match, HNDL(RewriteReturningUpsert));
@@ -452,7 +455,7 @@ protected:
bool shuffleEliminationWithMap = KqpCtx.Config->OptShuffleEliminationWithMap.Get().GetOrElse(true);
bool rightCollectStage = !KqpCtx.Config->AllowMultiBroadcasts;
TExprBase output = DqBuildJoin(node, ctx, optCtx, *getParents(), IsGlobal,
- pushLeftStage, KqpCtx.Config->GetHashJoinMode(), false, KqpCtx.Config->UseGraceJoinCoreForMap.Get().GetOrElse(false), KqpCtx.Config->OptShuffleElimination.Get().GetOrElse(false), shuffleEliminationWithMap,
+ pushLeftStage, KqpCtx.Config->GetHashJoinMode(), false, KqpCtx.Config->UseGraceJoinCoreForMap.Get().GetOrElse(false), KqpCtx.Config->OptShuffleElimination.Get().GetOrElse(KqpCtx.Config->DefaultEnableShuffleElimination), shuffleEliminationWithMap,
rightCollectStage
);
DumpAppliedRule("BuildJoin", node.Ptr(), output.Ptr(), ctx);
@@ -528,6 +531,13 @@ protected:
return output;
}
+ TMaybeNode<TExprBase> AddColumnForEmptyColumnsOlapRead(TExprBase node, TExprContext& ctx)
+ {
+ TExprBase output = KqpAddColumnForEmptyColumnsOlapRead(node, ctx, KqpCtx);
+ DumpAppliedRule("AddColumnForEmptyColumnsOlapRead", node.Ptr(), output.Ptr(), ctx);
+ return output;
+ }
+
TMaybeNode<TExprBase> DropUnordered(TExprBase node, TExprContext& ctx) {
TExprBase output = node;
if (node.Maybe<TCoUnorderedBase>().Input().Maybe<TDqCnUnionAll>()) {
diff --git a/ydb/core/kqp/opt/physical/kqp_opt_phy_limit.cpp b/ydb/core/kqp/opt/physical/kqp_opt_phy_limit.cpp
index f89cf425ad..69ed31b600 100644
--- a/ydb/core/kqp/opt/physical/kqp_opt_phy_limit.cpp
+++ b/ydb/core/kqp/opt/physical/kqp_opt_phy_limit.cpp
@@ -122,7 +122,9 @@ TExprBase KqpApplyLimitToOlapReadTable(TExprBase node, TExprContext& ctx, const
return node; // already set
}
if (direction == ESortDirection::Reverse) {
- settings.SetReverse();
+ settings.SetSorting(ERequestSorting::DESC);
+ } else if (direction == ESortDirection::Forward) {
+ settings.SetSorting(ERequestSorting::ASC);
}
auto keySelector = topSort.KeySelectorLambda();
diff --git a/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_filter.cpp b/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_filter.cpp
index 6f18f8ce1b..542137581e 100644
--- a/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_filter.cpp
+++ b/ydb/core/kqp/opt/physical/kqp_opt_phy_olap_filter.cpp
@@ -883,4 +883,32 @@ TExprBase KqpPushOlapFilter(TExprBase node, TExprContext& ctx, const TKqpOptimiz
#endif
}
+TExprBase KqpAddColumnForEmptyColumnsOlapRead(TExprBase node, TExprContext& ctx, const TKqpOptimizeContext& kqpCtx) {
+ if (!node.Maybe<TKqpReadOlapTableRanges>()) {
+ return node;
+ }
+
+ auto readOlap = node.Cast<TKqpReadOlapTableRanges>();
+ if (readOlap.Columns().Size()!=0) {
+ return node;
+ }
+
+ const auto& tableData = kqpCtx.Tables->ExistingTable(kqpCtx.Cluster, readOlap.Table().Path().Value());
+ auto keyColumns = tableData.Metadata->KeyColumnNames;
+
+ TVector<TExprNode::TPtr> newColumns;
+ newColumns.push_back(ctx.NewAtom(node.Pos(), keyColumns[0]));
+
+ return Build<TKqpReadOlapTableRanges>(ctx, node.Pos())
+ .Table(readOlap.Table())
+ .Ranges(readOlap.Ranges())
+ .Columns()
+ .Add(newColumns)
+ .Build()
+ .Settings(readOlap.Settings())
+ .ExplainPrompt(readOlap.ExplainPrompt())
+ .Process(readOlap.Process())
+ .Done();
+}
+
} // namespace NKikimr::NKqp::NOpt
diff --git a/ydb/core/kqp/opt/physical/kqp_opt_phy_rules.h b/ydb/core/kqp/opt/physical/kqp_opt_phy_rules.h
index 7128016625..587672cb41 100644
--- a/ydb/core/kqp/opt/physical/kqp_opt_phy_rules.h
+++ b/ydb/core/kqp/opt/physical/kqp_opt_phy_rules.h
@@ -67,6 +67,9 @@ NYql::NNodes::TExprBase KqpPropagatePrecomuteScalarRowset(NYql::NNodes::TExprBas
NYql::NNodes::TExprBase KqpBuildWriteConstraint(NYql::NNodes::TExprBase node, NYql::TExprContext& ctx,
NYql::IOptimizationContext& optCtx, const NYql::TParentsMap& parentsMap, bool allowStageMultiUsage);
+NYql::NNodes::TExprBase KqpAddColumnForEmptyColumnsOlapRead(NYql::NNodes::TExprBase node, NYql::TExprContext& ctx,
+ const TKqpOptimizeContext& kqpCtx);
+
bool AllowFuseJoinInputs(NYql::NNodes::TExprBase node);
bool UseSource(const TKqpOptimizeContext& kqpCtx, const NYql::TKikimrTableDescription& tableDesc);
diff --git a/ydb/core/kqp/opt/physical/kqp_opt_phy_sort.cpp b/ydb/core/kqp/opt/physical/kqp_opt_phy_sort.cpp
index efa9804fce..f1630e3bca 100644
--- a/ydb/core/kqp/opt/physical/kqp_opt_phy_sort.cpp
+++ b/ydb/core/kqp/opt/physical/kqp_opt_phy_sort.cpp
@@ -72,17 +72,13 @@ TExprBase KqpRemoveRedundantSortByPk(TExprBase node, TExprContext& ctx, const TK
return node;
}
- if (settings.Reverse) {
- return node;
- }
-
- settings.SetReverse();
- settings.SetSorted();
-
+ AFL_ENSURE(settings.GetSorting() == ERequestSorting::NONE);
+ settings.SetSorting(ERequestSorting::DESC);
input = BuildReadNode(input.Pos(), ctx, input, settings);
} else if (direction == ESortDirection::Forward) {
if (UseSource(kqpCtx, tableDesc)) {
- settings.SetSorted();
+ AFL_ENSURE(settings.GetSorting() == ERequestSorting::NONE);
+ settings.SetSorting(ERequestSorting::ASC);
input = BuildReadNode(input.Pos(), ctx, input, settings);
}
}
diff --git a/ydb/core/kqp/provider/yql_kikimr_datasource.cpp b/ydb/core/kqp/provider/yql_kikimr_datasource.cpp
index 438faeca1b..e888368766 100644
--- a/ydb/core/kqp/provider/yql_kikimr_datasource.cpp
+++ b/ydb/core/kqp/provider/yql_kikimr_datasource.cpp
@@ -9,6 +9,7 @@
#include <yql/essentials/core/yql_expr_optimize.h>
#include <yql/essentials/core/yql_expr_type_annotation.h>
+#include <yql/essentials/core/yql_opt_utils.h>
#include <yql/essentials/providers/common/schema/expr/yql_expr_schema.h>
#include <ydb/library/yql/providers/dq/expr_nodes/dqs_expr_nodes.h>
#include <ydb/library/yql/dq/expr_nodes/dq_expr_nodes.h>
@@ -95,6 +96,15 @@ namespace {
using namespace NKikimr;
using namespace NNodes;
+bool IsShowCreate(const TExprNode& read) {
+ if (read.ChildrenSize() <= TKiReadTable::idx_Settings) {
+ return false;
+ }
+ const auto& settings = *read.Child(TKiReadTable::idx_Settings);
+ return HasSetting(settings, "showCreateTable")
+ || HasSetting(settings, "showCreateView");
+}
+
class TKiSourceIntentDeterminationTransformer: public TKiSourceVisitorTransformer {
public:
TKiSourceIntentDeterminationTransformer(TIntrusivePtr<TKikimrSessionContext> sessionCtx)
@@ -795,7 +805,7 @@ public:
retChildren[0] = newRead;
return ctx.ChangeChildren(*node, std::move(retChildren));
}
- } else if (tableDesc.Metadata->Kind == EKikimrTableKind::View) {
+ } else if (tableDesc.Metadata->Kind == EKikimrTableKind::View && !IsShowCreate(*read)) {
if (!SessionCtx->Config().FeatureFlags.GetEnableViews()) {
ctx.AddError(TIssue(node->Pos(ctx),
"Views are disabled. Please contact your system administrator to enable the feature"));
diff --git a/ydb/core/kqp/provider/yql_kikimr_exec.cpp b/ydb/core/kqp/provider/yql_kikimr_exec.cpp
index f301eb6d8b..ae78c7aef2 100644
--- a/ydb/core/kqp/provider/yql_kikimr_exec.cpp
+++ b/ydb/core/kqp/provider/yql_kikimr_exec.cpp
@@ -553,7 +553,7 @@ namespace {
FromString<ui32>(setting.Value().Cast<TCoDataCtor>().Literal().Cast<TCoAtom>().Value())
);
} else if (name == "setMaxPartitions") {
- request->mutable_alter_partitioning_settings()->set_set_partition_count_limit(
+ request->mutable_alter_partitioning_settings()->set_set_max_active_partitions(
FromString<ui32>(setting.Value().Cast<TCoDataCtor>().Literal().Cast<TCoAtom>().Value())
);
} else if (name == "setRetentionPeriod") {
diff --git a/ydb/core/kqp/provider/yql_kikimr_opt_build.cpp b/ydb/core/kqp/provider/yql_kikimr_opt_build.cpp
index af58f5ec04..4c21926196 100644
--- a/ydb/core/kqp/provider/yql_kikimr_opt_build.cpp
+++ b/ydb/core/kqp/provider/yql_kikimr_opt_build.cpp
@@ -876,6 +876,16 @@ TVector<TKiDataQueryBlock> MakeKiDataQueryBlocks(TExprBase node, const TKiExplor
return queryBlocks;
}
+TString GetShowCreateType(const TExprNode& settings) {
+ if (HasSetting(settings, "showCreateTable")) {
+ return "showCreateTable";
+ }
+ if (HasSetting(settings, "showCreateView")) {
+ return "showCreateView";
+ }
+ return "";
+}
+
} // namespace
TExprNode::TPtr KiBuildQuery(TExprBase node, TExprContext& ctx, TStringBuf database, TIntrusivePtr<TKikimrTablesData> tablesData,
@@ -953,23 +963,23 @@ TExprNode::TPtr KiBuildQuery(TExprBase node, TExprContext& ctx, TStringBuf datab
return res;
}
- TNodeOnNodeOwnedMap showCreateTableReadReplaces;
- VisitExpr(node.Ptr(), [&showCreateTableReadReplaces](const TExprNode::TPtr& input) -> bool {
+ TNodeOnNodeOwnedMap showCreateReadReplacements;
+ VisitExpr(node.Ptr(), [&showCreateReadReplacements](const TExprNode::TPtr& input) -> bool {
TExprBase currentNode(input);
if (auto maybeReadTable = currentNode.Maybe<TKiReadTable>()) {
auto readTable = maybeReadTable.Cast();
for (auto setting : readTable.Settings()) {
auto name = setting.Name().Value();
- if (name == "showCreateTable") {
- showCreateTableReadReplaces[input.Get()] = nullptr;
+ if (name == "showCreateTable" || name == "showCreateView") {
+ showCreateReadReplacements[input.Get()] = nullptr;
}
}
}
return true;
});
- if (!showCreateTableReadReplaces.empty()) {
- for (auto& [input, _] : showCreateTableReadReplaces) {
+ if (!showCreateReadReplacements.empty()) {
+ for (auto& [input, _] : showCreateReadReplacements) {
TKiReadTable content(input);
TExprNode::TPtr path = ctx.NewCallable(
@@ -983,6 +993,9 @@ TExprNode::TPtr KiBuildQuery(TExprBase node, TExprContext& ctx, TStringBuf datab
TKikimrKey key(ctx);
YQL_ENSURE(key.Extract(content.TableKey().Ref()));
+ auto type = GetShowCreateType(content.Settings().Ref());
+ YQL_ENSURE(!type.empty());
+
auto sysViewRewrittenValue = Build<TCoNameValueTuple>(ctx, node.Pos())
.Name()
.Build("sysViewRewritten")
@@ -991,12 +1004,12 @@ TExprNode::TPtr KiBuildQuery(TExprBase node, TExprContext& ctx, TStringBuf datab
.Build()
.Done();
- auto showCreateTableValue = Build<TCoNameValueTuple>(ctx, node.Pos())
+ auto showCreateTypeValue = Build<TCoNameValueTuple>(ctx, node.Pos())
.Name()
- .Build("showCreateTable")
+ .Build(type)
.Done();
- auto showCreateTableRead = Build<TCoRead>(ctx, node.Pos())
+ auto showCreateRead = Build<TCoRead>(ctx, node.Pos())
.World<TCoWorld>().Build()
.DataSource<TCoDataSource>()
.Category(ctx.NewAtom(node.Pos(), KikimrProviderName))
@@ -1009,61 +1022,68 @@ TExprNode::TPtr KiBuildQuery(TExprBase node, TExprContext& ctx, TStringBuf datab
.Add(ctx.NewCallable(node.Pos(), "Void", {}))
.Add(ctx.NewList(node.Pos(), {}))
.Add(sysViewRewrittenValue)
- .Add(showCreateTableValue)
+ .Add(showCreateTypeValue)
.Build()
.Done().Ptr();
- showCreateTableReadReplaces[input] = showCreateTableRead;
+ showCreateReadReplacements[input] = showCreateRead;
}
- auto res = ctx.ReplaceNodes(std::move(node.Ptr()), showCreateTableReadReplaces);
+ auto res = ctx.ReplaceNodes(std::move(node.Ptr()), showCreateReadReplacements);
TExprBase resNode(res);
- TNodeOnNodeOwnedMap showCreateTableRightReplaces;
- VisitExpr(resNode.Ptr(), [&showCreateTableRightReplaces](const TExprNode::TPtr& input) -> bool {
+ TNodeOnNodeOwnedMap showCreateRightReplacements;
+ VisitExpr(resNode.Ptr(), [&showCreateRightReplacements](const TExprNode::TPtr& input) -> bool {
TExprBase currentNode(input);
if (auto rightMaybe = currentNode.Maybe<TCoRight>()) {
auto right = rightMaybe.Cast();
if (auto maybeRead = right.Input().Maybe<TCoRead>()) {
auto read = maybeRead.Cast();
bool isSysViewRewritten = false;
- bool isShowCreateTable = false;
+ bool isShowCreate = false;
for (auto arg : read.FreeArgs()) {
if (auto tuple = arg.Maybe<TCoNameValueTuple>()) {
auto name = tuple.Cast().Name().Value();
if (name == "sysViewRewritten") {
isSysViewRewritten = true;
- } else if (name == "showCreateTable") {
- isShowCreateTable = true;
+ } else if (name == "showCreateTable" || name == "showCreateView") {
+ isShowCreate = true;
}
}
}
- if (isShowCreateTable && isSysViewRewritten) {
- showCreateTableRightReplaces[input.Get()] = nullptr;
+ if (isShowCreate && isSysViewRewritten) {
+ showCreateRightReplacements[input.Get()] = nullptr;
}
}
}
return true;
});
- for (auto& [input, _] : showCreateTableRightReplaces) {
+ for (auto& [input, _] : showCreateRightReplacements) {
TCoRight right(input);
TCoRead read(right.Input().Ptr());
- TString tablePath;
+ TString path;
+ TString pathType;
for (auto arg : read.FreeArgs()) {
if (auto tuple = arg.Maybe<TCoNameValueTuple>()) {
auto name = tuple.Cast().Name().Value();
if (name == "sysViewRewritten") {
- tablePath = tuple.Cast().Value().Cast().Cast<TCoAtom>().StringValue();
+ path = tuple.Cast().Value().Cast().Cast<TCoAtom>().StringValue();
+ }
+ if (name == "showCreateTable") {
+ pathType = "Table";
+ }
+ if (name == "showCreateView") {
+ pathType = "View";
}
}
}
- YQL_ENSURE(!tablePath.empty(), "Unexpected empty table path for SHOW CREATE TABLE");
+ YQL_ENSURE(!path.empty(), "Unexpected empty path for SHOW CREATE " << pathType.to_upper());
- auto tempTablePath = tablesData->GetTempTablePath(tablePath);
+ auto tempTablePath = tablesData->GetTempTablePath(path);
if (tempTablePath) {
- tablePath = tempTablePath.value();
+ path = tempTablePath.value();
}
auto showCreateArg = Build<TCoArgument>(ctx, resNode.Pos())
@@ -1082,7 +1102,7 @@ TExprNode::TPtr KiBuildQuery(TExprBase node, TExprContext& ctx, TStringBuf datab
auto pathCondition = Build<TCoCmpEqual>(ctx, resNode.Pos())
.Left(columnPath)
.Right<TCoString>()
- .Literal().Build(tablePath)
+ .Literal().Build(path)
.Build()
.Done();
@@ -1095,7 +1115,7 @@ TExprNode::TPtr KiBuildQuery(TExprBase node, TExprContext& ctx, TStringBuf datab
auto pathTypeCondition = Build<TCoCmpEqual>(ctx, resNode.Pos())
.Left(columnPathType)
.Right<TCoString>()
- .Literal().Build("Table")
+ .Literal().Build(pathType)
.Build()
.Done();
@@ -1121,7 +1141,7 @@ TExprNode::TPtr KiBuildQuery(TExprBase node, TExprContext& ctx, TStringBuf datab
.Lambda(lambda)
.Done().Ptr();
- showCreateTableRightReplaces[input] = filterData;
+ showCreateRightReplacements[input] = filterData;
}
ctx.Step
@@ -1133,7 +1153,7 @@ TExprNode::TPtr KiBuildQuery(TExprBase node, TExprContext& ctx, TStringBuf datab
.Repeat(TExprStep::LoadTablesMetadata)
.Repeat(TExprStep::RewriteIO);
- return ctx.ReplaceNodes(std::move(resNode.Ptr()), showCreateTableRightReplaces);
+ return ctx.ReplaceNodes(std::move(resNode.Ptr()), showCreateRightReplacements);
}
TKiExploreTxResults txExplore;
diff --git a/ydb/core/kqp/provider/yql_kikimr_type_ann.cpp b/ydb/core/kqp/provider/yql_kikimr_type_ann.cpp
index 3937ab160b..8ea634a0af 100644
--- a/ydb/core/kqp/provider/yql_kikimr_type_ann.cpp
+++ b/ydb/core/kqp/provider/yql_kikimr_type_ann.cpp
@@ -167,8 +167,8 @@ private:
if (!SessionCtx->Config().FeatureFlags.GetEnableShowCreate()) {
for (auto setting : readTable.Settings()) {
auto name = setting.Name().Value();
- if (name == "showCreateTable") {
- ctx.AddError(TIssue(ctx.GetPosition(node.Pos()),
+ if (name == "showCreateTable" || name == "showCreateView") {
+ ctx.AddError(TIssue(ctx.GetPosition(node.Pos()),
TStringBuilder() << "SHOW CREATE statement is not supported"));
return TStatus::Error;
}
diff --git a/ydb/core/kqp/query_compiler/kqp_mkql_compiler.cpp b/ydb/core/kqp/query_compiler/kqp_mkql_compiler.cpp
index fa57a38899..6158910b6b 100644
--- a/ydb/core/kqp/query_compiler/kqp_mkql_compiler.cpp
+++ b/ydb/core/kqp/query_compiler/kqp_mkql_compiler.cpp
@@ -197,7 +197,7 @@ TKqpKeyRange MakeKeyRange(const TKqlReadTableBase& readTable, const TKqlCompileC
if (settings.ItemsLimit) {
keyRange.ItemsLimit = MkqlBuildExpr(*settings.ItemsLimit, buildCtx);
}
- keyRange.Reverse = settings.Reverse;
+ keyRange.Reverse = settings.IsReverse();
return keyRange;
}
@@ -210,7 +210,7 @@ TKqpKeyRanges MakeComputedKeyRanges(const TKqlReadTableRangesBase& readTable, co
TKqpKeyRanges ranges = {
.Ranges = MkqlBuildExpr(readTable.Ranges().Ref(), buildCtx),
.ItemsLimit = settings.ItemsLimit ? MkqlBuildExpr(*settings.ItemsLimit, buildCtx) : ctx.PgmBuilder().NewNull(),
- .Reverse = settings.Reverse,
+ .Reverse = settings.IsReverse(),
};
return ranges;
diff --git a/ydb/core/kqp/query_compiler/kqp_query_compiler.cpp b/ydb/core/kqp/query_compiler/kqp_query_compiler.cpp
index 6dd9af5d8e..9cbba6d02f 100644
--- a/ydb/core/kqp/query_compiler/kqp_query_compiler.cpp
+++ b/ydb/core/kqp/query_compiler/kqp_query_compiler.cpp
@@ -360,7 +360,7 @@ void FillReadRange(const TKqpWideReadTable& read, const TKikimrTableMetadata& ta
}
}
- readProto.SetReverse(settings.Reverse);
+ readProto.SetReverse(settings.IsReverse());
}
template <typename TReader, typename TProto>
@@ -396,13 +396,13 @@ void FillReadRanges(const TReader& read, const TKikimrTableMetadata&, TProto& re
}
if constexpr (std::is_same_v<TProto, NKqpProto::TKqpPhyOpReadOlapRanges>) {
- readProto.SetSorted(settings.Sorted);
+ readProto.SetSorted(settings.IsSorted());
if (settings.TabletId) {
readProto.SetTabletId(*settings.TabletId);
}
}
- readProto.SetReverse(settings.Reverse);
+ readProto.SetReverse(settings.IsReverse());
}
template <typename TEffectCallable, typename TEffectProto>
@@ -919,7 +919,7 @@ private:
i.MutableProgram()->MutableSettings()->SetLevelDataPrediction(rPredictor.GetLevelDataVolume(i.GetProgram().GetSettings().GetStageLevel()));
}
- txProto.SetEnableShuffleElimination(Config->OptShuffleElimination.Get().GetOrElse(false));
+ txProto.SetEnableShuffleElimination(Config->OptShuffleElimination.Get().GetOrElse(Config->DefaultEnableShuffleElimination));
txProto.SetHasEffects(hasEffectStage);
for (const auto& paramBinding : tx.ParamBindings()) {
@@ -1045,8 +1045,8 @@ private:
}
auto readSettings = TKqpReadTableSettings::Parse(settings.Settings().Cast());
- readProto.SetReverse(readSettings.Reverse);
- readProto.SetSorted(readSettings.Sorted);
+ readProto.SetReverse(readSettings.IsReverse());
+ readProto.SetSorted(readSettings.IsSorted());
YQL_ENSURE(readSettings.SkipNullKeys.empty());
if (readSettings.SequentialInFlight) {
@@ -1336,13 +1336,16 @@ private:
shuffleProto.AddKeyColumns(TString(keyColumn));
}
- if (Config->OptShuffleElimination.Get().GetOrElse(false)) {
+ if (Config->OptShuffleElimination.Get().GetOrElse(Config->DefaultEnableShuffleElimination)) {
auto& columnHashV1 = *shuffleProto.MutableColumnShardHashV1();
const auto& outputType = NYql::NDq::GetDqConnectionType(connection, ctx);
auto structType = outputType->Cast<TListExprType>()->GetItemType()->Cast<TStructExprType>();
for (const auto& column: shuffle.KeyColumns().Ptr()->Children()) {
auto ty = NYql::NDq::GetColumnType(connection, *structType, column->Content(), column->Pos(), ctx);
+ if (ty->GetKind() == ETypeAnnotationKind::List) {
+ ty = ty->Cast<TListExprType>()->GetItemType();
+ }
NYql::NUdf::EDataSlot slot;
switch (ty->GetKind()) {
case ETypeAnnotationKind::Data: {
@@ -1351,6 +1354,9 @@ private:
}
case ETypeAnnotationKind::Optional: {
auto optionalType = ty->Cast<TOptionalExprType>()->GetItemType();
+ if (optionalType->GetKind() == ETypeAnnotationKind::List) {
+ optionalType = optionalType->Cast<TListExprType>()->GetItemType();
+ }
Y_ENSURE(
optionalType->GetKind() == ETypeAnnotationKind::Data,
TStringBuilder{} << "Can't retrieve type from optional" << static_cast<std::int64_t>(optionalType->GetKind()) << "for ColumnHashV1 Shuffling"
diff --git a/ydb/core/kqp/runtime/kqp_read_actor.cpp b/ydb/core/kqp/runtime/kqp_read_actor.cpp
index 243e81d3fc..9a5b91eded 100644
--- a/ydb/core/kqp/runtime/kqp_read_actor.cpp
+++ b/ydb/core/kqp/runtime/kqp_read_actor.cpp
@@ -838,7 +838,11 @@ public:
record.MutableTableId()->SetTableId(Settings->GetTable().GetTableId().GetTableId());
record.MutableTableId()->SetSchemaVersion(Settings->GetTable().GetSchemaVersion());
- record.SetReverse(Settings->GetReverse());
+ if (Settings->HasOptionalSorting()) {
+ record.SetReverse(Settings->GetOptionalSorting() == (ui32)ERequestSorting::DESC);
+ } else {
+ record.SetReverse(Settings->GetReverse());
+ }
if (limit) {
record.SetMaxRows(*limit);
record.SetTotalRowsLimit(*limit);
diff --git a/ydb/core/kqp/runtime/kqp_write_actor.cpp b/ydb/core/kqp/runtime/kqp_write_actor.cpp
index 63b47668c8..1118f45312 100644
--- a/ydb/core/kqp/runtime/kqp_write_actor.cpp
+++ b/ydb/core/kqp/runtime/kqp_write_actor.cpp
@@ -2921,7 +2921,7 @@ public:
Settings.GetTable().GetOwnerId(),
Settings.GetTable().GetTableId(),
Settings.GetTable().GetVersion())
- , ForwardWriteActorSpan(TWilsonKqp::ForwardWriteActor, NWilson::TTraceId(args.TraceId), "TKqpForwardWriteActor")
+ , ForwardWriteActorSpan(TWilsonKqp::ForwardWriteActor, NWilson::TTraceId(args.TraceId), "ForwardWriteActor")
{
EgressStats.Level = args.StatsLevel;
diff --git a/ydb/core/kqp/session_actor/kqp_session_actor.cpp b/ydb/core/kqp/session_actor/kqp_session_actor.cpp
index 137c75b4c0..4fc64ec1a1 100644
--- a/ydb/core/kqp/session_actor/kqp_session_actor.cpp
+++ b/ydb/core/kqp/session_actor/kqp_session_actor.cpp
@@ -1450,13 +1450,32 @@ public:
txCtx->TxManager->SetTopicOperations(std::move(request.TopicOperations));
txCtx->TxManager->AddTopicsToShards();
+ auto alloc = std::make_shared<NKikimr::NMiniKQL::TScopedAlloc>(
+ __LOCATION__, NKikimr::TAlignedPagePoolCounters(), true, false);
+
+ const auto& queryLimitsProto = Settings.TableService.GetQueryLimits();
+ const auto& bufferLimitsProto = queryLimitsProto.GetBufferLimits();
+ const ui64 writeBufferMemoryLimit = bufferLimitsProto.HasWriteBufferMemoryLimitBytes()
+ ? bufferLimitsProto.GetWriteBufferMemoryLimitBytes()
+ : ui64(Settings.MkqlMaxMemoryLimit);
+ const ui64 writeBufferInitialMemoryLimit = writeBufferMemoryLimit < ui64(Settings.MkqlInitialMemoryLimit)
+ ? writeBufferMemoryLimit
+ : ui64(Settings.MkqlInitialMemoryLimit);
+ alloc->SetLimit(writeBufferInitialMemoryLimit);
+ alloc->Ref().SetIncreaseMemoryLimitCallback([this, alloc=alloc.get(), writeBufferMemoryLimit](ui64 currentLimit, ui64 required) {
+ if (required < writeBufferMemoryLimit) {
+ LOG_D("Increase memory limit from " << currentLimit << " to " << required);
+ alloc->SetLimit(required);
+ }
+ });
+
TKqpBufferWriterSettings settings {
.SessionActorId = SelfId(),
.TxManager = txCtx->TxManager,
.TraceId = request.TraceId.GetTraceId(),
.Counters = Counters,
.TxProxyMon = RequestCounters->TxProxyMon,
- .Alloc = txCtx->TxAlloc->Alloc,
+ .Alloc = std::move(alloc),
};
auto* actor = CreateKqpBufferWriterActor(std::move(settings));
txCtx->BufferActorId = RegisterWithSameMailbox(actor);
@@ -1548,14 +1567,13 @@ public:
}
if (QueryState->ReportStats()) {
- if (QueryState->GetStatsMode() >= Ydb::Table::QueryStatsCollection::STATS_COLLECTION_FULL) {
- NKqpProto::TKqpStatsQuery& stats = *ev->Get()->Record.MutableQueryStats();
- NKqpProto::TKqpStatsQuery executionStats;
- executionStats.Swap(&stats);
- stats = QueryState->QueryStats.ToProto();
- stats.MutableExecutions()->MergeFrom(executionStats.GetExecutions());
- ev->Get()->Record.SetQueryPlan(SerializeAnalyzePlan(stats, QueryState->UserRequestContext->PoolId));
- }
+ NKqpProto::TKqpStatsQuery& stats = *ev->Get()->Record.MutableQueryStats();
+ NKqpProto::TKqpStatsQuery executionStats;
+ executionStats.Swap(&stats);
+ stats = QueryState->QueryStats.ToProto();
+ stats.MutableExecutions()->MergeFrom(executionStats.GetExecutions());
+ ev->Get()->Record.SetQueryPlan(SerializeAnalyzePlan(stats, QueryState->UserRequestContext->PoolId));
+ stats.SetDurationUs((TInstant::Now() - QueryState->StartTime).MicroSeconds());
}
LOG_D("Forwarded TEvExecuterProgress to " << QueryState->RequestActorId);
diff --git a/ydb/core/kqp/tools/combiner_perf/bin/main.cpp b/ydb/core/kqp/tools/combiner_perf/bin/main.cpp
new file mode 100644
index 0000000000..ef62a36e6c
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/bin/main.cpp
@@ -0,0 +1,181 @@
+#include <ydb/core/kqp/tools/combiner_perf/printout.h>
+#include <ydb/core/kqp/tools/combiner_perf/simple_last.h>
+#include <ydb/core/kqp/tools/combiner_perf/simple.h>
+#include <ydb/core/kqp/tools/combiner_perf/tpch_last.h>
+#include <ydb/core/kqp/tools/combiner_perf/simple_block.h>
+
+#include <library/cpp/lfalloc/alloc_profiler/profiler.h>
+
+#include <util/stream/output.h>
+#include <util/stream/file.h>
+#include <util/string/printf.h>
+#include <util/system/compiler.h>
+
+using NKikimr::NMiniKQL::TRunParams;
+
+class TPrintingResultCollector : public TTestResultCollector
+{
+public:
+ virtual void SubmitTestNameAndParams(const TRunParams& runParams, const char* testName, const std::optional<bool> llvm, const std::optional<bool> spilling) override
+ {
+ Cout << "------------------------------" << Endl;
+ Cout << testName;
+ if (llvm.has_value()) {
+ Cout << ", " << (llvm.value() ? "+" : "-") << "llvm";
+ }
+ if (spilling.has_value()) {
+ Cout << ", " << (spilling.value() ? "+" : "-") << "spilling";
+ }
+ Cout << Endl;
+ Cout << "Data rows total: " << runParams.RowsPerRun << " x " << runParams.NumRuns << Endl;
+ Cout << (runParams.MaxKey + 1) << " distinct numeric keys" << Endl;
+ Cout << "Block size: " << runParams.BlockSize << Endl;
+ Cout << "Long strings: " << (runParams.LongStringKeys ? "yes" : "no") << Endl;
+ Cout << Endl;
+ }
+
+ virtual void SubmitTimings(const TDuration& graphTime, const TDuration& referenceTime, const std::optional<TDuration> streamTime) override
+ {
+ Cout << "Graph runtime is: " << graphTime << " vs. reference C++ implementation: " << referenceTime << Endl;
+
+ if (streamTime.has_value()) {
+ Cout << "Input stream own iteration time: " << *streamTime << Endl;
+ Cout << "Graph time - stream own time = " << (*streamTime <= graphTime ? graphTime - *streamTime : TDuration::Zero()) << Endl;
+ Cout << "C++ implementation time - devnull time = " << (*streamTime <= referenceTime ? referenceTime - *streamTime : TDuration::Zero()) << Endl;
+ }
+ }
+};
+
+class TWikiResultCollector : public TTestResultCollector
+{
+public:
+ TWikiResultCollector()
+ {
+ Cout << "#|" << Endl;
+ Cout << "|| Test name | LLVM | Spilling | RowsTotal | Distinct keys | Block size | Input stream own time (s) | Graph time - stream time (s) | C++ time - stream time (s) | Shame ratio ||" << Endl;
+ }
+
+ ~TWikiResultCollector()
+ {
+ Cout << "|#" << Endl;
+ }
+
+ virtual void SubmitTestNameAndParams(const TRunParams& runParams, const char* testName, const std::optional<bool> llvm, const std::optional<bool> spilling) override
+ {
+ Cout << "|| ";
+ Cout << testName << " | ";
+ if (llvm.has_value()) {
+ Cout << (llvm.value() ? "+" : " ");
+ }
+ Cout << " | ";
+ if (spilling.has_value()) {
+ Cout << (spilling.value() ? "+" : " ");
+ }
+ Cout << " | ";
+
+ Cout << (runParams.RowsPerRun * runParams.NumRuns) << " | " << (runParams.MaxKey + 1) << " | ";
+ if (TStringBuf(testName).Contains("Block")) {
+ Cout << runParams.BlockSize;
+ }
+ Cout << " | ";
+ }
+
+ static TString FancyDuration(const TDuration duration)
+ {
+ const auto ms = duration.MilliSeconds();
+ if (!ms) {
+ return " ";
+ }
+ return Sprintf("%.2f", (ms / 1000.0));
+ }
+
+ virtual void SubmitTimings(const TDuration& graphTime, const TDuration& referenceTime, const std::optional<TDuration> streamTime) override
+ {
+ TDuration streamTimeOrZero = (streamTime.has_value()) ? streamTime.value() : TDuration::Zero();
+ TDuration corrGraphTime = streamTimeOrZero <= graphTime ? graphTime - streamTimeOrZero : TDuration::Zero();
+ TDuration corrRefTime = streamTimeOrZero <= referenceTime ? referenceTime - streamTimeOrZero : TDuration::Zero();
+
+ TString diff;
+ if (corrRefTime.MilliSeconds() > 0) {
+ diff = Sprintf("%.2f", corrGraphTime.MilliSeconds() * 1.0 / corrRefTime.MilliSeconds());
+ }
+
+ Cout << FancyDuration(streamTimeOrZero) << " | " << FancyDuration(corrGraphTime) << " | " << FancyDuration(corrRefTime) << " | " << diff << " ||" << Endl;
+ Cout.Flush();
+ }
+};
+
+void DoFullPass(bool withSpilling)
+{
+ using namespace NKikimr::NMiniKQL;
+
+ TWikiResultCollector printout;
+
+ TRunParams runParams;
+
+ runParams.NumRuns = 20;
+ runParams.RowsPerRun = 5'000'000;
+    runParams.MaxKey = 100 - 1;
+ runParams.LongStringKeys = false;
+
+ const std::vector<size_t> numKeys = {4u, 1000u, 100'000u, 200'000u};
+ const std::vector<size_t> blockSizes = {128u, 8192u};
+
+ auto doSimple = [&printout, numKeys](const TRunParams& params) {
+ for (size_t keyCount : numKeys) {
+ auto runParams = params;
+ runParams.MaxKey = keyCount - 1;
+ RunTestSimple<false>(runParams, printout);
+ RunTestSimple<true>(runParams, printout);
+ }
+ };
+
+ auto doSimpleLast = [&printout, &numKeys, withSpilling](const TRunParams& params) {
+ for (size_t keyCount : numKeys) {
+ auto runParams = params;
+ runParams.MaxKey = keyCount - 1;
+ RunTestCombineLastSimple<false, false>(runParams, printout);
+ RunTestCombineLastSimple<true, false>(runParams, printout);
+ if (withSpilling) {
+ RunTestCombineLastSimple<false, true>(runParams, printout);
+ RunTestCombineLastSimple<true, true>(runParams, printout);
+ }
+ }
+ };
+
+ auto doBlockHashed = [&printout, &numKeys, &blockSizes](const TRunParams& params) {
+ for (size_t keyCount : numKeys) {
+ for (size_t blockSize : blockSizes) {
+ auto runParams = params;
+ runParams.MaxKey = keyCount - 1;
+ runParams.BlockSize = blockSize;
+ RunTestBlockCombineHashedSimple<false, false>(runParams, printout);
+ }
+ }
+ };
+
+ doSimple(runParams);
+ doSimpleLast(runParams);
+ doBlockHashed(runParams);
+}
+
+int main(int argc, const char* argv[])
+{
+ Y_UNUSED(argc);
+ Y_UNUSED(argv);
+
+ if (false) {
+ NAllocProfiler::StartAllocationSampling(true);
+ }
+
+ constexpr int NumIterations = 1;
+
+ for (int i = 0; i < NumIterations; ++i) {
+ DoFullPass(false);
+ }
+
+ if (false) {
+ TFileOutput out("memory_profile");
+ NAllocProfiler::StopAllocationSampling(out, 10000);
+ }
+}
diff --git a/ydb/core/kqp/tools/combiner_perf/bin/ya.make b/ydb/core/kqp/tools/combiner_perf/bin/ya.make
new file mode 100644
index 0000000000..2bce61f2f7
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/bin/ya.make
@@ -0,0 +1,23 @@
+PROGRAM(combiner_perf)
+ALLOCATOR(LF_DBG)
+
+YQL_LAST_ABI_VERSION()
+
+IF (MKQL_RUNTIME_VERSION)
+ CFLAGS(
+ -DMKQL_RUNTIME_VERSION=$MKQL_RUNTIME_VERSION
+ )
+ENDIF()
+
+PEERDIR(
+ ydb/core/kqp/tools/combiner_perf
+ library/cpp/lfalloc/alloc_profiler
+ library/cpp/dwarf_backtrace
+ library/cpp/dwarf_backtrace/registry
+)
+
+SRCS(
+ main.cpp
+)
+
+END()
diff --git a/ydb/core/kqp/tools/combiner_perf/converters.cpp b/ydb/core/kqp/tools/combiner_perf/converters.cpp
new file mode 100644
index 0000000000..c8c30d5cae
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/converters.cpp
@@ -0,0 +1,14 @@
+#include "converters.h"
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+template<>
+std::string UnboxedToNative(const NUdf::TUnboxedValue& result)
+{
+ const NUdf::TStringRef val = result.AsStringRef();
+ return std::string(val.data(), val.size());
+}
+
+}
+} \ No newline at end of file
diff --git a/ydb/core/kqp/tools/combiner_perf/converters.h b/ydb/core/kqp/tools/combiner_perf/converters.h
new file mode 100644
index 0000000000..76a76062ff
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/converters.h
@@ -0,0 +1,41 @@
+#pragma once
+
+#include <yql/essentials/minikql/comp_nodes/ut/mkql_computation_node_ut.h>
+#include <yql/essentials/minikql/computation/mkql_computation_node_holders.h>
+#include <yql/essentials/minikql/comp_nodes/mkql_factories.h>
+#include <yql/essentials/minikql/computation/mock_spiller_factory_ut.h>
+
+#include <library/cpp/testing/unittest/registar.h>
+
+#include <util/system/compiler.h>
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+template<bool Embedded>
+void NativeToUnboxed(const ui64 value, NUdf::TUnboxedValue& result)
+{
+ result = NUdf::TUnboxedValuePod(value);
+}
+
+template<bool Embedded>
+void NativeToUnboxed(const std::string& value, NUdf::TUnboxedValue& result)
+{
+ if constexpr (Embedded) {
+ result = NUdf::TUnboxedValue::Embedded(value);
+ } else {
+ result = NUdf::TUnboxedValuePod(NUdf::TStringValue(value));
+ }
+}
+
+template<typename T>
+T UnboxedToNative(const NUdf::TUnboxedValue& result)
+{
+ return result.template Get<T>();
+}
+
+template<>
+std::string UnboxedToNative(const NUdf::TUnboxedValue& result);
+
+}
+}
diff --git a/ydb/core/kqp/tools/combiner_perf/factories.cpp b/ydb/core/kqp/tools/combiner_perf/factories.cpp
new file mode 100644
index 0000000000..672ef8b5d1
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/factories.cpp
@@ -0,0 +1,19 @@
+#include "factories.h"
+
+#include <yql/essentials/minikql/computation/mkql_computation_node_impl.h>
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+TComputationNodeFactory GetPerfTestFactory(TComputationNodeFactory customFactory) {
+ return [customFactory](TCallable& callable, const TComputationNodeFactoryContext& ctx) -> IComputationNode* {
+ if (callable.GetType()->GetName() == "TestList") {
+ return new TExternalComputationNode(ctx.Mutables);
+ }
+
+ return GetBuiltinFactory()(callable, ctx);
+ };
+}
+
+}
+}
diff --git a/ydb/core/kqp/tools/combiner_perf/factories.h b/ydb/core/kqp/tools/combiner_perf/factories.h
new file mode 100644
index 0000000000..9d396df804
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/factories.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include <yql/essentials/minikql/mkql_program_builder.h>
+#include <yql/essentials/minikql/comp_nodes/mkql_factories.h>
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+TComputationNodeFactory GetPerfTestFactory(TComputationNodeFactory customFactory = {});
+
+template<bool SPILLING>
+TRuntimeNode WideLastCombiner(TProgramBuilder& pb, TRuntimeNode flow, const TProgramBuilder::TWideLambda& extractor, const TProgramBuilder::TBinaryWideLambda& init, const TProgramBuilder::TTernaryWideLambda& update, const TProgramBuilder::TBinaryWideLambda& finish) {
+ return SPILLING ?
+ pb.WideLastCombinerWithSpilling(flow, extractor, init, update, finish):
+ pb.WideLastCombiner(flow, extractor, init, update, finish);
+}
+
+}
+}
diff --git a/ydb/core/kqp/tools/combiner_perf/printout.h b/ydb/core/kqp/tools/combiner_perf/printout.h
new file mode 100644
index 0000000000..0f2f431e89
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/printout.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#include "run_params.h"
+
+#include <util/datetime/base.h>
+#include <optional>
+
+class TTestResultCollector {
+public:
+ virtual void SubmitTestNameAndParams(const NKikimr::NMiniKQL::TRunParams& runParams, const char* testName, const std::optional<bool> llvm = {}, const std::optional<bool> spilling = {}) = 0;
+
+ virtual void SubmitTimings(const TDuration& graphTime, const TDuration& referenceTime, const std::optional<TDuration> streamTime = {}) = 0;
+
+ virtual ~TTestResultCollector() {};
+};
diff --git a/ydb/core/kqp/tools/combiner_perf/run_params.h b/ydb/core/kqp/tools/combiner_perf/run_params.h
new file mode 100644
index 0000000000..a12576e2d2
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/run_params.h
@@ -0,0 +1,17 @@
+#pragma once
+
+#include <util/system/defaults.h>
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+struct TRunParams {
+ size_t RowsPerRun = 0;
+ size_t NumRuns = 0;
+ size_t MaxKey = 0; // for numeric keys, the range is [0..MaxKey]
+ size_t BlockSize = 5000;
+ bool LongStringKeys = false;
+};
+
+}
+}
diff --git a/ydb/core/kqp/tools/combiner_perf/simple.cpp b/ydb/core/kqp/tools/combiner_perf/simple.cpp
new file mode 100644
index 0000000000..6ada8c91d3
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/simple.cpp
@@ -0,0 +1,86 @@
+#include "simple.h"
+
+#include "factories.h"
+#include "streams.h"
+#include "printout.h"
+
+#include <yql/essentials/minikql/comp_nodes/ut/mkql_computation_node_ut.h>
+#include <yql/essentials/minikql/computation/mkql_computation_node_holders.h>
+#include <yql/essentials/minikql/comp_nodes/mkql_factories.h>
+#include <yql/essentials/minikql/computation/mock_spiller_factory_ut.h>
+
+#include <library/cpp/testing/unittest/registar.h>
+
+#include <util/system/compiler.h>
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+template<bool LLVM>
+void RunTestSimple(const TRunParams& params, TTestResultCollector& printout)
+{
+ TSetup<LLVM> setup(GetPerfTestFactory());
+
+ printout.SubmitTestNameAndParams(params, __func__, LLVM);
+
+ TString64DataSampler sampler(params.RowsPerRun, params.MaxKey, params.NumRuns, params.LongStringKeys);
+    // or T6464DataSampler sampler(params.RowsPerRun, params.MaxKey, params.NumRuns); -- maybe make selectable from params
+ Cerr << "Sampler type: " << sampler.Describe() << Endl;
+
+ TProgramBuilder& pb = *setup.PgmBuilder;
+
+ const auto streamItemType = pb.NewMultiType({sampler.GetKeyType(pb), pb.NewDataType(NUdf::TDataType<ui64>::Id)});
+ const auto streamType = pb.NewStreamType(streamItemType);
+ const auto streamCallable = TCallableBuilder(pb.GetTypeEnvironment(), "TestList", streamType).Build();
+
+ const auto pgmReturn = pb.Collect(pb.NarrowMap(pb.WideCombiner(
+ pb.ToFlow(TRuntimeNode(streamCallable, false)),
+ 0ULL,
+ [&](TRuntimeNode::TList items) -> TRuntimeNode::TList { return { items.front() }; },
+ [&](TRuntimeNode::TList, TRuntimeNode::TList items) -> TRuntimeNode::TList { return { items.back() } ; },
+ [&](TRuntimeNode::TList, TRuntimeNode::TList items, TRuntimeNode::TList state) -> TRuntimeNode::TList {
+ return {pb.AggrAdd(state.front(), items.back())};
+ },
+ [&](TRuntimeNode::TList keys, TRuntimeNode::TList state) -> TRuntimeNode::TList {
+ return {keys.front(), state.front()};
+ }),
+ [&](TRuntimeNode::TList items) { return pb.NewTuple(items); }
+ ));
+
+ const auto graph = setup.BuildGraph(pgmReturn, {streamCallable});
+
+ // Measure the input stream run time
+ const auto devnullStream = sampler.MakeStream(graph->GetHolderFactory());
+ const auto devnullStart = TInstant::Now();
+ {
+ NUdf::TUnboxedValue columns[2];
+ while (devnullStream->WideFetch(columns, 2) == NUdf::EFetchStatus::Ok) {
+ }
+ }
+ const auto devnullTime = TInstant::Now() - devnullStart;
+
+ // Reference implementation (sum via an std::unordered_map)
+ auto referenceStream = sampler.MakeStream(graph->GetHolderFactory());
+ const auto t = TInstant::Now();
+ sampler.ComputeReferenceResult(*referenceStream);
+ const auto cppTime = TInstant::Now() - t;
+
+ // Compute graph implementation
+ auto myStream = NUdf::TUnboxedValuePod(sampler.MakeStream(graph->GetHolderFactory()).Release());
+ graph->GetEntryPoint(0, true)->SetValue(graph->GetContext(), std::move(myStream));
+
+ const auto graphTimeStart = TInstant::Now();
+ const auto& value = graph->GetValue();
+ const auto graphTime = TInstant::Now() - graphTimeStart;
+
+ // Verification
+ sampler.VerifyComputedValueVsReference(value);
+
+ printout.SubmitTimings(graphTime, cppTime, devnullTime);
+}
+
+template void RunTestSimple<false>(const TRunParams& params, TTestResultCollector& printout);
+template void RunTestSimple<true>(const TRunParams& params, TTestResultCollector& printout);
+
+}
+}
diff --git a/ydb/core/kqp/tools/combiner_perf/simple.h b/ydb/core/kqp/tools/combiner_perf/simple.h
new file mode 100644
index 0000000000..193312919f
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/simple.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#include "run_params.h"
+#include "printout.h"
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+template<bool LLVM>
+void RunTestSimple(const TRunParams& params, TTestResultCollector& printout);
+
+}
+}
diff --git a/ydb/core/kqp/tools/combiner_perf/simple_block.cpp b/ydb/core/kqp/tools/combiner_perf/simple_block.cpp
new file mode 100644
index 0000000000..bb143217a5
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/simple_block.cpp
@@ -0,0 +1,209 @@
+#include "simple_block.h"
+
+#include "factories.h"
+#include "streams.h"
+#include "printout.h"
+
+#include <yql/essentials/minikql/comp_nodes/ut/mkql_computation_node_ut.h>
+#include <yql/essentials/minikql/computation/mkql_computation_node_holders.h>
+#include <yql/essentials/minikql/comp_nodes/mkql_factories.h>
+#include <yql/essentials/minikql/computation/mock_spiller_factory_ut.h>
+#include <yql/essentials/minikql/computation/mkql_block_builder.h>
+
+#include <contrib/libs/apache/arrow/cpp/src/arrow/type_fwd.h>
+#include <contrib/libs/apache/arrow/cpp/src/arrow/array/array_primitive.h>
+#include <contrib/libs/apache/arrow/cpp/src/arrow/chunked_array.h>
+#include <contrib/libs/apache/arrow/cpp/src/arrow/array.h>
+
+#include <library/cpp/testing/unittest/registar.h>
+
+#include <util/system/compiler.h>
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+template<typename K>
+void UpdateMapFromBlocks(const NUdf::TUnboxedValue& key, const NUdf::TUnboxedValue& value, std::unordered_map<K, ui64>& result);
+
+template<>
+void UpdateMapFromBlocks(const NUdf::TUnboxedValue& key, const NUdf::TUnboxedValue& value, std::unordered_map<ui64, ui64>& result)
+{
+ auto datumKey = TArrowBlock::From(key).GetDatum();
+ auto datumValue = TArrowBlock::From(value).GetDatum();
+ UNIT_ASSERT(datumKey.is_array());
+ UNIT_ASSERT(datumValue.is_array());
+
+ const auto ui64keys = datumKey.template array_as<arrow::UInt64Array>();
+ const auto ui64values = datumValue.template array_as<arrow::UInt64Array>();
+ UNIT_ASSERT(!!ui64keys);
+ UNIT_ASSERT(!!ui64values);
+ UNIT_ASSERT_VALUES_EQUAL(ui64keys->length(), ui64values->length());
+
+ const size_t length = ui64keys->length();
+ for (size_t i = 0; i < length; ++i) {
+ result[ui64keys->Value(i)] += ui64values->Value(i);
+ }
+}
+
+template<>
+void UpdateMapFromBlocks(const NUdf::TUnboxedValue& key, const NUdf::TUnboxedValue& value, std::unordered_map<std::string, ui64>& result)
+{
+ auto datumKey = TArrowBlock::From(key).GetDatum();
+ auto datumValue = TArrowBlock::From(value).GetDatum();
+ UNIT_ASSERT(datumKey.is_arraylike());
+ UNIT_ASSERT(datumValue.is_array());
+
+ const auto ui64values = datumValue.template array_as<arrow::UInt64Array>();
+ UNIT_ASSERT(!!ui64values);
+
+ int64_t valueOffset = 0;
+
+ for (const auto& chunk : datumKey.chunks()) {
+ auto* barray = dynamic_cast<arrow::BinaryArray*>(chunk.get());
+ UNIT_ASSERT(barray != nullptr);
+ for (int64_t i = 0; i < barray->length(); ++i) {
+ auto key = barray->GetString(i);
+ auto val = ui64values->Value(valueOffset);
+ result[key] += val;
+ ++valueOffset;
+ }
+ }
+}
+
+
+template<typename TStream, typename TMap>
+void CalcRefResult(TStream& refStream, TMap& refResult)
+{
+ NUdf::TUnboxedValue columns[3];
+
+ while (refStream->WideFetch(columns, 3) == NUdf::EFetchStatus::Ok) {
+ UpdateMapFromBlocks(columns[0], columns[1], refResult);
+ }
+}
+
+
+template<bool LLVM, bool Spilling>
+void RunTestBlockCombineHashedSimple(const TRunParams& params, TTestResultCollector& printout)
+{
+ TSetup<LLVM, Spilling> setup(GetPerfTestFactory());
+
+ printout.SubmitTestNameAndParams(params, __func__, LLVM, Spilling);
+
+ auto samples = MakeKeyedString64Samples(params.RowsPerRun, params.MaxKey, false);
+
+ TProgramBuilder& pb = *setup.PgmBuilder;
+
+ auto keyBaseType = pb.NewDataType(NUdf::TDataType<char*>::Id);
+ auto valueBaseType = pb.NewDataType(NUdf::TDataType<ui64>::Id);
+ auto keyBlockType = pb.NewBlockType(keyBaseType, TBlockType::EShape::Many);
+ auto valueBlockType = pb.NewBlockType(valueBaseType, TBlockType::EShape::Many);
+ auto blockSizeType = pb.NewDataType(NUdf::TDataType<ui64>::Id);
+ auto blockSizeBlockType = pb.NewBlockType(blockSizeType, TBlockType::EShape::Scalar);
+ const auto streamItemType = pb.NewMultiType({keyBlockType, valueBlockType, blockSizeBlockType});
+ const auto streamType = pb.NewStreamType(streamItemType);
+ const auto streamResultItemType = pb.NewMultiType({keyBlockType, valueBlockType, blockSizeBlockType});
+ const auto streamResultType = pb.NewStreamType(streamResultItemType);
+ const auto streamCallable = TCallableBuilder(pb.GetTypeEnvironment(), "TestList", streamType).Build();
+
+ ui32 keys[] = {0};
+ TAggInfo aggs[] = {
+ TAggInfo{
+ .Name = "sum",
+ .ArgsColumns = {1u},
+ }
+ };
+ auto pgmReturn = pb.Collect(pb.NarrowMap(pb.ToFlow(
+ pb.BlockCombineHashed(
+ TRuntimeNode(streamCallable, false),
+ {},
+ TArrayRef(keys, 1),
+ TArrayRef(aggs, 1),
+ streamResultType
+ )),
+ [&](TRuntimeNode::TList items) -> TRuntimeNode { return pb.NewTuple(items); } // NarrowMap handler
+ ));
+ const auto graph = setup.BuildGraph(pgmReturn, {streamCallable});
+
+ auto streamMaker = [&]() -> auto {
+ return std::unique_ptr<TBlockKVStream<std::string, ui64>>(new TBlockKVStream<std::string, ui64>(
+ graph->GetContext(),
+ samples,
+ params.NumRuns,
+ params.BlockSize,
+ {keyBaseType, valueBaseType}
+ ));
+ };
+
+ // Compute results directly from raw samples to test the input stream implementation
+ std::unordered_map<std::string, ui64> rawResult;
+ for (const auto& tuple : samples) {
+ rawResult[tuple.first] += (tuple.second * params.NumRuns);
+ }
+
+ // Measure the input stream run time
+ const auto devnullStream = streamMaker();
+ const auto devnullStart = TInstant::Now();
+ {
+ NUdf::TUnboxedValue columns[3];
+ while (devnullStream->WideFetch(columns, 3) == NUdf::EFetchStatus::Ok) {
+ }
+ }
+ const auto devnullTime = TInstant::Now() - devnullStart;
+
+ // Reference implementation (sum via an std::unordered_map)
+ std::unordered_map<std::string, ui64> refResult;
+ const auto refStream = streamMaker();
+ const auto cppStart = TInstant::Now();
+ CalcRefResult(refStream, refResult);
+ const auto cppTime = TInstant::Now() - cppStart;
+
+ // Verify the reference stream implementation against the raw samples
+ UNIT_ASSERT_VALUES_EQUAL(refResult.size(), rawResult.size());
+ for (const auto& tuple : rawResult) {
+ auto otherIt = refResult.find(tuple.first);
+ UNIT_ASSERT(otherIt != refResult.end());
+ UNIT_ASSERT_VALUES_EQUAL(tuple.second, otherIt->second);
+ }
+
+ // Compute graph implementation
+ auto stream = NUdf::TUnboxedValuePod(streamMaker().release());
+ graph->GetEntryPoint(0, true)->SetValue(graph->GetContext(), std::move(stream));
+ const auto graphStart = TInstant::Now();
+ const auto& resultList = graph->GetValue();
+ const auto graphTime = TInstant::Now() - graphStart;
+
+ size_t numResultItems = resultList.GetListLength();
+ Cerr << "Result block count: " << numResultItems << Endl;
+
+ // Verification
+ std::unordered_map<std::string, ui64> graphResult;
+
+ const auto ptr = resultList.GetElements();
+ for (size_t i = 0ULL; i < numResultItems; ++i) {
+ UNIT_ASSERT(ptr[i].GetListLength() >= 2);
+
+ const auto elements = ptr[i].GetElements();
+
+ UpdateMapFromBlocks(elements[0], elements[1], graphResult);
+ }
+
+ UNIT_ASSERT_VALUES_EQUAL(refResult.size(), graphResult.size());
+ for (const auto& tuple : refResult) {
+ auto graphIt = graphResult.find(tuple.first);
+ UNIT_ASSERT(graphIt != graphResult.end());
+ UNIT_ASSERT_VALUES_EQUAL(tuple.second, graphIt->second);
+ }
+
+ Cerr << "Graph time raw: " << graphTime << Endl;
+ Cerr << "CPP time raw: " << cppTime << Endl;
+ printout.SubmitTimings(graphTime, cppTime, devnullTime);
+}
+
+template void RunTestBlockCombineHashedSimple<false, false>(const TRunParams& params, TTestResultCollector& printout);
+template void RunTestBlockCombineHashedSimple<false, true>(const TRunParams& params, TTestResultCollector& printout);
+template void RunTestBlockCombineHashedSimple<true, false>(const TRunParams& params, TTestResultCollector& printout);
+template void RunTestBlockCombineHashedSimple<true, true>(const TRunParams& params, TTestResultCollector& printout);
+
+
+}
+}
diff --git a/ydb/core/kqp/tools/combiner_perf/simple_block.h b/ydb/core/kqp/tools/combiner_perf/simple_block.h
new file mode 100644
index 0000000000..e57554a10a
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/simple_block.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#include "run_params.h"
+#include "printout.h"
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+template<bool LLVM, bool Spilling>
+void RunTestBlockCombineHashedSimple(const TRunParams& params, TTestResultCollector& printout);
+
+}
+}
diff --git a/ydb/core/kqp/tools/combiner_perf/simple_last.cpp b/ydb/core/kqp/tools/combiner_perf/simple_last.cpp
new file mode 100644
index 0000000000..fe880637c7
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/simple_last.cpp
@@ -0,0 +1,103 @@
+#include "simple_last.h"
+
+#include "factories.h"
+#include "converters.h"
+#include "streams.h"
+#include "printout.h"
+
+#include <yql/essentials/minikql/comp_nodes/ut/mkql_computation_node_ut.h>
+#include <yql/essentials/minikql/computation/mkql_computation_node_holders.h>
+#include <yql/essentials/minikql/comp_nodes/mkql_factories.h>
+#include <yql/essentials/minikql/computation/mock_spiller_factory_ut.h>
+
+#include <library/cpp/testing/unittest/registar.h>
+
+#include <util/system/compiler.h>
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+template<bool LLVM, bool Spilling>
+void RunTestCombineLastSimple(const TRunParams& params, TTestResultCollector& printout)
+{
+ TSetup<LLVM, Spilling> setup(GetPerfTestFactory());
+
+ printout.SubmitTestNameAndParams(params, __func__, LLVM, Spilling);
+
+ const size_t numSamples = params.RowsPerRun;
+ const size_t numIters = params.NumRuns; // Will process numSamples * numIters items
+ const size_t maxKey = params.MaxKey; // maxKey + 1 distinct keys, each key multiplied by 64 for some reason
+
+ TString64DataSampler sampler(numSamples, maxKey, numIters, params.LongStringKeys);
+ // or T6464DataSampler sampler(numSamples, maxKey, numIters); -- maybe make selectable from params
+ Cerr << "Sampler type: " << sampler.Describe() << Endl;
+
+ TProgramBuilder& pb = *setup.PgmBuilder;
+
+ const auto streamItemType = pb.NewMultiType({sampler.GetKeyType(pb), pb.NewDataType(NUdf::TDataType<ui64>::Id)});
+ const auto streamType = pb.NewStreamType(streamItemType);
+ const auto streamCallable = TCallableBuilder(pb.GetTypeEnvironment(), "TestList", streamType).Build();
+
+ /*
+ flow: generate a flow of wide MultiType values (uint64 { key, value } pairs) from the input stream
+ extractor: get key from an item
+ init: initialize the state with the item value
+ update: state += value (why AggrAdd though?)
+ finish: return key + state
+ */
+ const auto pgmReturn = pb.Collect(pb.NarrowMap(
+ WideLastCombiner<Spilling>(
+ pb,
+ pb.ToFlow(TRuntimeNode(streamCallable, false)),
+ [&](TRuntimeNode::TList items) -> TRuntimeNode::TList { return {items.front()}; }, // extractor
+ [&](TRuntimeNode::TList, TRuntimeNode::TList items) -> TRuntimeNode::TList { return {items.back()}; }, // init
+ [&](TRuntimeNode::TList, TRuntimeNode::TList items, TRuntimeNode::TList state) -> TRuntimeNode::TList {
+ return {pb.AggrAdd(state.front(), items.back())};
+ }, // update
+ [&](TRuntimeNode::TList keys, TRuntimeNode::TList state) -> TRuntimeNode::TList { return {keys.front(), state.front()}; } // finish
+ ), // NarrowMap flow
+ [&](TRuntimeNode::TList items) -> TRuntimeNode { return pb.NewTuple(items); } // NarrowMap handler
+ ));
+
+ const auto graph = setup.BuildGraph(pgmReturn, {streamCallable});
+ if (Spilling) {
+ graph->GetContext().SpillerFactory = std::make_shared<TMockSpillerFactory>();
+ }
+
+ // Measure the input stream run time
+ const auto devnullStream = sampler.MakeStream(graph->GetHolderFactory());
+ const auto devnullStart = TInstant::Now();
+ {
+ NUdf::TUnboxedValue columns[2];
+ while (devnullStream->WideFetch(columns, 2) == NUdf::EFetchStatus::Ok) {
+ }
+ }
+ const auto devnullTime = TInstant::Now() - devnullStart;
+
+ // Reference implementation (sum via an std::unordered_map)
+ auto referenceStream = sampler.MakeStream(graph->GetHolderFactory());
+ const auto t = TInstant::Now();
+ sampler.ComputeReferenceResult(*referenceStream);
+ const auto cppTime = TInstant::Now() - t;
+
+ // Compute graph implementation
+ auto myStream = NUdf::TUnboxedValuePod(sampler.MakeStream(graph->GetHolderFactory()).Release());
+ graph->GetEntryPoint(0, true)->SetValue(graph->GetContext(), std::move(myStream));
+
+ const auto t1 = TInstant::Now();
+ const auto& value = graph->GetValue();
+ const auto graphTime = TInstant::Now() - t1;
+
+ // Verification
+ sampler.VerifyComputedValueVsReference(value);
+
+ printout.SubmitTimings(graphTime, cppTime, devnullTime);
+}
+
+template void RunTestCombineLastSimple<false, false>(const TRunParams& params, TTestResultCollector& printout);
+template void RunTestCombineLastSimple<false, true>(const TRunParams& params, TTestResultCollector& printout);
+template void RunTestCombineLastSimple<true, false>(const TRunParams& params, TTestResultCollector& printout);
+template void RunTestCombineLastSimple<true, true>(const TRunParams& params, TTestResultCollector& printout);
+
+}
+}
diff --git a/ydb/core/kqp/tools/combiner_perf/simple_last.h b/ydb/core/kqp/tools/combiner_perf/simple_last.h
new file mode 100644
index 0000000000..90433f8141
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/simple_last.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#include "run_params.h"
+#include "printout.h"
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+template<bool LLVM, bool Spilling>
+void RunTestCombineLastSimple(const TRunParams& params, TTestResultCollector& printout);
+
+}
+}
diff --git a/ydb/core/kqp/tools/combiner_perf/streams.cpp b/ydb/core/kqp/tools/combiner_perf/streams.cpp
new file mode 100644
index 0000000000..7f0a662250
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/streams.cpp
@@ -0,0 +1,48 @@
+#include "streams.h"
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+T6464Samples MakeKeyed6464Samples(const size_t numSamples, const unsigned int maxKey) {
+ std::default_random_engine eng;
+ std::uniform_int_distribution<unsigned int> keys(0, maxKey);
+    std::uniform_int_distribution<uint64_t> unif(0, 100000);
+
+ T6464Samples samples(numSamples);
+
+ eng.seed(std::time(nullptr));
+ std::generate(samples.begin(), samples.end(),
+ [&]() -> auto {
+ return std::make_pair<uint64_t, uint64_t>(keys(eng), unif(eng));
+ }
+ );
+
+ return samples;
+}
+
+TString64Samples MakeKeyedString64Samples(const size_t numSamples, const unsigned int maxKey, const bool longStrings) {
+ std::default_random_engine eng;
+ std::uniform_int_distribution<unsigned int> keys(0, maxKey);
+    std::uniform_int_distribution<uint64_t> unif(0, 100000);
+
+ TString64Samples samples(numSamples);
+
+ eng.seed(std::time(nullptr));
+ std::generate(samples.begin(), samples.end(),
+ [&]() -> auto {
+ auto key = keys(eng);
+ std::string strKey;
+ if (!longStrings) {
+ strKey = std::string(ToString(key));
+ } else {
+ strKey = Sprintf("%07u.%07u.%07u.", key, key, key);
+ }
+ return std::make_pair<std::string, uint64_t>(std::move(strKey), unif(eng));
+ }
+ );
+
+ return samples;
+}
+
+}
+} \ No newline at end of file
diff --git a/ydb/core/kqp/tools/combiner_perf/streams.h b/ydb/core/kqp/tools/combiner_perf/streams.h
new file mode 100644
index 0000000000..cb955849f9
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/streams.h
@@ -0,0 +1,290 @@
+// Sample data generators and streams
+
+#pragma once
+
+#include "converters.h"
+
+#include <yql/essentials/minikql/comp_nodes/ut/mkql_computation_node_ut.h>
+#include <yql/essentials/minikql/computation/mkql_block_builder.h>
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+using T6464Samples = std::vector<std::pair<ui64, ui64>>;
+T6464Samples MakeKeyed6464Samples(const size_t numSamples, const unsigned int maxKey);
+
+using TString64Samples = std::vector<std::pair<std::string, ui64>>;
+TString64Samples MakeKeyedString64Samples(const size_t numSamples, const unsigned int maxKey, const bool longStrings);
+
+struct IWideStream : public NUdf::TBoxedValue
+{
+ NUdf::EFetchStatus Fetch(NUdf::TUnboxedValue& result) final override {
+ Y_UNUSED(result);
+ ythrow yexception() << "only WideFetch is supported here";
+ }
+
+ NUdf::EFetchStatus WideFetch(NUdf::TUnboxedValue* result, ui32 width) override = 0;
+};
+
+template<typename K, typename V, bool EmbeddedKeys>
+struct TKVStream : public IWideStream {
+    using TSamples = std::vector<std::pair<K, V>>;
+
+    TKVStream(const THolderFactory& holderFactory, const TSamples& samples, size_t iterations)
+        : HolderFactory(holderFactory)
+        , Samples(samples)
+        , End(Samples.end())
+        , MaxIterations(iterations)
+        , CurrSample(Samples.begin())
+    {
+        Y_ENSURE(samples.size() > 0);
+    }
+
+private:
+    const THolderFactory& HolderFactory;
+    const TSamples& Samples;
+    const TSamples::const_iterator End;
+    const size_t MaxIterations;
+    TSamples::const_iterator CurrSample; // set in the ctor init list; "= 0" is not a valid initializer for a class-type iterator
+    size_t Iteration = 0;
+
+public:
+    NUdf::EFetchStatus WideFetch(NUdf::TUnboxedValue* result, ui32 width) final {
+        Y_UNUSED(HolderFactory); // for future use
+
+        if (CurrSample == End) {
+            ++Iteration;
+            if (Iteration >= MaxIterations) {
+                return NUdf::EFetchStatus::Finish;
+            }
+            CurrSample = Samples.begin();
+        }
+
+        if (width != 2) {
+            ythrow yexception() << "width 2 expected";
+        }
+
+        // TODO: support embedded strings in values?
+        NativeToUnboxed<EmbeddedKeys>(CurrSample->first, result[0]);
+        NativeToUnboxed<false>(CurrSample->second, result[1]);
+
+        ++CurrSample;
+
+        return NUdf::EFetchStatus::Ok;
+    }
+};
+
+template<typename K, typename V>
+std::unordered_map<K, V> ComputeSumReferenceResult(IWideStream& referenceStream)
+{
+ std::unordered_map<K, V> expects;
+ {
+ NUdf::TUnboxedValue inputs[2];
+
+ while (referenceStream.WideFetch(inputs, 2) == NUdf::EFetchStatus::Ok) {
+ expects[UnboxedToNative<K>(inputs[0])] += inputs[1].Get<V>();
+ }
+ }
+ return expects;
+}
+
+template<typename K, typename V>
+void VerifyListVsUnorderedMap(const NUdf::TUnboxedValue& pairList, const std::unordered_map<K, V>& map)
+{
+ UNIT_ASSERT_VALUES_EQUAL(pairList.GetListLength(), map.size());
+
+ const auto ptr = pairList.GetElements();
+ for (size_t i = 0ULL; i < map.size(); ++i) {
+ const auto elements = ptr[i].GetElements();
+ const auto key = UnboxedToNative<K>(elements[0]);
+ const auto value = UnboxedToNative<V>(elements[1]);
+ const auto ii = map.find(key);
+ UNIT_ASSERT(ii != map.end());
+ UNIT_ASSERT_VALUES_EQUAL(value, ii->second);
+ }
+}
+
+struct IDataSampler { // was `class` with no access specifier: every member was private, unusable through a base pointer
+    virtual ~IDataSampler() = default; // polymorphic base — presumably deleted via base pointer; virtual dtor avoids UB
+    virtual THolder<IWideStream> MakeStream(const THolderFactory& holderFactory) const = 0;
+    virtual TType* GetKeyType(TProgramBuilder& pb) const = 0;
+    virtual void ComputeReferenceResult(IWideStream& referenceStream) = 0;
+    virtual void VerifyComputedValueVsReference(const NUdf::TUnboxedValue& value) const = 0;
+    virtual std::string Describe() const = 0;
+};
+
+class T6464DataSampler : public IDataSampler
+{
+public:
+ TKVStream<ui64, ui64, false>::TSamples Samples;
+ std::unordered_map<ui64, ui64> Expects;
+
+ size_t StreamNumIters = 0;
+
+ T6464DataSampler(size_t numSamples, size_t maxKey, size_t numIters)
+ : Samples(MakeKeyed6464Samples(numSamples, maxKey))
+ , StreamNumIters(numIters)
+ {
+ }
+
+ THolder<IWideStream> MakeStream(const THolderFactory& holderFactory) const override
+ {
+ return THolder(new TKVStream<ui64, ui64, false>(holderFactory, Samples, StreamNumIters));
+ }
+
+ TType* GetKeyType(TProgramBuilder& pb) const override
+ {
+ return pb.NewDataType(NUdf::TDataType<ui64>::Id);
+ }
+
+ void ComputeReferenceResult(IWideStream& referenceStream) override
+ {
+ Y_ENSURE(Expects.empty());
+
+ Expects = ComputeSumReferenceResult<ui64, ui64>(referenceStream);
+ }
+
+ void VerifyComputedValueVsReference(const NUdf::TUnboxedValue& value) const override
+ {
+ VerifyListVsUnorderedMap(value, Expects);
+ }
+
+ std::string Describe() const override
+ {
+ return "ui64 keys, ui64 values";
+ }
+};
+
+class TString64DataSampler : public IDataSampler
+{
+public:
+ TKVStream<std::string, ui64, false>::TSamples Samples;
+ std::unordered_map<std::string, ui64> Expects;
+
+ size_t StreamNumIters = 0;
+ bool LongStrings = false;
+
+ TString64DataSampler(size_t numSamples, size_t maxKey, size_t numIters, bool longStrings)
+ : Samples(MakeKeyedString64Samples(numSamples, maxKey, longStrings))
+ , StreamNumIters(numIters)
+ , LongStrings(longStrings)
+ {
+ }
+
+ THolder<IWideStream> MakeStream(const THolderFactory& holderFactory) const override
+ {
+ if (LongStrings) {
+ return THolder(new TKVStream<std::string, ui64, false>(holderFactory, Samples, StreamNumIters));
+ } else {
+ return THolder(new TKVStream<std::string, ui64, true>(holderFactory, Samples, StreamNumIters));
+ }
+ }
+
+ TType* GetKeyType(TProgramBuilder& pb) const override
+ {
+ return pb.NewDataType(NUdf::TDataType<char*>::Id);
+ }
+
+ void ComputeReferenceResult(IWideStream& referenceStream) override
+ {
+ Y_ENSURE(Expects.empty());
+
+ Expects = ComputeSumReferenceResult<std::string, ui64>(referenceStream);
+ }
+
+ void VerifyComputedValueVsReference(const NUdf::TUnboxedValue& value) const override
+ {
+ VerifyListVsUnorderedMap(value, Expects);
+ }
+
+ std::string Describe() const override
+ {
+ return Sprintf("%s string keys, ui64 values", LongStrings ? "Long (24 byte)" : "Embedded");
+ }
+};
+
+
+template<typename K, typename V>
+struct TBlockKVStream : public IWideStream {
+    using TSamples = std::vector<std::pair<K, V>>;
+
+    TBlockKVStream(TComputationContext& ctx, const TSamples& samples, size_t iterations, size_t blockSize, const std::vector<TType*>& types) // by const ref: const-value param copied the vector at every call site
+        : Context(ctx)
+        , Samples(samples)
+        , End(Samples.end())
+        , MaxIterations(iterations)
+        , CurrSample(Samples.begin())
+        , BlockSize(blockSize)
+        , Types(types)
+    {
+        Y_ENSURE(samples.size() > 0);
+    }
+
+private:
+ TComputationContext& Context;
+ const TSamples& Samples;
+ const TSamples::const_iterator End;
+ const size_t MaxIterations;
+ TSamples::const_iterator CurrSample;
+ size_t Iteration = 0;
+ size_t BlockSize = 1;
+ const std::vector<TType*> Types;
+
+ const TSamples::value_type* NextSample() {
+ if (CurrSample == End) {
+ ++Iteration;
+ if (Iteration >= MaxIterations) {
+ return nullptr;
+ }
+ CurrSample = Samples.begin();
+ }
+ return CurrSample++;
+ }
+
+ bool IsAtTheEnd() const {
+ return (CurrSample == End) && (Iteration >= MaxIterations);
+ }
+
+public:
+ NUdf::EFetchStatus WideFetch(NUdf::TUnboxedValue* result, ui32 width) final override {
+ if (width != 3) {
+ ythrow yexception() << "width 3 expected";
+ }
+
+ TVector<std::unique_ptr<IArrayBuilder>> builders;
+ std::transform(Types.cbegin(), Types.cend(), std::back_inserter(builders),
+ [&](const auto& type) {
+ return MakeArrayBuilder(TTypeInfoHelper(), type, Context.ArrowMemoryPool, BlockSize, &Context.Builder->GetPgBuilder());
+ });
+
+ size_t count = 0;
+ for (; count < BlockSize; ++count) {
+ const auto nextTuple = NextSample();
+ if (!nextTuple) {
+ break;
+ }
+
+ NUdf::TUnboxedValue key;
+ NativeToUnboxed<false>(nextTuple->first, key);
+ builders[0]->Add(key);
+
+ NUdf::TUnboxedValue value;
+ NativeToUnboxed<false>(nextTuple->second, value);
+ builders[1]->Add(value);
+ }
+
+ if (count > 0) {
+ const bool finish = IsAtTheEnd();
+ result[0] = Context.HolderFactory.CreateArrowBlock(builders[0]->Build(finish));
+ result[1] = Context.HolderFactory.CreateArrowBlock(builders[1]->Build(finish));
+ result[2] = Context.HolderFactory.CreateArrowBlock(arrow::Datum(static_cast<uint64_t>(count)));
+
+ return NUdf::EFetchStatus::Ok;
+ } else {
+ return NUdf::EFetchStatus::Finish;
+ }
+ }
+};
+
+}
+}
diff --git a/ydb/core/kqp/tools/combiner_perf/tpch_last.cpp b/ydb/core/kqp/tools/combiner_perf/tpch_last.cpp
new file mode 100644
index 0000000000..31621323ef
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/tpch_last.cpp
@@ -0,0 +1,168 @@
+#include "tpch_last.h"
+#include "factories.h"
+
+#include <yql/essentials/minikql/comp_nodes/ut/mkql_computation_node_ut.h>
+#include <yql/essentials/minikql/computation/mkql_computation_node_holders.h>
+#include <yql/essentials/minikql/comp_nodes/mkql_factories.h>
+#include <yql/essentials/minikql/computation/mock_spiller_factory_ut.h>
+
+#include <library/cpp/testing/unittest/registar.h>
+
+#include <util/system/compiler.h>
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+constexpr auto TpchDateBorder = 9124596000000000ULL;
+
+std::vector<std::tuple<ui64, std::string, std::string, double, double, double, double>> MakeTpchSamples() {
+    constexpr auto total_samples = 5000000ULL;
+
+    // Row layout: (date, flag, status, price, tax, discount, quantity) — TPC-H Q1-like input.
+    std::uniform_int_distribution<ui64> dates(694303200000000ULL, 9124596000000005ULL);
+    std::uniform_int_distribution<int> keys(0U, 3U);
+    std::uniform_real_distribution<double> prices(900., 105000.0);
+    std::uniform_real_distribution<double> taxes(0., 0.08);
+    std::uniform_real_distribution<double> discs(0., 0.1);
+    std::uniform_real_distribution<double> qntts(1., 50.);
+
+    std::random_device rd;
+    std::mt19937 gen(rd());
+
+    std::vector<std::tuple<ui64, std::string, std::string, double, double, double, double>> samples(total_samples);
+
+    // NB: a second std::default_random_engine was declared and seeded here but never drawn from;
+    // all draws go through `gen`, so the dead engine has been removed.
+    std::generate(samples.begin(), samples.end(), [&]() {
+        switch(keys(gen)) {
+            case 0U: return std::make_tuple(dates(gen), "N", "O", prices(gen), taxes(gen), discs(gen), qntts(gen));
+            case 1U: return std::make_tuple(dates(gen), "A", "F", prices(gen), taxes(gen), discs(gen), qntts(gen));
+            case 2U: return std::make_tuple(dates(gen), "N", "F", prices(gen), taxes(gen), discs(gen), qntts(gen));
+            case 3U: return std::make_tuple(dates(gen), "R", "F", prices(gen), taxes(gen), discs(gen), qntts(gen));
+        }
+        Y_ABORT("Unexpected");
+    });
+    return samples;
+}
+
+
+const std::vector<std::tuple<ui64, std::string, std::string, double, double, double, double>> TpchSamples = MakeTpchSamples();
+
+
+template<bool LLVM, bool Spilling>
+void RunTestLastTpch()
+{
+ TSetup<LLVM, Spilling> setup(GetPerfTestFactory());
+
+ Cerr << "tpc-h sample has " << TpchSamples.size() << " rows" << Endl;
+
+ struct TPairHash { size_t operator()(const std::pair<std::string_view, std::string_view>& p) const { return CombineHashes(std::hash<std::string_view>()(p.first), std::hash<std::string_view>()(p.second)); } };
+
+ std::unordered_map<std::pair<std::string_view, std::string_view>, std::pair<ui64, std::array<double, 5U>>, TPairHash> expects;
+ const auto t = TInstant::Now();
+ for (auto& sample : TpchSamples) {
+ if (std::get<0U>(sample) <= TpchDateBorder) {
+ const auto& ins = expects.emplace(std::pair<std::string_view, std::string_view>{std::get<1U>(sample), std::get<2U>(sample)}, std::pair<ui64, std::array<double, 5U>>{0ULL, {0., 0., 0., 0., 0.}});
+ auto& item = ins.first->second;
+ ++item.first;
+ std::get<0U>(item.second) += std::get<3U>(sample);
+ std::get<1U>(item.second) += std::get<5U>(sample);
+ std::get<2U>(item.second) += std::get<6U>(sample);
+ const auto v = std::get<3U>(sample) * (1. - std::get<5U>(sample));
+ std::get<3U>(item.second) += v;
+ std::get<4U>(item.second) += v * (1. + std::get<4U>(sample));
+ }
+ }
+ for (auto& item : expects) {
+ std::get<1U>(item.second.second) /= item.second.first;
+ }
+ const auto cppTime = TInstant::Now() - t;
+
+    std::vector<std::pair<std::pair<std::string, std::string>, std::pair<ui64, std::array<double, 5U>>>> one, two;
+    one.reserve(expects.size());
+    two.reserve(expects.size());
+
+    one.insert(one.cend(), expects.cbegin(), expects.cend());
+    std::sort(one.begin(), one.end(), [](const auto& l, const auto& r){ return l.first < r.first; }); // const ref: the original copied (and string->string_view converted) each heavyweight pair per comparison
+
+ TProgramBuilder& pb = *setup.PgmBuilder;
+
+ const auto listType = pb.NewListType(pb.NewTupleType({
+ pb.NewDataType(NUdf::TDataType<ui64>::Id),
+ pb.NewDataType(NUdf::TDataType<const char*>::Id),
+ pb.NewDataType(NUdf::TDataType<const char*>::Id),
+ pb.NewDataType(NUdf::TDataType<double>::Id),
+ pb.NewDataType(NUdf::TDataType<double>::Id),
+ pb.NewDataType(NUdf::TDataType<double>::Id),
+ pb.NewDataType(NUdf::TDataType<double>::Id)
+ }));
+ const auto list = TCallableBuilder(pb.GetTypeEnvironment(), "TestList", listType).Build();
+
+ const auto pgmReturn = pb.Collect(pb.NarrowMap(WideLastCombiner<Spilling>(
+ pb,
+ pb.WideFilter(pb.ExpandMap(pb.ToFlow(TRuntimeNode(list, false)),
+ [&](TRuntimeNode item) -> TRuntimeNode::TList { return {pb.Nth(item, 0U), pb.Nth(item, 1U), pb.Nth(item, 2U), pb.Nth(item, 3U), pb.Nth(item, 4U), pb.Nth(item, 5U), pb.Nth(item, 6U)}; }),
+ [&](TRuntimeNode::TList items) { return pb.AggrLessOrEqual(items.front(), pb.NewDataLiteral<ui64>(TpchDateBorder)); }
+ ),
+ [&](TRuntimeNode::TList item) -> TRuntimeNode::TList { return {item[1U], item[2U]}; },
+ [&](TRuntimeNode::TList, TRuntimeNode::TList items) -> TRuntimeNode::TList {
+ const auto price = items[3U];
+ const auto disco = items[5U];
+ const auto v = pb.Mul(price, pb.Sub(pb.NewDataLiteral<double>(1.), disco));
+ return {pb.NewDataLiteral<ui64>(1ULL), price, disco, items[6U], v, pb.Mul(v, pb.Add(pb.NewDataLiteral<double>(1.), items[4U]))};
+ },
+ [&](TRuntimeNode::TList, TRuntimeNode::TList items, TRuntimeNode::TList state) -> TRuntimeNode::TList {
+ const auto price = items[3U];
+ const auto disco = items[5U];
+ const auto v = pb.Mul(price, pb.Sub(pb.NewDataLiteral<double>(1.), disco));
+ return {pb.Increment(state[0U]), pb.AggrAdd(state[1U], price), pb.AggrAdd(state[2U], disco), pb.AggrAdd(state[3U], items[6U]), pb.AggrAdd(state[4U], v), pb.AggrAdd(state[5U], pb.Mul(v, pb.Add(pb.NewDataLiteral<double>(1.), items[4U])))};
+ },
+ [&](TRuntimeNode::TList key, TRuntimeNode::TList state) -> TRuntimeNode::TList { return {key.front(), key.back(), state[0U], state[1U], pb.Div(state[2U], state[0U]), state[3U], state[4U], state[5U]}; }),
+ [&](TRuntimeNode::TList items) { return pb.NewTuple(items); }
+ ));
+
+    const auto graph = setup.BuildGraph(pgmReturn, {list});
+    if constexpr (Spilling) { // compile-time branch: Spilling is a template parameter, so the spiller setup is dropped entirely from non-spilling instantiations
+        graph->GetContext().SpillerFactory = std::make_shared<TMockSpillerFactory>();
+    }
+
+ NUdf::TUnboxedValue* items = nullptr;
+ graph->GetEntryPoint(0, true)->SetValue(graph->GetContext(), graph->GetHolderFactory().CreateDirectArrayHolder(TpchSamples.size(), items));
+ for (const auto& sample : TpchSamples) {
+ NUdf::TUnboxedValue* elements = nullptr;
+ *items++ = graph->GetHolderFactory().CreateDirectArrayHolder(7U, elements);
+ elements[0] = NUdf::TUnboxedValuePod(std::get<0U>(sample));
+ elements[1] = NUdf::TUnboxedValuePod::Embedded(std::get<1U>(sample));
+ elements[2] = NUdf::TUnboxedValuePod::Embedded(std::get<2U>(sample));
+ elements[3] = NUdf::TUnboxedValuePod(std::get<3U>(sample));
+ elements[4] = NUdf::TUnboxedValuePod(std::get<4U>(sample));
+ elements[5] = NUdf::TUnboxedValuePod(std::get<5U>(sample));
+ elements[6] = NUdf::TUnboxedValuePod(std::get<6U>(sample));
+ }
+
+ const auto t1 = TInstant::Now();
+ const auto& value = graph->GetValue();
+ const auto t2 = TInstant::Now();
+
+ //UNIT_ASSERT_VALUES_EQUAL(value.GetListLength(), expects.size());
+
+ const auto ptr = value.GetElements();
+ for (size_t i = 0ULL; i < expects.size(); ++i) {
+ const auto elements = ptr[i].GetElements();
+ two.emplace_back(std::make_pair(elements[0].AsStringRef(), elements[1].AsStringRef()), std::pair<ui64, std::array<double, 5U>>{elements[2].template Get<ui64>(), {elements[3].template Get<double>(), elements[4].template Get<double>(), elements[5].template Get<double>(), elements[6].template Get<double>(), elements[7].template Get<double>()}});
+ }
+
+    std::sort(two.begin(), two.end(), [](const auto& l, const auto& r){ return l.first < r.first; }); // const ref: avoids copying + converting each heavyweight pair on every comparison
+ UNIT_ASSERT_VALUES_EQUAL(one, two);
+
+ Cerr << "WideLastCombiner graph runtime is: " << t2 - t1 << " vs. reference C++ implementation: " << cppTime << Endl << Endl;
+}
+
+template void RunTestLastTpch<false, false>();
+template void RunTestLastTpch<false, true>();
+template void RunTestLastTpch<true, false>();
+template void RunTestLastTpch<true, true>();
+
+
+} // namespace NMiniKQL
+} // namespace NKikimr
diff --git a/ydb/core/kqp/tools/combiner_perf/tpch_last.h b/ydb/core/kqp/tools/combiner_perf/tpch_last.h
new file mode 100644
index 0000000000..fb09ba8da5
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/tpch_last.h
@@ -0,0 +1,10 @@
+#pragma once
+
+namespace NKikimr {
+namespace NMiniKQL {
+
+template<bool LLVM, bool Spilling>
+void RunTestLastTpch();
+
+}
+}
diff --git a/ydb/core/kqp/tools/combiner_perf/ya.make b/ydb/core/kqp/tools/combiner_perf/ya.make
new file mode 100644
index 0000000000..d2e15dca53
--- /dev/null
+++ b/ydb/core/kqp/tools/combiner_perf/ya.make
@@ -0,0 +1,53 @@
+LIBRARY()
+
+YQL_LAST_ABI_VERSION()
+
+IF (MKQL_RUNTIME_VERSION)
+ CFLAGS(
+ -DMKQL_RUNTIME_VERSION=$MKQL_RUNTIME_VERSION
+ )
+ENDIF()
+
+PEERDIR(
+ yql/essentials/public/udf
+ yql/essentials/public/udf/arrow
+ yql/essentials/public/udf/service/exception_policy
+ yql/essentials/sql/pg_dummy
+
+ yql/essentials/minikql/comp_nodes
+ yql/essentials/minikql/comp_nodes/llvm16
+ yql/essentials/minikql/codegen/llvm16
+ yql/essentials/minikql/invoke_builtins/llvm16
+
+ library/cpp/testing/unittest
+
+ contrib/libs/llvm16/lib/IR
+ contrib/libs/llvm16/lib/ExecutionEngine/MCJIT
+ contrib/libs/llvm16/lib/Linker
+ contrib/libs/llvm16/lib/Passes
+ contrib/libs/llvm16/lib/Target/X86
+ contrib/libs/llvm16/lib/Target/X86/AsmParser
+ contrib/libs/llvm16/lib/Target/X86/Disassembler
+ contrib/libs/llvm16/lib/Transforms/IPO
+ contrib/libs/llvm16/lib/Transforms/ObjCARC
+)
+
+IF (ARCH_X86_64)
+
+CFLAGS(
+ -mprfchw
+)
+
+ENDIF()
+
+SRCS(
+ converters.cpp
+ factories.cpp
+ simple.cpp
+ simple_block.cpp
+ simple_last.cpp
+ streams.cpp
+ tpch_last.cpp
+)
+
+END()
diff --git a/ydb/core/kqp/ut/common/kqp_ut_common.cpp b/ydb/core/kqp/ut/common/kqp_ut_common.cpp
index 75dfc7dcce..a2b73a13bf 100644
--- a/ydb/core/kqp/ut/common/kqp_ut_common.cpp
+++ b/ydb/core/kqp/ut/common/kqp_ut_common.cpp
@@ -1688,15 +1688,8 @@ TTestExtEnv::~TTestExtEnv() {
}
void TTestExtEnv::CreateDatabase(const TString& databaseName) {
- auto& runtime = *Server->GetRuntime();
auto fullDbName = "/Root/" + databaseName;
- using TEvCreateDatabaseRequest = NKikimr::NGRpcService::TGrpcRequestOperationCall
- <
- Ydb::Cms::CreateDatabaseRequest,
- Ydb::Cms::CreateDatabaseResponse
- >;
-
Ydb::Cms::CreateDatabaseRequest request;
request.set_path(fullDbName);
@@ -1705,12 +1698,7 @@ void TTestExtEnv::CreateDatabase(const TString& databaseName) {
storage->set_unit_kind(EnvSettings.PoolName);
storage->set_count(1);
- auto future = NRpcService::DoLocalRpc<TEvCreateDatabaseRequest>(std::move(request), "", "", runtime.GetActorSystem(0));
- auto response = runtime.WaitFuture(std::move(future));
- UNIT_ASSERT(response.operation().ready());
- UNIT_ASSERT_VALUES_EQUAL(response.operation().status(), Ydb::StatusIds::SUCCESS);
-
- Tenants->Run(fullDbName, EnvSettings.DynamicNodeCount);
+ Tenants->CreateTenant(request, EnvSettings.DynamicNodeCount);
}
} // namspace NKqp
diff --git a/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp b/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp
index fb3827e84b..0cf0badfe4 100644
--- a/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp
+++ b/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp
@@ -118,6 +118,7 @@ Y_UNIT_TEST_SUITE(KqpIndexMetadata) {
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto& server = kikimr.GetTestServer();
auto gateway = GetIcGateway(server);
@@ -273,6 +274,7 @@ Y_UNIT_TEST_SUITE(KqpIndexMetadata) {
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto& server = kikimr.GetTestServer();
auto gateway = GetIcGateway(server);
@@ -386,6 +388,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -443,6 +446,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -522,6 +526,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -601,6 +606,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -785,6 +791,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -907,6 +914,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting})
.SetAppConfig(appConfig);
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -1038,6 +1046,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting})
.SetAppConfig(appConfig);
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -1272,6 +1281,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
auto serverSettings = TKikimrSettings()
.SetKqpSettings({ setting });
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -1366,6 +1376,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -1502,6 +1513,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -1694,6 +1706,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -1783,6 +1796,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -1848,6 +1862,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
TScriptingClient client(kikimr.GetDriver());
{
@@ -1888,6 +1903,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -2365,6 +2381,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForVectorIndex(db, false);
{
@@ -2407,6 +2424,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForVectorIndex(db, false);
{
@@ -2449,6 +2467,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForVectorIndex(db, true);
{
@@ -2491,6 +2510,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForVectorIndex(db, true);
{
@@ -2533,6 +2553,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForVectorIndex(db, false);
{
@@ -2575,6 +2596,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForVectorIndex(db, false);
{
@@ -2617,6 +2639,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForVectorIndex(db, true);
{
@@ -2659,6 +2682,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForVectorIndex(db, true);
{
@@ -2863,7 +2887,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NLog::PRI_DEBUG);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForPrefixedVectorIndex(db, false);
@@ -2908,7 +2932,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NLog::PRI_DEBUG);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForPrefixedVectorIndex(db, false);
@@ -2953,7 +2977,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NLog::PRI_DEBUG);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForPrefixedVectorIndex(db, true);
@@ -2998,7 +3022,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NLog::PRI_DEBUG);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForPrefixedVectorIndex(db, true);
@@ -3043,7 +3067,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NLog::PRI_DEBUG);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForPrefixedVectorIndex(db, false);
@@ -3088,7 +3112,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NLog::PRI_DEBUG);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForPrefixedVectorIndex(db, false);
@@ -3133,7 +3157,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NLog::PRI_DEBUG);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForPrefixedVectorIndex(db, true);
@@ -3178,7 +3202,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
- kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NLog::PRI_DEBUG);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = DoCreateTableForPrefixedVectorIndex(db, true);
@@ -3220,6 +3244,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -3296,6 +3321,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -3446,6 +3472,7 @@ Y_UNIT_TEST_SUITE(KqpIndexes) {
.SetKqpSettings({setting})
.SetAppConfig(app);
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -3523,6 +3550,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -3570,6 +3598,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
.SetKqpSettings({setting})
.SetAppConfig(appConfig);
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -3633,6 +3662,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -3823,6 +3853,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -3949,6 +3980,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
.SetKqpSettings({setting})
.SetAppConfig(app);
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -4288,6 +4320,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
.SetKqpSettings({setting})
.SetAppConfig(app);
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -4563,6 +4596,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
.SetKqpSettings({setting})
.SetAppConfig(appConfig);
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -4800,6 +4834,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
.SetKqpSettings({setting})
.SetAppConfig(appConfig);
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -4961,6 +4996,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -5047,6 +5083,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto scheme = kikimr.GetSchemeClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -5096,6 +5133,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -5135,6 +5173,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
.SetKqpSettings({setting})
.SetAppConfig(appConfig);
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -5221,6 +5260,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -5418,6 +5458,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
serverSettings.SetAppConfig(appConfig);
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
CreateSampleTablesWithIndex(session);
@@ -5493,6 +5534,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
CreateSampleTablesWithIndex(session);
@@ -5569,6 +5611,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -5758,6 +5801,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
TScriptingClient client(kikimr.GetDriver());
auto scriptResult = client.ExecuteYqlScript(R"(
@@ -5850,6 +5894,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
.SetKqpSettings({setting})
.SetAppConfig(appConfig);
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -5906,6 +5951,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -5956,6 +6002,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
auto serverSettings = TKikimrSettings()
.SetKqpSettings({setting});
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
@@ -6501,6 +6548,7 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda
.SetKqpSettings({setting})
.SetAppConfig(appConfig);
TKikimrRunner kikimr(serverSettings);
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::BUILD_INDEX, NActors::NLog::PRI_TRACE);
auto db = kikimr.GetTableClient();
auto session = db.CreateSession().GetValueSync().GetSession();
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpcds64_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpcds64_1000s_column_store.json
index 9706ab8e44..31f1c27f99 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpcds64_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpcds64_1000s_column_store.json
@@ -27,14 +27,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/catalog_sales"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/catalog_sales"
},
{
"op_name":"HashShuffle",
@@ -89,14 +83,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/customer_address"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/customer_address"
},
{
"op_name":"HashShuffle",
@@ -107,14 +95,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/customer_demographics"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/customer_demographics"
},
{
"op_name":"HashShuffle",
@@ -133,14 +115,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/customer"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/customer"
},
{
"op_name":"HashShuffle",
@@ -151,14 +127,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/customer_demographics"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/customer_demographics"
},
{
"op_name":"HashShuffle",
@@ -169,14 +139,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/customer_address"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/customer_address"
},
{
"op_name":"HashShuffle",
@@ -187,14 +151,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/store_returns"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/store_returns"
},
{
"op_name":"HashShuffle",
@@ -271,22 +229,16 @@
]
},
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/household_demographics"
- },
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/income_band"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/household_demographics"
+ },
+ {
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/income_band"
}
]
}
@@ -295,22 +247,16 @@
]
},
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/household_demographics"
- },
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/income_band"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/household_demographics"
+ },
+ {
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/income_band"
}
]
}
@@ -349,14 +295,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/catalog_sales"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/catalog_sales"
},
{
"op_name":"HashShuffle",
@@ -411,14 +351,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/customer_address"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/customer_address"
},
{
"op_name":"HashShuffle",
@@ -429,14 +363,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/customer_demographics"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/customer_demographics"
},
{
"op_name":"HashShuffle",
@@ -455,14 +383,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/customer"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/customer"
},
{
"op_name":"HashShuffle",
@@ -473,14 +395,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/customer_demographics"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/customer_demographics"
},
{
"op_name":"HashShuffle",
@@ -491,14 +407,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/customer_address"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/customer_address"
},
{
"op_name":"HashShuffle",
@@ -509,14 +419,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/store_returns"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/store_returns"
},
{
"op_name":"HashShuffle",
@@ -593,22 +497,16 @@
]
},
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/household_demographics"
- },
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/income_band"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/household_demographics"
+ },
+ {
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/income_band"
}
]
}
@@ -617,22 +515,16 @@
]
},
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/household_demographics"
- },
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/income_band"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/household_demographics"
+ },
+ {
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/income_band"
}
]
}
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpcds78_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpcds78_1000s_column_store.json
index 80d6b26e7e..46fd60c5a3 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpcds78_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpcds78_1000s_column_store.json
@@ -3,11 +3,11 @@
"args":
[
{
- "op_name":"HashShuffle",
+ "op_name":"LeftJoin (Grace)",
"args":
[
{
- "op_name":"LeftJoin (Grace)",
+ "op_name":"HashShuffle",
"args":
[
{
@@ -15,92 +15,86 @@
"args":
[
{
- "op_name":"HashShuffle",
+ "op_name":"LeftJoin (Grace)",
"args":
[
{
- "op_name":"LeftJoin (Grace)",
+ "op_name":"HashShuffle",
"args":
[
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/store_sales"
- },
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/date_dim"
- }
- ]
- }
- ]
- },
- {
- "op_name":"HashShuffle",
- "args":
- [
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/store_sales"
+ },
{
"op_name":"TableFullScan",
- "table":"test\/ds\/store_returns"
+ "table":"test\/ds\/date_dim"
}
]
}
]
+ },
+ {
+ "op_name":"HashShuffle",
+ "args":
+ [
+ {
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/store_returns"
+ }
+ ]
}
]
}
]
- },
+ }
+ ]
+ },
+ {
+ "op_name":"HashShuffle",
+ "args":
+ [
{
"op_name":"HashShuffle",
"args":
[
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"InnerJoin (MapJoin)",
+ "op_name":"LeftJoin (Grace)",
"args":
[
{
- "op_name":"LeftJoin (Grace)",
+ "op_name":"HashShuffle",
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/web_sales"
- }
- ]
- },
- {
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"test\/ds\/web_returns"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/web_sales"
}
]
},
{
- "op_name":"TableFullScan",
- "table":"test\/ds\/date_dim"
+ "op_name":"HashShuffle",
+ "args":
+ [
+ {
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/web_returns"
+ }
+ ]
}
]
+ },
+ {
+ "op_name":"TableFullScan",
+ "table":"test\/ds\/date_dim"
}
]
}
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch10_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch10_1000s_column_store.json
index 0dcb99afe8..4dbf957899 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch10_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch10_1000s_column_store.json
@@ -7,22 +7,16 @@
"args":
[
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"customer"
- },
- {
- "op_name":"TableFullScan",
- "table":"nation"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"customer"
+ },
+ {
+ "op_name":"TableFullScan",
+ "table":"nation"
}
]
},
@@ -35,14 +29,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"orders"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"orders"
},
{
"op_name":"HashShuffle",
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch11_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch11_1000s_column_store.json
index dc4048706a..47936ea0ab 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch11_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch11_1000s_column_store.json
@@ -21,22 +21,16 @@
]
},
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"supplier"
- },
- {
- "op_name":"TableFullScan",
- "table":"nation"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"supplier"
+ },
+ {
+ "op_name":"TableFullScan",
+ "table":"nation"
}
]
}
@@ -59,22 +53,16 @@
]
},
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"supplier"
- },
- {
- "op_name":"TableFullScan",
- "table":"nation"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"supplier"
+ },
+ {
+ "op_name":"TableFullScan",
+ "table":"nation"
}
]
}
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch13_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch13_1000s_column_store.json
index 10f82c02aa..806d6c98c9 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch13_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch13_1000s_column_store.json
@@ -11,14 +11,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"customer"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"customer"
},
{
"op_name":"HashShuffle",
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch14_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch14_1000s_column_store.json
index 699f6153db..a7f3e7c7df 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch14_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch14_1000s_column_store.json
@@ -13,14 +13,8 @@
]
},
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"part"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"part"
}
]
}
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch15_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch15_1000s_column_store.json
index 5d5f984ee2..82542cb623 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch15_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch15_1000s_column_store.json
@@ -7,14 +7,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"supplier"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"supplier"
},
{
"op_name":"HashShuffle",
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch16_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch16_1000s_column_store.json
index b21d76c491..d9f28508b6 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch16_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch16_1000s_column_store.json
@@ -19,14 +19,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"part"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"part"
},
{
"op_name":"HashShuffle",
@@ -43,14 +37,8 @@
]
},
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"supplier"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"supplier"
}
]
}
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch17_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch17_1000s_column_store.json
index de859226b3..9a30edd8e0 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch17_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch17_1000s_column_store.json
@@ -35,14 +35,8 @@
]
},
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"part"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"part"
}
]
}
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch18_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch18_1000s_column_store.json
index a2969f0a47..02ef9ce355 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch18_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch18_1000s_column_store.json
@@ -15,14 +15,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"customer"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"customer"
},
{
"op_name":"HashShuffle",
@@ -43,14 +37,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"lineitem"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"lineitem"
}
]
}
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch19_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch19_1000s_column_store.json
index 699f6153db..a7f3e7c7df 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch19_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch19_1000s_column_store.json
@@ -13,14 +13,8 @@
]
},
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"part"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"part"
}
]
}
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch21_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch21_1000s_column_store.json
index 0ed0ab1e8b..ea293ab5bb 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch21_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch21_1000s_column_store.json
@@ -7,11 +7,11 @@
"args":
[
{
- "op_name":"HashShuffle",
+ "op_name":"LeftSemiJoin (Grace)",
"args":
[
{
- "op_name":"LeftSemiJoin (Grace)",
+ "op_name":"InnerJoin (Grace)",
"args":
[
{
@@ -27,50 +27,22 @@
"args":
[
{
- "op_name":"InnerJoin (Grace)",
- "args":
- [
- {
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"lineitem"
- }
- ]
- },
- {
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"supplier"
- },
- {
- "op_name":"TableFullScan",
- "table":"nation"
- }
- ]
- }
- ]
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"lineitem"
}
]
},
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
"op_name":"TableFullScan",
- "table":"orders"
+ "table":"supplier"
+ },
+ {
+ "op_name":"TableFullScan",
+ "table":"nation"
}
]
}
@@ -79,64 +51,62 @@
]
},
{
- "op_name":"HashShuffle",
+ "op_name":"TableFullScan",
+ "table":"orders"
+ }
+ ]
+ },
+ {
+ "op_name":"HashShuffle",
+ "args":
+ [
+ {
+ "op_name":"InnerJoin (Grace)",
"args":
[
{
- "op_name":"InnerJoin (Grace)",
+ "op_name":"HashShuffle",
"args":
[
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (Grace)",
"args":
[
{
- "op_name":"InnerJoin (Grace)",
+ "op_name":"HashShuffle",
+ "args":
+ [
+ {
+ "op_name":"TableFullScan",
+ "table":"lineitem"
+ }
+ ]
+ },
+ {
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"lineitem"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"supplier"
},
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"supplier"
- },
- {
- "op_name":"TableFullScan",
- "table":"nation"
- }
- ]
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"nation"
}
]
}
]
- },
+ }
+ ]
+ },
+ {
+ "op_name":"HashShuffle",
+ "args":
+ [
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"lineitem"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"lineitem"
}
]
}
@@ -173,22 +143,16 @@
]
},
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"supplier"
- },
- {
- "op_name":"TableFullScan",
- "table":"nation"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"supplier"
+ },
+ {
+ "op_name":"TableFullScan",
+ "table":"nation"
}
]
}
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch2_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch2_1000s_column_store.json
index 0d7f7f4478..6d0a563acf 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch2_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch2_1000s_column_store.json
@@ -3,11 +3,15 @@
"args":
[
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (Grace)",
"args":
[
{
- "op_name":"InnerJoin (Grace)",
+ "op_name":"TableFullScan",
+ "table":"part"
+ },
+ {
+ "op_name":"HashShuffle",
"args":
[
{
@@ -15,60 +19,38 @@
"args":
[
{
- "op_name":"TableFullScan",
- "table":"part"
- }
- ]
- },
- {
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (Grace)",
"args":
[
{
- "op_name":"InnerJoin (Grace)",
+ "op_name":"HashShuffle",
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"partsupp"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"partsupp"
+ }
+ ]
+ },
+ {
+ "op_name":"InnerJoin (MapJoin)",
+ "args":
+ [
+ {
+ "op_name":"TableFullScan",
+ "table":"supplier"
},
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"supplier"
- },
- {
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"nation"
- },
- {
- "op_name":"TableFullScan",
- "table":"region"
- }
- ]
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"nation"
+ },
+ {
+ "op_name":"TableFullScan",
+ "table":"region"
}
]
}
@@ -101,30 +83,24 @@
]
},
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
+ "op_name":"TableFullScan",
+ "table":"supplier"
+ },
+ {
"op_name":"InnerJoin (MapJoin)",
"args":
[
{
"op_name":"TableFullScan",
- "table":"supplier"
+ "table":"nation"
},
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"nation"
- },
- {
- "op_name":"TableFullScan",
- "table":"region"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"region"
}
]
}
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch3_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch3_1000s_column_store.json
index f3bb111cf3..02ef9ce355 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch3_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch3_1000s_column_store.json
@@ -15,14 +15,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"customer"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"customer"
},
{
"op_name":"HashShuffle",
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch4_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch4_1000s_column_store.json
index 86eee6f81a..a456fdf93f 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch4_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch4_1000s_column_store.json
@@ -21,14 +21,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"lineitem"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"lineitem"
}
]
}
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch5_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch5_1000s_column_store.json
index 2e043ae84c..4e756bbc59 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch5_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch5_1000s_column_store.json
@@ -7,14 +7,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"customer"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"customer"
},
{
"op_name":"HashShuffle",
@@ -25,14 +19,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"orders"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"orders"
},
{
"op_name":"HashShuffle",
@@ -53,30 +41,24 @@
]
},
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
+ "op_name":"TableFullScan",
+ "table":"supplier"
+ },
+ {
"op_name":"InnerJoin (MapJoin)",
"args":
[
{
"op_name":"TableFullScan",
- "table":"supplier"
+ "table":"nation"
},
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"nation"
- },
- {
- "op_name":"TableFullScan",
- "table":"region"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"region"
}
]
}
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch7_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch7_1000s_column_store.json
index b317beaa57..4032629694 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch7_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch7_1000s_column_store.json
@@ -43,22 +43,16 @@
]
},
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"customer"
- },
- {
- "op_name":"TableFullScan",
- "table":"nation"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"customer"
+ },
+ {
+ "op_name":"TableFullScan",
+ "table":"nation"
}
]
}
@@ -71,22 +65,16 @@
]
},
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"supplier"
- },
- {
- "op_name":"TableFullScan",
- "table":"nation"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"supplier"
+ },
+ {
+ "op_name":"TableFullScan",
+ "table":"nation"
}
]
}
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch8_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch8_1000s_column_store.json
index eae7005f40..ec8fb0eeee 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch8_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch8_1000s_column_store.json
@@ -11,14 +11,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"supplier"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"supplier"
},
{
"op_name":"HashShuffle",
@@ -33,14 +27,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"customer"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"customer"
},
{
"op_name":"HashShuffle",
@@ -51,14 +39,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"orders"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"orders"
},
{
"op_name":"HashShuffle",
@@ -69,14 +51,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"part"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"part"
},
{
"op_name":"HashShuffle",
diff --git a/ydb/core/kqp/ut/join/data/join_order/tpch9_1000s_column_store.json b/ydb/core/kqp/ut/join/data/join_order/tpch9_1000s_column_store.json
index a7ffca420e..d2ee744019 100644
--- a/ydb/core/kqp/ut/join/data/join_order/tpch9_1000s_column_store.json
+++ b/ydb/core/kqp/ut/join/data/join_order/tpch9_1000s_column_store.json
@@ -7,14 +7,8 @@
"args":
[
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"orders"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"orders"
},
{
"op_name":"HashShuffle",
@@ -43,22 +37,16 @@
"args":
[
{
- "op_name":"HashShuffle",
+ "op_name":"InnerJoin (MapJoin)",
"args":
[
{
- "op_name":"InnerJoin (MapJoin)",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"supplier"
- },
- {
- "op_name":"TableFullScan",
- "table":"nation"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"supplier"
+ },
+ {
+ "op_name":"TableFullScan",
+ "table":"nation"
}
]
},
@@ -81,14 +69,8 @@
]
},
{
- "op_name":"HashShuffle",
- "args":
- [
- {
- "op_name":"TableFullScan",
- "table":"part"
- }
- ]
+ "op_name":"TableFullScan",
+ "table":"part"
}
]
}
diff --git a/ydb/core/kqp/ut/join/data/queries/shuffle_elimination_tpcds_map_join_bug.sql b/ydb/core/kqp/ut/join/data/queries/shuffle_elimination_tpcds_map_join_bug.sql
new file mode 100644
index 0000000000..5b6f9248d8
--- /dev/null
+++ b/ydb/core/kqp/ut/join/data/queries/shuffle_elimination_tpcds_map_join_bug.sql
@@ -0,0 +1,16 @@
+pragma ydb.optimizerhints =
+'
+ JoinOrder((cd (c ca)) ss)
+ JoinType(c ca broadcast)
+ JoinType(c ca cd broadcast)
+';
+
+select
+ *
+ from
+ `/Root/test/ds/customer` c
+ inner join `/Root/test/ds/customer_address` ca on c.c_current_addr_sk = ca.ca_address_sk
+ inner join `/Root/test/ds/customer_demographics` cd on cd.cd_demo_sk = c.c_current_cdemo_sk
+ left semi join `/Root/test/ds/store_sales` ss on c.c_customer_sk = ss.ss_customer_sk
+ where
+ ca_state in ('KY','GA','NM')
diff --git a/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp b/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp
index 0e72cc7b5d..307e0b4dcf 100644
--- a/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp
+++ b/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp
@@ -145,6 +145,15 @@ void PrintPlan(const TString& plan) {
std::replace(joinOrder.begin(), joinOrder.end(), ']', ')');
std::replace(joinOrder.begin(), joinOrder.end(), ',', ' ');
joinOrder.erase(std::remove(joinOrder.begin(), joinOrder.end(), '\"'), joinOrder.end());
+ joinOrder.erase(std::remove(joinOrder.begin(), joinOrder.end(), '\\'), joinOrder.end());
+
+
+ size_t pos;
+ std::string tpcdsTablePrefix = "test/ds/";
+ while ((pos = joinOrder.find(tpcdsTablePrefix)) != std::string::npos) {
+ joinOrder.erase(pos, tpcdsTablePrefix.length());
+ }
+
Cout << "JoinOrder" << joinOrder << Endl;
}
@@ -840,6 +849,28 @@ Y_UNIT_TEST_SUITE(KqpJoinOrder) {
}
}
+ Y_UNIT_TEST(ShuffleEliminationTpcdsMapJoinBug) {
+ auto [plan, resultSets] = ExecuteJoinOrderTestGenericQueryWithStats(
+ "queries/shuffle_elimination_tpcds_map_join_bug.sql", "stats/tpcds1000s.json", false, true, true
+ );
+
+ auto joinFinder = TFindJoinWithLabels(plan);
+ {
+ auto join = joinFinder.Find({"test/ds/customer", "test/ds/customer_address"});
+ UNIT_ASSERT_EQUAL(join.Join, "InnerJoin (MapJoin)");
+ }
+ {
+ auto join = joinFinder.Find({"test/ds/customer_demographics", "test/ds/customer", "test/ds/customer_address"});
+ UNIT_ASSERT_EQUAL(join.Join, "InnerJoin (MapJoin)");
+ }
+ {
+ auto join = joinFinder.Find({"test/ds/customer_demographics", "test/ds/customer", "test/ds/customer_address", "test/ds/store_sales"});
+ UNIT_ASSERT_EQUAL(join.Join, "LeftSemiJoin (Grace)");
+ UNIT_ASSERT(join.LhsShuffled);
+ UNIT_ASSERT(join.RhsShuffled);
+ }
+ }
+
Y_UNIT_TEST(TPCH12_100) {
auto [plan, _] = ExecuteJoinOrderTestGenericQueryWithStats("queries/tpch12.sql", "stats/tpch100s.json", false, true, true);
}
diff --git a/ydb/core/kqp/ut/olap/json_ut.cpp b/ydb/core/kqp/ut/olap/json_ut.cpp
index dd84369c60..b4349f0413 100644
--- a/ydb/core/kqp/ut/olap/json_ut.cpp
+++ b/ydb/core/kqp/ut/olap/json_ut.cpp
@@ -106,15 +106,15 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
Cerr << "HEADER:" << hNoData << "/" << hSkip << "/" << hApproves << Endl;
if (ExpectIndexSkip) {
AFL_VERIFY(iSkip + hSkip == *ExpectIndexSkip)("expect", ExpectIndexSkip)("ireal", iSkip)("hreal", hSkip)(
- "current", controller->GetIndexesSkippingOnSelect().Val())("pred", indexSkipStart);
+ "current", controller->GetIndexesSkippingOnSelect().Val())("pred", indexSkipStart);
}
if (ExpectIndexNoData) {
AFL_VERIFY(iNoData == *ExpectIndexNoData)("expect", ExpectIndexNoData)("real", iNoData)(
- "current", controller->GetIndexesSkippedNoData().Val())("pred", indexNoDataStart);
+ "current", controller->GetIndexesSkippedNoData().Val())("pred", indexNoDataStart);
}
if (ExpectIndexApprove) {
AFL_VERIFY(iApproves == *ExpectIndexApprove)("expect", ExpectIndexApprove)("real", iApproves)(
- "current", controller->GetIndexesApprovedOnSelect().Val())("pred", indexApproveStart);
+ "current", controller->GetIndexesApprovedOnSelect().Val())("pred", indexApproveStart);
}
return TConclusionStatus::Success();
}
@@ -341,7 +341,13 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
public:
TScriptVariator(const TString& script) {
- auto commands = StringSplitter(script).SplitByString("------").ToList<TString>();
+ auto lines = StringSplitter(script).SplitByString("\n").ToList<TString>();
+ lines.erase(std::remove_if(lines.begin(), lines.end(),
+ [](const TString& l) {
+ return Strip(l).StartsWith("#");
+ }),
+ lines.end());
+ auto commands = StringSplitter(JoinSeq("\n", lines)).SplitByString("------").ToList<TString>();
std::vector<std::vector<std::shared_ptr<ICommand>>> commandsDescription;
for (auto&& i : commands) {
auto& cVariants = commandsDescription.emplace_back();
@@ -380,8 +386,8 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`)
------
SCHEMA:
- ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
+ `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1) VALUES (1u), (2u), (3u), (4u)
@@ -413,7 +419,7 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`)
------
SCHEMA:
- ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5|1$$`)
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `FORCE_SIMD_PARSING`=`$$true|false$$`, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5|1$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES (1u, JsonDocument('{"a" : "", "b" : "", "c" : ""}'))
@@ -441,7 +447,7 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
------
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a.b.c" : "a1", "b.c.d" : "b1", "c.d.e" : "c1"}')), (2u, JsonDocument('{"a.b.c" : "a2"}')),
@@ -469,18 +475,114 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`)
------
SCHEMA:
- ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_EXTRACTOR_CLASS_NAME`=`JSON_SCANNER`, `SCAN_FIRST_LEVEL_ONLY`=`false`,
+ `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$1024|0|1$$`,
+ `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
- REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1"}')), (2u, JsonDocument('{"a" : "a2"}')),
+ REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1", "d" : null, "e.v" : {"c" : 1, "e" : {"c.a" : 2}}}')), (2u, JsonDocument('{"a" : "a2"}')),
(3u, JsonDocument('{"b" : "b3", "d" : "d3"}')), (4u, JsonDocument('{"b" : "b4asdsasdaa", "a" : "a4"}'))
------
READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "a2" ORDER BY Col1;
EXPECTED: [[2u;["{\"a\":\"a2\"}"]]]
------
READ: SELECT * FROM `/Root/ColumnTable` ORDER BY Col1;
- EXPECTED: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]]
+ EXPECTED: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":{\"c\":\"1\",\"e\":{\"c.a\":\"2\"}}}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]]
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"e.v\".c") = "1" ORDER BY Col1;
+ EXPECTED: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":{\"c\":\"1\",\"e\":{\"c.a\":\"2\"}}}"]]]
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"e.v\".e.\"c.a\"") = "2" ORDER BY Col1;
+ EXPECTED: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":{\"c\":\"1\",\"e\":{\"c.a\":\"2\"}}}"]]]
+
+ )";
+ TScriptVariator(script).Execute();
+ }
+
+ Y_UNIT_TEST(RestoreFirstLevelVariants) {
+ TString script = R"(
+ SCHEMA:
+ CREATE TABLE `/Root/ColumnTable` (
+ Col1 Uint64 NOT NULL,
+ Col2 JsonDocument,
+ PRIMARY KEY (Col1)
+ )
+ PARTITION BY HASH(Col1)
+ WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = $$1|2|10$$);
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`)
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_EXTRACTOR_CLASS_NAME`=`JSON_SCANNER`, `SCAN_FIRST_LEVEL_ONLY`=`true`,
+ `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$1024|0|1$$`,
+ `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ ------
+ DATA:
+ REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1", "d" : null, "e.v" : {"c" : 1, "e" : {"c.a" : 2}}}')), (2u, JsonDocument('{"a" : "a2"}')),
+ (3u, JsonDocument('{"b" : "b3", "d" : "d3", "e" : ["a", {"v" : ["c", 5]}]}')), (4u, JsonDocument('{"b" : "b4asdsasdaa", "a" : "a4"}'))
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` ORDER BY Col1;
+ EXPECTED: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":\"{\\\"c\\\":1,\\\"e\\\":{\\\"c.a\\\":2}}\"}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\",\"e\":\"[\\\"a\\\",{\\\"v\\\":[\\\"c\\\",5]}]\"}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]]
+
+ )";
+ TScriptVariator(script).Execute();
+ }
+
+ Y_UNIT_TEST(RestoreFullJsonVariants) {
+ TString script = R"(
+ SCHEMA:
+ CREATE TABLE `/Root/ColumnTable` (
+ Col1 Uint64 NOT NULL,
+ Col2 JsonDocument,
+ PRIMARY KEY (Col1)
+ )
+ PARTITION BY HASH(Col1)
+ WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = $$1|2|10$$);
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`)
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_EXTRACTOR_CLASS_NAME`=`JSON_SCANNER`, `SCAN_FIRST_LEVEL_ONLY`=`false`,
+ `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$1024|0|1$$`,
+ `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ ------
+ DATA:
+ REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1", "d" : null, "e.v" : {"c" : 1, "e" : {"c.a" : 2}}}')), (2u, JsonDocument('{"a" : "a2"}')),
+ (3u, JsonDocument('{"b" : "b3", "d" : "d3", "e" : ["a", {"v" : ["c", 5]}]}')), (4u, JsonDocument('{"b" : "b4asdsasdaa", "a" : "a4"}'))
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` ORDER BY Col1;
+ EXPECTED: [[1u;["{\"a\":\"a1\",\"b\":\"b1\",\"c\":\"c1\",\"d\":\"NULL\",\"e.v\":{\"c\":\"1\",\"e\":{\"c.a\":\"2\"}}}"]];[2u;["{\"a\":\"a2\"}"]];[3u;["{\"b\":\"b3\",\"d\":\"d3\",\"e\":[\"a\",{\"v\":[\"c\",\"5\"]}]}"]];[4u;["{\"a\":\"a4\",\"b\":\"b4asdsasdaa\"}"]]]
+
+ )";
+ TScriptVariator(script).Execute();
+ }
+
+ Y_UNIT_TEST(RestoreJsonArrayVariants) {
+ TString script = R"(
+ SCHEMA:
+ CREATE TABLE `/Root/ColumnTable` (
+ Col1 Uint64 NOT NULL,
+ Col2 JsonDocument,
+ PRIMARY KEY (Col1)
+ )
+ PARTITION BY HASH(Col1)
+ WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = $$1|2|10$$);
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`)
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_EXTRACTOR_CLASS_NAME`=`JSON_SCANNER`, `SCAN_FIRST_LEVEL_ONLY`=`false`,
+ `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`, `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$1024|0|1$$`,
+ `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ ------
+ DATA:
+ REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('["a", {"v" : 4}, 1,2,3,4,5,6,7,8,9,10,11,12]'))
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` ORDER BY Col1;
+ EXPECTED: [[1u;["[\"a\",{\"v\":\"4\"},\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\"]"]]]
)";
TScriptVariator(script).Execute();
@@ -502,7 +604,7 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
------
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1"}')), (2u, JsonDocument('{"a" : "a2"}')),
@@ -534,7 +636,7 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
------
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1"}')), (2u, JsonDocument('{"a" : "a2"}')),
@@ -564,7 +666,7 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
------
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1, Col2, Col3) VALUES(1u, JsonDocument('{"a" : "value_a", "b" : "b1", "c" : "c1"}'), "value1"), (2u, JsonDocument('{"a" : "value_a"}'), "value1"),
@@ -594,7 +696,7 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
------
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1, Col2, Col3) VALUES(1u, JsonDocument('{"a" : "value_a", "b" : "b1", "c" : "c1"}'), "value1"), (2u, JsonDocument('{"a" : "value_a"}'), "value1"),
@@ -624,7 +726,7 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
------
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$0|1|1024$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$0|1|1024$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1, Col2, Col3) VALUES(1u, JsonDocument('{"a" : "value_a", "b" : "b1", "c" : "c1"}'), "value1"), (2u, JsonDocument('{"a" : "value_a"}'), "value1"),
@@ -653,7 +755,7 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
------
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1", "b" : "b1", "c" : "c1"}')), (2u, JsonDocument('{"a" : "a2"}')),
@@ -682,7 +784,7 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
------
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1"}')), (2u, JsonDocument('{"a" : "a2"}')),
@@ -711,7 +813,7 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
------
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$1024|0|1$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1"}')), (2u, JsonDocument('{"a" : "a2"}')),
@@ -742,7 +844,7 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
------
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$0|1|1024$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$0|1|1024$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1"}')), (2u, JsonDocument('{"a" : "a2"}')),
@@ -765,7 +867,7 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
TScriptVariator(script).Execute();
}
- Y_UNIT_TEST(BloomIndexesVariants) {
+ Y_UNIT_TEST(BloomMixIndexesVariants) {
TString script = R"(
STOP_COMPACTION
------
@@ -776,14 +878,16 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
PRIMARY KEY (Col1)
)
PARTITION BY HASH(Col1)
- WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = $$2$$);
+ WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 2);
------
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`)
------
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$0|1|1024$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ `DATA_EXTRACTOR_CLASS_NAME`=`JSON_SCANNER`, `FORCE_SIMD_PARSING`=`$$true|false$$`, `SCAN_FIRST_LEVEL_ONLY`=`$$true|false$$`,
+ `COLUMNS_LIMIT`=`$$0|1|1024$$`, `SPARSED_DETECTOR_KFF`=`$$0|10$$`,
+ `MEM_LIMIT_CHUNK`=`$$0|1000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a.b.c" : "a1"}')), (2u, JsonDocument('{"a.b.c" : "a2"}')),
@@ -800,7 +904,12 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_INDEX, NAME=index_ngramm_b, TYPE=BLOOM_NGRAMM_FILTER,
FEATURES=`{"column_name" : "Col2", "ngramm_size" : 3, "hashes_count" : 2, "filter_size_bytes" : 4096,
- "records_count" : 1024, "data_extractor" : {"class_name" : "SUB_COLUMN", "sub_column_name" : "b.c.d"}}`);
+ "records_count" : 1024, "case_sensitive" : false, "data_extractor" : {"class_name" : "SUB_COLUMN", "sub_column_name" : '"b.c.d"'}}`);
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_INDEX, NAME=index_ngramm_a, TYPE=BLOOM_NGRAMM_FILTER,
+ FEATURES=`{"column_name" : "Col2", "ngramm_size" : 3, "hashes_count" : 2, "filter_size_bytes" : 4096,
+ "records_count" : 1024, "case_sensitive" : true, "data_extractor" : {"class_name" : "SUB_COLUMN", "sub_column_name" : "a"}}`);
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1) VALUES(10u)
@@ -826,7 +935,31 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
EXPECTED: [[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]]
IDX_ND_SKIP_APPROVE: 0, 4, 1
------
- READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") = "1b5" ORDER BY Col1;
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") like "%1b4%" ORDER BY Col1;
+ EXPECTED: [[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]]
+ IDX_ND_SKIP_APPROVE: 0, 4, 1
+# ------
+# READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") ilike "%1b4%" ORDER BY Col1;
+# EXPECTED: [[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]]
+# IDX_ND_SKIP_APPROVE: 0, 4, 1
+# ------
+# READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") ilike "%1B4" ORDER BY Col1;
+# EXPECTED: [[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]]
+# IDX_ND_SKIP_APPROVE: 0, 4, 1
+# ------
+# READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") ilike "1b5" ORDER BY Col1;
+# EXPECTED: []
+# IDX_ND_SKIP_APPROVE: 0, 5, 0
+# ------
+# READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "1b5" ORDER BY Col1;
+# EXPECTED: []
+# IDX_ND_SKIP_APPROVE: 0, 5, 0
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "a4" ORDER BY Col1;
+ EXPECTED: [[4u;["{\"a\":\"a4\",\"b.c.d\":\"b4\"}"]];[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]]
+ IDX_ND_SKIP_APPROVE: 0, 3, 2
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d111\"") = "1b5" ORDER BY Col1;
EXPECTED: []
IDX_ND_SKIP_APPROVE: 0, 5, 0
------
@@ -842,6 +975,172 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
TScriptVariator(script).Execute();
}
+ Y_UNIT_TEST(BloomCategoryIndexesVariants) {
+ TString script = R"(
+ STOP_COMPACTION
+ ------
+ SCHEMA:
+ CREATE TABLE `/Root/ColumnTable` (
+ Col1 Uint64 NOT NULL,
+ Col2 JsonDocument,
+ PRIMARY KEY (Col1)
+ )
+ PARTITION BY HASH(Col1)
+ WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 2);
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`)
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
+ `DATA_EXTRACTOR_CLASS_NAME`=`JSON_SCANNER`, `FORCE_SIMD_PARSING`=`$$true|false$$`, `SCAN_FIRST_LEVEL_ONLY`=`$$true|false$$`,
+ `COLUMNS_LIMIT`=`$$0|1|1024$$`, `SPARSED_DETECTOR_KFF`=`$$0|10$$`,
+ `MEM_LIMIT_CHUNK`=`$$0|1000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ ------
+ DATA:
+ REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a.b.c" : "a1"}')), (2u, JsonDocument('{"a.b.c" : "a2"}')),
+ (3u, JsonDocument('{"b.c.d" : "b3"}')), (4u, JsonDocument('{"b.c.d" : "b4", "a" : "a4"}'))
+ ------
+ DATA:
+ REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(11u, JsonDocument('{"a.b.c" : "1a1"}')), (12u, JsonDocument('{"a.b.c" : "1a2"}')),
+ (13u, JsonDocument('{"b.c.d" : "1b3"}')), (14u, JsonDocument('{"b.c.d" : "1b4", "a" : "a4"}'))
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_INDEX, NAME=a_index, TYPE=$$CATEGORY_BLOOM_FILTER|BLOOM_FILTER$$,
+ FEATURES=`{"column_name" : "Col2", "false_positive_probability" : 0.01}`)
+ ------
+ DATA:
+ REPLACE INTO `/Root/ColumnTable` (Col1) VALUES(10u)
+ ------
+ ONE_ACTUALIZATION
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"a.b.c\"") = "a1" ORDER BY Col1;
+ EXPECTED: [[1u;["{\"a.b.c\":\"a1\"}"]]]
+ IDX_ND_SKIP_APPROVE: 0, 4, 1
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=DROP_INDEX, NAME=a_index)
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"a.b.c\"") = "1a1" ORDER BY Col1;
+ EXPECTED: [[11u;["{\"a.b.c\":\"1a1\"}"]]]
+ IDX_ND_SKIP_APPROVE: 0, 4, 1
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_INDEX, NAME=b_index, TYPE=CATEGORY_BLOOM_FILTER,
+ FEATURES=`{"column_name" : "Col2", "false_positive_probability" : 0.01}`)
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") = "1b4" ORDER BY Col1;
+ EXPECTED: [[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]]
+ IDX_ND_SKIP_APPROVE: 0, 4, 1
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "1b5" ORDER BY Col1;
+ EXPECTED: []
+ IDX_ND_SKIP_APPROVE: 0, 5, 0
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "a4" ORDER BY Col1;
+ EXPECTED: [[4u;["{\"a\":\"a4\",\"b.c.d\":\"b4\"}"]];[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]]
+ IDX_ND_SKIP_APPROVE: 0, 3, 2
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d111\"") = "1b5" ORDER BY Col1;
+ EXPECTED: []
+ IDX_ND_SKIP_APPROVE: 0, 5, 0
+
+ )";
+ TScriptVariator(script).Execute();
+ }
+
+ Y_UNIT_TEST(BloomNGrammIndexesVariants) {
+ TString script = R"(
+ STOP_COMPACTION
+ ------
+ SCHEMA:
+ CREATE TABLE `/Root/ColumnTable` (
+ Col1 Uint64 NOT NULL,
+ Col2 JsonDocument,
+ PRIMARY KEY (Col1)
+ )
+ PARTITION BY HASH(Col1)
+ WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 2);
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`SIMPLE`)
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
+ `DATA_EXTRACTOR_CLASS_NAME`=`JSON_SCANNER`, `FORCE_SIMD_PARSING`=`$$true|false$$`, `SCAN_FIRST_LEVEL_ONLY`=`$$true|false$$`,
+ `COLUMNS_LIMIT`=`$$0|1|1024$$`, `SPARSED_DETECTOR_KFF`=`$$0|10$$`,
+ `MEM_LIMIT_CHUNK`=`$$0|1000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ ------
+ DATA:
+ REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a.b.c" : "a1"}')), (2u, JsonDocument('{"a.b.c" : "a2"}')),
+ (3u, JsonDocument('{"b.c.d" : "b3"}')), (4u, JsonDocument('{"b.c.d" : "b4", "a" : "a4"}'))
+ ------
+ DATA:
+ REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(11u, JsonDocument('{"a.b.c" : "1a1"}')), (12u, JsonDocument('{"a.b.c" : "1a2"}')),
+ (13u, JsonDocument('{"b.c.d" : "1b3"}')), (14u, JsonDocument('{"b.c.d" : "1b4", "a" : "a4"}'))
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_INDEX, NAME=index_ngramm_b, TYPE=BLOOM_NGRAMM_FILTER,
+ FEATURES=`{"column_name" : "Col2", "ngramm_size" : 3, "hashes_count" : 2, "filter_size_bytes" : 4096,
+ "records_count" : 1024, "case_sensitive" : false, "data_extractor" : {"class_name" : "SUB_COLUMN", "sub_column_name" : '"b.c.d"'}}`);
+ ------
+ SCHEMA:
+ ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=UPSERT_INDEX, NAME=index_ngramm_a, TYPE=BLOOM_NGRAMM_FILTER,
+ FEATURES=`{"column_name" : "Col2", "ngramm_size" : 3, "hashes_count" : 2, "filter_size_bytes" : 4096,
+ "records_count" : 1024, "case_sensitive" : true, "data_extractor" : {"class_name" : "SUB_COLUMN", "sub_column_name" : "a"}}`);
+ ------
+ DATA:
+ REPLACE INTO `/Root/ColumnTable` (Col1) VALUES(10u)
+ ------
+ ONE_ACTUALIZATION
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"a.b.c\"") = "a1" ORDER BY Col1;
+ EXPECTED: [[1u;["{\"a.b.c\":\"a1\"}"]]]
+ IDX_ND_SKIP_APPROVE: 5, 0, 0
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") = "1b4" ORDER BY Col1;
+ EXPECTED: [[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]]
+ IDX_ND_SKIP_APPROVE: 0, 4, 1
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") like "%1b4%" ORDER BY Col1;
+ EXPECTED: [[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]]
+ IDX_ND_SKIP_APPROVE: 0, 4, 1
+# ------
+# READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") ilike "%1b4%" ORDER BY Col1;
+# EXPECTED: [[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]]
+# IDX_ND_SKIP_APPROVE: 0, 4, 1
+# ------
+# READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") ilike "%1B4" ORDER BY Col1;
+# EXPECTED: [[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]]
+# IDX_ND_SKIP_APPROVE: 0, 4, 1
+# ------
+# READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") ilike "1b5" ORDER BY Col1;
+# EXPECTED: []
+# IDX_ND_SKIP_APPROVE: 0, 5, 0
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "1b5" ORDER BY Col1;
+ EXPECTED: []
+ IDX_ND_SKIP_APPROVE: 0, 5, 0
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.a") = "a4" ORDER BY Col1;
+ EXPECTED: [[4u;["{\"a\":\"a4\",\"b.c.d\":\"b4\"}"]];[14u;["{\"a\":\"a4\",\"b.c.d\":\"1b4\"}"]]]
+ IDX_ND_SKIP_APPROVE: 0, 3, 2
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d111\"") = "1b5" ORDER BY Col1;
+ EXPECTED: []
+ IDX_ND_SKIP_APPROVE: 5, 0, 0
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") like "1b3" ORDER BY Col1;
+ EXPECTED: [[13u;["{\"b.c.d\":\"1b3\"}"]]]
+ IDX_ND_SKIP_APPROVE: 0, 4, 1
+ ------
+ READ: SELECT * FROM `/Root/ColumnTable` WHERE JSON_VALUE(Col2, "$.\"b.c.d\"") like "1B3" ORDER BY Col1;
+ EXPECTED: []
+ IDX_ND_SKIP_APPROVE: 0, 4, 1
+ )";
+ TScriptVariator(script).Execute();
+ }
+
Y_UNIT_TEST(SwitchAccessorCompactionVariants) {
TString script = R"(
STOP_COMPACTION
@@ -864,7 +1163,7 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
------
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$0|1|1024$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$0|1|1024$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(11u, JsonDocument('{"a" : "1a1"}')), (12u, JsonDocument('{"a" : "1a2"}')),
@@ -901,7 +1200,7 @@ Y_UNIT_TEST_SUITE(KqpOlapJson) {
------
SCHEMA:
ALTER OBJECT `/Root/ColumnTable` (TYPE TABLE) SET (ACTION=ALTER_COLUMN, NAME=Col2, `DATA_ACCESSOR_CONSTRUCTOR.CLASS_NAME`=`SUB_COLUMNS`,
- `COLUMNS_LIMIT`=`$$0|1|1024$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
+ `FORCE_SIMD_PARSING`=`$$true|false$$`, `COLUMNS_LIMIT`=`$$0|1|1024$$`, `SPARSED_DETECTOR_KFF`=`$$0|10|1000$$`, `MEM_LIMIT_CHUNK`=`$$0|100|1000000$$`, `OTHERS_ALLOWED_FRACTION`=`$$0|0.5$$`)
------
DATA:
REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1"}')), (2u, JsonDocument('{"a" : "a2"}')),
diff --git a/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp b/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp
index 6c50912c0f..a1301d8d28 100644
--- a/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp
+++ b/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp
@@ -332,6 +332,31 @@ Y_UNIT_TEST_SUITE(KqpOlap) {
}
}
+ Y_UNIT_TEST(EmptyColumnsRead) {
+ auto settings = TKikimrSettings()
+ .SetWithSampleTables(false);
+ TKikimrRunner kikimr(settings);
+
+ TLocalHelper(kikimr).CreateTestOlapTable();
+
+ WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000, 2);
+
+ auto client = kikimr.GetQueryClient();
+
+ Tests::NCommon::TLoggerInit(kikimr).Initialize();
+
+ {
+ auto it = client.ExecuteQuery(R"(
+ --!syntax_v1
+
+ SELECT 1
+ FROM `/Root/olapStore/olapTable`
+ )",NYdb::NQuery::TTxControl::NoTx(), NYdb::NQuery::TExecuteQuerySettings()).ExtractValueSync();
+
+ UNIT_ASSERT_C(it.IsSuccess(), it.GetIssues().ToString());
+ }
+ }
+
Y_UNIT_TEST(SimpleQueryOlapStats) {
auto settings = TKikimrSettings()
.SetWithSampleTables(false);
@@ -812,54 +837,81 @@ Y_UNIT_TEST_SUITE(KqpOlap) {
WriteTestData(kikimr, "/Root/olapStore/olapTable", 0, 1000000, 128);
auto tableClient = kikimr.GetTableClient();
- auto selectQueryWithSort = TString(R"(
+ auto selectQuerySortDesc = TString(R"(
--!syntax_v1
SELECT `timestamp` FROM `/Root/olapStore/olapTable` ORDER BY `timestamp` DESC LIMIT 4;
)");
- auto selectQuery = TString(R"(
+ auto selectQuerySortAsc = TString(R"(
--!syntax_v1
SELECT `timestamp` FROM `/Root/olapStore/olapTable` ORDER BY `timestamp` LIMIT 4;
)");
-
- auto it = tableClient.StreamExecuteScanQuery(selectQuery, scanSettings).GetValueSync();
- auto result = CollectStreamResult(it);
+ auto selectQueryNoSort = TString(R"(
+ --!syntax_v1
+ SELECT `timestamp` FROM `/Root/olapStore/olapTable` LIMIT 4;
+ )");
NJson::TJsonValue plan, node, reverse, limit, pushedLimit;
- NJson::ReadJsonTree(*result.PlanJson, &plan, true);
- Cerr << *result.PlanJson << Endl;
- Cerr << result.QueryStats->query_plan() << Endl;
- Cerr << result.QueryStats->query_ast() << Endl;
-
- node = FindPlanNodeByKv(plan, "Node Type", "TopSort-TableFullScan");
- UNIT_ASSERT(node.IsDefined());
- reverse = FindPlanNodeByKv(node, "Reverse", "false");
- UNIT_ASSERT(!reverse.IsDefined());
- pushedLimit = FindPlanNodeByKv(node, "ReadLimit", "4");
- UNIT_ASSERT(pushedLimit.IsDefined());
- limit = FindPlanNodeByKv(node, "Limit", "4");
- UNIT_ASSERT(limit.IsDefined());
-
- // Check that Reverse flag is set in query plan
- it = tableClient.StreamExecuteScanQuery(selectQueryWithSort, scanSettings).GetValueSync();
- result = CollectStreamResult(it);
+ {
+ auto it = tableClient.StreamExecuteScanQuery(selectQuerySortAsc, scanSettings).GetValueSync();
+ auto result = CollectStreamResult(it);
- NJson::ReadJsonTree(*result.PlanJson, &plan, true);
- Cerr << "==============================" << Endl;
- Cerr << *result.PlanJson << Endl;
- Cerr << result.QueryStats->query_plan() << Endl;
- Cerr << result.QueryStats->query_ast() << Endl;
-
- node = FindPlanNodeByKv(plan, "Node Type", "TopSort-TableFullScan");
- UNIT_ASSERT(node.IsDefined());
- reverse = FindPlanNodeByKv(node, "Reverse", "true");
- UNIT_ASSERT(reverse.IsDefined());
- limit = FindPlanNodeByKv(node, "Limit", "4");
- UNIT_ASSERT(limit.IsDefined());
- pushedLimit = FindPlanNodeByKv(node, "ReadLimit", "4");
- UNIT_ASSERT(pushedLimit.IsDefined());
+ NJson::ReadJsonTree(*result.PlanJson, &plan, true);
+ Cerr << *result.PlanJson << Endl;
+ Cerr << result.QueryStats->query_plan() << Endl;
+ Cerr << result.QueryStats->query_ast() << Endl;
+
+ node = FindPlanNodeByKv(plan, "Node Type", "TopSort-TableFullScan");
+ UNIT_ASSERT(node.IsDefined());
+ reverse = FindPlanNodeByKv(node, "Reverse", "false");
+ UNIT_ASSERT(reverse.IsDefined());
+ pushedLimit = FindPlanNodeByKv(node, "ReadLimit", "4");
+ UNIT_ASSERT(pushedLimit.IsDefined());
+ limit = FindPlanNodeByKv(node, "Limit", "4");
+ UNIT_ASSERT(limit.IsDefined());
+ }
+
+ {
+ // Check that Reverse flag is set in query plan
+ auto it = tableClient.StreamExecuteScanQuery(selectQuerySortDesc, scanSettings).GetValueSync();
+ auto result = CollectStreamResult(it);
+
+ NJson::ReadJsonTree(*result.PlanJson, &plan, true);
+ Cerr << "==============================" << Endl;
+ Cerr << *result.PlanJson << Endl;
+ Cerr << result.QueryStats->query_plan() << Endl;
+ Cerr << result.QueryStats->query_ast() << Endl;
+
+ node = FindPlanNodeByKv(plan, "Node Type", "TopSort-TableFullScan");
+ UNIT_ASSERT(node.IsDefined());
+ reverse = FindPlanNodeByKv(node, "Reverse", "true");
+ UNIT_ASSERT(reverse.IsDefined());
+ limit = FindPlanNodeByKv(node, "Limit", "4");
+ UNIT_ASSERT(limit.IsDefined());
+ pushedLimit = FindPlanNodeByKv(node, "ReadLimit", "4");
+ UNIT_ASSERT(pushedLimit.IsDefined());
+ }
+
+ {
+ // Check that Reverse flag is set in query plan
+ auto it = tableClient.StreamExecuteScanQuery(selectQueryNoSort, scanSettings).GetValueSync();
+ auto result = CollectStreamResult(it);
+
+ NJson::ReadJsonTree(*result.PlanJson, &plan, true);
+ Cerr << "==============================" << Endl;
+ Cerr << *result.PlanJson << Endl;
+ Cerr << result.QueryStats->query_plan() << Endl;
+ Cerr << result.QueryStats->query_ast() << Endl;
+
+ node = FindPlanNodeByKv(plan, "Node Type", "Limit-TableFullScan");
+ UNIT_ASSERT(node.IsDefined());
+ limit = FindPlanNodeByKv(node, "Limit", "4");
+ UNIT_ASSERT(limit.IsDefined());
+ pushedLimit = FindPlanNodeByKv(node, "ReadLimit", "4");
+ UNIT_ASSERT(pushedLimit.IsDefined());
+ }
// Run actual request in case explain did not execute anything
- it = tableClient.StreamExecuteScanQuery(selectQueryWithSort).GetValueSync();
+ auto it = tableClient.StreamExecuteScanQuery(selectQuerySortDesc).GetValueSync();
UNIT_ASSERT(it.IsSuccess());
diff --git a/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp b/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp
index b3da7006c6..79dbff9ca4 100644
--- a/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp
+++ b/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp
@@ -152,11 +152,11 @@ Y_UNIT_TEST_SUITE(KqpKv) {
auto res = FormatResultSetYson(selectResult.GetResultSet());
CompareYson(R"(
[
- [1858343823u;0u;"abcde"];
- [1921763476782200957u;1u;"abcde"];
- [3843526951706058091u;2u;"abcde"];
- [5765290426629915225u;3u;"abcde"];
- [7687053901553772359u;4u;"abcde"]
+ [[1858343823u];[0u];["abcde"]];
+ [[1921763476782200957u];[1u];["abcde"]];
+ [[3843526951706058091u];[2u];["abcde"]];
+ [[5765290426629915225u];[3u];["abcde"]];
+ [[7687053901553772359u];[4u];["abcde"]]
]
)", TString{res});
}
@@ -263,11 +263,11 @@ Y_UNIT_TEST_SUITE(KqpKv) {
UNIT_ASSERT_C(selectResult.IsSuccess(), selectResult.GetIssues().ToString());
auto res = FormatResultSetYson(selectResult.GetResultSet());
CompareYson(R"([
- [10u;0u;"abcde"];
- [11u;1u;"abcde"];
- [12u;2u;"abcde"];
- [13u;3u;"abcde"];
- [14u;4u;"abcde"]
+ [[10u];[0u];["abcde"]];
+ [[11u];[1u];["abcde"]];
+ [[12u];[2u];["abcde"]];
+ [[13u];[3u];["abcde"]];
+ [[14u];[4u];["abcde"]]
])", TString{res});
}
{
@@ -364,7 +364,7 @@ Y_UNIT_TEST_SUITE(KqpKv) {
UNIT_ASSERT_C(selectResult.IsSuccess(), selectResult.GetIssues().ToString());
auto res = FormatResultSetYson(selectResult.GetResultSet());
- CompareYson(Sprintf("[[%du;%du]]", valueToReturn_1, valueToReturn_2), TString{res});
+ CompareYson(Sprintf("[[[%du];[%du]]]", valueToReturn_1, valueToReturn_2), TString{res});
}
Y_UNIT_TEST_TWIN(ReadRows_ExternalBlobs, UseExtBlobsPrecharge) {
@@ -813,9 +813,9 @@ Y_UNIT_TEST_SUITE(KqpKv) {
auto res = FormatResultSetYson(selectResult.GetResultSet());
CompareYson(R"(
[
- ["0.123456789";"0.123456789";"0.123456789";"0.123456789";0u];
- ["1.123456789";"1000.123456789";"10.123456789";"1000000.123456789";1u];
- ["2.123456789";"2000.123456789";"20.123456789";"2000000.123456789";2u]
+ [["0.123456789"];["0.123456789"];["0.123456789"];["0.123456789"];[0u]];
+ [["1.123456789"];["1000.123456789"];["10.123456789"];["1000000.123456789"];[1u]];
+ [["2.123456789"];["2000.123456789"];["20.123456789"];["2000000.123456789"];[2u]]
]
)", TString{res});
}
@@ -833,10 +833,64 @@ Y_UNIT_TEST_SUITE(KqpKv) {
auto selectResult = db.ReadRows("/Root/TestTable", keys.Build()).GetValueSync();
UNIT_ASSERT_C(selectResult.IsSuccess(), selectResult.GetIssues().ToString());
auto res = FormatResultSetYson(selectResult.GetResultSet());
- CompareYson(R"([["inf";"inf";"inf";"inf";999999999u];])", TString{res});
+ CompareYson(R"([[["inf"];["inf"];["inf"];["inf"];[999999999u]];])", TString{res});
}
}
+ Y_UNIT_TEST(ReadRows_Nulls) {
+ auto settings = TKikimrSettings()
+ .SetWithSampleTables(false);
+ auto kikimr = TKikimrRunner{settings};
+ auto db = kikimr.GetTableClient();
+ auto session = db.CreateSession().GetValueSync().GetSession();
+
+ auto schemeResult = session.ExecuteSchemeQuery(R"(
+ CREATE TABLE TestTable (
+ Key Uint64,
+ Data Uint32,
+ Value Utf8,
+ PRIMARY KEY (Key)
+ );
+ )").GetValueSync();
+ UNIT_ASSERT_C(schemeResult.IsSuccess(), schemeResult.GetIssues().ToString());
+
+ NYdb::TValueBuilder rows;
+ rows.BeginList();
+ for (size_t i = 0; i < 5; ++i) {
+ rows.AddListItem()
+ .BeginStruct()
+ .AddMember("Key").Uint64(i * 1921763474923857134ull + 1858343823)
+ .EndStruct();
+ }
+ rows.EndList();
+
+ auto upsertResult = db.BulkUpsert("/Root/TestTable", rows.Build()).GetValueSync();
+ UNIT_ASSERT_C(upsertResult.IsSuccess(), upsertResult.GetIssues().ToString());
+
+ NYdb::TValueBuilder keys;
+ keys.BeginList();
+ for (size_t i = 0; i < 5; ++i) {
+ keys.AddListItem()
+ .BeginStruct()
+ .AddMember("Key").Uint64(i * 1921763474923857134ull + 1858343823)
+ .EndStruct();
+ }
+ keys.EndList();
+ auto selectResult = db.ReadRows("/Root/TestTable", keys.Build()).GetValueSync();
+ Cerr << "IsSuccess(): " << selectResult.IsSuccess() << " GetStatus(): " << selectResult.GetStatus() << Endl;
+ UNIT_ASSERT_C(selectResult.IsSuccess(), selectResult.GetIssues().ToString());
+ auto res = FormatResultSetYson(selectResult.GetResultSet());
+ CompareYson(R"(
+ [
+ [[1858343823u];#;#];
+ [[1921763476782200957u];#;#];
+ [[3843526951706058091u];#;#];
+ [[5765290426629915225u];#;#];
+ [[7687053901553772359u];#;#]
+ ]
+ )", TString{res});
+ }
+
}
diff --git a/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp b/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp
new file mode 100644
index 0000000000..f9478e2a33
--- /dev/null
+++ b/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp
@@ -0,0 +1,102 @@
+#include <ydb/core/kqp/ut/common/kqp_ut_common.h>
+#include <ydb/core/kqp/counters/kqp_counters.h>
+
+#include <util/system/fs.h>
+
+namespace NKikimr {
+namespace NKqp {
+
+using namespace NYdb;
+using namespace NYdb::NTable;
+
+namespace {
+
+TKikimrSettings AppSettings(TStringStream& logStream) {
+ NKikimrConfig::TAppConfig appCfg;
+
+ TKikimrSettings serverSettings;
+ serverSettings.SetAppConfig(appCfg);
+ serverSettings.LogStream = &logStream;
+
+ return serverSettings;
+}
+
+void FillTableWithData(NQuery::TQueryClient& db, ui64 numRows=300) {
+ for (ui32 i = 0; i < numRows; ++i) {
+ auto result = db.ExecuteQuery(Sprintf(R"(
+ --!syntax_v1
+ REPLACE INTO `/Root/KeyValue` (Key, Value) VALUES (%d, "%s")
+ )", i, TString(200000 + i, 'a' + (i % 26)).c_str()), NYdb::NQuery::TTxControl::BeginTx().CommitTx()).GetValueSync();
+ UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
+ }
+}
+
+void RunTestForQuery(const std::string& query, const std::string& expectedLog) {
+ TStringStream logsStream;
+
+ Cerr << "cwd: " << NFs::CurrentWorkingDirectory() << Endl;
+ TKikimrRunner kikimr(AppSettings(logsStream));
+
+ kikimr.GetTestServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_TASKS_RUNNER, NActors::NLog::PRI_DEBUG);
+
+ auto db = kikimr.GetQueryClient();
+
+ FillTableWithData(db);
+
+ auto explainMode = NYdb::NQuery::TExecuteQuerySettings().ExecMode(NYdb::NQuery::EExecMode::Explain);
+ auto planres = db.ExecuteQuery(query, NYdb::NQuery::TTxControl::NoTx(), explainMode).ExtractValueSync();
+ Cerr << planres.GetIssues().ToString() << Endl;
+ UNIT_ASSERT_VALUES_EQUAL_C(planres.GetStatus(), EStatus::SUCCESS, planres.GetIssues().ToString());
+
+ Cerr << planres.GetStats()->GetAst() << Endl;
+
+ auto result = db.ExecuteQuery(query, NYdb::NQuery::TTxControl::BeginTx().CommitTx(), NYdb::NQuery::TExecuteQuerySettings()).ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
+
+ TString output = FormatResultSetYson(result.GetResultSet(0));
+ Cout << output << Endl;
+
+ bool hasExpectedLog = false;
+ TString line;
+ while (logsStream.ReadLine(line)) {
+ if (line.Contains(expectedLog)) {
+ hasExpectedLog = true;
+ break;
+ }
+ }
+ // TODO: Uncomment after: https://github.com/ydb-platform/ydb/issues/15597
+ Y_UNUSED(hasExpectedLog);
+ // UNIT_ASSERT(hasExpectedLog);
+}
+
+} // anonymous namespace
+
+Y_UNIT_TEST_SUITE(KqpScanLogs) {
+
+Y_UNIT_TEST(WideCombine) {
+ auto query = R"(
+ --!syntax_v1
+ select count(t.Key) from `/Root/KeyValue` as t group by t.Value
+ )";
+
+ RunTestForQuery(query, "[WideCombine]");
+}
+
+Y_UNIT_TEST(GraceJoin) {
+ auto query = R"(
+ --!syntax_v1
+ PRAGMA ydb.CostBasedOptimizationLevel='0';
+ PRAGMA ydb.HashJoinMode='graceandself';
+ select t1.Key, t1.Value, t2.Key, t2.Value
+ from `/Root/KeyValue` as t1 full join `/Root/KeyValue` as t2 on t1.Value = t2.Value
+ order by t1.Value
+ )";
+
+ RunTestForQuery(query, "[GraceJoin]");
+}
+
+
+} // suite
+
+} // namespace NKqp
+} // namespace NKikimr
diff --git a/ydb/core/kqp/ut/spilling/kqp_scan_spilling_ut.cpp b/ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp
index 1f255ce8b1..1f255ce8b1 100644
--- a/ydb/core/kqp/ut/spilling/kqp_scan_spilling_ut.cpp
+++ b/ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp
diff --git a/ydb/core/kqp/ut/spilling/ya.make b/ydb/core/kqp/ut/runtime/ya.make
index d5642cc575..5f15bda669 100644
--- a/ydb/core/kqp/ut/spilling/ya.make
+++ b/ydb/core/kqp/ut/runtime/ya.make
@@ -6,6 +6,7 @@ SIZE(MEDIUM)
SRCS(
kqp_scan_spilling_ut.cpp
+ kqp_scan_logging_ut.cpp
)
PEERDIR(
diff --git a/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp b/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp
index 42c1257fce..db77fa211c 100644
--- a/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp
+++ b/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp
@@ -26,6 +26,7 @@ namespace NKqp {
using namespace NYdb;
using namespace NYdb::NTable;
+using namespace NYdb::NReplication;
Y_UNIT_TEST_SUITE(KqpScheme) {
Y_UNIT_TEST(UseUnauthorizedTable) {
@@ -7642,6 +7643,35 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
--!syntax_v1
ALTER ASYNC REPLICATION `/Root/replication`
SET (
+ STATE = "Paused"
+ );
+ )";
+
+ const auto result = session.ExecuteSchemeQuery(query).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
+ }
+
+ // alter state and config
+ {
+ auto query = R"(
+ --!syntax_v1
+ ALTER ASYNC REPLICATION `/Root/replication`
+ SET (
+ STATE = "StandBy"
+ );
+ )";
+
+ const auto result = session.ExecuteSchemeQuery(query).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
+ }
+
+
+ // alter state and config
+ {
+ auto query = R"(
+ --!syntax_v1
+ ALTER ASYNC REPLICATION `/Root/replication`
+ SET (
STATE = "DONE",
FAILOVER_MODE = "FORCE",
CONNECTION_STRING = "grpc://localhost:2135/?database=/Root"
@@ -7676,7 +7706,7 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
break;
}
- UNIT_ASSERT_C(i, "Alter timeout");
+ //UNIT_ASSERT_C(i, "Alter timeout");
Sleep(TDuration::Seconds(1));
}
}
@@ -9014,6 +9044,34 @@ Y_UNIT_TEST_SUITE(KqpScheme) {
--!syntax_v1
ALTER TRANSFER `/Root/transfer`
SET (
+ STATE = "Paused"
+ );
+ )";
+
+ const auto result = session.ExecuteQuery(query, NYdb::NQuery::TTxControl::NoTx()).ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
+ }
+
+ // alter state and config
+ {
+ auto query = R"(
+ --!syntax_v1
+ ALTER TRANSFER `/Root/transfer`
+ SET (
+ STATE = "StandBy"
+ );
+ )";
+
+ const auto result = session.ExecuteQuery(query, NYdb::NQuery::TTxControl::NoTx()).ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
+ }
+
+ // alter state and config
+ {
+ auto query = R"(
+ --!syntax_v1
+ ALTER TRANSFER `/Root/transfer`
+ SET (
STATE = "DONE",
CONNECTION_STRING = "grpc://localhost:2135/?database=/Root"
);
@@ -10642,64 +10700,90 @@ Y_UNIT_TEST_SUITE(KqpOlapScheme) {
}
}
- Y_UNIT_TEST(AddColumn) {
- TKikimrSettings runnerSettings;
- runnerSettings.WithSampleTables = false;
- TTestHelper testHelper(runnerSettings);
+ class TestAddColumn {
+ private:
+ TString ReaderPolicyName;
- TVector<TTestHelper::TColumnSchema> schema = {
- TTestHelper::TColumnSchema().SetName("id").SetType(NScheme::NTypeIds::Int32).SetNullable(false),
- TTestHelper::TColumnSchema().SetName("resource_id").SetType(NScheme::NTypeIds::Utf8),
- TTestHelper::TColumnSchema().SetName("level").SetType(NScheme::NTypeIds::Int32)
- };
+ public:
+ TestAddColumn(const TString& reader)
+ : ReaderPolicyName(reader) {
+ }
- Tests::NCommon::TLoggerInit(testHelper.GetKikimr()).Initialize();
- TTestHelper::TColumnTable testTable;
+ void Run() {
+ TKikimrSettings runnerSettings;
+ runnerSettings.WithSampleTables = false;
+ runnerSettings.SetColumnShardAlterObjectEnabled(true);
+ TTestHelper testHelper(runnerSettings);
- testTable.SetName("/Root/ColumnTableTest").SetPrimaryKey({"id"}).SetSharding({"id"}).SetSchema(schema);
- testHelper.CreateTable(testTable);
+ TVector<TTestHelper::TColumnSchema> schema = {
+ TTestHelper::TColumnSchema().SetName("id").SetType(NScheme::NTypeIds::Int32).SetNullable(false),
+ TTestHelper::TColumnSchema().SetName("resource_id").SetType(NScheme::NTypeIds::Utf8),
+ TTestHelper::TColumnSchema().SetName("level").SetType(NScheme::NTypeIds::Int32)
+ };
- {
- TTestHelper::TUpdatesBuilder tableInserter(testTable.GetArrowSchema(schema));
- tableInserter.AddRow().Add(1).Add("test_res_1").AddNull();
- tableInserter.AddRow().Add(2).Add("test_res_2").Add(123);
- testHelper.BulkUpsert(testTable, tableInserter);
- }
+ Tests::NCommon::TLoggerInit(testHelper.GetKikimr()).Initialize();
+ TTestHelper::TColumnTable testTable;
- testHelper.ReadData("SELECT * FROM `/Root/ColumnTableTest` WHERE id=1", "[[1;#;[\"test_res_1\"]]]");
+ testTable.SetName("/Root/ColumnTableTest").SetPrimaryKey({ "id" }).SetSharding({ "id" }).SetSchema(schema);
+ testHelper.CreateTable(testTable);
+ {
+ auto alterQuery = TStringBuilder()
+ << "ALTER OBJECT `" << testTable.GetName()
+ << "` (TYPE TABLE) SET (ACTION=UPSERT_OPTIONS, `SCAN_READER_POLICY_NAME`=`" << ReaderPolicyName << "`)";
+ auto alterResult = testHelper.GetSession().ExecuteSchemeQuery(alterQuery).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(alterResult.GetStatus(), EStatus::SUCCESS, alterResult.GetIssues().ToString());
+ }
- {
- schema.push_back(TTestHelper::TColumnSchema().SetName("new_column").SetType(NScheme::NTypeIds::Uint64));
- auto alterQuery = TStringBuilder() << "ALTER TABLE `" << testTable.GetName() << "` ADD COLUMN new_column Uint64;";
- auto alterResult = testHelper.GetSession().ExecuteSchemeQuery(alterQuery).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(alterResult.GetStatus(), EStatus::SUCCESS, alterResult.GetIssues().ToString());
- }
+ {
+ TTestHelper::TUpdatesBuilder tableInserter(testTable.GetArrowSchema(schema));
+ tableInserter.AddRow().Add(1).Add("test_res_1").AddNull();
+ tableInserter.AddRow().Add(2).Add("test_res_2").Add(123);
+ testHelper.BulkUpsert(testTable, tableInserter);
+ }
- {
- auto settings = TDescribeTableSettings().WithTableStatistics(true);
- auto describeResult = testHelper.GetSession().DescribeTable("/Root/ColumnTableTest", settings).GetValueSync();
- UNIT_ASSERT_C(describeResult.IsSuccess(), describeResult.GetIssues().ToString());
+ testHelper.ReadData("SELECT * FROM `/Root/ColumnTableTest` WHERE id=1", "[[1;#;[\"test_res_1\"]]]");
- const auto& description = describeResult.GetTableDescription();
- auto columns = description.GetTableColumns();
- UNIT_ASSERT_VALUES_EQUAL(columns.size(), 4);
- }
+ {
+ schema.push_back(TTestHelper::TColumnSchema().SetName("new_column").SetType(NScheme::NTypeIds::Uint64));
+ auto alterQuery = TStringBuilder() << "ALTER TABLE `" << testTable.GetName() << "` ADD COLUMN new_column Uint64;";
+ auto alterResult = testHelper.GetSession().ExecuteSchemeQuery(alterQuery).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(alterResult.GetStatus(), EStatus::SUCCESS, alterResult.GetIssues().ToString());
+ }
- testHelper.ReadData("SELECT * FROM `/Root/ColumnTableTest` WHERE id=1", "[[1;#;#;[\"test_res_1\"]]]");
- testHelper.ReadData("SELECT new_column FROM `/Root/ColumnTableTest` WHERE id=1", "[[#]]");
- testHelper.ReadData("SELECT resource_id FROM `/Root/ColumnTableTest` WHERE id=1", "[[[\"test_res_1\"]]]");
- Tests::NCommon::TLoggerInit(testHelper.GetKikimr()).Initialize();
- {
- TTestHelper::TUpdatesBuilder tableInserter(testTable.GetArrowSchema(schema));
- tableInserter.AddRow().Add(3).Add("test_res_3").Add(123).Add<uint64_t>(200);
- testHelper.BulkUpsert(testTable, tableInserter);
+ {
+ auto settings = TDescribeTableSettings().WithTableStatistics(true);
+ auto describeResult = testHelper.GetSession().DescribeTable("/Root/ColumnTableTest", settings).GetValueSync();
+ UNIT_ASSERT_C(describeResult.IsSuccess(), describeResult.GetIssues().ToString());
+
+ const auto& description = describeResult.GetTableDescription();
+ auto columns = description.GetTableColumns();
+ UNIT_ASSERT_VALUES_EQUAL(columns.size(), 4);
+ }
+
+ testHelper.ReadData("SELECT * FROM `/Root/ColumnTableTest` WHERE id=1", "[[1;#;#;[\"test_res_1\"]]]");
+ testHelper.ReadData("SELECT new_column FROM `/Root/ColumnTableTest` WHERE id=1", "[[#]]");
+ testHelper.ReadData("SELECT resource_id FROM `/Root/ColumnTableTest` WHERE id=1", "[[[\"test_res_1\"]]]");
+ Tests::NCommon::TLoggerInit(testHelper.GetKikimr()).Initialize();
+ {
+ TTestHelper::TUpdatesBuilder tableInserter(testTable.GetArrowSchema(schema));
+ tableInserter.AddRow().Add(3).Add("test_res_3").Add(123).Add<uint64_t>(200);
+ testHelper.BulkUpsert(testTable, tableInserter);
+ }
+
+ testHelper.ReadData("SELECT * FROM `/Root/ColumnTableTest` WHERE id=3", "[[3;[123];[200u];[\"test_res_3\"]]]");
+ testHelper.ReadData("SELECT * FROM `/Root/ColumnTableTest` WHERE new_column=200", "[[3;[123];[200u];[\"test_res_3\"]]]");
+ testHelper.ReadData("SELECT new_column FROM `/Root/ColumnTableTest` WHERE id=3", "[[[200u]]]");
+ testHelper.ReadData("SELECT resource_id FROM `/Root/ColumnTableTest` WHERE id=3", "[[[\"test_res_3\"]]]");
+ testHelper.ReadData("SELECT new_column FROM `/Root/ColumnTableTest`", "[[#];[#];[[200u]]]");
}
+ };
+
+ Y_UNIT_TEST(AddColumn) {
+ TestAddColumn("PLAIN").Run();
+ }
- testHelper.ReadData("SELECT * FROM `/Root/ColumnTableTest` WHERE id=3", "[[3;[123];[200u];[\"test_res_3\"]]]");
- testHelper.ReadData("SELECT * FROM `/Root/ColumnTableTest` WHERE new_column=200", "[[3;[123];[200u];[\"test_res_3\"]]]");
- testHelper.ReadData("SELECT new_column FROM `/Root/ColumnTableTest` WHERE id=3", "[[[200u]]]");
- testHelper.ReadData("SELECT resource_id FROM `/Root/ColumnTableTest` WHERE id=3", "[[[\"test_res_3\"]]]");
- testHelper.ReadData("SELECT new_column FROM `/Root/ColumnTableTest`", "[[#];[#];[[200u]]]");
+ Y_UNIT_TEST(AddColumnSimpleReader) {
+ TestAddColumn("SIMPLE").Run();
}
Y_UNIT_TEST(AddColumnOldSchemeBulkUpsert) {
diff --git a/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp b/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp
index d8210714fc..d7fc0fa186 100644
--- a/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp
+++ b/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp
@@ -2895,6 +2895,77 @@ Y_UNIT_TEST_SUITE(KqpQueryService) {
}
}
+ Y_UNIT_TEST(ShowCreateTableOnView) {
+ auto serverSettings = TKikimrSettings().SetEnableShowCreate(true);
+
+ TKikimrRunner kikimr(serverSettings);
+ auto db = kikimr.GetQueryClient();
+ auto session = db.GetSession().GetValueSync().GetSession();
+
+ {
+ // note: KeyValue is one of the sample tables created in KikimrRunner
+ auto result = session.ExecuteQuery(R"(
+ CREATE VIEW test_view WITH security_invoker = TRUE AS
+ SELECT * FROM KeyValue;
+ )", TTxControl::NoTx()).ExtractValueSync();
+ UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
+ }
+
+ {
+ auto result = session.ExecuteQuery(R"(
+ SHOW CREATE TABLE test_view;
+ )", TTxControl::NoTx()).ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::BAD_REQUEST, result.GetIssues().ToString());
+ UNIT_ASSERT_STRING_CONTAINS(result.GetIssues().ToString(), "Path type mismatch, expected: Table");
+ }
+ }
+
+ Y_UNIT_TEST(ShowCreateView) {
+ auto serverSettings = TKikimrSettings().SetEnableShowCreate(true);
+
+ TKikimrRunner kikimr(serverSettings);
+ auto db = kikimr.GetQueryClient();
+ auto session = db.GetSession().GetValueSync().GetSession();
+
+ {
+ // note: KeyValue is one of the sample tables created in KikimrRunner
+ auto result = session.ExecuteQuery(R"(
+ CREATE VIEW test_view WITH security_invoker = TRUE AS
+ SELECT * FROM KeyValue;
+ )", TTxControl::NoTx()).ExtractValueSync();
+ UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
+ }
+
+ {
+ auto result = session.ExecuteQuery(R"(
+ SHOW CREATE VIEW test_view;
+ )", TTxControl::NoTx()).ExtractValueSync();
+ UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString());
+ UNIT_ASSERT(!result.GetResultSets().empty());
+
+ CompareYson(R"([
+ [["test_view"];["View"];["CREATE VIEW `test_view` WITH (security_invoker = TRUE) AS\nSELECT * FROM KeyValue;\n"]];
+ ])", FormatResultSetYson(result.GetResultSet(0)));
+ }
+ }
+
+ Y_UNIT_TEST(ShowCreateViewOnTable) {
+ auto serverSettings = TKikimrSettings().SetEnableShowCreate(true);
+
+ TKikimrRunner kikimr(serverSettings);
+ auto db = kikimr.GetQueryClient();
+ auto session = db.GetSession().GetValueSync().GetSession();
+
+ {
+ // note: KeyValue is one of the sample tables created in KikimrRunner
+ auto result = session.ExecuteQuery(R"(
+ SHOW CREATE VIEW KeyValue;
+ )", TTxControl::NoTx()).ExtractValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::BAD_REQUEST, result.GetIssues().ToString());
+ UNIT_ASSERT_STRING_CONTAINS(result.GetIssues().ToString(), "Path type mismatch, expected: View");
+ }
+ }
+
Y_UNIT_TEST(DdlCache) {
NKikimrConfig::TAppConfig appConfig;
auto setting = NKikimrKqp::TKqpSetting();
diff --git a/ydb/core/kqp/ut/ya.make b/ydb/core/kqp/ut/ya.make
index e8b3dbe256..38757b3dca 100644
--- a/ydb/core/kqp/ut/ya.make
+++ b/ydb/core/kqp/ut/ya.make
@@ -16,7 +16,7 @@ RECURSE_FOR_TESTS(
scan
scheme
service
- spilling
+ runtime
sysview
tx
view
diff --git a/ydb/core/kqp/ya.make b/ydb/core/kqp/ya.make
index df5b517a94..6337e62249 100644
--- a/ydb/core/kqp/ya.make
+++ b/ydb/core/kqp/ya.make
@@ -79,4 +79,5 @@ RECURSE(
RECURSE_FOR_TESTS(
ut
+ tools/combiner_perf/bin
)
diff --git a/ydb/core/persqueue/pq_impl.cpp b/ydb/core/persqueue/pq_impl.cpp
index 2918c297ba..4c7c616cc2 100644
--- a/ydb/core/persqueue/pq_impl.cpp
+++ b/ydb/core/persqueue/pq_impl.cpp
@@ -140,9 +140,13 @@ private:
{
Y_ABORT_UNLESS(Response);
const auto& record = ev->Get()->Record;
- if (!record.HasPartitionResponse() || !record.GetPartitionResponse().HasCmdReadResult() ||
- record.GetStatus() != NMsgBusProxy::MSTATUS_OK || record.GetErrorCode() != NPersQueue::NErrorCode::OK ||
- record.GetPartitionResponse().GetCmdReadResult().ResultSize() == 0) {
+ auto isDirectRead = DirectReadKey.ReadId != 0;
+ if (!record.HasPartitionResponse()
+ || !record.GetPartitionResponse().HasCmdReadResult()
+ || record.GetStatus() != NMsgBusProxy::MSTATUS_OK
+ || record.GetErrorCode() != NPersQueue::NErrorCode::OK
+ || (record.GetPartitionResponse().GetCmdReadResult().ResultSize() == 0 && !isDirectRead)
+ ) {
Response->Record.CopyFrom(record);
ctx.Send(Sender, Response.Release());
@@ -151,7 +155,6 @@ private:
}
Y_ABORT_UNLESS(record.HasPartitionResponse() && record.GetPartitionResponse().HasCmdReadResult());
const auto& readResult = record.GetPartitionResponse().GetCmdReadResult();
- auto isDirectRead = DirectReadKey.ReadId != 0;
if (isDirectRead) {
if (!PreparedResponse) {
PreparedResponse = std::make_shared<NKikimrClient::TResponse>();
@@ -162,10 +165,12 @@ private:
responseRecord.SetStatus(NMsgBusProxy::MSTATUS_OK);
responseRecord.SetErrorCode(NPersQueue::NErrorCode::OK);
- Y_ABORT_UNLESS(readResult.ResultSize() > 0);
+ Y_ABORT_UNLESS(readResult.ResultSize() > 0 || isDirectRead);
bool isStart = false;
if (!responseRecord.HasPartitionResponse()) {
- Y_ABORT_UNLESS(!readResult.GetResult(0).HasPartNo() || readResult.GetResult(0).GetPartNo() == 0); //starts from begin of record
+ if (readResult.ResultSize() > 0) {
+ Y_ABORT_UNLESS(!readResult.GetResult(0).HasPartNo() || readResult.GetResult(0).GetPartNo() == 0); //starts from begin of record
+ }
auto partResp = responseRecord.MutablePartitionResponse();
auto readRes = partResp->MutableCmdReadResult();
readRes->SetBlobsFromDisk(readRes->GetBlobsFromDisk() + readResult.GetBlobsFromDisk());
diff --git a/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp b/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp
index b39ad0c0c7..f101a70b51 100644
--- a/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp
+++ b/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp
@@ -924,6 +924,68 @@ Y_UNIT_TEST_SUITE(TopicAutoscaling) {
}
}
+ Y_UNIT_TEST(PartitionSplit_AutosplitByLoad_AfterAlter) {
+ TTopicSdkTestSetup setup = CreateSetup();
+ TTopicClient client = setup.MakeClient();
+
+ TCreateTopicSettings createSettings;
+ createSettings
+ .BeginConfigurePartitioningSettings()
+ .MinActivePartitions(1)
+ .EndConfigurePartitioningSettings();
+ client.CreateTopic(TEST_TOPIC, createSettings).Wait();
+
+ TAlterTopicSettings alterSettings;
+ alterSettings
+ .BeginAlterPartitioningSettings()
+ .MinActivePartitions(1)
+ .MaxActivePartitions(100)
+ .BeginAlterAutoPartitioningSettings()
+ .UpUtilizationPercent(2)
+ .DownUtilizationPercent(1)
+ .StabilizationWindow(TDuration::Seconds(2))
+ .Strategy(EAutoPartitioningStrategy::ScaleUp)
+ .EndAlterAutoPartitioningSettings()
+ .EndAlterTopicPartitioningSettings();
+ client.AlterTopic(TEST_TOPIC, alterSettings).Wait();
+
+ auto msg = TString(1_MB, 'a');
+
+ auto writeSession_1 = CreateWriteSession(client, "producer-1", 0, std::string{TEST_TOPIC}, false);
+ auto writeSession_2 = CreateWriteSession(client, "producer-2", 0, std::string{TEST_TOPIC}, false);
+
+ {
+ UNIT_ASSERT(writeSession_1->Write(Msg(msg, 1)));
+ UNIT_ASSERT(writeSession_1->Write(Msg(msg, 2)));
+ Sleep(TDuration::Seconds(5));
+ auto describe = client.DescribeTopic(TEST_TOPIC).GetValueSync();
+ UNIT_ASSERT_EQUAL(describe.GetTopicDescription().GetPartitions().size(), 1);
+ }
+
+ {
+ UNIT_ASSERT(writeSession_1->Write(Msg(msg, 3)));
+ UNIT_ASSERT(writeSession_2->Write(Msg(msg, 4)));
+ UNIT_ASSERT(writeSession_1->Write(Msg(msg, 5)));
+ UNIT_ASSERT(writeSession_2->Write(Msg(msg, 6)));
+ Sleep(TDuration::Seconds(5));
+ auto describe = client.DescribeTopic(TEST_TOPIC).GetValueSync();
+ UNIT_ASSERT_EQUAL(describe.GetTopicDescription().GetPartitions().size(), 3);
+ }
+
+ auto writeSession2_1 = CreateWriteSession(client, "producer-1", 1, std::string{TEST_TOPIC}, false);
+ auto writeSession2_2 = CreateWriteSession(client, "producer-2", 1, std::string{TEST_TOPIC}, false);
+
+ {
+ UNIT_ASSERT(writeSession2_1->Write(Msg(msg, 7)));
+ UNIT_ASSERT(writeSession2_2->Write(Msg(msg, 8)));
+ UNIT_ASSERT(writeSession2_1->Write(Msg(msg, 9)));
+ UNIT_ASSERT(writeSession2_2->Write(Msg(msg, 10)));
+ Sleep(TDuration::Seconds(5));
+ auto describe2 = client.DescribeTopic(TEST_TOPIC).GetValueSync();
+ UNIT_ASSERT_EQUAL(describe2.GetTopicDescription().GetPartitions().size(), 5);
+ }
+ }
+
void ExecuteQuery(NYdb::NTable::TSession& session, const TString& query ) {
const auto result = session.ExecuteSchemeQuery(query).GetValueSync();
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), NYdb::EStatus::SUCCESS, result.GetIssues().ToString());
diff --git a/ydb/core/protos/auth.proto b/ydb/core/protos/auth.proto
index 6e713d7f2c..3eb8b3408e 100644
--- a/ydb/core/protos/auth.proto
+++ b/ydb/core/protos/auth.proto
@@ -58,6 +58,7 @@ message TAuthConfig {
optional string NodeRegistrationToken = 82 [default = "root@builtin", (Ydb.sensitive) = true];
optional TPasswordComplexity PasswordComplexity = 83;
optional TAccountLockout AccountLockout = 84;
+ optional string ClusterAccessResourceId = 85 [default = "gizmo"];
}
message TUserRegistryConfig {
diff --git a/ydb/core/protos/config.proto b/ydb/core/protos/config.proto
index 854a07179c..80d9b29229 100644
--- a/ydb/core/protos/config.proto
+++ b/ydb/core/protos/config.proto
@@ -718,6 +718,23 @@ message TGRpcConfig {
repeated string RatelimiterServicesEnabled = 25;
repeated string RatelimiterServicesDisabled = 26;
+ enum YdbGrpcCompressionAlgorithm {
+ YDB_GRPC_COMPRESS_NONE = 0;
+ YDB_GRPC_COMPRESS_DEFLATE = 1;
+ YDB_GRPC_COMPRESS_GZIP = 2;
+ };
+
+ optional YdbGrpcCompressionAlgorithm DefaultCompressionAlgorithm = 30 [default = YDB_GRPC_COMPRESS_GZIP];
+
+ enum YdbGrpcCompressionLevel {
+ YDB_GRPC_COMPRESS_LEVEL_NONE = 0;
+ YDB_GRPC_COMPRESS_LEVEL_LOW = 1;
+ YDB_GRPC_COMPRESS_LEVEL_MED = 2;
+ YDB_GRPC_COMPRESS_LEVEL_HIGH = 3;
+ }
+
+ optional YdbGrpcCompressionLevel DefaultCompressionLevel = 31 [default = YDB_GRPC_COMPRESS_LEVEL_MED];
+
// server socket options
optional bool KeepAliveEnable = 100 [default = true]; // SO_KEEPALIVE
optional uint32 KeepAliveIdleTimeoutTriggerSec = 101 [default = 90]; // TCP_KEEPIDLE
diff --git a/ydb/core/protos/counters_blob_depot.proto b/ydb/core/protos/counters_blob_depot.proto
index ab4ed54f10..669275fc1a 100644
--- a/ydb/core/protos/counters_blob_depot.proto
+++ b/ydb/core/protos/counters_blob_depot.proto
@@ -80,4 +80,6 @@ enum ETxTypes {
TXTYPE_PREPARE_WRITE_S3 = 17 [(NKikimr.TxTypeOpts) = {Name: "TTxPrepareWriteS3"}];
TXTYPE_DELETE_TRASH_S3 = 18 [(NKikimr.TxTypeOpts) = {Name: "TTxDeleteTrashS3"}];
TXTYPE_PROCESS_SCANNED_KEYS = 19 [(NKikimr.TxTypeOpts) = {Name: "TTxProcessScannedKeys"}];
+ TXTYPE_DECOMMIT_BLOBS = 20 [(NKikimr.TxTypeOpts) = {Name: "TTxDecommitBlobs"}];
+ TXTYPE_PREPARE = 21 [(NKikimr.TxTypeOpts) = {Name: "TTxPrepare"}];
}
diff --git a/ydb/core/protos/flat_scheme_op.proto b/ydb/core/protos/flat_scheme_op.proto
index 9d0f3fdfbe..983245835f 100644
--- a/ydb/core/protos/flat_scheme_op.proto
+++ b/ydb/core/protos/flat_scheme_op.proto
@@ -431,6 +431,7 @@ message TRequestedBloomNGrammFilter {
optional uint32 RecordsCount = 5;
optional TIndexDataExtractor DataExtractor = 6;
optional TSkipIndexBitSetStorage BitsStorage = 7;
+ optional bool CaseSensitive = 8 [default = true];
}
message TRequestedMaxIndex {
@@ -472,6 +473,7 @@ message TBloomNGrammFilter {
optional uint32 RecordsCount = 5;
optional TIndexDataExtractor DataExtractor = 6;
optional TSkipIndexBitSetStorage BitsStorage = 7;
+ optional bool CaseSensitive = 8 [default = true];
}
message TMaxIndex {
@@ -1268,6 +1270,15 @@ message TRestoreTask {
}
optional bool ValidateChecksums = 7; // currently available for s3
+
+ message TEncryptionSettings {
+ optional bytes IV = 1;
+ oneof Key {
+ Ydb.Export.EncryptionSettings.SymmetricKey SymmetricKey = 2;
+ }
+ }
+
+ optional TEncryptionSettings EncryptionSettings = 8;
}
message TPersQueueGroupAllocate {
diff --git a/ydb/core/protos/index_builder.proto b/ydb/core/protos/index_builder.proto
index 12beb69e56..e0acece547 100644
--- a/ydb/core/protos/index_builder.proto
+++ b/ydb/core/protos/index_builder.proto
@@ -21,6 +21,12 @@ message TColumnBuildSettings {
optional string Table = 2;
}
+message TIndexBuildScanSettings {
+ optional uint32 MaxBatchRows = 1 [ default = 50000 ];
+ optional uint64 MaxBatchBytes = 2 [ default = 8388608 ];
+ optional uint32 MaxBatchRetries = 3 [ default = 50 ];
+}
+
message TIndexBuildSettings {
optional string source_path = 1;
optional Ydb.Table.TableIndex index = 2;
@@ -28,12 +34,15 @@ message TIndexBuildSettings {
optional bool pg_mode = 8 [ default = false];
optional bool if_not_exist = 9 [ default = false];
- optional uint32 max_batch_rows = 3 [ default = 50000 ];
- optional uint64 max_batch_bytes = 4 [ default = 8388608 ];
+ reserved 3; // max_batch_rows
+ reserved 4; // max_batch_bytes
+
optional uint32 max_shards_in_flight = 5 [ default = 32 ];
- optional uint32 max_retries_upload_batch = 6 [ default = 50 ];
+ reserved 6; // max_retries_upload_batch
optional google.protobuf.Any AlterMainTablePayload = 10;
+
+ optional NKikimrIndexBuilder.TIndexBuildScanSettings ScanSettings = 11;
}
message TIndexBuild {
diff --git a/ydb/core/protos/kqp.proto b/ydb/core/protos/kqp.proto
index 46dee8ed2f..24b4df45df 100644
--- a/ydb/core/protos/kqp.proto
+++ b/ydb/core/protos/kqp.proto
@@ -639,7 +639,8 @@ message TEvScanInitActor {
optional NActorsProto.TActorId ScanActorId = 2;
optional uint32 Generation = 3;
optional uint64 TabletId = 4;
-};
+ optional bool AllowPings = 5;
+}
message TEvScanError {
optional Ydb.StatusIds.StatusCode Status = 1;
@@ -648,6 +649,9 @@ message TEvScanError {
optional uint64 TabletId = 4;
}
+message TEvScanPing {
+}
+
message TEvKqpScanCursor {
message TColumnShardScanPlain {
}
@@ -655,9 +659,14 @@ message TEvKqpScanCursor {
optional uint64 SourceId = 1;
optional uint32 StartRecordIndex = 2;
}
+ message TColumnShardScanNotSortedSimple {
+ optional uint64 SourceId = 1;
+ optional uint32 StartRecordIndex = 2;
+ }
oneof Implementation {
TColumnShardScanPlain ColumnShardPlain = 10;
TColumnShardScanSimple ColumnShardSimple = 11;
+ TColumnShardScanNotSortedSimple ColumnShardNotSortedSimple = 12;
}
}
diff --git a/ydb/core/protos/out/out.cpp b/ydb/core/protos/out/out.cpp
index 04030e17c8..eb8c79a0fe 100644
--- a/ydb/core/protos/out/out.cpp
+++ b/ydb/core/protos/out/out.cpp
@@ -258,6 +258,6 @@ Y_DECLARE_OUT_SPEC(, NKikimrIndexBuilder::EBuildStatus, stream, value) {
stream << NKikimrIndexBuilder::EBuildStatus_Name(value);
}
-Y_DECLARE_OUT_SPEC(, NKikimrTxDataShard::TEvLocalKMeansRequest_EState, stream, value) {
- stream << NKikimrTxDataShard::TEvLocalKMeansRequest_EState_Name(value);
+Y_DECLARE_OUT_SPEC(, NKikimrTxDataShard::EKMeansState, stream, value) {
+ stream << NKikimrTxDataShard::EKMeansState_Name(value);
}
diff --git a/ydb/core/protos/table_service_config.proto b/ydb/core/protos/table_service_config.proto
index e8dde4aae1..2ffd193491 100644
--- a/ydb/core/protos/table_service_config.proto
+++ b/ydb/core/protos/table_service_config.proto
@@ -85,6 +85,10 @@ message TTableServiceConfig {
optional uint64 TotalReadSizeLimitBytes = 4;
}
+ message TTxWriteBufferLimits {
+ optional uint64 WriteBufferMemoryLimitBytes = 1;
+ }
+
message TQueryLimits {
optional TQueryPhaseLimits PhaseLimits = 1;
optional uint32 SchemeQueryTimeoutMs = 2 [default = 600000];
@@ -92,6 +96,7 @@ message TTableServiceConfig {
optional uint32 ScanQueryTimeoutMs = 4 [default = 600000];
optional uint32 ResultRowsLimit = 5;
optional uint32 WaitCAStatsTimeoutMs = 6 [default = 1000];
+ optional TTxWriteBufferLimits BufferLimits = 7;
}
message TShutdownSettings {
diff --git a/ydb/core/protos/tx_datashard.proto b/ydb/core/protos/tx_datashard.proto
index 0884818a9d..0c37e1f7c5 100644
--- a/ydb/core/protos/tx_datashard.proto
+++ b/ydb/core/protos/tx_datashard.proto
@@ -214,6 +214,7 @@ message TKqpTransaction {
// Type of read result: unboxed values or Arrow blocks of data
optional EReadType ReadType = 14;
repeated string GroupByColumnNames = 15;
+ optional uint32 OptionalSorting = 16;
}
optional EKqpTransactionType Type = 1;
@@ -263,6 +264,7 @@ message TKqpReadRangesSourceSettings {
repeated TKqpTransaction.TColumnMeta DuplicateCheckColumns = 19;
optional NKikimrDataEvents.ELockMode LockMode = 20;
+ optional uint32 OptionalSorting = 21;
}
message TKqpTaskInfo {
@@ -1436,17 +1438,18 @@ message TEvBuildIndexCreateRequest {
optional uint64 SnapshotTxId = 8;
optional uint64 SnapshotStep = 9;
- optional uint32 MaxBatchRows = 10;
- optional uint64 MaxBatchBytes = 11;
+ reserved 10; // MaxBatchRows
+ reserved 11; // MaxBatchBytes
optional uint64 SeqNoGeneration = 12; // monotonically increasing sequence, first part
optional uint64 SeqNoRound = 13; // monotonically increasing sequence, second part
-
- optional uint64 MaxRetries = 14;
+ reserved 14; // MaxRetries
repeated string DataColumns = 15;
optional NKikimrIndexBuilder.TColumnBuildSettings ColumnBuildSettings = 16;
+
+ optional NKikimrIndexBuilder.TIndexBuildScanSettings ScanSettings = 17;
}
message TEvBuildIndexProgressResponse {
@@ -1513,6 +1516,17 @@ message TEvSampleKResponse {
repeated bytes Rows = 11;
}
+enum EKMeansState {
+ UNSPECIFIED = 0;
+ SAMPLE = 1;
+ KMEANS = 2;
+ UPLOAD_MAIN_TO_BUILD = 3;
+ UPLOAD_MAIN_TO_POSTING = 4;
+ UPLOAD_BUILD_TO_BUILD = 5;
+ UPLOAD_BUILD_TO_POSTING = 6;
+ DONE = 7;
+};
+
message TEvLocalKMeansRequest {
optional uint64 Id = 1;
@@ -1528,23 +1542,10 @@ message TEvLocalKMeansRequest {
optional Ydb.Table.VectorIndexSettings Settings = 8;
optional uint64 Seed = 9;
- optional uint32 K = 10;
- enum EState {
- UNSPECIFIED = 0;
- SAMPLE = 1;
- KMEANS = 2;
- UPLOAD_MAIN_TO_BUILD = 3;
- UPLOAD_MAIN_TO_POSTING = 4;
- UPLOAD_BUILD_TO_BUILD = 5;
- UPLOAD_BUILD_TO_POSTING = 6;
- DONE = 7;
- };
- optional EState Upload = 11;
- // State != DONE
- optional EState State = 12;
- // State != KMEANS || DoneRounds < NeedsRounds
- optional uint32 DoneRounds = 13;
+ optional EKMeansState Upload = 11;
+
+ optional uint32 K = 10;
optional uint32 NeedsRounds = 14;
// id of parent cluster
@@ -1558,6 +1559,8 @@ message TEvLocalKMeansRequest {
optional string EmbeddingColumn = 19;
repeated string DataColumns = 20;
+
+ optional NKikimrIndexBuilder.TIndexBuildScanSettings ScanSettings = 22;
}
message TEvLocalKMeansResponse {
@@ -1576,10 +1579,6 @@ message TEvLocalKMeansResponse {
optional uint64 UploadBytes = 9;
optional uint64 ReadRows = 10;
optional uint64 ReadBytes = 11;
-
- // TODO(mbkkt) implement slow-path (reliable-path)
- // optional TEvLocalKMeansRequest.EState State
- // optional uint32 DoneRounds
}
message TEvReshuffleKMeansRequest {
@@ -1596,7 +1595,7 @@ message TEvReshuffleKMeansRequest {
optional Ydb.Table.VectorIndexSettings Settings = 8;
- optional TEvLocalKMeansRequest.EState Upload = 9;
+ optional EKMeansState Upload = 9;
// id of parent cluster
optional uint64 Parent = 10;
@@ -1609,6 +1608,8 @@ message TEvReshuffleKMeansRequest {
optional string EmbeddingColumn = 14;
repeated string DataColumns = 15;
+
+ optional NKikimrIndexBuilder.TIndexBuildScanSettings ScanSettings = 16;
}
message TEvReshuffleKMeansResponse {
@@ -1637,7 +1638,7 @@ message TEvPrefixKMeansRequest {
optional uint64 TabletId = 2;
optional NKikimrProto.TPathID PathId = 3;
-
+
optional uint64 SeqNoGeneration = 4;
optional uint64 SeqNoRound = 5;
@@ -1645,7 +1646,7 @@ message TEvPrefixKMeansRequest {
optional uint64 Seed = 7;
- optional TEvLocalKMeansRequest.EState Upload = 8;
+ optional EKMeansState Upload = 8;
optional uint32 K = 9;
optional uint32 NeedsRounds = 10;
@@ -1660,6 +1661,8 @@ message TEvPrefixKMeansRequest {
optional string EmbeddingColumn = 15;
repeated string DataColumns = 16;
optional uint32 PrefixColumns = 17;
+
+ optional NKikimrIndexBuilder.TIndexBuildScanSettings ScanSettings = 18;
}
message TEvPrefixKMeansResponse {
@@ -2328,8 +2331,10 @@ message TEvForceDataCleanup {
// Intermediate requests and corresponding TEvForceDataCleanupResult's may be skipped.
message TEvForceDataCleanupResult {
enum EStatus {
- OK = 0;
- FAILED = 1;
+ UNKNOWN = 0;
+ OK = 1;
+ WRONG_SHARD_STATE = 2;
+ BORROWED = 3;
};
optional uint64 DataCleanupGeneration = 1; // from corresponding request (or greater)
optional uint64 TabletId = 2;
diff --git a/ydb/core/security/ticket_parser_impl.h b/ydb/core/security/ticket_parser_impl.h
index b50c371104..7aae4a55b5 100644
--- a/ydb/core/security/ticket_parser_impl.h
+++ b/ydb/core/security/ticket_parser_impl.h
@@ -479,6 +479,12 @@ private:
if (const auto folderId = record.GetAttributeValue(permission, "folder_id"); folderId) {
AddNebiusContainerId(pathsContainer, folderId);
}
+
+ // Use attribute "gizmo_id" as container id that contains cluster access resource
+ // IAM can link roles for cluster access resource
+ if (const auto gizmoId = record.GetAttributeValue(permission, "gizmo_id"); gizmoId) {
+ AddNebiusContainerId(pathsContainer, gizmoId);
+ }
}
template <typename TTokenRecord>
diff --git a/ydb/core/security/ticket_parser_ut.cpp b/ydb/core/security/ticket_parser_ut.cpp
index cb6d3506b3..2904d75583 100644
--- a/ydb/core/security/ticket_parser_ut.cpp
+++ b/ydb/core/security/ticket_parser_ut.cpp
@@ -1661,19 +1661,17 @@ Y_UNIT_TEST_SUITE(TTicketParserTest) {
UNIT_ASSERT_C(result->Error.empty(), result->Error);
UNIT_ASSERT_C(result->Token->IsExist("something.read-bbbb4554@as"), result->Token->ShortDebugString());
- if constexpr (!IsNebiusAccessService<TAccessServiceMock>()) {
- // Authorization successful for gizmo resource
- accessServiceMock.AllowedResourceIds.clear();
- accessServiceMock.AllowedResourceIds.emplace("gizmo");
- runtime->Send(new IEventHandle(MakeTicketParserID(), sender, new TEvTicketParser::TEvAuthorizeTicket(
- userToken,
- {{"gizmo_id", "gizmo"}, },
- {"monitoring.view"})), 0);
- result = runtime->GrabEdgeEvent<TEvTicketParser::TEvAuthorizeTicketResult>(handle);
- UNIT_ASSERT_C(result->Error.empty(), result->Error);
- UNIT_ASSERT_C(result->Token->IsExist("monitoring.view@as"), result->Token->ShortDebugString());
- UNIT_ASSERT_C(result->Token->IsExist("monitoring.view-gizmo@as"), result->Token->ShortDebugString());
- }
+ // Authorization successful for gizmo resource
+ accessServiceMock.AllowedResourceIds.clear();
+ accessServiceMock.AllowedResourceIds.emplace("gizmo");
+ runtime->Send(new IEventHandle(MakeTicketParserID(), sender, new TEvTicketParser::TEvAuthorizeTicket(
+ userToken,
+ {{"gizmo_id", "gizmo"}, },
+ {"monitoring.view"})), 0);
+ result = runtime->GrabEdgeEvent<TEvTicketParser::TEvAuthorizeTicketResult>(handle);
+ UNIT_ASSERT_C(result->Error.empty(), result->Error);
+ UNIT_ASSERT_C(result->Token->IsExist("monitoring.view@as"), result->Token->ShortDebugString());
+ UNIT_ASSERT_C(result->Token->IsExist("monitoring.view-gizmo@as"), result->Token->ShortDebugString());
}
Y_UNIT_TEST(Authorization) {
diff --git a/ydb/core/statistics/service/service_impl.cpp b/ydb/core/statistics/service/service_impl.cpp
index 573ea21c48..d439f5a02a 100644
--- a/ydb/core/statistics/service/service_impl.cpp
+++ b/ydb/core/statistics/service/service_impl.cpp
@@ -695,7 +695,10 @@ private:
while(parser.TryNextRow()) {
auto& col = parser.ColumnParser("data");
- query_response->Data = col.GetString();
+ // may be not optional from versions before fix of bug https://github.com/ydb-platform/ydb/issues/15701
+ query_response->Data = col.GetKind() == NYdb::TTypeParser::ETypeKind::Optional
+ ? col.GetOptionalString()
+ : col.GetString();
}
} else {
SA_LOG_E("[TStatService::ReadRowsResponse] QueryId[ "
diff --git a/ydb/core/sys_view/service/ext_counters.cpp b/ydb/core/sys_view/service/ext_counters.cpp
index fec1047f69..946b3f01c5 100644
--- a/ydb/core/sys_view/service/ext_counters.cpp
+++ b/ydb/core/sys_view/service/ext_counters.cpp
@@ -170,7 +170,7 @@ private:
}
ui64 total = 0;
for (ui32 n = 0; n < count; ++n) {
- ui64 value = snapshot->Value(n);;
+ ui64 value = snapshot->Value(n);
ui64 diff = value - ExecuteLatencyMsPrevValues[n];
total += diff;
ExecuteLatencyMsValues[n] = diff;
@@ -182,7 +182,7 @@ private:
}
metrics->AddMetric("queries.requests", total);
if (total != 0) {
- metrics->AddHistogramMetric("queries.latencies", ExecuteLatencyMsValues, ExecuteLatencyMsBounds);
+ metrics->AddHistogramMetric("queries.latencies", ExecuteLatencyMsBounds, ExecuteLatencyMsValues);
}
}
if (metrics->Record.MetricsSize() > 0) {
diff --git a/ydb/core/sys_view/show_create/create_table_formatter.cpp b/ydb/core/sys_view/show_create/create_table_formatter.cpp
index 50678cfb94..9c53079ec8 100644
--- a/ydb/core/sys_view/show_create/create_table_formatter.cpp
+++ b/ydb/core/sys_view/show_create/create_table_formatter.cpp
@@ -7,7 +7,6 @@
#include <ydb/public/lib/ydb_cli/dump/util/query_utils.h>
-#include <yql/essentials/ast/yql_ast_escaping.h>
#include <yql/essentials/minikql/mkql_type_ops.h>
#include <util/generic/yexception.h>
@@ -19,18 +18,6 @@ using namespace NKikimrSchemeOp;
using namespace Ydb::Table;
using namespace NYdb;
-void TCreateTableFormatter::EscapeName(const TString& str) {
- NYql::EscapeArbitraryAtom(str, '`', &Stream);
-}
-
-void TCreateTableFormatter::EscapeString(const TString& str) {
- NYql::EscapeArbitraryAtom(str, '\'', &Stream);
-}
-
-void TCreateTableFormatter::EscapeBinary(const TString& str) {
- NYql::EscapeBinaryAtom(str, '\'', &Stream);
-}
-
void TCreateTableFormatter::FormatValue(NYdb::TValueParser& parser, bool isPartition, TString del) {
TGuard<NMiniKQL::TScopedAlloc> guard(Alloc);
switch (parser.GetKind()) {
@@ -72,7 +59,7 @@ void TCreateTableFormatter::FormatValue(NYdb::TValueParser& parser, bool isParti
auto precision = decimal.DecimalType_.Precision;
auto scale = decimal.DecimalType_.Scale;
Stream << "CAST(";
- EscapeString(decimal.ToString());
+ EscapeString(decimal.ToString(), Stream);
Stream << " AS Decimal(" << ui32(precision) << "," << ui32(scale) << ")";
Stream << ")";
return;
@@ -152,24 +139,24 @@ void TCreateTableFormatter::FormatPrimitive(NYdb::TValueParser& parser) {
break;
}
case NYdb::EPrimitiveType::Utf8: {
- EscapeString(TString(parser.GetUtf8()));
+ EscapeString(TString(parser.GetUtf8()), Stream);
break;
}
case NYdb::EPrimitiveType::Date: {
Stream << "DATE(";
- EscapeString(parser.GetDate().FormatGmTime("%Y-%m-%d"));
+ EscapeString(parser.GetDate().FormatGmTime("%Y-%m-%d"), Stream);
Stream << ")";
break;
}
case NYdb::EPrimitiveType::Datetime: {
Stream << "DATETIME(";
- EscapeString(parser.GetDatetime().ToStringUpToSeconds());
+ EscapeString(parser.GetDatetime().ToStringUpToSeconds(), Stream);
Stream << ")";
break;
}
case NYdb::EPrimitiveType::Timestamp: {
Stream << "TIMESTAMP(";
- EscapeString(parser.GetTimestamp().ToString());
+ EscapeString(parser.GetTimestamp().ToString(), Stream);
Stream << ")";
break;
}
@@ -177,7 +164,7 @@ void TCreateTableFormatter::FormatPrimitive(NYdb::TValueParser& parser) {
Stream << "INTERVAL(";
const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Interval, NUdf::TUnboxedValuePod(static_cast<i64>(parser.GetInterval())));
Y_ENSURE(str.HasValue());
- EscapeString(TString(str.AsStringRef()));
+ EscapeString(TString(str.AsStringRef()), Stream);
Stream << ")";
break;
}
@@ -185,7 +172,7 @@ void TCreateTableFormatter::FormatPrimitive(NYdb::TValueParser& parser) {
Stream << "DATE32(";
const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Date32, NUdf::TUnboxedValuePod(parser.GetDate32()));
Y_ENSURE(str.HasValue());
- EscapeString(TString(str.AsStringRef()));
+ EscapeString(TString(str.AsStringRef()), Stream);
Stream << ")";
break;
}
@@ -193,7 +180,7 @@ void TCreateTableFormatter::FormatPrimitive(NYdb::TValueParser& parser) {
Stream << "DATETIME64(";
const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Datetime64, NUdf::TUnboxedValuePod(static_cast<i64>(parser.GetDatetime64())));
Y_ENSURE(str.HasValue());
- EscapeString(TString(str.AsStringRef()));
+ EscapeString(TString(str.AsStringRef()), Stream);
Stream << ")";
break;
}
@@ -201,7 +188,7 @@ void TCreateTableFormatter::FormatPrimitive(NYdb::TValueParser& parser) {
Stream << "TIMESTAMP64(";
const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Timestamp64, NUdf::TUnboxedValuePod(static_cast<i64>(parser.GetTimestamp64())));
Y_ENSURE(str.HasValue());
- EscapeString(TString(str.AsStringRef()));
+ EscapeString(TString(str.AsStringRef()), Stream);
Stream << ")";
break;
}
@@ -209,28 +196,28 @@ void TCreateTableFormatter::FormatPrimitive(NYdb::TValueParser& parser) {
Stream << "INTERVAL64(";
const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Interval64, NUdf::TUnboxedValuePod(static_cast<i64>(parser.GetInterval64())));
Y_ENSURE(str.HasValue());
- EscapeString(TString(str.AsStringRef()));
+ EscapeString(TString(str.AsStringRef()), Stream);
Stream << ")";
break;
}
case NYdb::EPrimitiveType::String:
- EscapeString(TString(parser.GetString()));
+ EscapeString(TString(parser.GetString()), Stream);
break;
case NYdb::EPrimitiveType::Yson:
- EscapeString(TString(parser.GetYson()));
+ EscapeString(TString(parser.GetYson()), Stream);
break;
case NYdb::EPrimitiveType::Json:
- EscapeString(TString(parser.GetJson()));
+ EscapeString(TString(parser.GetJson()), Stream);
break;
case NYdb::EPrimitiveType::DyNumber: {
Stream << "DyNumber(";
- EscapeString(TString(parser.GetDyNumber()));
+ EscapeString(TString(parser.GetDyNumber()), Stream);
Stream << ")";
break;
}
case NYdb::EPrimitiveType::Uuid: {
Stream << "UUID(";
- EscapeString(TString(parser.GetUuid().ToString()));
+ EscapeString(TString(parser.GetUuid().ToString()), Stream);
Stream << ")";
break;
}
@@ -258,7 +245,7 @@ private:
TStringStream& Stream;
};
-TCreateTableFormatter::TResult TCreateTableFormatter::Format(const TString& tablePath,
+TFormatResult TCreateTableFormatter::Format(const TString& tablePath,
const TTableDescription& tableDesc, bool temporary) {
Stream.Clear();
@@ -270,14 +257,14 @@ TCreateTableFormatter::TResult TCreateTableFormatter::Format(const TString& tabl
} else {
Stream << "CREATE TABLE ";
}
- EscapeName(tablePath);
+ EscapeName(tablePath, Stream);
Stream << " (\n";
NKikimrMiniKQL::TType mkqlKeyType;
try {
FillColumnDescription(createRequest, mkqlKeyType, tableDesc);
} catch (const yexception& e) {
- return TResult(Ydb::StatusIds::UNSUPPORTED, e.what());
+ return TFormatResult(Ydb::StatusIds::UNSUPPORTED, e.what());
}
Y_ENSURE(!tableDesc.GetColumns().empty());
@@ -298,16 +285,16 @@ TCreateTableFormatter::TResult TCreateTableFormatter::Format(const TString& tabl
Format(*it->second);
}
} catch (const TFormatFail& ex) {
- return TResult(ex.Status, ex.Error);
+ return TFormatResult(ex.Status, ex.Error);
} catch (const yexception& e) {
- return TResult(Ydb::StatusIds::UNSUPPORTED, e.what());
+ return TFormatResult(Ydb::StatusIds::UNSUPPORTED, e.what());
}
try {
FillTableBoundary(createRequest, tableDesc, mkqlKeyType);
FillIndexDescription(createRequest, tableDesc);
} catch (const yexception& e) {
- return TResult(Ydb::StatusIds::UNSUPPORTED, e.what());;
+ return TFormatResult(Ydb::StatusIds::UNSUPPORTED, e.what());;
}
if (!createRequest.indexes().empty()) {
@@ -319,9 +306,9 @@ TCreateTableFormatter::TResult TCreateTableFormatter::Format(const TString& tabl
Format(createRequest.indexes(i));
}
} catch (const TFormatFail& ex) {
- return TResult(ex.Status, ex.Error);
+ return TFormatResult(ex.Status, ex.Error);
} catch (const yexception& e) {
- return TResult(Ydb::StatusIds::UNSUPPORTED, e.what());
+ return TFormatResult(Ydb::StatusIds::UNSUPPORTED, e.what());
}
}
Stream << ",\n";
@@ -340,9 +327,9 @@ TCreateTableFormatter::TResult TCreateTableFormatter::Format(const TString& tabl
isFamilyPrinted = Format(partitionConfig.GetColumnFamilies(i));
}
} catch (const TFormatFail& ex) {
- return TResult(ex.Status, ex.Error);
+ return TFormatResult(ex.Status, ex.Error);
} catch (const yexception& e) {
- return TResult(Ydb::StatusIds::UNSUPPORTED, e.what());
+ return TFormatResult(Ydb::StatusIds::UNSUPPORTED, e.what());
}
}
}
@@ -352,10 +339,10 @@ TCreateTableFormatter::TResult TCreateTableFormatter::Format(const TString& tabl
Stream << ",\n";
}
Stream << "\tPRIMARY KEY (";
- EscapeName(columns[tableDesc.GetKeyColumnIds(0)]->GetName());
+ EscapeName(columns[tableDesc.GetKeyColumnIds(0)]->GetName(), Stream);
for (int i = 1; i < tableDesc.GetKeyColumnIds().size(); i++) {
Stream << ", ";
- EscapeName(columns[tableDesc.GetKeyColumnIds(i)]->GetName());
+ EscapeName(columns[tableDesc.GetKeyColumnIds(i)]->GetName(), Stream);
}
Stream << ")\n";
Stream << ")";
@@ -374,9 +361,9 @@ TCreateTableFormatter::TResult TCreateTableFormatter::Format(const TString& tabl
try {
printed |= Format(createRequest.partition_at_keys(), del, !printed);
} catch (const TFormatFail& ex) {
- return TResult(ex.Status, ex.Error);
+ return TFormatResult(ex.Status, ex.Error);
} catch (const yexception& e) {
- return TResult(Ydb::StatusIds::UNSUPPORTED, e.what());
+ return TFormatResult(Ydb::StatusIds::UNSUPPORTED, e.what());
}
}
@@ -408,9 +395,9 @@ TCreateTableFormatter::TResult TCreateTableFormatter::Format(const TString& tabl
try {
printed |= Format(createRequest.ttl_settings(), del, !printed);
} catch (const TFormatFail& ex) {
- return TResult(ex.Status, ex.Error);
+ return TFormatResult(ex.Status, ex.Error);
} catch (const yexception& e) {
- return TResult(Ydb::StatusIds::UNSUPPORTED, e.what());
+ return TFormatResult(Ydb::StatusIds::UNSUPPORTED, e.what());
}
}
@@ -422,17 +409,17 @@ TCreateTableFormatter::TResult TCreateTableFormatter::Format(const TString& tabl
TString formattedStatement;
NYql::TIssues issues;
if (!NYdb::NDump::Format(statement, formattedStatement, issues)) {
- return TResult(Ydb::StatusIds::INTERNAL_ERROR, issues.ToString());
+ return TFormatResult(Ydb::StatusIds::INTERNAL_ERROR, issues.ToString());
}
- auto result = TResult(std::move(formattedStatement));
+ auto result = TFormatResult(std::move(formattedStatement));
return result;
}
void TCreateTableFormatter::Format(const NKikimrSchemeOp::TColumnDescription& columnDesc) {
Stream << "\t";
- EscapeName(columnDesc.GetName());
+ EscapeName(columnDesc.GetName(), Stream);
Stream << " ";
auto type = columnDesc.GetType();
@@ -460,7 +447,7 @@ void TCreateTableFormatter::Format(const NKikimrSchemeOp::TColumnDescription& co
if (columnDesc.HasFamilyName()) {
Stream << " FAMILY ";
- EscapeName(columnDesc.GetFamilyName());
+ EscapeName(columnDesc.GetFamilyName(), Stream);
}
if (columnDesc.GetNotNull()) {
Stream << " NOT NULL";
@@ -473,7 +460,7 @@ void TCreateTableFormatter::Format(const NKikimrSchemeOp::TColumnDescription& co
void TCreateTableFormatter::Format(const TableIndex& index) {
Stream << "\tINDEX ";
- EscapeName(index.name());
+ EscapeName(index.name(), Stream);
std::optional<KMeansTreeSettings> kMeansTreeSettings;
switch (index.type_case()) {
case TableIndex::kGlobalIndex: {
@@ -499,20 +486,20 @@ void TCreateTableFormatter::Format(const TableIndex& index) {
Y_ENSURE(!index.index_columns().empty());
Stream << "(";
- EscapeName(index.index_columns(0));
+ EscapeName(index.index_columns(0), Stream);
for (int i = 1; i < index.index_columns().size(); i++) {
Stream << ", ";
- EscapeName(index.index_columns(i));
+ EscapeName(index.index_columns(i), Stream);
}
Stream << ")";
if (!index.data_columns().empty()) {
Stream << " COVER ";
Stream << "(";
- EscapeName(index.data_columns(0));
+ EscapeName(index.data_columns(0), Stream);
for (int i = 1; i < index.data_columns().size(); i++) {
Stream << ", ";
- EscapeName(index.data_columns(i));
+ EscapeName(index.data_columns(i), Stream);
}
Stream << ")";
}
@@ -612,10 +599,11 @@ bool TCreateTableFormatter::Format(const TFamilyDescription& familyDesc) {
compression = "off";
break;
case NKikimrSchemeOp::ColumnCodecLZ4:
- compression = "lz4";
+ compression = "lz4";
break;
case NKikimrSchemeOp::ColumnCodecZSTD:
- ythrow TFormatFail(Ydb::StatusIds::UNSUPPORTED, "ZSTD COMPRESSION codec is not supported");
+ compression = "zstd";
+ break;
}
} else if (familyDesc.HasCodec()) {
if (familyDesc.GetCodec() == 1) {
@@ -634,7 +622,7 @@ bool TCreateTableFormatter::Format(const TFamilyDescription& familyDesc) {
Y_ENSURE(familyName);
Stream << "\tFAMILY ";
- EscapeName(familyName);
+ EscapeName(familyName, Stream);
Stream << " (";
TString del = "";
@@ -643,7 +631,7 @@ bool TCreateTableFormatter::Format(const TFamilyDescription& familyDesc) {
del = ", ";
}
- if (dataName) {
+ if (compression) {
Stream << del << "COMPRESSION = " << "\"" << compression << "\"";
}
@@ -753,11 +741,11 @@ void TCreateTableFormatter::Format(ui64 expireAfterSeconds, std::optional<TStrin
Stream << "INTERVAL(";
const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Interval, NUdf::TUnboxedValuePod(expireAfterSeconds * 1000000));
Y_ENSURE(str.HasValue());
- EscapeString(TString(str.AsStringRef()));
+ EscapeString(TString(str.AsStringRef()), Stream);
Stream << ") ";
if (storage) {
Stream << "TO EXTERNAL DATA SOURCE ";
- EscapeName(*storage);
+ EscapeName(*storage, Stream);
} else {
Stream << "DELETE";
}
@@ -884,5 +872,312 @@ bool TCreateTableFormatter::Format(const Ydb::Table::TtlSettings& ttlSettings, T
return true;
}
+TFormatResult TCreateTableFormatter::Format(const TString& tablePath, const TColumnTableDescription& tableDesc, bool temporary) {
+ Stream.Clear();
+
+ TStringStreamWrapper wrapper(Stream);
+
+ Ydb::Table::CreateTableRequest createRequest;
+ if (temporary) {
+ Stream << "CREATE TEMPORARY TABLE ";
+ } else {
+ Stream << "CREATE TABLE ";
+ }
+ EscapeName(tablePath, Stream);
+ Stream << " (\n";
+
+ const auto& schema = tableDesc.GetSchema();
+
+ std::map<ui32, const TOlapColumnDescription*> columns;
+ for (const auto& column : schema.GetColumns()) {
+ columns[column.GetId()] = &column;
+ }
+
+ try {
+ Y_ENSURE(!columns.empty()); auto it = columns.cbegin();
+ Format(*it->second);
+ std::advance(it, 1);
+ for (; it != columns.end(); ++it) {
+ Stream << ",\n";
+ Format(*it->second);
+ }
+ } catch (const TFormatFail& ex) {
+ return TFormatResult(ex.Status, ex.Error);
+ } catch (const yexception& e) {
+ return TFormatResult(Ydb::StatusIds::UNSUPPORTED, e.what());
+ }
+ Stream << ",\n";
+
+ if (!schema.GetIndexes().empty()) {
+ return TFormatResult(Ydb::StatusIds::UNSUPPORTED, "Indexes are not supported yet for column tables.");
+ }
+
+ bool isFamilyPrinted = false;
+ if (!schema.GetColumnFamilies().empty()) {
+ try {
+ isFamilyPrinted = Format(schema.GetColumnFamilies(0));
+ for (int i = 1; i < schema.GetColumnFamilies().size(); i++) {
+ if (isFamilyPrinted) {
+ Stream << ",\n";
+ }
+ isFamilyPrinted = Format(schema.GetColumnFamilies(i));
+ }
+ } catch (const TFormatFail& ex) {
+ return TFormatResult(ex.Status, ex.Error);
+ } catch (const yexception& e) {
+ return TFormatResult(Ydb::StatusIds::UNSUPPORTED, e.what());
+ }
+ }
+
+ Y_ENSURE(!schema.GetKeyColumnNames().empty());
+ if (isFamilyPrinted) {
+ Stream << ",\n";
+ }
+ Stream << "\tPRIMARY KEY (";
+ EscapeName(schema.GetKeyColumnNames(0), Stream);
+ for (int i = 1; i < schema.GetKeyColumnNames().size(); i++) {
+ Stream << ", ";
+ EscapeName(schema.GetKeyColumnNames(i), Stream);
+ }
+ Stream << ")\n";
+ Stream << ") ";
+
+ if (schema.HasOptions()) {
+ const auto& options = schema.GetOptions();
+ if (options.GetSchemeNeedActualization()) {
+ return TFormatResult(Ydb::StatusIds::UNSUPPORTED, "Unsupported setting: SCHEME_NEED_ACTUALIZATION");
+ }
+ if (options.HasScanReaderPolicyName() && !options.GetScanReaderPolicyName().empty()) {
+ return TFormatResult(Ydb::StatusIds::UNSUPPORTED, "Unsupported setting: SCAN_READER_POLICY_NAME");
+ }
+ if (options.HasCompactionPlannerConstructor()) {
+ return TFormatResult(Ydb::StatusIds::UNSUPPORTED, "Unsupported setting: COMPACTION_PLANNER");
+ }
+ if (options.HasMetadataManagerConstructor()) {
+ return TFormatResult(Ydb::StatusIds::UNSUPPORTED, "Unsupported setting: METADATA_MEMORY_MANAGER");
+ }
+ }
+
+ if (tableDesc.HasSharding()) {
+ Format(tableDesc.GetSharding());
+ }
+
+ Stream << "WITH (\n";
+ Stream << "\tSTORE = COLUMN";
+
+ if (tableDesc.HasColumnShardCount()) {
+ Stream << ",\n";
+ Stream << "\tAUTO_PARTITIONING_MIN_PARTITIONS_COUNT = " << tableDesc.GetColumnShardCount();
+ }
+
+ if (tableDesc.HasTtlSettings()) {
+ Format(tableDesc.GetTtlSettings());
+ }
+
+ Stream << "\n);";
+
+ TString statement = Stream.Str();
+ TString formattedStatement;
+ NYql::TIssues issues;
+ if (!NYdb::NDump::Format(statement, formattedStatement, issues)) {
+ return TFormatResult(Ydb::StatusIds::INTERNAL_ERROR, issues.ToString());
+ }
+
+ auto result = TFormatResult(std::move(formattedStatement));
+
+ return result;
+}
+
+void TCreateTableFormatter::Format(const TOlapColumnDescription& olapColumnDesc) {
+ Stream << "\t";
+ EscapeName(olapColumnDesc.GetName(), Stream);
+ Stream << " " << olapColumnDesc.GetType();
+
+ if (olapColumnDesc.HasColumnFamilyName()) {
+ Stream << " FAMILY ";
+ EscapeName(olapColumnDesc.GetColumnFamilyName(), Stream);
+ }
+ if (olapColumnDesc.GetNotNull()) {
+ Stream << " NOT NULL";
+ }
+ if (olapColumnDesc.HasDefaultValue()) {
+ Format(olapColumnDesc.GetDefaultValue());
+ }
+
+ if (olapColumnDesc.HasStorageId() && !olapColumnDesc.GetStorageId().empty()) {
+ ythrow TFormatFail(Ydb::StatusIds::UNSUPPORTED, "Unsupported setting: STORAGE_ID");
+ }
+
+ if (olapColumnDesc.HasDataAccessorConstructor()) {
+ ythrow TFormatFail(Ydb::StatusIds::UNSUPPORTED, "Unsupported setting: DATA_ACCESSOR_CONSTRUCTOR");
+ }
+
+ if (olapColumnDesc.HasDictionaryEncoding()) {
+ ythrow TFormatFail(Ydb::StatusIds::UNSUPPORTED, "Unsupported setting: ENCODING.DICTIONARY");
+ }
+}
+
+void TCreateTableFormatter::Format(const NKikimrColumnShardColumnDefaults::TColumnDefault& defaultValue) {
+ if (!defaultValue.HasScalar()) {
+ return;
+ }
+
+ Stream << " DEFAULT ";
+
+ TGuard<NMiniKQL::TScopedAlloc> guard(Alloc);
+ const auto& scalar = defaultValue.GetScalar();
+ if (scalar.HasBool()) {
+ if (scalar.GetBool() == true) {
+ Stream << "true";
+ } else {
+ Stream << "false";
+ }
+ } else if (scalar.HasUint8()) {
+ const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Uint8, NUdf::TUnboxedValuePod(scalar.GetUint8()));
+ Y_ENSURE(str.HasValue());
+ Stream << TString(str.AsStringRef());
+ } else if (scalar.HasUint16()) {
+ const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Uint16, NUdf::TUnboxedValuePod(scalar.GetUint16()));
+ Y_ENSURE(str.HasValue());
+ Stream << TString(str.AsStringRef());
+ } else if (scalar.HasUint32()) {
+ const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Uint32, NUdf::TUnboxedValuePod(scalar.GetUint32()));
+ Y_ENSURE(str.HasValue());
+ Stream << TString(str.AsStringRef());
+ } else if (scalar.HasUint64()) {
+ const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Uint64, NUdf::TUnboxedValuePod(static_cast<ui64>(scalar.GetUint64())));
+ Y_ENSURE(str.HasValue());
+ Stream << TString(str.AsStringRef());
+ } else if (scalar.HasInt8()) {
+ const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Int8, NUdf::TUnboxedValuePod(scalar.GetInt8()));
+ Y_ENSURE(str.HasValue());
+ Stream << TString(str.AsStringRef());
+ } else if (scalar.HasInt16()) {
+ const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Int16, NUdf::TUnboxedValuePod(scalar.GetInt16()));
+ Y_ENSURE(str.HasValue());
+ Stream << TString(str.AsStringRef());
+ } else if (scalar.HasInt32()) {
+ const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Int32, NUdf::TUnboxedValuePod(scalar.GetInt32()));
+ Y_ENSURE(str.HasValue());
+ Stream << TString(str.AsStringRef());
+ } else if (scalar.HasInt64()) {
+ const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Int64, NUdf::TUnboxedValuePod(static_cast<i64>(scalar.GetInt64())));
+ Y_ENSURE(str.HasValue());
+ Stream << TString(str.AsStringRef());
+ } else if (scalar.HasDouble()) {
+ const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Double, NUdf::TUnboxedValuePod(scalar.GetDouble()));
+ Y_ENSURE(str.HasValue());
+ Stream << TString(str.AsStringRef());
+ } else if (scalar.HasFloat()) {
+ const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Float, NUdf::TUnboxedValuePod(scalar.GetFloat()));
+ Y_ENSURE(str.HasValue());
+ Stream << TString(str.AsStringRef());
+ } else if (scalar.HasTimestamp()) {
+ ui64 value = scalar.GetTimestamp().GetValue();
+ arrow::TimeUnit::type unit = arrow::TimeUnit::type(scalar.GetTimestamp().GetUnit());
+ switch (unit) {
+ case arrow::TimeUnit::SECOND:
+ value *= 1000000;
+ break;
+ case arrow::TimeUnit::MILLI:
+ value *= 1000;
+ break;
+ case arrow::TimeUnit::MICRO:
+ break;
+ case arrow::TimeUnit::NANO:
+ value /= 1000;
+ break;
+ }
+ Stream << "TIMESTAMP(";
+ const NUdf::TUnboxedValue str = NMiniKQL::ValueToString(NUdf::EDataSlot::Timestamp, NUdf::TUnboxedValuePod(value));
+ Y_ENSURE(str.HasValue());
+ EscapeString(TString(str.AsStringRef()), Stream);
+ Stream << ")";
+ } else if (scalar.HasString()) {
+ EscapeString(TString(scalar.GetString()), Stream);
+ } else {
+ ythrow TFormatFail(Ydb::StatusIds::UNSUPPORTED, "Unsupported type for default value");
+ }
+}
+
+void TCreateTableFormatter::Format(const NKikimrSchemeOp::TColumnTableSharding& sharding) {
+ switch (sharding.GetMethodCase()) {
+ case NKikimrSchemeOp::TColumnTableSharding::kHashSharding: {
+ const auto& hashSharding = sharding.GetHashSharding();
+ Y_ENSURE(!hashSharding.GetColumns().empty());
+ Stream << "PARTITION BY HASH(";
+ EscapeName(hashSharding.GetColumns(0), Stream);
+ for (int i = 1; i < hashSharding.GetColumns().size(); i++) {
+ Stream << ", ";
+ EscapeName(hashSharding.GetColumns(i), Stream);
+ }
+ Stream << ")\n";
+ break;
+ }
+ case NKikimrSchemeOp::TColumnTableSharding::kRandomSharding:
+ ythrow TFormatFail(Ydb::StatusIds::UNSUPPORTED, "Random sharding is not supported yet.");
+ default:
+ ythrow TFormatFail(Ydb::StatusIds::INTERNAL_ERROR, "Unsupported unit");
+ }
+}
+
+void TCreateTableFormatter::Format(const NKikimrSchemeOp::TColumnDataLifeCycle& ttlSettings) {
+ if (!ttlSettings.HasEnabled()) {
+ return;
+ }
+
+ const auto& enabled = ttlSettings.GetEnabled();
+
+ if (enabled.HasExpireAfterBytes()) {
+ ythrow TFormatFail(Ydb::StatusIds::UNSUPPORTED, "TTL by size is not supported.");
+ }
+
+ Stream << ",\n";
+ Stream << "\tTTL =\n\t ";
+ bool first = true;
+
+ if (!enabled.TiersSize()) {
+ Y_ENSURE(enabled.HasExpireAfterSeconds());
+ Format(enabled.GetExpireAfterSeconds());
+ } else {
+ for (const auto& tier : enabled.GetTiers()) {
+ if (!first) {
+ Stream << ", ";
+ }
+ switch (tier.GetActionCase()) {
+ case NKikimrSchemeOp::TTTLSettings::TTier::ActionCase::kDelete:
+ Format(tier.GetApplyAfterSeconds());
+ break;
+ case NKikimrSchemeOp::TTTLSettings::TTier::ActionCase::kEvictToExternalStorage:
+ Format(tier.GetApplyAfterSeconds(), tier.GetEvictToExternalStorage().GetStorage());
+ break;
+ case NKikimrSchemeOp::TTTLSettings::TTier::ActionCase::ACTION_NOT_SET:
+ ythrow TFormatFail(Ydb::StatusIds::UNSUPPORTED, "Undefined tier action");
+ }
+ first = false;
+ }
+ }
+
+ Stream << "\n\t ON "; EscapeName(enabled.GetColumnName(), Stream);
+ switch (enabled.GetColumnUnit()) {
+ case NKikimrSchemeOp::TTTLSettings::UNIT_AUTO:
+ break;
+ case NKikimrSchemeOp::TTTLSettings::UNIT_SECONDS:
+ Stream << " AS SECONDS";
+ break;
+ case NKikimrSchemeOp::TTTLSettings::UNIT_MILLISECONDS:
+ Stream << " AS MILLISECONDS";
+ break;
+ case NKikimrSchemeOp::TTTLSettings::UNIT_MICROSECONDS:
+ Stream << " AS MICROSECONDS";
+ break;
+ case NKikimrSchemeOp::TTTLSettings::UNIT_NANOSECONDS:
+ Stream << " AS NANOSECONDS";
+ break;
+ default:
+ ythrow TFormatFail(Ydb::StatusIds::INTERNAL_ERROR, "Unsupported unit");
+ }
+}
+
} // NSysView
} // NKikimr
diff --git a/ydb/core/sys_view/show_create/create_table_formatter.h b/ydb/core/sys_view/show_create/create_table_formatter.h
index a34d5dcf4d..bd82bc5a15 100644
--- a/ydb/core/sys_view/show_create/create_table_formatter.h
+++ b/ydb/core/sys_view/show_create/create_table_formatter.h
@@ -1,67 +1,24 @@
#pragma once
+#include "formatters_common.h"
+
#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/value/value.h>
#include <ydb/core/protos/flat_scheme_op.pb.h>
+#include <ydb/core/tx/columnshard/engines/scheme/defaults/protos/data.pb.h>
+
#include <ydb/public/api/protos/ydb_table.pb.h>
#include <yql/essentials/minikql/mkql_alloc.h>
-#include <util/generic/hash.h>
#include <util/stream/str.h>
-#include <util/string/builder.h>
namespace NKikimr {
namespace NSysView {
class TCreateTableFormatter {
public:
- class TFormatFail : public yexception {
- public:
- Ydb::StatusIds::StatusCode Status;
- TString Error;
-
- TFormatFail(Ydb::StatusIds::StatusCode status, TString error = {})
- : Status(status)
- , Error(std::move(error))
- {}
- };
-
- class TResult {
- public:
- TResult(TString out)
- : Out(std::move(out))
- , Status(Ydb::StatusIds::SUCCESS)
- {}
-
- TResult(Ydb::StatusIds::StatusCode status, TString error)
- : Status(status)
- , Error(std::move(error))
- {}
-
- bool IsSuccess() const {
- return Status == Ydb::StatusIds::SUCCESS;
- }
-
- Ydb::StatusIds::StatusCode GetStatus() const {
- return Status;
- }
-
- const TString& GetError() const {
- return Error;
- }
-
- TString ExtractOut() {
- return std::move(Out);
- }
- private:
- TString Out;
-
- Ydb::StatusIds::StatusCode Status;
- TString Error;
- };
-
TCreateTableFormatter()
: Alloc(__LOCATION__)
{
@@ -73,10 +30,10 @@ public:
Alloc.Acquire();
}
- TResult Format(const TString& tablePath, const NKikimrSchemeOp::TTableDescription& tableDesc, bool temporary);
+ TFormatResult Format(const TString& tablePath, const NKikimrSchemeOp::TTableDescription& tableDesc, bool temporary);
+ TFormatResult Format(const TString& tablePath, const NKikimrSchemeOp::TColumnTableDescription& tableDesc, bool temporary);
private:
-
void Format(const NKikimrSchemeOp::TColumnDescription& columnDesc);
bool Format(const NKikimrSchemeOp::TFamilyDescription& familyDesc);
bool Format(const NKikimrSchemeOp::TPartitioningPolicy& policy, ui32 shardsToCreate, TString& del, bool needWith);
@@ -88,14 +45,16 @@ private:
void Format(ui64 expireAfterSeconds, std::optional<TString> storage = std::nullopt);
+ void Format(const NKikimrSchemeOp::TOlapColumnDescription& olapColumnDesc);
+ void Format(const NKikimrSchemeOp::TColumnTableSharding& tableSharding);
+ void Format(const NKikimrSchemeOp::TColumnDataLifeCycle& ttlSettings);
+
+ void Format(const NKikimrColumnShardColumnDefaults::TColumnDefault& defaultValue);
+
void Format(const Ydb::TypedValue& value, bool isPartition = false);
void FormatValue(NYdb::TValueParser& parser, bool isPartition = false, TString del = "");
void FormatPrimitive(NYdb::TValueParser& parser);
- void EscapeName(const TString& str);
- void EscapeString(const TString& str);
- void EscapeBinary(const TString& str);
-private:
TStringStream Stream;
NMiniKQL::TScopedAlloc Alloc;
};
diff --git a/ydb/core/sys_view/show_create/create_view_formatter.cpp b/ydb/core/sys_view/show_create/create_view_formatter.cpp
new file mode 100644
index 0000000000..746e96ae01
--- /dev/null
+++ b/ydb/core/sys_view/show_create/create_view_formatter.cpp
@@ -0,0 +1,22 @@
+#include "create_view_formatter.h"
+
+#include <ydb/public/lib/ydb_cli/dump/util/view_utils.h>
+
+namespace NKikimr::NSysView {
+
+TFormatResult TCreateViewFormatter::Format(const TString& viewPath, const NKikimrSchemeOp::TViewDescription& viewDesc) {
+ const auto [contextRecreation, select] = NYdb::NDump::SplitViewQuery(viewDesc.GetQueryText());
+
+ const TString creationQuery = std::format(
+ "{}"
+ "CREATE VIEW `{}` WITH (security_invoker = TRUE) AS\n"
+ "{};\n",
+ contextRecreation.c_str(),
+ viewPath.c_str(),
+ select.c_str()
+ );
+
+ return TFormatResult(creationQuery);
+}
+
+}
diff --git a/ydb/core/sys_view/show_create/create_view_formatter.h b/ydb/core/sys_view/show_create/create_view_formatter.h
new file mode 100644
index 0000000000..b71ba909fc
--- /dev/null
+++ b/ydb/core/sys_view/show_create/create_view_formatter.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include "formatters_common.h"
+
+#include <ydb/core/protos/flat_scheme_op.pb.h>
+
+#include <util/stream/str.h>
+
+namespace NKikimr::NSysView {
+
+class TCreateViewFormatter {
+public:
+ TFormatResult Format(const TString& viewPath, const NKikimrSchemeOp::TViewDescription& viewDesc);
+
+private:
+ TStringStream Stream;
+};
+
+}
diff --git a/ydb/core/sys_view/show_create/formatters_common.cpp b/ydb/core/sys_view/show_create/formatters_common.cpp
new file mode 100644
index 0000000000..04d0808397
--- /dev/null
+++ b/ydb/core/sys_view/show_create/formatters_common.cpp
@@ -0,0 +1,19 @@
+#include "formatters_common.h"
+
+#include <yql/essentials/ast/yql_ast_escaping.h>
+
+namespace NKikimr::NSysView {
+
+void EscapeName(const TString& str, TStringStream& stream) {
+ NYql::EscapeArbitraryAtom(str, '`', &stream);
+}
+
+void EscapeString(const TString& str, TStringStream& stream) {
+ NYql::EscapeArbitraryAtom(str, '\'', &stream);
+}
+
+void EscapeBinary(const TString& str, TStringStream& stream) {
+ NYql::EscapeBinaryAtom(str, '\'', &stream);
+}
+
+}
diff --git a/ydb/core/sys_view/show_create/formatters_common.h b/ydb/core/sys_view/show_create/formatters_common.h
new file mode 100644
index 0000000000..64c133da8e
--- /dev/null
+++ b/ydb/core/sys_view/show_create/formatters_common.h
@@ -0,0 +1,60 @@
+#pragma once
+
+#include <ydb/public/api/protos/ydb_status_codes.pb.h>
+
+#include <util/generic/yexception.h>
+#include <util/stream/str.h>
+
+namespace NKikimr::NSysView {
+
+void EscapeName(const TString& str, TStringStream& stream);
+void EscapeString(const TString& str, TStringStream& stream);
+void EscapeBinary(const TString& str, TStringStream& stream);
+
+class TFormatFail : public yexception {
+public:
+ Ydb::StatusIds::StatusCode Status;
+ TString Error;
+
+ TFormatFail(Ydb::StatusIds::StatusCode status, TString error = {})
+ : Status(status)
+ , Error(std::move(error))
+ {}
+};
+
+class TFormatResult {
+public:
+ TFormatResult(TString out)
+ : Out(std::move(out))
+ , Status(Ydb::StatusIds::SUCCESS)
+ {}
+
+ TFormatResult(Ydb::StatusIds::StatusCode status, TString error)
+ : Status(status)
+ , Error(std::move(error))
+ {}
+
+ bool IsSuccess() const {
+ return Status == Ydb::StatusIds::SUCCESS;
+ }
+
+ Ydb::StatusIds::StatusCode GetStatus() const {
+ return Status;
+ }
+
+ const TString& GetError() const {
+ return Error;
+ }
+
+ TString ExtractOut() {
+ return std::move(Out);
+ }
+
+private:
+ TString Out;
+
+ Ydb::StatusIds::StatusCode Status;
+ TString Error;
+};
+
+}
diff --git a/ydb/core/sys_view/show_create/show_create.cpp b/ydb/core/sys_view/show_create/show_create.cpp
index 9706e10cce..dbe0ddbcd7 100644
--- a/ydb/core/sys_view/show_create/show_create.cpp
+++ b/ydb/core/sys_view/show_create/show_create.cpp
@@ -1,4 +1,5 @@
#include "create_table_formatter.h"
+#include "create_view_formatter.h"
#include "show_create.h"
#include <ydb/core/base/tablet_pipe.h>
@@ -20,6 +21,34 @@ namespace {
using namespace NActors;
+TString ToString(NKikimrSchemeOp::EPathType pathType) {
+ switch (pathType) {
+ case NKikimrSchemeOp::EPathTypeTable:
+ case NKikimrSchemeOp::EPathTypeColumnTable:
+ return "Table";
+ case NKikimrSchemeOp::EPathTypeView:
+ return "View";
+ default:
+ Y_ENSURE(false, "No user-friendly name for a path type: " << pathType);
+ return "";
+ }
+}
+
+bool RewriteTemporaryTablePath(const TString& database, TString& tablePath, TString& error) {
+ auto pathVecTmp = SplitPath(tablePath);
+ auto sz = pathVecTmp.size();
+ Y_ENSURE(sz > 3 && pathVecTmp[0] == ".tmp" && pathVecTmp[1] == "sessions");
+
+ auto pathTmp = JoinPath(TVector<TString>(pathVecTmp.begin() + 3, pathVecTmp.end()));
+ std::pair<TString, TString> pathPairTmp;
+ if (!TrySplitPathByDb(pathTmp, database, pathPairTmp, error)) {
+ return false;
+ }
+
+ tablePath = pathPairTmp.second;
+ return true;
+}
+
class TShowCreate : public TScanActorBase<TShowCreate> {
public:
using TBase = TScanActorBase<TShowCreate>;
@@ -99,9 +128,10 @@ private:
Path = cellsFrom[0].AsBuf();
PathType = cellsFrom[1].AsBuf();
- if (PathType != "Table") {
- ReplyErrorAndDie(Ydb::StatusIds::SCHEME_ERROR, TStringBuilder() << "Invalid path type: " << PathType);
- return;
+ if (!IsIn({"Table", "View"}, PathType)) {
+ return ReplyErrorAndDie(Ydb::StatusIds::BAD_REQUEST, TStringBuilder()
+ << "Unsupported path type: " << PathType
+ );
}
std::unique_ptr<TEvTxUserProxy::TEvNavigate> navigateRequest(new TEvTxUserProxy::TEvNavigate());
@@ -111,8 +141,10 @@ private:
}
NKikimrSchemeOp::TDescribePath* record = navigateRequest->Record.MutableDescribePath();
record->SetPath(Path);
- record->MutableOptions()->SetReturnBoundaries(true);
- record->MutableOptions()->SetShowPrivateTable(false);
+ if (PathType == "Table") {
+ record->MutableOptions()->SetReturnBoundaries(true);
+ record->MutableOptions()->SetShowPrivateTable(false);
+ }
Send(MakeTxProxyID(), navigateRequest.release());
}
@@ -136,15 +168,13 @@ private:
switch (status) {
case NKikimrScheme::StatusSuccess: {
const auto& pathDescription = record.GetPathDescription();
- if (pathDescription.GetSelf().GetPathType() != NKikimrSchemeOp::EPathTypeTable) {
- ReplyErrorAndDie(Ydb::StatusIds::SCHEME_ERROR, "Invalid path type");
- return;
+ if (auto pathType = ToString(pathDescription.GetSelf().GetPathType()); pathType != PathType) {
+ return ReplyErrorAndDie(Ydb::StatusIds::BAD_REQUEST, TStringBuilder()
+ << "Path type mismatch, expected: " << PathType << ", found: " << pathType
+ );
}
- const auto& tableDesc = pathDescription.GetTable();
-
std::pair<TString, TString> pathPair;
-
{
TString error;
if (!TrySplitPathByDb(Path, Database, pathPair, error)) {
@@ -153,34 +183,73 @@ private:
}
}
- auto [_, tablePath] = pathPair;
- bool temporary = false;
-
- if (NKqp::IsSessionsDirPath(Database, tablePath)) {
- auto pathVecTmp = SplitPath(tablePath);
- auto sz = pathVecTmp.size();
- Y_ENSURE(sz > 3 && pathVecTmp[0] == ".tmp" && pathVecTmp[1] == "sessions");
-
- auto pathTmp = JoinPath(TVector<TString>(pathVecTmp.begin() + 3, pathVecTmp.end()));
- std::pair<TString, TString> pathPairTmp;
- TString error;
- if (!TrySplitPathByDb(pathTmp, Database, pathPairTmp, error)) {
- ReplyErrorAndDie(Ydb::StatusIds::SCHEME_ERROR, error);
- return;
+ switch (pathDescription.GetSelf().GetPathType()) {
+ case NKikimrSchemeOp::EPathTypeTable: {
+ const auto& tableDesc = pathDescription.GetTable();
+ auto tablePath = pathPair.second;
+
+ bool temporary = false;
+ if (NKqp::IsSessionsDirPath(Database, pathPair.second)) {
+ TString error;
+ if (!RewriteTemporaryTablePath(Database, tablePath, error)) {
+ return ReplyErrorAndDie(Ydb::StatusIds::SCHEME_ERROR, error);
+ }
+ temporary = true;
+ }
+
+ TCreateTableFormatter formatter;
+ auto formatterResult = formatter.Format(tablePath, tableDesc, temporary);
+ if (formatterResult.IsSuccess()) {
+ path = tablePath;
+ statement = formatterResult.ExtractOut();
+ } else {
+ ReplyErrorAndDie(formatterResult.GetStatus(), formatterResult.GetError());
+ return;
+ }
+ break;
+ }
+ case NKikimrSchemeOp::EPathTypeColumnTable: {
+ const auto& columnTableDesc = pathDescription.GetColumnTableDescription();
+ auto tablePath = pathPair.second;
+
+ bool temporary = false;
+ if (NKqp::IsSessionsDirPath(Database, pathPair.second)) {
+ TString error;
+ if (!RewriteTemporaryTablePath(Database, tablePath, error)) {
+ return ReplyErrorAndDie(Ydb::StatusIds::SCHEME_ERROR, error);
+ }
+ temporary = true;
+ }
+
+ TCreateTableFormatter formatter;
+ auto formatterResult = formatter.Format(tablePath, columnTableDesc, temporary);
+ if (formatterResult.IsSuccess()) {
+ path = tablePath;
+ statement = formatterResult.ExtractOut();
+ } else {
+ ReplyErrorAndDie(formatterResult.GetStatus(), formatterResult.GetError());
+ return;
+ }
+ break;
+ }
+ case NKikimrSchemeOp::EPathTypeView: {
+ const auto& description = pathDescription.GetViewDescription();
+ path = pathPair.second;
+
+ TCreateViewFormatter formatter;
+ auto formatterResult = formatter.Format(*path, description);
+ if (formatterResult.IsSuccess()) {
+ statement = formatterResult.ExtractOut();
+ } else {
+ return ReplyErrorAndDie(formatterResult.GetStatus(), formatterResult.GetError());
+ }
+ break;
+ }
+ default: {
+ return ReplyErrorAndDie(Ydb::StatusIds::BAD_REQUEST, TStringBuilder()
+ << "Unsupported path type: " << pathDescription.GetSelf().GetPathType()
+ );
}
-
- tablePath = pathPairTmp.second;
- temporary = true;
- }
-
- TCreateTableFormatter formatter;
- auto formatterResult = formatter.Format(tablePath, tableDesc, temporary);
- if (formatterResult.IsSuccess()) {
- path = tablePath;
- statement = formatterResult.ExtractOut();
- } else {
- ReplyErrorAndDie(formatterResult.GetStatus(), formatterResult.GetError());
- return;
}
break;
}
diff --git a/ydb/core/sys_view/show_create/ya.make b/ydb/core/sys_view/show_create/ya.make
index bf35ce0f8a..910379928b 100644
--- a/ydb/core/sys_view/show_create/ya.make
+++ b/ydb/core/sys_view/show_create/ya.make
@@ -2,15 +2,16 @@ LIBRARY()
SRCS(
create_table_formatter.cpp
- create_table_formatter.h
+ create_view_formatter.cpp
+ formatters_common.cpp
show_create.cpp
- show_create.h
)
PEERDIR(
ydb/core/base
ydb/core/kqp/runtime
ydb/core/protos
+ ydb/core/tx/columnshard/engines/scheme/defaults/protos
ydb/core/sys_view/common
ydb/core/tx/schemeshard
ydb/core/tx/tx_proxy
diff --git a/ydb/core/sys_view/ut_common.cpp b/ydb/core/sys_view/ut_common.cpp
index 8bc695a239..3705a1900a 100644
--- a/ydb/core/sys_view/ut_common.cpp
+++ b/ydb/core/sys_view/ut_common.cpp
@@ -1,5 +1,6 @@
#include "ut_common.h"
#include <ydb/core/persqueue/ut/common/pq_ut_common.h>
+#include <ydb/core/wrappers/fake_storage.h>
namespace NKikimr {
namespace NSysView {
@@ -46,6 +47,9 @@ TTestEnv::TTestEnv(ui32 staticNodes, ui32 dynamicNodes, const TTestEnvSettings&
featureFlags.SetEnableResourcePools(true);
featureFlags.SetEnableFollowerStats(true);
featureFlags.SetEnableVectorIndex(true);
+ featureFlags.SetEnableTieringInColumnShard(true);
+ featureFlags.SetEnableExternalDataSources(true);
+
Settings->SetFeatureFlags(featureFlags);
Settings->SetEnablePersistentQueryStats(settings.EnableSVP);
@@ -56,6 +60,7 @@ TTestEnv::TTestEnv(ui32 staticNodes, ui32 dynamicNodes, const TTestEnvSettings&
NKikimrConfig::TAppConfig appConfig;
*appConfig.MutableFeatureFlags() = Settings->FeatureFlags;
+ appConfig.MutableQueryServiceConfig()->AddAvailableExternalDataSources("ObjectStorage");
Settings->SetAppConfig(appConfig);
for (ui32 i : xrange(settings.StoragePools)) {
@@ -97,6 +102,8 @@ TTestEnv::TTestEnv(ui32 staticNodes, ui32 dynamicNodes, const TTestEnvSettings&
Driver = MakeHolder<NYdb::TDriver>(DriverConfig);
Server->GetRuntime()->SetLogPriority(NKikimrServices::SYSTEM_VIEWS, NActors::NLog::PRI_DEBUG);
+
+ Singleton<NKikimr::NWrappers::NExternalStorage::TFakeExternalStorage>()->SetSecretKey("fakeSecret");
}
TTestEnv::~TTestEnv() {
diff --git a/ydb/core/sys_view/ut_kqp.cpp b/ydb/core/sys_view/ut_kqp.cpp
index 2dedc8b4c4..c5bb53a373 100644
--- a/ydb/core/sys_view/ut_kqp.cpp
+++ b/ydb/core/sys_view/ut_kqp.cpp
@@ -266,7 +266,10 @@ public:
: Env(env)
, QueryClient(NQuery::TQueryClient(Env.GetDriver()))
, TableClient(TTableClient(Env.GetDriver()))
- {}
+ {
+ CreateTier("tier1");
+ CreateTier("tier2");
+ }
void CheckShowCreateTable(const std::string& query, const std::string& tableName, TString formatQuery = "", bool temporary = false) {
auto session = QueryClient.GetSession().GetValueSync().GetSession();
@@ -283,16 +286,16 @@ public:
UNIT_ASSERT_VALUES_EQUAL_C(UnescapeC(formatQuery), UnescapeC(showCreateTableQuery), UnescapeC(showCreateTableQuery));
}
- auto tableDescOrig = DescribeTable(tableName, sessionId);
+ auto describeResultOrig = DescribeTable(tableName, sessionId);
DropTable(session, tableName);
CreateTable(session, showCreateTableQuery);
- auto tableDescNew = DescribeTable(tableName, sessionId);
+ auto describeResultNew = DescribeTable(tableName, sessionId);
DropTable(session, tableName);
- CompareDescriptions(std::move(tableDescOrig), std::move(tableDescNew), showCreateTableQuery);
+ CompareDescriptions(std::move(describeResultOrig), std::move(describeResultNew), showCreateTableQuery);
}
private:
@@ -302,7 +305,24 @@ private:
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
}
- NKikimrSchemeOp::TTableDescription DescribeTable(const std::string& tableName,
+ void CreateTier(const TString& tierName) {
+ auto session = TableClient.CreateSession().GetValueSync().GetSession();
+ auto result = session.ExecuteSchemeQuery(R"(
+ UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);
+ UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);
+ CREATE EXTERNAL DATA SOURCE `)" + tierName + R"(` WITH (
+ SOURCE_TYPE="ObjectStorage",
+ LOCATION="http://fake.fake/olap-)" + tierName + R"(",
+ AUTH_METHOD="AWS",
+ AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey",
+ AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey",
+ AWS_REGION="ru-central1"
+ );
+ )").GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
+ }
+
+ Ydb::Table::CreateTableRequest DescribeTable(const std::string& tableName,
std::optional<TString> sessionId = std::nullopt) {
auto describeTable = [this](const TString& path) {
@@ -317,7 +337,19 @@ private:
runtime.Send(new IEventHandle(MakeTxProxyID(), sender, request.Release()));
auto reply = runtime.GrabEdgeEventRethrow<NSchemeShard::TEvSchemeShard::TEvDescribeSchemeResult>(handle);
- return reply->GetRecord().GetPathDescription().GetTable();
+ if (reply->GetRecord().GetPathDescription().HasColumnTableDescription()) {
+ const auto& tableDescription = reply->GetRecord().GetPathDescription().GetColumnTableDescription();
+
+ return *GetCreateTableRequest(tableDescription);
+ }
+
+ if (!reply->GetRecord().GetPathDescription().HasTable()) {
+ UNIT_ASSERT_C(false, "Invalid path type");
+ }
+
+ const auto& tableDescription = reply->GetRecord().GetPathDescription().GetTable();
+
+ return *GetCreateTableRequest(tableDescription);
};
TString tablePath = TString(tableName);
@@ -383,14 +415,11 @@ private:
UNIT_ASSERT_VALUES_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString());
}
- void CompareDescriptions(NKikimrSchemeOp::TTableDescription origDesc, NKikimrSchemeOp::TTableDescription newDesc, const std::string& showCreateTableQuery) {
- Ydb::Table::CreateTableRequest requestFirst = *GetCreateTableRequest(origDesc);
- Ydb::Table::CreateTableRequest requestSecond = *GetCreateTableRequest(newDesc);
-
+ void CompareDescriptions(Ydb::Table::CreateTableRequest describeResultOrig, Ydb::Table::CreateTableRequest describeResultNew, const std::string& showCreateTableQuery) {
TString first;
- ::google::protobuf::TextFormat::PrintToString(requestFirst, &first);
+ ::google::protobuf::TextFormat::PrintToString(describeResultOrig, &first);
TString second;
- ::google::protobuf::TextFormat::PrintToString(requestSecond, &second);
+ ::google::protobuf::TextFormat::PrintToString(describeResultNew, &second);
UNIT_ASSERT_VALUES_EQUAL_C(first, second, showCreateTableQuery);
}
@@ -430,6 +459,15 @@ private:
return scheme;
}
+ TMaybe<Ydb::Table::CreateTableRequest> GetCreateTableRequest(const NKikimrSchemeOp::TColumnTableDescription& tableDesc) {
+ Ydb::Table::CreateTableRequest scheme;
+
+ FillColumnDescription(scheme, tableDesc);
+ FillColumnFamilies(scheme, tableDesc);
+
+ return scheme;
+ }
+
private:
TTestEnv& Env;
NQuery::TQueryClient QueryClient;
@@ -942,8 +980,109 @@ R"(CREATE TABLE `test_show_create` (
PRIMARY KEY (`BoolValue`, `Int32Value`, `Uint32Value`, `Int64Value`, `Uint64Value`, `StringValue`, `Utf8Value`)
)
WITH (PARTITION_AT_KEYS = ((FALSE), (FALSE, 1, 2), (TRUE, 1, 1, 1, 1, 'str'), (TRUE, 1, 1, 100, 0, 'str', 'utf')));
-)",
- true);
+)"
+ );
+ }
+
+ Y_UNIT_TEST(ShowCreateTablePartitionByHash) {
+ TTestEnv env(1, 4, {.StoragePools = 3, .ShowCreateTable = true});
+
+ env.GetServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_EXECUTER, NActors::NLog::PRI_DEBUG);
+ env.GetServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_COMPILE_SERVICE, NActors::NLog::PRI_DEBUG);
+ env.GetServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_YQL, NActors::NLog::PRI_TRACE);
+ env.GetServer().GetRuntime()->SetLogPriority(NKikimrServices::SYSTEM_VIEWS, NActors::NLog::PRI_DEBUG);
+
+ TShowCreateTableChecker checker(env);
+
+ checker.CheckShowCreateTable(R"(
+ CREATE TABLE test_show_create (
+ Key1 Uint64 NOT NULL,
+ Key2 String NOT NULL,
+ Value String,
+ PRIMARY KEY (Key1, Key2)
+ )
+ PARTITION BY HASH(Key1, Key2)
+ WITH (
+ STORE = COLUMN
+ );
+ )", "test_show_create",
+R"(CREATE TABLE `test_show_create` (
+ `Key1` Uint64 NOT NULL,
+ `Key2` String NOT NULL,
+ `Value` String,
+ PRIMARY KEY (`Key1`, `Key2`)
+)
+PARTITION BY HASH (`Key1`, `Key2`)
+WITH (
+ STORE = COLUMN,
+ AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
+);
+)"
+ );
+ }
+
+ Y_UNIT_TEST(ShowCreateTableColumn) {
+ TTestEnv env(1, 4, {.StoragePools = 3, .ShowCreateTable = true});
+
+ env.GetServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_EXECUTER, NActors::NLog::PRI_DEBUG);
+ env.GetServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_COMPILE_SERVICE, NActors::NLog::PRI_DEBUG);
+ env.GetServer().GetRuntime()->SetLogPriority(NKikimrServices::KQP_YQL, NActors::NLog::PRI_TRACE);
+ env.GetServer().GetRuntime()->SetLogPriority(NKikimrServices::SYSTEM_VIEWS, NActors::NLog::PRI_DEBUG);
+
+ TShowCreateTableChecker checker(env);
+
+ checker.CheckShowCreateTable(R"(
+ CREATE TABLE test_show_create (
+ Key1 Uint64 NOT NULL,
+ Key2 Utf8 NOT NULL,
+ Key3 Int32 NOT NULL,
+ Value1 Utf8 FAMILY Family1,
+ Value2 Int16 FAMILY Family2,
+ Value3 String FAMILY Family2,
+ PRIMARY KEY (Key1, Key2, Key3),
+ FAMILY default (
+ COMPRESSION = "zstd"
+ ),
+ FAMILY Family1 (
+ COMPRESSION = "off"
+ ),
+ FAMILY Family2 (
+ COMPRESSION = "lz4"
+ )
+ )
+ PARTITION BY HASH(`Key1`, `Key2`)
+ WITH (
+ STORE = COLUMN,
+ AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 100,
+ TTL =
+ Interval("PT10S") TO EXTERNAL DATA SOURCE `/Root/tier1`,
+ Interval("PT1H") DELETE
+ ON Key1 AS SECONDS
+ );
+ )", "test_show_create",
+R"(CREATE TABLE `test_show_create` (
+ `Key1` Uint64 NOT NULL,
+ `Key2` Utf8 NOT NULL,
+ `Key3` Int32 NOT NULL,
+ `Value1` Utf8,
+ `Value2` Int16,
+ `Value3` String,
+ FAMILY `default` (COMPRESSION = 'zstd'),
+ FAMILY `Family1` (COMPRESSION = 'off'),
+ FAMILY `Family2` (COMPRESSION = 'lz4'),
+ PRIMARY KEY (`Key1`, `Key2`, `Key3`)
+)
+PARTITION BY HASH (`Key1`, `Key2`)
+WITH (
+ STORE = COLUMN,
+ AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 100,
+ TTL =
+ INTERVAL('PT10S') TO EXTERNAL DATA SOURCE `/Root/tier1`,
+ INTERVAL('PT1H') DELETE
+ ON Key1 AS SECONDS
+);
+)"
+ );
}
Y_UNIT_TEST(ShowCreateTablePartitionSettings) {
@@ -1090,6 +1229,82 @@ R"(CREATE TABLE `test_show_create` (
WITH (TTL = INTERVAL('PT1H') DELETE ON Key AS SECONDS);
)"
);
+
+ checker.CheckShowCreateTable(R"(
+ CREATE TABLE test_show_create (
+ Key Uint32 NOT NULL,
+ Value String,
+ PRIMARY KEY (Key)
+ )
+ PARTITION BY HASH(`Key`)
+ WITH (
+ STORE = COLUMN,
+ TTL = INTERVAL('PT1H') DELETE ON Key AS MILLISECONDS
+ );
+ )", "test_show_create");
+
+ checker.CheckShowCreateTable(R"(
+ CREATE TABLE test_show_create (
+ Key Uint32 NOT NULL,
+ Value String,
+ PRIMARY KEY (Key)
+ )
+ PARTITION BY HASH(`Key`)
+ WITH (
+ STORE = COLUMN,
+ TTL =
+ INTERVAL('PT1H') TO EXTERNAL DATA SOURCE `/Root/tier2`,
+ INTERVAL('PT3H') DELETE
+ ON Key AS NANOSECONDS
+ );
+ )", "test_show_create");
+
+ checker.CheckShowCreateTable(R"(
+ CREATE TABLE test_show_create (
+ Key Uint64 NOT NULL,
+ Value String,
+ PRIMARY KEY (Key)
+ )
+ PARTITION BY HASH(`Key`)
+ WITH (
+ STORE = COLUMN,
+ TTL = INTERVAL('PT1H') TO EXTERNAL DATA SOURCE `/Root/tier2` ON Key AS MICROSECONDS
+ );
+ )", "test_show_create");
+
+ checker.CheckShowCreateTable(R"(
+ CREATE TABLE test_show_create (
+ Key Timestamp NOT NULL,
+ Value String,
+ PRIMARY KEY (Key)
+ )
+ PARTITION BY HASH(`Key`)
+ WITH (
+ STORE = COLUMN,
+ TTL =
+ Interval("PT10S") TO EXTERNAL DATA SOURCE `/Root/tier1`,
+ Interval("PT1M") TO EXTERNAL DATA SOURCE `/Root/tier2`,
+ Interval("PT1H") DELETE
+ ON Key
+ );
+ )", "test_show_create",
+R"(CREATE TABLE `test_show_create` (
+ `Key` Timestamp NOT NULL,
+ `Value` String,
+ PRIMARY KEY (`Key`)
+)
+PARTITION BY HASH (`Key`)
+WITH (
+ STORE = COLUMN,
+ AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64,
+ TTL =
+ INTERVAL('PT10S') TO EXTERNAL DATA SOURCE `/Root/tier1`,
+ INTERVAL('PT1M') TO EXTERNAL DATA SOURCE `/Root/tier2`,
+ INTERVAL('PT1H') DELETE
+ ON Key
+);
+)"
+ );
}
Y_UNIT_TEST(ShowCreateTableTemporary) {
diff --git a/ydb/core/sys_view/ya.make b/ydb/core/sys_view/ya.make
index eb813f4dba..8d0c77be40 100644
--- a/ydb/core/sys_view/ya.make
+++ b/ydb/core/sys_view/ya.make
@@ -23,6 +23,7 @@ PEERDIR(
ydb/core/sys_view/tablets
ydb/core/tx/schemeshard
ydb/core/tx/tx_proxy
+ ydb/core/wrappers
)
YQL_LAST_ABI_VERSION()
diff --git a/ydb/core/testlib/actors/test_runtime.cpp b/ydb/core/testlib/actors/test_runtime.cpp
index 4c526d4af8..9f1682b952 100644
--- a/ydb/core/testlib/actors/test_runtime.cpp
+++ b/ydb/core/testlib/actors/test_runtime.cpp
@@ -8,6 +8,7 @@
#include <ydb/core/mon_alloc/profiler.h>
#include <ydb/core/grpc_services/grpc_helper.h>
#include <ydb/core/tablet/tablet_impl.h>
+#include <ydb/core/testlib/mock_transfer_writer_factory.h>
#include <ydb/library/actors/core/executor_pool_basic.h>
#include <ydb/library/actors/core/executor_pool_io.h>
@@ -182,6 +183,7 @@ namespace NActors {
nodeAppData->EnableMvccSnapshotWithLegacyDomainRoot = app0->EnableMvccSnapshotWithLegacyDomainRoot;
nodeAppData->IoContextFactory = app0->IoContextFactory;
nodeAppData->SchemeOperationFactory = app0->SchemeOperationFactory;
+ nodeAppData->TransferWriterFactory = std::make_shared<NKikimr::Tests::MockTransferWriterFactory>();
if (nodeIndex < egg.Icb.size()) {
nodeAppData->Icb = std::move(egg.Icb[nodeIndex]);
nodeAppData->InFlightLimiterRegistry.Reset(new NKikimr::NGRpcService::TInFlightLimiterRegistry(nodeAppData->Icb));
diff --git a/ydb/core/testlib/mock_transfer_writer_factory.h b/ydb/core/testlib/mock_transfer_writer_factory.h
new file mode 100644
index 0000000000..a644a041fd
--- /dev/null
+++ b/ydb/core/testlib/mock_transfer_writer_factory.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include <ydb/core/tx/replication/service/transfer_writer_factory.h>
+#include <ydb/library/actors/core/actor_bootstrapped.h>
+
+namespace NKikimr::Tests {
+
+struct MockTransferWriterFactory : public NKikimr::NReplication::NService::ITransferWriterFactory {
+ struct MockActor : public NActors::TActorBootstrapped<MockActor> {
+ void Bootstrap() {}
+ };
+
+
+ NActors::IActor* Create(const Parameters&) const override {
+ return new MockActor();
+ }
+};
+
+} // namespace NKikimr::Tests
diff --git a/ydb/core/testlib/storage_helpers.cpp b/ydb/core/testlib/storage_helpers.cpp
new file mode 100644
index 0000000000..193f51b4e9
--- /dev/null
+++ b/ydb/core/testlib/storage_helpers.cpp
@@ -0,0 +1,28 @@
+#include "storage_helpers.h"
+
+#include <ydb/core/blobstorage/dsproxy/mock/model.h>
+
+namespace NKikimr {
+ int CountBlobsWithSubstring(ui64 tabletId, const TVector<TIntrusivePtr<NFake::TProxyDS>>& proxyDSs, const TString& substring) {
+ int res = 0;
+ for (const auto& proxyDS : proxyDSs) {
+ for (const auto& [id, blob] : proxyDS->AllMyBlobs()) {
+ if (id.TabletID() == tabletId && !blob.DoNotKeep && blob.Buffer.ConvertToString().Contains(substring)) {
+ ++res;
+ }
+ }
+ }
+ return res;
+ }
+
+ bool BlobStorageContains(const TVector<TIntrusivePtr<NFake::TProxyDS>>& proxyDSs, const TString& value) {
+ for (const auto& proxyDS : proxyDSs) {
+ for (const auto& [id, blob] : proxyDS->AllMyBlobs()) {
+ if (!blob.DoNotKeep && blob.Buffer.ConvertToString().Contains(value)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+} // namespace NKikimr
diff --git a/ydb/core/testlib/storage_helpers.h b/ydb/core/testlib/storage_helpers.h
new file mode 100644
index 0000000000..40b6a3d0ea
--- /dev/null
+++ b/ydb/core/testlib/storage_helpers.h
@@ -0,0 +1,8 @@
+#pragma once
+
+#include <ydb/core/blobstorage/dsproxy/mock/dsproxy_mock.h>
+
+namespace NKikimr {
+ int CountBlobsWithSubstring(ui64 tabletId, const TVector<TIntrusivePtr<NFake::TProxyDS>>& proxyDSs, const TString& substring);
+ bool BlobStorageContains(const TVector<TIntrusivePtr<NFake::TProxyDS>>& proxyDSs, const TString& value);
+} // namespace NKikimr
diff --git a/ydb/core/testlib/test_client.cpp b/ydb/core/testlib/test_client.cpp
index 578f5c0f1e..0189659853 100644
--- a/ydb/core/testlib/test_client.cpp
+++ b/ydb/core/testlib/test_client.cpp
@@ -66,6 +66,7 @@
#include <ydb/core/kafka_proxy/kafka_listener.h>
#include <ydb/core/kafka_proxy/actors/kafka_metadata_actor.h>
#include <ydb/core/kafka_proxy/kafka_metrics.h>
+#include <ydb/core/kafka_proxy/kafka_transactions_coordinator.h>
#include <ydb/core/kqp/common/kqp.h>
#include <ydb/core/kqp/rm_service/kqp_rm_service.h>
#include <ydb/core/kqp/proxy_service/kqp_proxy_service.h>
@@ -537,6 +538,7 @@ namespace Tests {
appData.GraphConfig.MergeFrom(Settings->AppConfig->GetGraphConfig());
appData.SqsConfig.MergeFrom(Settings->AppConfig->GetSqsConfig());
appData.SharedCacheConfig.MergeFrom(Settings->AppConfig->GetSharedCacheConfig());
+ appData.TransferWriterFactory = Settings->TransferWriterFactory;
appData.DynamicNameserviceConfig = new TDynamicNameserviceConfig;
auto dnConfig = appData.DynamicNameserviceConfig;
@@ -622,11 +624,17 @@ namespace Tests {
}
}
- void TServer::EnableGRpc(const NYdbGrpc::TServerOptions& options, ui32 grpcServiceNodeId) {
- GRpcServerRootCounters = MakeIntrusive<::NMonitoring::TDynamicCounters>();
- auto& counters = GRpcServerRootCounters;
+ void TServer::EnableGRpc(const NYdbGrpc::TServerOptions& options, ui32 grpcServiceNodeId, const std::optional<TString>& tenant) {
+ auto* grpcInfo = &RootGRpc;
+ if (tenant) {
+ grpcInfo = &TenantsGRpc[*tenant];
+ }
+
+ grpcInfo->GRpcServerRootCounters = MakeIntrusive<::NMonitoring::TDynamicCounters>();
+ auto& counters = grpcInfo->GRpcServerRootCounters;
- GRpcServer.reset(new NYdbGrpc::TGRpcServer(options));
+ auto& grpcServer = grpcInfo->GRpcServer;
+ grpcServer.reset(new NYdbGrpc::TGRpcServer(options));
auto grpcService = new NGRpcProxy::TGRpcService();
auto system(Runtime->GetActorSystem(grpcServiceNodeId));
@@ -663,11 +671,15 @@ namespace Tests {
desc->Port = options.Port;
desc->Ssl = !options.SslData.Empty();
- TVector<TString> rootDomains;
- if (const auto& domain = appData.DomainsInfo->Domain) {
- rootDomains.emplace_back("/" + domain->Name);
+ if (!tenant) {
+ TVector<TString> rootDomains;
+ if (const auto& domain = appData.DomainsInfo->Domain) {
+ rootDomains.emplace_back("/" + domain->Name);
+ }
+ desc->ServedDatabases.insert(desc->ServedDatabases.end(), rootDomains.begin(), rootDomains.end());
+ } else {
+ desc->ServedDatabases.emplace_back(CanonizePath(*tenant));
}
- desc->ServedDatabases.insert(desc->ServedDatabases.end(), rootDomains.begin(), rootDomains.end());
TVector<TString> grpcServices = {"yql", "clickhouse_internal", "datastreams", "table_service", "scripting", "experimental", "discovery", "pqcd", "fds", "pq", "pqv0", "pqv1" };
desc->ServedServices.insert(desc->ServedServices.end(), grpcServices.begin(), grpcServices.end());
@@ -695,50 +707,50 @@ namespace Tests {
future.Subscribe(startCb);
- GRpcServer->AddService(grpcService);
- GRpcServer->AddService(new NGRpcService::TGRpcYdbExportService(system, counters, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::TGRpcYdbImportService(system, counters, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::TGRpcYdbSchemeService(system, counters, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::TGRpcYdbTableService(system, counters, appData.InFlightLimiterRegistry, grpcRequestProxies, true, 1));
- GRpcServer->AddService(new NGRpcService::TGRpcYdbScriptingService(system, counters, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::TGRpcOperationService(system, counters, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::V1::TGRpcPersQueueService(system, counters, NMsgBusProxy::CreatePersQueueMetaCacheV2Id(), grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::V1::TGRpcTopicService(system, counters, NMsgBusProxy::CreatePersQueueMetaCacheV2Id(), grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::TGRpcPQClusterDiscoveryService(system, counters, grpcRequestProxies[0]));
- GRpcServer->AddService(new NKesus::TKesusGRpcService(system, counters, appData.InFlightLimiterRegistry, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::TGRpcCmsService(system, counters, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::TGRpcDiscoveryService(system, counters, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::TGRpcYdbClickhouseInternalService(system, counters, appData.InFlightLimiterRegistry, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::TGRpcYdbObjectStorageService(system, counters, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NQuoter::TRateLimiterGRpcService(system, counters, grpcRequestProxies[0]));
- GRpcServer->AddService(new NGRpcService::TGRpcDataStreamsService(system, counters, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::TGRpcMonitoringService(system, counters, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::TGRpcYdbQueryService(system, counters, grpcRequestProxies, true, 1));
- GRpcServer->AddService(new NGRpcService::TGRpcYdbTabletService(system, counters, grpcRequestProxies, true, 1));
+ grpcServer->AddService(grpcService);
+ grpcServer->AddService(new NGRpcService::TGRpcYdbExportService(system, counters, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::TGRpcYdbImportService(system, counters, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::TGRpcYdbSchemeService(system, counters, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::TGRpcYdbTableService(system, counters, appData.InFlightLimiterRegistry, grpcRequestProxies, true, 1));
+ grpcServer->AddService(new NGRpcService::TGRpcYdbScriptingService(system, counters, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::TGRpcOperationService(system, counters, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::V1::TGRpcPersQueueService(system, counters, NMsgBusProxy::CreatePersQueueMetaCacheV2Id(), grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::V1::TGRpcTopicService(system, counters, NMsgBusProxy::CreatePersQueueMetaCacheV2Id(), grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::TGRpcPQClusterDiscoveryService(system, counters, grpcRequestProxies[0]));
+ grpcServer->AddService(new NKesus::TKesusGRpcService(system, counters, appData.InFlightLimiterRegistry, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::TGRpcCmsService(system, counters, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::TGRpcDiscoveryService(system, counters, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::TGRpcYdbClickhouseInternalService(system, counters, appData.InFlightLimiterRegistry, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::TGRpcYdbObjectStorageService(system, counters, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NQuoter::TRateLimiterGRpcService(system, counters, grpcRequestProxies[0]));
+ grpcServer->AddService(new NGRpcService::TGRpcDataStreamsService(system, counters, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::TGRpcMonitoringService(system, counters, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::TGRpcYdbQueryService(system, counters, grpcRequestProxies, true, 1));
+ grpcServer->AddService(new NGRpcService::TGRpcYdbTabletService(system, counters, grpcRequestProxies, true, 1));
if (Settings->EnableYq || Settings->EnableYqGrpc) {
- GRpcServer->AddService(new NGRpcService::TGRpcFederatedQueryService(system, counters, grpcRequestProxies[0]));
- GRpcServer->AddService(new NGRpcService::TGRpcFqPrivateTaskService(system, counters, grpcRequestProxies[0]));
+ grpcServer->AddService(new NGRpcService::TGRpcFederatedQueryService(system, counters, grpcRequestProxies[0]));
+ grpcServer->AddService(new NGRpcService::TGRpcFqPrivateTaskService(system, counters, grpcRequestProxies[0]));
}
if (const auto& factory = Settings->GrpcServiceFactory) {
// All services enabled by default for ut
static const std::unordered_set<TString> dummy;
for (const auto& service : factory->Create(dummy, dummy, system, counters, grpcRequestProxies[0])) {
- GRpcServer->AddService(service);
+ grpcServer->AddService(service);
}
}
- GRpcServer->AddService(new NGRpcService::TGRpcYdbLogStoreService(system, counters, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::TGRpcAuthService(system, counters, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::TGRpcReplicationService(system, counters, grpcRequestProxies[0], true));
- GRpcServer->AddService(new NGRpcService::TGRpcViewService(system, counters, grpcRequestProxies[0], true));
- GRpcServer->Start();
+ grpcServer->AddService(new NGRpcService::TGRpcYdbLogStoreService(system, counters, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::TGRpcAuthService(system, counters, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::TGRpcReplicationService(system, counters, grpcRequestProxies[0], true));
+ grpcServer->AddService(new NGRpcService::TGRpcViewService(system, counters, grpcRequestProxies[0], true));
+ grpcServer->Start();
}
- void TServer::EnableGRpc(ui16 port, ui32 grpcServiceNodeId) {
+ void TServer::EnableGRpc(ui16 port, ui32 grpcServiceNodeId, const std::optional<TString>& tenant) {
EnableGRpc(NYdbGrpc::TServerOptions()
.SetHost("localhost")
.SetPort(port)
.SetLogger(NYdbGrpc::CreateActorSystemLogger(*Runtime->GetActorSystem(grpcServiceNodeId), NKikimrServices::GRPC_SERVER)),
- grpcServiceNodeId
+ grpcServiceNodeId, tenant
);
}
@@ -915,11 +927,39 @@ namespace Tests {
host.SetHostConfigId(hostConfig.GetHostConfigId());
bsConfigureRequest->Record.MutableRequest()->AddCommand()->MutableDefineBox()->CopyFrom(boxConfig);
+ std::unordered_map<TString, ui64> poolsConfigGenerations;
+ if (Settings->FetchPoolsGeneration) {
+ auto bsDescribeRequest = MakeHolder<TEvBlobStorage::TEvControllerConfigRequest>();
+ auto& describeCommand = *bsDescribeRequest->Record.MutableRequest()->AddCommand()->MutableReadStoragePool();
+ describeCommand.SetBoxId(Settings->BOX_ID);
+ for (const auto& [_, storagePool] : Settings->StoragePoolTypes) {
+ describeCommand.AddName(storagePool.GetName());
+ }
+
+ Runtime->SendToPipe(MakeBSControllerID(), sender, bsDescribeRequest.Release(), 0, pipeConfig);
+ TAutoPtr<IEventHandle> handleDescResponse;
+ const auto descResponse = Runtime->GrabEdgeEventRethrow<TEvBlobStorage::TEvControllerConfigResponse>(handleDescResponse);
+
+ const auto& response = descResponse->Record.GetResponse();
+ if (!response.GetSuccess()) {
+ Cerr << "\n\n descResponse is #" << descResponse->Record.DebugString() << "\n\n";
+ }
+ UNIT_ASSERT(descResponse->Record.GetResponse().GetSuccess());
+ UNIT_ASSERT_VALUES_EQUAL(response.StatusSize(), 1);
+ const auto& status = response.GetStatus(0);
+
+ poolsConfigGenerations.reserve(status.StoragePoolSize());
+ for (const auto& storagePool : status.GetStoragePool()) {
+ UNIT_ASSERT(poolsConfigGenerations.emplace(storagePool.GetName(), storagePool.GetItemConfigGeneration()).second);
+ }
+ }
+
for (const auto& [poolKind, storagePool] : Settings->StoragePoolTypes) {
if (storagePool.GetNumGroups() > 0) {
auto* command = bsConfigureRequest->Record.MutableRequest()->AddCommand()->MutableDefineStoragePool();
command->CopyFrom(storagePool);
- command->SetItemConfigGeneration(Settings->StorageGeneration);
+ const auto poolGenerationIt = poolsConfigGenerations.find(storagePool.GetName());
+ command->SetItemConfigGeneration(poolGenerationIt == poolsConfigGenerations.end() ? Settings->StorageGeneration : poolGenerationIt->second);
}
}
@@ -1406,7 +1446,10 @@ namespace Tests {
IActor* discoveryCache = CreateDiscoveryCache(NGRpcService::KafkaEndpointId);
TActorId discoveryCacheId = Runtime->Register(discoveryCache, nodeIdx, userPoolId);
- Runtime->RegisterService(NKafka::MakeKafkaDiscoveryCacheID(), discoveryCacheId, nodeIdx);
+ Runtime->RegisterService(NKafka::MakeKafkaDiscoveryCacheID(), discoveryCacheId, nodeIdx);
+
+ TActorId kafkaTxnCoordinatorActorId = Runtime->Register(NKafka::CreateKafkaTransactionsCoordinator(), nodeIdx, userPoolId);
+ Runtime->RegisterService(NKafka::MakeKafkaTransactionsServiceID(), kafkaTxnCoordinatorActorId, nodeIdx);
NKafka::TListenerSettings settings;
settings.Port = Settings->AppConfig->GetKafkaProxyConfig().GetListeningPort();
@@ -1416,8 +1459,7 @@ namespace Tests {
settings.SslCertificatePem = Settings->AppConfig->GetKafkaProxyConfig().GetSslCertificate();
}
- IActor* actor = NKafka::CreateKafkaListener(MakePollerActorId(), settings, Settings->AppConfig->GetKafkaProxyConfig(),
- discoveryCacheId);
+ IActor* actor = NKafka::CreateKafkaListener(MakePollerActorId(), settings, Settings->AppConfig->GetKafkaProxyConfig());
TActorId actorId = Runtime->Register(actor, nodeIdx, userPoolId);
Runtime->RegisterService(TActorId{}, actorId, nodeIdx);
@@ -1632,8 +1674,15 @@ namespace Tests {
}
const NYdbGrpc::TGRpcServer& TServer::GetGRpcServer() const {
- Y_ABORT_UNLESS(GRpcServer);
- return *GRpcServer;
+ Y_ABORT_UNLESS(RootGRpc.GRpcServer);
+ return *RootGRpc.GRpcServer;
+ }
+
+ const NYdbGrpc::TGRpcServer& TServer::GetTenantGRpcServer(const TString& tenant) const {
+ const auto it = TenantsGRpc.find(tenant);
+ Y_ABORT_UNLESS(it != TenantsGRpc.end());
+ Y_ABORT_UNLESS(it->second.GRpcServer);
+ return *it->second.GRpcServer;
}
void TServer::WaitFinalization() {
@@ -1645,9 +1694,7 @@ namespace Tests {
}
TServer::~TServer() {
- if (GRpcServer) {
- GRpcServer->Stop();
- }
+ ShutdownGRpc();
if (YqSharedResources) {
YqSharedResources->Stop();
diff --git a/ydb/core/testlib/test_client.h b/ydb/core/testlib/test_client.h
index 38196a18ee..1c4b013901 100644
--- a/ydb/core/testlib/test_client.h
+++ b/ydb/core/testlib/test_client.h
@@ -24,6 +24,7 @@
#include <ydb/core/protos/flat_scheme_op.pb.h>
#include <ydb/core/testlib/basics/runtime.h>
#include <ydb/core/testlib/basics/appdata.h>
+#include <ydb/core/testlib/mock_transfer_writer_factory.h>
#include <ydb/core/protos/kesus.pb.h>
#include <ydb/core/protos/table_service_config.pb.h>
#include <ydb/core/protos/console_tenant.pb.h>
@@ -109,7 +110,6 @@ namespace Tests {
using TControls = NKikimrConfig::TImmediateControlsConfig;
using TLoggerInitializer = std::function<void (TTestActorRuntime&)>;
using TStoragePoolKinds = TDomainsInfo::TDomain::TStoragePoolKinds;
- using TProxyDSPtr = TIntrusivePtr<NFake::TProxyDS>;
ui16 Port;
ui16 GrpcPort = 0;
@@ -128,6 +128,7 @@ namespace Tests {
ui32 NodeCount = 1;
ui32 DynamicNodeCount = 0;
ui64 StorageGeneration = 0;
+ bool FetchPoolsGeneration = false;
NFake::TStorage CustomDiskParams;
TControls Controls;
TAppPrepare::TFnReg FrFactory = &DefaultFrFactory;
@@ -158,6 +159,8 @@ namespace Tests {
std::shared_ptr<NKikimr::NMsgBusProxy::IPersQueueGetReadSessionsInfoWorkerFactory> PersQueueGetReadSessionsInfoWorkerFactory;
std::shared_ptr<NKikimr::NHttpProxy::IAuthFactory> DataStreamsAuthFactory;
std::shared_ptr<NKikimr::NPQ::TPersQueueMirrorReaderFactory> PersQueueMirrorReaderFactory = std::make_shared<NKikimr::NPQ::TPersQueueMirrorReaderFactory>();
+ std::shared_ptr<NKikimr::NReplication::NService::ITransferWriterFactory> TransferWriterFactory = std::make_shared<MockTransferWriterFactory>();
+
bool EnableMetering = false;
TString MeteringFilePath;
TString AwsRegion;
@@ -171,7 +174,7 @@ namespace Tests {
TString ServerCertFilePath;
bool Verbose = true;
bool UseSectorMap = false;
- TVector<TProxyDSPtr> ProxyDSMocks;
+ TVector<TIntrusivePtr<NFake::TProxyDS>> ProxyDSMocks;
std::function<IActor*(const TTicketParserSettings&)> CreateTicketParser = NKikimr::CreateTicketParser;
std::shared_ptr<TGrpcServiceFactory> GrpcServiceFactory;
@@ -187,7 +190,7 @@ namespace Tests {
TServerSettings& SetDomainName(const TString& value);
TServerSettings& SetNodeCount(ui32 value) { NodeCount = value; return *this; }
TServerSettings& SetDynamicNodeCount(ui32 value) { DynamicNodeCount = value; return *this; }
- TServerSettings& SetStorageGeneration(ui64 value) { StorageGeneration = value; return *this; }
+ TServerSettings& SetStorageGeneration(ui64 storageGeneration, bool fetchPoolsGeneration = false) { StorageGeneration = storageGeneration; FetchPoolsGeneration = fetchPoolsGeneration; return *this; }
TServerSettings& SetCustomDiskParams(const NFake::TStorage& value) { CustomDiskParams = value; return *this; }
TServerSettings& SetControls(const TControls& value) { Controls = value; return *this; }
TServerSettings& SetFrFactory(const TAppPrepare::TFnReg& value) { FrFactory = value; return *this; }
@@ -263,7 +266,7 @@ namespace Tests {
return *this;
}
- TServerSettings& SetProxyDSMocks(const TVector<TProxyDSPtr>& proxyDSMocks) {
+ TServerSettings& SetProxyDSMocks(const TVector<TIntrusivePtr<NFake::TProxyDS>>& proxyDSMocks) {
ProxyDSMocks = proxyDSMocks;
return *this;
}
@@ -339,22 +342,23 @@ namespace Tests {
TServer& operator =(TServer&& server) = default;
virtual ~TServer();
- void EnableGRpc(const NYdbGrpc::TServerOptions& options, ui32 grpcServiceNodeId = 0);
- void EnableGRpc(ui16 port, ui32 grpcServiceNodeId = 0);
+ void EnableGRpc(const NYdbGrpc::TServerOptions& options, ui32 grpcServiceNodeId = 0, const std::optional<TString>& tenant = std::nullopt);
+ void EnableGRpc(ui16 port, ui32 grpcServiceNodeId = 0, const std::optional<TString>& tenant = std::nullopt);
void SetupRootStoragePools(const TActorId sender) const;
void SetupDefaultProfiles();
TIntrusivePtr<::NMonitoring::TDynamicCounters> GetGRpcServerRootCounters() const {
- return GRpcServerRootCounters;
+ return RootGRpc.GRpcServerRootCounters;
}
void ShutdownGRpc() {
- if (GRpcServer) {
- GRpcServer->Stop();
- GRpcServer = nullptr;
+ RootGRpc.Shutdown();
+ for (auto& [_, tenantGRpc] : TenantsGRpc) {
+ tenantGRpc.Shutdown();
}
}
+
void StartDummyTablets();
TVector<ui64> StartPQTablets(ui32 pqTabletsN, bool wait = true);
TTestActorRuntime* GetRuntime() const;
@@ -363,6 +367,7 @@ namespace Tests {
const NMiniKQL::IFunctionRegistry* GetFunctionRegistry();
const NYdb::TDriver& GetDriver() const;
const NYdbGrpc::TGRpcServer& GetGRpcServer() const;
+ const NYdbGrpc::TGRpcServer& GetTenantGRpcServer(const TString& tenant) const;
ui32 StaticNodes() const {
return Settings->NodeCount;
@@ -384,9 +389,22 @@ namespace Tests {
TIntrusivePtr<NBus::TBusMessageQueue> Bus;
const NBus::TBusServerSessionConfig BusServerSessionConfig; //BusServer hold const & on config
TAutoPtr<NMsgBusProxy::IMessageBusServer> BusServer;
- std::unique_ptr<NYdbGrpc::TGRpcServer> GRpcServer;
- TIntrusivePtr<::NMonitoring::TDynamicCounters> GRpcServerRootCounters;
NFq::IYqSharedResources::TPtr YqSharedResources;
+
+ struct TGRpcInfo {
+ std::unique_ptr<NYdbGrpc::TGRpcServer> GRpcServer;
+ TIntrusivePtr<NMonitoring::TDynamicCounters> GRpcServerRootCounters;
+
+ void Shutdown() {
+ if (GRpcServer) {
+ GRpcServer->Stop();
+ GRpcServer = nullptr;
+ }
+ }
+ };
+
+ TGRpcInfo RootGRpc;
+ std::unordered_map<TString, TGRpcInfo> TenantsGRpc;
};
class TClient {
diff --git a/ydb/core/testlib/ya.make b/ydb/core/testlib/ya.make
index 816e9dd2df..faa21b4d64 100644
--- a/ydb/core/testlib/ya.make
+++ b/ydb/core/testlib/ya.make
@@ -10,6 +10,7 @@ SRCS(
fake_scheme_shard.h
minikql_compile.h
mock_pq_metacache.h
+ storage_helpers.cpp
tablet_flat_dummy.cpp
tablet_helpers.cpp
tablet_helpers.h
diff --git a/ydb/core/tx/columnshard/blobs_action/transaction/tx_blobs_written.cpp b/ydb/core/tx/columnshard/blobs_action/transaction/tx_blobs_written.cpp
index 6c730e7f4e..cd7feae239 100644
--- a/ydb/core/tx/columnshard/blobs_action/transaction/tx_blobs_written.cpp
+++ b/ydb/core/tx/columnshard/blobs_action/transaction/tx_blobs_written.cpp
@@ -97,10 +97,11 @@ void TTxBlobsWritingFinished::DoComplete(const TActorContext& ctx) {
}
std::set<TInternalPathId> pathIds;
for (auto&& writeResult : Pack.GetWriteResults()) {
+ const auto& writeMeta = writeResult.GetWriteMeta();
+ writeMeta.OnStage(NEvWrite::EWriteStage::Replied);
if (writeResult.GetNoDataToWrite()) {
continue;
}
- const auto& writeMeta = writeResult.GetWriteMeta();
auto op = Self->GetOperationsManager().GetOperationVerified((TOperationWriteId)writeMeta.GetWriteId());
pathIds.emplace(op->GetPathId());
if (op->GetBehaviour() == EOperationBehaviour::WriteWithLock || op->GetBehaviour() == EOperationBehaviour::NoTxWrite) {
@@ -146,6 +147,7 @@ TTxBlobsWritingFinished::TTxBlobsWritingFinished(TColumnShard* self, const NKiki
bool TTxBlobsWritingFailed::DoExecute(TTransactionContext& txc, const TActorContext& /* ctx */) {
for (auto&& wResult : Pack.GetWriteResults()) {
const auto& writeMeta = wResult.GetWriteMeta();
+ writeMeta.OnStage(NEvWrite::EWriteStage::Replied);
AFL_VERIFY(!writeMeta.HasLongTxId());
auto op = Self->GetOperationsManager().GetOperationVerified((TOperationWriteId)writeMeta.GetWriteId());
Self->OperationsManager->AddTemporaryTxLink(op->GetLockId());
diff --git a/ydb/core/tx/columnshard/columnshard__write.cpp b/ydb/core/tx/columnshard/columnshard__write.cpp
index 818263c771..3a65c5aa5c 100644
--- a/ydb/core/tx/columnshard/columnshard__write.cpp
+++ b/ydb/core/tx/columnshard/columnshard__write.cpp
@@ -487,7 +487,7 @@ void TColumnShard::Handle(NEvents::TDataEvents::TEvWrite::TPtr& ev, const TActor
const auto sendError = [&](const TString& message, const NKikimrDataEvents::TEvWriteResult::EStatus status) {
Counters.GetTabletCounters()->IncCounter(COUNTER_WRITE_FAIL);
- auto result = NEvents::TDataEvents::TEvWriteResult::BuildError(TabletID(), 0, status, message);
+ auto result = NEvents::TDataEvents::TEvWriteResult::BuildError(TabletID(), record.GetTxId(), status, message);
ctx.Send(source, result.release(), 0, cookie);
};
if (behaviour == EOperationBehaviour::CommitWriteLock) {
diff --git a/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/ya.make b/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/ya.make
index f18a5c1990..4f85dd7575 100644
--- a/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/ya.make
+++ b/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/ya.make
@@ -9,6 +9,7 @@ SRCS(
PEERDIR(
ydb/core/tx/columnshard/engines/changes/compaction/common
+ ydb/core/formats/arrow/accessor/sub_columns
)
END()
diff --git a/ydb/core/tx/columnshard/engines/predicate/filter.cpp b/ydb/core/tx/columnshard/engines/predicate/filter.cpp
index 1dc7b77bff..28c47e6c89 100644
--- a/ydb/core/tx/columnshard/engines/predicate/filter.cpp
+++ b/ydb/core/tx/columnshard/engines/predicate/filter.cpp
@@ -36,16 +36,9 @@ TConclusionStatus TPKRangesFilter::Add(
return toContainerConclusion;
}
if (SortedRanges.size() && !FakeRanges) {
- if (ReverseFlag) {
- if (fromContainerConclusion->CrossRanges(SortedRanges.front().GetPredicateTo())) {
- AFL_ERROR(NKikimrServices::TX_COLUMNSHARD_SCAN)("event", "add_range_filter")("problem", "not sorted sequence");
- return TConclusionStatus::Fail("not sorted sequence");
- }
- } else {
- if (fromContainerConclusion->CrossRanges(SortedRanges.back().GetPredicateTo())) {
- AFL_ERROR(NKikimrServices::TX_COLUMNSHARD_SCAN)("event", "add_range_filter")("problem", "not sorted sequence");
- return TConclusionStatus::Fail("not sorted sequence");
- }
+ if (fromContainerConclusion->CrossRanges(SortedRanges.back().GetPredicateTo())) {
+ AFL_ERROR(NKikimrServices::TX_COLUMNSHARD_SCAN)("event", "add_range_filter")("problem", "not sorted sequence");
+ return TConclusionStatus::Fail("not sorted sequence");
}
}
auto pkRangeFilterConclusion = TPKRangeFilter::Build(fromContainerConclusion.DetachResult(), toContainerConclusion.DetachResult());
@@ -56,11 +49,7 @@ TConclusionStatus TPKRangesFilter::Add(
FakeRanges = false;
SortedRanges.clear();
}
- if (ReverseFlag) {
- SortedRanges.emplace_front(pkRangeFilterConclusion.DetachResult());
- } else {
- SortedRanges.emplace_back(pkRangeFilterConclusion.DetachResult());
- }
+ SortedRanges.emplace_back(pkRangeFilterConclusion.DetachResult());
return TConclusionStatus::Success();
}
@@ -112,8 +101,7 @@ TPKRangeFilter::EUsageClass TPKRangesFilter::GetUsageClass(const NArrow::TReplac
return TPKRangeFilter::EUsageClass::NoUsage;
}
-TPKRangesFilter::TPKRangesFilter(const bool reverse)
- : ReverseFlag(reverse) {
+TPKRangesFilter::TPKRangesFilter() {
auto range = TPKRangeFilter::Build(TPredicateContainer::BuildNullPredicateFrom(), TPredicateContainer::BuildNullPredicateTo());
Y_ABORT_UNLESS(range);
SortedRanges.emplace_back(*range);
@@ -148,8 +136,8 @@ std::shared_ptr<arrow::RecordBatch> TPKRangesFilter::SerializeToRecordBatch(cons
}
std::shared_ptr<NKikimr::NOlap::TPKRangesFilter> TPKRangesFilter::BuildFromRecordBatchLines(
- const std::shared_ptr<arrow::RecordBatch>& batch, const bool reverse) {
- std::shared_ptr<TPKRangesFilter> result = std::make_shared<TPKRangesFilter>(reverse);
+ const std::shared_ptr<arrow::RecordBatch>& batch) {
+ std::shared_ptr<TPKRangesFilter> result = std::make_shared<TPKRangesFilter>();
for (ui32 i = 0; i < batch->num_rows(); ++i) {
auto batchRow = batch->Slice(i, 1);
auto pFrom = std::make_shared<NOlap::TPredicate>(NKernels::EOperation::GreaterEqual, batchRow);
@@ -160,8 +148,8 @@ std::shared_ptr<NKikimr::NOlap::TPKRangesFilter> TPKRangesFilter::BuildFromRecor
}
std::shared_ptr<NKikimr::NOlap::TPKRangesFilter> TPKRangesFilter::BuildFromRecordBatchFull(
- const std::shared_ptr<arrow::RecordBatch>& batch, const std::shared_ptr<arrow::Schema>& pkSchema, const bool reverse) {
- std::shared_ptr<TPKRangesFilter> result = std::make_shared<TPKRangesFilter>(reverse);
+ const std::shared_ptr<arrow::RecordBatch>& batch, const std::shared_ptr<arrow::Schema>& pkSchema) {
+ std::shared_ptr<TPKRangesFilter> result = std::make_shared<TPKRangesFilter>();
auto pkBatch = NArrow::TColumnOperator().Adapt(batch, pkSchema).DetachResult();
auto c = batch->GetColumnByName(".ydb_operation_type");
AFL_VERIFY(c);
@@ -201,9 +189,9 @@ std::shared_ptr<NKikimr::NOlap::TPKRangesFilter> TPKRangesFilter::BuildFromRecor
}
std::shared_ptr<NKikimr::NOlap::TPKRangesFilter> TPKRangesFilter::BuildFromString(
- const TString& data, const std::shared_ptr<arrow::Schema>& pkSchema, const bool reverse) {
+ const TString& data, const std::shared_ptr<arrow::Schema>& pkSchema) {
auto batch = NArrow::TStatusValidator::GetValid(NArrow::NSerialization::TNativeSerializer().Deserialize(data));
- return BuildFromRecordBatchFull(batch, pkSchema, reverse);
+ return BuildFromRecordBatchFull(batch, pkSchema);
}
TString TPKRangesFilter::SerializeToString(const std::shared_ptr<arrow::Schema>& pkSchema) const {
diff --git a/ydb/core/tx/columnshard/engines/predicate/filter.h b/ydb/core/tx/columnshard/engines/predicate/filter.h
index b020320280..93a52b46a5 100644
--- a/ydb/core/tx/columnshard/engines/predicate/filter.h
+++ b/ydb/core/tx/columnshard/engines/predicate/filter.h
@@ -1,8 +1,8 @@
#pragma once
#include "range.h"
-#include <ydb/core/protos/tx_datashard.pb.h>
#include <ydb/core/protos/kqp.pb.h>
+#include <ydb/core/protos/tx_datashard.pb.h>
#include <deque>
@@ -12,10 +12,9 @@ class TPKRangesFilter {
private:
bool FakeRanges = true;
std::deque<TPKRangeFilter> SortedRanges;
- bool ReverseFlag = false;
public:
- TPKRangesFilter(const bool reverse);
+ TPKRangesFilter();
std::optional<ui32> GetFilteredCountLimit(const std::shared_ptr<arrow::Schema>& pkSchema) {
ui32 result = 0;
@@ -38,10 +37,6 @@ public:
return SortedRanges.empty() || FakeRanges;
}
- bool IsReverse() const {
- return ReverseFlag;
- }
-
const TPKRangeFilter& Front() const {
Y_ABORT_UNLESS(Size());
return SortedRanges.front();
@@ -85,20 +80,19 @@ public:
std::set<ui32> GetColumnIds(const TIndexInfo& indexInfo) const;
- static std::shared_ptr<TPKRangesFilter> BuildFromRecordBatchLines(const std::shared_ptr<arrow::RecordBatch>& batch, const bool reverse);
+ static std::shared_ptr<TPKRangesFilter> BuildFromRecordBatchLines(const std::shared_ptr<arrow::RecordBatch>& batch);
static std::shared_ptr<TPKRangesFilter> BuildFromRecordBatchFull(
- const std::shared_ptr<arrow::RecordBatch>& batch, const std::shared_ptr<arrow::Schema>& pkSchema, const bool reverse);
- static std::shared_ptr<TPKRangesFilter> BuildFromString(
- const TString& data, const std::shared_ptr<arrow::Schema>& pkSchema, const bool reverse);
+ const std::shared_ptr<arrow::RecordBatch>& batch, const std::shared_ptr<arrow::Schema>& pkSchema);
+ static std::shared_ptr<TPKRangesFilter> BuildFromString(const TString& data, const std::shared_ptr<arrow::Schema>& pkSchema);
template <class TProto>
- static TConclusion<TPKRangesFilter> BuildFromProto(const TProto& proto, const bool reverse, const std::vector<TNameTypeInfo>& ydbPk) {
- TPKRangesFilter result(reverse);
+ static TConclusion<TPKRangesFilter> BuildFromProto(const TProto& proto, const std::vector<TNameTypeInfo>& ydbPk) {
+ TPKRangesFilter result;
for (auto& protoRange : proto.GetRanges()) {
auto fromPredicate = std::make_shared<TPredicate>();
auto toPredicate = std::make_shared<TPredicate>();
- std::tie(*fromPredicate, *toPredicate) = TPredicate::DeserializePredicatesRange(TSerializedTableRange{protoRange}, ydbPk);
+ std::tie(*fromPredicate, *toPredicate) = TPredicate::DeserializePredicatesRange(TSerializedTableRange{ protoRange }, ydbPk);
auto status = result.Add(fromPredicate, toPredicate, NArrow::TStatusValidator::GetValid(NArrow::MakeArrowSchema(ydbPk)));
if (status.IsFail()) {
return status;
@@ -114,6 +108,8 @@ private:
virtual ui64 DoGetEntityRecordsCount() const = 0;
public:
+ virtual ~ICursorEntity() = default;
+
ui64 GetEntityId() const {
return DoGetEntityId();
}
@@ -125,7 +121,7 @@ public:
class IScanCursor {
private:
virtual const std::shared_ptr<arrow::RecordBatch>& DoGetPKCursor() const = 0;
- virtual bool DoCheckEntityIsBorder(const std::shared_ptr<ICursorEntity>& entity, bool& usage) const = 0;
+ virtual bool DoCheckEntityIsBorder(const ICursorEntity& entity, bool& usage) const = 0;
virtual bool DoCheckSourceIntervalUsage(const ui64 sourceId, const ui32 indexStart, const ui32 recordsCount) const = 0;
virtual TConclusionStatus DoDeserializeFromProto(const NKikimrKqp::TEvKqpScanCursor& proto) = 0;
virtual void DoSerializeToProto(NKikimrKqp::TEvKqpScanCursor& proto) const = 0;
@@ -144,7 +140,7 @@ public:
return DoCheckSourceIntervalUsage(sourceId, indexStart, recordsCount);
}
- bool CheckEntityIsBorder(const std::shared_ptr<ICursorEntity>& entity, bool& usage) const {
+ bool CheckEntityIsBorder(const ICursorEntity& entity, bool& usage) const {
AFL_VERIFY(IsInitialized());
return DoCheckEntityIsBorder(entity, usage);
}
@@ -180,18 +176,18 @@ private:
return !!SourceId;
}
- virtual bool DoCheckEntityIsBorder(const std::shared_ptr<ICursorEntity>& entity, bool& usage) const override {
- if (SourceId != entity->GetEntityId()) {
+ virtual bool DoCheckEntityIsBorder(const ICursorEntity& entity, bool& usage) const override {
+ if (SourceId != entity.GetEntityId()) {
return false;
}
- AFL_VERIFY(RecordIndex <= entity->GetEntityRecordsCount());
- usage = RecordIndex < entity->GetEntityRecordsCount();
+ AFL_VERIFY(RecordIndex <= entity.GetEntityRecordsCount());
+ usage = RecordIndex < entity.GetEntityRecordsCount();
return true;
}
virtual TConclusionStatus DoDeserializeFromProto(const NKikimrKqp::TEvKqpScanCursor& proto) override {
if (!proto.HasColumnShardSimple()) {
- return TConclusionStatus::Success();
+ return TConclusionStatus::Fail("absent sorted cursor data");
}
if (!proto.GetColumnShardSimple().HasSourceId()) {
return TConclusionStatus::Fail("incorrect source id for cursor initialization");
@@ -223,6 +219,68 @@ public:
}
};
+class TNotSortedSimpleScanCursor: public TSimpleScanCursor {
+private:
+ YDB_READONLY(ui64, SourceId, 0);
+ YDB_READONLY(ui32, RecordIndex, 0);
+
+ virtual void DoSerializeToProto(NKikimrKqp::TEvKqpScanCursor& proto) const override {
+ auto& data = *proto.MutableColumnShardNotSortedSimple();
+ data.SetSourceId(SourceId);
+ data.SetStartRecordIndex(RecordIndex);
+ }
+
+ virtual const std::shared_ptr<arrow::RecordBatch>& DoGetPKCursor() const override {
+ return Default<std::shared_ptr<arrow::RecordBatch>>();
+ }
+
+ virtual bool IsInitialized() const override {
+ return !!SourceId;
+ }
+
+ virtual bool DoCheckEntityIsBorder(const ICursorEntity& entity, bool& usage) const override {
+ if (SourceId != entity.GetEntityId()) {
+ return false;
+ }
+ AFL_VERIFY(RecordIndex <= entity.GetEntityRecordsCount());
+ usage = RecordIndex < entity.GetEntityRecordsCount();
+ return true;
+ }
+
+ virtual TConclusionStatus DoDeserializeFromProto(const NKikimrKqp::TEvKqpScanCursor& proto) override {
+ if (!proto.HasColumnShardNotSortedSimple()) {
+ return TConclusionStatus::Fail("absent unsorted cursor data");
+ }
+ auto& data = proto.GetColumnShardNotSortedSimple();
+ if (!data.HasSourceId()) {
+ return TConclusionStatus::Fail("incorrect source id for cursor initialization");
+ }
+ SourceId = data.GetSourceId();
+ if (!data.HasStartRecordIndex()) {
+ return TConclusionStatus::Fail("incorrect record index for cursor initialization");
+ }
+ RecordIndex = data.GetStartRecordIndex();
+ return TConclusionStatus::Success();
+ }
+
+ virtual bool DoCheckSourceIntervalUsage(const ui64 sourceId, const ui32 indexStart, const ui32 recordsCount) const override {
+ AFL_VERIFY(sourceId == SourceId);
+ if (indexStart >= RecordIndex) {
+ return true;
+ }
+ AFL_VERIFY(indexStart + recordsCount <= RecordIndex);
+ return false;
+ }
+
+public:
+ TNotSortedSimpleScanCursor() = default;
+
+ TNotSortedSimpleScanCursor(const ui64 portionId, const ui32 recordIndex)
+ : SourceId(portionId)
+ , RecordIndex(recordIndex) {
+ }
+};
+
class TPlainScanCursor: public IScanCursor {
private:
YDB_READONLY_DEF(std::shared_ptr<arrow::RecordBatch>, PrimaryKey);
@@ -244,7 +302,7 @@ private:
return TConclusionStatus::Success();
}
- virtual bool DoCheckEntityIsBorder(const std::shared_ptr<ICursorEntity>& /*entity*/, bool& usage) const override {
+ virtual bool DoCheckEntityIsBorder(const ICursorEntity& /*entity*/, bool& usage) const override {
usage = true;
return true;
}
diff --git a/ydb/core/tx/columnshard/engines/reader/abstract/constructor.cpp b/ydb/core/tx/columnshard/engines/reader/abstract/constructor.cpp
index aaaf940f8d..f28536de30 100644
--- a/ydb/core/tx/columnshard/engines/reader/abstract/constructor.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/abstract/constructor.cpp
@@ -6,7 +6,7 @@
namespace NKikimr::NOlap::NReader {
-NKikimr::TConclusionStatus IScannerConstructor::ParseProgram(const TVersionedIndex* vIndex, const NKikimrSchemeOp::EOlapProgramType programType,
+TConclusionStatus IScannerConstructor::ParseProgram(const TVersionedIndex* vIndex, const NKikimrSchemeOp::EOlapProgramType programType,
const TString& serializedProgram, TReadDescription& read, const NArrow::NSSA::IColumnResolver& columnResolver) const {
std::set<TString> namesChecker;
if (serializedProgram.empty()) {
@@ -32,7 +32,7 @@ NKikimr::TConclusionStatus IScannerConstructor::ParseProgram(const TVersionedInd
}
}
-NKikimr::TConclusion<std::shared_ptr<TReadMetadataBase>> IScannerConstructor::BuildReadMetadata(
+TConclusion<std::shared_ptr<TReadMetadataBase>> IScannerConstructor::BuildReadMetadata(
const NColumnShard::TColumnShard* self, const TReadDescription& read) const {
TConclusion<std::shared_ptr<TReadMetadataBase>> result = DoBuildReadMetadata(self, read);
if (result.IsFail()) {
@@ -46,7 +46,7 @@ NKikimr::TConclusion<std::shared_ptr<TReadMetadataBase>> IScannerConstructor::Bu
}
}
-NKikimr::TConclusion<std::shared_ptr<NKikimr::NOlap::IScanCursor>> IScannerConstructor::BuildCursorFromProto(
+TConclusion<std::shared_ptr<NKikimr::NOlap::IScanCursor>> IScannerConstructor::BuildCursorFromProto(
const NKikimrKqp::TEvKqpScanCursor& proto) const {
auto result = DoBuildCursor();
if (!result) {
diff --git a/ydb/core/tx/columnshard/engines/reader/abstract/constructor.h b/ydb/core/tx/columnshard/engines/reader/abstract/constructor.h
index 3ad1e86821..02e2e74b06 100644
--- a/ydb/core/tx/columnshard/engines/reader/abstract/constructor.h
+++ b/ydb/core/tx/columnshard/engines/reader/abstract/constructor.h
@@ -14,13 +14,13 @@ class TScannerConstructorContext {
private:
YDB_READONLY(TSnapshot, Snapshot, TSnapshot::Zero());
YDB_READONLY(ui32, ItemsLimit, 0);
- YDB_READONLY(bool, Reverse, false);
+ YDB_READONLY(TReadMetadataBase::ESorting, Sorting, TReadMetadataBase::ESorting::NONE);
public:
- TScannerConstructorContext(const TSnapshot& snapshot, const ui32 itemsLimit, const bool reverse)
+ TScannerConstructorContext(const TSnapshot& snapshot, const ui32 itemsLimit, const TReadMetadataBase::ESorting sorting)
: Snapshot(snapshot)
, ItemsLimit(itemsLimit)
- , Reverse(reverse) {
+ , Sorting(sorting) {
}
};
@@ -28,7 +28,7 @@ class IScannerConstructor {
protected:
const TSnapshot Snapshot;
const ui64 ItemsLimit;
- const bool IsReverse;
+ const TReadMetadataBase::ESorting Sorting;
TConclusionStatus ParseProgram(const TVersionedIndex* vIndex, const NKikimrSchemeOp::EOlapProgramType programType,
const TString& serializedProgram, TReadDescription& read, const NArrow::NSSA::IColumnResolver& columnResolver) const;
@@ -44,7 +44,7 @@ public:
IScannerConstructor(const TScannerConstructorContext& context)
: Snapshot(context.GetSnapshot())
, ItemsLimit(context.GetItemsLimit())
- , IsReverse(context.GetReverse()) {
+ , Sorting(context.GetSorting()) {
}
TConclusion<std::shared_ptr<IScanCursor>> BuildCursorFromProto(const NKikimrKqp::TEvKqpScanCursor& proto) const;
diff --git a/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h b/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h
index ab0ded4d32..c375ff2ccf 100644
--- a/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h
+++ b/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h
@@ -33,11 +33,7 @@ public:
// Holds all metadata that is needed to perform read/scan
class TReadMetadataBase {
public:
- enum class ESorting {
- NONE = 0 /* "not_sorted" */,
- ASC /* "ascending" */,
- DESC /* "descending" */,
- };
+ using ESorting = ERequestSorting;
private:
YDB_ACCESSOR_DEF(TString, ScanIdentifier);
@@ -125,7 +121,6 @@ public:
void SetPKRangesFilter(const std::shared_ptr<TPKRangesFilter>& value) {
AFL_VERIFY(value);
- Y_ABORT_UNLESS(IsSorted() && value->IsReverse() == IsDescSorted());
Y_ABORT_UNLESS(!PKRangesFilter);
PKRangesFilter = value;
if (ResultIndexSchema) {
diff --git a/ydb/core/tx/columnshard/engines/reader/actor/actor.cpp b/ydb/core/tx/columnshard/engines/reader/actor/actor.cpp
index d0c96097a6..c424c84b9a 100644
--- a/ydb/core/tx/columnshard/engines/reader/actor/actor.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/actor/actor.cpp
@@ -7,8 +7,8 @@
#include <yql/essentials/core/issue/yql_issue.h>
namespace NKikimr::NOlap::NReader {
-constexpr TDuration SCAN_HARD_TIMEOUT = TDuration::Minutes(10);
-constexpr TDuration SCAN_HARD_TIMEOUT_GAP = TDuration::Seconds(5);
+constexpr TDuration SCAN_HARD_TIMEOUT = TDuration::Minutes(60);
+constexpr TDuration COMPUTE_HARD_TIMEOUT = TDuration::Minutes(10);
void TColumnShardScan::PassAway() {
Send(ResourceSubscribeActorId, new TEvents::TEvPoisonPill);
@@ -34,7 +34,7 @@ TColumnShardScan::TColumnShardScan(const TActorId& columnShardActorId, const TAc
, DataFormat(dataFormat)
, TabletId(tabletId)
, ReadMetadataRange(readMetadataRange)
- , Timeout(timeout ? timeout + SCAN_HARD_TIMEOUT_GAP : SCAN_HARD_TIMEOUT)
+ , Timeout(timeout ? timeout : COMPUTE_HARD_TIMEOUT)
, ScanCountersPool(scanCountersPool, TValidator::CheckNotNull(ReadMetadataRange)->GetProgram().GetGraphOptional())
, Stats(NTracing::TTraceClient::GetLocalClient("SHARD", ::ToString(TabletId) /*, "SCAN_TXID:" + ::ToString(TxId)*/))
, ComputeShardingPolicy(computeShardingPolicy) {
@@ -65,7 +65,7 @@ void TColumnShardScan::Bootstrap(const TActorContext& ctx) {
ScheduleWakeup(TMonotonic::Now() + Timeout / 5);
// propagate self actor id // TODO: FlagSubscribeOnSession ?
- Send(ScanComputeActorId, new NKqp::TEvKqpCompute::TEvScanInitActor(ScanId, ctx.SelfID, ScanGen, TabletId),
+ Send(ScanComputeActorId, new NKqp::TEvKqpCompute::TEvScanInitActor(ScanId, ctx.SelfID, ScanGen, TabletId, true),
IEventHandle::FlagTrackDelivery);
Become(&TColumnShardScan::StateScan);
@@ -93,6 +93,7 @@ void TColumnShardScan::HandleScan(NColumnShard::TEvPrivate::TEvTaskProcessedResu
void TColumnShardScan::HandleScan(NKqp::TEvKqpCompute::TEvScanDataAck::TPtr& ev) {
auto g = Stats->MakeGuard("ack");
+
AFL_VERIFY(!AckReceivedInstant);
AckReceivedInstant = TMonotonic::Now();
@@ -111,6 +112,12 @@ void TColumnShardScan::HandleScan(NKqp::TEvKqpCompute::TEvScanDataAck::TPtr& ev)
ContinueProcessing();
}
+void TColumnShardScan::HandleScan(NKqp::TEvKqpCompute::TEvScanPing::TPtr&) {
+ if (!AckReceivedInstant) {
+ LastResultInstant = TMonotonic::Now();
+ }
+}
+
void TColumnShardScan::HandleScan(NActors::TEvents::TEvPoison::TPtr& /*ev*/) noexcept {
PassAway();
}
@@ -165,10 +172,7 @@ void TColumnShardScan::HandleScan(TEvents::TEvWakeup::TPtr& /*ev*/) {
<< " txId: " << TxId << " scanId: " << ScanId << " gen: " << ScanGen << " tablet: " << TabletId);
CheckHanging(true);
- if (!!AckReceivedInstant && TMonotonic::Now() >= GetDeadline() + Timeout * 0.5) {
- SendScanError("ColumnShard scanner timeout: HAS_ACK=1");
- Finish(NColumnShard::TScanCounters::EStatusFinish::Deadline);
- } else if (!AckReceivedInstant && TMonotonic::Now() >= GetDeadline()) {
+ if (!AckReceivedInstant && TMonotonic::Now() >= GetComputeDeadline()) {
SendScanError("ColumnShard scanner timeout: HAS_ACK=0");
Finish(NColumnShard::TScanCounters::EStatusFinish::Deadline);
} else {
@@ -245,18 +249,20 @@ bool TColumnShardScan::ProduceResults() noexcept {
ACFL_DEBUG("stage", "data_format")("batch_size", NArrow::GetTableDataSize(Result->ArrowBatch))("num_rows", numRows)(
"batch_columns", JoinSeq(",", batch->schema()->field_names()));
}
- if (CurrentLastReadKey) {
+ if (CurrentLastReadKey && result.GetScanCursor()->GetPKCursor() && CurrentLastReadKey->GetPKCursor()) {
auto pNew = NArrow::TReplaceKey::FromBatch(result.GetScanCursor()->GetPKCursor(), 0);
auto pOld = NArrow::TReplaceKey::FromBatch(CurrentLastReadKey->GetPKCursor(), 0);
- if (!ReadMetadataRange->IsDescSorted()) {
+ if (ReadMetadataRange->IsAscSorted()) {
AFL_VERIFY(!(pNew < pOld))("old", pOld.DebugJson().GetStringRobust())("new", pNew.DebugJson().GetStringRobust());
- } else {
+ } else if (ReadMetadataRange->IsDescSorted()) {
AFL_VERIFY(!(pOld < pNew))("old", pOld.DebugJson().GetStringRobust())("new", pNew.DebugJson().GetStringRobust());
}
}
CurrentLastReadKey = result.GetScanCursor();
- Result->LastKey = ConvertLastKey(CurrentLastReadKey->GetPKCursor());
+ if (CurrentLastReadKey->GetPKCursor()) {
+ Result->LastKey = ConvertLastKey(CurrentLastReadKey->GetPKCursor());
+ }
Result->LastCursorProto = CurrentLastReadKey->SerializeToProto();
SendResult(false, false);
ScanIterator->OnSentDataFromInterval(result.GetNotFinishedIntervalIdx());
@@ -423,11 +429,14 @@ void TColumnShardScan::ScheduleWakeup(const TMonotonic deadline) {
}
}
-TMonotonic TColumnShardScan::GetDeadline() const {
- AFL_VERIFY(StartInstant);
- if (LastResultInstant) {
- return *LastResultInstant + Timeout;
- }
- return *StartInstant + Timeout;
+TMonotonic TColumnShardScan::GetScanDeadline() const {
+ AFL_VERIFY(!!AckReceivedInstant);
+ return *AckReceivedInstant + SCAN_HARD_TIMEOUT;
}
+
+TMonotonic TColumnShardScan::GetComputeDeadline() const {
+ AFL_VERIFY(!AckReceivedInstant);
+ return (LastResultInstant ? *LastResultInstant : *StartInstant) + Timeout;
+}
+
} // namespace NKikimr::NOlap::NReader
diff --git a/ydb/core/tx/columnshard/engines/reader/actor/actor.h b/ydb/core/tx/columnshard/engines/reader/actor/actor.h
index a66d1ad7f6..09c5f4279a 100644
--- a/ydb/core/tx/columnshard/engines/reader/actor/actor.h
+++ b/ydb/core/tx/columnshard/engines/reader/actor/actor.h
@@ -46,6 +46,7 @@ private:
"TabletId", TabletId)("ScanId", ScanId)("TxId", TxId)("ScanGen", ScanGen)("task_identifier", ReadMetadataRange->GetScanIdentifier()));
switch (ev->GetTypeRewrite()) {
hFunc(NKqp::TEvKqpCompute::TEvScanDataAck, HandleScan);
+ hFunc(NKqp::TEvKqpCompute::TEvScanPing, HandleScan);
hFunc(NKqp::TEvKqp::TEvAbortExecution, HandleScan);
hFunc(NActors::TEvents::TEvPoison, HandleScan);
hFunc(TEvents::TEvUndelivered, HandleScan);
@@ -60,6 +61,8 @@ private:
void HandleScan(NKqp::TEvKqpCompute::TEvScanDataAck::TPtr& ev);
+ void HandleScan(NKqp::TEvKqpCompute::TEvScanPing::TPtr& ev);
+
// Returns true if it was able to produce new batch
bool ProduceResults() noexcept;
@@ -109,7 +112,9 @@ private:
void ScheduleWakeup(const TMonotonic deadline);
- TMonotonic GetDeadline() const;
+ TMonotonic GetScanDeadline() const;
+
+ TMonotonic GetComputeDeadline() const;
private:
const TActorId ColumnShardActorId;
diff --git a/ydb/core/tx/columnshard/engines/reader/common/description.h b/ydb/core/tx/columnshard/engines/reader/common/description.h
index a08291316e..d3dba0d06e 100644
--- a/ydb/core/tx/columnshard/engines/reader/common/description.h
+++ b/ydb/core/tx/columnshard/engines/reader/common/description.h
@@ -1,12 +1,18 @@
#pragma once
-#include <ydb/core/tx/columnshard/common/snapshot.h>
#include <ydb/core/tx/columnshard/common/path_id.h>
+#include <ydb/core/tx/columnshard/common/snapshot.h>
#include <ydb/core/tx/columnshard/engines/predicate/filter.h>
#include <ydb/core/tx/program/program.h>
#include <ydb/library/yql/dq/actors/protos/dq_stats.pb.h>
namespace NKikimr::NOlap::NReader {
+enum class ERequestSorting {
+ NONE = 0 /* "not_sorted" */,
+ ASC /* "ascending" */,
+ DESC /* "descending" */,
+};
+
// Describes read/scan request
struct TReadDescription {
private:
@@ -14,6 +20,7 @@ private:
TProgramContainer Program;
std::shared_ptr<IScanCursor> ScanCursor;
YDB_ACCESSOR_DEF(TString, ScanIdentifier);
+ YDB_ACCESSOR(ERequestSorting, Sorting, ERequestSorting::NONE);
public:
// Table
@@ -40,9 +47,10 @@ public:
ScanCursor = cursor;
}
- TReadDescription(const TSnapshot& snapshot, const bool isReverse)
+ TReadDescription(const TSnapshot& snapshot, const ERequestSorting sorting)
: Snapshot(snapshot)
- , PKRangesFilter(std::make_shared<NOlap::TPKRangesFilter>(isReverse)) {
+ , Sorting(sorting)
+ , PKRangesFilter(std::make_shared<NOlap::TPKRangesFilter>()) {
}
void SetProgram(TProgramContainer&& value) {
@@ -58,4 +66,4 @@ public:
}
};
-}
+} // namespace NKikimr::NOlap::NReader
diff --git a/ydb/core/tx/columnshard/engines/reader/common/ya.make b/ydb/core/tx/columnshard/engines/reader/common/ya.make
index 8c7beb01bd..d2b891818e 100644
--- a/ydb/core/tx/columnshard/engines/reader/common/ya.make
+++ b/ydb/core/tx/columnshard/engines/reader/common/ya.make
@@ -13,4 +13,6 @@ PEERDIR(
ydb/core/formats/arrow/reader
)
+GENERATE_ENUM_SERIALIZATION(description.h)
+
END()
diff --git a/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/read_metadata.cpp b/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/read_metadata.cpp
index e3cf723128..1ad7b8d3d3 100644
--- a/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/read_metadata.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/read_metadata.cpp
@@ -43,8 +43,8 @@ TConclusionStatus TReadMetadata::Init(
}
TReadMetadata::TReadMetadata(const std::shared_ptr<TVersionedIndex>& schemaIndex, const TReadDescription& read)
- : TBase(schemaIndex, read.PKRangesFilter->IsReverse() ? TReadMetadataBase::ESorting::DESC : TReadMetadataBase::ESorting::ASC,
- read.GetProgram(), schemaIndex->GetSchemaVerified(read.GetSnapshot()), read.GetSnapshot(), read.GetScanCursorOptional())
+ : TBase(schemaIndex, read.GetSorting(), read.GetProgram(), schemaIndex->GetSchemaVerified(read.GetSnapshot()), read.GetSnapshot(),
+ read.GetScanCursorOptional())
, PathId(read.PathId)
, ReadStats(std::make_shared<TReadStats>()) {
}
diff --git a/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/default_fetching.h b/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/default_fetching.h
index f9dbeb3b9c..4a0cc71cb5 100644
--- a/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/default_fetching.h
+++ b/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/default_fetching.h
@@ -6,6 +6,15 @@ namespace NKikimr::NOlap::NReader::NCommon {
class TDefaultFetchLogic: public IKernelFetchLogic {
private:
using TBase = IKernelFetchLogic;
+ std::optional<bool> IsEmptyChunks;
+
+ std::shared_ptr<NArrow::NAccessor::TColumnLoader> GetColumnLoader(const std::shared_ptr<NCommon::IDataSource>& source) const {
+ if (auto loader = source->GetSourceSchema()->GetColumnLoaderOptional(GetEntityId())) {
+ return loader;
+ }
+ AFL_VERIFY(IsEmptyChunks && *IsEmptyChunks);
+ return source->GetContext()->GetReadMetadata()->GetResultSchema()->GetColumnLoaderVerified(GetEntityId());
+ }
class TChunkRestoreInfo {
private:
@@ -50,8 +59,7 @@ private:
chunks.emplace_back(i.ExtractDataVerified());
}
- TPortionDataAccessor::TPreparedColumn column(
- std::move(chunks), context.GetSource()->GetSourceSchema()->GetColumnLoaderVerified(GetEntityId()));
+ TPortionDataAccessor::TPreparedColumn column(std::move(chunks), GetColumnLoader(context.GetSource()));
context.GetAccessors().AddVerified(GetEntityId(), column.AssembleAccessor().DetachResult(), true);
}
@@ -71,9 +79,10 @@ private:
virtual void DoStart(TReadActionsCollection& nextRead, TFetchingResultContext& context) override {
auto source = context.GetSource();
auto columnChunks = source->GetStageData().GetPortionAccessor().GetColumnChunksPointers(GetEntityId());
+ IsEmptyChunks.emplace(columnChunks.empty());
if (columnChunks.empty()) {
- ColumnChunks.emplace_back(source->GetRecordsCount(), TPortionDataAccessor::TAssembleBlobInfo(source->GetRecordsCount(),
- source->GetSourceSchema()->GetExternalDefaultValueVerified(GetEntityId())));
+ ColumnChunks.emplace_back(source->GetRecordsCount(),
+ TPortionDataAccessor::TAssembleBlobInfo(source->GetRecordsCount(), GetColumnLoader(context.GetSource())->GetDefaultValue()));
return;
}
StorageId = source->GetColumnStorageId(GetEntityId());
diff --git a/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetching.cpp b/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetching.cpp
index dfb83d035a..f36ebb1a81 100644
--- a/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetching.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/fetching.cpp
@@ -182,7 +182,7 @@ void TFetchingScriptBuilder::AddAssembleStep(
*actualSet = *actualSet - *cross;
}
if (!actualSet->IsEmpty()) {
- AddAllocation(notSequentialColumnIds, stage, EMemType::RawSequential);
+ AddAllocation(actualSet->GetColumnIds(), stage, EMemType::RawSequential);
AddStep(std::make_shared<TOptionalAssemblerStep>(actualSet, purposeId));
}
} else {
diff --git a/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/iterator.cpp b/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/iterator.cpp
index 093daee5a6..609773c897 100644
--- a/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/iterator.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/iterator.cpp
@@ -9,7 +9,6 @@ TColumnShardScanIterator::TColumnShardScanIterator(const std::shared_ptr<TReadCo
, ReadMetadata(context->GetReadMetadataPtrVerifiedAs<TReadMetadata>())
, ReadyResults(context->GetCounters()) {
IndexedData = ReadMetadata->BuildReader(Context);
- Y_ABORT_UNLESS(Context->GetReadMetadata()->IsSorted());
}
TConclusion<std::shared_ptr<TPartialReadResult>> TColumnShardScanIterator::GetBatch() {
diff --git a/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/source.h b/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/source.h
index 466250fa20..c8c62b09af 100644
--- a/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/source.h
+++ b/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/source.h
@@ -114,7 +114,7 @@ public:
}
};
-class IDataSource: public ICursorEntity, public NArrow::NSSA::IDataSource, public NColumnShard::TMonitoringObjectsCounter<IDataSource> {
+class IDataSource: public ICursorEntity, public NArrow::NSSA::IDataSource {
private:
YDB_READONLY(ui64, SourceId, 0);
YDB_READONLY(ui32, SourceIdx, 0);
@@ -153,20 +153,21 @@ private:
const std::shared_ptr<IDataSource>& sourcePtr, const TFetchingScriptCursor& step, const TColumnsSetIds& columns) = 0;
virtual void DoAssembleColumns(const std::shared_ptr<TColumnsSet>& columns, const bool sequential) = 0;
- NEvLog::TLogsThread Events;
+ std::optional<NEvLog::TLogsThread> Events;
+ std::unique_ptr<TFetchedData> StageData;
protected:
std::vector<std::shared_ptr<NGroupedMemoryManager::TAllocationGuard>> ResourceGuards;
- std::unique_ptr<TFetchedData> StageData;
std::unique_ptr<TFetchedResult> StageResult;
public:
void AddEvent(const TString& evDescription) {
- Events.AddEvent(evDescription);
+ AFL_VERIFY(!!Events);
+ Events->AddEvent(evDescription);
}
TString GetEventsReport() const {
- return Events.DebugString();
+ return Events ? Events->DebugString() : Default<TString>();
}
TExecutionContext& MutableExecutionContext() {
@@ -212,6 +213,7 @@ public:
, RecordsCount(recordsCount)
, ShardingVersionOptional(shardingVersion)
, HasDeletions(hasDeletions) {
+ FOR_DEBUG_LOG(NKikimrServices::COLUMNSHARD_SCAN_EVLOG, Events.emplace(NEvLog::TLogsThread()));
FOR_DEBUG_LOG(NKikimrServices::COLUMNSHARD_SCAN_EVLOG, AddEvent("c"));
}
@@ -315,8 +317,20 @@ public:
return false;
}
- bool HasStageData() const {
- return !!StageData;
+ void InitStageData(std::unique_ptr<TFetchedData>&& data) {
+ AFL_VERIFY(!StageData);
+ StageData = std::move(data);
+ }
+
+ std::unique_ptr<TFetchedData> ExtractStageData() {
+ AFL_VERIFY(StageData);
+ auto result = std::move(StageData);
+ StageData.reset();
+ return std::move(result);
+ }
+
+ void ClearStageData() {
+ StageData.reset();
}
const TFetchedData& GetStageData() const {
@@ -324,6 +338,10 @@ public:
return *StageData;
}
+ bool HasStageData() const {
+ return !!StageData;
+ }
+
TFetchedData& MutableStageData() {
AFL_VERIFY(StageData);
return *StageData;
diff --git a/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/ya.make b/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/ya.make
index 1f3ed01155..6dd94c3943 100644
--- a/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/ya.make
+++ b/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/ya.make
@@ -15,6 +15,7 @@ SRCS(
PEERDIR(
ydb/core/tx/columnshard/engines/scheme
+ ydb/core/formats/arrow/accessor/sub_columns
yql/essentials/minikql
ydb/core/util/evlog
)
diff --git a/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/constructor.cpp b/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/constructor.cpp
index d3654d881f..ee01720c60 100644
--- a/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/constructor.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/constructor.cpp
@@ -35,7 +35,11 @@ NKikimr::TConclusion<std::shared_ptr<TReadMetadataBase>> TIndexScannerConstructo
TDataStorageAccessor dataAccessor(insertTable, index);
AFL_VERIFY(read.PathId);
- auto readMetadata = std::make_shared<TReadMetadata>(index->CopyVersionedIndexPtr(), read);
+ auto readCopy = read;
+ if (readCopy.GetSorting() == ERequestSorting::NONE) {
+ readCopy.SetSorting(ERequestSorting::ASC);
+ }
+ auto readMetadata = std::make_shared<TReadMetadata>(index->CopyVersionedIndexPtr(), readCopy);
auto initResult = readMetadata->Init(self, read, dataAccessor);
if (!initResult) {
diff --git a/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.cpp b/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.cpp
index 1c39452987..c0300c23ae 100644
--- a/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.cpp
@@ -28,7 +28,7 @@ void IDataSource::RegisterInterval(TFetchingInterval& interval, const std::share
if (AtomicCas(&SourceStartedFlag, 1, 0)) {
SetMemoryGroupId(interval.GetIntervalId());
AFL_VERIFY(FetchingPlan);
- StageData = std::make_unique<TFetchedData>(GetExclusiveIntervalOnly(), GetRecordsCount());
+ InitStageData(std::make_unique<TFetchedData>(GetExclusiveIntervalOnly(), GetRecordsCount()));
AFL_DEBUG(NKikimrServices::TX_COLUMNSHARD_SCAN)("InitFetchingPlan", FetchingPlan->DebugString())("source_idx", GetSourceIdx());
NActors::TLogContextGuard logGuard(NActors::TLogContextBuilder::Build()("source", GetSourceIdx())("method", "InitFetchingPlan"));
if (GetContext()->IsAborted()) {
@@ -64,15 +64,15 @@ void IDataSource::DoOnEmptyStageData(const std::shared_ptr<NCommon::IDataSource>
StageResult = TFetchedResult::BuildEmpty();
} else {
StageResult = std::make_unique<TFetchedResult>(
- std::move(StageData), GetContext()->GetMergeColumns()->GetColumnIds(), *GetContext()->GetCommonContext()->GetResolver());
+ ExtractStageData(), GetContext()->GetMergeColumns()->GetColumnIds(), *GetContext()->GetCommonContext()->GetResolver());
}
- StageData.reset();
+ ClearStageData();
}
void IDataSource::DoBuildStageResult(const std::shared_ptr<NCommon::IDataSource>& /*sourcePtr*/) {
TMemoryProfileGuard mpg("SCAN_PROFILE::STAGE_RESULT", IS_DEBUG_LOG_ENABLED(NKikimrServices::TX_COLUMNSHARD_SCAN_MEMORY));
- StageResult = std::make_unique<TFetchedResult>(std::move(StageData), *GetContext()->GetCommonContext()->GetResolver());
- StageData.reset();
+ StageResult = std::make_unique<TFetchedResult>(ExtractStageData(), *GetContext()->GetCommonContext()->GetResolver());
+ ClearStageData();
}
void TPortionDataSource::NeedFetchColumns(const std::set<ui32>& columnIds, TBlobsAction& blobsAction,
@@ -111,15 +111,15 @@ bool TPortionDataSource::DoStartFetchingColumns(
const std::shared_ptr<NCommon::IDataSource>& sourcePtr, const TFetchingScriptCursor& step, const TColumnsSetIds& columns) {
AFL_DEBUG(NKikimrServices::TX_COLUMNSHARD_SCAN)("event", step.GetName());
AFL_VERIFY(columns.GetColumnsCount());
- AFL_VERIFY(!StageData->GetAppliedFilter() || !StageData->GetAppliedFilter()->IsTotalDenyFilter());
+ AFL_VERIFY(!GetStageData().GetAppliedFilter() || !GetStageData().GetAppliedFilter()->IsTotalDenyFilter());
auto& columnIds = columns.GetColumnIds();
AFL_DEBUG(NKikimrServices::TX_COLUMNSHARD_SCAN)("event", step.GetName())("fetching_info", step.DebugString());
TBlobsAction action(GetContext()->GetCommonContext()->GetStoragesManager(), NBlobOperations::EConsumer::SCAN);
{
THashMap<TChunkAddress, TPortionDataAccessor::TAssembleBlobInfo> nullBlocks;
- NeedFetchColumns(columnIds, action, nullBlocks, StageData->GetAppliedFilter());
- StageData->AddDefaults(std::move(nullBlocks));
+ NeedFetchColumns(columnIds, action, nullBlocks, GetStageData().GetAppliedFilter());
+ MutableStageData().AddDefaults(std::move(nullBlocks));
}
auto readActions = action.GetReadingActions();
@@ -184,7 +184,7 @@ public:
} // namespace
bool TPortionDataSource::DoStartFetchingAccessor(const std::shared_ptr<IDataSource>& sourcePtr, const TFetchingScriptCursor& step) {
- AFL_VERIFY(!StageData->HasPortionAccessor());
+ AFL_VERIFY(!GetStageData().HasPortionAccessor());
AFL_DEBUG(NKikimrServices::TX_COLUMNSHARD_SCAN)("event", step.GetName())("fetching_info", step.DebugString());
std::shared_ptr<TDataAccessorsRequest> request = std::make_shared<TDataAccessorsRequest>("PLAIN::" + step.GetName());
diff --git a/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.h b/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.h
index e43e8d75cd..3823b0a1cc 100644
--- a/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.h
+++ b/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/source.h
@@ -239,7 +239,7 @@ public:
}
virtual bool NeedAccessorsFetching() const override {
- return !StageData || !StageData->HasPortionAccessor();
+ return !HasStageData() || !GetStageData().HasPortionAccessor();
}
virtual bool DoAddTxConflict() override {
diff --git a/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.cpp b/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.cpp
index 07b1a7653b..0363e2385c 100644
--- a/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.cpp
@@ -43,4 +43,14 @@ NKikimr::TConclusion<std::shared_ptr<TReadMetadataBase>> TIndexScannerConstructo
return static_pointer_cast<TReadMetadataBase>(readMetadata);
}
+std::shared_ptr<NKikimr::NOlap::IScanCursor> TIndexScannerConstructor::DoBuildCursor() const {
+ switch (Sorting) {
+ case ERequestSorting::ASC:
+ case ERequestSorting::DESC:
+ return std::make_shared<TSimpleScanCursor>();
+ case ERequestSorting::NONE:
+ return std::make_shared<TNotSortedSimpleScanCursor>();
+ }
+}
+
} // namespace NKikimr::NOlap::NReader::NSimple
diff --git a/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.h b/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.h
index 76596f8dd9..7f52b72156 100644
--- a/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.h
+++ b/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/constructor.h
@@ -13,9 +13,7 @@ private:
using TBase = IScannerConstructor;
static const inline TFactory::TRegistrator<TIndexScannerConstructor> Registrator =
TFactory::TRegistrator<TIndexScannerConstructor>(GetClassNameStatic());
- virtual std::shared_ptr<IScanCursor> DoBuildCursor() const override {
- return std::make_shared<TSimpleScanCursor>();
- }
+ virtual std::shared_ptr<IScanCursor> DoBuildCursor() const override;
protected:
virtual TConclusion<std::shared_ptr<TReadMetadataBase>> DoBuildReadMetadata(const NColumnShard::TColumnShard* self, const TReadDescription& read) const override;
diff --git a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections.cpp b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections.cpp
new file mode 100644
index 0000000000..97beea7c9d
--- /dev/null
+++ b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections.cpp
@@ -0,0 +1,100 @@
+#include "collections.h"
+
+#include <ydb/core/tx/columnshard/engines/predicate/filter.h>
+
+namespace NKikimr::NOlap::NReader::NSimple {
+
+std::shared_ptr<IDataSource> TScanWithLimitCollection::DoExtractNext() {
+ AFL_VERIFY(HeapSources.size());
+ std::pop_heap(HeapSources.begin(), HeapSources.end());
+ auto result = HeapSources.back().Construct(Context);
+ AFL_VERIFY(FetchingInFlightSources.emplace(TCompareKeyForScanSequence::FromFinish(result)).second);
+ auto predPosition = std::move(HeapSources.back());
+ HeapSources.pop_back();
+ if (HeapSources.size()) {
+ FullIntervalsFetchingCount.Add(GetInFlightIntervalsCount(predPosition.GetStart(), HeapSources.front().GetStart()));
+ } else {
+ FullIntervalsFetchingCount = FetchingInFlightSources.size() + FinishedSources.size();
+ }
+ FetchingInFlightCount.Inc();
+ return result;
+}
+
+void TScanWithLimitCollection::DoOnSourceFinished(const std::shared_ptr<IDataSource>& source) {
+ FetchingInFlightCount.Dec();
+ AFL_VERIFY(FetchingInFlightSources.erase(TCompareKeyForScanSequence::FromFinish(source)));
+ AFL_VERIFY(FinishedSources.emplace(TCompareKeyForScanSequence::FromFinish(source), TFinishedDataSource(source)).second);
+ while (FinishedSources.size() && (HeapSources.empty() || FinishedSources.begin()->first < HeapSources.front().GetStart())) {
+ auto finishedSource = FinishedSources.begin()->second;
+ if (!finishedSource.GetRecordsCount() && InFlightLimit < GetMaxInFlight()) {
+ InFlightLimit = 2 * InFlightLimit;
+ }
+ FetchedCount += finishedSource.GetRecordsCount();
+ FinishedSources.erase(FinishedSources.begin());
+ if (Context->IsActive()) {
+ --FullIntervalsFetchingCount;
+ }
+ AFL_DEBUG(NKikimrServices::TX_COLUMNSHARD)("event", "source_finished")("source_id", finishedSource.GetSourceId())(
+ "source_idx", finishedSource.GetSourceIdx())("limit", Limit)("fetched", finishedSource.GetRecordsCount());
+ if (Limit <= FetchedCount && HeapSources.size()) {
+ AFL_NOTICE(NKikimrServices::TX_COLUMNSHARD)("event", "limit_exhausted")("limit", Limit)("fetched", FetchedCount);
+ HeapSources.clear();
+ FullIntervalsFetchingCount = FinishedSources.size() + FetchingInFlightSources.size();
+ }
+ }
+}
+
+ui32 TScanWithLimitCollection::GetInFlightIntervalsCount(const TCompareKeyForScanSequence& from, const TCompareKeyForScanSequence& to) const {
+ AFL_VERIFY(from < to);
+ ui32 inFlightCountLocal = 0;
+ {
+ auto itUpperFinishedFrom = FinishedSources.upper_bound(from);
+ auto itUpperFinishedTo = FinishedSources.upper_bound(to);
+ for (auto&& it = itUpperFinishedFrom; it != itUpperFinishedTo; ++it) {
+ ++inFlightCountLocal;
+ }
+ }
+ {
+ auto itUpperFetchingFrom = FetchingInFlightSources.upper_bound(from);
+ auto itUpperFetchingTo = FetchingInFlightSources.upper_bound(to);
+ for (auto&& it = itUpperFetchingFrom; it != itUpperFetchingTo; ++it) {
+ ++inFlightCountLocal;
+ }
+ }
+ return inFlightCountLocal;
+}
+
+TScanWithLimitCollection::TScanWithLimitCollection(
+ const std::shared_ptr<TSpecialReadContext>& context, std::deque<TSourceConstructor>&& sources, const std::shared_ptr<IScanCursor>& cursor)
+ : TBase(context)
+ , Limit((ui64)Context->GetCommonContext()->GetReadMetadata()->GetLimitRobust()) {
+ if (cursor && cursor->IsInitialized()) {
+ for (auto&& i : sources) {
+ bool usage = false;
+ if (!context->GetCommonContext()->GetScanCursor()->CheckEntityIsBorder(i, usage)) {
+ continue;
+ }
+ if (usage) {
+ i.SetIsStartedByCursor();
+ }
+ break;
+ }
+ }
+
+ HeapSources = std::move(sources);
+ std::make_heap(HeapSources.begin(), HeapSources.end());
+}
+
+ISourcesCollection::ISourcesCollection(const std::shared_ptr<TSpecialReadContext>& context)
+ : Context(context) {
+ if (HasAppData() && AppDataVerified().ColumnShardConfig.HasMaxInFlightIntervalsOnRequest()) {
+ MaxInFlight = AppDataVerified().ColumnShardConfig.GetMaxInFlightIntervalsOnRequest();
+ }
+}
+
+std::shared_ptr<NKikimr::NOlap::IScanCursor> TNotSortedFullScanCollection::DoBuildCursor(
+ const std::shared_ptr<IDataSource>& source, const ui32 readyRecords) const {
+ return std::make_shared<TNotSortedSimpleScanCursor>(source->GetSourceId(), readyRecords);
+}
+
+} // namespace NKikimr::NOlap::NReader::NSimple
diff --git a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections.h b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections.h
new file mode 100644
index 0000000000..bec893dbb5
--- /dev/null
+++ b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections.h
@@ -0,0 +1,208 @@
+#pragma once
+#include "context.h"
+#include "source.h"
+
+#include <ydb/library/accessor/positive_integer.h>
+
+namespace NKikimr::NOlap::NReader::NSimple {
+
+class ISourcesCollection {
+private:
+ virtual bool DoIsFinished() const = 0;
+ virtual std::shared_ptr<IDataSource> DoExtractNext() = 0;
+ virtual bool DoCheckInFlightLimits() const = 0;
+ virtual void DoOnSourceFinished(const std::shared_ptr<IDataSource>& source) = 0;
+ virtual void DoClear() = 0;
+
+ TPositiveControlInteger SourcesInFlightCount;
+ YDB_READONLY(ui64, MaxInFlight, 1024);
+
+ virtual TString DoDebugString() const {
+ return "";
+ }
+ virtual std::shared_ptr<IScanCursor> DoBuildCursor(const std::shared_ptr<IDataSource>& source, const ui32 readyRecords) const = 0;
+
+protected:
+ const std::shared_ptr<TSpecialReadContext> Context;
+
+public:
+ std::shared_ptr<IScanCursor> BuildCursor(const std::shared_ptr<IDataSource>& source, const ui32 readyRecords) const {
+ return DoBuildCursor(source, readyRecords);
+ }
+
+ TString DebugString() const {
+ return DoDebugString();
+ }
+
+ virtual ~ISourcesCollection() = default;
+
+ std::shared_ptr<IDataSource> ExtractNext() {
+ SourcesInFlightCount.Inc();
+ return DoExtractNext();
+ }
+
+ bool IsFinished() const {
+ return DoIsFinished();
+ }
+
+ void OnSourceFinished(const std::shared_ptr<IDataSource>& source) {
+ SourcesInFlightCount.Dec();
+ DoOnSourceFinished(source);
+ }
+
+ bool CheckInFlightLimits() const {
+ return DoCheckInFlightLimits();
+ }
+
+ void Clear() {
+ DoClear();
+ }
+
+ ISourcesCollection(const std::shared_ptr<TSpecialReadContext>& context);
+};
+
+class TNotSortedFullScanCollection: public ISourcesCollection {
+private:
+ using TBase = ISourcesCollection;
+ std::deque<TSourceConstructor> Sources;
+ TPositiveControlInteger InFlightCount;
+ virtual void DoClear() override {
+ Sources.clear();
+ }
+ virtual std::shared_ptr<IScanCursor> DoBuildCursor(const std::shared_ptr<IDataSource>& source, const ui32 readyRecords) const override;
+ virtual bool DoIsFinished() const override {
+ return Sources.empty();
+ }
+ virtual std::shared_ptr<IDataSource> DoExtractNext() override {
+ AFL_VERIFY(Sources.size());
+ auto result = Sources.front().Construct(Context);
+ Sources.pop_front();
+ InFlightCount.Inc();
+ return result;
+ }
+ virtual bool DoCheckInFlightLimits() const override {
+ return InFlightCount < GetMaxInFlight();
+ }
+ virtual void DoOnSourceFinished(const std::shared_ptr<IDataSource>& /*source*/) override {
+ InFlightCount.Dec();
+ }
+
+public:
+ TNotSortedFullScanCollection(const std::shared_ptr<TSpecialReadContext>& context, std::deque<TSourceConstructor>&& sources,
+ const std::shared_ptr<IScanCursor>& cursor)
+ : TBase(context) {
+ if (cursor && cursor->IsInitialized()) {
+ while (sources.size()) {
+ bool usage = false;
+ if (!context->GetCommonContext()->GetScanCursor()->CheckEntityIsBorder(sources.front(), usage)) {
+ sources.pop_front();
+ continue;
+ }
+ if (usage) {
+ sources.front().SetIsStartedByCursor();
+ }
+ break;
+ }
+ }
+ Sources = std::move(sources);
+ }
+};
+
+class TSortedFullScanCollection: public ISourcesCollection {
+private:
+ using TBase = ISourcesCollection;
+ std::deque<TSourceConstructor> HeapSources;
+ TPositiveControlInteger InFlightCount;
+ virtual void DoClear() override {
+ HeapSources.clear();
+ }
+ virtual bool DoIsFinished() const override {
+ return HeapSources.empty();
+ }
+ virtual std::shared_ptr<IScanCursor> DoBuildCursor(const std::shared_ptr<IDataSource>& source, const ui32 readyRecords) const override {
+ return std::make_shared<TSimpleScanCursor>(source->GetStartPKRecordBatch(), source->GetSourceId(), readyRecords);
+ }
+ virtual std::shared_ptr<IDataSource> DoExtractNext() override {
+ AFL_VERIFY(HeapSources.size());
+ auto result = HeapSources.front().Construct(Context);
+ std::pop_heap(HeapSources.begin(), HeapSources.end());
+ HeapSources.pop_back();
+ InFlightCount.Inc();
+ return result;
+ }
+ virtual bool DoCheckInFlightLimits() const override {
+ return InFlightCount < GetMaxInFlight();
+ }
+ virtual void DoOnSourceFinished(const std::shared_ptr<IDataSource>& /*source*/) override {
+ InFlightCount.Dec();
+ }
+
+public:
+ TSortedFullScanCollection(const std::shared_ptr<TSpecialReadContext>& context, std::deque<TSourceConstructor>&& sources,
+ const std::shared_ptr<IScanCursor>& cursor)
+ : TBase(context) {
+ if (cursor && cursor->IsInitialized()) {
+ for (auto&& i : sources) {
+ bool usage = false;
+ if (!context->GetCommonContext()->GetScanCursor()->CheckEntityIsBorder(i, usage)) {
+ continue;
+ }
+ if (usage) {
+ i.SetIsStartedByCursor();
+ }
+ break;
+ }
+ }
+ HeapSources = std::move(sources);
+ std::make_heap(HeapSources.begin(), HeapSources.end());
+ }
+};
+
+class TScanWithLimitCollection: public ISourcesCollection {
+private:
+ using TBase = ISourcesCollection;
+ class TFinishedDataSource {
+ private:
+ YDB_READONLY(ui32, RecordsCount, 0);
+ YDB_READONLY(ui32, SourceId, 0);
+ YDB_READONLY(ui32, SourceIdx, 0);
+
+ public:
+ TFinishedDataSource(const std::shared_ptr<IDataSource>& source)
+ : RecordsCount(source->GetResultRecordsCount())
+ , SourceId(source->GetSourceId())
+ , SourceIdx(source->GetSourceIdx()) {
+ }
+ };
+
+ std::deque<TSourceConstructor> HeapSources;
+ TPositiveControlInteger FetchingInFlightCount;
+ TPositiveControlInteger FullIntervalsFetchingCount;
+ ui64 Limit = 0;
+ ui64 InFlightLimit = 1;
+ ui64 FetchedCount = 0;
+ std::map<TCompareKeyForScanSequence, TFinishedDataSource> FinishedSources;
+ std::set<TCompareKeyForScanSequence> FetchingInFlightSources;
+
+ virtual std::shared_ptr<IScanCursor> DoBuildCursor(const std::shared_ptr<IDataSource>& source, const ui32 readyRecords) const override {
+ return std::make_shared<TSimpleScanCursor>(source->GetStartPKRecordBatch(), source->GetSourceId(), readyRecords);
+ }
+ virtual void DoClear() override {
+ HeapSources.clear();
+ }
+ virtual bool DoIsFinished() const override {
+ return HeapSources.empty();
+ }
+ virtual std::shared_ptr<IDataSource> DoExtractNext() override;
+ virtual bool DoCheckInFlightLimits() const override {
+ return (FetchingInFlightCount < GetMaxInFlight()) && (FullIntervalsFetchingCount < InFlightLimit);
+ }
+ virtual void DoOnSourceFinished(const std::shared_ptr<IDataSource>& source) override;
+ ui32 GetInFlightIntervalsCount(const TCompareKeyForScanSequence& from, const TCompareKeyForScanSequence& to) const;
+
+public:
+ TScanWithLimitCollection(const std::shared_ptr<TSpecialReadContext>& context, std::deque<TSourceConstructor>&& sources,
+ const std::shared_ptr<IScanCursor>& cursor);
+};
+
+} // namespace NKikimr::NOlap::NReader::NSimple
diff --git a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/context.cpp b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/context.cpp
index edd0fc4509..b00d2e8a38 100644
--- a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/context.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/context.cpp
@@ -11,7 +11,7 @@ std::shared_ptr<TFetchingScript> TSpecialReadContext::DoGetColumnsFetchingPlan(c
const bool needSnapshots = GetReadMetadata()->GetRequestSnapshot() < source->GetRecordSnapshotMax();
const bool dontNeedColumns = !needSnapshots && GetFFColumns()->GetColumnIds().size() == 1 &&
GetFFColumns()->GetColumnIds().contains(NOlap::NPortion::TSpecialColumns::SPEC_COL_PLAN_STEP_INDEX);
- if (!dontNeedColumns && !source->GetStageData().HasPortionAccessor()) {
+ if (!dontNeedColumns && !source->HasStageData()) {
if (!AskAccumulatorsScript) {
NCommon::TFetchingScriptBuilder acc(*this);
acc.AddStep(std::make_shared<NCommon::TAllocateMemoryStep>
diff --git a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetching.cpp b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetching.cpp
index bd787e135f..511170d173 100644
--- a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetching.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/fetching.cpp
@@ -159,7 +159,10 @@ TConclusion<bool> TBuildResultStep::DoExecuteInplace(const std::shared_ptr<IData
if (!source->GetStageResult().IsEmpty()) {
resultBatch = source->GetStageResult().GetBatch()->BuildTableVerified(contextTableConstruct);
if (auto filter = source->GetStageResult().GetNotAppliedFilter()) {
- AFL_VERIFY(filter->Apply(resultBatch, NArrow::TColumnFilter::TApplyContext(StartIndex, RecordsCount).SetTrySlices(true)));
+ filter->Apply(resultBatch, NArrow::TColumnFilter::TApplyContext(StartIndex, RecordsCount).SetTrySlices(true));
+ if (!resultBatch->num_rows()) {
+ resultBatch = nullptr;
+ }
}
}
diff --git a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/plain_read_data.cpp b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/plain_read_data.cpp
index 4859562081..bb34dd0d36 100644
--- a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/plain_read_data.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/plain_read_data.cpp
@@ -8,7 +8,7 @@ TPlainReadData::TPlainReadData(const std::shared_ptr<TReadContext>& context)
: TBase(context)
, SpecialReadContext(std::make_shared<TSpecialReadContext>(context)) {
ui32 sourceIdx = 0;
- std::deque<std::shared_ptr<IDataSource>> sources;
+ std::deque<TSourceConstructor> sources;
const auto& portions = GetReadMetadata()->SelectInfo->Portions;
ui64 compactedPortionsBytes = 0;
ui64 insertedPortionsBytes = 0;
@@ -19,9 +19,9 @@ TPlainReadData::TPlainReadData(const std::shared_ptr<TReadContext>& context)
insertedPortionsBytes += i->GetTotalBlobBytes();
}
- sources.emplace_back(std::make_shared<TPortionDataSource>(sourceIdx++, i, SpecialReadContext));
+ sources.emplace_back(TSourceConstructor(sourceIdx++, i, context));
}
- std::sort(sources.begin(), sources.end(), IDataSource::TCompareStartForScanSequence());
+ std::make_heap(sources.begin(), sources.end());
Scanner = std::make_shared<TScanHead>(std::move(sources), SpecialReadContext);
auto& stats = GetReadMetadata()->ReadStats;
diff --git a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp
index b2613f9789..5f0c76cfc1 100644
--- a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp
@@ -20,9 +20,6 @@ void TScanHead::OnSourceReady(const std::shared_ptr<IDataSource>& source, std::s
source->GetRecordsCount(), source->GetUsedRawBytes(), tableExt ? tableExt->num_rows() : 0);
source->MutableStageResult().SetResultChunk(std::move(tableExt), startIndex, recordsCount);
- if (source->GetStageResult().IsFinished()) {
- SourcesInFlightCount.Dec();
- }
while (FetchingSources.size()) {
auto frontSource = FetchingSources.front();
if (!frontSource->HasStageResult()) {
@@ -44,8 +41,7 @@ void TScanHead::OnSourceReady(const std::shared_ptr<IDataSource>& source, std::s
if (table && table->num_rows()) {
AFL_DEBUG(NKikimrServices::TX_COLUMNSHARD)("event", "has_result")("source_id", frontSource->GetSourceId())(
"source_idx", frontSource->GetSourceIdx())("table", table->num_rows());
- auto cursor =
- std::make_shared<TSimpleScanCursor>(frontSource->GetStartPKRecordBatch(), frontSource->GetSourceId(), startIndex + recordsCount);
+ auto cursor = SourcesCollection->BuildCursor(frontSource, startIndex + recordsCount);
reader.OnIntervalResult(std::make_shared<TPartialReadResult>(frontSource->GetResourceGuards(), frontSource->GetGroupGuard(), table,
cursor, Context->GetCommonContext(), sourceIdxToContinue));
} else if (sourceIdxToContinue) {
@@ -60,65 +56,25 @@ void TScanHead::OnSourceReady(const std::shared_ptr<IDataSource>& source, std::s
AFL_VERIFY(FetchingSourcesByIdx.erase(frontSource->GetSourceIdx()));
FetchingSources.pop_front();
frontSource->ClearResult();
- if (Context->GetCommonContext()->GetReadMetadata()->HasLimit()) {
- AFL_VERIFY(FetchingInFlightSources.erase(TCompareKeyForScanSequence::FromFinish(frontSource)));
- AFL_VERIFY(FinishedSources.emplace(TCompareKeyForScanSequence::FromFinish(frontSource), frontSource).second);
- while (FinishedSources.size() &&
- (SortedSources.empty() || FinishedSources.begin()->second->GetFinish() < SortedSources.front()->GetStart())) {
- auto finishedSource = FinishedSources.begin()->second;
- if (!finishedSource->GetResultRecordsCount() && InFlightLimit < MaxInFlight) {
- InFlightLimit = 2 * InFlightLimit;
- }
- FetchedCount += finishedSource->GetResultRecordsCount();
- FinishedSources.erase(FinishedSources.begin());
- if (Context->IsActive()) {
- --IntervalsInFlightCount;
- }
- AFL_DEBUG(NKikimrServices::TX_COLUMNSHARD)("event", "source_finished")("source_id", finishedSource->GetSourceId())(
- "source_idx", finishedSource->GetSourceIdx())("limit", Context->GetCommonContext()->GetReadMetadata()->GetLimitRobust())(
- "fetched", finishedSource->GetResultRecordsCount());
- if (FetchedCount > (ui64)Context->GetCommonContext()->GetReadMetadata()->GetLimitRobust() && SortedSources.size()) {
- AFL_NOTICE(NKikimrServices::TX_COLUMNSHARD)("event", "limit_exhausted")(
- "limit", Context->GetCommonContext()->GetReadMetadata()->GetLimitRobust())("fetched", FetchedCount);
- SortedSources.clear();
- IntervalsInFlightCount = GetInFlightIntervalsCount();
- }
- }
- }
+ SourcesCollection->OnSourceFinished(frontSource);
}
}
TConclusionStatus TScanHead::Start() {
- for (auto&& i : SortedSources) {
- i->InitFetchingPlan(Context->GetColumnsFetchingPlan(i));
- }
return TConclusionStatus::Success();
}
-TScanHead::TScanHead(std::deque<std::shared_ptr<IDataSource>>&& sources, const std::shared_ptr<TSpecialReadContext>& context)
+TScanHead::TScanHead(std::deque<TSourceConstructor>&& sources, const std::shared_ptr<TSpecialReadContext>& context)
: Context(context) {
- if (HasAppData() && AppDataVerified().ColumnShardConfig.HasMaxInFlightIntervalsOnRequest()) {
- MaxInFlight = AppDataVerified().ColumnShardConfig.GetMaxInFlightIntervalsOnRequest();
- }
if (Context->GetReadMetadata()->HasLimit()) {
- InFlightLimit = 1;
+ SourcesCollection =
+ std::make_unique<TScanWithLimitCollection>(Context, std::move(sources), context->GetCommonContext()->GetScanCursor());
+ } else if (Context->GetReadMetadata()->IsSorted()) {
+ SourcesCollection =
+ std::make_unique<TSortedFullScanCollection>(Context, std::move(sources), context->GetCommonContext()->GetScanCursor());
} else {
- InFlightLimit = MaxInFlight;
- }
- bool started = !context->GetCommonContext()->GetScanCursor()->IsInitialized();
- for (auto&& i : sources) {
- if (!started) {
- bool usage = false;
- if (!context->GetCommonContext()->GetScanCursor()->CheckEntityIsBorder(i, usage)) {
- continue;
- }
- started = true;
- if (!usage) {
- continue;
- }
- i->SetIsStartedByCursor();
- }
- SortedSources.emplace_back(i);
+ SourcesCollection =
+ std::make_unique<TNotSortedFullScanCollection>(Context, std::move(sources), context->GetCommonContext()->GetScanCursor());
}
}
@@ -126,40 +82,14 @@ TConclusion<bool> TScanHead::BuildNextInterval() {
if (!Context->IsActive()) {
return false;
}
- if (SortedSources.size() == 0) {
- return false;
- }
bool changed = false;
- if (!Context->GetCommonContext()->GetReadMetadata()->HasLimit()) {
- while (SortedSources.size() && SourcesInFlightCount.Val() < InFlightLimit && Context->IsActive()) {
- FOR_DEBUG_LOG(NKikimrServices::COLUMNSHARD_SCAN_EVLOG, SortedSources.front()->AddEvent("f"));
- SortedSources.front()->StartProcessing(SortedSources.front());
- FetchingSources.emplace_back(SortedSources.front());
- SourcesInFlightCount.Inc();
- AFL_VERIFY(FetchingSourcesByIdx.emplace(SortedSources.front()->GetSourceIdx(), SortedSources.front()).second);
- SortedSources.pop_front();
- changed = true;
- }
- } else {
- if (InFlightLimit <= IntervalsInFlightCount) {
- return false;
- }
- ui32 inFlightCountLocal = GetInFlightIntervalsCount();
- AFL_VERIFY(IntervalsInFlightCount == inFlightCountLocal)("count_global", IntervalsInFlightCount)("count_local", inFlightCountLocal);
- while (SortedSources.size() && inFlightCountLocal < InFlightLimit && FetchingInFlightSources.size() < InFlightLimit && Context->IsActive()) {
- FOR_DEBUG_LOG(NKikimrServices::COLUMNSHARD_SCAN_EVLOG, SortedSources.front()->AddEvent("f"));
- SortedSources.front()->StartProcessing(SortedSources.front());
- FetchingSources.emplace_back(SortedSources.front());
- SourcesInFlightCount.Inc();
- AFL_VERIFY(FetchingSourcesByIdx.emplace(SortedSources.front()->GetSourceIdx(), SortedSources.front()).second);
- AFL_VERIFY(FetchingInFlightSources.emplace(TCompareKeyForScanSequence::FromFinish(SortedSources.front()), SortedSources.front()).second);
- SortedSources.pop_front();
- const ui32 inFlightCountLocalNew = GetInFlightIntervalsCount();
- AFL_VERIFY(inFlightCountLocal <= inFlightCountLocalNew);
- inFlightCountLocal = inFlightCountLocalNew;
- changed = true;
- }
- IntervalsInFlightCount = inFlightCountLocal;
+ while (!SourcesCollection->IsFinished() && SourcesCollection->CheckInFlightLimits() && Context->IsActive()) {
+ auto source = SourcesCollection->ExtractNext();
+ source->InitFetchingPlan(Context->GetColumnsFetchingPlan(source));
+ source->StartProcessing(source);
+ FetchingSources.emplace_back(source);
+ AFL_VERIFY(FetchingSourcesByIdx.emplace(source->GetSourceIdx(), source).second);
+ changed = true;
}
return changed;
}
@@ -177,11 +107,8 @@ void TScanHead::Abort() {
for (auto&& i : FetchingSources) {
i->Abort();
}
- for (auto&& i : SortedSources) {
- i->Abort();
- }
FetchingSources.clear();
- SortedSources.clear();
+ SourcesCollection->Clear();
Y_ABORT_UNLESS(IsFinished());
}
@@ -189,20 +116,4 @@ TScanHead::~TScanHead() {
AFL_VERIFY(!IntervalsInFlightCount || !Context->IsActive());
}
-ui32 TScanHead::GetInFlightIntervalsCount() const {
- if (SortedSources.empty()) {
- return FetchingInFlightSources.size() + FinishedSources.size();
- }
- ui32 inFlightCountLocal = 0;
- auto itUpperFinished = FinishedSources.upper_bound(TCompareKeyForScanSequence::BorderStart(SortedSources.front()));
- for (auto&& it = FinishedSources.begin(); it != itUpperFinished; ++it) {
- ++inFlightCountLocal;
- }
- auto itUpperFetching = FetchingInFlightSources.upper_bound(TCompareKeyForScanSequence::BorderStart(SortedSources.front()));
- for (auto&& it = FetchingInFlightSources.begin(); it != itUpperFetching; ++it) {
- ++inFlightCountLocal;
- }
- return inFlightCountLocal;
-}
-
} // namespace NKikimr::NOlap::NReader::NSimple
diff --git a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.h b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.h
index 02315e5495..d38ebef6eb 100644
--- a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.h
+++ b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.h
@@ -1,5 +1,7 @@
#pragma once
+#include "collections.h"
#include "source.h"
+
#include <ydb/core/formats/arrow/reader/position.h>
#include <ydb/core/tx/columnshard/common/limits.h>
#include <ydb/core/tx/columnshard/engines/reader/abstract/read_context.h>
@@ -9,36 +11,15 @@ namespace NKikimr::NOlap::NReader::NSimple {
class TPlainReadData;
-class TDataSourceEndpoint {
-private:
- YDB_READONLY_DEF(std::vector<std::shared_ptr<IDataSource>>, StartSources);
- YDB_READONLY_DEF(std::vector<std::shared_ptr<IDataSource>>, FinishSources);
-public:
- void AddStart(std::shared_ptr<IDataSource> source) {
- StartSources.emplace_back(source);
- }
- void AddFinish(std::shared_ptr<IDataSource> source) {
- FinishSources.emplace_back(source);
- }
-};
-
class TScanHead {
private:
- using TCompareKeyForScanSequence = TPortionDataSource::TCompareKeyForScanSequence;
-
std::shared_ptr<TSpecialReadContext> Context;
THashMap<ui64, std::shared_ptr<IDataSource>> FetchingSourcesByIdx;
- std::deque<std::shared_ptr<IDataSource>> SortedSources;
std::deque<std::shared_ptr<IDataSource>> FetchingSources;
- std::map<TCompareKeyForScanSequence, std::shared_ptr<IDataSource>> FinishedSources;
- std::map<TCompareKeyForScanSequence, std::shared_ptr<IDataSource>> FetchingInFlightSources;
TPositiveControlInteger IntervalsInFlightCount;
- ui64 FetchedCount = 0;
- ui64 InFlightLimit = 1;
- ui64 MaxInFlight = 256;
- TPositiveControlInteger SourcesInFlightCount;
+ std::unique_ptr<ISourcesCollection> SourcesCollection;
- ui32 GetInFlightIntervalsCount() const;
+ void StartNextSource(const std::shared_ptr<TPortionDataSource>& source);
public:
~TScanHead();
@@ -53,17 +34,14 @@ public:
void Abort();
bool IsFinished() const {
- return FetchingSources.empty() && SortedSources.empty();
+ return FetchingSources.empty() && SourcesCollection->IsFinished();
}
const TReadContext& GetContext() const;
TString DebugString() const {
TStringBuilder sb;
- sb << "S:";
- for (auto&& i : SortedSources) {
- sb << i->GetSourceId() << ";";
- }
+ sb << "S:{" << SourcesCollection->DebugString() << "};";
sb << "F:";
for (auto&& i : FetchingSources) {
sb << i->GetSourceId() << ";";
@@ -76,10 +54,9 @@ public:
TConclusionStatus Start();
- TScanHead(std::deque<std::shared_ptr<IDataSource>>&& sources, const std::shared_ptr<TSpecialReadContext>& context);
+ TScanHead(std::deque<TSourceConstructor>&& sources, const std::shared_ptr<TSpecialReadContext>& context);
[[nodiscard]] TConclusion<bool> BuildNextInterval();
-
};
-}
+} // namespace NKikimr::NOlap::NReader::NSimple
diff --git a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.cpp b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.cpp
index 4269465a24..9bb81ebf47 100644
--- a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.cpp
@@ -27,6 +27,7 @@ void IDataSource::InitFetchingPlan(const std::shared_ptr<TFetchingScript>& fetch
void IDataSource::StartProcessing(const std::shared_ptr<IDataSource>& sourcePtr) {
AFL_VERIFY(!ProcessingStarted);
+ InitStageData(std::make_unique<TFetchedData>(true, sourcePtr->GetRecordsCount()));
AFL_VERIFY(FetchingPlan);
ProcessingStarted = true;
SourceGroupGuard = NGroupedMemoryManager::TScanMemoryLimiterOperator::BuildGroupGuard(
@@ -58,7 +59,7 @@ void IDataSource::DoOnEmptyStageData(const std::shared_ptr<NCommon::IDataSource>
ResourceGuards.clear();
StageResult = TFetchedResult::BuildEmpty();
StageResult->SetPages({ TPortionDataAccessor::TReadPage(0, GetRecordsCount(), 0) });
- StageData.reset();
+ ClearStageData();
}
void IDataSource::DoBuildStageResult(const std::shared_ptr<NCommon::IDataSource>& /*sourcePtr*/) {
@@ -68,14 +69,14 @@ void IDataSource::DoBuildStageResult(const std::shared_ptr<NCommon::IDataSource>
void IDataSource::Finalize(const std::optional<ui64> memoryLimit) {
TMemoryProfileGuard mpg("SCAN_PROFILE::STAGE_RESULT", IS_DEBUG_LOG_ENABLED(NKikimrServices::TX_COLUMNSHARD_SCAN_MEMORY));
if (memoryLimit && !IsSourceInMemory()) {
- const auto accessor = StageData->GetPortionAccessor();
- StageResult = std::make_unique<TFetchedResult>(std::move(StageData), *GetContext()->GetCommonContext()->GetResolver());
+ const auto accessor = GetStageData().GetPortionAccessor();
+ StageResult = std::make_unique<TFetchedResult>(ExtractStageData(), *GetContext()->GetCommonContext()->GetResolver());
StageResult->SetPages(accessor.BuildReadPages(*memoryLimit, GetContext()->GetProgramInputColumns()->GetColumnIds()));
} else {
- StageResult = std::make_unique<TFetchedResult>(std::move(StageData), *GetContext()->GetCommonContext()->GetResolver());
+ StageResult = std::make_unique<TFetchedResult>(ExtractStageData(), *GetContext()->GetCommonContext()->GetResolver());
StageResult->SetPages({ TPortionDataAccessor::TReadPage(0, GetRecordsCount(), 0) });
}
- StageData.reset();
+ ClearStageData();
}
void TPortionDataSource::NeedFetchColumns(const std::set<ui32>& columnIds, TBlobsAction& blobsAction,
@@ -114,15 +115,15 @@ bool TPortionDataSource::DoStartFetchingColumns(
const std::shared_ptr<NCommon::IDataSource>& sourcePtr, const TFetchingScriptCursor& step, const TColumnsSetIds& columns) {
AFL_DEBUG(NKikimrServices::TX_COLUMNSHARD_SCAN)("event", step.GetName());
AFL_VERIFY(columns.GetColumnsCount());
- AFL_VERIFY(!StageData->GetAppliedFilter() || !StageData->GetAppliedFilter()->IsTotalDenyFilter());
+ AFL_VERIFY(!GetStageData().GetAppliedFilter() || !GetStageData().GetAppliedFilter()->IsTotalDenyFilter());
auto& columnIds = columns.GetColumnIds();
AFL_DEBUG(NKikimrServices::TX_COLUMNSHARD_SCAN)("event", step.GetName())("fetching_info", step.DebugString());
TBlobsAction action(GetContext()->GetCommonContext()->GetStoragesManager(), NBlobOperations::EConsumer::SCAN);
{
THashMap<TChunkAddress, TPortionDataAccessor::TAssembleBlobInfo> nullBlocks;
- NeedFetchColumns(columnIds, action, nullBlocks, StageData->GetAppliedFilter());
- StageData->AddDefaults(std::move(nullBlocks));
+ NeedFetchColumns(columnIds, action, nullBlocks, GetStageData().GetAppliedFilter());
+ MutableStageData().AddDefaults(std::move(nullBlocks));
}
auto readActions = action.GetReadingActions();
@@ -137,7 +138,7 @@ bool TPortionDataSource::DoStartFetchingColumns(
}
std::shared_ptr<NIndexes::TSkipIndex> TPortionDataSource::SelectOptimalIndex(
- const std::vector<std::shared_ptr<NIndexes::TSkipIndex>>& indexes, const NArrow::NSSA::EIndexCheckOperation /*op*/) const {
+ const std::vector<std::shared_ptr<NIndexes::TSkipIndex>>& indexes, const NArrow::NSSA::TIndexCheckOperation& /*op*/) const {
if (indexes.size() == 0) {
return nullptr;
}
@@ -386,8 +387,16 @@ public:
} // namespace
+TCompareKeyForScanSequence TCompareKeyForScanSequence::FromStart(const std::shared_ptr<IDataSource>& src) {
+ return TCompareKeyForScanSequence(src->GetStart(), src->GetSourceId());
+}
+
+TCompareKeyForScanSequence TCompareKeyForScanSequence::FromFinish(const std::shared_ptr<IDataSource>& src) {
+ return TCompareKeyForScanSequence(src->GetFinish(), src->GetSourceId());
+}
+
bool TPortionDataSource::DoStartFetchingAccessor(const std::shared_ptr<IDataSource>& sourcePtr, const TFetchingScriptCursor& step) {
- AFL_VERIFY(!StageData->HasPortionAccessor());
+ AFL_VERIFY(!GetStageData().HasPortionAccessor());
AFL_DEBUG(NKikimrServices::TX_COLUMNSHARD_SCAN)("event", step.GetName())("fetching_info", step.DebugString());
std::shared_ptr<TDataAccessorsRequest> request = std::make_shared<TDataAccessorsRequest>("SIMPLE::" + step.GetName());
diff --git a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.h b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.h
index 2c58a5d393..d3ed8214c0 100644
--- a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.h
+++ b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/source.h
@@ -6,8 +6,8 @@
#include <ydb/core/formats/arrow/reader/position.h>
#include <ydb/core/tx/columnshard/blob.h>
#include <ydb/core/tx/columnshard/blobs_action/abstract/action.h>
-#include <ydb/core/tx/columnshard/common/snapshot.h>
#include <ydb/core/tx/columnshard/common/path_id.h>
+#include <ydb/core/tx/columnshard/common/snapshot.h>
#include <ydb/core/tx/columnshard/engines/portions/portion_info.h>
#include <ydb/core/tx/columnshard/engines/predicate/range.h>
#include <ydb/core/tx/columnshard/engines/reader/common_reader/iterator/columns_set.h>
@@ -47,8 +47,8 @@ public:
class TReplaceKeyAdapter {
private:
- const bool Reverse = false;
- const NArrow::TReplaceKey Value;
+ bool Reverse = false;
+ NArrow::TReplaceKey Value;
public:
TReplaceKeyAdapter(const NArrow::TReplaceKey& rk, const bool reverse)
@@ -72,18 +72,7 @@ public:
}
bool operator<(const TReplaceKeyAdapter& item) const {
- AFL_VERIFY(Reverse == item.Reverse);
- const std::partial_ordering result = Value.CompareNotNull(item.Value);
- if (result == std::partial_ordering::equivalent) {
- return false;
- } else if (result == std::partial_ordering::less) {
- return !Reverse;
- } else if (result == std::partial_ordering::greater) {
- return Reverse;
- } else {
- AFL_VERIFY(false);
- return false;
- }
+ return Compare(item) == std::partial_ordering::less;
}
TString DebugString() const {
@@ -91,6 +80,38 @@ public:
}
};
+class TCompareKeyForScanSequence {
+private:
+ TReplaceKeyAdapter Key;
+ YDB_READONLY(ui32, SourceId, 0);
+
+public:
+    const TReplaceKeyAdapter& GetKey() const {
+ return Key;
+ }
+
+ explicit TCompareKeyForScanSequence(const TReplaceKeyAdapter& key, const ui32 sourceId)
+ : Key(key)
+ , SourceId(sourceId) {
+ }
+
+ static TCompareKeyForScanSequence FromStart(const std::shared_ptr<IDataSource>& src);
+ static TCompareKeyForScanSequence FromFinish(const std::shared_ptr<IDataSource>& src);
+
+ static TCompareKeyForScanSequence BorderStart(const TReplaceKeyAdapter& key) {
+ return TCompareKeyForScanSequence(key, 0);
+ }
+
+ bool operator<(const TCompareKeyForScanSequence& item) const {
+ const std::partial_ordering compareResult = Key.Compare(item.Key);
+ if (compareResult == std::partial_ordering::equivalent) {
+ return SourceId < item.SourceId;
+ } else {
+ return compareResult == std::partial_ordering::less;
+ }
+    }
+};
+
class IDataSource: public NCommon::IDataSource {
private:
using TBase = NCommon::IDataSource;
@@ -157,7 +178,7 @@ public:
}
virtual void ClearResult() {
- StageData.reset();
+ ClearStageData();
StageResult.reset();
ResourceGuards.clear();
SourceGroupGuard = nullptr;
@@ -174,6 +195,7 @@ public:
void ContinueCursor(const std::shared_ptr<IDataSource>& sourcePtr);
+ template <bool Reverse>
class TCompareStartForScanSequence {
public:
bool operator()(const std::shared_ptr<IDataSource>& l, const std::shared_ptr<IDataSource>& r) const {
@@ -181,40 +203,7 @@ public:
if (compareResult == std::partial_ordering::equivalent) {
return l->GetSourceId() < r->GetSourceId();
} else {
- return compareResult == std::partial_ordering::less;
- }
- };
- };
-
- class TCompareKeyForScanSequence {
- private:
- const TReplaceKeyAdapter Key;
- const ui32 SourceId;
-
- public:
- TCompareKeyForScanSequence(const TReplaceKeyAdapter& key, const ui32 sourceId)
- : Key(key)
- , SourceId(sourceId) {
- }
-
- static TCompareKeyForScanSequence FromStart(const std::shared_ptr<IDataSource>& src) {
- return TCompareKeyForScanSequence(src->GetStart(), src->GetSourceId());
- }
-
- static TCompareKeyForScanSequence FromFinish(const std::shared_ptr<IDataSource>& src) {
- return TCompareKeyForScanSequence(src->GetFinish(), src->GetSourceId());
- }
-
- static TCompareKeyForScanSequence BorderStart(const std::shared_ptr<IDataSource>& src) {
- return TCompareKeyForScanSequence(src->GetStart(), 0);
- }
-
- bool operator<(const TCompareKeyForScanSequence& item) const {
- const std::partial_ordering compareResult = Key.Compare(item.Key);
- if (compareResult == std::partial_ordering::equivalent) {
- return SourceId < item.SourceId;
- } else {
- return compareResult == std::partial_ordering::less;
+ return Reverse ? compareResult == std::partial_ordering::greater : compareResult == std::partial_ordering::less;
}
};
};
@@ -264,12 +253,11 @@ public:
: TBase(sourceId, sourceIdx, context, recordSnapshotMin, recordSnapshotMax, recordsCount, shardingVersion, hasDeletions)
, Start(context->GetReadMetadata()->IsDescSorted() ? finish : start, context->GetReadMetadata()->IsDescSorted())
, Finish(context->GetReadMetadata()->IsDescSorted() ? start : finish, context->GetReadMetadata()->IsDescSorted()) {
- StageData = std::make_unique<TFetchedData>(true, recordsCount);
UsageClass = GetContext()->GetReadMetadata()->GetPKRangesFilter().GetUsageClass(start, finish);
AFL_VERIFY(UsageClass != TPKRangeFilter::EUsageClass::NoUsage);
AFL_DEBUG(NKikimrServices::TX_COLUMNSHARD_SCAN)("event", "portions_for_merge")("start", Start.DebugString())(
"finish", Finish.DebugString());
- Y_ABORT_UNLESS(Start.Compare(Finish) != std::partial_ordering::greater);
+ AFL_VERIFY_DEBUG(Start.Compare(Finish) != std::partial_ordering::greater);
}
virtual ~IDataSource() = default;
@@ -286,7 +274,7 @@ private:
virtual void InitUsedRawBytes() override {
AFL_VERIFY(!UsedRawBytes);
- UsedRawBytes = StageData->GetPortionAccessor().GetColumnRawBytes(GetContext()->GetAllUsageColumns()->GetColumnIds(), false);
+ UsedRawBytes = GetStageData().GetPortionAccessor().GetColumnRawBytes(GetContext()->GetAllUsageColumns()->GetColumnIds(), false);
}
virtual bool DoStartFetchingColumns(
@@ -294,7 +282,7 @@ private:
virtual void DoAssembleColumns(const std::shared_ptr<TColumnsSet>& columns, const bool sequential) override;
std::shared_ptr<NIndexes::TSkipIndex> SelectOptimalIndex(
- const std::vector<std::shared_ptr<NIndexes::TSkipIndex>>& indexes, const NArrow::NSSA::EIndexCheckOperation op) const;
+ const std::vector<std::shared_ptr<NIndexes::TSkipIndex>>& indexes, const NArrow::NSSA::TIndexCheckOperation& op) const;
virtual TConclusion<bool> DoStartFetchImpl(
const NArrow::NSSA::TProcessorContext& context, const std::vector<std::shared_ptr<NCommon::IKernelFetchLogic>>& fetchersExt) override;
@@ -437,4 +425,56 @@ public:
TPortionDataSource(const ui32 sourceIdx, const std::shared_ptr<TPortionInfo>& portion, const std::shared_ptr<TSpecialReadContext>& context);
};
+class TSourceConstructor: public ICursorEntity {
+private:
+ TCompareKeyForScanSequence Start;
+ YDB_READONLY(ui32, SourceId, 0);
+ YDB_READONLY(ui32, PortionIdx, 0);
+ ui32 RecordsCount = 0;
+ bool IsStartedByCursorFlag = false;
+
+ virtual ui64 DoGetEntityId() const override {
+ return SourceId;
+ }
+ virtual ui64 DoGetEntityRecordsCount() const override {
+ return RecordsCount;
+ }
+
+public:
+ void SetIsStartedByCursor() {
+ IsStartedByCursorFlag = true;
+ }
+ bool GetIsStartedByCursor() const {
+ return IsStartedByCursorFlag;
+ }
+
+ const TCompareKeyForScanSequence& GetStart() const {
+ return Start;
+ }
+
+ TSourceConstructor(const ui32 portionIdx, const std::shared_ptr<TPortionInfo>& portion, const std::shared_ptr<TReadContext>& context)
+ : Start(TReplaceKeyAdapter(context->GetReadMetadata()->IsDescSorted() ? portion->IndexKeyEnd() : portion->IndexKeyStart(),
+ context->GetReadMetadata()->IsDescSorted()),
+ portion->GetPortionId())
+ , SourceId(portion->GetPortionId())
+ , PortionIdx(portionIdx)
+ , RecordsCount(portion->GetRecordsCount()) {
+ }
+
+ bool operator<(const TSourceConstructor& item) const {
+ return item.Start < Start;
+ }
+
+ std::shared_ptr<TPortionDataSource> Construct(const std::shared_ptr<TSpecialReadContext>& context) const {
+ const auto& portions = context->GetReadMetadata()->SelectInfo->Portions;
+ AFL_VERIFY(PortionIdx < portions.size());
+ auto result = std::make_shared<TPortionDataSource>(PortionIdx, portions[PortionIdx], context);
+ if (IsStartedByCursorFlag) {
+ result->SetIsStartedByCursor();
+ }
+ FOR_DEBUG_LOG(NKikimrServices::COLUMNSHARD_SCAN_EVLOG, result->AddEvent("s"));
+ return result;
+ }
+};
+
} // namespace NKikimr::NOlap::NReader::NSimple
diff --git a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/ya.make b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/ya.make
index 45fef368d3..3917d8d913 100644
--- a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/ya.make
+++ b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/ya.make
@@ -8,6 +8,7 @@ SRCS(
context.cpp
fetching.cpp
iterator.cpp
+ collections.cpp
)
PEERDIR(
diff --git a/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/iterator.cpp b/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/iterator.cpp
index abbfad5c92..09d1e73944 100644
--- a/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/iterator.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/iterator.cpp
@@ -45,7 +45,7 @@ TConclusion<std::shared_ptr<TPartialReadResult>> TStatsIteratorBase::GetBatch()
{
NArrow::TColumnFilter filter = ReadMetadata->GetPKRangesFilter().BuildFilter(originalBatch);
- AFL_VERIFY(filter.Apply(originalBatch));
+ filter.Apply(originalBatch);
}
// Leave only requested columns
diff --git a/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/chunks.cpp b/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/chunks.cpp
index 1df5bfeae1..1d834877ba 100644
--- a/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/chunks.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/chunks.cpp
@@ -131,8 +131,7 @@ std::vector<std::pair<TString, NKikimr::NScheme::TTypeInfo>> TReadStatsMetadata:
std::shared_ptr<NAbstract::TReadStatsMetadata> TConstructor::BuildMetadata(
const NColumnShard::TColumnShard* self, const TReadDescription& read) const {
auto* index = self->GetIndexOptional();
- return std::make_shared<TReadStatsMetadata>(index ? index->CopyVersionedIndexPtr() : nullptr, self->TabletID(),
- IsReverse ? TReadMetadataBase::ESorting::DESC : TReadMetadataBase::ESorting::ASC, read.GetProgram(),
+ return std::make_shared<TReadStatsMetadata>(index ? index->CopyVersionedIndexPtr() : nullptr, self->TabletID(), Sorting, read.GetProgram(),
index ? index->GetVersionedIndex().GetLastSchema() : nullptr, read.GetSnapshot());
}
diff --git a/ydb/core/tx/columnshard/engines/reader/sys_view/granules/granules.cpp b/ydb/core/tx/columnshard/engines/reader/sys_view/granules/granules.cpp
index 62d911c5f8..0144236ed3 100644
--- a/ydb/core/tx/columnshard/engines/reader/sys_view/granules/granules.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/sys_view/granules/granules.cpp
@@ -1,7 +1,9 @@
#include "granules.h"
+
#include <ydb/core/formats/arrow/switch/switch_type.h>
#include <ydb/core/tx/columnshard/blobs_action/common/const.h>
#include <ydb/core/tx/columnshard/engines/reader/abstract/read_context.h>
+
#include <util/system/hostname.h>
namespace NKikimr::NOlap::NReader::NSysView::NGranules {
@@ -23,11 +25,11 @@ std::vector<std::pair<TString, NKikimr::NScheme::TTypeInfo>> TReadStatsMetadata:
return GetColumns(TStatsIterator::StatsSchema, TStatsIterator::StatsSchema.KeyColumns);
}
-std::shared_ptr<NAbstract::TReadStatsMetadata> TConstructor::BuildMetadata(const NColumnShard::TColumnShard* self, const TReadDescription& read) const {
+std::shared_ptr<NAbstract::TReadStatsMetadata> TConstructor::BuildMetadata(
+ const NColumnShard::TColumnShard* self, const TReadDescription& read) const {
auto* index = self->GetIndexOptional();
- return std::make_shared<TReadStatsMetadata>(index ? index->CopyVersionedIndexPtr() : nullptr, self->TabletID(),
- IsReverse ? TReadMetadataBase::ESorting::DESC : TReadMetadataBase::ESorting::ASC,
- read.GetProgram(), index ? index->GetVersionedIndex().GetLastSchema() : nullptr, read.GetSnapshot());
+ return std::make_shared<TReadStatsMetadata>(index ? index->CopyVersionedIndexPtr() : nullptr, self->TabletID(), Sorting, read.GetProgram(),
+ index ? index->GetVersionedIndex().GetLastSchema() : nullptr, read.GetSnapshot());
}
-}
+} // namespace NKikimr::NOlap::NReader::NSysView::NGranules
diff --git a/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/optimizer.cpp b/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/optimizer.cpp
index 813362a713..30ddcb83dc 100644
--- a/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/optimizer.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/optimizer.cpp
@@ -1,7 +1,9 @@
#include "optimizer.h"
+
#include <ydb/core/formats/arrow/switch/switch_type.h>
#include <ydb/core/tx/columnshard/blobs_action/common/const.h>
#include <ydb/core/tx/columnshard/engines/reader/abstract/read_context.h>
+
#include <util/system/hostname.h>
namespace NKikimr::NOlap::NReader::NSysView::NOptimizer {
@@ -30,11 +32,11 @@ std::vector<std::pair<TString, NKikimr::NScheme::TTypeInfo>> TReadStatsMetadata:
return GetColumns(TStatsIterator::StatsSchema, TStatsIterator::StatsSchema.KeyColumns);
}
-std::shared_ptr<NAbstract::TReadStatsMetadata> TConstructor::BuildMetadata(const NColumnShard::TColumnShard* self, const TReadDescription& read) const {
+std::shared_ptr<NAbstract::TReadStatsMetadata> TConstructor::BuildMetadata(
+ const NColumnShard::TColumnShard* self, const TReadDescription& read) const {
auto* index = self->GetIndexOptional();
- return std::make_shared<TReadStatsMetadata>(index ? index->CopyVersionedIndexPtr() : nullptr, self->TabletID(),
- IsReverse ? TReadMetadataBase::ESorting::DESC : TReadMetadataBase::ESorting::ASC,
- read.GetProgram(), index ? index->GetVersionedIndex().GetLastSchema() : nullptr, read.GetSnapshot());
+ return std::make_shared<TReadStatsMetadata>(index ? index->CopyVersionedIndexPtr() : nullptr, self->TabletID(), Sorting, read.GetProgram(),
+ index ? index->GetVersionedIndex().GetLastSchema() : nullptr, read.GetSnapshot());
}
-}
+} // namespace NKikimr::NOlap::NReader::NSysView::NOptimizer
diff --git a/ydb/core/tx/columnshard/engines/reader/sys_view/portions/portions.cpp b/ydb/core/tx/columnshard/engines/reader/sys_view/portions/portions.cpp
index 3e58ce3a82..267b0cfbbd 100644
--- a/ydb/core/tx/columnshard/engines/reader/sys_view/portions/portions.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/sys_view/portions/portions.cpp
@@ -1,4 +1,5 @@
#include "portions.h"
+
#include <ydb/core/formats/arrow/switch/switch_type.h>
#include <ydb/core/tx/columnshard/blobs_action/common/const.h>
#include <ydb/core/tx/columnshard/engines/reader/abstract/read_context.h>
@@ -60,11 +61,11 @@ std::vector<std::pair<TString, NKikimr::NScheme::TTypeInfo>> TReadStatsMetadata:
return GetColumns(TStatsIterator::StatsSchema, TStatsIterator::StatsSchema.KeyColumns);
}
-std::shared_ptr<NAbstract::TReadStatsMetadata> TConstructor::BuildMetadata(const NColumnShard::TColumnShard* self, const TReadDescription& read) const {
+std::shared_ptr<NAbstract::TReadStatsMetadata> TConstructor::BuildMetadata(
+ const NColumnShard::TColumnShard* self, const TReadDescription& read) const {
auto* index = self->GetIndexOptional();
- return std::make_shared<TReadStatsMetadata>(index ? index->CopyVersionedIndexPtr() : nullptr, self->TabletID(),
- IsReverse ? TReadMetadataBase::ESorting::DESC : TReadMetadataBase::ESorting::ASC,
- read.GetProgram(), index ? index->GetVersionedIndex().GetLastSchema() : nullptr, read.GetSnapshot());
+ return std::make_shared<TReadStatsMetadata>(index ? index->CopyVersionedIndexPtr() : nullptr, self->TabletID(), Sorting, read.GetProgram(),
+ index ? index->GetVersionedIndex().GetLastSchema() : nullptr, read.GetSnapshot());
}
-}
+} // namespace NKikimr::NOlap::NReader::NSysView::NPortions
diff --git a/ydb/core/tx/columnshard/engines/reader/transaction/tx_internal_scan.cpp b/ydb/core/tx/columnshard/engines/reader/transaction/tx_internal_scan.cpp
index 76a66c25b4..4ff5ae1a37 100644
--- a/ydb/core/tx/columnshard/engines/reader/transaction/tx_internal_scan.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/transaction/tx_internal_scan.cpp
@@ -38,9 +38,13 @@ void TTxInternalScan::Complete(const TActorContext& ctx) {
const NActors::TLogContextGuard gLogging =
NActors::TLogContextBuilder::Build()("tablet", Self->TabletID())("snapshot", snapshot.DebugString())("task_id", request.TaskIdentifier);
TReadMetadataPtr readMetadataRange;
- TScannerConstructorContext context(snapshot, 0, request.GetReverse());
+ const TReadMetadataBase::ESorting sorting = [&]() {
+ return request.GetReverse() ? TReadMetadataBase::ESorting::DESC : TReadMetadataBase::ESorting::ASC;
+ }();
+
+ TScannerConstructorContext context(snapshot, 0, sorting);
{
- TReadDescription read(snapshot, request.GetReverse());
+ TReadDescription read(snapshot, sorting);
read.SetScanIdentifier(request.TaskIdentifier);
read.PathId = request.GetPathId();
read.LockId = LockId;
diff --git a/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp b/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp
index 3ab63b339c..0982f11e1a 100644
--- a/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp
+++ b/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp
@@ -38,7 +38,16 @@ void TTxScan::Complete(const TActorContext& ctx) {
if (snapshot.IsZero()) {
snapshot = Self->GetLastTxSnapshot();
}
- TScannerConstructorContext context(snapshot, request.HasItemsLimit() ? request.GetItemsLimit() : 0, request.GetReverse());
+ const TReadMetadataBase::ESorting sorting =
+ [&]() {
+ if (request.HasReverse()) {
+ return request.GetReverse() ? TReadMetadataBase::ESorting::DESC : TReadMetadataBase::ESorting::ASC;
+ } else {
+ return TReadMetadataBase::ESorting::NONE;
+ }
+ }();
+
+ TScannerConstructorContext context(snapshot, request.HasItemsLimit() ? request.GetItemsLimit() : 0, sorting);
const auto scanId = request.GetScanId();
const ui64 txId = request.GetTxId();
const ui32 scanGen = request.GetGeneration();
@@ -55,7 +64,7 @@ void TTxScan::Complete(const TActorContext& ctx) {
{
LOG_S_DEBUG("TTxScan prepare txId: " << txId << " scanId: " << scanId << " at tablet " << Self->TabletID());
- TReadDescription read(snapshot, request.GetReverse());
+ TReadDescription read(snapshot, sorting);
read.TxId = txId;
if (request.HasLockTxId()) {
read.LockId = request.GetLockTxId();
@@ -95,7 +104,7 @@ void TTxScan::Complete(const TActorContext& ctx) {
if (!scannerConstructor) {
return SendError("cannot build scanner", AppDataVerified().ColumnShardConfig.GetReaderClassName(), ctx);
}
- {
+ if (request.HasScanCursor()) {
auto cursorConclusion = scannerConstructor->BuildCursorFromProto(request.GetScanCursor());
if (cursorConclusion.IsFail()) {
return SendError("cannot build scanner cursor", cursorConclusion.GetErrorMessage(), ctx);
@@ -110,28 +119,23 @@ void TTxScan::Complete(const TActorContext& ctx) {
if (!parseResult) {
return SendError("cannot parse program", parseResult.GetErrorMessage(), ctx);
}
-
- if (!request.RangesSize()) {
+ {
+ if (request.RangesSize()) {
+ auto ydbKey = scannerConstructor->GetPrimaryKeyScheme(Self);
+ {
+ auto filterConclusion = NOlap::TPKRangesFilter::BuildFromProto(request, ydbKey);
+ if (filterConclusion.IsFail()) {
+ return SendError("cannot build ranges filter", filterConclusion.GetErrorMessage(), ctx);
+ }
+ read.PKRangesFilter = std::make_shared<NOlap::TPKRangesFilter>(filterConclusion.DetachResult());
+ }
+ }
auto newRange = scannerConstructor->BuildReadMetadata(Self, read);
if (newRange.IsSuccess()) {
readMetadataRange = TValidator::CheckNotNull(newRange.DetachResult());
} else {
return SendError("cannot build metadata withno ranges", newRange.GetErrorMessage(), ctx);
}
- } else {
- auto ydbKey = scannerConstructor->GetPrimaryKeyScheme(Self);
- {
- auto filterConclusion = NOlap::TPKRangesFilter::BuildFromProto(request, request.GetReverse(), ydbKey);
- if (filterConclusion.IsFail()) {
- return SendError("cannot build ranges filter", filterConclusion.GetErrorMessage(), ctx);
- }
- read.PKRangesFilter = std::make_shared<NOlap::TPKRangesFilter>(filterConclusion.DetachResult());
- }
- auto newRange = scannerConstructor->BuildReadMetadata(Self, read);
- if (!newRange) {
- return SendError("cannot build metadata", newRange.GetErrorMessage(), ctx);
- }
- readMetadataRange = TValidator::CheckNotNull(newRange.DetachResult());
}
}
AFL_VERIFY(readMetadataRange);
diff --git a/ydb/core/tx/columnshard/engines/scheme/index_info.cpp b/ydb/core/tx/columnshard/engines/scheme/index_info.cpp
index b49760873c..12b1a1b9c6 100644
--- a/ydb/core/tx/columnshard/engines/scheme/index_info.cpp
+++ b/ydb/core/tx/columnshard/engines/scheme/index_info.cpp
@@ -667,7 +667,7 @@ ui32 TIndexInfo::GetColumnIndexVerified(const ui32 id) const {
}
std::vector<std::shared_ptr<NIndexes::TSkipIndex>> TIndexInfo::FindSkipIndexes(
- const NIndexes::NRequest::TOriginalDataAddress& originalDataAddress, const NArrow::NSSA::EIndexCheckOperation op) const {
+ const NIndexes::NRequest::TOriginalDataAddress& originalDataAddress, const NArrow::NSSA::TIndexCheckOperation& op) const {
std::vector<std::shared_ptr<NIndexes::TSkipIndex>> result;
for (auto&& [_, i] : Indexes) {
if (!i->IsSkipIndex()) {
diff --git a/ydb/core/tx/columnshard/engines/scheme/index_info.h b/ydb/core/tx/columnshard/engines/scheme/index_info.h
index bb45f1e45a..92574ef1a2 100644
--- a/ydb/core/tx/columnshard/engines/scheme/index_info.h
+++ b/ydb/core/tx/columnshard/engines/scheme/index_info.h
@@ -174,6 +174,11 @@ public:
return ColumnFeatures[colIndex]->GetIsNullable();
}
+ const TColumnFeatures& GetColumnFeaturesVerifiedByIndex(const ui32 colIndex) const {
+ AFL_VERIFY(colIndex < ColumnFeatures.size());
+ return *ColumnFeatures[colIndex];
+ }
+
bool IsNullableVerified(const ui32 colId) const {
return GetColumnFeaturesVerified(colId).GetIsNullable();
}
@@ -330,7 +335,7 @@ public:
}
std::vector<std::shared_ptr<NIndexes::TSkipIndex>> FindSkipIndexes(
- const NIndexes::NRequest::TOriginalDataAddress& originalDataAddress, const NArrow::NSSA::EIndexCheckOperation op) const;
+ const NIndexes::NRequest::TOriginalDataAddress& originalDataAddress, const NArrow::NSSA::TIndexCheckOperation& op) const;
std::shared_ptr<NIndexes::NMax::TIndexMeta> GetIndexMetaMax(const ui32 columnId) const;
std::shared_ptr<NIndexes::NCountMinSketch::TIndexMeta> GetIndexMetaCountMinSketch(const std::set<ui32>& columnIds) const;
diff --git a/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.cpp b/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.cpp
index 07a765aacb..12b6d788a3 100644
--- a/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.cpp
+++ b/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.cpp
@@ -6,7 +6,7 @@
namespace NKikimr::NOlap::NIndexes {
std::shared_ptr<IIndexMeta> TIndexesCollection::FindIndexFor(
- const NRequest::TOriginalDataAddress& address, const NArrow::NSSA::EIndexCheckOperation op) const {
+ const NRequest::TOriginalDataAddress& address, const NArrow::NSSA::TIndexCheckOperation& op) const {
auto it = IndexByOriginalData.find(address);
if (it == IndexByOriginalData.end()) {
return nullptr;
diff --git a/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.h b/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.h
index 5b6a88985f..1fc9c5adc2 100644
--- a/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.h
+++ b/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/collection.h
@@ -181,7 +181,7 @@ public:
}
}
- std::shared_ptr<IIndexMeta> FindIndexFor(const NRequest::TOriginalDataAddress& address, const NArrow::NSSA::EIndexCheckOperation op) const;
+ std::shared_ptr<IIndexMeta> FindIndexFor(const NRequest::TOriginalDataAddress& address, const NArrow::NSSA::TIndexCheckOperation& op) const;
};
} // namespace NKikimr::NOlap::NIndexes
diff --git a/ydb/core/tx/columnshard/engines/scheme/versions/abstract_scheme.cpp b/ydb/core/tx/columnshard/engines/scheme/versions/abstract_scheme.cpp
index b93bb5d1cb..b9a16ee068 100644
--- a/ydb/core/tx/columnshard/engines/scheme/versions/abstract_scheme.cpp
+++ b/ydb/core/tx/columnshard/engines/scheme/versions/abstract_scheme.cpp
@@ -101,8 +101,17 @@ TConclusion<NArrow::TContainerWithIndexes<arrow::RecordBatch>> ISnapshotSchema::
if (targetIdx == -1) {
return TConclusionStatus::Success();
}
- const auto hasNull = NArrow::HasNulls(incomingBatch->column(incomingIdx));
- const std::optional<i32> pkFieldIdx = GetIndexInfo().GetPKColumnIndexByIndexVerified(targetIdx);
+ const auto& incomingColumn = incomingBatch->column(incomingIdx);
+ const auto hasNull = NArrow::HasNulls(incomingColumn);
+ const TColumnFeatures& features = GetIndexInfo().GetColumnFeaturesVerifiedByIndex(targetIdx);
+ const std::optional<i32> pkFieldIdx = features.GetPKColumnIndex();
+ if (!features.GetDataAccessorConstructor()->HasInternalConversion() || !!pkFieldIdx) {
+ if (!features.GetArrowField()->type()->Equals(incomingColumn->type())) {
+ return TConclusionStatus::Fail(
+ "not equal type for column: " + features.GetColumnName() + ": " + features.GetArrowField()->type()->ToString()
+ + " vs " + incomingColumn->type()->ToString());
+ }
+ }
if (pkFieldIdx && hasNull && !AppData()->ColumnShardConfig.GetAllowNullableColumnsInPK()) {
return TConclusionStatus::Fail("null data for pk column is impossible for '" + dstSchema.field(targetIdx)->name() + "'");
}
@@ -130,7 +139,7 @@ TConclusion<NArrow::TContainerWithIndexes<arrow::RecordBatch>> ISnapshotSchema::
const auto nameResolver = [&](const std::string& fieldName) -> i32 {
return GetIndexInfo().GetColumnIndexOptional(fieldName).value_or(-1);
};
- auto batchConclusion = NArrow::TColumnOperator().SkipIfAbsent().ErrorOnDifferentFieldTypes().AdaptIncomingToDestinationExt(
+ auto batchConclusion = NArrow::TColumnOperator().SkipIfAbsent().IgnoreOnDifferentFieldTypes().AdaptIncomingToDestinationExt(
incomingBatch, dstSchema, pred, nameResolver);
if (batchConclusion.IsFail()) {
return batchConclusion;
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/abstract.h b/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/abstract.h
index 923bd35a84..208daf711a 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/abstract.h
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/abstract.h
@@ -50,7 +50,7 @@ private:
virtual TConclusion<std::shared_ptr<IBitsStorage>> DoBuild(const TString& data) const = 0;
public:
- static std::shared_ptr<IBitsStorageConstructor> GetDefault();;
+ static std::shared_ptr<IBitsStorageConstructor> GetDefault();
virtual ~IBitsStorageConstructor() = default;
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/bitset.h b/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/bitset.h
index 4674af2437..63b7e6abfc 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/bitset.h
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/bitset.h
@@ -30,6 +30,9 @@ public:
}
bool TestHash(const ui64 hash) const {
+ if (!Bits.Size()) {
+ return false;
+ }
return Bits.Get(hash % Bits.Size());
}
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/string.cpp b/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/string.cpp
index c2a4b2a451..54e52e8034 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/string.cpp
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/string.cpp
@@ -7,6 +7,9 @@
namespace NKikimr::NOlap::NIndexes {
bool TFixStringBitsStorage::DoGet(const ui32 idx) const {
+ if (!Data.size()) {
+ return false;
+ }
AFL_VERIFY(idx < Data.size() * 8);
const ui8 start = (*(ui8*)&Data[idx / 8]);
return start & (1 << (idx % 8));
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.cpp b/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.cpp
index 6a2bc50a2d..2ba8811ff4 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.cpp
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.cpp
@@ -63,10 +63,10 @@ TString TBloomIndexMeta::DoBuildIndexImpl(TChunkedBatchReader& reader, const ui3
return GetBitsStorageConstructor()->Build(std::move(filterBits))->SerializeToString();
}
-bool TBloomIndexMeta::DoCheckValueImpl(
- const IBitsStorage& data, const std::optional<ui64> category, const std::shared_ptr<arrow::Scalar>& value, const EOperation op) const {
+bool TBloomIndexMeta::DoCheckValueImpl(const IBitsStorage& data, const std::optional<ui64> category, const std::shared_ptr<arrow::Scalar>& value,
+ const NArrow::NSSA::TIndexCheckOperation& op) const {
std::set<ui64> hashes;
- AFL_VERIFY(op == EOperation::Equals)("op", op);
+ AFL_VERIFY(op.GetOperation() == EOperation::Equals)("op", op.DebugString());
const ui32 bitsCount = data.GetBitsCount();
if (!!category) {
for (ui64 hashSeed = 0; hashSeed < HashesCount; ++hashSeed) {
@@ -88,8 +88,8 @@ bool TBloomIndexMeta::DoCheckValueImpl(
std::optional<ui64> TBloomIndexMeta::DoCalcCategory(const TString& subColumnName) const {
ui64 result;
- const NRequest::TOriginalDataAddress addr(Max<ui32>(), subColumnName);
- AFL_VERIFY(GetDataExtractor()->CheckForIndex(addr, result));
+ const NRequest::TOriginalDataAddress addr(GetColumnId(), subColumnName);
+ AFL_VERIFY(GetDataExtractor()->CheckForIndex(addr, &result));
if (subColumnName) {
return result;
} else {
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.h b/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.h
index 2be01b8d79..bdb39bb445 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.h
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/bloom/meta.h
@@ -20,8 +20,8 @@ private:
virtual std::optional<ui64> DoCalcCategory(const TString& subColumnName) const override;
- virtual bool DoIsAppropriateFor(const TString& /*subColumnName*/, const EOperation op) const override {
- return op == EOperation::Equals;
+ virtual bool DoIsAppropriateFor(const NArrow::NSSA::TIndexCheckOperation& op) const override {
+ return op.GetOperation() == EOperation::Equals && op.GetCaseSensitive();
}
protected:
@@ -32,7 +32,7 @@ protected:
virtual void DoSerializeToProto(NKikimrSchemeOp::TOlapIndexDescription& proto) const override;
virtual bool DoCheckValueImpl(const IBitsStorage& data, const std::optional<ui64> category, const std::shared_ptr<arrow::Scalar>& value,
- const EOperation op) const override;
+ const NArrow::NSSA::TIndexCheckOperation& op) const override;
public:
TBloomIndexMeta() = default;
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.cpp b/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.cpp
index b73e9195fe..60131226f0 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.cpp
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.cpp
@@ -16,7 +16,7 @@ std::shared_ptr<IIndexMeta> TIndexConstructor::DoCreateIndexMeta(
}
const ui32 columnId = columnInfo->GetId();
return std::make_shared<TIndexMeta>(indexId, indexName, GetStorageId().value_or(NBlobOperations::TGlobal::DefaultStorageId), columnId,
- GetDataExtractor(), HashesCount, FilterSizeBytes, NGrammSize, RecordsCount, TBase::GetBitsStorageConstructor());
+ GetDataExtractor(), HashesCount, FilterSizeBytes, NGrammSize, RecordsCount, TBase::GetBitsStorageConstructor(), CaseSensitive);
}
TConclusionStatus TIndexConstructor::DoDeserializeFromJson(const NJson::TJsonValue& jsonInfo) {
@@ -61,7 +61,15 @@ TConclusionStatus TIndexConstructor::DoDeserializeFromJson(const NJson::TJsonVal
return TConclusionStatus::Fail(
"hashes_count have to be in bloom ngramm filter in interval " + TConstants::GetHashesCountIntervalString());
}
+
+ if (jsonInfo.Has("case_sensitive")) {
+ if (!jsonInfo["case_sensitive"].IsBoolean()) {
+ return TConclusionStatus::Fail("case_sensitive have to be in bloom filter features as boolean field");
+ }
+ CaseSensitive = jsonInfo["case_sensitive"].GetBoolean();
+ }
return TConclusionStatus::Success();
+
}
NKikimr::TConclusionStatus TIndexConstructor::DoDeserializeFromProto(const NKikimrSchemeOp::TOlapIndexRequested& proto) {
@@ -77,6 +85,9 @@ NKikimr::TConclusionStatus TIndexConstructor::DoDeserializeFromProto(const NKiki
return conclusion;
}
}
+ if (bFilter.HasCaseSensitive()) {
+ CaseSensitive = bFilter.GetCaseSensitive();
+ }
RecordsCount = bFilter.GetRecordsCount();
if (!TConstants::CheckRecordsCount(RecordsCount)) {
return TConclusionStatus::Fail("RecordsCount have to be in " + TConstants::GetRecordsCountIntervalString());
@@ -107,6 +118,7 @@ void TIndexConstructor::DoSerializeToProto(NKikimrSchemeOp::TOlapIndexRequested&
auto* filterProto = proto.MutableBloomNGrammFilter();
TBase::SerializeToProtoBitsStorageOnly(*filterProto);
filterProto->SetColumnName(GetColumnName());
+ filterProto->SetCaseSensitive(CaseSensitive);
filterProto->SetRecordsCount(RecordsCount);
filterProto->SetNGrammSize(NGrammSize);
filterProto->SetFilterSizeBytes(FilterSizeBytes);
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.h b/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.h
index 375de80ebd..343984c0a9 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.h
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/constructor.h
@@ -19,6 +19,7 @@ private:
ui32 FilterSizeBytes = 512;
ui32 HashesCount = 2;
ui32 RecordsCount = 10000;
+ bool CaseSensitive = true;
static inline auto Registrator = TFactory::TRegistrator<TIndexConstructor>(GetClassNameStatic());
protected:
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.cpp b/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.cpp
index c0dabbc4ba..f0d5443b89 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.cpp
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.cpp
@@ -16,6 +16,7 @@ namespace NKikimr::NOlap::NIndexes::NBloomNGramm {
class TNGrammBuilder {
private:
const ui32 HashesCount;
+ const bool CaseSensitive;
template <ui32 CharsRemained>
class THashesBuilder {
@@ -133,17 +134,29 @@ private:
AFL_VERIFY(false);
}
};
+ TBuffer LowerStringBuffer;
public:
- TNGrammBuilder(const ui32 hashesCount)
- : HashesCount(hashesCount) {
+ TNGrammBuilder(const ui32 hashesCount, const bool caseSensitive)
+ : HashesCount(hashesCount)
+ , CaseSensitive(caseSensitive) {
}
template <class TAction>
void BuildNGramms(
const char* data, const ui32 dataSize, const std::optional<NRequest::TLikePart::EOperation> op, const ui32 nGrammSize, TAction& pred) {
- THashesSelector<TConstants::MaxHashesCount, TConstants::MaxNGrammSize>::BuildHashes(
- (const ui8*)data, dataSize, HashesCount, nGrammSize, op, pred);
+        if (CaseSensitive) {
+            THashesSelector<TConstants::MaxHashesCount, TConstants::MaxNGrammSize>::BuildHashes(
+                (const ui8*)data, dataSize, HashesCount, nGrammSize, op, pred);
+        } else {
+            LowerStringBuffer.Clear();
+            LowerStringBuffer.Reserve(dataSize);
+            for (ui32 i = 0; i < dataSize; ++i) {
+                LowerStringBuffer.Append(std::tolower(data[i]));
+            }
+            THashesSelector<TConstants::MaxHashesCount, TConstants::MaxNGrammSize>::BuildHashes(
+                (const ui8*)LowerStringBuffer.Data(), dataSize, HashesCount, nGrammSize, op, pred);
+        }
}
template <class TFiller>
@@ -171,8 +184,14 @@ public:
}
template <class TFiller>
- void FillNGrammHashes(const ui32 nGrammSize, const NRequest::TLikePart::EOperation op, const TString& userReq, TFiller& fillData) {
- BuildNGramms(userReq.data(), userReq.size(), op, nGrammSize, fillData);
+ void FillNGrammHashes(
+ const ui32 nGrammSize, const NRequest::TLikePart::EOperation op, const TString& userReq, TFiller& fillData) {
+ if (CaseSensitive) {
+ BuildNGramms(userReq.data(), userReq.size(), op, nGrammSize, fillData);
+ } else {
+ const TString lowerString = to_lower(userReq);
+ BuildNGramms(lowerString.data(), lowerString.size(), op, nGrammSize, fillData);
+ }
}
};
@@ -259,7 +278,7 @@ public:
TString TIndexMeta::DoBuildIndexImpl(TChunkedBatchReader& reader, const ui32 recordsCount) const {
AFL_VERIFY(reader.GetColumnsCount() == 1)("count", reader.GetColumnsCount());
- TNGrammBuilder builder(HashesCount);
+ TNGrammBuilder builder(HashesCount, CaseSensitive);
ui32 size = FilterSizeBytes * 8;
if ((size & (size - 1)) == 0) {
@@ -311,8 +330,8 @@ TString TIndexMeta::DoBuildIndexImpl(TChunkedBatchReader& reader, const ui32 rec
return GetBitsStorageConstructor()->Build(inserter.ExtractBits())->SerializeToString();
}
-bool TIndexMeta::DoCheckValueImpl(
- const IBitsStorage& data, const std::optional<ui64> category, const std::shared_ptr<arrow::Scalar>& value, const EOperation op) const {
+bool TIndexMeta::DoCheckValueImpl(const IBitsStorage& data, const std::optional<ui64> category, const std::shared_ptr<arrow::Scalar>& value,
+ const NArrow::NSSA::TIndexCheckOperation& op) const {
AFL_VERIFY(!category);
AFL_VERIFY(value->type->id() == arrow::utf8()->id() || value->type->id() == arrow::binary()->id())("id", value->type->ToString());
bool result = true;
@@ -322,10 +341,11 @@ bool TIndexMeta::DoCheckValueImpl(
result = false;
}
};
- TNGrammBuilder builder(HashesCount);
+ TNGrammBuilder builder(HashesCount, CaseSensitive);
+ AFL_VERIFY(!CaseSensitive || op.GetCaseSensitive());
NRequest::TLikePart::EOperation opLike;
- switch (op) {
+ switch (op.GetOperation()) {
case TSkipIndex::EOperation::Equals:
opLike = NRequest::TLikePart::EOperation::Equals;
break;
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.h b/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.h
index d1326c3da4..31f29d1e7a 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.h
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/meta.h
@@ -13,6 +13,7 @@ public:
private:
using TBase = TSkipBitmapIndex;
std::shared_ptr<arrow::Schema> ResultSchema;
+ bool CaseSensitive = true;
ui32 NGrammSize = 3;
ui32 FilterSizeBytes = 512;
ui32 RecordsCount = 10000;
@@ -28,16 +29,13 @@ private:
AFL_VERIFY(TConstants::CheckRecordsCount(RecordsCount));
}
- virtual bool DoIsAppropriateFor(const TString& subColumnName, const EOperation op) const override {
- if (!!subColumnName) {
- return false;
- }
- switch (op) {
+ virtual bool DoIsAppropriateFor(const NArrow::NSSA::TIndexCheckOperation& op) const override {
+ switch (op.GetOperation()) {
case EOperation::Equals:
case EOperation::StartsWith:
case EOperation::EndsWith:
case EOperation::Contains:
- return true;
+ return !CaseSensitive || op.GetCaseSensitive();
}
return false;
@@ -50,12 +48,6 @@ protected:
return TConclusionStatus::Fail(
"cannot read meta as appropriate class: " + GetClassName() + ". Meta said that class name is " + newMeta.GetClassName());
}
- if (HashesCount != bMeta->HashesCount) {
- return TConclusionStatus::Fail("cannot modify hashes count");
- }
- if (NGrammSize != bMeta->NGrammSize) {
- return TConclusionStatus::Fail("cannot modify ngramm size");
- }
return TBase::CheckSameColumnsForModification(newMeta);
}
virtual TString DoBuildIndexImpl(TChunkedBatchReader& reader, const ui32 recordsCount) const override;
@@ -80,6 +72,9 @@ protected:
if (!MutableDataExtractor().DeserializeFromProto(bFilter.GetDataExtractor())) {
return false;
}
+ if (bFilter.HasCaseSensitive()) {
+ CaseSensitive = bFilter.GetCaseSensitive();
+ }
HashesCount = bFilter.GetHashesCount();
if (!TConstants::CheckHashesCount(HashesCount)) {
return false;
@@ -111,18 +106,20 @@ protected:
filterProto->SetFilterSizeBytes(FilterSizeBytes);
filterProto->SetHashesCount(HashesCount);
filterProto->SetColumnId(GetColumnId());
+ filterProto->SetCaseSensitive(CaseSensitive);
*filterProto->MutableDataExtractor() = GetDataExtractor().SerializeToProto();
}
virtual bool DoCheckValueImpl(const IBitsStorage& data, const std::optional<ui64> category, const std::shared_ptr<arrow::Scalar>& value,
- const EOperation op) const override;
+ const NArrow::NSSA::TIndexCheckOperation& op) const override;
public:
TIndexMeta() = default;
TIndexMeta(const ui32 indexId, const TString& indexName, const TString& storageId, const ui32 columnId,
const TReadDataExtractorContainer& dataExtractor, const ui32 hashesCount, const ui32 filterSizeBytes, const ui32 nGrammSize,
- const ui32 recordsCount, const std::shared_ptr<IBitsStorageConstructor>& bitsStorageConstructor)
+ const ui32 recordsCount, const std::shared_ptr<IBitsStorageConstructor>& bitsStorageConstructor, const bool caseSensitive)
: TBase(indexId, indexName, columnId, storageId, dataExtractor, bitsStorageConstructor)
+ , CaseSensitive(caseSensitive)
, NGrammSize(nGrammSize)
, FilterSizeBytes(filterSizeBytes)
, RecordsCount(recordsCount)
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.cpp b/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.cpp
index e6d7c62b1e..6ce01b442d 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.cpp
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.cpp
@@ -166,10 +166,11 @@ TConclusion<std::shared_ptr<IIndexHeader>> TIndexMeta::DoBuildHeader(const TChun
return std::make_shared<TCompositeBloomHeader>(std::move(proto), IIndexHeader::ReadHeaderSize(data.GetDataVerified(), true).DetachResult());
}
-bool TIndexMeta::DoCheckValueImpl(
- const IBitsStorage& data, const std::optional<ui64> category, const std::shared_ptr<arrow::Scalar>& value, const EOperation op) const {
+bool TIndexMeta::DoCheckValueImpl(const IBitsStorage& data, const std::optional<ui64> category, const std::shared_ptr<arrow::Scalar>& value,
+ const NArrow::NSSA::TIndexCheckOperation& op) const {
AFL_VERIFY(!!category);
- AFL_VERIFY(op == EOperation::Equals)("op", op);
+ AFL_VERIFY(op.GetOperation() == EOperation::Equals)("op", op.DebugString());
+ AFL_VERIFY(op.GetCaseSensitive());
const ui32 bitsCount = data.GetBitsCount();
if (!bitsCount) {
return false;
@@ -185,8 +186,8 @@ bool TIndexMeta::DoCheckValueImpl(
std::optional<ui64> TIndexMeta::DoCalcCategory(const TString& subColumnName) const {
ui64 result;
- const NRequest::TOriginalDataAddress addr(Max<ui32>(), subColumnName);
- AFL_VERIFY(GetDataExtractor()->CheckForIndex(addr, result));
+ const NRequest::TOriginalDataAddress addr(GetColumnId(), subColumnName);
+ AFL_VERIFY(GetDataExtractor()->CheckForIndex(addr, &result));
return result;
}
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.h b/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.h
index 6f7a239697..decf03fb79 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.h
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/meta.h
@@ -21,15 +21,15 @@ private:
}
virtual bool DoCheckValueImpl(const IBitsStorage& data, const std::optional<ui64> category, const std::shared_ptr<arrow::Scalar>& value,
- const EOperation op) const override;
+ const NArrow::NSSA::TIndexCheckOperation& op) const override;
virtual TConclusion<std::shared_ptr<IIndexHeader>> DoBuildHeader(const TChunkOriginalData& data) const override;
- virtual bool DoIsAppropriateFor(const TString& subColumnName, const EOperation op) const override {
- if (!subColumnName) {
+ virtual bool DoIsAppropriateFor(const NArrow::NSSA::TIndexCheckOperation& op) const override {
+ if (!op.GetCaseSensitive()) {
return false;
}
- if (op != EOperation::Equals) {
+ if (op.GetOperation() != EOperation::Equals) {
return false;
}
return true;
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/abstract.h b/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/abstract.h
index 82fda99971..ebdb32051a 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/abstract.h
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/abstract.h
@@ -23,7 +23,7 @@ private:
virtual void DoSerializeToProto(TProto& proto) const = 0;
virtual bool DoDeserializeFromProto(const TProto& proto) = 0;
- virtual bool DoCheckForIndex(const NRequest::TOriginalDataAddress& dataSource, ui64& baseHash) const = 0;
+ virtual bool DoCheckForIndex(const NRequest::TOriginalDataAddress& dataSource, ui64* baseHash) const = 0;
virtual THashMap<ui64, ui32> DoGetIndexHitsCount(const std::shared_ptr<NArrow::NAccessor::IChunkedArray>& dataArray) const = 0;
public:
@@ -34,8 +34,10 @@ public:
return DoGetIndexHitsCount(dataArray);
}
- bool CheckForIndex(const NRequest::TOriginalDataAddress& dataSource, ui64& baseHash) const {
- baseHash = 0;
+ bool CheckForIndex(const NRequest::TOriginalDataAddress& dataSource, ui64* baseHash) const {
+ if (baseHash) {
+ *baseHash = 0;
+ }
return DoCheckForIndex(dataSource, baseHash);
}
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/default.cpp b/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/default.cpp
index 2da8da4da2..9a38a9580e 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/default.cpp
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/default.cpp
@@ -37,7 +37,7 @@ void TDefaultDataExtractor::DoVisitAll(const std::shared_ptr<NArrow::NAccessor::
}
}
-bool TDefaultDataExtractor::DoCheckForIndex(const NRequest::TOriginalDataAddress& request, ui64& hashBase) const {
+bool TDefaultDataExtractor::DoCheckForIndex(const NRequest::TOriginalDataAddress& request, ui64* hashBase) const {
if (request.GetSubColumnName()) {
std::string_view sv = [&]() {
if (request.GetSubColumnName().StartsWith("$.")) {
@@ -46,10 +46,9 @@ bool TDefaultDataExtractor::DoCheckForIndex(const NRequest::TOriginalDataAddress
return std::string_view(request.GetSubColumnName().data(), request.GetSubColumnName().size());
}
}();
- if (sv.starts_with("\"") && sv.ends_with("\"")) {
- sv = std::string_view(sv.data() + 1, sv.size() - 2);
+ if (hashBase) {
+ *hashBase = NRequest::TOriginalDataAddress::CalcSubColumnHash(sv);
}
- hashBase = NRequest::TOriginalDataAddress::CalcSubColumnHash(sv);
}
return true;
}
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/default.h b/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/default.h
index 64f534edf5..47de05f3f9 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/default.h
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/default.h
@@ -27,7 +27,7 @@ private:
virtual void DoVisitAll(const std::shared_ptr<NArrow::NAccessor::IChunkedArray>& dataArray, const TChunkVisitor& chunkVisitor,
const TRecordVisitor& recordVisitor) const override;
- virtual bool DoCheckForIndex(const NRequest::TOriginalDataAddress& request, ui64& hashBase) const override;
+ virtual bool DoCheckForIndex(const NRequest::TOriginalDataAddress& request, ui64* hashBase) const override;
virtual THashMap<ui64, ui32> DoGetIndexHitsCount(const std::shared_ptr<NArrow::NAccessor::IChunkedArray>& dataArray) const override;
public:
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/sub_column.h b/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/sub_column.h
index f7e9e6daa6..6479afc789 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/sub_column.h
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/sub_column.h
@@ -45,7 +45,7 @@ private:
virtual void DoVisitAll(const std::shared_ptr<NArrow::NAccessor::IChunkedArray>& dataArray, const TChunkVisitor& chunkVisitor,
const TRecordVisitor& recordVisitor) const override;
- virtual bool DoCheckForIndex(const NRequest::TOriginalDataAddress& request, ui64& /*hashBase*/) const override {
+ virtual bool DoCheckForIndex(const NRequest::TOriginalDataAddress& request, ui64* /*hashBase*/) const override {
return request.GetSubColumnName() == SubColumnName;
}
diff --git a/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/meta.h b/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/meta.h
index eb63fe0d6b..3294190887 100644
--- a/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/meta.h
+++ b/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/meta.h
@@ -13,15 +13,16 @@ private:
using TBase = TIndexByColumns;
public:
- using EOperation = NArrow::NSSA::EIndexCheckOperation;
+ using EOperation = NArrow::NSSA::TIndexCheckOperation::EOperation;
private:
- virtual bool DoIsAppropriateFor(const TString& subColumnName, const EOperation op) const = 0;
- virtual bool DoCheckValue(
- const TString& data, const std::optional<ui64> cat, const std::shared_ptr<arrow::Scalar>& value, const EOperation op) const = 0;
+ virtual bool DoIsAppropriateFor(const NArrow::NSSA::TIndexCheckOperation& op) const = 0;
+ virtual bool DoCheckValue(const TString& data, const std::optional<ui64> cat, const std::shared_ptr<arrow::Scalar>& value,
+ const NArrow::NSSA::TIndexCheckOperation& op) const = 0;
public:
- bool CheckValue(const TString& data, const std::optional<ui64> cat, const std::shared_ptr<arrow::Scalar>& value, const EOperation op) const {
+ bool CheckValue(const TString& data, const std::optional<ui64> cat, const std::shared_ptr<arrow::Scalar>& value,
+ const NArrow::NSSA::TIndexCheckOperation& op) const {
return DoCheckValue(data, cat, value, op);
}
@@ -29,11 +30,14 @@ public:
return true;
}
- bool IsAppropriateFor(const NRequest::TOriginalDataAddress& addr, const EOperation op) const {
+ bool IsAppropriateFor(const NRequest::TOriginalDataAddress& addr, const NArrow::NSSA::TIndexCheckOperation& op) const {
if (GetColumnId() != addr.GetColumnId()) {
return false;
}
- return DoIsAppropriateFor(addr.GetSubColumnName(), op);
+ if (!GetDataExtractor()->CheckForIndex(addr, nullptr)) {
+ return false;
+ }
+ return DoIsAppropriateFor(op);
}
using TBase::TBase;
};
@@ -42,11 +46,14 @@ class TSkipBitmapIndex: public TSkipIndex {
private:
std::shared_ptr<IBitsStorageConstructor> BitsStorageConstructor;
using TBase = TSkipIndex;
- virtual bool DoCheckValueImpl(
- const IBitsStorage& data, const std::optional<ui64> cat, const std::shared_ptr<arrow::Scalar>& value, const EOperation op) const = 0;
+ virtual bool DoCheckValueImpl(const IBitsStorage& data, const std::optional<ui64> cat, const std::shared_ptr<arrow::Scalar>& value,
+ const NArrow::NSSA::TIndexCheckOperation& op) const = 0;
virtual bool DoCheckValue(const TString& data, const std::optional<ui64> cat, const std::shared_ptr<arrow::Scalar>& value,
- const EOperation op) const override final {
+ const NArrow::NSSA::TIndexCheckOperation& op) const override final {
+ if (data.empty()) {
+ return false;
+ }
auto storageConclusion = BitsStorageConstructor->Build(data);
return DoCheckValueImpl(*storageConclusion.GetResult(), cat, value, op);
}
diff --git a/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/zero_level.cpp b/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/zero_level.cpp
index 60e99b9a3d..a3a1d87575 100644
--- a/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/zero_level.cpp
+++ b/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/zero_level.cpp
@@ -68,4 +68,16 @@ TInstant TZeroLevelPortions::DoGetWeightExpirationInstant() const {
return *PredOptimization + DurationToDrop;
}
+TZeroLevelPortions::TZeroLevelPortions(const ui32 levelIdx, const std::shared_ptr<IPortionsLevel>& nextLevel,
+ const TLevelCounters& levelCounters, const TDuration durationToDrop, const ui64 expectedBlobsSize, const ui64 portionsCountAvailable)
+ : TBase(levelIdx, nextLevel)
+ , LevelCounters(levelCounters)
+ , DurationToDrop(durationToDrop)
+ , ExpectedBlobsSize(expectedBlobsSize)
+ , PortionsCountAvailable(portionsCountAvailable) {
+    if (DurationToDrop != TDuration::Max() && DurationToDrop.Seconds() > 0 && PredOptimization) {
+ *PredOptimization -= TDuration::Seconds(RandomNumber<ui32>(DurationToDrop.Seconds()));
+ }
+}
+
} // namespace NKikimr::NOlap::NStorageOptimizer::NLCBuckets
diff --git a/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/zero_level.h b/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/zero_level.h
index e4cbd13259..a9a6f38fa0 100644
--- a/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/zero_level.h
+++ b/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/zero_level.h
@@ -95,13 +95,7 @@ private:
public:
TZeroLevelPortions(const ui32 levelIdx, const std::shared_ptr<IPortionsLevel>& nextLevel, const TLevelCounters& levelCounters,
- const TDuration durationToDrop, const ui64 expectedBlobsSize, const ui64 portionsCountAvailable)
- : TBase(levelIdx, nextLevel)
- , LevelCounters(levelCounters)
- , DurationToDrop(durationToDrop)
- , ExpectedBlobsSize(expectedBlobsSize)
- , PortionsCountAvailable(portionsCountAvailable) {
- }
+ const TDuration durationToDrop, const ui64 expectedBlobsSize, const ui64 portionsCountAvailable);
};
} // namespace NKikimr::NOlap::NStorageOptimizer::NLCBuckets
diff --git a/ydb/core/tx/columnshard/engines/ut/ut_logs_engine.cpp b/ydb/core/tx/columnshard/engines/ut/ut_logs_engine.cpp
index 017d43015c..c8b4dc2dc6 100644
--- a/ydb/core/tx/columnshard/engines/ut/ut_logs_engine.cpp
+++ b/ydb/core/tx/columnshard/engines/ut/ut_logs_engine.cpp
@@ -570,28 +570,28 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
{ // select from snap before insert
ui64 planStep = 1;
ui64 txId = 0;
- auto selectInfo = engine.Select(paths[0], TSnapshot(planStep, txId), NOlap::TPKRangesFilter(false), false);
+ auto selectInfo = engine.Select(paths[0], TSnapshot(planStep, txId), NOlap::TPKRangesFilter(), false);
UNIT_ASSERT_VALUES_EQUAL(selectInfo->Portions.size(), 0);
}
{ // select from snap between insert (greater txId)
ui64 planStep = 1;
ui64 txId = 2;
- auto selectInfo = engine.Select(paths[0], TSnapshot(planStep, txId), NOlap::TPKRangesFilter(false), false);
+ auto selectInfo = engine.Select(paths[0], TSnapshot(planStep, txId), NOlap::TPKRangesFilter(), false);
UNIT_ASSERT_VALUES_EQUAL(selectInfo->Portions.size(), 0);
}
{ // select from snap after insert (greater planStep)
ui64 planStep = 2;
ui64 txId = 1;
- auto selectInfo = engine.Select(paths[0], TSnapshot(planStep, txId), NOlap::TPKRangesFilter(false), false);
+ auto selectInfo = engine.Select(paths[0], TSnapshot(planStep, txId), NOlap::TPKRangesFilter(), false);
UNIT_ASSERT_VALUES_EQUAL(selectInfo->Portions.size(), 1);
}
{ // select another pathId
ui64 planStep = 2;
ui64 txId = 1;
- auto selectInfo = engine.Select(paths[1], TSnapshot(planStep, txId), NOlap::TPKRangesFilter(false), false);
+ auto selectInfo = engine.Select(paths[1], TSnapshot(planStep, txId), NOlap::TPKRangesFilter(), false);
UNIT_ASSERT_VALUES_EQUAL(selectInfo->Portions.size(), 0);
}
}
@@ -661,7 +661,7 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
{ // full scan
ui64 txId = 1;
- auto selectInfo = engine.Select(pathId, TSnapshot(planStep, txId), NOlap::TPKRangesFilter(false), false);
+ auto selectInfo = engine.Select(pathId, TSnapshot(planStep, txId), NOlap::TPKRangesFilter(), false);
UNIT_ASSERT_VALUES_EQUAL(selectInfo->Portions.size(), 20);
}
@@ -673,7 +673,7 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
if (key[0].GetType() == TTypeInfo(NTypeIds::Utf8)) {
gt10k = MakeStrPredicate("10000", NKikimr::NKernels::EOperation::Greater);
}
- NOlap::TPKRangesFilter pkFilter(false);
+ NOlap::TPKRangesFilter pkFilter;
Y_ABORT_UNLESS(pkFilter.Add(gt10k, nullptr, indexInfo.GetReplaceKey()));
auto selectInfo = engine.Select(pathId, TSnapshot(planStep, txId), pkFilter, false);
UNIT_ASSERT_VALUES_EQUAL(selectInfo->Portions.size(), 10);
@@ -685,7 +685,7 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
if (key[0].GetType() == TTypeInfo(NTypeIds::Utf8)) {
lt10k = MakeStrPredicate("08999", NKikimr::NKernels::EOperation::Less);
}
- NOlap::TPKRangesFilter pkFilter(false);
+ NOlap::TPKRangesFilter pkFilter;
Y_ABORT_UNLESS(pkFilter.Add(nullptr, lt10k, indexInfo.GetReplaceKey()));
auto selectInfo = engine.Select(pathId, TSnapshot(planStep, txId), pkFilter, false);
UNIT_ASSERT_VALUES_EQUAL(selectInfo->Portions.size(), 9);
@@ -845,7 +845,7 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
{ // full scan
ui64 txId = 1;
- auto selectInfo = engine.Select(pathId, TSnapshot(planStep, txId), NOlap::TPKRangesFilter(false), false);
+ auto selectInfo = engine.Select(pathId, TSnapshot(planStep, txId), NOlap::TPKRangesFilter(), false);
UNIT_ASSERT_VALUES_EQUAL(selectInfo->Portions.size(), 20);
}
@@ -854,7 +854,7 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
{ // full scan
ui64 txId = 1;
- auto selectInfo = engine.Select(pathId, TSnapshot(planStep, txId), NOlap::TPKRangesFilter(false), false);
+ auto selectInfo = engine.Select(pathId, TSnapshot(planStep, txId), NOlap::TPKRangesFilter(), false);
UNIT_ASSERT_VALUES_EQUAL(selectInfo->Portions.size(), 20);
}
@@ -870,7 +870,7 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
{ // full scan
ui64 txId = 1;
- auto selectInfo = engine.Select(pathId, TSnapshot(planStep, txId), NOlap::TPKRangesFilter(false), false);
+ auto selectInfo = engine.Select(pathId, TSnapshot(planStep, txId), NOlap::TPKRangesFilter(), false);
UNIT_ASSERT_VALUES_EQUAL(selectInfo->Portions.size(), 10);
}
}
@@ -886,7 +886,7 @@ Y_UNIT_TEST_SUITE(TColumnEngineTestLogs) {
{ // full scan
ui64 txId = 1;
- auto selectInfo = engine.Select(pathId, TSnapshot(planStep, txId), NOlap::TPKRangesFilter(false), false);
+ auto selectInfo = engine.Select(pathId, TSnapshot(planStep, txId), NOlap::TPKRangesFilter(), false);
UNIT_ASSERT_VALUES_EQUAL(selectInfo->Portions.size(), 10);
}
}
diff --git a/ydb/core/tx/columnshard/engines/writer/buffer/actor2.cpp b/ydb/core/tx/columnshard/engines/writer/buffer/actor2.cpp
index faff758164..dfa57b5bee 100644
--- a/ydb/core/tx/columnshard/engines/writer/buffer/actor2.cpp
+++ b/ydb/core/tx/columnshard/engines/writer/buffer/actor2.cpp
@@ -71,7 +71,7 @@ void TWriteAggregation::Flush(const ui64 tabletId) {
Context.GetWritingCounters()->OnAggregationWrite(Units.size(), SumSize);
std::shared_ptr<NConveyor::ITask> task =
std::make_shared<TBuildPackSlicesTask>(std::move(Units), Context, PathId, tabletId, ModificationType);
- NConveyor::TInsertServiceOperator::AsyncTaskToExecute(task);
+ NConveyor::TInsertServiceOperator::SendTaskToExecute(task);
Units.clear();
SumSize = 0;
}
diff --git a/ydb/core/tx/columnshard/operations/batch_builder/builder.cpp b/ydb/core/tx/columnshard/operations/batch_builder/builder.cpp
index cf97466404..f5c4481599 100644
--- a/ydb/core/tx/columnshard/operations/batch_builder/builder.cpp
+++ b/ydb/core/tx/columnshard/operations/batch_builder/builder.cpp
@@ -54,7 +54,7 @@ TConclusionStatus TBuildBatchesTask::DoExecute(const std::shared_ptr<ITask>& /*t
if (!WriteData.GetWritePortions() || !Context.GetNoTxWrite()) {
std::shared_ptr<NConveyor::ITask> task =
std::make_shared<NOlap::TBuildSlicesTask>(std::move(WriteData), batch.GetContainer(), Context);
- NConveyor::TInsertServiceOperator::AsyncTaskToExecute(task);
+ NConveyor::TInsertServiceOperator::SendTaskToExecute(task);
} else {
NActors::TActivationContext::ActorSystem()->Send(Context.GetBufferizationPortionsActorId(),
new NWritingPortions::TEvAddInsertedDataToBuffer(
@@ -86,7 +86,7 @@ TConclusionStatus TBuildBatchesTask::DoExecute(const std::shared_ptr<ITask>& /*t
if (!WriteData.GetWritePortions() || !Context.GetNoTxWrite()) {
std::shared_ptr<NConveyor::ITask> task =
std::make_shared<NOlap::TBuildSlicesTask>(std::move(WriteData), batch.GetContainer(), Context);
- NConveyor::TInsertServiceOperator::AsyncTaskToExecute(task);
+ NConveyor::TInsertServiceOperator::SendTaskToExecute(task);
} else {
NActors::TActivationContext::ActorSystem()->Send(Context.GetBufferizationPortionsActorId(),
new NWritingPortions::TEvAddInsertedDataToBuffer(
diff --git a/ydb/core/tx/columnshard/operations/batch_builder/merger.h b/ydb/core/tx/columnshard/operations/batch_builder/merger.h
index f1d8fcddfa..148af13daf 100644
--- a/ydb/core/tx/columnshard/operations/batch_builder/merger.h
+++ b/ydb/core/tx/columnshard/operations/batch_builder/merger.h
@@ -67,7 +67,7 @@ public:
virtual NArrow::TContainerWithIndexes<arrow::RecordBatch> BuildResultBatch() override {
auto result = IncomingData;
- AFL_VERIFY(Filter.Apply(result.MutableContainer()));
+ Filter.Apply(result.MutableContainer());
return result;
}
};
diff --git a/ydb/core/tx/columnshard/operations/batch_builder/restore.cpp b/ydb/core/tx/columnshard/operations/batch_builder/restore.cpp
index 3d6d8e03ed..4854fc010f 100644
--- a/ydb/core/tx/columnshard/operations/batch_builder/restore.cpp
+++ b/ydb/core/tx/columnshard/operations/batch_builder/restore.cpp
@@ -13,7 +13,7 @@ std::unique_ptr<TEvColumnShard::TEvInternalScan> TModificationRestoreTask::DoBui
AFL_DEBUG(NKikimrServices::TX_COLUMNSHARD_RESTORE)("event", "restore_start")("count", IncomingData.HasContainer() ? IncomingData->num_rows() : 0)(
"task_id", WriteData.GetWriteMeta().GetId());
auto pkData = NArrow::TColumnOperator().VerifyIfAbsent().Extract(IncomingData.GetContainer(), Context.GetActualSchema()->GetPKColumnNames());
- request->RangesFilter = TPKRangesFilter::BuildFromRecordBatchLines(pkData, false);
+ request->RangesFilter = TPKRangesFilter::BuildFromRecordBatchLines(pkData);
for (auto&& i : Context.GetActualSchema()->GetIndexInfo().GetColumnIds(false)) {
request->AddColumn(i);
}
@@ -49,7 +49,7 @@ NKikimr::TConclusionStatus TModificationRestoreTask::DoOnFinished() {
if (!WriteData.GetWritePortions() || !Context.GetNoTxWrite()) {
std::shared_ptr<NConveyor::ITask> task =
std::make_shared<NOlap::TBuildSlicesTask>(std::move(WriteData), batchResult.GetContainer(), Context);
- NConveyor::TInsertServiceOperator::AsyncTaskToExecute(task);
+ NConveyor::TInsertServiceOperator::SendTaskToExecute(task);
} else {
NActors::TActivationContext::ActorSystem()->Send(
Context.GetBufferizationPortionsActorId(), new NWritingPortions::TEvAddInsertedDataToBuffer(
diff --git a/ydb/core/tx/columnshard/operations/slice_builder/pack_builder.cpp b/ydb/core/tx/columnshard/operations/slice_builder/pack_builder.cpp
index 6ab38d7cbc..d3bc168d9b 100644
--- a/ydb/core/tx/columnshard/operations/slice_builder/pack_builder.cpp
+++ b/ydb/core/tx/columnshard/operations/slice_builder/pack_builder.cpp
@@ -89,13 +89,14 @@ public:
SequentialWriteId.emplace_back(data->GetWriteMeta().GetWriteId());
}
- [[nodiscard]] TConclusionStatus Finalize(const NOlap::TWritingContext& context, std::vector<TPortionWriteController::TInsertPortion>& result) {
+ [[nodiscard]] TConclusionStatus Finalize(
+ const NOlap::TWritingContext& context, std::vector<TPortionWriteController::TInsertPortion>& result) {
if (Batches.size() == 0) {
return TConclusionStatus::Success();
}
if (Batches.size() == 1) {
- auto portionConclusion = context.GetActualSchema()->PrepareForWrite(context.GetActualSchema(), PathId, Batches.front().GetContainer(),
- ModificationType, context.GetStoragesManager(), context.GetSplitterCounters());
+ auto portionConclusion = context.GetActualSchema()->PrepareForWrite(context.GetActualSchema(), PathId,
+ Batches.front().GetContainer(), ModificationType, context.GetStoragesManager(), context.GetSplitterCounters());
result.emplace_back(portionConclusion.DetachResult());
} else {
ui32 idx = 0;
@@ -121,10 +122,12 @@ public:
if (defaultColumn.IsFail()) {
return defaultColumn;
}
- gContainer->AddField(context.GetActualSchema()->GetFieldByIndexVerified(*itAllIndexes), defaultColumn.DetachResult()).Validate();
+ gContainer->AddField(context.GetActualSchema()->GetFieldByIndexVerified(*itAllIndexes), defaultColumn.DetachResult())
+ .Validate();
} else {
AFL_VERIFY(*itAllIndexes == *itBatchIndexes);
- gContainer->AddField(context.GetActualSchema()->GetFieldByIndexVerified(*itAllIndexes),
+ gContainer
+ ->AddField(context.GetActualSchema()->GetFieldByIndexVerified(*itAllIndexes),
i->column(itBatchIndexes - i.GetColumnIndexes().begin()))
.Validate();
++itBatchIndexes;
@@ -193,7 +196,10 @@ TConclusionStatus TBuildPackSlicesTask::DoExecute(const std::shared_ptr<ITask>&
}
std::vector<TPortionWriteController::TInsertPortion> portionsToWrite;
for (auto&& i : slicesToMerge) {
- i.Finalize(Context, portionsToWrite).Validate();
+ auto conclusion = i.Finalize(Context, portionsToWrite);
+ if (conclusion.IsFail()) {
+ return conclusion;
+ }
}
auto actions = WriteUnits.front().GetData()->GetBlobsAction();
auto writeController =
diff --git a/ydb/core/tx/columnshard/operations/write.cpp b/ydb/core/tx/columnshard/operations/write.cpp
index d739caf6ae..b7d184b9c0 100644
--- a/ydb/core/tx/columnshard/operations/write.cpp
+++ b/ydb/core/tx/columnshard/operations/write.cpp
@@ -38,7 +38,7 @@ void TWriteOperation::Start(
NEvWrite::TWriteData writeData(writeMeta, data, owner.TablesManager.GetPrimaryIndex()->GetReplaceKey(),
owner.StoragesManager->GetInsertOperator()->StartWritingAction(NOlap::NBlobOperations::EConsumer::WRITING_OPERATOR), WritePortions);
std::shared_ptr<NConveyor::ITask> task = std::make_shared<NOlap::TBuildBatchesTask>(std::move(writeData), context);
- NConveyor::TInsertServiceOperator::AsyncTaskToExecute(task);
+ NConveyor::TInsertServiceOperator::SendTaskToExecute(task);
Status = EOperationStatus::Started;
}
diff --git a/ydb/core/tx/columnshard/test_helper/shard_writer.cpp b/ydb/core/tx/columnshard/test_helper/shard_writer.cpp
index 92e262d2f7..f9c7583ef1 100644
--- a/ydb/core/tx/columnshard/test_helper/shard_writer.cpp
+++ b/ydb/core/tx/columnshard/test_helper/shard_writer.cpp
@@ -18,7 +18,7 @@ NKikimrDataEvents::TEvWriteResult::EStatus TShardWriter::StartCommit(const ui64
TAutoPtr<NActors::IEventHandle> handle;
auto event = Runtime.GrabEdgeEvent<NKikimr::NEvents::TDataEvents::TEvWriteResult>(handle);
AFL_VERIFY(event);
-
+ AFL_VERIFY(event->Record.GetTxId() == txId);
return event->Record.GetStatus();
}
diff --git a/ydb/core/tx/columnshard/transactions/locks/read_start.cpp b/ydb/core/tx/columnshard/transactions/locks/read_start.cpp
index 963c47b068..5165e8f194 100644
--- a/ydb/core/tx/columnshard/transactions/locks/read_start.cpp
+++ b/ydb/core/tx/columnshard/transactions/locks/read_start.cpp
@@ -17,7 +17,7 @@ bool TEvReadStart::DoDeserializeFromProto(const NKikimrColumnShardTxProto::TEven
AFL_ERROR(NKikimrServices::TX_COLUMNSHARD)("error", "cannot_parse_TEvReadStart")("reason", "cannot_parse_schema");
return false;
}
- Filter = TPKRangesFilter::BuildFromString(proto.GetRead().GetFilter(), Schema, false);
+ Filter = TPKRangesFilter::BuildFromString(proto.GetRead().GetFilter(), Schema);
if (!Filter) {
AFL_ERROR(NKikimrServices::TX_COLUMNSHARD)("error", "cannot_parse_TEvReadStart")("reason", "cannot_parse_filter");
return false;
diff --git a/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp b/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp
index 2b8ac08794..731fe98cc8 100644
--- a/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp
+++ b/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp
@@ -20,6 +20,25 @@
#include <library/cpp/deprecated/atomic/atomic.h>
#include <library/cpp/testing/hook/hook.h>
+#define Y_UNIT_TEST_OCTO(BaseName, Flag1, Flag2, Flag3) \
+ template<bool, bool, bool> void BaseName(NUnitTest::TTestContext&); \
+ struct TTestRegistration##BaseName { \
+ TTestRegistration##BaseName() { \
+ TCurrentTest::AddTest(#BaseName "-" #Flag1 "-" #Flag2 "-" #Flag3, static_cast<void (*)(NUnitTest::TTestContext&)>(&BaseName<false, false, false>), false); \
+ TCurrentTest::AddTest(#BaseName "+" #Flag1 "-" #Flag2 "-" #Flag3, static_cast<void (*)(NUnitTest::TTestContext&)>(&BaseName<true, false, false>), false); \
+ TCurrentTest::AddTest(#BaseName "-" #Flag1 "+" #Flag2 "-" #Flag3, static_cast<void (*)(NUnitTest::TTestContext&)>(&BaseName<false, true, false>), false); \
+ TCurrentTest::AddTest(#BaseName "+" #Flag1 "+" #Flag2 "-" #Flag3, static_cast<void (*)(NUnitTest::TTestContext&)>(&BaseName<true, true, false>), false); \
+ TCurrentTest::AddTest(#BaseName "-" #Flag1 "-" #Flag2 "+" #Flag3, static_cast<void (*)(NUnitTest::TTestContext&)>(&BaseName<false, false, true>), false); \
+ TCurrentTest::AddTest(#BaseName "+" #Flag1 "-" #Flag2 "+" #Flag3, static_cast<void (*)(NUnitTest::TTestContext&)>(&BaseName<true, false, true>), false); \
+ TCurrentTest::AddTest(#BaseName "-" #Flag1 "+" #Flag2 "+" #Flag3, static_cast<void (*)(NUnitTest::TTestContext&)>(&BaseName<false, true, true>), false); \
+ TCurrentTest::AddTest(#BaseName "+" #Flag1 "+" #Flag2 "+" #Flag3, static_cast<void (*)(NUnitTest::TTestContext&)>(&BaseName<true, true, true>), false); \
+ } \
+ }; \
+ static TTestRegistration##BaseName testRegistration##BaseName; \
+ template<bool Flag1, bool Flag2, bool Flag3> \
+ void BaseName(NUnitTest::TTestContext&)
+
+
namespace NKikimr {
using namespace NTxUT;
@@ -168,23 +187,20 @@ static constexpr ui32 PORTION_ROWS = 80 * 1000;
// ts[0] = 1600000000; // date -u --date='@1600000000' Sun Sep 13 12:26:40 UTC 2020
// ts[1] = 1620000000; // date -u --date='@1620000000' Mon May 3 00:00:00 UTC 2021
-void TestTtl(bool reboots, bool internal, TTestSchema::TTableSpecials spec = {},
- const std::vector<NArrow::NTest::TTestColumn>& ydbSchema = testYdbSchema)
+void TestTtl(bool reboots, bool internal, bool useFirstPkColumnForTtl, NScheme::TTypeId ttlColumnTypeId)
{
auto csControllerGuard = NKikimr::NYDBTest::TControllers::RegisterCSControllerGuard<NOlap::TWaitCompactionController>();
csControllerGuard->DisableBackground(NKikimr::NYDBTest::ICSController::EBackground::Compaction);
csControllerGuard->SetOverrideTasksActualizationLag(TDuration::Zero());
std::vector<ui64> ts = { 1600000000, 1620000000 };
- ui32 ttlIncSeconds = 1;
- for (auto& c : ydbSchema) {
- if (c.GetName() == spec.TtlColumn) {
- if (c.GetType().GetTypeId() == NTypeIds::Date) {
- ttlIncSeconds = TDuration::Days(1).Seconds();
- }
- break;
- }
- }
+ auto ydbSchema = TTestSchema::YdbSchema();
+ const auto ttlColumnNameIdx = useFirstPkColumnForTtl ? 0 : 8;
+ const auto ttlColumnName = ydbSchema[ttlColumnNameIdx].GetName();
+ UNIT_ASSERT(ttlColumnName == (useFirstPkColumnForTtl ? "timestamp" : "saved_at")); //to detect default schema changes
+ ydbSchema[ttlColumnNameIdx].SetType(ttlColumnTypeId);
+ const auto ttlIncSeconds = ttlColumnTypeId == NTypeIds::Date ? TDuration::Days(1).Seconds() : 1;
+ TTestSchema::TTableSpecials specs;
TTestBasicRuntime runtime;
TTester::Setup(runtime);
@@ -214,12 +230,9 @@ void TestTtl(bool reboots, bool internal, TTestSchema::TTableSpecials spec = {},
} else {
ttlSec -= ts[0] + ttlIncSeconds;
}
- if (spec.HasTiers()) {
- spec.Tiers[0].EvictAfter = TDuration::Seconds(ttlSec);
- } else {
- UNIT_ASSERT(!spec.TtlColumn.empty());
- spec.EvictAfter = TDuration::Seconds(ttlSec);
- }
+ TTestSchema::TTableSpecials spec;
+ spec.TtlColumn = ttlColumnName;
+ spec.EvictAfter = TDuration::Seconds(ttlSec);
SetupSchema(runtime, sender,
TTestSchema::CreateInitShardTxBody(tableId, ydbSchema, testYdbPk, spec, "/Root/olapStore"),
NOlap::TSnapshot(++planStep, ++txId));
@@ -1183,79 +1196,14 @@ Y_UNIT_TEST_SUITE(TColumnShardTestSchema) {
}
}
- Y_UNIT_TEST(ExternalTTL) {
- TestTtl(false, false); // over NTypeIds::Timestamp ttl column
- }
-
- Y_UNIT_TEST(ExternalTTL_Types) {
- auto ydbSchema = testYdbSchema;
- for (auto typeId : {NTypeIds::Datetime, NTypeIds::Date, NTypeIds::Uint32, NTypeIds::Uint64}) {
- UNIT_ASSERT_EQUAL(ydbSchema[8].GetName(), "saved_at");
- ydbSchema[8].SetType(TTypeInfo(typeId));
-
- TTestSchema::TTableSpecials specs;
- specs.SetTtlColumn("saved_at");
-
- TestTtl(false, false, specs, ydbSchema);
- }
- }
-
- Y_UNIT_TEST(RebootExternalTTL) {
- NColumnShard::gAllowLogBatchingDefaultValue = false;
- TestTtl(true, false);
- }
-
- Y_UNIT_TEST(InternalTTL) {
- TestTtl(false, true); // over NTypeIds::Timestamp ttl column
- }
-
- Y_UNIT_TEST(InternalTTL_Types) {
- auto ydbSchema = testYdbSchema;
- for (auto typeId : {NTypeIds::Datetime, NTypeIds::Date, NTypeIds::Uint32, NTypeIds::Uint64}) {
- UNIT_ASSERT_EQUAL(ydbSchema[8].GetName(), "saved_at");
- ydbSchema[8].SetType(TTypeInfo(typeId));
-
- TTestSchema::TTableSpecials specs;
- specs.SetTtlColumn("saved_at");
-
- TestTtl(false, true, specs, ydbSchema);
+ Y_UNIT_TEST_OCTO(TTL, Reboot, Internal, FirstPkColumn) {
+ for (auto typeId : {NTypeIds::Timestamp, NTypeIds::Datetime, NTypeIds::Date, NTypeIds::Uint32, NTypeIds::Uint64}) {
+ TestTtl(Reboot, Internal, FirstPkColumn, typeId);
}
}
- Y_UNIT_TEST(RebootInternalTTL) {
- NColumnShard::gAllowLogBatchingDefaultValue = false;
- TestTtl(true, true);
- }
-
- Y_UNIT_TEST(OneTier) {
- TTestSchema::TTableSpecials specs;
- specs.SetTtlColumn("timestamp");
-// specs.Tiers.emplace_back(TTestSchema::TStorageTier("default").SetTtlColumn("timestamp"));
- TestTtl(false, true, specs);
- }
- Y_UNIT_TEST(RebootOneTier) {
- NColumnShard::gAllowLogBatchingDefaultValue = false;
- TTestSchema::TTableSpecials specs;
- specs.SetTtlColumn("timestamp");
-// specs.Tiers.emplace_back(TTestSchema::TStorageTier("default").SetTtlColumn("timestamp"));
- TestTtl(true, true, specs);
- }
- Y_UNIT_TEST(OneTierExternalTtl) {
- TTestSchema::TTableSpecials specs;
- specs.SetTtlColumn("timestamp");
-// specs.Tiers.emplace_back(TTestSchema::TStorageTier("default").SetTtlColumn("timestamp"));
- TestTtl(false, false, specs);
- }
-
- Y_UNIT_TEST(RebootOneTierExternalTtl) {
- NColumnShard::gAllowLogBatchingDefaultValue = false;
- TTestSchema::TTableSpecials specs;
- specs.SetTtlColumn("timestamp");
-// specs.Tiers.emplace_back(TTestSchema::TStorageTier("default").SetTtlColumn("timestamp"));
- TestTtl(true, false, specs);
- }
// TODO: EnableOneTierAfterTtl, EnableTtlAfterOneTier
diff --git a/ydb/core/tx/conveyor/service/service.cpp b/ydb/core/tx/conveyor/service/service.cpp
index 295745c4e6..3f93fed0dc 100644
--- a/ydb/core/tx/conveyor/service/service.cpp
+++ b/ydb/core/tx/conveyor/service/service.cpp
@@ -36,9 +36,10 @@ void TDistributor::HandleMain(TEvInternal::TEvTaskProcessedResult::TPtr& evExt)
("queue", ProcessesOrdered.size())("workers", Workers.size())("count", ev->GetProcessIds().size())("d", ev->GetInstants().back() - ev->GetInstants().front());
for (ui32 idx = 0; idx < ev->GetProcessIds().size(); ++idx) {
AddCPUTime(ev->GetProcessIds()[idx], ev->GetInstants()[idx + 1] - std::max(LastAddProcessInstant, ev->GetInstants()[idx]));
+ Counters.TaskExecuteHistogram->Collect((ev->GetInstants()[idx + 1] - ev->GetInstants()[idx]).MicroSeconds());
}
const TDuration dExecution = ev->GetInstants().back() - ev->GetInstants().front();
- Counters.ExecuteHistogram->Collect(dExecution.MicroSeconds());
+ Counters.PackExecuteHistogram->Collect(dExecution.MicroSeconds());
Counters.ExecuteDuration->Add(dExecution.MicroSeconds());
const TMonotonic now = TMonotonic::Now();
diff --git a/ydb/core/tx/conveyor/service/service.h b/ydb/core/tx/conveyor/service/service.h
index e42ae5ec00..d58167fe44 100644
--- a/ydb/core/tx/conveyor/service/service.h
+++ b/ydb/core/tx/conveyor/service/service.h
@@ -35,7 +35,8 @@ public:
const ::NMonitoring::THistogramPtr WaitingHistogram;
const ::NMonitoring::THistogramPtr PackHistogram;
- const ::NMonitoring::THistogramPtr ExecuteHistogram;
+ const ::NMonitoring::THistogramPtr PackExecuteHistogram;
+ const ::NMonitoring::THistogramPtr TaskExecuteHistogram;
const ::NMonitoring::THistogramPtr SendBackHistogram;
const ::NMonitoring::THistogramPtr SendFwdHistogram;
const ::NMonitoring::THistogramPtr ReceiveTaskHistogram;
@@ -58,7 +59,8 @@ public:
, UseWorkerRate(TBase::GetDeriviative("UseWorker"))
, WaitingHistogram(TBase::GetHistogram("Waiting/Duration/Us", NMonitoring::ExponentialHistogram(25, 2, 50)))
, PackHistogram(TBase::GetHistogram("ExecutionPack/Count", NMonitoring::LinearHistogram(25, 1, 1)))
- , ExecuteHistogram(TBase::GetHistogram("Execute/Duration/Us", NMonitoring::ExponentialHistogram(25, 2, 50)))
+ , PackExecuteHistogram(TBase::GetHistogram("PackExecute/Duration/Us", NMonitoring::ExponentialHistogram(25, 2, 50)))
+ , TaskExecuteHistogram(TBase::GetHistogram("TaskExecute/Duration/Us", NMonitoring::ExponentialHistogram(25, 2, 50)))
, SendBackHistogram(TBase::GetHistogram("SendBack/Duration/Us", NMonitoring::ExponentialHistogram(25, 2, 50)))
, SendFwdHistogram(TBase::GetHistogram("SendForward/Duration/Us", NMonitoring::ExponentialHistogram(25, 2, 50)))
, ReceiveTaskHistogram(TBase::GetHistogram("ReceiveTask/Duration/Us", NMonitoring::ExponentialHistogram(25, 2, 50)))
diff --git a/ydb/core/tx/data_events/common/signals_flow.cpp b/ydb/core/tx/data_events/common/signals_flow.cpp
index cbe1839dcd..d043c9c4a9 100644
--- a/ydb/core/tx/data_events/common/signals_flow.cpp
+++ b/ydb/core/tx/data_events/common/signals_flow.cpp
@@ -18,7 +18,7 @@ TWriteFlowCounters::TWriteFlowCounters()
for (auto&& to : GetEnumAllValues<EWriteStage>()) {
auto subTo = sub.CreateSubGroup("stage_to", ::ToString(to));
CountByStageMoving.back().emplace_back(subTo.GetDeriviative("Transfers/Count"));
- CountByStageDuration.back().emplace_back(subTo.GetDeriviative("Transfers/Count"));
+ CountByStageDuration.back().emplace_back(subTo.GetDeriviative("Transfers/Duration/Ms"));
}
}
}
diff --git a/ydb/core/tx/data_events/common/signals_flow.h b/ydb/core/tx/data_events/common/signals_flow.h
index 4cd74a3c5f..e37e10a4cc 100644
--- a/ydb/core/tx/data_events/common/signals_flow.h
+++ b/ydb/core/tx/data_events/common/signals_flow.h
@@ -13,7 +13,8 @@ enum class EWriteStage {
BuildSlicesPack,
Result,
Finished,
- Aborted
+ Aborted,
+ Replied
};
class TWriteFlowCounters: public NColumnShard::TCommonCountersOwner {
diff --git a/ydb/core/tx/data_events/shard_writer.cpp b/ydb/core/tx/data_events/shard_writer.cpp
index d4d38101e6..1ce27af0d6 100644
--- a/ydb/core/tx/data_events/shard_writer.cpp
+++ b/ydb/core/tx/data_events/shard_writer.cpp
@@ -29,12 +29,10 @@ namespace NKikimr::NEvWrite {
void TWritersController::OnFail(const Ydb::StatusIds::StatusCode code, const TString& message) {
Counters->OnCSFailed(code);
FailsCount.Inc();
- if (!Code) {
- TGuard<TMutex> g(Mutex);
- if (!Code) {
- Issues.AddIssue(message);
- Code = code;
- }
+ if (AtomicCas(&HasCodeFail, 1, 0)) {
+ AFL_VERIFY(!Code);
+ Issues.AddIssue(message);
+ Code = code;
}
if (!WritesCount.Dec()) {
SendReply();
diff --git a/ydb/core/tx/data_events/shard_writer.h b/ydb/core/tx/data_events/shard_writer.h
index b3c53d3d36..c716f690cd 100644
--- a/ydb/core/tx/data_events/shard_writer.h
+++ b/ydb/core/tx/data_events/shard_writer.h
@@ -105,9 +105,11 @@ private:
TAtomicCounter WritesCount = 0;
TAtomicCounter WritesIndex = 0;
TAtomicCounter FailsCount = 0;
- TMutex Mutex;
+
+ TAtomic HasCodeFail = 0;
NYql::TIssues Issues;
std::optional<Ydb::StatusIds::StatusCode> Code;
+
NActors::TActorIdentity LongTxActorId;
std::vector<TWriteIdForShard> WriteIds;
const TMonotonic StartInstant = TMonotonic::Now();
diff --git a/ydb/core/tx/data_events/write_data.cpp b/ydb/core/tx/data_events/write_data.cpp
index 4e05aeccea..f3779a1cb5 100644
--- a/ydb/core/tx/data_events/write_data.cpp
+++ b/ydb/core/tx/data_events/write_data.cpp
@@ -21,10 +21,13 @@ TWriteData::TWriteData(const std::shared_ptr<TWriteMeta>& writeMeta, IDataContai
}
void TWriteMeta::OnStage(const EWriteStage stage) const {
- AFL_VERIFY(CurrentStage != EWriteStage::Finished && CurrentStage != EWriteStage::Aborted);
+ if (stage == CurrentStage) {
+ return;
+ }
AFL_VERIFY((ui32)stage > (ui32)CurrentStage)("from", CurrentStage)("to", stage);
const TMonotonic nextStageInstant = TMonotonic::Now();
Counters->OnStageMove(CurrentStage, stage, nextStageInstant - LastStageInstant);
+ CurrentStage = stage;
LastStageInstant = nextStageInstant;
if (stage == EWriteStage::Finished) {
Counters->OnWriteFinished(nextStageInstant - WriteStartInstant);
diff --git a/ydb/core/tx/data_events/write_data.h b/ydb/core/tx/data_events/write_data.h
index 56407661b1..51e6b989c4 100644
--- a/ydb/core/tx/data_events/write_data.h
+++ b/ydb/core/tx/data_events/write_data.h
@@ -58,8 +58,8 @@ public:
void OnStage(const EWriteStage stage) const;
~TWriteMeta() {
- if (CurrentStage != EWriteStage::Finished && CurrentStage != EWriteStage::Aborted) {
- Counters->OnWriteAborted(TMonotonic::Now() - WriteStartInstant);
+ if (CurrentStage != EWriteStage::Replied) {
+ OnStage(EWriteStage::Aborted);
}
}
diff --git a/ydb/core/tx/datashard/buffer_data.h b/ydb/core/tx/datashard/buffer_data.h
index f254fe0e9f..d224ccd3c8 100644
--- a/ydb/core/tx/datashard/buffer_data.h
+++ b/ydb/core/tx/datashard/buffer_data.h
@@ -52,9 +52,8 @@ public:
return Rows->size();
}
- bool IsReachLimits(const TUploadLimits& Limits) {
- // TODO(mbkkt) why [0..BatchRowsLimit) but [0..BatchBytesLimit]
- return Rows->size() >= Limits.BatchRowsLimit || ByteSize > Limits.BatchBytesLimit;
+ bool HasReachedLimits(size_t rowsLimit, ui64 bytesLimit) const {
+ return Rows->size() > rowsLimit || ByteSize > bytesLimit;
}
auto&& ExtractLastKey() {
diff --git a/ydb/core/tx/datashard/build_index.cpp b/ydb/core/tx/datashard/build_index.cpp
index 9a49d965de..82fd0880bf 100644
--- a/ydb/core/tx/datashard/build_index.cpp
+++ b/ydb/core/tx/datashard/build_index.cpp
@@ -103,7 +103,7 @@ class TBuildScanUpload: public TActor<TBuildScanUpload<Activity>>, public NTable
using TBase = TActor<TThis>;
protected:
- const TUploadLimits Limits;
+ const TIndexBuildScanSettings ScanSettings;
const ui64 BuildIndexId;
const TString TargetTable;
@@ -129,7 +129,7 @@ protected:
TSerializedCellVec LastUploadedKey;
TActorId Uploader;
- ui64 RetryCount = 0;
+ ui32 RetryCount = 0;
TUploadMonStats Stats = TUploadMonStats("tablets", "build_index_upload");
TUploadStatus UploadStatus;
@@ -141,9 +141,9 @@ protected:
const TActorId& progressActorId,
const TSerializedTableRange& range,
const TUserTable& tableInfo,
- TUploadLimits limits)
+ const TIndexBuildScanSettings& scanSettings)
: TBase(&TThis::StateWork)
- , Limits(limits)
+ , ScanSettings(scanSettings)
, BuildIndexId(buildIndexId)
, TargetTable(target)
, SeqNo(seqNo)
@@ -161,7 +161,7 @@ protected:
addRow();
- if (!ReadBuf.IsReachLimits(Limits)) {
+ if (!HasReachedLimits(ReadBuf, ScanSettings)) {
return EScan::Feed;
}
@@ -354,7 +354,7 @@ private:
this->Send(ProgressActorId, progress.Release());
- if (!ReadBuf.IsEmpty() && ReadBuf.IsReachLimits(Limits)) {
+ if (HasReachedLimits(ReadBuf, ScanSettings)) {
ReadBuf.FlushTo(WriteBuf);
Upload();
}
@@ -363,10 +363,10 @@ private:
return;
}
- if (RetryCount < Limits.MaxUploadRowsRetryCount && UploadStatus.IsRetriable()) {
+ if (RetryCount < ScanSettings.GetMaxBatchRetries() && UploadStatus.IsRetriable()) {
LOG_N("Got retriable error, " << Debug());
- ctx.Schedule(Limits.GetTimeoutBackoff(RetryCount), new TEvents::TEvWakeup());
+ ctx.Schedule(GetRetryWakeupTimeoutBackoff(RetryCount), new TEvents::TEvWakeup());
return;
}
@@ -412,8 +412,8 @@ public:
TProtoColumnsCRef targetIndexColumns,
TProtoColumnsCRef targetDataColumns,
const TUserTable& tableInfo,
- TUploadLimits limits)
- : TBuildScanUpload(buildIndexId, target, seqNo, dataShardId, progressActorId, range, tableInfo, limits)
+ const TIndexBuildScanSettings& scanSettings)
+ : TBuildScanUpload(buildIndexId, target, seqNo, dataShardId, progressActorId, range, tableInfo, scanSettings)
, TargetDataColumnPos(targetIndexColumns.size()) {
ScanTags = BuildTags(tableInfo, targetIndexColumns, targetDataColumns);
UploadColumnsTypes = BuildTypes(tableInfo, targetIndexColumns, targetDataColumns);
@@ -444,8 +444,8 @@ public:
const TSerializedTableRange& range,
const NKikimrIndexBuilder::TColumnBuildSettings& columnBuildSettings,
const TUserTable& tableInfo,
- TUploadLimits limits)
- : TBuildScanUpload(buildIndexId, target, seqNo, dataShardId, progressActorId, range, tableInfo, limits) {
+ const TIndexBuildScanSettings& scanSettings)
+ : TBuildScanUpload(buildIndexId, target, seqNo, dataShardId, progressActorId, range, tableInfo, scanSettings) {
Y_ENSURE(columnBuildSettings.columnSize() > 0);
UploadColumnsTypes = BuildTypes(tableInfo, columnBuildSettings);
UploadMode = NTxProxy::EUploadRowsMode::UpsertIfExists;
@@ -481,13 +481,13 @@ TAutoPtr<NTable::IScan> CreateBuildIndexScan(
TProtoColumnsCRef targetDataColumns,
const NKikimrIndexBuilder::TColumnBuildSettings& columnsToBuild,
const TUserTable& tableInfo,
- TUploadLimits limits) {
+ const TIndexBuildScanSettings& scanSettings) {
if (columnsToBuild.columnSize() > 0) {
return new TBuildColumnsScan(
- buildIndexId, target, seqNo, dataShardId, progressActorId, range, columnsToBuild, tableInfo, limits);
+ buildIndexId, target, seqNo, dataShardId, progressActorId, range, columnsToBuild, tableInfo, scanSettings);
}
return new TBuildIndexScan(
- buildIndexId, target, seqNo, dataShardId, progressActorId, range, targetIndexColumns, targetDataColumns, tableInfo, limits);
+ buildIndexId, target, seqNo, dataShardId, progressActorId, range, targetIndexColumns, targetDataColumns, tableInfo, scanSettings);
}
class TDataShard::TTxHandleSafeBuildIndexScan: public NTabletFlatExecutor::TTransactionBase<TDataShard> {
@@ -608,17 +608,6 @@ void TDataShard::HandleSafe(TEvDataShard::TEvBuildIndexCreateRequest::TPtr& ev,
scanOpts.SetSnapshotRowVersion(rowVersion);
scanOpts.SetResourceBroker("build_index", 10);
- TUploadLimits limits;
- if (record.HasMaxBatchRows()) {
- limits.BatchRowsLimit = record.GetMaxBatchRows();
- }
- if (record.HasMaxBatchBytes()) {
- limits.BatchBytesLimit = record.GetMaxBatchBytes();
- }
- if (record.HasMaxRetries()) {
- limits.MaxUploadRowsRetryCount = record.GetMaxRetries();
- }
-
const auto scanId = QueueScan(userTable.LocalTid,
CreateBuildIndexScan(buildIndexId,
record.GetTargetName(),
@@ -630,7 +619,7 @@ void TDataShard::HandleSafe(TEvDataShard::TEvBuildIndexCreateRequest::TPtr& ev,
record.GetDataColumns(),
record.GetColumnBuildSettings(),
userTable,
- limits),
+ record.GetScanSettings()),
0,
scanOpts);
diff --git a/ydb/core/tx/datashard/datashard__data_cleanup.cpp b/ydb/core/tx/datashard/datashard__data_cleanup.cpp
index 9d261614db..d6ed58aa07 100644
--- a/ydb/core/tx/datashard/datashard__data_cleanup.cpp
+++ b/ydb/core/tx/datashard/datashard__data_cleanup.cpp
@@ -26,7 +26,19 @@ public:
Response = std::make_unique<TEvDataShard::TEvForceDataCleanupResult>(
record.GetDataCleanupGeneration(),
Self->TabletID(),
- NKikimrTxDataShard::TEvForceDataCleanupResult::FAILED);
+ NKikimrTxDataShard::TEvForceDataCleanupResult::WRONG_SHARD_STATE);
+ return true;
+ }
+
+ if (Self->Executor()->HasLoanedParts()) {
+ LOG_WARN_S(ctx, NKikimrServices::TX_DATASHARD,
+ "DataCleanup of tablet# " << Self->TabletID()
+ << ": has borrowed parts"
+ << ", requested from " << Ev->Sender);
+ Response = std::make_unique<TEvDataShard::TEvForceDataCleanupResult>(
+ record.GetDataCleanupGeneration(),
+ Self->TabletID(),
+ NKikimrTxDataShard::TEvForceDataCleanupResult::BORROWED);
return true;
}
diff --git a/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp b/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp
index 0a918aa010..1c2e7ccbaa 100644
--- a/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp
+++ b/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp
@@ -1,4 +1,5 @@
#include <ydb/core/tx/datashard/ut_common/datashard_ut_common.h>
+#include <ydb/core/testlib/storage_helpers.h>
namespace NKikimr {
@@ -16,31 +17,8 @@ Y_UNIT_TEST_SUITE(DataCleanup) {
static const TString PresentShortValue3("_Some_value_3_");
static const TString DeletedLongValue4(size_t(100 * 1024), 't');
- int CountBlobsWithSubstring(ui64 tabletId, const TVector<TServerSettings::TProxyDSPtr>& proxyDSs, const TString& substring) {
- int res = 0;
- for (const auto& proxyDS : proxyDSs) {
- for (const auto& [id, blob] : proxyDS->AllMyBlobs()) {
- if (id.TabletID() == tabletId && !blob.DoNotKeep && blob.Buffer.ConvertToString().Contains(substring)) {
- ++res;
- }
- }
- }
- return res;
- }
-
- bool BlobStorageContains(const TVector<TServerSettings::TProxyDSPtr>& proxyDSs, const TString& value) {
- for (const auto& proxyDS : proxyDSs) {
- for (const auto& [id, blob] : proxyDS->AllMyBlobs()) {
- if (!blob.DoNotKeep && blob.Buffer.ConvertToString().Contains(value)) {
- return true;
- }
- }
- }
- return false;
- }
-
auto SetupWithTable(bool withCompaction) {
- TVector<TServerSettings::TProxyDSPtr> proxyDSs {
+ TVector<TIntrusivePtr<NFake::TProxyDS>> proxyDSs {
MakeIntrusive<NFake::TProxyDS>(TGroupId::FromValue(0)),
MakeIntrusive<NFake::TProxyDS>(TGroupId::FromValue(2181038080)),
};
@@ -113,7 +91,7 @@ Y_UNIT_TEST_SUITE(DataCleanup) {
UNIT_ASSERT_VALUES_EQUAL(ev.Record.GetDataCleanupGeneration(), generation);
}
- void CheckTableData(Tests::TServer::TPtr server, const TVector<TServerSettings::TProxyDSPtr>& proxyDSs, const TString& table) {
+ void CheckTableData(Tests::TServer::TPtr server, const TVector<TIntrusivePtr<NFake::TProxyDS>>& proxyDSs, const TString& table) {
auto result = ReadShardedTable(server, table);
UNIT_ASSERT_VALUES_EQUAL(result,
"key = 2, subkey = " + PresentSubkey2 + ", value = " + PresentLongValue2 + "\n"
@@ -329,6 +307,52 @@ Y_UNIT_TEST_SUITE(DataCleanup) {
CheckTableData(server, proxyDSs, "/Root/table-1");
CheckTableData(server, proxyDSs, "/Root/table-2");
}
+
+ Y_UNIT_TEST(BorrowerDataCleanedAfterCopyTable) {
+ auto [server, sender, tableShards, proxyDSs] = SetupWithTable(true);
+ auto& runtime = *server->GetRuntime();
+
+ auto txIdCopy = AsyncCreateCopyTable(server, sender, "/Root", "table-2", "/Root/table-1");
+ WaitTxNotification(server, sender, txIdCopy);
+ auto table2Shards = GetTableShards(server, sender, "/Root/table-2");
+ auto table2Id = ResolveTableId(server, sender, "/Root/table-2");
+
+ ExecSQL(server, sender, "DELETE FROM `/Root/table-1` WHERE key IN (1, 4);");
+ ExecSQL(server, sender, "DELETE FROM `/Root/table-2` WHERE key IN (1, 4);");
+ SimulateSleep(runtime, TDuration::Seconds(2));
+
+ ui64 dataCleanupGeneration = 24;
+ {
+ // cleanup for the first table should be failed due to borrowed parts
+ auto request = MakeHolder<TEvDataShard::TEvForceDataCleanup>(dataCleanupGeneration);
+ runtime.SendToPipe(tableShards.at(0), sender, request.Release(), 0, GetPipeConfigWithRetries());
+
+ auto ev = runtime.GrabEdgeEventRethrow<TEvDataShard::TEvForceDataCleanupResult>(sender);
+ UNIT_ASSERT_EQUAL(ev->Get()->Record.GetStatus(), NKikimrTxDataShard::TEvForceDataCleanupResult::BORROWED);
+ UNIT_ASSERT_VALUES_EQUAL(ev->Get()->Record.GetTabletId(), tableShards.at(0));
+ UNIT_ASSERT_VALUES_EQUAL(ev->Get()->Record.GetDataCleanupGeneration(), dataCleanupGeneration);
+ }
+ {
+ // cleanup for the second table
+ auto request = MakeHolder<TEvDataShard::TEvForceDataCleanup>(dataCleanupGeneration);
+ runtime.SendToPipe(table2Shards.at(0), sender, request.Release(), 0, GetPipeConfigWithRetries());
+
+ auto ev = runtime.GrabEdgeEventRethrow<TEvDataShard::TEvForceDataCleanupResult>(sender);
+ CheckResultEvent(*ev->Get(), table2Shards.at(0), dataCleanupGeneration);
+ }
+ {
+ // next cleanup for the first table should succeed after compaction of the second table
+ ++dataCleanupGeneration;
+ auto request = MakeHolder<TEvDataShard::TEvForceDataCleanup>(dataCleanupGeneration);
+ runtime.SendToPipe(tableShards.at(0), sender, request.Release(), 0, GetPipeConfigWithRetries());
+
+ auto ev = runtime.GrabEdgeEventRethrow<TEvDataShard::TEvForceDataCleanupResult>(sender);
+ CheckResultEvent(*ev->Get(), tableShards.at(0), dataCleanupGeneration);
+ }
+
+ CheckTableData(server, proxyDSs, "/Root/table-1");
+ CheckTableData(server, proxyDSs, "/Root/table-2");
+ }
}
} // namespace NKikimr
diff --git a/ydb/core/tx/datashard/datashard_ut_local_kmeans.cpp b/ydb/core/tx/datashard/datashard_ut_local_kmeans.cpp
index df2e850944..3d081ef34a 100644
--- a/ydb/core/tx/datashard/datashard_ut_local_kmeans.cpp
+++ b/ydb/core/tx/datashard/datashard_ut_local_kmeans.cpp
@@ -64,10 +64,8 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
}
rec.SetSeed(1337);
- rec.SetState(NKikimrTxDataShard::TEvLocalKMeansRequest::SAMPLE);
- rec.SetUpload(NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_POSTING);
+ rec.SetUpload(NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_POSTING);
- rec.SetDoneRounds(0);
rec.SetNeedsRounds(3);
rec.SetParentFrom(0);
@@ -93,7 +91,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
static std::tuple<TString, TString> DoLocalKMeans(
Tests::TServer::TPtr server, TActorId sender, NTableIndex::TClusterId parent, ui64 seed, ui64 k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::EState upload, VectorIndexSettings::VectorType type,
+ NKikimrTxDataShard::EKMeansState upload, VectorIndexSettings::VectorType type,
VectorIndexSettings::Metric metric)
{
auto id = sId.fetch_add(1, std::memory_order_relaxed);
@@ -129,10 +127,8 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
rec.SetK(k);
rec.SetSeed(seed);
- rec.SetState(NKikimrTxDataShard::TEvLocalKMeansRequest::SAMPLE);
rec.SetUpload(upload);
- rec.SetDoneRounds(0);
rec.SetNeedsRounds(300);
rec.SetParentFrom(parent);
@@ -351,7 +347,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
seed = 0;
for (auto distance : {VectorIndexSettings::DISTANCE_MANHATTAN, VectorIndexSettings::DISTANCE_EUCLIDEAN}) {
auto [level, posting] = DoLocalKMeans(server, sender, 0, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_POSTING,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_POSTING,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(level, "__ydb_parent = 0, __ydb_id = 1, __ydb_centroid = mm\3\n"
"__ydb_parent = 0, __ydb_id = 2, __ydb_centroid = 11\3\n");
@@ -366,7 +362,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
seed = 111;
for (auto distance : {VectorIndexSettings::DISTANCE_MANHATTAN, VectorIndexSettings::DISTANCE_EUCLIDEAN}) {
auto [level, posting] = DoLocalKMeans(server, sender, 0, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_POSTING,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_POSTING,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(level, "__ydb_parent = 0, __ydb_id = 1, __ydb_centroid = 11\3\n"
"__ydb_parent = 0, __ydb_id = 2, __ydb_centroid = mm\3\n");
@@ -382,7 +378,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
VectorIndexSettings::DISTANCE_COSINE})
{
auto [level, posting] = DoLocalKMeans(server, sender, 0, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_POSTING,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_POSTING,
VectorIndexSettings::VECTOR_TYPE_UINT8, similarity);
UNIT_ASSERT_VALUES_EQUAL(level, "__ydb_parent = 0, __ydb_id = 1, __ydb_centroid = II\3\n");
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 1, key = 1, data = one\n"
@@ -441,7 +437,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
seed = 0;
for (auto distance : {VectorIndexSettings::DISTANCE_MANHATTAN, VectorIndexSettings::DISTANCE_EUCLIDEAN}) {
auto [level, posting] = DoLocalKMeans(server, sender, 0, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_BUILD,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_BUILD,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(level, "__ydb_parent = 0, __ydb_id = 1, __ydb_centroid = mm\3\n"
"__ydb_parent = 0, __ydb_id = 2, __ydb_centroid = 11\3\n");
@@ -456,7 +452,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
seed = 111;
for (auto distance : {VectorIndexSettings::DISTANCE_MANHATTAN, VectorIndexSettings::DISTANCE_EUCLIDEAN}) {
auto [level, posting] = DoLocalKMeans(server, sender, 0, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_BUILD,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_BUILD,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(level, "__ydb_parent = 0, __ydb_id = 1, __ydb_centroid = 11\3\n"
"__ydb_parent = 0, __ydb_id = 2, __ydb_centroid = mm\3\n");
@@ -472,7 +468,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
VectorIndexSettings::DISTANCE_COSINE})
{
auto [level, posting] = DoLocalKMeans(server, sender, 0, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_BUILD,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_BUILD,
VectorIndexSettings::VECTOR_TYPE_UINT8, similarity);
UNIT_ASSERT_VALUES_EQUAL(level, "__ydb_parent = 0, __ydb_id = 1, __ydb_centroid = II\3\n");
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 1, key = 1, embedding = \x30\x30\3, data = one\n"
@@ -533,7 +529,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
seed = 0;
for (auto distance : {VectorIndexSettings::DISTANCE_MANHATTAN, VectorIndexSettings::DISTANCE_EUCLIDEAN}) {
auto [level, posting] = DoLocalKMeans(server, sender, 40, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_POSTING,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_POSTING,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(level, "__ydb_parent = 40, __ydb_id = 41, __ydb_centroid = mm\3\n"
"__ydb_parent = 40, __ydb_id = 42, __ydb_centroid = 11\3\n");
@@ -548,7 +544,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
seed = 111;
for (auto distance : {VectorIndexSettings::DISTANCE_MANHATTAN, VectorIndexSettings::DISTANCE_EUCLIDEAN}) {
auto [level, posting] = DoLocalKMeans(server, sender, 40, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_POSTING,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_POSTING,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(level, "__ydb_parent = 40, __ydb_id = 41, __ydb_centroid = 11\3\n"
"__ydb_parent = 40, __ydb_id = 42, __ydb_centroid = mm\3\n");
@@ -564,7 +560,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
VectorIndexSettings::DISTANCE_COSINE})
{
auto [level, posting] = DoLocalKMeans(server, sender, 40, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_POSTING,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_POSTING,
VectorIndexSettings::VECTOR_TYPE_UINT8, similarity);
UNIT_ASSERT_VALUES_EQUAL(level, "__ydb_parent = 40, __ydb_id = 41, __ydb_centroid = II\3\n");
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 41, key = 1, data = one\n"
@@ -625,7 +621,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
seed = 0;
for (auto distance : {VectorIndexSettings::DISTANCE_MANHATTAN, VectorIndexSettings::DISTANCE_EUCLIDEAN}) {
auto [level, posting] = DoLocalKMeans(server, sender, 40, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_BUILD,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_BUILD,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(level, "__ydb_parent = 40, __ydb_id = 41, __ydb_centroid = mm\3\n"
"__ydb_parent = 40, __ydb_id = 42, __ydb_centroid = 11\3\n");
@@ -640,7 +636,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
seed = 111;
for (auto distance : {VectorIndexSettings::DISTANCE_MANHATTAN, VectorIndexSettings::DISTANCE_EUCLIDEAN}) {
auto [level, posting] = DoLocalKMeans(server, sender, 40, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_BUILD,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_BUILD,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(level, "__ydb_parent = 40, __ydb_id = 41, __ydb_centroid = 11\3\n"
"__ydb_parent = 40, __ydb_id = 42, __ydb_centroid = mm\3\n");
@@ -656,7 +652,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardLocalKMeansScan) {
VectorIndexSettings::DISTANCE_COSINE})
{
auto [level, posting] = DoLocalKMeans(server, sender, 40, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_BUILD,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_BUILD,
VectorIndexSettings::VECTOR_TYPE_UINT8, similarity);
UNIT_ASSERT_VALUES_EQUAL(level, "__ydb_parent = 40, __ydb_id = 41, __ydb_centroid = II\3\n");
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 41, key = 1, embedding = \x30\x30\3, data = one\n"
diff --git a/ydb/core/tx/datashard/datashard_ut_prefix_kmeans.cpp b/ydb/core/tx/datashard/datashard_ut_prefix_kmeans.cpp
index a7d8100be1..4bab0e1098 100644
--- a/ydb/core/tx/datashard/datashard_ut_prefix_kmeans.cpp
+++ b/ydb/core/tx/datashard/datashard_ut_prefix_kmeans.cpp
@@ -61,7 +61,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardPrefixKMeansScan) {
}
rec.SetSeed(1337);
- rec.SetUpload(NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_POSTING);
+ rec.SetUpload(NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_POSTING);
rec.SetNeedsRounds(3);
@@ -86,8 +86,8 @@ Y_UNIT_TEST_SUITE (TTxDataShardPrefixKMeansScan) {
static std::tuple<TString, TString, TString> DoPrefixKMeans(
Tests::TServer::TPtr server, TActorId sender, NTableIndex::TClusterId parent, ui64 seed, ui64 k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::EState upload, VectorIndexSettings::VectorType type,
- VectorIndexSettings::Metric metric)
+ NKikimrTxDataShard::EKMeansState upload, VectorIndexSettings::VectorType type,
+ VectorIndexSettings::Metric metric, ui32 maxBatchRows)
{
auto id = sId.fetch_add(1, std::memory_order_relaxed);
auto& runtime = *server->GetRuntime();
@@ -131,6 +131,8 @@ Y_UNIT_TEST_SUITE (TTxDataShardPrefixKMeansScan) {
rec.SetPrefixName(kPrefixTable);
rec.SetLevelName(kLevelTable);
rec.SetPostingName(kPostingTable);
+
+ rec.MutableScanSettings()->SetMaxBatchRows(maxBatchRows);
};
fill(ev1);
fill(ev2);
@@ -359,9 +361,10 @@ Y_UNIT_TEST_SUITE (TTxDataShardPrefixKMeansScan) {
seed = 0;
for (auto distance : {VectorIndexSettings::DISTANCE_MANHATTAN, VectorIndexSettings::DISTANCE_EUCLIDEAN}) {
+ for (ui32 maxBatchRows : {0, 1, 4, 5, 6, 50000}) {
auto [prefix, level, posting] = DoPrefixKMeans(server, sender, 40, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_POSTING,
- VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_POSTING,
+ VectorIndexSettings::VECTOR_TYPE_UINT8, distance, maxBatchRows);
UNIT_ASSERT_VALUES_EQUAL(prefix,
"user = user-1, __ydb_id = 40\n"
@@ -388,13 +391,14 @@ Y_UNIT_TEST_SUITE (TTxDataShardPrefixKMeansScan) {
"__ydb_parent = 45, key = 25, data = 2-five\n"
);
recreate();
- }
+ }}
seed = 111;
for (auto distance : {VectorIndexSettings::DISTANCE_MANHATTAN, VectorIndexSettings::DISTANCE_EUCLIDEAN}) {
+ for (ui32 maxBatchRows : {0, 1, 4, 5, 6, 50000}) {
auto [prefix, level, posting] = DoPrefixKMeans(server, sender, 40, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_POSTING,
- VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_POSTING,
+ VectorIndexSettings::VECTOR_TYPE_UINT8, distance, maxBatchRows);
UNIT_ASSERT_VALUES_EQUAL(prefix,
"user = user-1, __ydb_id = 40\n"
@@ -421,14 +425,13 @@ Y_UNIT_TEST_SUITE (TTxDataShardPrefixKMeansScan) {
"__ydb_parent = 45, key = 25, data = 2-five\n"
);
recreate();
- }
+ }}
seed = 32;
- for (auto similarity : {VectorIndexSettings::SIMILARITY_INNER_PRODUCT, VectorIndexSettings::SIMILARITY_COSINE,
- VectorIndexSettings::DISTANCE_COSINE})
- {
+ for (auto similarity : {VectorIndexSettings::SIMILARITY_INNER_PRODUCT, VectorIndexSettings::SIMILARITY_COSINE, VectorIndexSettings::DISTANCE_COSINE}) {
+ for (ui32 maxBatchRows : {0, 1, 4, 5, 6, 50000}) {
auto [prefix, level, posting] = DoPrefixKMeans(server, sender, 40, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_POSTING,
- VectorIndexSettings::VECTOR_TYPE_UINT8, similarity);
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_POSTING,
+ VectorIndexSettings::VECTOR_TYPE_UINT8, similarity, maxBatchRows);
UNIT_ASSERT_VALUES_EQUAL(prefix,
"user = user-1, __ydb_id = 40\n"
@@ -453,7 +456,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardPrefixKMeansScan) {
"__ydb_parent = 44, key = 25, data = 2-five\n"
);
recreate();
- }
+ }}
}
Y_UNIT_TEST (BuildToBuild) {
@@ -511,9 +514,10 @@ Y_UNIT_TEST_SUITE (TTxDataShardPrefixKMeansScan) {
seed = 0;
for (auto distance : {VectorIndexSettings::DISTANCE_MANHATTAN, VectorIndexSettings::DISTANCE_EUCLIDEAN}) {
+ for (ui32 maxBatchRows : {0, 1, 4, 5, 6, 50000}) {
auto [prefix, level, posting] = DoPrefixKMeans(server, sender, 40, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_BUILD,
- VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_BUILD,
+ VectorIndexSettings::VECTOR_TYPE_UINT8, distance, maxBatchRows);
UNIT_ASSERT_VALUES_EQUAL(prefix,
"user = user-1, __ydb_id = 40\n"
@@ -540,13 +544,14 @@ Y_UNIT_TEST_SUITE (TTxDataShardPrefixKMeansScan) {
"__ydb_parent = 45, key = 25, embedding = \x75\x75\3, data = 2-five\n"
);
recreate();
- }
+ }}
seed = 111;
for (auto distance : {VectorIndexSettings::DISTANCE_MANHATTAN, VectorIndexSettings::DISTANCE_EUCLIDEAN}) {
+ for (ui32 maxBatchRows : {0, 1, 4, 5, 6, 50000}) {
auto [prefix, level, posting] = DoPrefixKMeans(server, sender, 40, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_BUILD,
- VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_BUILD,
+ VectorIndexSettings::VECTOR_TYPE_UINT8, distance, maxBatchRows);
UNIT_ASSERT_VALUES_EQUAL(prefix,
"user = user-1, __ydb_id = 40\n"
@@ -573,14 +578,13 @@ Y_UNIT_TEST_SUITE (TTxDataShardPrefixKMeansScan) {
"__ydb_parent = 45, key = 25, embedding = \x75\x75\3, data = 2-five\n"
);
recreate();
- }
+ }}
seed = 32;
- for (auto similarity : {VectorIndexSettings::SIMILARITY_INNER_PRODUCT, VectorIndexSettings::SIMILARITY_COSINE,
- VectorIndexSettings::DISTANCE_COSINE})
- {
+ for (auto similarity : {VectorIndexSettings::SIMILARITY_INNER_PRODUCT, VectorIndexSettings::SIMILARITY_COSINE, VectorIndexSettings::DISTANCE_COSINE}) {
+ for (ui32 maxBatchRows : {0, 1, 4, 5, 6, 50000}) {
auto [prefix, level, posting] = DoPrefixKMeans(server, sender, 40, seed, k,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_BUILD,
- VectorIndexSettings::VECTOR_TYPE_UINT8, similarity);
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_BUILD,
+ VectorIndexSettings::VECTOR_TYPE_UINT8, similarity, maxBatchRows);
UNIT_ASSERT_VALUES_EQUAL(prefix,
"user = user-1, __ydb_id = 40\n"
@@ -605,7 +609,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardPrefixKMeansScan) {
"__ydb_parent = 44, key = 25, embedding = \x75\x75\3, data = 2-five\n"
);
recreate();
- }
+ }}
}
}
diff --git a/ydb/core/tx/datashard/datashard_ut_reshuffle_kmeans.cpp b/ydb/core/tx/datashard/datashard_ut_reshuffle_kmeans.cpp
index cc455de7fa..75e5663ef3 100644
--- a/ydb/core/tx/datashard/datashard_ut_reshuffle_kmeans.cpp
+++ b/ydb/core/tx/datashard/datashard_ut_reshuffle_kmeans.cpp
@@ -58,7 +58,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardReshuffleKMeansScan) {
settings.set_metric(metric);
*rec.MutableSettings() = settings;
- rec.SetUpload(NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_POSTING);
+ rec.SetUpload(NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_POSTING);
rec.SetParent(0);
rec.SetChild(1);
@@ -87,7 +87,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardReshuffleKMeansScan) {
static TString DoReshuffleKMeans(Tests::TServer::TPtr server, TActorId sender, NTableIndex::TClusterId parent,
const std::vector<TString>& level,
- NKikimrTxDataShard::TEvLocalKMeansRequest::EState upload,
+ NKikimrTxDataShard::EKMeansState upload,
VectorIndexSettings::VectorType type, VectorIndexSettings::Metric metric)
{
auto id = sId.fetch_add(1, std::memory_order_relaxed);
@@ -315,7 +315,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardReshuffleKMeansScan) {
"11\3",
};
auto posting = DoReshuffleKMeans(server, sender, 0, level,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_POSTING,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_POSTING,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 1, key = 4, data = four\n"
"__ydb_parent = 1, key = 5, data = five\n"
@@ -330,7 +330,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardReshuffleKMeansScan) {
"mm\3",
};
auto posting = DoReshuffleKMeans(server, sender, 0, level,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_POSTING,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_POSTING,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 1, key = 1, data = one\n"
"__ydb_parent = 1, key = 2, data = two\n"
@@ -346,7 +346,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardReshuffleKMeansScan) {
"II\3",
};
auto posting = DoReshuffleKMeans(server, sender, 0, level,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_POSTING,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_POSTING,
VectorIndexSettings::VECTOR_TYPE_UINT8, similarity);
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 1, key = 1, data = one\n"
"__ydb_parent = 1, key = 2, data = two\n"
@@ -400,7 +400,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardReshuffleKMeansScan) {
"11\3",
};
auto posting = DoReshuffleKMeans(server, sender, 0, level,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_BUILD,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_BUILD,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 1, key = 4, embedding = \x65\x65\3, data = four\n"
"__ydb_parent = 1, key = 5, embedding = \x75\x75\3, data = five\n"
@@ -415,7 +415,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardReshuffleKMeansScan) {
"mm\3",
};
auto posting = DoReshuffleKMeans(server, sender, 0, level,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_BUILD,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_BUILD,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 1, key = 1, embedding = \x30\x30\3, data = one\n"
"__ydb_parent = 1, key = 2, embedding = \x31\x31\3, data = two\n"
@@ -431,7 +431,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardReshuffleKMeansScan) {
"II\3",
};
auto posting = DoReshuffleKMeans(server, sender, 0, level,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_BUILD,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_BUILD,
VectorIndexSettings::VECTOR_TYPE_UINT8, similarity);
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 1, key = 1, embedding = \x30\x30\3, data = one\n"
"__ydb_parent = 1, key = 2, embedding = \x31\x31\3, data = two\n"
@@ -487,7 +487,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardReshuffleKMeansScan) {
"11\3",
};
auto posting = DoReshuffleKMeans(server, sender, 40, level,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_POSTING,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_POSTING,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 41, key = 4, data = four\n"
"__ydb_parent = 41, key = 5, data = five\n"
@@ -502,7 +502,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardReshuffleKMeansScan) {
"mm\3",
};
auto posting = DoReshuffleKMeans(server, sender, 40, level,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_POSTING,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_POSTING,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 41, key = 1, data = one\n"
"__ydb_parent = 41, key = 2, data = two\n"
@@ -518,7 +518,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardReshuffleKMeansScan) {
"II\3",
};
auto posting = DoReshuffleKMeans(server, sender, 40, level,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_POSTING,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_POSTING,
VectorIndexSettings::VECTOR_TYPE_UINT8, similarity);
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 41, key = 1, data = one\n"
"__ydb_parent = 41, key = 2, data = two\n"
@@ -574,7 +574,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardReshuffleKMeansScan) {
"11\3",
};
auto posting = DoReshuffleKMeans(server, sender, 40, level,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_BUILD,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_BUILD,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 41, key = 4, embedding = \x65\x65\3, data = four\n"
"__ydb_parent = 41, key = 5, embedding = \x75\x75\3, data = five\n"
@@ -589,7 +589,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardReshuffleKMeansScan) {
"mm\3",
};
auto posting = DoReshuffleKMeans(server, sender, 40, level,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_BUILD,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_BUILD,
VectorIndexSettings::VECTOR_TYPE_UINT8, distance);
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 41, key = 1, embedding = \x30\x30\3, data = one\n"
"__ydb_parent = 41, key = 2, embedding = \x31\x31\3, data = two\n"
@@ -605,7 +605,7 @@ Y_UNIT_TEST_SUITE (TTxDataShardReshuffleKMeansScan) {
"II\3",
};
auto posting = DoReshuffleKMeans(server, sender, 40, level,
- NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_BUILD,
+ NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_BUILD,
VectorIndexSettings::VECTOR_TYPE_UINT8, similarity);
UNIT_ASSERT_VALUES_EQUAL(posting, "__ydb_parent = 41, key = 1, embedding = \x30\x30\3, data = one\n"
"__ydb_parent = 41, key = 2, embedding = \x31\x31\3, data = two\n"
diff --git a/ydb/core/tx/datashard/datashard_ut_trace.cpp b/ydb/core/tx/datashard/datashard_ut_trace.cpp
index dedca2a16e..137d57c37d 100644
--- a/ydb/core/tx/datashard/datashard_ut_trace.cpp
+++ b/ydb/core/tx/datashard/datashard_ut_trace.cpp
@@ -313,7 +313,7 @@ Y_UNIT_TEST_SUITE(TDataShardTrace) {
ExpectedSpan("DataExecuter",
"WaitForTableResolve",
ExpectedSpan("ComputeActor",
- Repeat(("TKqpForwardWriteActor"), 1)),
+ Repeat(("ForwardWriteActor"), 1)),
"RunTasks",
ExpectedSpan(
"Commit",
diff --git a/ydb/core/tx/datashard/extstorage_usage_config.h b/ydb/core/tx/datashard/extstorage_usage_config.h
index 1dc2c6afed..bd84fd91ef 100644
--- a/ydb/core/tx/datashard/extstorage_usage_config.h
+++ b/ydb/core/tx/datashard/extstorage_usage_config.h
@@ -25,21 +25,11 @@ public:
const TMaybe<NBackup::TEncryptionIV> IV;
static TEncryptionSettings FromBackupTask(const NKikimrSchemeOp::TBackupTask& task) {
- if (task.HasEncryptionSettings()) {
- return TEncryptionSettings{
- .EncryptedBackup = true,
- .EncryptionAlgorithm = task.GetEncryptionSettings().GetEncryptionAlgorithm(),
- .Key = NBackup::TEncryptionKey(task.GetEncryptionSettings().GetSymmetricKey().key()),
- .IV = NBackup::TEncryptionIV::FromBinaryString(task.GetEncryptionSettings().GetIV()),
- };
- } else {
- return TEncryptionSettings{
- .EncryptedBackup = false,
- .EncryptionAlgorithm = {},
- .Key = Nothing(),
- .IV = Nothing(),
- };
- }
+ return FromProto(task);
+ }
+
+ static TEncryptionSettings FromRestoreTask(const NKikimrSchemeOp::TRestoreTask& task) {
+ return FromProto(task);
}
TMaybe<NBackup::TEncryptionIV> GetMetadataIV() const {
@@ -70,6 +60,29 @@ public:
}
return iv;
}
+
+ template <class TProto>
+ static TEncryptionSettings FromProto(const TProto& task) {
+ if (task.HasEncryptionSettings()) {
+ TString algorithm;
+ if constexpr (std::is_same_v<TProto, NKikimrSchemeOp::TBackupTask>) {
+ algorithm = task.GetEncryptionSettings().GetEncryptionAlgorithm();
+ }
+ return TEncryptionSettings{
+ .EncryptedBackup = true,
+ .EncryptionAlgorithm = algorithm,
+ .Key = NBackup::TEncryptionKey(task.GetEncryptionSettings().GetSymmetricKey().key()),
+ .IV = NBackup::TEncryptionIV::FromBinaryString(task.GetEncryptionSettings().GetIV()),
+ };
+ } else {
+ return TEncryptionSettings{
+ .EncryptedBackup = false,
+ .EncryptionAlgorithm = {},
+ .Key = Nothing(),
+ .IV = Nothing(),
+ };
+ }
+ }
};
public:
const TString Bucket;
@@ -93,7 +106,7 @@ public:
}
static TS3Settings FromRestoreTask(const NKikimrSchemeOp::TRestoreTask& task) {
- return TS3Settings(task.GetS3Settings(), task.GetShardNum(), TEncryptionSettings{});
+ return TS3Settings(task.GetS3Settings(), task.GetShardNum(), TEncryptionSettings::FromRestoreTask(task));
}
inline const TString& GetBucket() const { return Bucket; }
diff --git a/ydb/core/tx/datashard/import_s3.cpp b/ydb/core/tx/datashard/import_s3.cpp
index aa90e9da6f..035e5230ba 100644
--- a/ydb/core/tx/datashard/import_s3.cpp
+++ b/ydb/core/tx/datashard/import_s3.cpp
@@ -488,17 +488,34 @@ class TS3Downloader: public TActorBootstrapped<TS3Downloader> {
return HeadObject(Settings.GetDataKey(DataFormat, CompressionCodec));
}
+ THolder<IReadController> reader;
switch (CompressionCodec) {
case NBackupRestoreTraits::ECompressionCodec::None:
- Reader.Reset(new TReadControllerRaw(ReadBatchSize, ReadBufferSizeLimit));
+ reader.Reset(new TReadControllerRaw(ReadBatchSize, ReadBufferSizeLimit));
break;
case NBackupRestoreTraits::ECompressionCodec::Zstd:
- Reader.Reset(new TReadControllerZstd(ReadBatchSize, ReadBufferSizeLimit));
+ reader.Reset(new TReadControllerZstd(ReadBatchSize, ReadBufferSizeLimit));
break;
case NBackupRestoreTraits::ECompressionCodec::Invalid:
Y_ENSURE(false, "unreachable");
}
+ if (Settings.EncryptionSettings.EncryptedBackup) {
+ NBackup::TEncryptionIV expectedIV = NBackup::TEncryptionIV::Combine(
+ *Settings.EncryptionSettings.IV,
+ NBackup::EBackupFileType::TableData,
+ 0 /* already combined */,
+ Settings.Shard
+ );
+ Reader = MakeHolder<TEncryptionDeserializerController>(
+ *Settings.EncryptionSettings.Key,
+ expectedIV,
+ std::move(reader)
+ );
+ } else {
+ Reader = std::move(reader);
+ }
+
ETag = result.GetResult().GetETag();
ContentLength = result.GetResult().GetContentLength();
diff --git a/ydb/core/tx/datashard/kmeans_helper.cpp b/ydb/core/tx/datashard/kmeans_helper.cpp
index 6a063821a6..19c84d1f5f 100644
--- a/ydb/core/tx/datashard/kmeans_helper.cpp
+++ b/ydb/core/tx/datashard/kmeans_helper.cpp
@@ -91,7 +91,7 @@ TTags MakeUploadTags(const TUserTable& table, const TProtoStringType& embedding,
}
std::shared_ptr<NTxProxy::TUploadTypes>
-MakeUploadTypes(const TUserTable& table, NKikimrTxDataShard::TEvLocalKMeansRequest::EState uploadState,
+MakeUploadTypes(const TUserTable& table, NKikimrTxDataShard::EKMeansState uploadState,
const TProtoStringType& embedding, const google::protobuf::RepeatedPtrField<TProtoStringType>& data,
ui32 prefixColumns)
{
@@ -116,12 +116,12 @@ MakeUploadTypes(const TUserTable& table, NKikimrTxDataShard::TEvLocalKMeansReque
addType(table.Columns.at(column).Name);
}
switch (uploadState) {
- case NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_BUILD:
- case NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_BUILD:
+ case NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_BUILD:
+ case NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_BUILD:
addType(embedding);
[[fallthrough]];
- case NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_POSTING:
- case NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_POSTING: {
+ case NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_POSTING:
+ case NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_POSTING: {
for (const auto& column : data) {
addType(column);
}
diff --git a/ydb/core/tx/datashard/kmeans_helper.h b/ydb/core/tx/datashard/kmeans_helper.h
index cc8817e28d..a074f87fd8 100644
--- a/ydb/core/tx/datashard/kmeans_helper.h
+++ b/ydb/core/tx/datashard/kmeans_helper.h
@@ -218,7 +218,7 @@ TTags MakeUploadTags(const TUserTable& table, const TProtoStringType& embedding,
ui32& dataPos, NTable::TTag& embeddingTag);
std::shared_ptr<NTxProxy::TUploadTypes>
-MakeUploadTypes(const TUserTable& table, NKikimrTxDataShard::TEvLocalKMeansRequest::EState uploadState,
+MakeUploadTypes(const TUserTable& table, NKikimrTxDataShard::EKMeansState uploadState,
const TProtoStringType& embedding, const google::protobuf::RepeatedPtrField<TProtoStringType>& data,
ui32 prefixColumns = 0);
diff --git a/ydb/core/tx/datashard/local_kmeans.cpp b/ydb/core/tx/datashard/local_kmeans.cpp
index 3061055584..b39a09c3f6 100644
--- a/ydb/core/tx/datashard/local_kmeans.cpp
+++ b/ydb/core/tx/datashard/local_kmeans.cpp
@@ -49,7 +49,7 @@ public:
Size = 0;
lock.unlock();
- LOG_N("FinishTLocalKMeansScan " << Response->Record.ShortDebugString());
+ LOG_N("Finish TLocalKMeansScan " << Response->Record.ShortDebugString());
ctx.Send(ResponseActorId, std::move(Response));
}
@@ -86,7 +86,7 @@ static constexpr double MinVectorsNeedsReassigned = 0.01;
class TLocalKMeansScanBase: public TActor<TLocalKMeansScanBase>, public NTable::IScan {
protected:
- using EState = NKikimrTxDataShard::TEvLocalKMeansRequest;
+ using EState = NKikimrTxDataShard::EKMeansState;
NTableIndex::TClusterId Parent = 0;
NTableIndex::TClusterId Child = 0;
@@ -96,8 +96,8 @@ protected:
ui32 K = 0;
- EState::EState State;
- EState::EState UploadState;
+ EState State;
+ EState UploadState;
IDriver* Driver = nullptr;
@@ -139,7 +139,7 @@ protected:
ui32 RetryCount = 0;
TActorId Uploader;
- TUploadLimits Limits;
+ const TIndexBuildScanSettings ScanSettings;
NTable::TTag KMeansScan;
TTags UploadScan;
@@ -163,15 +163,16 @@ public:
: TActor{&TThis::StateWork}
, Parent{parent}
, Child{child}
- , MaxRounds{request.GetNeedsRounds() - request.GetDoneRounds()}
+ , MaxRounds{request.GetNeedsRounds()}
, K{request.GetK()}
- , State{request.GetState()}
+ , State{EState::SAMPLE}
, UploadState{request.GetUpload()}
, Lead{std::move(lead)}
, BuildId{buildId}
, Rng{request.GetSeed()}
, TargetTable{request.GetLevelName()}
, NextTable{request.GetPostingName()}
+ , ScanSettings(request.GetScanSettings())
, Result{std::move(result)}
{
const auto& embedding = request.GetEmbeddingColumn();
@@ -238,21 +239,15 @@ public:
TString Debug() const
{
- return TStringBuilder() << " TLocalKMeansScan Id: " << BuildId << " Parent: " << Parent << " Child: " << Child
+ return TStringBuilder() << "TLocalKMeansScan Id: " << BuildId << " Parent: " << Parent << " Child: " << Child
<< " Target: " << TargetTable << " K: " << K << " Clusters: " << Clusters.size()
<< " State: " << State << " Round: " << Round << " / " << MaxRounds
- << " ReadBuf size: " << ReadBuf.Size() << " WriteBuf size: " << WriteBuf.Size() << " ";
+ << " ReadBuf size: " << ReadBuf.Size() << " WriteBuf size: " << WriteBuf.Size();
}
EScan PageFault() final
{
LOG_T("PageFault " << Debug());
-
- if (!ReadBuf.IsEmpty() && WriteBuf.IsEmpty()) {
- ReadBuf.FlushTo(WriteBuf);
- Upload(false);
- }
-
return EScan::Feed;
}
@@ -296,7 +291,7 @@ protected:
UploadRows += WriteBuf.GetRows();
UploadBytes += WriteBuf.GetBytes();
WriteBuf.Clear();
- if (!ReadBuf.IsEmpty() && ReadBuf.IsReachLimits(Limits)) {
+ if (HasReachedLimits(ReadBuf, ScanSettings)) {
ReadBuf.FlushTo(WriteBuf);
Upload(false);
}
@@ -305,10 +300,10 @@ protected:
return;
}
- if (RetryCount < Limits.MaxUploadRowsRetryCount && UploadStatus.IsRetriable()) {
+ if (RetryCount < ScanSettings.GetMaxBatchRetries() && UploadStatus.IsRetriable()) {
LOG_N("Got retriable error, " << Debug() << UploadStatus.ToString());
- ctx.Schedule(Limits.GetTimeoutBackoff(RetryCount), new TEvents::TEvWakeup());
+ ctx.Schedule(GetRetryWakeupTimeoutBackoff(RetryCount), new TEvents::TEvWakeup());
return;
}
@@ -319,7 +314,7 @@ protected:
EScan FeedUpload()
{
- if (!ReadBuf.IsReachLimits(Limits)) {
+ if (!HasReachedLimits(ReadBuf, ScanSettings)) {
return EScan::Feed;
}
if (!WriteBuf.IsEmpty()) {
@@ -655,11 +650,9 @@ void TDataShard::Handle(TEvDataShard::TEvLocalKMeansRequest::TPtr& ev, const TAc
void TDataShard::HandleSafe(TEvDataShard::TEvLocalKMeansRequest::TPtr& ev, const TActorContext& ctx)
{
auto& request = ev->Get()->Record;
- const bool needsSnapshot = request.HasSnapshotStep() || request.HasSnapshotTxId();
- TRowVersion rowVersion(request.GetSnapshotStep(), request.GetSnapshotTxId());
- if (!needsSnapshot) {
- rowVersion = GetMvccTxVersion(EMvccTxMode::ReadOnly);
- }
+ auto rowVersion = request.HasSnapshotStep() || request.HasSnapshotTxId()
+ ? TRowVersion(request.GetSnapshotStep(), request.GetSnapshotTxId())
+ : GetMvccTxVersion(EMvccTxMode::ReadOnly);
LOG_N("Starting TLocalKMeansScan " << request.ShortDebugString()
<< " row version " << rowVersion);
@@ -724,12 +717,14 @@ void TDataShard::HandleSafe(TEvDataShard::TEvLocalKMeansRequest::TPtr& ev, const
localTid = userTable.LocalTid;
- const TSnapshotKey snapshotKey(pathId, rowVersion.Step, rowVersion.TxId);
- if (needsSnapshot && !SnapshotManager.FindAvailable(snapshotKey)) {
- badRequest(TStringBuilder() << "no snapshot has been found" << " , path id is " << pathId.OwnerId << ":"
- << pathId.LocalPathId << " , snapshot step is " << snapshotKey.Step
- << " , snapshot tx is " << snapshotKey.TxId);
- return;
+ if (request.HasSnapshotStep() || request.HasSnapshotTxId()) {
+ const TSnapshotKey snapshotKey(pathId, rowVersion.Step, rowVersion.TxId);
+ if (!SnapshotManager.FindAvailable(snapshotKey)) {
+ badRequest(TStringBuilder() << "no snapshot has been found" << " , path id is " << pathId.OwnerId << ":"
+ << pathId.LocalPathId << " , snapshot step is " << snapshotKey.Step
+ << " , snapshot tx is " << snapshotKey.TxId);
+ return;
+ }
}
if (!IsStateActive()) {
diff --git a/ydb/core/tx/datashard/prefix_kmeans.cpp b/ydb/core/tx/datashard/prefix_kmeans.cpp
index fd0c9ac84a..5a36035d78 100644
--- a/ydb/core/tx/datashard/prefix_kmeans.cpp
+++ b/ydb/core/tx/datashard/prefix_kmeans.cpp
@@ -22,12 +22,12 @@
namespace NKikimr::NDataShard {
using namespace NKMeans;
-// This scan needed to run kmeans reshuffle which is part of global kmeans run.
+// If less than 1% of vectors are reassigned to new clusters we want to stop
static constexpr double MinVectorsNeedsReassigned = 0.01;
class TPrefixKMeansScanBase: public TActor<TPrefixKMeansScanBase>, public NTable::IScan {
protected:
- using EState = NKikimrTxDataShard::TEvLocalKMeansRequest;
+ using EState = NKikimrTxDataShard::EKMeansState;
NTableIndex::TClusterId Parent = 0;
NTableIndex::TClusterId Child = 0;
@@ -38,8 +38,8 @@ protected:
const ui32 InitK = 0;
ui32 K = 0;
- EState::EState State;
- const EState::EState UploadState;
+ EState State;
+ const EState UploadState;
IDriver* Driver = nullptr;
@@ -87,7 +87,7 @@ protected:
ui32 RetryCount = 0;
TActorId Uploader;
- TUploadLimits Limits;
+ const TIndexBuildScanSettings ScanSettings;
NTable::TTag EmbeddingTag;
TTags ScanTags;
@@ -132,6 +132,7 @@ public:
, LevelTable{request.GetLevelName()}
, PostingTable{request.GetPostingName()}
, PrefixTable{request.GetPrefixName()}
+ , ScanSettings(request.GetScanSettings())
, ResponseActorId{responseActorId}
, Response{std::move(response)}
, PrefixColumns{request.GetPrefixColumns()}
@@ -206,7 +207,7 @@ public:
}
NYql::IssuesToMessage(UploadStatus.Issues, record.MutableIssues());
- LOG_N("Finish" << Debug() << " " << Response->Record.ShortDebugString());
+ LOG_N("Finish " << Debug() << " " << Response->Record.ShortDebugString());
Send(ResponseActorId, Response.Release());
Driver = nullptr;
@@ -225,18 +226,12 @@ public:
<< " K: " << K << " Clusters: " << Clusters.size()
<< " State: " << State << " Round: " << Round << " / " << MaxRounds
<< " LevelBuf size: " << LevelBuf.Size() << " PostingBuf size: " << PostingBuf.Size() << " PrefixBuf size: " << PrefixBuf.Size()
- << " UploadTable: " << UploadTable << " UploadBuf size: " << UploadBuf.Size();
+ << " UploadTable: " << UploadTable << " UploadBuf size: " << UploadBuf.Size() << " RetryCount: " << RetryCount;
}
EScan PageFault() final
{
LOG_T("PageFault " << Debug());
-
- UploadInProgress()
- || TryUpload(LevelBuf, LevelTable, LevelTypes, false)
- || TryUpload(PostingBuf, PostingTable, PostingTypes, false)
- || TryUpload(PrefixBuf, PrefixTable, PrefixTypes, false);
-
return EScan::Feed;
}
@@ -292,10 +287,10 @@ protected:
return;
}
- if (RetryCount < Limits.MaxUploadRowsRetryCount && UploadStatus.IsRetriable()) {
+ if (RetryCount < ScanSettings.GetMaxBatchRetries() && UploadStatus.IsRetriable()) {
LOG_N("Got retriable error, " << Debug() << " " << UploadStatus.ToString());
- ctx.Schedule(Limits.GetTimeoutBackoff(RetryCount), new TEvents::TEvWakeup());
+ ctx.Schedule(GetRetryWakeupTimeoutBackoff(RetryCount), new TEvents::TEvWakeup());
return;
}
@@ -311,7 +306,7 @@ protected:
bool ShouldWaitUpload()
{
- if (!LevelBuf.IsReachLimits(Limits) && !PostingBuf.IsReachLimits(Limits) && !PrefixBuf.IsReachLimits(Limits)) {
+ if (!HasReachedLimits(LevelBuf, ScanSettings) && !HasReachedLimits(PostingBuf, ScanSettings) && !HasReachedLimits(PrefixBuf, ScanSettings)) {
return false;
}
@@ -323,11 +318,13 @@ protected:
|| TryUpload(PostingBuf, PostingTable, PostingTypes, true)
|| TryUpload(PrefixBuf, PrefixTable, PrefixTypes, true);
- return !LevelBuf.IsReachLimits(Limits) && !PostingBuf.IsReachLimits(Limits) && !PrefixBuf.IsReachLimits(Limits);
+ return !HasReachedLimits(LevelBuf, ScanSettings) && !HasReachedLimits(PostingBuf, ScanSettings) && !HasReachedLimits(PrefixBuf, ScanSettings);
}
void UploadImpl()
{
+ LOG_D("Uploading " << Debug());
+
Y_ASSERT(!UploadBuf.IsEmpty());
Y_ASSERT(!Uploader);
auto actor = NTxProxy::CreateUploadRowsInternal(
@@ -363,7 +360,7 @@ protected:
return true;
}
- if (!buffer.IsEmpty() && (!byLimit || buffer.IsReachLimits(Limits))) {
+ if (!buffer.IsEmpty() && (!byLimit || HasReachedLimits(buffer, ScanSettings))) {
buffer.FlushTo(UploadBuf);
InitUpload(table, types);
return true;
@@ -475,7 +472,7 @@ public:
if (IsFirstPrefixFeed && IsPrefixRowsValid) {
PrefixRows.AddRow(TSerializedCellVec{key}, TSerializedCellVec::Serialize(*row));
- if (PrefixRows.IsReachLimits(Limits)) {
+ if (HasReachedLimits(PrefixRows, ScanSettings)) {
PrefixRows.Clear();
IsPrefixRowsValid = false;
}
@@ -741,10 +738,10 @@ void TDataShard::Handle(TEvDataShard::TEvPrefixKMeansRequest::TPtr& ev, const TA
void TDataShard::HandleSafe(TEvDataShard::TEvPrefixKMeansRequest::TPtr& ev, const TActorContext& ctx)
{
- auto& record = ev->Get()->Record;
+ auto& request = ev->Get()->Record;
TRowVersion rowVersion = GetMvccTxVersion(EMvccTxMode::ReadOnly);
- LOG_N("Starting TPrefixKMeansScan " << record.ShortDebugString()
+ LOG_N("Starting TPrefixKMeansScan " << request.ShortDebugString()
<< " row version " << rowVersion);
// Note: it's very unlikely that we have volatile txs before this snapshot
@@ -752,13 +749,13 @@ void TDataShard::HandleSafe(TEvDataShard::TEvPrefixKMeansRequest::TPtr& ev, cons
VolatileTxManager.AttachWaitingSnapshotEvent(rowVersion, std::unique_ptr<IEventHandle>(ev.Release()));
return;
}
- const ui64 id = record.GetId();
+ const ui64 id = request.GetId();
auto response = MakeHolder<TEvDataShard::TEvPrefixKMeansResponse>();
response->Record.SetId(id);
response->Record.SetTabletId(TabletID());
- TScanRecord::TSeqNo seqNo = {record.GetSeqNoGeneration(), record.GetSeqNoRound()};
+ TScanRecord::TSeqNo seqNo = {request.GetSeqNoGeneration(), request.GetSeqNoRound()};
response->Record.SetRequestSeqNoGeneration(seqNo.Generation);
response->Record.SetRequestSeqNoRound(seqNo.Round);
@@ -771,12 +768,12 @@ void TDataShard::HandleSafe(TEvDataShard::TEvPrefixKMeansRequest::TPtr& ev, cons
response.Reset();
};
- if (const ui64 shardId = record.GetTabletId(); shardId != TabletID()) {
+ if (const ui64 shardId = request.GetTabletId(); shardId != TabletID()) {
badRequest(TStringBuilder() << "Wrong shard " << shardId << " this is " << TabletID());
return;
}
- const auto pathId = TPathId::FromProto(record.GetPathId());
+ const auto pathId = TPathId::FromProto(request.GetPathId());
const auto* userTableIt = GetUserTables().FindPtr(pathId.LocalPathId);
if (!userTableIt) {
badRequest(TStringBuilder() << "Unknown table id: " << pathId.LocalPathId);
@@ -802,7 +799,7 @@ void TDataShard::HandleSafe(TEvDataShard::TEvPrefixKMeansRequest::TPtr& ev, cons
return;
}
- if (record.GetK() < 2) {
+ if (request.GetK() < 2) {
badRequest("Should be requested partition on at least two rows");
return;
}
@@ -810,10 +807,10 @@ void TDataShard::HandleSafe(TEvDataShard::TEvPrefixKMeansRequest::TPtr& ev, cons
TAutoPtr<NTable::IScan> scan;
auto createScan = [&]<typename T> {
scan = new TPrefixKMeansScan<T>{
- userTable, record, ev->Sender, std::move(response),
+ userTable, request, ev->Sender, std::move(response),
};
};
- MakeScan(record, createScan, badRequest);
+ MakeScan(request, createScan, badRequest);
if (!scan) {
Y_ASSERT(!response);
return;
diff --git a/ydb/core/tx/datashard/reshuffle_kmeans.cpp b/ydb/core/tx/datashard/reshuffle_kmeans.cpp
index b919fc306c..f7db951e60 100644
--- a/ydb/core/tx/datashard/reshuffle_kmeans.cpp
+++ b/ydb/core/tx/datashard/reshuffle_kmeans.cpp
@@ -25,14 +25,14 @@ using namespace NKMeans;
// This scan needed to run kmeans reshuffle which is part of global kmeans run.
class TReshuffleKMeansScanBase: public TActor<TReshuffleKMeansScanBase>, public NTable::IScan {
protected:
- using EState = NKikimrTxDataShard::TEvLocalKMeansRequest;
+ using EState = NKikimrTxDataShard::EKMeansState;
NTableIndex::TClusterId Parent = 0;
NTableIndex::TClusterId Child = 0;
ui32 K = 0;
- EState::EState UploadState;
+ EState UploadState;
IDriver* Driver = nullptr;
@@ -59,7 +59,7 @@ protected:
ui32 RetryCount = 0;
TActorId Uploader;
- TUploadLimits Limits;
+ const TIndexBuildScanSettings ScanSettings;
TTags UploadScan;
@@ -91,6 +91,7 @@ public:
, BuildId{request.GetId()}
, Clusters{request.GetClusters().begin(), request.GetClusters().end()}
, TargetTable{request.GetPostingName()}
+ , ScanSettings(request.GetScanSettings())
, ResponseActorId{responseActorId}
, Response{std::move(response)}
{
@@ -155,7 +156,7 @@ public:
}
NYql::IssuesToMessage(UploadStatus.Issues, record.MutableIssues());
- LOG_N("Finish" << Debug() << " " << Response->Record.ShortDebugString());
+ LOG_N("Finish " << Debug() << " " << Response->Record.ShortDebugString());
Send(ResponseActorId, Response.Release());
Driver = nullptr;
@@ -170,20 +171,14 @@ public:
TString Debug() const
{
- return TStringBuilder() << " TReshuffleKMeansScan Id: " << BuildId << " Parent: " << Parent << " Child: " << Child
+ return TStringBuilder() << "TReshuffleKMeansScan Id: " << BuildId << " Parent: " << Parent << " Child: " << Child
<< " Target: " << TargetTable << " K: " << K << " Clusters: " << Clusters.size()
- << " ReadBuf size: " << ReadBuf.Size() << " WriteBuf size: " << WriteBuf.Size() << " ";
+ << " ReadBuf size: " << ReadBuf.Size() << " WriteBuf size: " << WriteBuf.Size();
}
EScan PageFault() final
{
LOG_T("PageFault " << Debug());
-
- if (!ReadBuf.IsEmpty() && WriteBuf.IsEmpty()) {
- ReadBuf.FlushTo(WriteBuf);
- Upload(false);
- }
-
return EScan::Feed;
}
@@ -214,8 +209,9 @@ protected:
<< " ev->Sender: " << ev->Sender.ToString());
if (Uploader) {
- Y_ENSURE(Uploader == ev->Sender, "Mismatch Uploader: " << Uploader.ToString() << " ev->Sender: "
- << ev->Sender.ToString() << Debug());
+ Y_ENSURE(Uploader == ev->Sender, "Mismatch"
+ << " Uploader: " << Uploader.ToString()
+ << " Sender: " << ev->Sender.ToString());
} else {
Y_ENSURE(Driver == nullptr);
return;
@@ -227,7 +223,7 @@ protected:
UploadRows += WriteBuf.GetRows();
UploadBytes += WriteBuf.GetBytes();
WriteBuf.Clear();
- if (!ReadBuf.IsEmpty() && ReadBuf.IsReachLimits(Limits)) {
+ if (HasReachedLimits(ReadBuf, ScanSettings)) {
ReadBuf.FlushTo(WriteBuf);
Upload(false);
}
@@ -236,21 +232,21 @@ protected:
return;
}
- if (RetryCount < Limits.MaxUploadRowsRetryCount && UploadStatus.IsRetriable()) {
- LOG_N("Got retriable error, " << Debug() << UploadStatus.ToString());
+ if (RetryCount < ScanSettings.GetMaxBatchRetries() && UploadStatus.IsRetriable()) {
+ LOG_N("Got retriable error, " << Debug() << " " << UploadStatus.ToString());
- Schedule(Limits.GetTimeoutBackoff(RetryCount), new TEvents::TEvWakeup);
+ Schedule(GetRetryWakeupTimeoutBackoff(RetryCount), new TEvents::TEvWakeup);
return;
}
- LOG_N("Got error, abort scan, " << Debug() << UploadStatus.ToString());
+ LOG_N("Got error, abort scan, " << Debug() << " " << UploadStatus.ToString());
Driver->Touch(EScan::Final);
}
EScan FeedUpload()
{
- if (!ReadBuf.IsReachLimits(Limits)) {
+ if (!HasReachedLimits(ReadBuf, ScanSettings)) {
return EScan::Feed;
}
if (!WriteBuf.IsEmpty()) {
diff --git a/ydb/core/tx/datashard/sample_k.cpp b/ydb/core/tx/datashard/sample_k.cpp
index 5d04c9c6d6..e8472a4d7e 100644
--- a/ydb/core/tx/datashard/sample_k.cpp
+++ b/ydb/core/tx/datashard/sample_k.cpp
@@ -156,7 +156,7 @@ public:
} else {
Response->Record.SetStatus(NKikimrIndexBuilder::EBuildStatus::ABORTED);
}
- LOG_N("Finish" << Debug() << " " << Response->Record.ShortDebugString());
+ LOG_N("Finish " << Debug() << " " << Response->Record.ShortDebugString());
Send(ResponseActorId, Response.Release());
Driver = nullptr;
PassAway();
@@ -172,8 +172,8 @@ public:
}
TString Debug() const {
- return TStringBuilder() << " TSampleKScan Id: " << BuildId
- << " K: " << K << " Clusters: " << MaxRows.size() << " ";
+ return TStringBuilder() << "TSampleKScan Id: " << BuildId
+ << " K: " << K << " Clusters: " << MaxRows.size();
}
private:
diff --git a/ydb/core/tx/datashard/scan_common.h b/ydb/core/tx/datashard/scan_common.h
index 0ed5942110..e29bcf4f8b 100644
--- a/ydb/core/tx/datashard/scan_common.h
+++ b/ydb/core/tx/datashard/scan_common.h
@@ -1,5 +1,8 @@
#pragma once
+#include "buffer_data.h"
+
+#include <ydb/core/protos/index_builder.pb.h>
#include <ydb/public/api/protos/ydb_value.pb.h>
#include <ydb/core/tablet_flat/flat_cxx_database.h>
#include <ydb/core/util/intrusive_heap.h>
@@ -10,6 +13,7 @@
namespace NKikimr::NDataShard {
+using TIndexBuildScanSettings = NKikimrIndexBuilder::TIndexBuildScanSettings;
class TDataShard;
struct TUserTable;
@@ -87,4 +91,14 @@ TColumnsTypes GetAllTypes(const TUserTable& tableInfo);
// if IScan will provide for us "how much data did we read"?
ui64 CountBytes(TArrayRef<const TCell> key, const NTable::TRowState& row);
+inline TDuration GetRetryWakeupTimeoutBackoff(ui32 attempt) {
+ const ui32 maxBackoffExponent = 3;
+
+ return TDuration::Seconds(1u << Min(attempt, maxBackoffExponent));
+}
+
+inline bool HasReachedLimits(const TBufferData& buffer, const TIndexBuildScanSettings& scanSettings) {
+ return buffer.HasReachedLimits(scanSettings.GetMaxBatchRows(), scanSettings.GetMaxBatchBytes());
+}
+
}
diff --git a/ydb/core/tx/datashard/upload_stats.h b/ydb/core/tx/datashard/upload_stats.h
index 21372bb6fb..9899fce85c 100644
--- a/ydb/core/tx/datashard/upload_stats.h
+++ b/ydb/core/tx/datashard/upload_stats.h
@@ -56,7 +56,10 @@ struct TUploadStatus {
}
bool IsRetriable() const {
- return StatusCode == Ydb::StatusIds::UNAVAILABLE || StatusCode == Ydb::StatusIds::OVERLOADED;
+ return StatusCode == Ydb::StatusIds::UNAVAILABLE
+ || StatusCode == Ydb::StatusIds::OVERLOADED
+ || StatusCode == Ydb::StatusIds::TIMEOUT
+ ;
}
TString ToString() const {
@@ -68,18 +71,4 @@ struct TUploadStatus {
}
};
-struct TUploadRetryLimits {
- ui32 MaxUploadRowsRetryCount = 50;
- ui32 BackoffCeiling = 3;
-
- TDuration GetTimeoutBackoff(ui32 retryNo) const {
- return TDuration::Seconds(1u << Max(retryNo, BackoffCeiling));
- }
-};
-
-struct TUploadLimits: TUploadRetryLimits {
- ui64 BatchRowsLimit = 50000;
- ui64 BatchBytesLimit = 8388608; // 8MB
-};
-
}
diff --git a/ydb/core/tx/program/builder.cpp b/ydb/core/tx/program/builder.cpp
index 31a5d1be32..144a1b2bfe 100644
--- a/ydb/core/tx/program/builder.cpp
+++ b/ydb/core/tx/program/builder.cpp
@@ -17,8 +17,17 @@
namespace NKikimr::NArrow::NSSA {
-TConclusion<std::shared_ptr<IStepFunction>> TProgramBuilder::MakeFunction(
- const TColumnInfo& name, const NKikimrSSA::TProgram::TAssignment::TFunction& func, std::vector<TColumnChainInfo>& arguments) const {
+TConclusion<std::shared_ptr<IStepFunction>> TProgramBuilder::MakeFunction(const TColumnInfo& name,
+ const NKikimrSSA::TProgram::TAssignment::TFunction& func, std::shared_ptr<NArrow::NSSA::IKernelLogic>& kernelLogic,
+ std::vector<TColumnChainInfo>& arguments) const {
+ if (func.GetKernelName()) {
+ kernelLogic.reset(IKernelLogic::TFactory::Construct(func.GetKernelName()));
+ } else if (func.HasYqlOperationId()) {
+ kernelLogic = std::make_shared<TSimpleKernelLogic>(func.GetYqlOperationId());
+ } else {
+ kernelLogic = std::make_shared<TSimpleKernelLogic>();
+ }
+
using TId = NKikimrSSA::TProgram::TAssignment;
arguments.clear();
@@ -27,6 +36,15 @@ TConclusion<std::shared_ptr<IStepFunction>> TProgramBuilder::MakeFunction(
}
if (func.GetFunctionType() == NKikimrSSA::TProgram::EFunctionType::TProgram_EFunctionType_YQL_KERNEL) {
+ if (func.GetYqlOperationId() == (ui32)NYql::TKernelRequestBuilder::EBinaryOp::Equals) {
+ kernelLogic = std::make_shared<TLogicEquals>(false);
+ } else if (func.GetYqlOperationId() == (ui32)NYql::TKernelRequestBuilder::EBinaryOp::StringContains) {
+ kernelLogic = std::make_shared<TLogicMatchString>(TIndexCheckOperation::EOperation::Contains, true, false);
+ } else if (func.GetYqlOperationId() == (ui32)NYql::TKernelRequestBuilder::EBinaryOp::StartsWith) {
+ kernelLogic = std::make_shared<TLogicMatchString>(TIndexCheckOperation::EOperation::StartsWith, true, false);
+ } else if (func.GetYqlOperationId() == (ui32)NYql::TKernelRequestBuilder::EBinaryOp::EndsWith) {
+ kernelLogic = std::make_shared<TLogicMatchString>(TIndexCheckOperation::EOperation::EndsWith, true, false);
+ }
auto kernelFunction = KernelsRegistry.GetFunction(func.GetKernelIdx());
if (!kernelFunction) {
return TConclusionStatus::Fail(
@@ -59,6 +77,7 @@ TConclusion<std::shared_ptr<IStepFunction>> TProgramBuilder::MakeFunction(
switch (func.GetId()) {
case TId::FUNC_CMP_EQUAL:
+ kernelLogic = std::make_shared<TLogicEquals>(true);
return std::make_shared<TSimpleFunction>(EOperation::Equal);
case TId::FUNC_CMP_NOT_EQUAL:
return std::make_shared<TSimpleFunction>(EOperation::NotEqual);
@@ -76,6 +95,7 @@ TConclusion<std::shared_ptr<IStepFunction>> TProgramBuilder::MakeFunction(
return std::make_shared<TSimpleFunction>(EOperation::BinaryLength);
case TId::FUNC_STR_MATCH: {
if (auto opts = mkLikeOptions(false)) {
+ kernelLogic = std::make_shared<TLogicMatchString>(TIndexCheckOperation::EOperation::Contains, true, true);
return std::make_shared<TSimpleFunction>(EOperation::MatchSubstring, opts);
}
break;
@@ -88,30 +108,35 @@ TConclusion<std::shared_ptr<IStepFunction>> TProgramBuilder::MakeFunction(
}
case TId::FUNC_STR_STARTS_WITH: {
if (auto opts = mkLikeOptions(false)) {
+ kernelLogic = std::make_shared<TLogicMatchString>(TIndexCheckOperation::EOperation::StartsWith, true, true);
return std::make_shared<TSimpleFunction>(EOperation::StartsWith, opts);
}
break;
}
case TId::FUNC_STR_ENDS_WITH: {
if (auto opts = mkLikeOptions(false)) {
+ kernelLogic = std::make_shared<TLogicMatchString>(TIndexCheckOperation::EOperation::EndsWith, true, true);
return std::make_shared<TSimpleFunction>(EOperation::EndsWith, opts);
}
break;
}
case TId::FUNC_STR_MATCH_IGNORE_CASE: {
if (auto opts = mkLikeOptions(true)) {
+ kernelLogic = std::make_shared<TLogicMatchString>(TIndexCheckOperation::EOperation::Contains, false, true);
return std::make_shared<TSimpleFunction>(EOperation::MatchSubstring, opts);
}
break;
}
case TId::FUNC_STR_STARTS_WITH_IGNORE_CASE: {
if (auto opts = mkLikeOptions(true)) {
+ kernelLogic = std::make_shared<TLogicMatchString>(TIndexCheckOperation::EOperation::StartsWith, false, true);
return std::make_shared<TSimpleFunction>(EOperation::StartsWith, opts);
}
break;
}
case TId::FUNC_STR_ENDS_WITH_IGNORE_CASE: {
if (auto opts = mkLikeOptions(true)) {
+ kernelLogic = std::make_shared<TLogicMatchString>(TIndexCheckOperation::EOperation::EndsWith, false, true);
return std::make_shared<TSimpleFunction>(EOperation::EndsWith, opts);
}
break;
@@ -276,18 +301,14 @@ TConclusionStatus TProgramBuilder::ReadAssign(
switch (assign.GetExpressionCase()) {
case TId::kFunction: {
std::shared_ptr<IKernelLogic> kernelLogic;
- if (assign.GetFunction().GetKernelName()) {
- kernelLogic.reset(IKernelLogic::TFactory::Construct(assign.GetFunction().GetKernelName()));
- }
-
std::vector<TColumnChainInfo> arguments;
- auto function = MakeFunction(columnName, assign.GetFunction(), arguments);
+ auto function = MakeFunction(columnName, assign.GetFunction(), kernelLogic, arguments);
if (function.IsFail()) {
return function;
}
- if (assign.GetFunction().HasYqlOperationId() && assign.GetFunction().GetYqlOperationId() ==
- (ui32)NYql::TKernelRequestBuilder::EBinaryOp::And) {
+ if (assign.GetFunction().HasYqlOperationId() &&
+ assign.GetFunction().GetYqlOperationId() == (ui32)NYql::TKernelRequestBuilder::EBinaryOp::And) {
auto processor =
std::make_shared<TStreamLogicProcessor>(std::move(arguments), columnName.GetColumnId(), NKernels::EOperation::And);
Builder.Add(processor);
@@ -301,9 +322,6 @@ TConclusionStatus TProgramBuilder::ReadAssign(
if (processor.IsFail()) {
return processor;
}
- if (assign.GetFunction().HasYqlOperationId()) {
- processor.GetResult()->SetYqlOperationId(assign.GetFunction().GetYqlOperationId());
- }
Builder.Add(processor.DetachResult());
}
break;
@@ -399,7 +417,7 @@ TConclusionStatus TProgramBuilder::ReadGroupBy(const NKikimrSSA::TProgram::TGrou
}
auto aggrType = GetAggregationType(agg.GetFunction());
auto argColumnIds = extractColumnIds(agg.GetFunction().GetArguments());
- auto status = TCalculationProcessor::Build(std::move(argColumnIds), columnName.GetColumnId(), func.DetachResult(), nullptr);
+ auto status = TCalculationProcessor::Build(std::move(argColumnIds), columnName.GetColumnId(), func.DetachResult(), std::make_shared<TSimpleKernelLogic>());
if (status.IsFail()) {
return status;
}
diff --git a/ydb/core/tx/program/builder.h b/ydb/core/tx/program/builder.h
index 94d6793008..5e7b0fcb17 100644
--- a/ydb/core/tx/program/builder.h
+++ b/ydb/core/tx/program/builder.h
@@ -7,6 +7,7 @@
#include <ydb/core/formats/arrow/program/functions.h>
#include <ydb/core/formats/arrow/program/graph_execute.h>
#include <ydb/core/formats/arrow/program/graph_optimization.h>
+#include <ydb/core/formats/arrow/program/kernel_logic.h>
#include <ydb/library/formats/arrow/protos/ssa.pb.h>
@@ -37,8 +38,9 @@ private:
TColumnInfo GetColumnInfo(const NKikimrSSA::TProgram::TColumn& column) const;
std::string GenerateName(const NKikimrSSA::TProgram::TColumn& column) const;
- [[nodiscard]] TConclusion<std::shared_ptr<IStepFunction>> MakeFunction(
- const TColumnInfo& name, const NKikimrSSA::TProgram::TAssignment::TFunction& func, std::vector<TColumnChainInfo>& arguments) const;
+ [[nodiscard]] TConclusion<std::shared_ptr<IStepFunction>> MakeFunction(const TColumnInfo& name,
+ const NKikimrSSA::TProgram::TAssignment::TFunction& func, std::shared_ptr<IKernelLogic>& kernelLogic,
+ std::vector<TColumnChainInfo>& arguments) const;
[[nodiscard]] TConclusion<std::shared_ptr<TConstProcessor>> MakeConstant(
const TColumnInfo& name, const NKikimrSSA::TProgram::TConstant& constant) const;
[[nodiscard]] TConclusion<std::shared_ptr<TConstProcessor>> MaterializeParameter(const TColumnInfo& name,
diff --git a/ydb/core/tx/replication/service/service.cpp b/ydb/core/tx/replication/service/service.cpp
index 103e4260f6..2d4a7c9311 100644
--- a/ydb/core/tx/replication/service/service.cpp
+++ b/ydb/core/tx/replication/service/service.cpp
@@ -2,7 +2,7 @@
#include "service.h"
#include "table_writer.h"
#include "topic_reader.h"
-#include "transfer_writer.h"
+#include "transfer_writer_factory.h"
#include "worker.h"
#include <ydb/core/base/appdata.h>
@@ -417,7 +417,8 @@ class TReplicationService: public TActorBootstrapped<TReplicationService> {
}
std::function<IActor*(void)> TransferWriterFn(
- const NKikimrReplication::TTransferWriterSettings& writerSettings)
+ const NKikimrReplication::TTransferWriterSettings& writerSettings,
+ const ITransferWriterFactory* transferWriterFactory)
{
if (!CompilationService) {
CompilationService = Register(
@@ -429,9 +430,10 @@ class TReplicationService: public TActorBootstrapped<TReplicationService> {
tablePathId = TPathId::FromProto(writerSettings.GetPathId()),
transformLambda = writerSettings.GetTransformLambda(),
compilationService = *CompilationService,
- batchingSettings = writerSettings.GetBatching()
+ batchingSettings = writerSettings.GetBatching(),
+ transferWriterFactory = transferWriterFactory
]() {
- return CreateTransferWriter(transformLambda, tablePathId, compilationService, batchingSettings);
+ return transferWriterFactory->Create({transformLambda, tablePathId, compilationService, batchingSettings});
};
}
@@ -478,7 +480,12 @@ class TReplicationService: public TActorBootstrapped<TReplicationService> {
writerFn = WriterFn(writerSettings, consistencySettings);
} else if (cmd.HasTransferWriter()) {
const auto& writerSettings = cmd.GetTransferWriter();
- writerFn = TransferWriterFn(writerSettings);
+ const auto* transferWriterFactory = AppData()->TransferWriterFactory.get();
+ if (!transferWriterFactory) {
+ LOG_C("Run transfer but TransferWriterFactory does not exists.");
+ return;
+ }
+ writerFn = TransferWriterFn(writerSettings, transferWriterFactory);
} else {
Y_ABORT("Unsupported");
}
diff --git a/ydb/core/tx/replication/service/transfer_writer.cpp b/ydb/core/tx/replication/service/transfer_writer.cpp
deleted file mode 100644
index f1d8f00016..0000000000
--- a/ydb/core/tx/replication/service/transfer_writer.cpp
+++ /dev/null
@@ -1,840 +0,0 @@
-#include "logging.h"
-#include "transfer_writer.h"
-#include "worker.h"
-
-#include <ydb/core/fq/libs/row_dispatcher/events/data_plane.h>
-#include <ydb/core/fq/libs/row_dispatcher/purecalc_compilation/compile_service.h>
-#include <ydb/core/kqp/runtime/kqp_write_table.h>
-#include <ydb/core/tx/replication/ydb_proxy/topic_message.h>
-#include <ydb/core/persqueue/purecalc/purecalc.h> // should be after topic_message
-#include <ydb/core/protos/replication.pb.h>
-#include <ydb/core/tx/scheme_cache/helpers.h>
-#include <ydb/core/tx/tx_proxy/upload_rows_common_impl.h>
-#include <ydb/library/actors/core/actor_bootstrapped.h>
-#include <ydb/library/actors/core/hfunc.h>
-#include <ydb/library/services/services.pb.h>
-#include <ydb/public/lib/scheme_types/scheme_type_id.h>
-
-#include <yql/essentials/providers/common/schema/parser/yql_type_parser.h>
-#include <yql/essentials/public/purecalc/helpers/stream/stream_from_vector.h>
-
-using namespace NFq::NRowDispatcher;
-
-namespace NKikimr::NReplication::NService {
-
-namespace {
-
-constexpr const char* RESULT_COLUMN_NAME = "__ydb_r";
-
-using namespace NYql::NPureCalc;
-using namespace NKikimr::NMiniKQL;
-
-struct TSchemaColumn {
- TString Name;
- ui32 Id;
- NScheme::TTypeInfo PType;
- bool KeyColumn;
- bool Nullable;
-
- bool operator==(const TSchemaColumn& other) const = default;
-
- TString ToString() const;
-
- TString TypeName() const {
- return NScheme::TypeName(PType);
- }
-};
-
-struct TScheme {
- TVector<TSchemaColumn> TopicColumns;
- TVector<TSchemaColumn> TableColumns;
- TVector<NKikimrKqp::TKqpColumnMetadataProto> ColumnsMetadata;
- std::vector<ui32> WriteIndex;
-};
-
-struct TOutputType {
- NUdf::TUnboxedValue Value;
- NMiniKQL::TUnboxedValueBatch Data;
-};
-
-class TMessageOutputSpec : public NYql::NPureCalc::TOutputSpecBase {
-public:
- explicit TMessageOutputSpec(const TVector<TSchemaColumn>& tableColumns, const NYT::TNode& schema)
- : TableColumns(tableColumns)
- , Schema(schema)
- {}
-
-public:
- const NYT::TNode& GetSchema() const override {
- return Schema;
- }
-
- const TVector<TSchemaColumn> GetTableColumns() const {
- return TableColumns;
- }
-
-private:
- const TVector<TSchemaColumn> TableColumns;
- const NYT::TNode Schema;
-};
-
-class TOutputListImpl final: public IStream<TOutputType*> {
-protected:
- TWorkerHolder<IPullListWorker> WorkerHolder_;
- const TMessageOutputSpec& OutputSpec;
-
-public:
- explicit TOutputListImpl(const TMessageOutputSpec& outputSpec, TWorkerHolder<IPullListWorker> worker)
- : WorkerHolder_(std::move(worker))
- , OutputSpec(outputSpec)
- {
- Row.resize(1);
- }
-
-public:
- TOutputType* Fetch() override {
- TBindTerminator bind(WorkerHolder_->GetGraph().GetTerminator());
-
- with_lock(WorkerHolder_->GetScopedAlloc()) {
- Out.Data.clear();
-
- NYql::NUdf::TUnboxedValue value;
-
- if (!WorkerHolder_->GetOutputIterator().Next(value)) {
- return nullptr;
- }
-
- Out.Value = value.GetElement(0);
- Out.Data.PushRow(&Out.Value, 1);
-
- return &Out;
- }
- }
-
-private:
- std::vector<NUdf::TUnboxedValue> Row;
- TOutputType Out;
-};
-
-} // namespace
-
-} // namespace NKikimr::NReplication::NService
-
-
-template <>
-struct NYql::NPureCalc::TOutputSpecTraits<NKikimr::NReplication::NService::TMessageOutputSpec> {
- static const constexpr bool IsPartial = false;
-
- static const constexpr bool SupportPullListMode = true;
-
- using TOutputItemType = NKikimr::NReplication::NService::TOutputType*;
- using TPullStreamReturnType = THolder<IStream<TOutputItemType>>;
- using TPullListReturnType = THolder<IStream<TOutputItemType>>;
-
- static TPullListReturnType ConvertPullListWorkerToOutputType(
- const NKikimr::NReplication::NService::TMessageOutputSpec& outputSpec,
- TWorkerHolder<IPullListWorker> worker
- ) {
- return MakeHolder<NKikimr::NReplication::NService::TOutputListImpl>(outputSpec, std::move(worker));
- }
-};
-
-
-namespace NKikimr::NReplication::NService {
-
-namespace {
-
-NYT::TNode CreateTypeNode(const TString& fieldType) {
- return NYT::TNode::CreateList()
- .Add("DataType")
- .Add(fieldType);
-}
-
-NYT::TNode CreateOptionalTypeNode(const TString& fieldType) {
- return NYT::TNode::CreateList()
- .Add("OptionalType")
- .Add(CreateTypeNode(fieldType));
-}
-
-void AddField(NYT::TNode& node, const TString& fieldName, const TString& fieldType) {
- node.Add(
- NYT::TNode::CreateList()
- .Add(fieldName)
- .Add(CreateOptionalTypeNode(fieldType))
- );
-}
-
-NYT::TNode MakeOutputSchema(const TVector<TSchemaColumn>& columns) {
- auto structMembers = NYT::TNode::CreateList();
-
- for (const auto& column : columns) {
- AddField(structMembers, column.Name, column.TypeName());
- }
-
- auto rootMembers = NYT::TNode::CreateList();
- rootMembers.Add(
- NYT::TNode::CreateList()
- .Add(RESULT_COLUMN_NAME)
- .Add(NYT::TNode::CreateList()
- .Add("StructType")
- .Add(std::move(structMembers)))
- );
-
- return NYT::TNode::CreateList()
- .Add("StructType")
- .Add(std::move(rootMembers));
-}
-
-class TProgramHolder : public NFq::IProgramHolder {
-public:
- using TPtr = TIntrusivePtr<TProgramHolder>;
-
-public:
- TProgramHolder(
- const TVector<TSchemaColumn>& tableColumns,
- const TString& sql
- )
- : TopicColumns()
- , TableColumns(tableColumns)
- , Sql(sql)
- {}
-
-public:
- void CreateProgram(NYql::NPureCalc::IProgramFactoryPtr programFactory) override {
- // Program should be stateless because input values
- // allocated on another allocator and should be released
- Program = programFactory->MakePullListProgram(
- NYdb::NTopic::NPurecalc::TMessageInputSpec(),
- TMessageOutputSpec(TableColumns, MakeOutputSchema(TableColumns)),
- Sql,
- NYql::NPureCalc::ETranslationMode::SQL
- );
- }
-
- NYql::NPureCalc::TPullListProgram<NYdb::NTopic::NPurecalc::TMessageInputSpec, TMessageOutputSpec>* GetProgram() {
- return Program.Get();
- }
-
-private:
- const TVector<TSchemaColumn> TopicColumns;
- const TVector<TSchemaColumn> TableColumns;
- const TString Sql;
-
- THolder<NYql::NPureCalc::TPullListProgram<NYdb::NTopic::NPurecalc::TMessageInputSpec, TMessageOutputSpec>> Program;
-};
-
-TScheme BuildScheme(const TAutoPtr<NSchemeCache::TSchemeCacheNavigate>& nav) {
- const auto& entry = nav->ResultSet.at(0);
-
- TScheme result;
-
- result.TableColumns.reserve(entry.Columns.size());
- result.ColumnsMetadata.reserve(entry.Columns.size());
- result.WriteIndex.reserve(entry.Columns.size());
-
- size_t keyColumns = CountIf(entry.Columns, [](auto& c) {
- return c.second.KeyOrder >= 0;
- });
-
- result.TableColumns.resize(keyColumns);
-
- for (const auto& [_, column] : entry.Columns) {
- if (column.KeyOrder >= 0) {
- result.TableColumns[column.KeyOrder] = {column.Name, column.Id, column.PType, column.KeyOrder >= 0, !column.IsNotNullColumn};
- } else {
- result.TableColumns.emplace_back(column.Name, column.Id, column.PType, column.KeyOrder >= 0, !column.IsNotNullColumn);
- }
- }
-
- std::map<TString, TSysTables::TTableColumnInfo> columns;
- for (const auto& [_, column] : entry.Columns) {
- columns[column.Name] = column;
- }
-
- size_t i = keyColumns;
- for (const auto& [_, column] : columns) {
- result.ColumnsMetadata.emplace_back();
- auto& c = result.ColumnsMetadata.back();
- result.WriteIndex.push_back(column.KeyOrder >= 0 ? column.KeyOrder : i++);
-
- c.SetName(column.Name);
- c.SetId(column.Id);
- c.SetTypeId(column.PType.GetTypeId());
-
- if (NScheme::NTypeIds::IsParametrizedType(column.PType.GetTypeId())) {
- NScheme::ProtoFromTypeInfo(column.PType, "", *c.MutableTypeInfo());
- }
- }
-
- return result;
-}
-
-class ITableKindState {
-public:
- using TPtr = std::unique_ptr<ITableKindState>;
-
- ITableKindState(const TActorId& selfId, const TAutoPtr<NSchemeCache::TSchemeCacheNavigate>& result)
- : SelfId(selfId)
- , Scheme(BuildScheme(result))
- {}
-
- virtual ~ITableKindState() = default;
-
- void EnshureDataBatch() {
- if (!Batcher) {
- Batcher = CreateDataBatcher();
- }
- }
-
- void AddData(const NMiniKQL::TUnboxedValueBatch &data) {
- Batcher->AddData(data);
- }
-
- i64 BatchSize() const {
- return Batcher->GetMemory();
- }
-
- virtual NKqp::IDataBatcherPtr CreateDataBatcher() = 0;
- virtual bool Flush() = 0;
-
- virtual TString Handle(TEvents::TEvCompleted::TPtr& ev) = 0;
-
- const TVector<TSchemaColumn>& GetTableColumns() const {
- return Scheme.TableColumns;
- }
-
-protected:
- const TActorId SelfId;
- const TScheme Scheme;
-
- NKqp::IDataBatcherPtr Batcher;
-};
-
-class TColumnTableState : public ITableKindState {
-public:
- TColumnTableState(
- const TActorId& selfId,
- TAutoPtr<NSchemeCache::TSchemeCacheNavigate>& result
- )
- : ITableKindState(selfId, result)
- {
- NavigateResult.reset(result.Release());
- Path = JoinPath(NavigateResult->ResultSet.front().Path);
- }
-
- NKqp::IDataBatcherPtr CreateDataBatcher() override {
- return NKqp::CreateColumnDataBatcher(Scheme.ColumnsMetadata, Scheme.WriteIndex);
- }
-
- bool Flush() override {
- auto doWrite = [&]() {
- Issues = std::make_shared<NYql::TIssues>();
-
- NTxProxy::DoLongTxWriteSameMailbox(TActivationContext::AsActorContext(), SelfId /* replyTo */, { /* longTxId */ }, { /* dedupId */ },
- NavigateResult->DatabaseName, Path, NavigateResult, Data, Issues, true /* noTxWrite */);
- };
-
- if (Data) {
- doWrite();
- return true;
- }
-
- if (!Batcher || !Batcher->GetMemory()) {
- return false;
- }
-
- NKqp::IDataBatchPtr batch = Batcher->Build();
- auto data = batch->ExtractBatch();
-
- Data = reinterpret_pointer_cast<arrow::RecordBatch>(data);
- Y_VERIFY(Data);
-
- doWrite();
- return true;
- }
-
- TString Handle(TEvents::TEvCompleted::TPtr& ev) override {
- if (ev->Get()->Status == Ydb::StatusIds::SUCCESS) {
- Data.reset();
- Issues.reset();
-
- return "";
- }
-
- return Issues->ToOneLineString();
- }
-
-private:
- std::shared_ptr<const NSchemeCache::TSchemeCacheNavigate> NavigateResult;
- TString Path;
-
- std::shared_ptr<arrow::RecordBatch> Data;
- std::shared_ptr<NYql::TIssues> Issues;
-};
-
-class TRowTableState : public ITableKindState {
-public:
- TRowTableState(
- const TActorId& selfId,
- TAutoPtr<NSchemeCache::TSchemeCacheNavigate>& result
- )
- : ITableKindState(selfId, result)
- {}
-
- NKqp::IDataBatcherPtr CreateDataBatcher() override {
- return NKqp::CreateRowDataBatcher(ColumnsMetadata, WriteIndex);
- }
-
- bool Flush() override {
- Y_ABORT("Unsupported");
- }
-
- TString Handle(TEvents::TEvCompleted::TPtr&) override {
- return "Unsupported";
- }
-
-private:
- const TVector<NKikimrKqp::TKqpColumnMetadataProto> ColumnsMetadata;
- const std::vector<ui32> WriteIndex;
-};
-
-enum class ETag {
- FlushTimeout,
- RetryFlush
-};
-
-} // anonymous namespace
-
-class TTransferWriter
- : public TActorBootstrapped<TTransferWriter>
- , private NSchemeCache::TSchemeCacheHelpers
-{
- static constexpr TDuration MinRetryDelay = TDuration::Seconds(1);
- static constexpr TDuration MaxRetryDelay = TDuration::Minutes(10);
-
-public:
- void Bootstrap() {
- GetTableScheme();
- }
-
-private:
- void GetTableScheme() {
- LOG_D("GetTableScheme: worker# " << Worker);
- Become(&TThis::StateGetTableScheme);
-
- auto request = MakeHolder<TNavigate>();
- request->ResultSet.emplace_back(MakeNavigateEntry(TablePathId, TNavigate::OpTable));
- Send(MakeSchemeCacheID(), new TEvNavigate(request.Release()));
- }
-
- STFUNC(StateGetTableScheme) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvTxProxySchemeCache::TEvNavigateKeySetResult, Handle);
-
- hFunc(TEvWorker::TEvHandshake, Handle);
- hFunc(TEvWorker::TEvData, HoldHandle);
- sFunc(TEvents::TEvWakeup, GetTableScheme);
- sFunc(TEvents::TEvPoison, PassAway);
- }
- }
-
- void LogCritAndLeave(const TString& error) {
- LOG_C(error);
- Leave(TEvWorker::TEvGone::SCHEME_ERROR, error);
- }
-
- void LogWarnAndRetry(const TString& error) {
- LOG_W(error);
- Retry();
- }
-
- template <typename CheckFunc, typename FailFunc, typename T, typename... Args>
- bool Check(CheckFunc checkFunc, FailFunc failFunc, const T& subject, Args&&... args) {
- return checkFunc("writer", subject, std::forward<Args>(args)..., std::bind(failFunc, this, std::placeholders::_1));
- }
-
- template <typename T>
- bool CheckNotEmpty(const TAutoPtr<T>& result) {
- return Check(&TSchemeCacheHelpers::CheckNotEmpty<T>, &TThis::LogCritAndLeave, result);
- }
-
- template <typename T>
- bool CheckEntriesCount(const TAutoPtr<T>& result, ui32 expected) {
- return Check(&TSchemeCacheHelpers::CheckEntriesCount<T>, &TThis::LogCritAndLeave, result, expected);
- }
-
- template <typename T>
- bool CheckTableId(const T& entry, const TTableId& expected) {
- return Check(&TSchemeCacheHelpers::CheckTableId<T>, &TThis::LogCritAndLeave, entry, expected);
- }
-
- template <typename T>
- bool CheckEntrySucceeded(const T& entry) {
- return Check(&TSchemeCacheHelpers::CheckEntrySucceeded<T>, &TThis::LogWarnAndRetry, entry);
- }
-
- template <typename T>
- bool CheckEntryKind(const T& entry, TNavigate::EKind expected) {
- return Check(&TSchemeCacheHelpers::CheckEntryKind<T>, &TThis::LogCritAndLeave, entry, expected);
- }
-
- void Handle(TEvTxProxySchemeCache::TEvNavigateKeySetResult::TPtr& ev) {
- auto& result = ev->Get()->Request;
-
- LOG_D("Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult"
- << ": result# " << (result ? result->ToString(*AppData()->TypeRegistry) : "nullptr"));
-
- if (!CheckNotEmpty(result)) {
- return;
- }
-
- if (!CheckEntriesCount(result, 1)) {
- return;
- }
-
- const auto& entry = result->ResultSet.at(0);
-
- if (!CheckTableId(entry, TablePathId)) {
- return;
- }
-
- // TODO support row tables
- if (entry.Status == TNavigate::EStatus::PathNotTable || entry.Kind != TNavigate::KindColumnTable) {
- return LogCritAndLeave("Only column tables are supported as transfer targets");
- }
-
- if (!CheckEntrySucceeded(entry)) {
- return;
- }
-
- if (entry.Kind == TNavigate::KindColumnTable) {
- TableState = std::make_unique<TColumnTableState>(SelfId(), result);
- } else {
- TableState = std::make_unique<TRowTableState>(SelfId(), result);
- }
-
- CompileTransferLambda();
- }
-
-private:
- void CompileTransferLambda() {
- LOG_D("CompileTransferLambda: worker# " << Worker);
-
- NFq::TPurecalcCompileSettings settings = {};
- auto programHolder = MakeIntrusive<TProgramHolder>(TableState->GetTableColumns(), GenerateSql());
- auto result = std::make_unique<NFq::TEvRowDispatcher::TEvPurecalcCompileRequest>(std::move(programHolder), settings);
-
- Send(CompileServiceId, result.release(), 0, ++InFlightCompilationId);
- Become(&TThis::StateCompileTransferLambda);
- }
-
- STFUNC(StateCompileTransferLambda) {
- switch (ev->GetTypeRewrite()) {
- hFunc(NFq::TEvRowDispatcher::TEvPurecalcCompileResponse, Handle);
-
- hFunc(TEvWorker::TEvHandshake, Handle);
- hFunc(TEvWorker::TEvData, HoldHandle);
- sFunc(TEvents::TEvPoison, PassAway);
- }
- }
-
- TString GenerateSql() {
- TStringBuilder sb;
- sb << TransformLambda;
- sb << "SELECT * FROM (\n";
- sb << " SELECT $__ydb_transfer_lambda(TableRow()) AS " << RESULT_COLUMN_NAME << " FROM Input\n";
- sb << ") FLATTEN BY " << RESULT_COLUMN_NAME << ";\n";
- LOG_T("SQL: " << sb);
- return sb;
- }
-
- void Handle(NFq::TEvRowDispatcher::TEvPurecalcCompileResponse::TPtr& ev) {
- const auto& result = ev->Get();
-
- LOG_D("Handle TEvPurecalcCompileResponse"
- << ": result# " << (result ? result->Issues.ToOneLineString() : "nullptr"));
-
- if (ev->Cookie != InFlightCompilationId) {
- LOG_D("Outdated compiler response ignored for id " << ev->Cookie << ", current compile id " << InFlightCompilationId);
- return;
- }
-
- if (!result->ProgramHolder) {
- return LogCritAndLeave(TStringBuilder() << "Compilation failed: " << result->Issues.ToOneLineString());
- }
-
- auto r = dynamic_cast<TProgramHolder*>(ev->Get()->ProgramHolder.Release());
- Y_ENSURE(result, "Unexpected compile response");
-
- ProgramHolder = TIntrusivePtr<TProgramHolder>(r);
-
- StartWork();
- }
-
-private:
- void StartWork() {
- Become(&TThis::StateWork);
-
- Attempt = 0;
- Delay = MinRetryDelay;
-
- if (PendingRecords) {
- ProcessData(PendingPartitionId, *PendingRecords);
- PendingRecords.reset();
- }
-
- if (!WakeupScheduled) {
- WakeupScheduled = true;
- Schedule(FlushInterval, new TEvents::TEvWakeup(ui32(ETag::FlushTimeout)));
- }
- }
-
- STFUNC(StateWork) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvWorker::TEvHandshake, Handle);
- hFunc(TEvWorker::TEvData, Handle);
-
- sFunc(TEvents::TEvPoison, PassAway);
- sFunc(TEvents::TEvWakeup, TryFlush);
- }
- }
-
- void Handle(TEvWorker::TEvHandshake::TPtr& ev) {
- Worker = ev->Sender;
- LOG_D("Handshake"
- << ": worker# " << Worker);
-
- if (ProcessingError) {
- Leave(ProcessingErrorStatus, *ProcessingError);
- } else {
- PollSent = true;
- Send(Worker, new TEvWorker::TEvHandshake());
- }
- }
-
- void HoldHandle(TEvWorker::TEvData::TPtr& ev) {
- Y_ABORT_UNLESS(!PendingRecords);
- PendingPartitionId = ev->Get()->PartitionId;
- PendingRecords = std::move(ev->Get()->Records);
- }
-
- void Handle(TEvWorker::TEvData::TPtr& ev) {
- LOG_D("Handle TEvData record count: " << ev->Get()->Records.size());
- ProcessData(ev->Get()->PartitionId, ev->Get()->Records);
- }
-
- void ProcessData(const ui32 partitionId, const TVector<TTopicMessage>& records) {
- if (!records) {
- Send(Worker, new TEvWorker::TEvGone(TEvWorker::TEvGone::DONE));
- return;
- }
-
- PollSent = false;
-
- TableState->EnshureDataBatch();
- if (!LastWriteTime) {
- LastWriteTime = TInstant::Now();
- }
-
- for (auto& message : records) {
- NYdb::NTopic::NPurecalc::TMessage input;
- input.Data = std::move(message.GetData());
- input.MessageGroupId = std::move(message.GetMessageGroupId());
- input.Partition = partitionId;
- input.ProducerId = std::move(message.GetProducerId());
- input.Offset = message.GetOffset();
- input.SeqNo = message.GetSeqNo();
-
- try {
- auto result = ProgramHolder->GetProgram()->Apply(NYql::NPureCalc::StreamFromVector(TVector{input}));
- while (auto* m = result->Fetch()) {
- TableState->AddData(m->Data);
- }
- } catch (const yexception& e) {
- ProcessingErrorStatus = TEvWorker::TEvGone::EStatus::SCHEME_ERROR;
- ProcessingError = TStringBuilder() << "Error transform message: " << e.what();
- break;
- }
- }
-
- if (TableState->BatchSize() >= BatchSizeBytes || *LastWriteTime < TInstant::Now() - FlushInterval) {
- if (TableState->Flush()) {
- LastWriteTime.reset();
- return Become(&TThis::StateWrite);
- }
- }
-
- if (ProcessingError) {
- LogCritAndLeave(*ProcessingError);
- } else {
- PollSent = true;
- Send(Worker, new TEvWorker::TEvPoll(true));
- }
- }
-
- void TryFlush() {
- if (LastWriteTime && LastWriteTime < TInstant::Now() - FlushInterval && TableState->Flush()) {
- LastWriteTime.reset();
- WakeupScheduled = false;
- Become(&TThis::StateWrite);
- } else {
- Schedule(FlushInterval, new TEvents::TEvWakeup(ui32(ETag::FlushTimeout)));
- }
- }
-
-private:
- STFUNC(StateWrite) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvents::TEvCompleted, Handle);
- hFunc(TEvWorker::TEvHandshake, Handle);
- hFunc(TEvWorker::TEvData, HoldHandle);
-
- sFunc(TEvents::TEvPoison, PassAway);
- hFunc(TEvents::TEvWakeup, WriteWakeup);
- }
- }
-
- void Handle(TEvents::TEvCompleted::TPtr& ev) {
- LOG_D("Handle TEvents::TEvCompleted"
- << ": worker# " << Worker
- << " status# " << ev->Get()->Status);
-
- auto error = TableState->Handle(ev);
- if (ui32(NYdb::EStatus::SUCCESS) != ev->Get()->Status && Delay < MaxRetryDelay && !PendingLeave()) {
- return LogWarnAndRetry(error);
- }
-
- if (error && !ProcessingError) {
- ProcessingError = error;
- }
-
- if (ProcessingError) {
- return LogCritAndLeave(*ProcessingError);
- }
-
- if (!PollSent) {
- PollSent = true;
- Send(Worker, new TEvWorker::TEvPoll());
- }
-
- return StartWork();
- }
-
- void WriteWakeup(TEvents::TEvWakeup::TPtr& ev) {
- switch(ETag(ev->Get()->Tag)) {
- case ETag::FlushTimeout:
- WakeupScheduled = false;
- break;
- case ETag::RetryFlush:
- TableState->Flush();
- break;
- }
- }
-
-private:
-
- bool PendingLeave() {
- return PendingRecords && PendingRecords->empty();
- }
-
- TStringBuf GetLogPrefix() const {
- if (!LogPrefix) {
- LogPrefix = TStringBuilder()
- << "[TransferWriter]"
- << SelfId() << " ";
- }
-
- return LogPrefix.GetRef();
- }
-
- template <typename TResult>
- bool CheckResult(const TResult& result, const TStringBuf marker) {
- if (result.IsSuccess()) {
- return true;
- }
-
- LOG_E("Error at '" << marker << "'"
- << ", error# " << result);
- RetryOrLeave(result.GetError());
-
- return false;
- }
-
- void Retry() {
- Delay = Attempt++ ? Delay * 2 : MinRetryDelay;
- Delay = Min(Delay, MaxRetryDelay);
- const TDuration random = TDuration::FromValue(TAppData::RandomProvider->GenRand64() % Delay.MicroSeconds());
- this->Schedule(Delay + random, new TEvents::TEvWakeup(ui32(ETag::RetryFlush)));
- }
-
- void Leave(TEvWorker::TEvGone::EStatus status, const TString& message) {
- LOG_I("Leave");
-
- if (Worker) {
- Send(Worker, new TEvWorker::TEvGone(status, message));
- PassAway();
- } else {
- ProcessingErrorStatus = status;
- ProcessingError = message;
- }
- }
-
- void PassAway() override {
- TActor::PassAway();
- }
-
-public:
- static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return NKikimrServices::TActivity::REPLICATION_TRANSFER_WRITER;
- }
-
- explicit TTransferWriter(
- const TString& transformLambda,
- const TPathId& tablePathId,
- const TActorId& compileServiceId,
- const NKikimrReplication::TBatchingSettings& batchingSettings)
- : TransformLambda(transformLambda)
- , TablePathId(tablePathId)
- , CompileServiceId(compileServiceId)
- , FlushInterval(TDuration::MilliSeconds(std::max<ui64>(batchingSettings.GetFlushIntervalMilliSeconds(), 1000)))
- , BatchSizeBytes(std::min<ui64>(batchingSettings.GetBatchSizeBytes(), 1_GB))
- {}
-
-private:
- const TString TransformLambda;
- const TPathId TablePathId;
- const TActorId CompileServiceId;
- const TDuration FlushInterval;
- const i64 BatchSizeBytes;
- TActorId Worker;
-
- ITableKindState::TPtr TableState;
-
- size_t InFlightCompilationId = 0;
- TProgramHolder::TPtr ProgramHolder;
-
- mutable bool WakeupScheduled = false;
- mutable bool PollSent = false;
- mutable std::optional<TInstant> LastWriteTime;
-
- mutable TMaybe<TString> LogPrefix;
-
- mutable TEvWorker::TEvGone::EStatus ProcessingErrorStatus;
- mutable TMaybe<TString> ProcessingError;
-
- ui32 PendingPartitionId = 0;
- std::optional<TVector<TTopicMessage>> PendingRecords;
-
- ui32 Attempt = 0;
- TDuration Delay = MinRetryDelay;
-
-}; // TTransferWriter
-
-IActor* CreateTransferWriter(const TString& transformLambda, const TPathId& tablePathId,
- const TActorId& compileServiceId, const NKikimrReplication::TBatchingSettings& batchingSettings)
-{
- return new TTransferWriter(transformLambda, tablePathId, compileServiceId, batchingSettings);
-}
-
-}
-
diff --git a/ydb/core/tx/replication/service/transfer_writer.h b/ydb/core/tx/replication/service/transfer_writer.h
deleted file mode 100644
index 2b28f19dc2..0000000000
--- a/ydb/core/tx/replication/service/transfer_writer.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#pragma once
-
-#include <ydb/core/base/defs.h>
-
-#include <util/generic/string.h>
-
-namespace NKikimr {
- struct TPathId;
-}
-
-namespace NKikimrReplication {
- class TBatchingSettings;
-}
-
-namespace NKikimr::NReplication::NService {
-
-IActor* CreateTransferWriter(const TString& transformLambda, const TPathId& tablePathId,
- const TActorId& compileServiceId, const NKikimrReplication::TBatchingSettings& batchingSettings);
-
-}
diff --git a/ydb/core/tx/replication/service/transfer_writer_factory.h b/ydb/core/tx/replication/service/transfer_writer_factory.h
new file mode 100644
index 0000000000..3bc65085db
--- /dev/null
+++ b/ydb/core/tx/replication/service/transfer_writer_factory.h
@@ -0,0 +1,32 @@
+#pragma once
+
+#include <ydb/core/base/defs.h>
+
+#include <util/generic/string.h>
+
+namespace NKikimr {
+ struct TPathId;
+}
+
+namespace NKikimrReplication {
+ class TBatchingSettings;
+}
+
+namespace NKikimr::NReplication::NService {
+
+class ITransferWriterFactory {
+public:
+ struct Parameters {
+ const TString& TransformLambda;
+ const TPathId& TablePathId;
+ const TActorId& CompileServiceId;
+ const NKikimrReplication::TBatchingSettings& BatchingSettings;
+ };
+
+ virtual IActor* Create(const Parameters& parameters) const = 0;
+
+ virtual ~ITransferWriterFactory() = default;
+};
+
+
+}
diff --git a/ydb/core/tx/replication/service/transfer_writer_ut.cpp b/ydb/core/tx/replication/service/transfer_writer_ut.cpp
deleted file mode 100644
index 951d154960..0000000000
--- a/ydb/core/tx/replication/service/transfer_writer_ut.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-#include "common_ut.h"
-#include "service.h"
-#include "transfer_writer.h"
-#include "worker.h"
-
-#include <ydb/core/fq/libs/row_dispatcher/purecalc_compilation/compile_service.h>
-#include <ydb/core/tx/datashard/ut_common/datashard_ut_common.h>
-#include <ydb/core/tx/replication/ut_helpers/test_env.h>
-#include <ydb/core/tx/replication/ut_helpers/test_table.h>
-
-#include <library/cpp/string_utils/base64/base64.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <util/string/printf.h>
-#include <util/string/strip.h>
-
-namespace NKikimr::NReplication::NService {
-
-Y_UNIT_TEST_SUITE(TransferWriter) {
- using namespace NTestHelpers;
-
- Y_UNIT_TEST(Write_ColumnTable) {
- TEnv env;
- env.GetRuntime().SetLogPriority(NKikimrServices::REPLICATION_SERVICE, NLog::PRI_DEBUG);
- env.GetRuntime().SetLogPriority(NKikimrServices::FQ_ROW_DISPATCHER, NLog::PRI_DEBUG);
-
- env.CreateColumnTable("/Root", *MakeColumnTableDescription(TTestTableDescription{
- .Name = "Table",
- .KeyColumns = {"key"},
- .Columns = {
- {.Name = "value", .Type = "Utf8"},
- {.Name = "key", .Type = "Uint32"},
- },
- }));
-
- auto lambda = R"(
- $__ydb_transfer_lambda = ($x) -> {
- RETURN [
- <|
- key:CAST($x._offset As Uint32)
- , value:CAST($x._data AS Utf8)
- |>
- ];
- };
- )";
-
- const TPathId tablePathId = env.GetPathId("/Root/Table");
-
- auto compiler = env.GetRuntime().Register(NFq::NRowDispatcher::CreatePurecalcCompileService({}, MakeIntrusive<NMonitoring::TDynamicCounters>()));
-
- NKikimrReplication::TBatchingSettings batchingSettings;
- batchingSettings.SetFlushIntervalMilliSeconds(1);
- auto writer = env.GetRuntime().Register(CreateTransferWriter(lambda, tablePathId, compiler, batchingSettings));
- env.Send<TEvWorker::TEvHandshake>(writer, new TEvWorker::TEvHandshake());
-
- env.Send<TEvWorker::TEvPoll>(writer, new TEvWorker::TEvData(0, "TestSource", {
- TRecord(1, R"({"key":[1], "update":{"value":"10"}})"),
- TRecord(2, R"({"key":[2], "update":{"value":"20"}})"),
- TRecord(3, R"({"key":[3], "update":{"value":"30"}})"),
- }));
- }
-
-}
-
-}
diff --git a/ydb/core/tx/replication/service/ya.make b/ydb/core/tx/replication/service/ya.make
index 9ac9a4a481..4ed9f1ff97 100644
--- a/ydb/core/tx/replication/service/ya.make
+++ b/ydb/core/tx/replication/service/ya.make
@@ -24,7 +24,6 @@ SRCS(
service.cpp
table_writer.cpp
topic_reader.cpp
- transfer_writer.cpp
worker.cpp
)
@@ -44,7 +43,6 @@ RECURSE_FOR_TESTS(
ut_json_change_record
ut_table_writer
ut_topic_reader
- ut_transfer_writer
ut_worker
)
diff --git a/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp b/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp
index 872a3f2914..3534906ac5 100644
--- a/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp
@@ -2,6 +2,7 @@
#include <ydb/core/tablet/tablet_exception.h>
#include <ydb/core/tablet_flat/flat_cxx_database.h>
+#include <ydb/core/tx/schemeshard/schemeshard__data_erasure_manager.h>
namespace NKikimr {
namespace NSchemeShard {
@@ -179,6 +180,9 @@ struct TSchemeShard::TTxDeleteTabletReply : public TSchemeShard::TRwTxBase {
"Close pipe to deleted shardIdx " << ShardIdx << " tabletId " << TabletId);
Self->PipeClientCache->ForceClose(ctx, ui64(TabletId));
}
+ if (Self->DataErasureManager->GetStatus() == EDataErasureStatus::IN_PROGRESS) {
+ Self->Execute(Self->CreateTxCancelDataErasureShards({ShardIdx}));
+ }
}
}
diff --git a/ydb/core/tx/schemeshard/schemeshard__init.cpp b/ydb/core/tx/schemeshard/schemeshard__init.cpp
index c97a392a51..1b346d40ee 100644
--- a/ydb/core/tx/schemeshard/schemeshard__init.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__init.cpp
@@ -4431,6 +4431,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
}
switch (importInfo->State) {
+ case TImportInfo::EState::DownloadExportMetadata:
case TImportInfo::EState::Waiting:
case TImportInfo::EState::Cancellation:
ImportsToResume.push_back(importInfo->Id);
@@ -4507,6 +4508,10 @@ struct TSchemeShard::TTxInit : public TTransactionBase<TSchemeShard> {
item.NextIndexIdx = rowset.GetValueOrDefault<Schema::ImportItems::NextIndexIdx>(0);
item.NextChangefeedIdx = rowset.GetValueOrDefault<Schema::ImportItems::NextChangefeedIdx>(0);
item.Issue = rowset.GetValueOrDefault<Schema::ImportItems::Issue>(TString());
+ item.SrcPrefix = rowset.GetValueOrDefault<Schema::ImportItems::SrcPrefix>(TString());
+ if (rowset.HaveValue<Schema::ImportItems::EncryptionIV>()) {
+ item.ExportItemIV = NBackup::TEncryptionIV::FromBinaryString(rowset.GetValue<Schema::ImportItems::EncryptionIV>());
+ }
if (item.WaitTxId != InvalidTxId) {
Self->TxIdToImport[item.WaitTxId] = {importId, itemIdx};
diff --git a/ydb/core/tx/schemeshard/schemeshard__login.cpp b/ydb/core/tx/schemeshard/schemeshard__login.cpp
index bdbdccefda..797a78f4e7 100644
--- a/ydb/core/tx/schemeshard/schemeshard__login.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__login.cpp
@@ -1,6 +1,7 @@
#include <ydb/library/security/util.h>
#include <ydb/core/protos/auth.pb.h>
#include <ydb/core/base/auth.h>
+#include <ydb/core/base/local_user_token.h>
#include "schemeshard_impl.h"
@@ -89,10 +90,7 @@ struct TSchemeShard::TTxLogin : TSchemeShard::TRwTxBase {
private:
bool IsAdmin() const {
const auto& user = Request->Get()->Record.GetUser();
- const auto providerGroups = Self->LoginProvider.GetGroupsMembership(user);
- const TVector<NACLib::TSID> groups(providerGroups.begin(), providerGroups.end());
- const auto userToken = NACLib::TUserToken(user, groups);
-
+ const auto userToken = NKikimr::BuildLocalUserToken(Self->LoginProvider, user);
return IsAdministrator(AppData(), &userToken);
}
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp
index 3ab13d0633..7ea1df5bf8 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp
@@ -5,6 +5,8 @@
#include <ydb/library/security/util.h>
#include <ydb/core/base/auth.h>
+#include <ydb/core/base/local_user_token.h>
+
#include <ydb/core/protos/auth.pb.h>
@@ -293,7 +295,7 @@ public:
NLogin::TLoginProvider::TBasicResponse CanRemoveSid(TOperationContext& context, const TString sid, const TString& sidType) {
if (!AppData()->FeatureFlags.GetEnableStrictAclCheck()) {
- return {};
+ return {};
}
auto subTree = context.SS->ListSubTree(context.SS->RootPathId(), context.Ctx);
@@ -316,9 +318,7 @@ public:
}
void AddIsUserAdmin(const TString& user, NLogin::TLoginProvider& loginProvider, TParts& additionalParts) {
- const auto providerGroups = loginProvider.GetGroupsMembership(user);
- const TVector<NACLib::TSID> groups(providerGroups.begin(), providerGroups.end());
- const auto userToken = NACLib::TUserToken(user, groups);
+ const auto userToken = NKikimr::BuildLocalUserToken(loginProvider, user);
if (IsAdministrator(AppData(), &userToken)) {
additionalParts.emplace_back("login_user_level", "admin");
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h b/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h
index 6e1300a0f0..9b7676047d 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h
@@ -105,7 +105,7 @@ public:
const auto domainPath = TPath::Init(pathIdForDomainId, context.SS);
auto unableToMakeABill = [&](const TStringBuf reason) {
- LOG_WARN_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "Unable to make a bill"
+ LOG_INFO_S(context.Ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "Unable to make a bill"
<< ": kind# " << TKind::Name()
<< ", opId# " << operationId
<< ", reason# " << reason
diff --git a/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp b/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp
index 0cc1eea804..074801aab2 100644
--- a/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp
@@ -72,6 +72,11 @@ struct TTransferStrategy : public IStrategy {
return true;
}
+ if (!AppData()->TransferWriterFactory) {
+ result.SetError(NKikimrScheme::StatusNotAvailable, "The transfer is only available in the Enterprise version");
+ return true;
+ }
+
return false;
}
};
diff --git a/ydb/core/tx/schemeshard/schemeshard__root_data_erasure_manager.cpp b/ydb/core/tx/schemeshard/schemeshard__root_data_erasure_manager.cpp
index 691d7f9507..27164725d7 100644
--- a/ydb/core/tx/schemeshard/schemeshard__root_data_erasure_manager.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__root_data_erasure_manager.cpp
@@ -165,7 +165,7 @@ void TRootDataErasureManager::ScheduleDataErasureWakeup() {
IsDataErasureWakeupScheduled = true;
LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
- "[RootDataErasureManager] ScheduleDataErasureWakeup: Interval# " << CurrentWakeupInterval);
+ "[RootDataErasureManager] ScheduleDataErasureWakeup: Interval# " << CurrentWakeupInterval << ", Timestamp# " << AppData(ctx)->TimeProvider->Now());
}
void TRootDataErasureManager::WakeupToRunDataErasure(TEvSchemeShard::TEvWakeupToRunDataErasure::TPtr& ev, const NActors::TActorContext& ctx) {
diff --git a/ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp b/ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp
index 4ee18f4995..23c88e87e0 100644
--- a/ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp
@@ -167,6 +167,15 @@ struct TSchemeShard::TTxServerlessStorageBilling : public TTransactionBase<TSche
}},
};
+ for (const auto& [k, v] : dbRootEl->UserAttrs->Attrs) {
+ auto label = TStringBuf(k);
+ if (!label.SkipPrefix("label_")) {
+ continue;
+ }
+
+ json["labels"][label] = v;
+ }
+
TStringBuilder billRecord;
NJson::WriteJson(&billRecord.Out, &json, /*formatOutput=*/false, /*sortkeys=*/false);
billRecord << Endl;
diff --git a/ydb/core/tx/schemeshard/schemeshard__tenant_data_erasure_manager.cpp b/ydb/core/tx/schemeshard/schemeshard__tenant_data_erasure_manager.cpp
index b01c823773..893ed3874c 100644
--- a/ydb/core/tx/schemeshard/schemeshard__tenant_data_erasure_manager.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard__tenant_data_erasure_manager.cpp
@@ -527,6 +527,14 @@ struct TSchemeShard::TTxCompleteDataErasureShard : public TSchemeShard::TRwTxBas
"TTxCompleteDataErasureShard Execute at schemestard: " << Self->TabletID());
const auto& record = Ev->Get()->Record;
+ if (record.GetStatus() != NKikimrTxDataShard::TEvForceDataCleanupResult::OK) {
+ LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD,
+ "TTxCompleteDataErasureShard: data erasure failed at DataShard #" << record.GetTabletId()
+ << " with status: " << NKikimrTxDataShard::TEvForceDataCleanupResult::EStatus_Name(record.GetStatus())
+ << ", schemestard: " << Self->TabletID());
+ return; // will be retried after timout in the queue in TTenantDataErasureManager::OnTimeout()
+ }
+
auto& manager = Self->DataErasureManager;
const ui64 cleanupGeneration = record.GetDataCleanupGeneration();
if (cleanupGeneration != manager->GetGeneration()) {
diff --git a/ydb/core/tx/schemeshard/schemeshard_build_index.cpp b/ydb/core/tx/schemeshard/schemeshard_build_index.cpp
index 9071b11c5b..06d6318acb 100644
--- a/ydb/core/tx/schemeshard/schemeshard_build_index.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_build_index.cpp
@@ -63,10 +63,10 @@ void TSchemeShard::PersistCreateBuildIndex(NIceDb::TNiceDb& db, const TIndexBuil
NIceDb::TUpdate<Schema::IndexBuild::TableLocalId>(info.TablePathId.LocalPathId),
NIceDb::TUpdate<Schema::IndexBuild::IndexName>(info.IndexName),
NIceDb::TUpdate<Schema::IndexBuild::IndexType>(info.IndexType),
- NIceDb::TUpdate<Schema::IndexBuild::MaxBatchRows>(info.Limits.MaxBatchRows),
- NIceDb::TUpdate<Schema::IndexBuild::MaxBatchBytes>(info.Limits.MaxBatchBytes),
- NIceDb::TUpdate<Schema::IndexBuild::MaxShards>(info.Limits.MaxShards),
- NIceDb::TUpdate<Schema::IndexBuild::MaxRetries>(info.Limits.MaxRetries),
+ NIceDb::TUpdate<Schema::IndexBuild::MaxBatchRows>(info.ScanSettings.GetMaxBatchRows()),
+ NIceDb::TUpdate<Schema::IndexBuild::MaxBatchBytes>(info.ScanSettings.GetMaxBatchBytes()),
+ NIceDb::TUpdate<Schema::IndexBuild::MaxShards>(info.MaxInProgressShards),
+ NIceDb::TUpdate<Schema::IndexBuild::MaxRetries>(info.ScanSettings.GetMaxBatchRetries()),
NIceDb::TUpdate<Schema::IndexBuild::BuildKind>(ui32(info.BuildKind))
);
// Persist details of the index build operation: ImplTableDescriptions and SpecializedIndexDescription.
diff --git a/ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp b/ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp
index 73f4d9fd02..8790950ff4 100644
--- a/ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp
@@ -177,10 +177,8 @@ public:
return makeReply("missing index or column to build");
}
- buildInfo->Limits.MaxBatchRows = settings.max_batch_rows();
- buildInfo->Limits.MaxBatchBytes = settings.max_batch_bytes();
- buildInfo->Limits.MaxShards = settings.max_shards_in_flight();
- buildInfo->Limits.MaxRetries = settings.max_retries_upload_batch();
+ buildInfo->ScanSettings.CopyFrom(settings.GetScanSettings());
+ buildInfo->MaxInProgressShards = settings.max_shards_in_flight();
buildInfo->CreateSender = Request->Sender;
buildInfo->SenderCookie = Request->Cookie;
diff --git a/ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp b/ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp
index 918d9f3c30..d3e13a8d4f 100644
--- a/ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp
@@ -8,6 +8,7 @@
#include <ydb/core/scheme/scheme_types_proto.h>
#include <ydb/core/tx/datashard/range_ops.h>
#include <ydb/core/tx/datashard/upload_stats.h>
+#include <ydb/core/tx/datashard/scan_common.h>
#include <ydb/core/tx/tx_proxy/proxy.h>
#include <ydb/core/tx/tx_proxy/upload_rows.h>
@@ -85,7 +86,7 @@ protected:
TString LogPrefix;
TString TargetTable;
- NDataShard::TUploadRetryLimits Limits;
+ const NKikimrIndexBuilder::TIndexBuildScanSettings ScanSettings;
TActorId ResponseActorId;
ui64 BuildIndexId = 0;
@@ -104,13 +105,14 @@ protected:
public:
TUploadSampleK(TString targetTable,
- const TIndexBuildInfo::TLimits& limits,
+ const NKikimrIndexBuilder::TIndexBuildScanSettings& scanSettings,
const TActorId& responseActorId,
ui64 buildIndexId,
TIndexBuildInfo::TSample::TRows init,
NTableIndex::TClusterId parent,
NTableIndex::TClusterId child)
: TargetTable(std::move(targetTable))
+ , ScanSettings(scanSettings)
, ResponseActorId(responseActorId)
, BuildIndexId(buildIndexId)
, Init(std::move(init))
@@ -120,7 +122,6 @@ public:
LogPrefix = TStringBuilder()
<< "TUploadSampleK: BuildIndexId: " << BuildIndexId
<< " ResponseActorId: " << ResponseActorId;
- Limits.MaxUploadRowsRetryCount = limits.MaxRetries;
Y_ASSERT(!Init.empty());
Y_ASSERT(Parent < Child);
Y_ASSERT(Child != 0);
@@ -206,10 +207,10 @@ private:
UploadStatus.StatusCode = ev->Get()->Status;
UploadStatus.Issues = std::move(ev->Get()->Issues);
- if (UploadStatus.IsRetriable() && RetryCount < Limits.MaxUploadRowsRetryCount) {
+ if (UploadStatus.IsRetriable() && RetryCount < ScanSettings.GetMaxBatchRetries()) {
LOG_N("Got retriable error, " << Debug() << " RetryCount: " << RetryCount);
- this->Schedule(Limits.GetTimeoutBackoff(RetryCount), new TEvents::TEvWakeup());
+ this->Schedule(NDataShard::GetRetryWakeupTimeoutBackoff(RetryCount), new TEvents::TEvWakeup());
return;
}
TAutoPtr<TEvIndexBuilder::TEvUploadSampleKResponse> response = new TEvIndexBuilder::TEvUploadSampleKResponse;
@@ -624,9 +625,7 @@ private:
buildInfo.SpecializedIndexDescription).GetSettings().settings();
ev->Record.SetK(buildInfo.KMeans.K);
ev->Record.SetUpload(buildInfo.KMeans.GetUpload());
- ev->Record.SetState(NKikimrTxDataShard::TEvLocalKMeansRequest::SAMPLE);
- ev->Record.SetDoneRounds(0);
ev->Record.SetNeedsRounds(3); // TODO(mbkkt) should be configurable
if (buildInfo.KMeans.State != TIndexBuildInfo::TKMeans::MultiLocal) {
@@ -742,9 +741,7 @@ private:
ev->Record.SetTargetName(buildInfo.TargetName);
- ev->Record.SetMaxBatchRows(buildInfo.Limits.MaxBatchRows);
- ev->Record.SetMaxBatchBytes(buildInfo.Limits.MaxBatchBytes);
- ev->Record.SetMaxRetries(buildInfo.Limits.MaxRetries);
+ ev->Record.MutableScanSettings()->CopyFrom(buildInfo.ScanSettings);
auto shardId = CommonFillRecord(ev->Record, shardIdx, buildInfo);
@@ -760,7 +757,7 @@ private:
.Dive(NTableIndex::NTableVectorKmeansTreeIndex::LevelTable);
Y_ASSERT(buildInfo.Sample.Rows.size() <= buildInfo.KMeans.K);
auto actor = new TUploadSampleK(path.PathString(),
- buildInfo.Limits, Self->SelfId(), ui64(BuildId),
+ buildInfo.ScanSettings, Self->SelfId(), ui64(BuildId),
buildInfo.Sample.Rows, buildInfo.KMeans.Parent, buildInfo.KMeans.Child);
TActivationContext::AsActorContext().MakeFor(Self->SelfId()).Register(actor);
@@ -777,7 +774,7 @@ private:
template<typename Send>
bool SendToShards(TIndexBuildInfo& buildInfo, Send&& send) {
- while (!buildInfo.ToUploadShards.empty() && buildInfo.InProgressShards.size() < buildInfo.Limits.MaxShards) {
+ while (!buildInfo.ToUploadShards.empty() && buildInfo.InProgressShards.size() < buildInfo.MaxInProgressShards) {
auto shardIdx = buildInfo.ToUploadShards.front();
buildInfo.ToUploadShards.pop_front();
buildInfo.InProgressShards.emplace(shardIdx);
diff --git a/ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp b/ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp
index dec6877e3f..e07d231c38 100644
--- a/ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp
@@ -296,10 +296,8 @@ void TSchemeShard::TIndexBuilder::TTxBase::Fill(NKikimrIndexBuilder::TIndexBuild
}
}
- settings.set_max_batch_bytes(info.Limits.MaxBatchBytes);
- settings.set_max_batch_rows(info.Limits.MaxBatchRows);
- settings.set_max_shards_in_flight(info.Limits.MaxShards);
- settings.set_max_retries_upload_batch(info.Limits.MaxRetries);
+ settings.MutableScanSettings()->CopyFrom(info.ScanSettings);
+ settings.set_max_shards_in_flight(info.MaxInProgressShards);
}
void TSchemeShard::TIndexBuilder::TTxBase::AddIssue(::google::protobuf::RepeatedPtrField<::Ydb::Issue::IssueMessage>* issues,
diff --git a/ydb/core/tx/schemeshard/schemeshard_export__create.cpp b/ydb/core/tx/schemeshard/schemeshard_export__create.cpp
index 099c564a3e..ea97b3310e 100644
--- a/ydb/core/tx/schemeshard/schemeshard_export__create.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_export__create.cpp
@@ -209,12 +209,25 @@ private:
return true;
}
+ TString GetCommonSourcePath(const Ydb::Export::ExportToS3Settings& settings) {
+ return settings.source_path();
+ }
+
+ TString GetCommonSourcePath(const Ydb::Export::ExportToYtSettings&) {
+ return {};
+ }
+
template <typename TSettings>
bool FillItems(TExportInfo::TPtr exportInfo, const TSettings& settings, TString& explain) {
+ TString commonSourcePath = GetCommonSourcePath(settings);
+ if (commonSourcePath && commonSourcePath.back() != '/') {
+ commonSourcePath.push_back('/');
+ }
exportInfo->Items.reserve(settings.items().size());
for (ui32 itemIdx : xrange(settings.items().size())) {
const auto& item = settings.items(itemIdx);
- const TPath path = TPath::Resolve(item.source_path(), Self);
+ const TString srcPath = commonSourcePath + item.source_path();
+ const TPath path = TPath::Resolve(srcPath, Self);
{
TPath::TChecker checks = path.Check();
checks
@@ -230,7 +243,7 @@ private:
}
}
- exportInfo->Items.emplace_back(item.source_path(), path.Base()->PathId, path->PathType);
+ exportInfo->Items.emplace_back(srcPath, path.Base()->PathId, path->PathType);
exportInfo->PendingItems.push_back(itemIdx);
}
diff --git a/ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp b/ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp
index e8f81a6946..2851f91ffb 100644
--- a/ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp
@@ -477,7 +477,6 @@ private:
}
writer.CloseMap();
}
- writer.Write("exportedObjects", "SchemaMappingV0");
writer.CloseMap();
writer.CloseMap();
diff --git a/ydb/core/tx/schemeshard/schemeshard_impl.cpp b/ydb/core/tx/schemeshard/schemeshard_impl.cpp
index f0b26158dd..4a2756a256 100644
--- a/ydb/core/tx/schemeshard/schemeshard_impl.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_impl.cpp
@@ -4949,6 +4949,7 @@ void TSchemeShard::StateWork(STFUNC_SIG) {
HFuncTraced(TEvImport::TEvForgetImportRequest, Handle);
HFuncTraced(TEvImport::TEvListImportsRequest, Handle);
HFuncTraced(TEvPrivate::TEvImportSchemeReady, Handle);
+ HFuncTraced(TEvPrivate::TEvImportSchemaMappingReady, Handle);
HFuncTraced(TEvPrivate::TEvImportSchemeQueryResult, Handle);
// } // NImport
@@ -7149,32 +7150,27 @@ void TSchemeShard::SetPartitioning(TPathId pathId, TTableInfo::TPtr tableInfo, T
newPartitioningSet.reserve(newPartitioning.size());
const auto& oldPartitioning = tableInfo->GetPartitions();
+ std::vector<TShardIdx> dataErasureShards;
for (const auto& p: newPartitioning) {
if (!oldPartitioning.empty())
newPartitioningSet.insert(p.ShardIdx);
const auto& partitionStats = tableInfo->GetStats().PartitionStats;
auto it = partitionStats.find(p.ShardIdx);
- std::vector<TShardIdx> dataErasureShards;
if (it != partitionStats.end()) {
EnqueueBackgroundCompaction(p.ShardIdx, it->second);
UpdateShardMetrics(p.ShardIdx, it->second);
dataErasureShards.push_back(p.ShardIdx);
}
- if (DataErasureManager->GetStatus() == EDataErasureStatus::IN_PROGRESS) {
- Execute(CreateTxAddEntryToDataErasure(dataErasureShards), this->ActorContext());
- }
+ }
+ if (DataErasureManager->GetStatus() == EDataErasureStatus::IN_PROGRESS) {
+ Execute(CreateTxAddEntryToDataErasure(dataErasureShards), this->ActorContext());
}
- std::vector<TShardIdx> cancelDataErasureShards;
for (const auto& p: oldPartitioning) {
if (!newPartitioningSet.contains(p.ShardIdx)) {
// note that queues might not contain the shard
OnShardRemoved(p.ShardIdx);
- cancelDataErasureShards.push_back(p.ShardIdx);
- }
- if (DataErasureManager->GetStatus() == EDataErasureStatus::IN_PROGRESS) {
- Execute(CreateTxCancelDataErasureShards(cancelDataErasureShards), this->ActorContext());
}
}
}
diff --git a/ydb/core/tx/schemeshard/schemeshard_impl.h b/ydb/core/tx/schemeshard/schemeshard_impl.h
index 6f3ea3f723..9b4a405f96 100644
--- a/ydb/core/tx/schemeshard/schemeshard_impl.h
+++ b/ydb/core/tx/schemeshard/schemeshard_impl.h
@@ -1305,6 +1305,7 @@ public:
void FromXxportInfo(NKikimrImport::TImport& exprt, const TImportInfo::TPtr importInfo);
static void PersistCreateImport(NIceDb::TNiceDb& db, const TImportInfo::TPtr importInfo);
+ static void PersistSchemaMappingImportFields(NIceDb::TNiceDb& db, const TImportInfo::TPtr importInfo);
static void PersistRemoveImport(NIceDb::TNiceDb& db, const TImportInfo::TPtr importInfo);
static void PersistImportState(NIceDb::TNiceDb& db, const TImportInfo::TPtr importInfo);
static void PersistImportItemState(NIceDb::TNiceDb& db, const TImportInfo::TPtr importInfo, ui32 itemIdx);
@@ -1333,6 +1334,7 @@ public:
NTabletFlatExecutor::ITransaction* CreateTxProgressImport(ui64 id, const TMaybe<ui32>& itemIdx = Nothing());
NTabletFlatExecutor::ITransaction* CreateTxProgressImport(TEvPrivate::TEvImportSchemeReady::TPtr& ev);
+ NTabletFlatExecutor::ITransaction* CreateTxProgressImport(TEvPrivate::TEvImportSchemaMappingReady::TPtr& ev);
NTabletFlatExecutor::ITransaction* CreateTxProgressImport(TEvPrivate::TEvImportSchemeQueryResult::TPtr& ev);
NTabletFlatExecutor::ITransaction* CreateTxProgressImport(TEvTxAllocatorClient::TEvAllocateResult::TPtr& ev);
NTabletFlatExecutor::ITransaction* CreateTxProgressImport(TEvSchemeShard::TEvModifySchemeTransactionResult::TPtr& ev);
@@ -1345,6 +1347,7 @@ public:
void Handle(TEvImport::TEvForgetImportRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvImport::TEvListImportsRequest::TPtr& ev, const TActorContext& ctx);
void Handle(TEvPrivate::TEvImportSchemeReady::TPtr& ev, const TActorContext& ctx);
+ void Handle(TEvPrivate::TEvImportSchemaMappingReady::TPtr& ev, const TActorContext& ctx);
void Handle(TEvPrivate::TEvImportSchemeQueryResult::TPtr& ev, const TActorContext& ctx);
void ResumeImports(const TVector<ui64>& ids, const TActorContext& ctx);
diff --git a/ydb/core/tx/schemeshard/schemeshard_import.cpp b/ydb/core/tx/schemeshard/schemeshard_import.cpp
index 05a002eee5..2ccd2d7572 100644
--- a/ydb/core/tx/schemeshard/schemeshard_import.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_import.cpp
@@ -60,6 +60,9 @@ void TSchemeShard::FromXxportInfo(NKikimrImport::TImport& import, const TImportI
}
switch (importInfo->State) {
+ case TImportInfo::EState::DownloadExportMetadata:
+ import.SetProgress(Ydb::Import::ImportProgress::PROGRESS_PREPARING);
+ break;
case TImportInfo::EState::Waiting:
switch (GetMinState(importInfo)) {
case TImportInfo::EState::GetScheme:
@@ -136,8 +139,27 @@ void TSchemeShard::PersistCreateImport(NIceDb::TNiceDb& db, const TImportInfo::T
db.Table<Schema::ImportItems>().Key(importInfo->Id, itemIdx).Update(
NIceDb::TUpdate<Schema::ImportItems::DstPathName>(item.DstPathName),
- NIceDb::TUpdate<Schema::ImportItems::State>(static_cast<ui8>(item.State))
+ NIceDb::TUpdate<Schema::ImportItems::State>(static_cast<ui8>(item.State)),
+ NIceDb::TUpdate<Schema::ImportItems::SrcPrefix>(item.SrcPrefix)
+ );
+ }
+}
+
+void TSchemeShard::PersistSchemaMappingImportFields(NIceDb::TNiceDb& db, const TImportInfo::TPtr importInfo) {
+ // There can be new items, so do at least the same as for creation
+ for (ui32 itemIdx : xrange(importInfo->Items.size())) {
+ const auto& item = importInfo->Items.at(itemIdx);
+
+ db.Table<Schema::ImportItems>().Key(importInfo->Id, itemIdx).Update(
+ NIceDb::TUpdate<Schema::ImportItems::DstPathName>(item.DstPathName),
+ NIceDb::TUpdate<Schema::ImportItems::State>(static_cast<ui8>(item.State)),
+ NIceDb::TUpdate<Schema::ImportItems::SrcPrefix>(item.SrcPrefix)
);
+ if (item.ExportItemIV) {
+ db.Table<Schema::ImportItems>().Key(importInfo->Id, itemIdx).Update(
+ NIceDb::TUpdate<Schema::ImportItems::EncryptionIV>(item.ExportItemIV->GetBinaryString())
+ );
+ }
}
}
@@ -244,6 +266,10 @@ void TSchemeShard::Handle(TEvPrivate::TEvImportSchemeReady::TPtr& ev, const TAct
Execute(CreateTxProgressImport(ev), ctx);
}
+void TSchemeShard::Handle(TEvPrivate::TEvImportSchemaMappingReady::TPtr& ev, const TActorContext& ctx) {
+ Execute(CreateTxProgressImport(ev), ctx);
+}
+
void TSchemeShard::Handle(TEvPrivate::TEvImportSchemeQueryResult::TPtr& ev, const TActorContext& ctx) {
Execute(CreateTxProgressImport(ev), ctx);
}
diff --git a/ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp b/ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp
index d494c514c3..efed43d23d 100644
--- a/ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp
@@ -54,6 +54,7 @@ struct TSchemeShard::TImport::TTxCancel: public TSchemeShard::TXxport::TTxBase {
case TImportInfo::EState::Cancelled:
return respond(Ydb::StatusIds::SUCCESS);
+ case TImportInfo::EState::DownloadExportMetadata:
case TImportInfo::EState::Waiting:
case TImportInfo::EState::Cancellation:
importInfo->Issue = "Cancelled manually";
diff --git a/ydb/core/tx/schemeshard/schemeshard_import__create.cpp b/ydb/core/tx/schemeshard/schemeshard_import__create.cpp
index 98764f8f9b..74388d3645 100644
--- a/ydb/core/tx/schemeshard/schemeshard_import__create.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_import__create.cpp
@@ -3,7 +3,7 @@
#include "schemeshard_import.h"
#include "schemeshard_import_flow_proposals.h"
#include "schemeshard_import_helpers.h"
-#include "schemeshard_import_scheme_getter.h"
+#include "schemeshard_import_getters.h"
#include "schemeshard_import_scheme_query_executor.h"
#include "schemeshard_xxport__helpers.h"
#include "schemeshard_xxport__tx_base.h"
@@ -67,6 +67,42 @@ TString GetDatabase(TSchemeShard& ss) {
return CanonizePath(ss.RootPathElements);
}
+bool ValidateDstPath(const TString& dstPath, TSchemeShard* ss, TString& explain) {
+ const TPath path = TPath::Resolve(dstPath, ss);
+ TPath::TChecker checks = path.Check();
+ checks
+ .IsAtLocalSchemeShard()
+ .HasResolvedPrefix()
+ .FailOnRestrictedCreateInTempZone();
+
+ if (path.IsResolved()) {
+ checks
+ .IsResolved()
+ .IsDeleted();
+ } else {
+ checks
+ .NotEmpty()
+ .NotResolved();
+ }
+
+ if (checks) {
+ checks
+ .IsValidLeafName()
+ .DepthLimit()
+ .PathsLimit();
+
+ if (path.Parent().IsResolved()) {
+ checks.DirChildrenLimit();
+ }
+ }
+
+ if (!checks) {
+ explain = checks.GetError();
+ return false;
+ }
+ return true;
+}
+
}
struct TSchemeShard::TImport::TTxCreate: public TSchemeShard::TXxport::TTxBase {
@@ -141,6 +177,7 @@ struct TSchemeShard::TImport::TTxCreate: public TSchemeShard::TXxport::TTxBase {
}
TImportInfo::TPtr importInfo = nullptr;
+ TImportInfo::EState initialState = TImportInfo::EState::Waiting;
switch (request.GetRequest().GetSettingsCase()) {
case NKikimrImport::TCreateImportRequest::kImportFromS3Settings:
@@ -150,6 +187,10 @@ struct TSchemeShard::TImport::TTxCreate: public TSchemeShard::TXxport::TTxBase {
settings.set_scheme(Ydb::Import::ImportFromS3Settings::HTTPS);
}
+ if (!settings.source_prefix().empty() && AppData()->FeatureFlags.GetEnableEncryptedExport()) {
+ initialState = TImportInfo::EState::DownloadExportMetadata;
+ }
+
importInfo = new TImportInfo(id, uid, TImportInfo::EKind::S3, settings, domainPath.Base()->PathId, request.GetPeerName());
if (request.HasUserSID()) {
@@ -172,7 +213,7 @@ struct TSchemeShard::TImport::TTxCreate: public TSchemeShard::TXxport::TTxBase {
NIceDb::TNiceDb db(txc.DB);
Self->PersistCreateImport(db, importInfo);
- importInfo->State = TImportInfo::EState::Waiting;
+ importInfo->State = initialState;
importInfo->StartTime = TAppData::TimeProvider->Now();
Self->PersistImportState(db, importInfo);
@@ -232,42 +273,12 @@ private:
return false;
}
- const TPath path = TPath::Resolve(dstPath, Self);
- {
- TPath::TChecker checks = path.Check();
- checks
- .IsAtLocalSchemeShard()
- .HasResolvedPrefix()
- .FailOnRestrictedCreateInTempZone();
-
- if (path.IsResolved()) {
- checks
- .IsResolved()
- .IsDeleted();
- } else {
- checks
- .NotEmpty()
- .NotResolved();
- }
-
- if (checks) {
- checks
- .IsValidLeafName()
- .DepthLimit()
- .PathsLimit();
-
- if (path.Parent().IsResolved()) {
- checks.DirChildrenLimit();
- }
- }
-
- if (!checks) {
- explain = checks.GetError();
- return false;
- }
+ if (!ValidateDstPath(dstPath, Self, explain)) {
+ return false;
}
- importInfo->Items.emplace_back(dstPath);
+ auto& item = importInfo->Items.emplace_back(dstPath);
+ item.SrcPrefix = settings.items(itemIdx).source_prefix();
}
return true;
@@ -284,6 +295,7 @@ struct TSchemeShard::TImport::TTxProgress: public TSchemeShard::TXxport::TTxBase
ui64 Id;
TMaybe<ui32> ItemIdx;
TEvPrivate::TEvImportSchemeReady::TPtr SchemeResult = nullptr;
+ TEvPrivate::TEvImportSchemaMappingReady::TPtr SchemaMappingResult = nullptr;
TEvPrivate::TEvImportSchemeQueryResult::TPtr SchemeQueryResult = nullptr;
TEvTxAllocatorClient::TEvAllocateResult::TPtr AllocateResult = nullptr;
TEvSchemeShard::TEvModifySchemeTransactionResult::TPtr ModifyResult = nullptr;
@@ -303,6 +315,13 @@ struct TSchemeShard::TImport::TTxProgress: public TSchemeShard::TXxport::TTxBase
{
}
+ explicit TTxProgress(TSelf* self, TEvPrivate::TEvImportSchemaMappingReady::TPtr& ev)
+ : TXxport::TTxBase(self)
+ , Id(ev->Get()->ImportId)
+ , SchemaMappingResult(ev)
+ {
+ }
+
explicit TTxProgress(TSelf* self, TEvPrivate::TEvImportSchemeQueryResult::TPtr& ev)
: TXxport::TTxBase(self)
, SchemeQueryResult(ev)
@@ -342,6 +361,8 @@ struct TSchemeShard::TImport::TTxProgress: public TSchemeShard::TXxport::TTxBase
if (SchemeResult) {
OnSchemeResult(txc, ctx);
+ } else if (SchemaMappingResult) {
+ OnSchemaMappingResult(txc, ctx);
} else if (SchemeQueryResult) {
OnSchemeQueryPreparation(txc, ctx);
} else if (AllocateResult) {
@@ -372,10 +393,18 @@ private:
<< ": info# " << importInfo->ToString()
<< ", item# " << item.ToString(itemIdx));
- item.SchemeGetter = ctx.RegisterWithSameMailbox(CreateSchemeGetter(Self->SelfId(), importInfo, itemIdx));
+ item.SchemeGetter = ctx.RegisterWithSameMailbox(CreateSchemeGetter(Self->SelfId(), importInfo, itemIdx, item.ExportItemIV));
Self->RunningImportSchemeGetters.emplace(item.SchemeGetter);
}
+ void GetSchemaMapping(TImportInfo::TPtr importInfo, const TActorContext& ctx) {
+ LOG_I("TImport::TTxProgress: Download schema mapping"
+ << ": info# " << importInfo->ToString());
+
+ importInfo->SchemaMappingGetter = ctx.RegisterWithSameMailbox(CreateSchemaMappingGetter(Self->SelfId(), importInfo));
+ Self->RunningImportSchemeGetters.emplace(importInfo->SchemaMappingGetter);
+ }
+
void CreateTable(TImportInfo::TPtr importInfo, ui32 itemIdx, TTxId txId) {
Y_ABORT_UNLESS(itemIdx < importInfo->Items.size());
auto& item = importInfo->Items.at(itemIdx);
@@ -685,15 +714,27 @@ private:
}
void Cancel(TImportInfo::TPtr importInfo, ui32 itemIdx, TStringBuf marker) {
- Y_ABORT_UNLESS(itemIdx < importInfo->Items.size());
- const auto& item = importInfo->Items.at(itemIdx);
+ const TItem* item = nullptr;
+ if (itemIdx != ui32(-1)) {
+ Y_ABORT_UNLESS(itemIdx < importInfo->Items.size());
+ item = &importInfo->Items.at(itemIdx);
+ }
+ TStringBuilder itemLogStr;
+ if (item) {
+ itemLogStr << ", item# " << item->ToString(itemIdx);
+ }
LOG_N("TImport::TTxProgress: " << marker << ", cancelling"
<< ", info# " << importInfo->ToString()
- << ", item# " << item.ToString(itemIdx));
+ << itemLogStr);
importInfo->State = EState::Cancelled;
+ if (auto schemaMappingGetter = std::exchange(importInfo->SchemaMappingGetter, {})) {
+ Send(schemaMappingGetter, new TEvents::TEvPoisonPill());
+ Self->RunningImportSchemeGetters.erase(schemaMappingGetter);
+ }
+
for (ui32 i : xrange(importInfo->Items.size())) {
KillChildActors(importInfo->Items[i]);
if (i == itemIdx) {
@@ -720,14 +761,16 @@ private:
}
void CancelAndPersist(NIceDb::TNiceDb& db, TImportInfo::TPtr importInfo, ui32 itemIdx, TStringBuf itemIssue, TStringBuf marker) {
- Y_ABORT_UNLESS(itemIdx < importInfo->Items.size());
- auto& item = importInfo->Items[itemIdx];
+ if (itemIdx != ui32(-1)) {
+ Y_ABORT_UNLESS(itemIdx < importInfo->Items.size());
+ auto& item = importInfo->Items[itemIdx];
- item.Issue = itemIssue;
- PersistImportItemState(db, importInfo, itemIdx);
+ item.Issue = itemIssue;
+ PersistImportItemState(db, importInfo, itemIdx);
- if (importInfo->State != EState::Waiting) {
- return;
+ if (importInfo->State != EState::Waiting) {
+ return;
+ }
}
Cancel(importInfo, itemIdx, marker);
@@ -801,11 +844,20 @@ private:
<< ": id# " << Id
<< ", itemIdx# " << ItemIdx);
- if (ItemIdx) {
- Resume(importInfo, *ItemIdx, txc, ctx);
- } else {
- for (ui32 itemIdx : xrange(importInfo->Items.size())) {
- Resume(importInfo, itemIdx, txc, ctx);
+ switch (importInfo->State) {
+ case EState::DownloadExportMetadata: {
+ GetSchemaMapping(importInfo, ctx);
+ break;
+ }
+ default: {
+ if (ItemIdx) {
+ Resume(importInfo, *ItemIdx, txc, ctx);
+ } else {
+ for (ui32 itemIdx : xrange(importInfo->Items.size())) {
+ Resume(importInfo, itemIdx, txc, ctx);
+ }
+ }
+ break;
}
}
}
@@ -943,7 +995,7 @@ private:
// Send the creation query to KQP to prepare.
const auto database = GetDatabase(*Self);
const TString source = TStringBuilder()
- << importInfo->Settings.items(msg.ItemIdx).source_prefix() << NYdb::NDump::NFiles::CreateView().FileName;
+ << importInfo->GetItemSrcPrefix(msg.ItemIdx) << NYdb::NDump::NFiles::CreateView().FileName;
NYql::TIssues issues;
if (!NYdb::NDump::RewriteCreateViewQuery(item.CreationQuery, database, true, item.DstPathName, issues)) {
@@ -965,6 +1017,94 @@ private:
}
}
+ void OnSchemaMappingResult(TTransactionContext& txc, const TActorContext& ctx) {
+ Y_ABORT_UNLESS(SchemaMappingResult);
+
+ const auto& msg = *SchemaMappingResult->Get();
+
+ LOG_D("TImport::TTxProgress: OnSchemaMappingResult"
+ << ": id# " << msg.ImportId
+ << ", success# " << msg.Success
+ );
+
+ if (!Self->Imports.contains(msg.ImportId)) {
+ LOG_E("TImport::TTxProgress: OnSchemaMappingResult received unknown id"
+ << ": id# " << msg.ImportId);
+ return;
+ }
+
+ TImportInfo::TPtr importInfo = Self->Imports.at(msg.ImportId);
+
+ NIceDb::TNiceDb db(txc.DB);
+
+ Self->RunningImportSchemeGetters.erase(std::exchange(importInfo->SchemaMappingGetter, {}));
+
+ if (!msg.Success) {
+ return CancelAndPersist(db, importInfo, -1, {}, TStringBuilder() << "cannot get schema mapping: " << msg.Error);
+ }
+
+ if (!importInfo->SchemaMapping->Items.empty()) {
+ if (importInfo->Settings.has_encryption_settings() != importInfo->SchemaMapping->Items[0].IV.Defined()) {
+ return CancelAndPersist(db, importInfo, -1, {}, "incorrect schema mapping");
+ }
+ }
+
+ // Path in database for import
+ TVector<TString> dstPath;
+ if (importInfo->Settings.destination_path().empty()) {
+ dstPath = Self->RootPathElements;
+ } else {
+ dstPath = SplitPath(importInfo->Settings.destination_path());
+ }
+ TString sourcePrefix = importInfo->Settings.source_prefix();
+ if (sourcePrefix && sourcePrefix.back() != '/') {
+ sourcePrefix.push_back('/');
+ }
+ auto combineDstPath = [&](const TString& mappingObjectPath) {
+ TVector<TString> objectPath = SplitPath(mappingObjectPath);
+ TVector<TString> dstObjectPath = dstPath;
+ dstObjectPath.insert(dstObjectPath.end(), objectPath.begin(), objectPath.end());
+ return CombinePath(dstObjectPath.begin(), dstObjectPath.end());
+ };
+ auto init = [&](const NBackup::TSchemaMapping::TItem& schemaMappingItem, NSchemeShard::TImportInfo::TItem& item) {
+ TStringBuf exportPrefix(schemaMappingItem.ExportPrefix);
+ exportPrefix.SkipPrefix("/");
+ item.SrcPrefix = TStringBuilder() << sourcePrefix << exportPrefix;
+ item.ExportItemIV = schemaMappingItem.IV;
+ };
+ if (importInfo->Items.empty()) { // Fill the whole list from schema mapping
+ for (const auto& schemaMappingItem : importInfo->SchemaMapping->Items) {
+ TString dstPath = combineDstPath(schemaMappingItem.ObjectPath);
+ TString explain;
+ if (!ValidateDstPath(dstPath, Self, explain)) {
+ return CancelAndPersist(db, importInfo, -1, {}, TStringBuilder() << "cannot validate mapping: " << explain);
+ }
+
+ auto& item = importInfo->Items.emplace_back(dstPath);
+ init(schemaMappingItem, item);
+ }
+ } else { // Take existing items from items list
+ THashMap<TString, size_t> schemaMappingIndex;
+ for (size_t i = 0; i < importInfo->SchemaMapping->Items.size(); ++i) {
+ schemaMappingIndex[combineDstPath(importInfo->SchemaMapping->Items[i].ObjectPath)] = i;
+ }
+ for (auto& item : importInfo->Items) {
+ TString dstPath = CanonizePath(item.DstPathName);
+ auto mappingIt = schemaMappingIndex.find(dstPath);
+ if (mappingIt == schemaMappingIndex.end()) {
+ return CancelAndPersist(db, importInfo, -1, {}, TStringBuilder() << "cannot find path " << dstPath << " in schema mapping");
+ }
+ const auto& schemaMappingItem = importInfo->SchemaMapping->Items[mappingIt->second];
+ init(schemaMappingItem, item);
+ }
+ }
+
+ importInfo->State = EState::Waiting;
+ PersistImportState(db, importInfo);
+ PersistSchemaMappingImportFields(db, importInfo);
+ Resume(txc, ctx);
+ }
+
void OnSchemeQueryPreparation(TTransactionContext& txc, const TActorContext& ctx) {
Y_ABORT_UNLESS(SchemeQueryResult);
const auto& message = *SchemeQueryResult.Get()->Get();
@@ -1408,6 +1548,10 @@ ITransaction* TSchemeShard::CreateTxProgressImport(TEvPrivate::TEvImportSchemeRe
return new TImport::TTxProgress(this, ev);
}
+ITransaction* TSchemeShard::CreateTxProgressImport(TEvPrivate::TEvImportSchemaMappingReady::TPtr& ev) {
+ return new TImport::TTxProgress(this, ev);
+}
+
ITransaction* TSchemeShard::CreateTxProgressImport(TEvPrivate::TEvImportSchemeQueryResult::TPtr& ev) {
return new TImport::TTxProgress(this, ev);
}
diff --git a/ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp b/ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp
index 01984722d3..745df56387 100644
--- a/ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp
@@ -144,6 +144,14 @@ THolder<TEvSchemeShard::TEvModifySchemeTransaction> RestorePropose(
task.SetTableName(dstPath.LeafName());
*task.MutableTableDescription() = RebuildTableDescription(GetTableDescription(ss, item.DstPathId), item.Scheme);
+ if (importInfo->Settings.has_encryption_settings()) {
+ auto& taskEncryptionSettings = *task.MutableEncryptionSettings();
+ *taskEncryptionSettings.MutableSymmetricKey() = importInfo->Settings.encryption_settings().symmetric_key();
+ if (item.ExportItemIV) {
+ taskEncryptionSettings.SetIV(item.ExportItemIV->GetBinaryString());
+ }
+ }
+
switch (importInfo->Kind) {
case TImportInfo::EKind::S3:
{
@@ -153,7 +161,7 @@ THolder<TEvSchemeShard::TEvModifySchemeTransaction> RestorePropose(
restoreSettings.SetBucket(importInfo->Settings.bucket());
restoreSettings.SetAccessKey(importInfo->Settings.access_key());
restoreSettings.SetSecretKey(importInfo->Settings.secret_key());
- restoreSettings.SetObjectKeyPattern(importInfo->Settings.items(itemIdx).source_prefix());
+ restoreSettings.SetObjectKeyPattern(importInfo->GetItemSrcPrefix(itemIdx));
restoreSettings.SetUseVirtualAddressing(!importInfo->Settings.disable_virtual_addressing());
switch (importInfo->Settings.scheme()) {
@@ -338,7 +346,7 @@ THolder<TEvSchemeShard::TEvModifySchemeTransaction> CreateConsumersPropose(
auto* tabletConfig = pqGroup.MutablePQTabletConfig();
const auto& pqConfig = AppData()->PQConfig;
-
+
for (const auto& consumer : topic.consumers()) {
auto& addedConsumer = *tabletConfig->AddConsumers();
auto consumerName = NPersQueue::ConvertNewConsumerName(consumer.name(), pqConfig);
@@ -347,7 +355,7 @@ THolder<TEvSchemeShard::TEvModifySchemeTransaction> CreateConsumersPropose(
addedConsumer.SetImportant(true);
}
}
-
+
return propose;
}
diff --git a/ydb/core/tx/schemeshard/schemeshard_import_scheme_getter.cpp b/ydb/core/tx/schemeshard/schemeshard_import_getters.cpp
index e09e9efadc..4b372dafc5 100644
--- a/ydb/core/tx/schemeshard/schemeshard_import_scheme_getter.cpp
+++ b/ydb/core/tx/schemeshard/schemeshard_import_getters.cpp
@@ -1,8 +1,9 @@
-#include "schemeshard_import_scheme_getter.h"
+#include "schemeshard_import_getters.h"
#include "schemeshard_import_helpers.h"
#include "schemeshard_private.h"
#include <ydb/core/backup/common/checksum.h>
+#include <ydb/core/backup/common/encryption.h>
#include <ydb/core/backup/common/metadata.h>
#include <ydb/core/wrappers/s3_storage_config.h>
#include <ydb/core/wrappers/s3_wrapper.h>
@@ -12,6 +13,8 @@
#include <ydb/library/actors/core/hfunc.h>
#include <ydb/public/lib/ydb_cli/dump/files/files.h>
+#include <library/cpp/json/json_reader.h>
+
#include <google/protobuf/text_format.h>
#include <util/string/subst.h>
@@ -26,31 +29,173 @@ using namespace Aws::Client;
using namespace Aws::S3;
using namespace Aws;
+static constexpr TDuration MaxDelay = TDuration::Minutes(10);
+
+template <class TDerived>
+class TGetterFromS3 : public TActorBootstrapped<TDerived> {
+protected:
+ explicit TGetterFromS3(TImportInfo::TPtr importInfo, TMaybe<NBackup::TEncryptionIV> iv)
+ : ImportInfo(std::move(importInfo))
+ , ExternalStorageConfig(new NWrappers::NExternalStorage::TS3ExternalStorageConfig(ImportInfo->Settings))
+ , IV(std::move(iv))
+ , Retries(ImportInfo->Settings.number_of_retries())
+ {
+ if (ImportInfo->Settings.has_encryption_settings()) {
+ Key = NBackup::TEncryptionKey(ImportInfo->Settings.encryption_settings().symmetric_key().key());
+ }
+ }
+
+ void HeadObject(const TString& key, bool autoAddEncSuffix = true) {
+ auto request = Model::HeadObjectRequest()
+ .WithKey(GetKey(key, autoAddEncSuffix));
+
+ this->Send(Client, new TEvExternalStorage::TEvHeadObjectRequest(request));
+ }
+
+ void GetObject(const TString& key, const std::pair<ui64, ui64>& range, bool autoAddEncSuffix = true) {
+ auto request = Model::GetObjectRequest()
+ .WithKey(GetKey(key, autoAddEncSuffix))
+ .WithRange(TStringBuilder() << "bytes=" << range.first << "-" << range.second);
+
+ this->Send(Client, new TEvExternalStorage::TEvGetObjectRequest(request));
+ }
+
+ void GetObject(const TString& key, ui64 contentLength, bool autoAddEncSuffix = true) {
+ GetObject(key, std::make_pair(0, contentLength - 1), autoAddEncSuffix);
+ }
+
+ void ListObjects(const TString& prefix) {
+ auto request = Model::ListObjectsRequest()
+ .WithPrefix(prefix);
+
+ this->Send(Client, new TEvExternalStorage::TEvListObjectsRequest(request));
+ }
+
+ void Download(const TString& key, bool autoAddEncSuffix = true) {
+ CreateClient();
+ HeadObject(key, autoAddEncSuffix);
+ }
+
+ void CreateClient() {
+ if (Client) {
+ this->Send(Client, new TEvents::TEvPoisonPill());
+ }
+ Client = this->RegisterWithSameMailbox(CreateS3Wrapper(ExternalStorageConfig->ConstructStorageOperator()));
+ }
+
+ void PassAway() override {
+ this->Send(Client, new TEvents::TEvPoisonPill());
+ TActorBootstrapped<TDerived>::PassAway();
+ }
+
+ TString GetKey(TString key, bool autoAddEncSuffix = true) {
+ if (autoAddEncSuffix && ImportInfo->Settings.has_encryption_settings()) {
+ key += ".enc";
+ }
+ return key;
+ }
+
+ template <typename TResult>
+ bool CheckResult(const TResult& result, const TStringBuf marker) {
+ if (result.IsSuccess()) {
+ return true;
+ }
+
+ LOG_E("Error at '" << marker << "'"
+ << ": self# " << this->SelfId()
+ << ", error# " << result);
+ MaybeRetry(result.GetError());
+
+ return false;
+ }
+
+ void MaybeRetry(const Aws::S3::S3Error& error) {
+ if (Attempt < Retries && error.ShouldRetry()) {
+ Delay = Min(Delay * ++Attempt, MaxDelay);
+ this->Schedule(Delay, new TEvents::TEvWakeup());
+ } else {
+ Reply(false, TStringBuilder() << "S3 error: " << error.GetMessage().c_str());
+ }
+ }
+
+ void ResetRetries() {
+ Attempt = 0;
+ }
+
+ // If export is encrypted, decrypts and gets export IV,
+ // else returns true
+ bool MaybeDecryptAndSaveIV(const TString& content, TString& result) {
+ if (Key) {
+ try {
+ auto [buffer, iv] = NBackup::TEncryptedFileDeserializer::DecryptFullFile(*Key, TBuffer(content.data(), content.size()));
+ IV = iv;
+ result.assign(buffer.Data(), buffer.Size());
+ return true;
+ } catch (const std::exception& ex) {
+ Reply(false, ex.what());
+ return false;
+ }
+ }
+ result = content;
+ return true;
+ }
+
+ bool MaybeDecrypt(const TString& content, TString& result, NBackup::EBackupFileType fileType) {
+ if (Key && IV) {
+ try {
+ NBackup::TEncryptionIV expectedIV = NBackup::TEncryptionIV::Combine(*IV, fileType, 0 /* already combined */, 0);
+ auto buffer = NBackup::TEncryptedFileDeserializer::DecryptFullFile(*Key, expectedIV, TBuffer(content.data(), content.size()));
+ result.assign(buffer.Data(), buffer.Size());
+ return true;
+ } catch (const std::exception& ex) {
+ Reply(false, ex.what());
+ return false;
+ }
+ }
+ result = content;
+ return true;
+ }
+
+ virtual void Reply(bool success = true, const TString& error = TString()) = 0;
+
+protected:
+ TImportInfo::TPtr ImportInfo;
+ NWrappers::IExternalStorageConfig::TPtr ExternalStorageConfig;
+ TActorId Client;
+ TMaybe<NBackup::TEncryptionKey> Key;
+ TMaybe<NBackup::TEncryptionIV> IV;
+
+ const ui32 Retries;
+ ui32 Attempt = 0;
+
+ TDuration Delay = TDuration::Minutes(1);
+};
+
// Downloads scheme-related objects from S3
-class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
- static TString MetadataKeyFromSettings(const Ydb::Import::ImportFromS3Settings& settings, ui32 itemIdx) {
- Y_ABORT_UNLESS(itemIdx < (ui32)settings.items_size());
- return TStringBuilder() << settings.items(itemIdx).source_prefix() << "/metadata.json";
+class TSchemeGetter: public TGetterFromS3<TSchemeGetter> {
+ static TString MetadataKeyFromSettings(const TImportInfo::TPtr& importInfo, ui32 itemIdx) {
+ Y_ABORT_UNLESS(itemIdx < importInfo->Items.size());
+ return TStringBuilder() << importInfo->GetItemSrcPrefix(itemIdx) << "/metadata.json";
}
- static TString SchemeKeyFromSettings(const Ydb::Import::ImportFromS3Settings& settings, ui32 itemIdx, TStringBuf filename) {
- Y_ABORT_UNLESS(itemIdx < (ui32)settings.items_size());
- return TStringBuilder() << settings.items(itemIdx).source_prefix() << '/' << filename;
+ static TString SchemeKeyFromSettings(const TImportInfo::TPtr& importInfo, ui32 itemIdx, TStringBuf filename) {
+ Y_ABORT_UNLESS(itemIdx < importInfo->Items.size());
+ return TStringBuilder() << importInfo->GetItemSrcPrefix(itemIdx) << '/' << filename;
}
- static TString PermissionsKeyFromSettings(const Ydb::Import::ImportFromS3Settings& settings, ui32 itemIdx) {
- Y_ABORT_UNLESS(itemIdx < (ui32)settings.items_size());
- return TStringBuilder() << settings.items(itemIdx).source_prefix() << "/permissions.pb";
+ static TString PermissionsKeyFromSettings(const TImportInfo::TPtr& importInfo, ui32 itemIdx) {
+ Y_ABORT_UNLESS(itemIdx < importInfo->Items.size());
+ return TStringBuilder() << importInfo->GetItemSrcPrefix(itemIdx) << "/permissions.pb";
}
- static TString ChangefeedDescriptionKeyFromSettings(const Ydb::Import::ImportFromS3Settings& settings, ui32 itemIdx, const TString& changefeedName) {
- Y_ABORT_UNLESS(itemIdx < (ui32)settings.items_size());
- return TStringBuilder() << settings.items(itemIdx).source_prefix() << "/" << changefeedName << "/changefeed_description.pb";
+ static TString ChangefeedDescriptionKeyFromSettings(const TImportInfo::TPtr& importInfo, ui32 itemIdx, const TString& changefeedName) {
+ Y_ABORT_UNLESS(itemIdx < importInfo->Items.size());
+ return TStringBuilder() << importInfo->GetItemSrcPrefix(itemIdx) << "/" << changefeedName << "/changefeed_description.pb";
}
- static TString TopicDescriptionKeyFromSettings(const Ydb::Import::ImportFromS3Settings& settings, ui32 itemIdx, const TString& changefeedName) {
- Y_ABORT_UNLESS(itemIdx < (ui32)settings.items_size());
- return TStringBuilder() << settings.items(itemIdx).source_prefix() << "/" << changefeedName << "/topic_description.pb";
+ static TString TopicDescriptionKeyFromSettings(const TImportInfo::TPtr& importInfo, ui32 itemIdx, const TString& changefeedName) {
+ Y_ABORT_UNLESS(itemIdx < importInfo->Items.size());
+ return TStringBuilder() << importInfo->GetItemSrcPrefix(itemIdx) << "/" << changefeedName << "/topic_description.pb";
}
static bool IsView(TStringBuf schemeKey) {
@@ -61,13 +206,6 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
return errorType == S3Errors::RESOURCE_NOT_FOUND || errorType == S3Errors::NO_SUCH_KEY;
}
- void HeadObject(const TString& key) {
- auto request = Model::HeadObjectRequest()
- .WithKey(key);
-
- Send(Client, new TEvExternalStorage::TEvHeadObjectRequest(request));
- }
-
void HandleMetadata(TEvExternalStorage::TEvHeadObjectResponse::TPtr& ev) {
const auto& result = ev->Get()->Result;
@@ -79,8 +217,7 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
return;
}
- const auto contentLength = result.GetResult().GetContentLength();
- GetObject(MetadataKey, std::make_pair(0, contentLength - 1));
+ GetObject(MetadataKey, result.GetResult().GetContentLength());
}
void HandleScheme(TEvExternalStorage::TEvHeadObjectResponse::TPtr& ev) {
@@ -92,7 +229,7 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
if (!IsView(SchemeKey) && NoObjectFound(result.GetError().GetErrorType())) {
// try search for a view
- SchemeKey = SchemeKeyFromSettings(ImportInfo->Settings, ItemIdx, NYdb::NDump::NFiles::CreateView().FileName);
+ SchemeKey = SchemeKeyFromSettings(ImportInfo, ItemIdx, NYdb::NDump::NFiles::CreateView().FileName);
HeadObject(SchemeKey);
return;
}
@@ -101,8 +238,7 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
return;
}
- const auto contentLength = result.GetResult().GetContentLength();
- GetObject(SchemeKey, std::make_pair(0, contentLength - 1));
+ GetObject(SchemeKey, result.GetResult().GetContentLength());
}
void HandlePermissions(TEvExternalStorage::TEvHeadObjectResponse::TPtr& ev) {
@@ -119,8 +255,7 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
return;
}
- const auto contentLength = result.GetResult().GetContentLength();
- GetObject(PermissionsKey, std::make_pair(0, contentLength - 1));
+ GetObject(PermissionsKey, result.GetResult().GetContentLength());
}
void HandleChecksum(TEvExternalStorage::TEvHeadObjectResponse::TPtr& ev) {
@@ -134,8 +269,7 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
return;
}
- const auto contentLength = result.GetResult().GetContentLength();
- GetObject(NBackup::ChecksumKey(CurrentObjectKey), std::make_pair(0, contentLength - 1));
+ GetObject(NBackup::ChecksumKey(CurrentObjectKey), result.GetResult().GetContentLength(), false);
}
void HandleChangefeed(TEvExternalStorage::TEvHeadObjectResponse::TPtr& ev) {
@@ -149,9 +283,8 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
return;
}
- const auto contentLength = result.GetResult().GetContentLength();
Y_ABORT_UNLESS(IndexDownloadedChangefeed < ChangefeedsNames.size());
- GetObject(ChangefeedDescriptionKeyFromSettings(ImportInfo->Settings, ItemIdx, ChangefeedsNames[IndexDownloadedChangefeed]), std::make_pair(0, contentLength - 1));
+ GetObject(ChangefeedDescriptionKeyFromSettings(ImportInfo, ItemIdx, ChangefeedsNames[IndexDownloadedChangefeed]), result.GetResult().GetContentLength());
}
void HandleTopic(TEvExternalStorage::TEvHeadObjectResponse::TPtr& ev) {
@@ -165,17 +298,8 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
return;
}
- const auto contentLength = result.GetResult().GetContentLength();
Y_ABORT_UNLESS(IndexDownloadedChangefeed < ChangefeedsNames.size());
- GetObject(TopicDescriptionKeyFromSettings(ImportInfo->Settings, ItemIdx, ChangefeedsNames[IndexDownloadedChangefeed]), std::make_pair(0, contentLength - 1));
- }
-
- void GetObject(const TString& key, const std::pair<ui64, ui64>& range) {
- auto request = Model::GetObjectRequest()
- .WithKey(key)
- .WithRange(TStringBuilder() << "bytes=" << range.first << "-" << range.second);
-
- Send(Client, new TEvExternalStorage::TEvGetObjectRequest(request));
+ GetObject(TopicDescriptionKeyFromSettings(ImportInfo, ItemIdx, ChangefeedsNames[IndexDownloadedChangefeed]), result.GetResult().GetContentLength());
}
void HandleMetadata(TEvExternalStorage::TEvGetObjectResponse::TPtr& ev) {
@@ -190,14 +314,19 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
return;
}
+ TString content;
+ if (!MaybeDecrypt(msg.Body, content, NBackup::EBackupFileType::Metadata)) {
+ return;
+ }
+
Y_ABORT_UNLESS(ItemIdx < ImportInfo->Items.size());
auto& item = ImportInfo->Items.at(ItemIdx);
LOG_T("Trying to parse metadata"
<< ": self# " << SelfId()
- << ", body# " << SubstGlobalCopy(msg.Body, "\n", "\\n"));
+ << ", body# " << SubstGlobalCopy(content, "\n", "\\n"));
- item.Metadata = NBackup::TMetadata::Deserialize(msg.Body);
+ item.Metadata = NBackup::TMetadata::Deserialize(content);
if (item.Metadata.HasVersion() && item.Metadata.GetVersion() == 0) {
NeedValidateChecksums = false;
@@ -208,7 +337,7 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
};
if (NeedValidateChecksums) {
- StartValidatingChecksum(MetadataKey, msg.Body, nextStep);
+ StartValidatingChecksum(MetadataKey, content, nextStep);
} else {
nextStep();
}
@@ -226,6 +355,11 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
return;
}
+ TString content;
+ if (!MaybeDecrypt(msg.Body, content, NBackup::EBackupFileType::TableSchema)) {
+ return;
+ }
+
Y_ABORT_UNLESS(ItemIdx < ImportInfo->Items.size());
auto& item = ImportInfo->Items.at(ItemIdx);
@@ -233,11 +367,11 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
<< ": self# " << SelfId()
<< ", itemIdx# " << ItemIdx
<< ", schemeKey# " << SchemeKey
- << ", body# " << SubstGlobalCopy(msg.Body, "\n", "\\n"));
+ << ", body# " << SubstGlobalCopy(content, "\n", "\\n"));
if (IsView(SchemeKey)) {
- item.CreationQuery = msg.Body;
- } else if (!google::protobuf::TextFormat::ParseFromString(msg.Body, &item.Scheme)) {
+ item.CreationQuery = content;
+ } else if (!google::protobuf::TextFormat::ParseFromString(content, &item.Scheme)) {
return Reply(false, "Cannot parse scheme");
}
@@ -250,7 +384,7 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
};
if (NeedValidateChecksums) {
- StartValidatingChecksum(SchemeKey, msg.Body, nextStep);
+ StartValidatingChecksum(SchemeKey, content, nextStep);
} else {
nextStep();
}
@@ -268,15 +402,20 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
return;
}
+ TString content;
+ if (!MaybeDecrypt(msg.Body, content, NBackup::EBackupFileType::Permissions)) {
+ return;
+ }
+
Y_ABORT_UNLESS(ItemIdx < ImportInfo->Items.size());
auto& item = ImportInfo->Items.at(ItemIdx);
LOG_T("Trying to parse permissions"
<< ": self# " << SelfId()
- << ", body# " << SubstGlobalCopy(msg.Body, "\n", "\\n"));
+ << ", body# " << SubstGlobalCopy(content, "\n", "\\n"));
Ydb::Scheme::ModifyPermissionsRequest permissions;
- if (!google::protobuf::TextFormat::ParseFromString(msg.Body, &permissions)) {
+ if (!google::protobuf::TextFormat::ParseFromString(content, &permissions)) {
return Reply(false, "Cannot parse permissions");
}
item.Permissions = std::move(permissions);
@@ -286,7 +425,7 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
};
if (NeedValidateChecksums) {
- StartValidatingChecksum(PermissionsKey, msg.Body, nextStep);
+ StartValidatingChecksum(PermissionsKey, content, nextStep);
} else {
nextStep();
}
@@ -326,15 +465,20 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
return;
}
+ TString content;
+ if (!MaybeDecrypt(msg.Body, content, NBackup::EBackupFileType::TableChangefeed)) {
+ return;
+ }
+
Y_ABORT_UNLESS(ItemIdx < ImportInfo->Items.size());
auto& item = ImportInfo->Items.at(ItemIdx);
LOG_T("Trying to parse changefeed"
<< ": self# " << SelfId()
- << ", body# " << SubstGlobalCopy(msg.Body, "\n", "\\n"));
+ << ", body# " << SubstGlobalCopy(content, "\n", "\\n"));
Ydb::Table::ChangefeedDescription changefeed;
- if (!google::protobuf::TextFormat::ParseFromString(msg.Body, &changefeed)) {
+ if (!google::protobuf::TextFormat::ParseFromString(content, &changefeed)) {
return Reply(false, "Cannot parse сhangefeed");
}
@@ -342,14 +486,14 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
auto nextStep = [this]() {
Become(&TThis::StateDownloadTopics);
- HeadObject(TopicDescriptionKeyFromSettings(ImportInfo->Settings, ItemIdx, ChangefeedsNames[IndexDownloadedChangefeed]));
+ HeadObject(TopicDescriptionKeyFromSettings(ImportInfo, ItemIdx, ChangefeedsNames[IndexDownloadedChangefeed]));
};
if (NeedValidateChecksums) {
- StartValidatingChecksum(ChangefeedDescriptionKeyFromSettings(ImportInfo->Settings, ItemIdx, ChangefeedsNames[IndexDownloadedChangefeed]), msg.Body, nextStep);
+ StartValidatingChecksum(ChangefeedDescriptionKeyFromSettings(ImportInfo, ItemIdx, ChangefeedsNames[IndexDownloadedChangefeed]), content, nextStep);
} else {
nextStep();
- }
+ }
}
void HandleTopic(TEvExternalStorage::TEvGetObjectResponse::TPtr& ev) {
@@ -364,15 +508,20 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
return;
}
+ TString content;
+ if (!MaybeDecrypt(msg.Body, content, NBackup::EBackupFileType::TableTopic)) {
+ return;
+ }
+
Y_ABORT_UNLESS(ItemIdx < ImportInfo->Items.size());
auto& item = ImportInfo->Items.at(ItemIdx);
LOG_T("Trying to parse topic"
<< ": self# " << SelfId()
- << ", body# " << SubstGlobalCopy(msg.Body, "\n", "\\n"));
+ << ", body# " << SubstGlobalCopy(content, "\n", "\\n"));
Ydb::Topic::DescribeTopicResult topic;
- if (!google::protobuf::TextFormat::ParseFromString(msg.Body, &topic)) {
+ if (!google::protobuf::TextFormat::ParseFromString(content, &topic)) {
return Reply(false, "Cannot parse topic");
}
*item.Changefeeds.MutableChangefeeds(IndexDownloadedChangefeed)->MutableTopic() = std::move(topic);
@@ -382,22 +531,15 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
Reply();
} else {
Become(&TThis::StateDownloadChangefeeds);
- HeadObject(ChangefeedDescriptionKeyFromSettings(ImportInfo->Settings, ItemIdx, ChangefeedsNames[IndexDownloadedChangefeed]));
+ HeadObject(ChangefeedDescriptionKeyFromSettings(ImportInfo, ItemIdx, ChangefeedsNames[IndexDownloadedChangefeed]));
}
};
if (NeedValidateChecksums) {
- StartValidatingChecksum(TopicDescriptionKeyFromSettings(ImportInfo->Settings, ItemIdx, ChangefeedsNames[IndexDownloadedChangefeed]), msg.Body, nextStep);
+ StartValidatingChecksum(TopicDescriptionKeyFromSettings(ImportInfo, ItemIdx, ChangefeedsNames[IndexDownloadedChangefeed]), content, nextStep);
} else {
nextStep();
- }
- }
-
- void ListObjects(const TString& prefix) {
- auto request = Model::ListObjectsRequest()
- .WithPrefix(prefix);
-
- Send(Client, new TEvExternalStorage::TEvListObjectsRequest(request));
+ }
}
template <typename T>
@@ -431,37 +573,14 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
Resize(item.Changefeeds.MutableChangefeeds(), ChangefeedsNames.size());
Y_ABORT_UNLESS(IndexDownloadedChangefeed < ChangefeedsNames.size());
- HeadObject(ChangefeedDescriptionKeyFromSettings(ImportInfo->Settings, ItemIdx, ChangefeedsNames[IndexDownloadedChangefeed]));
+ HeadObject(ChangefeedDescriptionKeyFromSettings(ImportInfo, ItemIdx, ChangefeedsNames[IndexDownloadedChangefeed]));
} else {
Reply();
}
}
- template <typename TResult>
- bool CheckResult(const TResult& result, const TStringBuf marker) {
- if (result.IsSuccess()) {
- return true;
- }
-
- LOG_E("Error at '" << marker << "'"
- << ": self# " << SelfId()
- << ", error# " << result);
- MaybeRetry(result.GetError());
-
- return false;
- }
-
- void MaybeRetry(const Aws::S3::S3Error& error) {
- if (Attempt < Retries && error.ShouldRetry()) {
- Delay = Min(Delay * ++Attempt, MaxDelay);
- Schedule(Delay, new TEvents::TEvWakeup());
- } else {
- Reply(false, TStringBuilder() << "S3 error: " << error.GetMessage().c_str());
- }
- }
-
- void Reply(bool success = true, const TString& error = TString()) {
+ void Reply(bool success = true, const TString& error = TString()) override {
LOG_I("Reply"
<< ": self# " << SelfId()
<< ", success# " << success
@@ -471,26 +590,9 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
PassAway();
}
- void PassAway() override {
- Send(Client, new TEvents::TEvPoisonPill());
- TActor::PassAway();
- }
-
- void CreateClient() {
- if (Client) {
- Send(Client, new TEvents::TEvPoisonPill());
- }
- Client = RegisterWithSameMailbox(CreateS3Wrapper(ExternalStorageConfig->ConstructStorageOperator()));
- }
-
void ListChangefeeds() {
CreateClient();
- ListObjects(ImportInfo->Settings.items(ItemIdx).source_prefix());
- }
-
- void Download(const TString& key) {
- CreateClient();
- HeadObject(key);
+ ListObjects(ImportInfo->GetItemSrcPrefix(ItemIdx));
}
void DownloadMetadata() {
@@ -506,7 +608,7 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
}
void DownloadChecksum() {
- Download(NBackup::ChecksumKey(CurrentObjectKey));
+ Download(NBackup::ChecksumKey(CurrentObjectKey), false);
}
void DownloadChangefeeds() {
@@ -514,10 +616,6 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
ListChangefeeds();
}
- void ResetRetries() {
- Attempt = 0;
- }
-
void StartDownloadingScheme() {
ResetRetries();
DownloadScheme();
@@ -546,17 +644,15 @@ class TSchemeGetter: public TActorBootstrapped<TSchemeGetter> {
}
public:
- explicit TSchemeGetter(const TActorId& replyTo, TImportInfo::TPtr importInfo, ui32 itemIdx)
- : ExternalStorageConfig(new NWrappers::NExternalStorage::TS3ExternalStorageConfig(importInfo->Settings))
+ explicit TSchemeGetter(const TActorId& replyTo, TImportInfo::TPtr importInfo, ui32 itemIdx, TMaybe<NBackup::TEncryptionIV> iv)
+ : TGetterFromS3<TSchemeGetter>(std::move(importInfo), std::move(iv))
, ReplyTo(replyTo)
- , ImportInfo(importInfo)
, ItemIdx(itemIdx)
- , MetadataKey(MetadataKeyFromSettings(importInfo->Settings, itemIdx))
- , SchemeKey(SchemeKeyFromSettings(importInfo->Settings, itemIdx, "scheme.pb"))
- , PermissionsKey(PermissionsKeyFromSettings(importInfo->Settings, itemIdx))
- , Retries(importInfo->Settings.number_of_retries())
- , NeedDownloadPermissions(!importInfo->Settings.no_acl())
- , NeedValidateChecksums(!importInfo->Settings.skip_checksum_validation())
+ , MetadataKey(MetadataKeyFromSettings(ImportInfo, itemIdx))
+ , SchemeKey(SchemeKeyFromSettings(ImportInfo, itemIdx, "scheme.pb"))
+ , PermissionsKey(PermissionsKeyFromSettings(ImportInfo, itemIdx))
+ , NeedDownloadPermissions(!ImportInfo->Settings.no_acl())
+ , NeedValidateChecksums(!ImportInfo->Settings.skip_checksum_validation())
{
}
@@ -627,9 +723,7 @@ public:
}
private:
- NWrappers::IExternalStorageConfig::TPtr ExternalStorageConfig;
const TActorId ReplyTo;
- TImportInfo::TPtr ImportInfo;
const ui32 ItemIdx;
const TString MetadataKey;
@@ -638,16 +732,8 @@ private:
TVector<TString> ChangefeedsNames;
ui64 IndexDownloadedChangefeed = 0;
- const ui32 Retries;
- ui32 Attempt = 0;
-
- TDuration Delay = TDuration::Minutes(1);
- static constexpr TDuration MaxDelay = TDuration::Minutes(10);
-
const bool NeedDownloadPermissions = true;
- TActorId Client;
-
bool NeedValidateChecksums = true;
TString CurrentObjectChecksum;
@@ -655,8 +741,192 @@ private:
std::function<void()> ChecksumValidatedCallback;
}; // TSchemeGetter
-IActor* CreateSchemeGetter(const TActorId& replyTo, TImportInfo::TPtr importInfo, ui32 itemIdx) {
- return new TSchemeGetter(replyTo, importInfo, itemIdx);
+class TSchemaMappingGetter : public TGetterFromS3<TSchemaMappingGetter> {
+ static TString SchemaMappingKeyFromSettings(const TImportInfo::TPtr& importInfo) {
+ return TStringBuilder() << importInfo->Settings.source_prefix() << "/SchemaMapping/mapping.json";
+ }
+
+ static TString SchemaMappingMetadataKeyFromSettings(const TImportInfo::TPtr& importInfo) {
+ return TStringBuilder() << importInfo->Settings.source_prefix() << "/SchemaMapping/metadata.json";
+ }
+
+ void HandleMetadata(TEvExternalStorage::TEvHeadObjectResponse::TPtr& ev) {
+ const auto& result = ev->Get()->Result;
+
+ LOG_D("HandleMetadata TEvExternalStorage::TEvHeadObjectResponse"
+ << ": self# " << SelfId()
+ << ", result# " << result);
+
+ if (!CheckResult(result, "HeadObject")) {
+ return;
+ }
+
+ GetObject(MetadataKey, result.GetResult().GetContentLength());
+ }
+
+ void HandleSchemaMapping(TEvExternalStorage::TEvHeadObjectResponse::TPtr& ev) {
+ const auto& result = ev->Get()->Result;
+
+ LOG_D("HandleSchemaMapping TEvExternalStorage::TEvHeadObjectResponse"
+ << ": self# " << SelfId()
+ << ", result# " << result);
+
+ if (!CheckResult(result, "HeadObject")) {
+ return;
+ }
+
+ GetObject(SchemaMappingKey, result.GetResult().GetContentLength());
+ }
+
+ void HandleMetadata(TEvExternalStorage::TEvGetObjectResponse::TPtr& ev) {
+ const auto& msg = *ev->Get();
+ const auto& result = msg.Result;
+
+ LOG_D("HandleMetadata TEvExternalStorage::TEvGetObjectResponse"
+ << ": self# " << SelfId()
+ << ", result# " << result);
+
+ if (!CheckResult(result, "GetObject")) {
+ return;
+ }
+
+ TString content;
+ if (!MaybeDecryptAndSaveIV(msg.Body, content)) {
+ return;
+ }
+ ImportInfo->ExportIV = IV;
+
+ LOG_T("Trying to parse metadata"
+ << ": self# " << SelfId()
+ << ", body# " << SubstGlobalCopy(content, "\n", "\\n"));
+
+ if (!ProcessMetadata(content)) {
+ return;
+ }
+
+ auto nextStep = [this]() {
+ StartDownloadingSchemaMapping();
+ };
+
+ nextStep();
+ }
+
+ void HandleSchemaMapping(TEvExternalStorage::TEvGetObjectResponse::TPtr& ev) {
+ const auto& msg = *ev->Get();
+ const auto& result = msg.Result;
+
+ LOG_D("HandleSchemaMapping TEvExternalStorage::TEvGetObjectResponse"
+ << ": self# " << SelfId()
+ << ", result# " << result);
+
+ if (!CheckResult(result, "GetObject")) {
+ return;
+ }
+
+ TString content;
+ if (!MaybeDecrypt(msg.Body, content, NBackup::EBackupFileType::SchemaMapping)) {
+ return;
+ }
+
+ LOG_T("Trying to parse scheme"
+ << ": self# " << SelfId()
+ << ", schemaMappingKey# " << SchemaMappingKey
+ << ", body# " << SubstGlobalCopy(content, "\n", "\\n"));
+
+ ImportInfo->SchemaMapping.ConstructInPlace();
+ TString error;
+ if (!ImportInfo->SchemaMapping->Deserialize(content, error)) {
+ Reply(false, error);
+ return;
+ }
+
+ Reply();
+ }
+
+ void Reply(bool success = true, const TString& error = TString()) override {
+ LOG_I("Reply"
+ << ": self# " << SelfId()
+ << ", success# " << success
+ << ", error# " << error);
+
+ Send(ReplyTo, new TEvPrivate::TEvImportSchemaMappingReady(ImportInfo->Id, success, error));
+ PassAway();
+ }
+
+ void DownloadMetadata() {
+ Download(MetadataKey);
+ }
+
+ void DownloadSchemaMapping() {
+ Download(SchemaMappingKey);
+ }
+
+ void StartDownloadingSchemaMapping() {
+ ResetRetries();
+ DownloadSchemaMapping();
+ Become(&TThis::StateDownloadSchemaMapping);
+ }
+
+ bool ProcessMetadata(const TString& content) {
+ NJson::TJsonValue json;
+ if (!NJson::ReadJsonTree(content, &json)) {
+ Reply(false, "Failed to parse metadata json");
+ return false;
+ }
+ const NJson::TJsonValue& kind = json["kind"];
+ if (kind.GetString() != "SchemaMappingV0") {
+ Reply(false, TStringBuilder() << "Unknown kind of metadata json: " << kind.GetString());
+ return false;
+ }
+ return true;
+ }
+
+public:
+ TSchemaMappingGetter(const TActorId& replyTo, TImportInfo::TPtr importInfo)
+ : TGetterFromS3<TSchemaMappingGetter>(std::move(importInfo), Nothing())
+ , ReplyTo(replyTo)
+ , MetadataKey(SchemaMappingMetadataKeyFromSettings(ImportInfo))
+ , SchemaMappingKey(SchemaMappingKeyFromSettings(ImportInfo))
+ {
+ }
+
+ void Bootstrap() {
+ DownloadMetadata();
+ Become(&TThis::StateDownloadMetadata);
+ }
+
+ STATEFN(StateDownloadMetadata) {
+ switch (ev->GetTypeRewrite()) {
+ hFunc(TEvExternalStorage::TEvHeadObjectResponse, HandleMetadata);
+ hFunc(TEvExternalStorage::TEvGetObjectResponse, HandleMetadata);
+
+ sFunc(TEvents::TEvWakeup, DownloadMetadata);
+ sFunc(TEvents::TEvPoisonPill, PassAway);
+ }
+ }
+
+ STATEFN(StateDownloadSchemaMapping) {
+ switch (ev->GetTypeRewrite()) {
+ hFunc(TEvExternalStorage::TEvHeadObjectResponse, HandleSchemaMapping);
+ hFunc(TEvExternalStorage::TEvGetObjectResponse, HandleSchemaMapping);
+
+ sFunc(TEvents::TEvWakeup, DownloadSchemaMapping);
+ sFunc(TEvents::TEvPoisonPill, PassAway);
+ }
+ }
+
+private:
+ const TActorId ReplyTo;
+ const TString MetadataKey;
+ const TString SchemaMappingKey;
+}; // TSchemaMappingGetter
+
+IActor* CreateSchemeGetter(const TActorId& replyTo, TImportInfo::TPtr importInfo, ui32 itemIdx, TMaybe<NBackup::TEncryptionIV> iv) {
+ return new TSchemeGetter(replyTo, std::move(importInfo), itemIdx, std::move(iv));
+}
+
+IActor* CreateSchemaMappingGetter(const TActorId& replyTo, TImportInfo::TPtr importInfo) {
+ return new TSchemaMappingGetter(replyTo, std::move(importInfo));
}
} // NSchemeShard
diff --git a/ydb/core/tx/schemeshard/schemeshard_import_scheme_getter.h b/ydb/core/tx/schemeshard/schemeshard_import_getters.h
index 24b67c7c65..32b108c20e 100644
--- a/ydb/core/tx/schemeshard/schemeshard_import_scheme_getter.h
+++ b/ydb/core/tx/schemeshard/schemeshard_import_getters.h
@@ -6,7 +6,9 @@
namespace NKikimr {
namespace NSchemeShard {
-IActor* CreateSchemeGetter(const TActorId& replyTo, TImportInfo::TPtr importInfo, ui32 itemIdx);
+IActor* CreateSchemeGetter(const TActorId& replyTo, TImportInfo::TPtr importInfo, ui32 itemIdx, TMaybe<NBackup::TEncryptionIV> iv);
+
+IActor* CreateSchemaMappingGetter(const TActorId& replyTo, TImportInfo::TPtr importInfo);
} // NSchemeShard
} // NKikimr
diff --git a/ydb/core/tx/schemeshard/schemeshard_import_getters_fallback.cpp b/ydb/core/tx/schemeshard/schemeshard_import_getters_fallback.cpp
new file mode 100644
index 0000000000..6cd5f85714
--- /dev/null
+++ b/ydb/core/tx/schemeshard/schemeshard_import_getters_fallback.cpp
@@ -0,0 +1,57 @@
+#include "schemeshard_import_getters.h"
+#include "schemeshard_private.h"
+
+#include <ydb/library/actors/core/actor_bootstrapped.h>
+#include <ydb/library/actors/core/hfunc.h>
+
+namespace NKikimr {
+namespace NSchemeShard {
+
+class TSchemeGetterFallback: public TActorBootstrapped<TSchemeGetterFallback> {
+public:
+ explicit TSchemeGetterFallback(const TActorId& replyTo, TImportInfo::TPtr importInfo, ui32 itemIdx)
+ : ReplyTo(replyTo)
+ , ImportInfo(importInfo)
+ , ItemIdx(itemIdx)
+ {
+ }
+
+ void Bootstrap() {
+ Send(ReplyTo, new TEvPrivate::TEvImportSchemeReady(ImportInfo->Id, ItemIdx, false, "Imports from S3 are disabled"));
+ PassAway();
+ }
+
+private:
+ const TActorId ReplyTo;
+ TImportInfo::TPtr ImportInfo;
+ const ui32 ItemIdx;
+}; // TSchemeGetterFallback
+
+class TSchemaMappingGetterFallback: public TActorBootstrapped<TSchemaMappingGetterFallback> {
+public:
+ explicit TSchemaMappingGetterFallback(const TActorId& replyTo, TImportInfo::TPtr importInfo)
+ : ReplyTo(replyTo)
+ , ImportInfo(importInfo)
+ {
+ }
+
+ void Bootstrap() {
+ Send(ReplyTo, new TEvPrivate::TEvImportSchemaMappingReady(ImportInfo->Id, false, "Imports from S3 are disabled"));
+ PassAway();
+ }
+
+private:
+ const TActorId ReplyTo;
+ TImportInfo::TPtr ImportInfo;
+}; // TSchemaMappingGetterFallback
+
+IActor* CreateSchemeGetter(const TActorId& replyTo, TImportInfo::TPtr importInfo, ui32 itemIdx, TMaybe<NBackup::TEncryptionIV>) {
+ return new TSchemeGetterFallback(replyTo, std::move(importInfo), itemIdx);
+}
+
+IActor* CreateSchemaMappingGetter(const TActorId& replyTo, TImportInfo::TPtr importInfo) {
+ return new TSchemaMappingGetterFallback(replyTo, std::move(importInfo));
+}
+
+} // NSchemeShard
+} // NKikimr
diff --git a/ydb/core/tx/schemeshard/schemeshard_import_scheme_getter_fallback.cpp b/ydb/core/tx/schemeshard/schemeshard_import_scheme_getter_fallback.cpp
deleted file mode 100644
index 9c3c9f91fb..0000000000
--- a/ydb/core/tx/schemeshard/schemeshard_import_scheme_getter_fallback.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-#include "schemeshard_import_scheme_getter.h"
-#include "schemeshard_private.h"
-
-#include <ydb/library/actors/core/actor_bootstrapped.h>
-#include <ydb/library/actors/core/hfunc.h>
-
-namespace NKikimr {
-namespace NSchemeShard {
-
-class TSchemeGetterFallback: public TActorBootstrapped<TSchemeGetterFallback> {
-public:
- explicit TSchemeGetterFallback(const TActorId& replyTo, TImportInfo::TPtr importInfo, ui32 itemIdx)
- : ReplyTo(replyTo)
- , ImportInfo(importInfo)
- , ItemIdx(itemIdx)
- {
- }
-
- void Bootstrap() {
- Send(ReplyTo, new TEvPrivate::TEvImportSchemeReady(ImportInfo->Id, ItemIdx, false, "Imports from S3 are disabled"));
- PassAway();
- }
-
-private:
- const TActorId ReplyTo;
- TImportInfo::TPtr ImportInfo;
- const ui32 ItemIdx;
-
-}; // TSchemeGetterFallback
-
-IActor* CreateSchemeGetter(const TActorId& replyTo, TImportInfo::TPtr importInfo, ui32 itemIdx) {
- return new TSchemeGetterFallback(replyTo, importInfo, itemIdx);
-}
-
-} // NSchemeShard
-} // NKikimr
diff --git a/ydb/core/tx/schemeshard/schemeshard_info_types.h b/ydb/core/tx/schemeshard/schemeshard_info_types.h
index 351a21adf0..08a66fd56f 100644
--- a/ydb/core/tx/schemeshard/schemeshard_info_types.h
+++ b/ydb/core/tx/schemeshard/schemeshard_info_types.h
@@ -12,6 +12,7 @@
#include <ydb/core/tx/datashard/datashard.h>
#include <ydb/core/control/lib/immediate_control_board_impl.h>
+#include <ydb/core/backup/common/encryption.h>
#include <ydb/core/backup/common/metadata.h>
#include <ydb/core/base/feature_flags.h>
#include <ydb/core/base/table_vector_index.h>
@@ -2846,12 +2847,13 @@ struct TImportInfo: public TSimpleRefCount<TImportInfo> {
enum class EState: ui8 {
Invalid = 0,
- Waiting,
- GetScheme,
- CreateSchemeObject,
- Transferring,
- BuildIndexes,
- CreateChangefeed,
+ Waiting = 1,
+ GetScheme = 2,
+ CreateSchemeObject = 3,
+ Transferring = 4,
+ BuildIndexes = 5,
+ CreateChangefeed = 6,
+ DownloadExportMetadata = 7,
Done = 240,
Cancellation = 250,
Cancelled = 251,
@@ -2875,6 +2877,7 @@ struct TImportInfo: public TSimpleRefCount<TImportInfo> {
TString DstPathName;
TPathId DstPathId;
+ TString SrcPrefix;
Ydb::Table::CreateTableRequest Scheme;
TString CreationQuery;
TMaybe<NKikimrSchemeOp::TModifyScheme> PreparedCreationQuery;
@@ -2892,6 +2895,7 @@ struct TImportInfo: public TSimpleRefCount<TImportInfo> {
int NextChangefeedIdx = 0;
TString Issue;
TPathId StreamImplPathId;
+ TMaybe<NBackup::TEncryptionIV> ExportItemIV;
TItem() = default;
@@ -2918,6 +2922,9 @@ struct TImportInfo: public TSimpleRefCount<TImportInfo> {
TPathId DomainPathId;
TMaybe<TString> UserSID;
TString PeerName; // required for making audit log records
+ TMaybe<NBackup::TEncryptionIV> ExportIV;
+ TMaybe<NBackup::TSchemaMapping> SchemaMapping;
+ TActorId SchemaMappingGetter;
EState State = EState::Invalid;
TString Issue;
@@ -2929,6 +2936,20 @@ struct TImportInfo: public TSimpleRefCount<TImportInfo> {
TInstant StartTime = TInstant::Zero();
TInstant EndTime = TInstant::Zero();
+ TString GetItemSrcPrefix(size_t i) const {
+ if (i < Items.size() && Items[i].SrcPrefix) {
+ return Items[i].SrcPrefix;
+ }
+
+ // Backward compatibility: fall back to the source prefix stored in the import settings.
+ // Note: the settings may contain no item paths at all; in that case an empty string is returned.
+ if (i < ui32(Settings.items_size())) {
+ return Settings.items(i).source_prefix();
+ }
+
+ return {};
+ }
+
explicit TImportInfo(
const ui64 id,
const TString& uid,
@@ -3005,14 +3026,6 @@ private:
struct TIndexBuildInfo: public TSimpleRefCount<TIndexBuildInfo> {
using TPtr = TIntrusivePtr<TIndexBuildInfo>;
- struct TLimits {
- ui32 MaxBatchRows = 100;
- ui32 MaxBatchBytes = 1 << 20;
- ui32 MaxShards = 100;
- ui32 MaxRetries = 50;
- };
- TLimits Limits;
-
enum class EState: ui32 {
Invalid = 0,
AlterMainTable = 5,
@@ -3091,6 +3104,8 @@ struct TIndexBuildInfo: public TSimpleRefCount<TIndexBuildInfo> {
TVector<TString> FillIndexColumns;
TVector<TString> FillDataColumns;
+ NKikimrIndexBuilder::TIndexBuildScanSettings ScanSettings;
+
TVector<TColumnBuildInfo> BuildColumns;
TString TargetName;
@@ -3195,8 +3210,8 @@ struct TIndexBuildInfo: public TSimpleRefCount<TIndexBuildInfo> {
Parent = ParentEnd();
}
- void Set(ui32 level,
- NTableIndex::TClusterId parentBegin, NTableIndex::TClusterId parent,
+ void Set(ui32 level,
+ NTableIndex::TClusterId parentBegin, NTableIndex::TClusterId parent,
NTableIndex::TClusterId childBegin, NTableIndex::TClusterId child,
ui32 state, ui64 tableSize) {
Level = level;
@@ -3208,18 +3223,18 @@ struct TIndexBuildInfo: public TSimpleRefCount<TIndexBuildInfo> {
TableSize = tableSize;
}
- NKikimrTxDataShard::TEvLocalKMeansRequest::EState GetUpload() const {
+ NKikimrTxDataShard::EKMeansState GetUpload() const {
if (Level == 1) {
if (NeedsAnotherLevel()) {
- return NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_BUILD;
+ return NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_BUILD;
} else {
- return NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_MAIN_TO_POSTING;
+ return NKikimrTxDataShard::EKMeansState::UPLOAD_MAIN_TO_POSTING;
}
} else {
if (NeedsAnotherLevel()) {
- return NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_BUILD;
+ return NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_BUILD;
} else {
- return NKikimrTxDataShard::TEvLocalKMeansRequest::UPLOAD_BUILD_TO_POSTING;
+ return NKikimrTxDataShard::EKMeansState::UPLOAD_BUILD_TO_POSTING;
}
}
}
@@ -3363,13 +3378,12 @@ struct TIndexBuildInfo: public TSimpleRefCount<TIndexBuildInfo> {
return result;
}
};
+
TMap<TShardIdx, TShardStatus> Shards;
-
TDeque<TShardIdx> ToUploadShards;
-
THashSet<TShardIdx> InProgressShards;
-
std::vector<TShardIdx> DoneShards;
+ ui32 MaxInProgressShards = 32;
TBillingStats Processed;
TBillingStats Billed;
@@ -3588,15 +3602,15 @@ struct TIndexBuildInfo: public TSimpleRefCount<TIndexBuildInfo> {
row.template GetValueOrDefault<Schema::IndexBuild::InitiateTxDone>(
indexInfo->InitiateTxDone);
- indexInfo->Limits.MaxBatchRows =
- row.template GetValue<Schema::IndexBuild::MaxBatchRows>();
- indexInfo->Limits.MaxBatchBytes =
- row.template GetValue<Schema::IndexBuild::MaxBatchBytes>();
- indexInfo->Limits.MaxShards =
+ indexInfo->ScanSettings.SetMaxBatchRows(
+ row.template GetValue<Schema::IndexBuild::MaxBatchRows>());
+ indexInfo->ScanSettings.SetMaxBatchBytes(
+ row.template GetValue<Schema::IndexBuild::MaxBatchBytes>());
+ indexInfo->MaxInProgressShards =
row.template GetValue<Schema::IndexBuild::MaxShards>();
- indexInfo->Limits.MaxRetries =
+ indexInfo->ScanSettings.SetMaxBatchRetries(
row.template GetValueOrDefault<Schema::IndexBuild::MaxRetries>(
- indexInfo->Limits.MaxRetries);
+ indexInfo->ScanSettings.GetMaxBatchRetries()));
indexInfo->ApplyTxId =
row.template GetValueOrDefault<Schema::IndexBuild::ApplyTxId>(
@@ -3665,7 +3679,7 @@ struct TIndexBuildInfo: public TSimpleRefCount<TIndexBuildInfo> {
TSerializedTableRange bound{range};
LOG_DEBUG_S(TlsActivationContext->AsActorContext(), NKikimrServices::BUILD_INDEX,
- "AddShardStatus id# " << Id << " shard " << shardIdx <<
+ "AddShardStatus id# " << Id << " shard " << shardIdx <<
" range " << KMeans.RangeToDebugStr(bound, IsBuildPrefixedVectorIndex() ? 2 : 1));
AddParent(bound, shardIdx);
Shards.emplace(
diff --git a/ydb/core/tx/schemeshard/schemeshard_private.h b/ydb/core/tx/schemeshard/schemeshard_private.h
index 1e653640ce..d6bcba2d81 100644
--- a/ydb/core/tx/schemeshard/schemeshard_private.h
+++ b/ydb/core/tx/schemeshard/schemeshard_private.h
@@ -20,6 +20,7 @@ namespace TEvPrivate {
EvRunConditionalErase,
EvIndexBuildBilling,
EvImportSchemeReady,
+ EvImportSchemaMappingReady,
EvImportSchemeQueryResult,
EvExportSchemeUploadResult,
EvExportUploadMetadataResult,
@@ -105,6 +106,18 @@ namespace TEvPrivate {
{}
};
+ struct TEvImportSchemaMappingReady: public TEventLocal<TEvImportSchemaMappingReady, EvImportSchemaMappingReady> {
+ const ui64 ImportId;
+ const bool Success;
+ const TString Error;
+
+ TEvImportSchemaMappingReady(ui64 id, bool success, const TString& error)
+ : ImportId(id)
+ , Success(success)
+ , Error(error)
+ {}
+ };
+
struct TEvImportSchemeQueryResult: public TEventLocal<TEvImportSchemeQueryResult, EvImportSchemeQueryResult> {
const ui64 ImportId;
const ui32 ItemIdx;
diff --git a/ydb/core/tx/schemeshard/schemeshard_schema.h b/ydb/core/tx/schemeshard/schemeshard_schema.h
index baa342f150..4c1c693933 100644
--- a/ydb/core/tx/schemeshard/schemeshard_schema.h
+++ b/ydb/core/tx/schemeshard/schemeshard_schema.h
@@ -1596,6 +1596,8 @@ struct Schema : NIceDb::Schema {
struct NextIndexIdx : Column<9, NScheme::NTypeIds::Uint32> {};
struct NextChangefeedIdx : Column<16, NScheme::NTypeIds::Uint32> {};
struct Issue : Column<10, NScheme::NTypeIds::Utf8> {};
+ struct SrcPrefix : Column<17, NScheme::NTypeIds::Utf8> {};
+ struct EncryptionIV : Column<18, NScheme::NTypeIds::String> {};
using TKey = TableKey<ImportId, Index>;
using TColumns = TableColumns<
@@ -1614,7 +1616,9 @@ struct Schema : NIceDb::Schema {
WaitTxId,
NextIndexIdx,
NextChangefeedIdx,
- Issue
+ Issue,
+ SrcPrefix,
+ EncryptionIV
>;
};
diff --git a/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp b/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp
index c14c0934b9..6c09f5f3c6 100644
--- a/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp
+++ b/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp
@@ -5,10 +5,75 @@
#include <ydb/core/blobstorage/base/blobstorage_shred_events.h>
#include <ydb/core/mind/bscontroller/bsc.h>
#include <ydb/core/tablet_flat/tablet_flat_executed.h>
+#include <ydb/core/testlib/actors/block_events.h>
+#include <ydb/core/testlib/storage_helpers.h>
using namespace NKikimr;
using namespace NSchemeShardUT_Private;
+namespace {
+ TTestEnv SetupEnv(TTestBasicRuntime& runtime, TVector<TIntrusivePtr<NFake::TProxyDS>>& dsProxies) {
+ TTestEnv env(runtime, TTestEnvOptions()
+ .NChannels(4)
+ .EnablePipeRetries(true)
+ .EnableSystemViews(false)
+ .DSProxies(dsProxies));
+
+ runtime.SetLogPriority(NKikimrServices::TX_PROXY, NLog::PRI_DEBUG);
+ runtime.SetLogPriority(NKikimrServices::FLAT_TX_SCHEMESHARD, NActors::NLog::PRI_TRACE);
+
+ auto info = CreateTestTabletInfo(MakeBSControllerID(), TTabletTypes::BSController);
+ CreateTestBootstrapper(runtime, info, [](const TActorId &tablet, TTabletStorageInfo *info) -> IActor* {
+ return new TFakeBSController(tablet, info);
+ });
+
+ runtime.GetAppData().FeatureFlags.SetEnableDataErasure(true);
+ auto& dataErasureConfig = runtime.GetAppData().DataErasureConfig;
+ dataErasureConfig.SetDataErasureIntervalSeconds(50);
+ dataErasureConfig.SetBlobStorageControllerRequestIntervalSeconds(10);
+
+ return env;
+ }
+
+ void FillData(TTestBasicRuntime& runtime, ui64 schemeshardId, ui64& txId, const TVector<ui64>& shardsIds, TVector<TIntrusivePtr<NFake::TProxyDS>>& dsProxies, const TString& valueToDelete) {
+ TString value(size_t(100 * 1024), 't');
+ ui32 keyToDelete = 42;
+
+ for (ui32 key : xrange(100)) {
+ int partitionIdx = shardsIds.size() == 1 || key < 50 ? 0 : 1;
+ WriteRow(runtime, schemeshardId, ++txId, "/MyRoot/Database1/Simple", partitionIdx, key, key == keyToDelete ? valueToDelete : value);
+ }
+
+ auto tableVersion = TestDescribeResult(DescribePath(runtime, schemeshardId, "/MyRoot/Database1/Simple"), {NLs::PathExist});
+ for (const auto& shardsId : shardsIds) {
+ const auto result = CompactTable(runtime, shardsId, tableVersion.PathId);
+ UNIT_ASSERT_VALUES_EQUAL(result.GetStatus(), NKikimrTxDataShard::TEvCompactTableResult::OK);
+ }
+
+ DeleteRow(runtime, schemeshardId, ++txId, "/MyRoot/Database1/Simple", 0, keyToDelete);
+
+ // BlobStorage should still contain the deleted value
+ UNIT_ASSERT(BlobStorageContains(dsProxies, valueToDelete));
+ }
+
+ void CheckDataErasureStatus(TTestBasicRuntime& runtime, TActorId sender, TVector<TIntrusivePtr<NFake::TProxyDS>>& dsProxies, const TString& valueToDelete, bool completed) {
+ auto request = MakeHolder<TEvSchemeShard::TEvDataErasureInfoRequest>();
+ runtime.SendToPipe(TTestTxConfig::SchemeShard, sender, request.Release(), 0, GetPipeConfigWithRetries());
+
+ TAutoPtr<IEventHandle> handle;
+ auto response = runtime.GrabEdgeEventRethrow<TEvSchemeShard::TEvDataErasureInfoResponse>(handle);
+
+ UNIT_ASSERT_EQUAL_C(response->Record.GetGeneration(), 1, response->Record.GetGeneration());
+ if (completed) {
+ UNIT_ASSERT_EQUAL(response->Record.GetStatus(), NKikimrScheme::TEvDataErasureInfoResponse::COMPLETED);
+ UNIT_ASSERT(!BlobStorageContains(dsProxies, valueToDelete));
+ } else {
+ UNIT_ASSERT_EQUAL(response->Record.GetStatus(), NKikimrScheme::TEvDataErasureInfoResponse::IN_PROGRESS_TENANT);
+ UNIT_ASSERT(BlobStorageContains(dsProxies, valueToDelete));
+ }
+ }
+}
+
Y_UNIT_TEST_SUITE(TestDataErasure) {
Y_UNIT_TEST(SimpleDataErasureTest) {
TTestBasicRuntime runtime;
@@ -182,4 +247,176 @@ Y_UNIT_TEST_SUITE(TestDataErasure) {
RunDataErasure(2);
RunDataErasure(3);
}
+
+ Y_UNIT_TEST(DataErasureWithCopyTable) {
+ TTestBasicRuntime runtime;
+ TVector<TIntrusivePtr<NFake::TProxyDS>> dsProxies {
+ MakeIntrusive<NFake::TProxyDS>(TGroupId::FromValue(0)),
+ };
+ auto env = SetupEnv(runtime, dsProxies);
+ auto sender = runtime.AllocateEdgeActor();
+ RebootTablet(runtime, TTestTxConfig::SchemeShard, sender);
+
+ ui64 txId = 100;
+
+ auto schemeshardId = CreateTestSubdomain(runtime, env, &txId, "Database1");
+ auto shards = GetTableShards(runtime, schemeshardId, "/MyRoot/Database1/Simple");
+ TString value(size_t(100 * 1024), 'd');
+ FillData(runtime, schemeshardId, txId, shards, dsProxies, value);
+
+ // catch and hold borrow returns
+ TBlockEvents<TEvDataShard::TEvReturnBorrowedPart> borrowReturns(runtime);
+
+ TestCopyTable(runtime, schemeshardId, ++txId, "/MyRoot/Database1", "SimpleCopy", "/MyRoot/Database1/Simple");
+
+ runtime.WaitFor("borrow return", [&borrowReturns]{ return borrowReturns.size() >= 1; });
+
+ // data cleanup should not be finished due to held borrow returns
+ CheckDataErasureStatus(runtime, sender, dsProxies, value, false);
+
+ // return borrow
+ borrowReturns.Stop().Unblock();
+
+ TDispatchOptions options;
+ options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvBlobStorage::EvControllerShredResponse, 3));
+ runtime.DispatchEvents(options);
+
+ // data cleanup should be finished after returned borrows
+ CheckDataErasureStatus(runtime, sender, dsProxies, value, true);
+ }
+
+ Y_UNIT_TEST(DataErasureWithSplit) {
+ TTestBasicRuntime runtime;
+ TVector<TIntrusivePtr<NFake::TProxyDS>> dsProxies {
+ MakeIntrusive<NFake::TProxyDS>(TGroupId::FromValue(0)),
+ };
+ auto env = SetupEnv(runtime, dsProxies);
+ auto sender = runtime.AllocateEdgeActor();
+ RebootTablet(runtime, TTestTxConfig::SchemeShard, sender);
+
+ ui64 txId = 100;
+
+ auto schemeshardId = CreateTestSubdomain(runtime, env, &txId, "Database1", false);
+
+ TestCreateTable(runtime, schemeshardId, ++txId, "/MyRoot/Database1",
+ R"____(
+ Name: "Simple"
+ Columns { Name: "key1" Type: "Uint32"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["key1"]
+ )____");
+ env.TestWaitNotification(runtime, txId, schemeshardId);
+ auto shards1 = GetTableShards(runtime, schemeshardId, "/MyRoot/Database1/Simple");
+ UNIT_ASSERT_VALUES_EQUAL(shards1.size(), 1);
+
+ TString valueToDelete(size_t(100 * 1024), 'd');
+ FillData(runtime, schemeshardId, txId, shards1, dsProxies, valueToDelete);
+
+ // block borrow returns to suspend SplitTable
+ TBlockEvents<TEvDataShard::TEvReturnBorrowedPart> borrowReturns(runtime);
+
+ // block CollectGarbage requests to suspend DataCleanup
+ TBlockEvents<TEvBlobStorage::TEvCollectGarbage> collectGarbageReqs(runtime);
+ runtime.WaitFor("collect garbage", [&collectGarbageReqs]{ return collectGarbageReqs.size() >= 1; });
+
+ TestSplitTable(runtime, schemeshardId, ++txId, "/MyRoot/Database1/Simple", Sprintf(
+ R"(
+ SourceTabletId: %lu
+ SplitBoundary {
+ KeyPrefix {
+ Tuple { Optional { Uint32: 50 } }
+ }
+ }
+ )", shards1.at(0)));
+ env.TestWaitNotification(runtime, txId, schemeshardId);
+
+ runtime.WaitFor("borrow return", [&borrowReturns]{ return borrowReturns.size() >= 1; });
+
+ // DataErasure should be in progress because SplitTable and DataCleanup have been suspended
+ CheckDataErasureStatus(runtime, sender, dsProxies, valueToDelete, false);
+
+ auto shards2 = GetTableShards(runtime, schemeshardId, "/MyRoot/Database1/Simple");
+ UNIT_ASSERT_VALUES_EQUAL(shards2.size(), 2);
+
+ collectGarbageReqs.Stop().Unblock();
+ borrowReturns.Stop().Unblock();
+
+ TDispatchOptions options;
+ options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvBlobStorage::EvControllerShredResponse, 3));
+ runtime.DispatchEvents(options);
+
+ // now data cleanup should be finished
+ CheckDataErasureStatus(runtime, sender, dsProxies, valueToDelete, true);
+ }
+
+ Y_UNIT_TEST(DataErasureWithMerge) {
+ TTestBasicRuntime runtime;
+ TVector<TIntrusivePtr<NFake::TProxyDS>> dsProxies {
+ MakeIntrusive<NFake::TProxyDS>(TGroupId::FromValue(0)),
+ };
+ auto env = SetupEnv(runtime, dsProxies);
+ auto sender = runtime.AllocateEdgeActor();
+ RebootTablet(runtime, TTestTxConfig::SchemeShard, sender);
+
+ ui64 txId = 100;
+
+ auto schemeshardId = CreateTestSubdomain(runtime, env, &txId, "Database1", false);
+
+ TestCreateTable(runtime, schemeshardId, ++txId, "/MyRoot/Database1",
+ R"____(
+ Name: "Simple"
+ Columns { Name: "key1" Type: "Uint32"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["key1"]
+ SplitBoundary {
+ KeyPrefix {
+ Tuple { Optional { Uint32: 50 } }
+ }
+ }
+ PartitionConfig {
+ PartitioningPolicy {
+ MinPartitionsCount: 1
+ MaxPartitionsCount: 2
+ }
+ }
+ )____");
+ env.TestWaitNotification(runtime, txId, schemeshardId);
+ auto shards1 = GetTableShards(runtime, schemeshardId, "/MyRoot/Database1/Simple");
+ UNIT_ASSERT_VALUES_EQUAL(shards1.size(), 2);
+
+ TString valueToDelete(size_t(100 * 1024), 'd');
+ FillData(runtime, schemeshardId, txId, shards1, dsProxies, valueToDelete);
+
+ // block borrow returns to suspend the table merge
+ TBlockEvents<TEvDataShard::TEvReturnBorrowedPart> borrowReturns(runtime);
+
+ // block CollectGarbage requests to suspend DataCleanup
+ TBlockEvents<TEvBlobStorage::TEvCollectGarbage> collectGarbageReqs(runtime);
+ runtime.WaitFor("collect garbage", [&collectGarbageReqs]{ return collectGarbageReqs.size() >= 1; });
+
+ TestSplitTable(runtime, schemeshardId, ++txId, "/MyRoot/Database1/Simple", Sprintf(
+ R"(
+ SourceTabletId: %lu
+ SourceTabletId: %lu
+ )", shards1.at(0), shards1.at(1)));
+ env.TestWaitNotification(runtime, txId, schemeshardId);
+
+ runtime.WaitFor("borrow return", [&borrowReturns]{ return borrowReturns.size() >= 1; });
+
+ // DataErasure should be in progress because the merge and DataCleanup have been suspended
+ CheckDataErasureStatus(runtime, sender, dsProxies, valueToDelete, false);
+
+ auto shards2 = GetTableShards(runtime, schemeshardId, "/MyRoot/Database1/Simple");
+ UNIT_ASSERT_VALUES_EQUAL(shards2.size(), 1);
+
+ collectGarbageReqs.Stop().Unblock();
+ borrowReturns.Stop().Unblock();
+
+ TDispatchOptions options;
+ options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvBlobStorage::EvControllerShredResponse, 3));
+ runtime.DispatchEvents(options);
+
+ // now data cleanup should be finished
+ CheckDataErasureStatus(runtime, sender, dsProxies, valueToDelete, true);
+ }
}
diff --git a/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.cpp b/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.cpp
index f41d0c315c..5d9ef8086a 100644
--- a/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.cpp
+++ b/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.cpp
@@ -43,7 +43,7 @@ void TFakeBSController::Handle(NKikimr::TEvBlobStorage::TEvControllerShredReques
ctx.Send(ev->Sender, new NKikimr::TEvBlobStorage::TEvControllerShredResponse(Generation, Completed, Progress));
}
-ui64 CreateTestSubdomain(NActors::TTestActorRuntime& runtime, TTestEnv& env, ui64* txId, const TString& name) {
+ui64 CreateTestSubdomain(NActors::TTestActorRuntime& runtime, TTestEnv& env, ui64* txId, const TString& name, bool addTable) {
TestCreateExtSubDomain(runtime, ++(*txId), "/MyRoot", Sprintf(R"(
Name: "%s"
)", name.c_str()));
@@ -74,15 +74,17 @@ ui64 CreateTestSubdomain(NActors::TTestActorRuntime& runtime, TTestEnv& env, ui6
NLs::ExtractTenantSchemeshard(&schemeshardId)
});
- TestCreateTable(runtime, schemeshardId, ++(*txId), TStringBuilder() << "/MyRoot/" << name,
- R"____(
- Name: "Simple"
- Columns { Name: "key1" Type: "Uint32"}
- Columns { Name: "Value" Type: "Utf8"}
- KeyColumnNames: ["key1"]
- UniformPartitionsCount: 2
- )____");
- env.TestWaitNotification(runtime, *txId, schemeshardId);
+ if (addTable) {
+ TestCreateTable(runtime, schemeshardId, ++(*txId), TStringBuilder() << "/MyRoot/" << name,
+ R"____(
+ Name: "Simple"
+ Columns { Name: "key1" Type: "Uint32"}
+ Columns { Name: "Value" Type: "Utf8"}
+ KeyColumnNames: ["key1"]
+ UniformPartitionsCount: 2
+ )____");
+ env.TestWaitNotification(runtime, *txId, schemeshardId);
+ }
return schemeshardId;
}
diff --git a/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.h b/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.h
index 95e6cd6c3c..3622561ad7 100644
--- a/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.h
+++ b/ydb/core/tx/schemeshard/ut_helpers/data_erasure_helpers.h
@@ -17,7 +17,7 @@ namespace NSchemeShardUT_Private {
class TTestEnv;
-ui64 CreateTestSubdomain(NActors::TTestActorRuntime& runtime, TTestEnv& env, ui64* txId, const TString& name);
+ui64 CreateTestSubdomain(NActors::TTestActorRuntime& runtime, TTestEnv& env, ui64* txId, const TString& name, bool addTable = true);
class TFakeBSController : public NActors::TActor<TFakeBSController>, public NKikimr::NTabletFlatExecutor::TTabletExecutedFlat {
void DefaultSignalTabletActive(const NActors::TActorContext&) override;
diff --git a/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp b/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp
index dc4b25bb00..e1a7ce6dee 100644
--- a/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp
+++ b/ydb/core/tx/schemeshard/ut_helpers/helpers.cpp
@@ -1403,6 +1403,15 @@ namespace NSchemeShardUT_Private {
return result.GetValue().GetStruct(0).GetOptional().GetOptional().GetStruct(0).GetOptional().GetUint64();
}
+ TVector<ui64> GetTableShards(TTestActorRuntime& runtime, ui64 schemeShard, const TString& path) {
+ TVector<ui64> shards;
+ const auto tableDescription = DescribePath(runtime, schemeShard, path, true);
+ for (const auto& part : tableDescription.GetPathDescription().GetTablePartitions()) {
+ shards.emplace_back(part.GetDatashardId());
+ }
+ return shards;
+ }
+
NLs::TCheckFunc ShardsIsReady(TTestActorRuntime& runtime) {
return [&] (const NKikimrScheme::TEvDescribeSchemeResult& record) {
TVector<ui64> datashards;
@@ -1732,7 +1741,7 @@ namespace NSchemeShardUT_Private {
TEvIndexBuilder::TEvCreateRequest* CreateBuildIndexRequest(ui64 id, const TString& dbName, const TString& src, const TBuildIndexConfig& cfg) {
NKikimrIndexBuilder::TIndexBuildSettings settings;
settings.set_source_path(src);
- settings.set_max_batch_rows(2);
+ settings.MutableScanSettings()->SetMaxBatchRows(1);
settings.set_max_shards_in_flight(2);
Ydb::Table::TableIndex& index = *settings.mutable_index();
@@ -1786,7 +1795,7 @@ namespace NSchemeShardUT_Private {
std::unique_ptr<TEvIndexBuilder::TEvCreateRequest> CreateBuildColumnRequest(ui64 id, const TString& dbName, const TString& src, const TString& columnName, const Ydb::TypedValue& literal) {
NKikimrIndexBuilder::TIndexBuildSettings settings;
settings.set_source_path(src);
- settings.set_max_batch_rows(2);
+ settings.MutableScanSettings()->SetMaxBatchRows(1);
settings.set_max_shards_in_flight(2);
auto* col = settings.mutable_column_build_operation()->add_column();
@@ -2441,11 +2450,12 @@ namespace NSchemeShardUT_Private {
}
NKikimrTxDataShard::TEvCompactTableResult CompactTable(
- TTestActorRuntime& runtime, ui64 shardId, const TTableId& tableId, bool compactBorrowed)
+ TTestActorRuntime& runtime, ui64 shardId, const TTableId& tableId, bool compactBorrowed, bool compactSinglePartedShards)
{
auto sender = runtime.AllocateEdgeActor();
auto request = MakeHolder<TEvDataShard::TEvCompactTable>(tableId.PathId);
request->Record.SetCompactBorrowed(compactBorrowed);
+ request->Record.SetCompactSinglePartedShards(compactSinglePartedShards);
runtime.SendToPipe(shardId, sender, request.Release(), 0, GetPipeConfigWithRetries());
auto ev = runtime.GrabEdgeEventRethrow<TEvDataShard::TEvCompactTableResult>(sender);
@@ -2547,8 +2557,18 @@ namespace NSchemeShardUT_Private {
UNIT_ASSERT_C(evResponse->Get()->Record.GetStatus() == NKikimrTxDataShard::TError::OK, "Status: " << evResponse->Get()->Record.GetStatus() << " Issues: " << evResponse->Get()->Record.GetErrorDescription());
}
- void WriteRow(TTestActorRuntime& runtime, const ui64 txId, const TString& tablePath, int partitionIdx, const ui32 key, const TString& value, bool successIsExpected) {
- auto tableDesc = DescribePath(runtime, tablePath, true, true);
+ void WriteOp(
+ TTestActorRuntime& runtime,
+ ui64 schemeshardId,
+ const ui64 txId,
+ const TString& tablePath,
+ int partitionIdx,
+ NKikimrDataEvents::TEvWrite_TOperation::EOperationType operationType,
+ const std::vector<ui32>& columnIds,
+ TSerializedCellMatrix&& data,
+ bool successIsExpected)
+ {
+ auto tableDesc = DescribePath(runtime, schemeshardId, tablePath, true, true);
const auto& pathDesc = tableDesc.GetPathDescription();
TTableId tableId(pathDesc.GetSelf().GetSchemeshardId(), pathDesc.GetSelf().GetPathId(), pathDesc.GetTable().GetTableSchemaVersion());
@@ -2558,15 +2578,9 @@ namespace NSchemeShardUT_Private {
const auto& sender = runtime.AllocateEdgeActor();
- std::vector<ui32> columnIds{1, 2};
-
- TVector<TCell> cells{TCell((const char*)&key, sizeof(ui32)), TCell(value.c_str(), value.size())};
-
- TSerializedCellMatrix matrix(cells, 1, 2);
-
auto evWrite = std::make_unique<NKikimr::NEvents::TDataEvents::TEvWrite>(txId, NKikimrDataEvents::TEvWrite::MODE_IMMEDIATE);
- ui64 payloadIndex = NKikimr::NEvWrite::TPayloadWriter<NKikimr::NEvents::TDataEvents::TEvWrite>(*evWrite).AddDataToPayload(std::move(matrix.ReleaseBuffer()));
- evWrite->AddOperation(NKikimrDataEvents::TEvWrite::TOperation::OPERATION_UPSERT, tableId, columnIds, payloadIndex, NKikimrDataEvents::FORMAT_CELLVEC);
+ ui64 payloadIndex = NKikimr::NEvWrite::TPayloadWriter<NKikimr::NEvents::TDataEvents::TEvWrite>(*evWrite).AddDataToPayload(std::move(data.ReleaseBuffer()));
+ evWrite->AddOperation(operationType, tableId, columnIds, payloadIndex, NKikimrDataEvents::FORMAT_CELLVEC);
ForwardToTablet(runtime, datashardTabletId, sender, evWrite.release());
@@ -2576,6 +2590,32 @@ namespace NSchemeShardUT_Private {
UNIT_ASSERT_C(successIsExpected == (status == NKikimrDataEvents::TEvWriteResult::STATUS_COMPLETED), "Status: " << ev->Get()->Record.GetStatus() << " Issues: " << ev->Get()->Record.GetIssues());
}
+ void WriteRow(TTestActorRuntime& runtime, ui64 schemeshardId, const ui64 txId, const TString& tablePath, int partitionIdx, const ui32 key, const TString& value, bool successIsExpected) {
+ std::vector<ui32> columnIds{1, 2};
+
+ TVector<TCell> cells{TCell((const char*)&key, sizeof(ui32)), TCell(value.c_str(), value.size())};
+ TSerializedCellMatrix matrix(cells, 1, 2);
+
+ WriteOp(runtime, schemeshardId, txId, tablePath, partitionIdx, NKikimrDataEvents::TEvWrite::TOperation::OPERATION_UPSERT, columnIds, std::move(matrix), successIsExpected);
+ }
+
+ void WriteRow(TTestActorRuntime& runtime, const ui64 txId, const TString& tablePath, int partitionIdx, const ui32 key, const TString& value, bool successIsExpected) {
+ WriteRow(runtime, TTestTxConfig::SchemeShard, txId, tablePath, partitionIdx, key, value, successIsExpected);
+ }
+
+ void DeleteRow(TTestActorRuntime& runtime, ui64 schemeshardId, const ui64 txId, const TString& tablePath, int partitionIdx, const ui32 key, bool successIsExpected) {
+ std::vector<ui32> columnIds{1};
+
+ TVector<TCell> cells{TCell((const char*)&key, sizeof(ui32))};
+ TSerializedCellMatrix matrix(cells, 1, 1);
+
+ WriteOp(runtime, schemeshardId, txId, tablePath, partitionIdx, NKikimrDataEvents::TEvWrite::TOperation::OPERATION_DELETE, columnIds, std::move(matrix), successIsExpected);
+ }
+
+ void DeleteRow(TTestActorRuntime& runtime, const ui64 txId, const TString& tablePath, int partitionIdx, const ui32 key, bool successIsExpected) {
+ DeleteRow(runtime, TTestTxConfig::SchemeShard, txId, tablePath, partitionIdx, key, successIsExpected);
+ }
+
void SendNextValRequest(TTestActorRuntime& runtime, const TActorId& sender, const TString& path) {
auto request = MakeHolder<NSequenceProxy::TEvSequenceProxy::TEvNextVal>(path);
runtime.Send(new IEventHandle(NSequenceProxy::MakeSequenceProxyServiceID(), sender, request.Release()));
diff --git a/ydb/core/tx/schemeshard/ut_helpers/helpers.h b/ydb/core/tx/schemeshard/ut_helpers/helpers.h
index f0a1c5e49f..baf3bcbe2a 100644
--- a/ydb/core/tx/schemeshard/ut_helpers/helpers.h
+++ b/ydb/core/tx/schemeshard/ut_helpers/helpers.h
@@ -477,6 +477,7 @@ namespace NSchemeShardUT_Private {
NKikimrSchemeOp::TTableDescription GetDatashardSchema(TTestActorRuntime& runtime, ui64 tabletId, ui64 tid);
+ TVector<ui64> GetTableShards(TTestActorRuntime& runtime, ui64 schemeShard, const TString& path);
NLs::TCheckFunc ShardsIsReady(TTestActorRuntime& runtime);
template <typename TCreateFunc>
@@ -618,7 +619,7 @@ namespace NSchemeShardUT_Private {
NKikimrTxDataShard::TEvCompactTableResult CompactTable(
- TTestActorRuntime& runtime, ui64 shardId, const TTableId& tableId, bool compactBorrowed = false);
+ TTestActorRuntime& runtime, ui64 shardId, const TTableId& tableId, bool compactBorrowed = false, bool compactSinglePartedShards = false);
NKikimrPQ::TDescribeResponse GetDescribeFromPQBalancer(TTestActorRuntime& runtime, ui64 balancerId);
@@ -627,7 +628,10 @@ namespace NSchemeShardUT_Private {
void UpdateRow(TTestActorRuntime& runtime, const TString& table, const ui32 key, const TString& value, ui64 tabletId = TTestTxConfig::FakeHiveTablets);
void UpdateRowPg(TTestActorRuntime& runtime, const TString& table, const ui32 key, ui32 value, ui64 tabletId = TTestTxConfig::FakeHiveTablets);
void UploadRow(TTestActorRuntime& runtime, const TString& tablePath, int partitionIdx, const TVector<ui32>& keyTags, const TVector<ui32>& valueTags, const TVector<TCell>& keys, const TVector<TCell>& values);
+ void WriteRow(TTestActorRuntime& runtime, ui64 schemeshardId, const ui64 txId, const TString& tablePath, int partitionIdx, const ui32 key, const TString& value, bool successIsExpected = true);
void WriteRow(TTestActorRuntime& runtime, const ui64 txId, const TString& tablePath, int partitionIdx, const ui32 key, const TString& value, bool successIsExpected = true);
+ void DeleteRow(TTestActorRuntime& runtime, ui64 schemeshardId, const ui64 txId, const TString& tablePath, int partitionIdx, const ui32 key, bool successIsExpected = true);
+ void DeleteRow(TTestActorRuntime& runtime, const ui64 txId, const TString& tablePath, int partitionIdx, const ui32 key, bool successIsExpected = true);
void SendNextValRequest(TTestActorRuntime& runtime, const TActorId& sender, const TString& path);
i64 WaitNextValResult(
diff --git a/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp b/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp
index d13441796c..aaab2c4240 100644
--- a/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp
+++ b/ydb/core/tx/schemeshard/ut_helpers/test_env.cpp
@@ -659,7 +659,7 @@ NSchemeShardUT_Private::TTestEnv::TTestEnv(TTestActorRuntime& runtime, const TTe
SetupSchemeCache(runtime, node, app.Domains->GetDomain(TTestTxConfig::DomainUid).Name);
}
- SetupTabletServices(runtime, &app);
+ SetupTabletServices(runtime, &app, !opts.DSProxies_.empty(), {}, nullptr, false, opts.DSProxies_);
if (opts.EnablePipeRetries_) {
EnableSchemeshardPipeRetriesGuard = EnableSchemeshardPipeRetries(runtime);
}
diff --git a/ydb/core/tx/schemeshard/ut_helpers/test_env.h b/ydb/core/tx/schemeshard/ut_helpers/test_env.h
index 493d2795aa..85101794ec 100644
--- a/ydb/core/tx/schemeshard/ut_helpers/test_env.h
+++ b/ydb/core/tx/schemeshard/ut_helpers/test_env.h
@@ -77,6 +77,7 @@ namespace NSchemeShardUT_Private {
OPTION(std::optional<bool>, EnableDatabaseAdmin, std::nullopt);
OPTION(std::optional<bool>, EnablePermissionsExport, std::nullopt);
OPTION(std::optional<bool>, EnableChecksumsExport, std::nullopt);
+ OPTION(TVector<TIntrusivePtr<NFake::TProxyDS>>, DSProxies, {});
#undef OPTION
};
diff --git a/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp b/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp
index 34cf342f84..2f8482a9fd 100644
--- a/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp
+++ b/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp
@@ -497,10 +497,10 @@ Y_UNIT_TEST_SUITE(IndexBuildTest) {
{
NKikimrIndexBuilder::TIndexBuildSettings settings;
settings.set_source_path("/MyRoot/Table");
- settings.set_max_batch_rows(1);
- settings.set_max_batch_bytes(1<<10);
+ settings.MutableScanSettings()->SetMaxBatchRows(0); // row by row
+ settings.MutableScanSettings()->SetMaxBatchBytes(1<<10);
+ settings.MutableScanSettings()->SetMaxBatchRetries(0);
settings.set_max_shards_in_flight(1);
- settings.set_max_retries_upload_batch(0);
Ydb::Table::TableIndex& index = *settings.mutable_index();
index.set_name("index1");
diff --git a/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp b/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp
index 676d59c9d7..0693679da5 100644
--- a/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp
+++ b/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp
@@ -1564,9 +1564,10 @@ value {
}
}
- Y_UNIT_TEST(ExportImportOnSupportedDatatypes) {
+ void ExportImportOnSupportedDatatypesImpl(bool encrypted, bool commonPrefix) {
TTestBasicRuntime runtime;
TTestEnv env(runtime, TTestEnvOptions().EnableParameterizedDecimal(true));
+ runtime.GetAppData().FeatureFlags.SetEnableEncryptedExport(true);
ui64 txId = 100;
TestCreateTable(runtime, ++txId, "/MyRoot", R"_(
@@ -1656,16 +1657,51 @@ value {
TS3Mock s3Mock({}, TS3Mock::TSettings(port));
UNIT_ASSERT(s3Mock.Start());
+ TString encryptionSettings;
+ if (encrypted) {
+ encryptionSettings = R"(encryption_settings {
+ encryption_algorithm: "ChaCha20-Poly1305"
+ symmetric_key {
+ key: "Very very secret export key!!!!!"
+ }
+ })";
+ }
+ TString exportItems, importItems;
+ if (commonPrefix) {
+ exportItems = R"(
+ source_path: "/MyRoot"
+ destination_prefix: "BackupPrefix"
+ items {
+ source_path: "Table"
+ }
+ )";
+ importItems = R"(
+ source_prefix: "BackupPrefix"
+ destination_path: "/MyRoot/Restored"
+ )";
+ } else {
+ exportItems = R"(
+ items {
+ source_path: "/MyRoot/Table"
+ destination_prefix: "Backup1"
+ }
+ )";
+ importItems = R"(
+ items {
+ source_prefix: "Backup1"
+ destination_path: "/MyRoot/Restored"
+ }
+ )";
+ }
+
TestExport(runtime, ++txId, "/MyRoot", Sprintf(R"(
ExportToS3Settings {
endpoint: "localhost:%d"
scheme: HTTP
- items {
- source_path: "/MyRoot/Table"
- destination_prefix: "Backup1"
- }
+ %s
+ %s
}
- )", port));
+ )", port, exportItems.c_str(), encryptionSettings.c_str()));
env.TestWaitNotification(runtime, txId);
TestGetExport(runtime, txId, "/MyRoot");
@@ -1673,12 +1709,10 @@ value {
ImportFromS3Settings {
endpoint: "localhost:%d"
scheme: HTTP
- items {
- source_prefix: "Backup1"
- destination_path: "/MyRoot/Restored"
- }
+ %s
+ %s
}
- )", port));
+ )", port, importItems.c_str(), encryptionSettings.c_str()));
env.TestWaitNotification(runtime, txId);
TestGetImport(runtime, txId, "/MyRoot");
@@ -1744,10 +1778,22 @@ value {
auto contentOriginalTable = ReadTable(runtime, TTestTxConfig::FakeHiveTablets, "Table", readKeyDesc, readColumns);
NKqp::CompareYson(expectedJson, contentOriginalTable);
- auto contentRestoredTable = ReadTable(runtime, TTestTxConfig::FakeHiveTablets + 2, "Restored", readKeyDesc, readColumns);
+ auto contentRestoredTable = ReadTable(runtime, TTestTxConfig::FakeHiveTablets + 2, commonPrefix ? "Table" : "Restored", readKeyDesc, readColumns);
NKqp::CompareYson(expectedJson, contentRestoredTable);
}
+ Y_UNIT_TEST(ExportImportOnSupportedDatatypes) {
+ ExportImportOnSupportedDatatypesImpl(false, false);
+ }
+
+ Y_UNIT_TEST(ExportImportOnSupportedDatatypesWithCommonDestPrefix) {
+ ExportImportOnSupportedDatatypesImpl(false, true);
+ }
+
+ Y_UNIT_TEST(ExportImportOnSupportedDatatypesEncrypted) {
+ ExportImportOnSupportedDatatypesImpl(true, true);
+ }
+
Y_UNIT_TEST(ExportImportPg) {
TTestBasicRuntime runtime;
TTestEnv env(runtime, TTestEnvOptions().EnableTablePgTypes(true));
diff --git a/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp b/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp
index 0c727ad137..4dcfd566eb 100644
--- a/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp
+++ b/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp
@@ -1,8 +1,8 @@
+#include <ydb/core/metering/metering.h>
+#include <ydb/core/testlib/actors/block_events.h>
#include <ydb/core/tx/schemeshard/ut_helpers/helpers.h>
#include <ydb/core/tx/schemeshard/schemeshard_private.h>
-#include <ydb/core/metering/metering.h>
-
using namespace NKikimr;
using namespace NSchemeShard;
using namespace NSchemeShardUT_Private;
@@ -263,6 +263,72 @@ Y_UNIT_TEST_SUITE(TSchemeShardServerLess) {
}
}
+ Y_UNIT_TEST(StorageBillingLabels) {
+ TTestBasicRuntime runtime;
+ TTestEnv env(runtime);
+ ui64 txId = 100;
+
+ SetAllowServerlessStorageBilling(&runtime, true);
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", R"(
+ Name: "SharedDB"
+ )");
+ env.TestWaitNotification(runtime, txId);
+
+ TestAlterExtSubDomain(runtime, ++txId, "/MyRoot", R"(
+ Name: "SharedDB"
+ StoragePools {
+ Name: "pool-1"
+ Kind: "pool-kind-1"
+ }
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ ExternalSchemeShard: true
+ )");
+ env.TestWaitNotification(runtime, txId);
+
+ TestCreateExtSubDomain(runtime, ++txId, "/MyRoot", Sprintf(R"(
+ Name: "ServerlessDB"
+ ResourcesDomainKey {
+ SchemeShard: %lu
+ PathId: 2
+ }
+ )", TTestTxConfig::SchemeShard));
+ env.TestWaitNotification(runtime, txId);
+
+ TestAlterExtSubDomain(runtime, ++txId, "/MyRoot", R"(
+ Name: "ServerlessDB"
+ StoragePools {
+ Name: "pool-1"
+ Kind: "pool-kind-1"
+ }
+ PlanResolution: 50
+ Coordinators: 1
+ Mediators: 1
+ TimeCastBucketsPerMediator: 2
+ ExternalSchemeShard: true
+ ExternalHive: false
+ )");
+ env.TestWaitNotification(runtime, txId);
+
+ TestUserAttrs(runtime, ++txId, "/MyRoot", "ServerlessDB", AlterUserAttrs({
+ {"cloud_id", "CLOUD_ID_VAL"},
+ {"folder_id", "FOLDER_ID_VAL"},
+ {"database_id", "DATABASE_ID_VAL"},
+ {"label_k", "v"},
+ {"not_a_label_x", "y"},
+ }));
+ env.TestWaitNotification(runtime, txId);
+
+ TBlockEvents<NMetering::TEvMetering::TEvWriteMeteringJson> block(runtime);
+ runtime.WaitFor("metering", [&]{ return block.size() >= 1; });
+
+ const auto& jsonStr = block[0]->Get()->MeteringJson;
+ UNIT_ASSERT_C(jsonStr.Contains(R"("labels":{"k":"v"})"), jsonStr);
+ UNIT_ASSERT_C(!jsonStr.Contains("not_a_label"), jsonStr);
+ }
+
Y_UNIT_TEST(TestServerlessComputeResourcesMode) {
TTestBasicRuntime runtime;
TTestEnv env(runtime, TTestEnvOptions().EnableServerlessExclusiveDynamicNodes(true));
diff --git a/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp b/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp
index bd90a89af2..d74a846fea 100644
--- a/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp
+++ b/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp
@@ -141,18 +141,6 @@ void SetStatsObserver(TTestActorRuntime& runtime, const std::function<TTestActor
});
}
-TVector<ui64> GetTableShards(TTestActorRuntime& runtime,
- const TString& path
-) {
- TVector<ui64> shards;
- auto tableDescription = DescribePath(runtime, path, true);
- for (const auto& part : tableDescription.GetPathDescription().GetTablePartitions()) {
- shards.emplace_back(part.GetDatashardId());
- }
-
- return shards;
-}
-
TTableId ResolveTableId(TTestActorRuntime& runtime, const TString& path) {
auto response = Navigate(runtime, path);
return response->ResultSet.at(0).TableId;
@@ -646,7 +634,7 @@ Y_UNIT_TEST_SUITE(TStoragePoolsStatsPersistence) {
);
env.TestWaitNotification(runtime, txId);
- auto shards = GetTableShards(runtime, "/MyRoot/SomeTable");
+ auto shards = GetTableShards(runtime, TTestTxConfig::SchemeShard, "/MyRoot/SomeTable");
UNIT_ASSERT_VALUES_EQUAL(shards.size(), 1);
auto& datashard = shards[0];
constexpr ui32 rowsCount = 100u;
diff --git a/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp b/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp
index 60de0f783f..fd526f41da 100644
--- a/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp
+++ b/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp
@@ -213,19 +213,6 @@ void CheckQuotaExceedance(TTestActorRuntime& runtime,
});
}
-TVector<ui64> GetTableShards(TTestActorRuntime& runtime,
- ui64 schemeShard,
- const TString& path
-) {
- TVector<ui64> shards;
- const auto tableDescription = DescribePath(runtime, schemeShard, path, true);
- for (const auto& part : tableDescription.GetPathDescription().GetTablePartitions()) {
- shards.emplace_back(part.GetDatashardId());
- }
-
- return shards;
-}
-
TTableId ResolveTableId(TTestActorRuntime& runtime, const TString& path) {
const auto response = Navigate(runtime, path);
return response->ResultSet.at(0).TableId;
diff --git a/ydb/core/tx/schemeshard/ya.make b/ydb/core/tx/schemeshard/ya.make
index cba1bb7fff..f622ad83d4 100644
--- a/ydb/core/tx/schemeshard/ya.make
+++ b/ydb/core/tx/schemeshard/ya.make
@@ -330,12 +330,12 @@ YQL_LAST_ABI_VERSION()
IF (OS_WINDOWS)
SRCS(
schemeshard_export_uploaders_fallback.cpp
- schemeshard_import_scheme_getter_fallback.cpp
+ schemeshard_import_getters_fallback.cpp
)
ELSE()
SRCS(
schemeshard_export_uploaders.cpp
- schemeshard_import_scheme_getter.cpp
+ schemeshard_import_getters.cpp
)
ENDIF()
diff --git a/ydb/core/tx/tx_proxy/schemereq.cpp b/ydb/core/tx/tx_proxy/schemereq.cpp
index adb81886b0..588491369a 100644
--- a/ydb/core/tx/tx_proxy/schemereq.cpp
+++ b/ydb/core/tx/tx_proxy/schemereq.cpp
@@ -2,6 +2,7 @@
#include <ydb/core/base/appdata.h>
#include <ydb/core/base/auth.h>
+#include <ydb/core/base/local_user_token.h>
#include <ydb/core/base/path.h>
#include <ydb/core/base/tablet_pipe.h>
#include <ydb/core/base/tx_processing.h>
@@ -10,6 +11,9 @@
#include <ydb/core/protos/schemeshard/operations.pb.h>
#include <ydb/core/tx/schemeshard/schemeshard.h>
+#include <ydb/library/login/login.h>
+#include <ydb/library/login/protos/login.pb.h>
+
#include <ydb/library/aclib/aclib.h>
#include <ydb/library/actors/core/hfunc.h>
#include <ydb/library/protobuf_printer/security_printer.h>
@@ -63,6 +67,7 @@ struct TBaseSchemeReq: public TActorBootstrapped<TDerived> {
bool IsClusterAdministrator = false;
bool IsDatabaseAdministrator = false;
NACLib::TSID DatabaseOwner;
+ NLoginProto::TSecurityState DatabaseSecurityState;
TBaseSchemeReq(const TTxProxyServices &services, ui64 txid, TAutoPtr<TEvTxProxyReq::TEvSchemeRequest> request, const TIntrusivePtr<TTxProxyMon> &txProxyMon)
: Services(services)
@@ -1141,7 +1146,8 @@ struct TBaseSchemeReq: public TActorBootstrapped<TDerived> {
allowACLBypass = (checkAdmin && isAdmin) || isUserChangesOwnPassword;
- // Database admin is not allowed to manage group of database admins (its the privilege of cluster admins).
+        // Database admin is not allowed to manage the group of database admins or change other database admins
+        // (it's the privilege of cluster admins).
if (checkAdmin && IsDatabaseAdministrator) {
TString group;
switch (alterLogin.GetAlterCase()) {
@@ -1168,6 +1174,19 @@ struct TBaseSchemeReq: public TActorBootstrapped<TDerived> {
ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::AccessDenied, nullptr, &issue, ctx);
return false;
}
+                // A database admin can still change their own password
+ if (alterLogin.GetAlterCase() == NKikimrSchemeOp::TAlterLogin::kModifyUser && !isUserChangesOwnPassword) {
+ const auto& targetUser = alterLogin.GetModifyUser();
+ const auto targetUserToken = NKikimr::BuildLocalUserToken(DatabaseSecurityState, targetUser.GetUser());
+ if (NKikimr::IsDatabaseAdministrator(&targetUserToken, DatabaseOwner)) {
+ const auto errString = MakeAccessDeniedError(ctx, entry.Path, TStringBuilder()
+ << "attempt to change other database admin by the database admin"
+ );
+ auto issue = MakeIssue(NKikimrIssues::TIssuesIds::ACCESS_DENIED, errString);
+ ReportStatus(TEvTxUserProxy::TEvProposeTransactionStatus::EStatus::AccessDenied, nullptr, &issue, ctx);
+ return false;
+ }
+ }
}
} else if (modifyScheme.GetOperationType() == NKikimrSchemeOp::ESchemeOpModifyACL) {
@@ -1357,9 +1376,12 @@ struct TBaseSchemeReq: public TActorBootstrapped<TDerived> {
return Die(ctx);
}
- const auto& database = request.ResultSet.front();
- DatabaseOwner = database.Self->Info.GetOwner();
- IsDatabaseAdministrator = NKikimr::IsDatabaseAdministrator(&UserToken.value(), database.Self->Info.GetOwner());
+ const auto& entry = request.ResultSet.front();
+
+ DatabaseOwner = entry.Self->Info.GetOwner();
+ DatabaseSecurityState = entry.DomainDescription->Description.GetSecurityState();
+
+ IsDatabaseAdministrator = NKikimr::IsDatabaseAdministrator(&UserToken.value(), entry.Self->Info.GetOwner());
LOG_DEBUG_S(ctx, NKikimrServices::TX_PROXY, "Actor# " << ctx.SelfID.ToString() << " txid# " << TxId
<< " HandleResolveDatabase,"
diff --git a/ydb/core/tx/tx_proxy/upload_rows.cpp b/ydb/core/tx/tx_proxy/upload_rows.cpp
index 5bf5d915ee..c7e542f7e9 100644
--- a/ydb/core/tx/tx_proxy/upload_rows.cpp
+++ b/ydb/core/tx/tx_proxy/upload_rows.cpp
@@ -70,8 +70,7 @@ private:
return false;
}
- TVector<std::pair<TString, Ydb::Type>> GetRequestColumns(TString& errorMessage) const override {
- Y_UNUSED(errorMessage);
+ TConclusion<TVector<std::pair<TString, Ydb::Type>>> GetRequestColumns() const override {
return *ColumnTypes;
}
diff --git a/ydb/core/tx/tx_proxy/upload_rows_common_impl.h b/ydb/core/tx/tx_proxy/upload_rows_common_impl.h
index 1249da74bf..6c845f0a71 100644
--- a/ydb/core/tx/tx_proxy/upload_rows_common_impl.h
+++ b/ydb/core/tx/tx_proxy/upload_rows_common_impl.h
@@ -17,6 +17,7 @@
#include <ydb/core/tx/datashard/datashard.h>
#include <ydb/core/tx/scheme_cache/scheme_cache.h>
#include <ydb/core/tx/tx_proxy/upload_rows_counters.h>
+#include <ydb/core/formats/arrow/accessor/abstract/constructor.h>
#include <ydb/core/formats/arrow/size_calcer.h>
#include <library/cpp/monlib/dynamic_counters/counters.h>
@@ -302,7 +303,7 @@ private:
virtual const TString& GetTable() = 0;
virtual const TVector<std::pair<TSerializedCellVec, TString>>& GetRows() const = 0;
virtual bool CheckAccess(TString& errorMessage) = 0;
- virtual TVector<std::pair<TString, Ydb::Type>> GetRequestColumns(TString& errorMessage) const = 0;
+ virtual TConclusion<TVector<std::pair<TString, Ydb::Type>>> GetRequestColumns() const = 0;
virtual bool ExtractRows(TString& errorMessage) = 0;
virtual bool ExtractBatch(TString& errorMessage) = 0;
virtual void RaiseIssue(const NYql::TIssue& issue) = 0;
@@ -366,9 +367,10 @@ private:
return ok;
}
- bool BuildSchema(const NActors::TActorContext& ctx, TString& errorMessage, bool makeYqbSchema) {
+ [[nodiscard]] TConclusionStatus BuildSchema(const NActors::TActorContext& ctx, bool makeYqbSchema) {
Y_UNUSED(ctx);
Y_ABORT_UNLESS(ResolveNamesResult);
+ AFL_VERIFY(ResolveNamesResult->ResultSet.size() == 1);
auto& entry = ResolveNamesResult->ResultSet.front();
@@ -377,6 +379,7 @@ private:
THashSet<TString> keyColumnsLeft;
THashSet<TString> notNullColumnsLeft = entry.NotNullColumns;
SrcColumns.reserve(entry.Columns.size());
+ THashSet<TString> HasInternalConversion;
for (const auto& [_, colInfo] : entry.Columns) {
ui32 id = colInfo.Id;
@@ -396,87 +399,95 @@ private:
}
}
+ if (entry.ColumnTableInfo) {
+ for (const auto& colInfo : entry.ColumnTableInfo->Description.GetSchema().GetColumns()) {
+ auto& name = colInfo.GetName();
+ NArrow::NAccessor::TConstructorContainer accessor;
+ if (colInfo.HasDataAccessorConstructor()) {
+ if (!accessor.DeserializeFromProto(colInfo.GetDataAccessorConstructor())) {
+ return TConclusionStatus::Fail("cannot parse accessor for column: " + name);
+ }
+ if (accessor->HasInternalConversion()) {
+ HasInternalConversion.emplace(name);
+ }
+ }
+ }
+ }
+
KeyColumnPositions.resize(KeyColumnTypes.size());
KeyColumnNames.resize(KeyColumnTypes.size());
- auto reqColumns = GetRequestColumns(errorMessage);
- if (!errorMessage.empty()) {
- return false;
- } else if (reqColumns.empty()) {
+ auto reqColumns = GetRequestColumns();
+ if (reqColumns.IsFail()) {
+ return reqColumns;
+ } else if (reqColumns->empty()) {
for (auto& [name, typeInfo] : SrcColumns) {
Ydb::Type ydbType;
ProtoFromTypeInfo(typeInfo, ydbType);
- reqColumns.emplace_back(name, std::move(ydbType));
+ reqColumns->emplace_back(name, std::move(ydbType));
}
}
- for (size_t pos = 0; pos < reqColumns.size(); ++pos) {
- auto& name = reqColumns[pos].first;
+ for (size_t pos = 0; pos < reqColumns->size(); ++pos) {
+ auto& name = (*reqColumns)[pos].first;
const auto* cp = columnByName.FindPtr(name);
if (!cp) {
- errorMessage = Sprintf("Unknown column: %s", name.c_str());
- return false;
+ return TConclusionStatus::Fail(Sprintf("Unknown column: %s", name.c_str()));
}
i32 pgTypeMod = -1;
- ui32 colId = *cp;
+ const ui32 colId = *cp;
auto& ci = *entry.Columns.FindPtr(colId);
TString columnTypeName = NScheme::TypeName(ci.PType, ci.PTypeMod);
- const Ydb::Type& typeInProto = reqColumns[pos].second;
+ const Ydb::Type& typeInProto = (*reqColumns)[pos].second;
TString parseProtoError;
NScheme::TTypeInfoMod inTypeInfoMod;
if (!NScheme::TypeInfoFromProto(typeInProto, inTypeInfoMod, parseProtoError)){
- errorMessage = Sprintf("Type parse error for column %s: %s",
- name.c_str(), parseProtoError.c_str());
- return false;
+ return TConclusionStatus::Fail(Sprintf("Type parse error for column %s: %s",
+ name.c_str(), parseProtoError.c_str()));
}
const NScheme::TTypeInfo& typeInRequest = inTypeInfoMod.TypeInfo;
TString inTypeName = NScheme::TypeName(typeInRequest, typeInRequest.GetPgTypeMod(ci.PTypeMod));
- if (typeInProto.has_type_id()) {
- bool sourceIsArrow = GetSourceType() != EUploadSource::ProtoValues;
+ if (typeInProto.has_type_id()) {
+ bool sourceIsArrow = GetSourceType() != EUploadSource::ProtoValues;
bool ok = SameOrConvertableDstType(typeInRequest, ci.PType, sourceIsArrow); // TODO
- if (!ok) {
- errorMessage = Sprintf("Type mismatch, got type %s for column %s, but expected %s",
- inTypeName.c_str(), name.c_str(), columnTypeName.c_str());
- return false;
- }
- if (NArrow::TArrowToYdbConverter::NeedInplaceConversion(typeInRequest, ci.PType)) {
- ColumnsToConvertInplace[name] = ci.PType;
- }
- } else if (typeInProto.has_decimal_type()) {
- if (typeInRequest != ci.PType) {
- errorMessage = Sprintf("Type mismatch, got type %s for column %s, but expected %s",
- inTypeName.c_str(), name.c_str(), columnTypeName.c_str());
- return false;
- }
- } else if (typeInProto.has_pg_type()) {
- bool ok = SameDstType(typeInRequest, ci.PType, false);
- if (!ok) {
- errorMessage = Sprintf("Type mismatch, got type %s for column %s, but expected %s",
- inTypeName.c_str(), name.c_str(), columnTypeName.c_str());
- return false;
- }
- if (!ci.PTypeMod.empty() && NPg::TypeDescNeedsCoercion(typeInRequest.GetPgTypeDesc())) {
- if (inTypeInfoMod.TypeMod != ci.PTypeMod) {
- errorMessage = Sprintf("Typemod mismatch, got type %s for column %s, type mod %s, but expected %s",
- inTypeName.c_str(), name.c_str(), inTypeInfoMod.TypeMod.c_str(), ci.PTypeMod.c_str());
- return false;
+ if (!ok) {
+ return TConclusionStatus::Fail(Sprintf("Type mismatch, got type %s for column %s, but expected %s",
+ inTypeName.c_str(), name.c_str(), columnTypeName.c_str()));
}
+ if (NArrow::TArrowToYdbConverter::NeedInplaceConversion(typeInRequest, ci.PType)) {
+ ColumnsToConvertInplace[name] = ci.PType;
+ }
+ } else if (typeInProto.has_decimal_type()) {
+ if (typeInRequest != ci.PType) {
+ return TConclusionStatus::Fail(Sprintf("Type mismatch, got type %s for column %s, but expected %s",
+ inTypeName.c_str(), name.c_str(), columnTypeName.c_str()));
+ }
+ } else if (typeInProto.has_pg_type()) {
+ bool ok = SameDstType(typeInRequest, ci.PType, false);
+ if (!ok) {
+ return TConclusionStatus::Fail(Sprintf("Type mismatch, got type %s for column %s, but expected %s",
+ inTypeName.c_str(), name.c_str(), columnTypeName.c_str()));
+ }
+ if (!ci.PTypeMod.empty() && NPg::TypeDescNeedsCoercion(typeInRequest.GetPgTypeDesc())) {
+ if (inTypeInfoMod.TypeMod != ci.PTypeMod) {
+ return TConclusionStatus::Fail(Sprintf("Typemod mismatch, got type %s for column %s, type mod %s, but expected %s",
+ inTypeName.c_str(), name.c_str(), inTypeInfoMod.TypeMod.c_str(), ci.PTypeMod.c_str()));
+ }
- const auto result = NPg::BinaryTypeModFromTextTypeMod(inTypeInfoMod.TypeMod, typeInRequest.GetPgTypeDesc());
- if (result.Error) {
- errorMessage = Sprintf("Invalid typemod %s, got type %s for column %s, error %s",
- inTypeInfoMod.TypeMod.c_str(), inTypeName.c_str(), name.c_str(), result.Error->c_str());
- return false;
+ const auto result = NPg::BinaryTypeModFromTextTypeMod(inTypeInfoMod.TypeMod, typeInRequest.GetPgTypeDesc());
+ if (result.Error) {
+ return TConclusionStatus::Fail(Sprintf("Invalid typemod %s, got type %s for column %s, error %s",
+ inTypeInfoMod.TypeMod.c_str(), inTypeName.c_str(), name.c_str(), result.Error->c_str()));
+ }
+ pgTypeMod = result.Typmod;
}
- pgTypeMod = result.Typmod;
}
- }
bool notNull = entry.NotNullColumns.contains(ci.Name);
if (notNull) {
@@ -524,8 +535,7 @@ private:
}
if (!allowUpdate) {
- errorMessage = "Only async-indexed tables are supported by BulkUpsert";
- return false;
+ return TConclusionStatus::Fail("Only async-indexed tables are supported by BulkUpsert");
}
}
@@ -546,6 +556,9 @@ private:
}
for (const auto& [colName, colType] : YdbSchema) {
+ if (HasInternalConversion.contains(colName)) {
+ continue;
+ }
if (NArrow::TArrowToYdbConverter::NeedDataConversion(colType)) {
ColumnsToConvert[colName] = colType;
}
@@ -553,8 +566,7 @@ private:
}
if (!keyColumnsLeft.empty()) {
- errorMessage = Sprintf("Missing key columns: %s", JoinSeq(", ", keyColumnsLeft).c_str());
- return false;
+ return TConclusionStatus::Fail(Sprintf("Missing key columns: %s", JoinSeq(", ", keyColumnsLeft).c_str()));
}
if (!notNullColumnsLeft.empty() && UpsertIfExists) {
@@ -564,11 +576,10 @@ private:
}
if (!notNullColumnsLeft.empty()) {
- errorMessage = Sprintf("Missing not null columns: %s", JoinSeq(", ", notNullColumnsLeft).c_str());
- return false;
+ return TConclusionStatus::Fail(Sprintf("Missing not null columns: %s", JoinSeq(", ", notNullColumnsLeft).c_str()));
}
- return true;
+ return TConclusionStatus::Success();
}
void ResolveTable(const TString& table, const NActors::TActorContext& ctx) {
@@ -618,7 +629,7 @@ private:
}
TableKind = entry.Kind;
- bool isColumnTable = (TableKind == NSchemeCache::TSchemeCacheNavigate::KindColumnTable);
+ const bool isColumnTable = (TableKind == NSchemeCache::TSchemeCacheNavigate::KindColumnTable);
if (entry.TableId.IsSystemView()) {
return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, "is not supported. Table is a system view", ctx);
@@ -634,11 +645,14 @@ private:
ResolveNamesResult.reset(ev->Get()->Request.Release());
bool makeYdbSchema = isColumnTable || (GetSourceType() != EUploadSource::ProtoValues);
- TString errorMessage;
- if (!BuildSchema(ctx, errorMessage, makeYdbSchema)) {
- return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, errorMessage, ctx);
+ {
+ auto conclusion = BuildSchema(ctx, makeYdbSchema);
+ if (conclusion.IsFail()) {
+ return ReplyWithError(Ydb::StatusIds::SCHEME_ERROR, conclusion.GetErrorMessage(), ctx);
+ }
}
+ TString errorMessage;
switch (GetSourceType()) {
case EUploadSource::ProtoValues:
{
diff --git a/ydb/core/tx/tx_proxy/ya.make b/ydb/core/tx/tx_proxy/ya.make
index 1aaf29809a..b7b24aa3bc 100644
--- a/ydb/core/tx/tx_proxy/ya.make
+++ b/ydb/core/tx/tx_proxy/ya.make
@@ -51,6 +51,7 @@ PEERDIR(
ydb/core/tx/tx_allocator
ydb/core/tx/tx_allocator_client
ydb/library/aclib
+ ydb/library/login
ydb/library/mkql_proto/protos
ydb/public/lib/base
)
diff --git a/ydb/core/util/cpuinfo.cpp b/ydb/core/util/cpuinfo.cpp
index 6703a0e693..8409a5b703 100644
--- a/ydb/core/util/cpuinfo.cpp
+++ b/ydb/core/util/cpuinfo.cpp
@@ -1,9 +1,11 @@
#include "cpuinfo.h"
#include <cstdlib>
-#include <dirent.h>
+#if defined (_linux_)
+ #include <dirent.h>
+ #include <unistd.h>
+#endif
#include <fcntl.h>
#include <fstream>
-#include <unistd.h>
#include <unordered_set>
#include <util/string/ascii.h>
#include <util/string/builder.h>
@@ -181,6 +183,7 @@ void NKikimr::TSystemThreadsMonitor::UpdateSystemThread(pid_t pid, pid_t tid) {
}
#else
Y_UNUSED(tid);
+ Y_UNUSED(pid);
#endif
}
diff --git a/ydb/core/viewer/tests/canondata/result.json b/ydb/core/viewer/tests/canondata/result.json
index 782c75becc..786307c641 100644
--- a/ydb/core/viewer/tests/canondata/result.json
+++ b/ydb/core/viewer/tests/canondata/result.json
@@ -57,6 +57,271 @@
]
}
},
+ "test.test_scheme_directory": {
+ "1-get": {
+ "children": [
+ {
+ "created_at": {
+ "plan_step": "not-zero-number-text",
+ "tx_id": "not-zero-number-text"
+ },
+ "name": ".metadata",
+ "owner": "metadata@system",
+ "type": "DIRECTORY"
+ },
+ {
+ "created_at": {
+ "plan_step": "not-zero-number-text",
+ "tx_id": "not-zero-number-text"
+ },
+ "name": "topic1",
+ "owner": "root@builtin",
+ "type": "TOPIC"
+ },
+ {
+ "created_at": {},
+ "name": ".sys",
+ "type": "DIRECTORY"
+ }
+ ],
+ "self": {
+ "created_at": {
+ "tx_id": "not-zero-number-text"
+ },
+ "effective_permissions": [
+ {
+ "permission_names": [
+ "ydb.database.connect"
+ ],
+ "subject": "USERS"
+ },
+ {
+ "permission_names": [
+ "ydb.generic.list"
+ ],
+ "subject": "METADATA-READERS"
+ },
+ {
+ "permission_names": [
+ "ydb.granular.select_row"
+ ],
+ "subject": "DATA-READERS"
+ },
+ {
+ "permission_names": [
+ "ydb.tables.modify"
+ ],
+ "subject": "DATA-WRITERS"
+ },
+ {
+ "permission_names": [
+ "ydb.granular.create_directory",
+ "ydb.granular.write_attributes",
+ "ydb.granular.create_table",
+ "ydb.granular.remove_schema",
+ "ydb.granular.create_queue",
+ "ydb.granular.alter_schema"
+ ],
+ "subject": "DDL-ADMINS"
+ },
+ {
+ "permission_names": [
+ "ydb.access.grant"
+ ],
+ "subject": "ACCESS-ADMINS"
+ },
+ {
+ "permission_names": [
+ "ydb.generic.manage"
+ ],
+ "subject": "DATABASE-ADMINS"
+ }
+ ],
+ "name": "Root/dedicated_db",
+ "owner": "root@builtin",
+ "type": "DATABASE"
+ }
+ },
+ "2-post": {},
+ "3-get": {
+ "children": [
+ {
+ "created_at": {
+ "plan_step": "not-zero-number-text",
+ "tx_id": "not-zero-number-text"
+ },
+ "name": ".metadata",
+ "owner": "metadata@system",
+ "type": "DIRECTORY"
+ },
+ {
+ "created_at": {
+ "plan_step": "not-zero-number-text",
+ "tx_id": "not-zero-number-text"
+ },
+ "name": "test_dir",
+ "owner": "root@builtin",
+ "type": "DIRECTORY"
+ },
+ {
+ "created_at": {
+ "plan_step": "not-zero-number-text",
+ "tx_id": "not-zero-number-text"
+ },
+ "name": "topic1",
+ "owner": "root@builtin",
+ "type": "TOPIC"
+ },
+ {
+ "created_at": {},
+ "name": ".sys",
+ "type": "DIRECTORY"
+ }
+ ],
+ "self": {
+ "created_at": {
+ "tx_id": "not-zero-number-text"
+ },
+ "effective_permissions": [
+ {
+ "permission_names": [
+ "ydb.database.connect"
+ ],
+ "subject": "USERS"
+ },
+ {
+ "permission_names": [
+ "ydb.generic.list"
+ ],
+ "subject": "METADATA-READERS"
+ },
+ {
+ "permission_names": [
+ "ydb.granular.select_row"
+ ],
+ "subject": "DATA-READERS"
+ },
+ {
+ "permission_names": [
+ "ydb.tables.modify"
+ ],
+ "subject": "DATA-WRITERS"
+ },
+ {
+ "permission_names": [
+ "ydb.granular.create_directory",
+ "ydb.granular.write_attributes",
+ "ydb.granular.create_table",
+ "ydb.granular.remove_schema",
+ "ydb.granular.create_queue",
+ "ydb.granular.alter_schema"
+ ],
+ "subject": "DDL-ADMINS"
+ },
+ {
+ "permission_names": [
+ "ydb.access.grant"
+ ],
+ "subject": "ACCESS-ADMINS"
+ },
+ {
+ "permission_names": [
+ "ydb.generic.manage"
+ ],
+ "subject": "DATABASE-ADMINS"
+ }
+ ],
+ "name": "Root/dedicated_db",
+ "owner": "root@builtin",
+ "type": "DATABASE"
+ }
+ },
+ "4-delete": {},
+ "5-get": {
+ "children": [
+ {
+ "created_at": {
+ "plan_step": "not-zero-number-text",
+ "tx_id": "not-zero-number-text"
+ },
+ "name": ".metadata",
+ "owner": "metadata@system",
+ "type": "DIRECTORY"
+ },
+ {
+ "created_at": {
+ "plan_step": "not-zero-number-text",
+ "tx_id": "not-zero-number-text"
+ },
+ "name": "topic1",
+ "owner": "root@builtin",
+ "type": "TOPIC"
+ },
+ {
+ "created_at": {},
+ "name": ".sys",
+ "type": "DIRECTORY"
+ }
+ ],
+ "self": {
+ "created_at": {
+ "tx_id": "not-zero-number-text"
+ },
+ "effective_permissions": [
+ {
+ "permission_names": [
+ "ydb.database.connect"
+ ],
+ "subject": "USERS"
+ },
+ {
+ "permission_names": [
+ "ydb.generic.list"
+ ],
+ "subject": "METADATA-READERS"
+ },
+ {
+ "permission_names": [
+ "ydb.granular.select_row"
+ ],
+ "subject": "DATA-READERS"
+ },
+ {
+ "permission_names": [
+ "ydb.tables.modify"
+ ],
+ "subject": "DATA-WRITERS"
+ },
+ {
+ "permission_names": [
+ "ydb.granular.create_directory",
+ "ydb.granular.write_attributes",
+ "ydb.granular.create_table",
+ "ydb.granular.remove_schema",
+ "ydb.granular.create_queue",
+ "ydb.granular.alter_schema"
+ ],
+ "subject": "DDL-ADMINS"
+ },
+ {
+ "permission_names": [
+ "ydb.access.grant"
+ ],
+ "subject": "ACCESS-ADMINS"
+ },
+ {
+ "permission_names": [
+ "ydb.generic.manage"
+ ],
+ "subject": "DATABASE-ADMINS"
+ }
+ ],
+ "name": "Root/dedicated_db",
+ "owner": "root@builtin",
+ "type": "DATABASE"
+ }
+ }
+ },
"test.test_storage_groups": {
"FieldsAvailable": "111111111111111111111",
"FieldsRequired": "111111111111111111111",
@@ -519,48 +784,76 @@
"TotalGroups": 5
},
"test.test_topic_data": {
+ "both_offset_and_ts": {
+ "status_code": 400,
+ "text": "Only read_timestamp or offset parameter may be specified, not both"
+ },
+ "no_partition": {
+ "status_code": 400,
+ "text": "Parameter 'partition' is necessary"
+ },
"response_compressed": {
"EndOffset": 21,
"Messages": [
{
"Codec": 1,
+ "CreateTimestamp": "not-zero-number",
"Message": "Y29tcHJlc3NlZC1tZXNzYWdlLTA=",
"Offset": 11,
"OriginalSize": 20,
+ "ProducerId": "text",
"SeqNo": 12,
- "StorageSize": 38
+ "StorageSize": 38,
+ "TimestampDiff": "not-zero-number",
+ "WriteTimestamp": "not-zero-number"
},
{
"Codec": 1,
+ "CreateTimestamp": "not-zero-number",
"Message": "Y29tcHJlc3NlZC1tZXNzYWdlLTE=",
"Offset": 12,
"OriginalSize": 20,
+ "ProducerId": "text",
"SeqNo": 13,
- "StorageSize": 38
+ "StorageSize": 38,
+ "TimestampDiff": "not-zero-number",
+ "WriteTimestamp": "not-zero-number"
},
{
"Codec": 1,
+ "CreateTimestamp": "not-zero-number",
"Message": "Y29tcHJlc3NlZC1tZXNzYWdlLTI=",
"Offset": 13,
"OriginalSize": 20,
+ "ProducerId": "text",
"SeqNo": 14,
- "StorageSize": 38
+ "StorageSize": 38,
+ "TimestampDiff": "not-zero-number",
+ "WriteTimestamp": "not-zero-number"
},
{
"Codec": 1,
+ "CreateTimestamp": "not-zero-number",
"Message": "Y29tcHJlc3NlZC1tZXNzYWdlLTM=",
"Offset": 14,
"OriginalSize": 20,
+ "ProducerId": "text",
"SeqNo": 15,
- "StorageSize": 38
+ "StorageSize": 38,
+ "TimestampDiff": "not-zero-number",
+ "WriteTimestamp": "not-zero-number"
},
{
"Codec": 1,
+ "CreateTimestamp": "not-zero-number",
"Message": "Y29tcHJlc3NlZC1tZXNzYWdlLTQ=",
"Offset": 15,
"OriginalSize": 20,
+ "ProducerId": "text",
"SeqNo": 16,
- "StorageSize": 38
+ "StorageSize": 38,
+ "TimestampDiff": "not-zero-number",
+ "WriteTimestamp": "not-zero-number"
}
],
"StartOffset": 0,
@@ -571,6 +864,7 @@
"Messages": [
{
"Codec": 1,
+ "CreateTimestamp": "not-zero-number",
"Message": "bWVzc2FnZV93aXRoX21ldGE=",
"MessageMetadata": [
{
@@ -584,8 +878,11 @@
],
"Offset": 10,
"OriginalSize": 17,
+ "ProducerId": "text",
"SeqNo": 11,
- "StorageSize": 37
+ "StorageSize": 37,
+ "TimestampDiff": "not-zero-number",
+ "WriteTimestamp": "not-zero-number"
}
],
"StartOffset": 0,
@@ -596,11 +893,15 @@
"Messages": [
{
"Codec": 1,
+ "CreateTimestamp": "not-zero-number",
"Message": "Y29tcHJlc3NlZC1tZXNzYWdlLTk=",
"Offset": 20,
"OriginalSize": 20,
+ "ProducerId": "text",
"SeqNo": 21,
- "StorageSize": 38
+ "StorageSize": 38,
+ "TimestampDiff": "not-zero-number",
+ "WriteTimestamp": "not-zero-number"
}
],
"StartOffset": 0,
@@ -611,50 +912,69 @@
"Messages": [
{
"Codec": 0,
+ "CreateTimestamp": "not-zero-number",
"Message": "bWVzc2FnZS0w",
"Offset": 0,
"OriginalSize": 9,
+ "ProducerId": "text",
"SeqNo": 1,
- "StorageSize": 9
+ "StorageSize": 9,
+ "TimestampDiff": "not-zero-number",
+ "WriteTimestamp": "not-zero-number"
},
{
"Codec": 0,
+ "CreateTimestamp": "not-zero-number",
"Message": "bWVzc2FnZS0x",
"Offset": 1,
"OriginalSize": 9,
+ "ProducerId": "text",
"SeqNo": 2,
- "StorageSize": 9
+ "StorageSize": 9,
+ "TimestampDiff": "not-zero-number",
+ "WriteTimestamp": "not-zero-number"
},
{
"Codec": 0,
+ "CreateTimestamp": "not-zero-number",
"Message": "bWVzc2FnZS0y",
"Offset": 2,
"OriginalSize": 9,
+ "ProducerId": "text",
"SeqNo": 3,
- "StorageSize": 9
+ "StorageSize": 9,
+ "TimestampDiff": "not-zero-number",
+ "WriteTimestamp": "not-zero-number"
},
{
"Codec": 0,
+ "CreateTimestamp": "not-zero-number",
"Message": "bWVzc2FnZS0z",
"Offset": 3,
"OriginalSize": 9,
+ "ProducerId": "text",
"SeqNo": 4,
- "StorageSize": 9
+ "StorageSize": 9,
+ "TimestampDiff": "not-zero-number",
+ "WriteTimestamp": "not-zero-number"
},
{
"Codec": 0,
+ "CreateTimestamp": "not-zero-number",
"Message": "bWVzc2FnZS00",
"Offset": 4,
"OriginalSize": 9,
+ "ProducerId": "text",
"SeqNo": 5,
- "StorageSize": 9
+ "StorageSize": 9,
+ "TimestampDiff": "not-zero-number",
+ "WriteTimestamp": "not-zero-number"
}
],
"StartOffset": 0,
"Truncated": true
}
},
-
"test.test_transfer_describe": {
"connection_params": {
"connection_string": "text",
@@ -665,7 +985,7 @@
"error": {
"issues": [
{
- "message": "Discovery error: /Root/dedicated_db/TopicNotExists: SCHEME_ERROR ({ <main>: Error: Path not found })",
+ "message": "Discovery error: /Root/dedicated_db/TableNotExists: SCHEME_ERROR ({ <main>: Error: Path not found })",
"severity": 1
}
]
@@ -725,9 +1045,9 @@
"subject": "DATABASE-ADMINS"
}
],
- "name": "TestTransfer",
+ "name": "TestAsyncReplication",
"owner": "root@builtin",
- "type": "TRANSFER"
+ "type": "REPLICATION"
}
},
"test.test_viewer_acl": {
@@ -5174,95 +5494,5 @@
},
"test.test_wait_for_cluster_ready": {
"wait_good": true
- },
- "test.test_topic_data": {
- "response_read": {
- "Messages": [
- {
- "Codec": 0, "StorageSize": 9, "SeqNo": 1, "Message": "bWVzc2FnZS0w", "OriginalSize": 9, "Offset": 0,
- "CreateTimestamp": "not-zero-number", "WriteTimestamp": "not-zero-number", "TimestampDiff": "not-zero-number", "ProducerId": "text"
- },
- {
- "Codec": 0, "StorageSize": 9, "SeqNo": 2, "Message": "bWVzc2FnZS0x", "OriginalSize": 9, "Offset": 1,
- "CreateTimestamp": "not-zero-number", "WriteTimestamp": "not-zero-number", "TimestampDiff": "not-zero-number", "ProducerId": "text"
- },
- {
- "Codec": 0, "StorageSize": 9, "SeqNo": 3, "Message": "bWVzc2FnZS0y", "OriginalSize": 9, "Offset": 2,
- "CreateTimestamp": "not-zero-number", "WriteTimestamp": "not-zero-number", "TimestampDiff": "not-zero-number", "ProducerId": "text"
- },
- {
- "Codec": 0, "StorageSize": 9, "SeqNo": 4, "Message": "bWVzc2FnZS0z", "OriginalSize": 9, "Offset": 3,
- "CreateTimestamp": "not-zero-number", "WriteTimestamp": "not-zero-number", "TimestampDiff": "not-zero-number", "ProducerId": "text"
- },
- {
- "Codec": 0, "StorageSize": 9, "SeqNo": 5, "Message": "bWVzc2FnZS00", "OriginalSize": 9, "Offset": 4,
- "CreateTimestamp": "not-zero-number", "WriteTimestamp": "not-zero-number", "TimestampDiff": "not-zero-number", "ProducerId": "text"
- }
- ],
- "StartOffset": 0,
- "EndOffset": 21,
- "Truncated": true
- },
- "response_metadata": {
- "Messages": [
- {
- "Codec": 1, "StorageSize": 37, "SeqNo": 11,
- "MessageMetadata": [
- {"Value": "value1", "Key": "key1"},
- {"Value": "value2", "Key": "key2"}],
- "Message": "bWVzc2FnZV93aXRoX21ldGE=", "OriginalSize": 17, "Offset": 10,
- "CreateTimestamp": "not-zero-number", "WriteTimestamp": "not-zero-number", "TimestampDiff": "not-zero-number", "ProducerId": "text"
- }
- ],
- "StartOffset": 0,
- "EndOffset": 21,
- "Truncated": true
- },
- "response_compressed": {
- "Messages": [
- {
- "Codec": 1, "StorageSize": 38, "SeqNo": 12, "Message": "Y29tcHJlc3NlZC1tZXNzYWdlLTA=", "OriginalSize": 20, "Offset": 11,
- "CreateTimestamp": "not-zero-number", "WriteTimestamp": "not-zero-number", "TimestampDiff": "not-zero-number", "ProducerId": "text"
- },
- {
- "Codec": 1, "StorageSize": 38, "SeqNo": 13, "Message": "Y29tcHJlc3NlZC1tZXNzYWdlLTE=", "OriginalSize": 20, "Offset": 12,
- "CreateTimestamp": "not-zero-number", "WriteTimestamp": "not-zero-number", "TimestampDiff": "not-zero-number", "ProducerId": "text"
- },
- {
- "Codec": 1, "StorageSize": 38, "SeqNo": 14, "Message": "Y29tcHJlc3NlZC1tZXNzYWdlLTI=", "OriginalSize": 20, "Offset": 13,
- "CreateTimestamp": "not-zero-number", "WriteTimestamp": "not-zero-number", "TimestampDiff": "not-zero-number", "ProducerId": "text"
- },
- {
- "Codec": 1, "StorageSize": 38, "SeqNo": 15, "Message": "Y29tcHJlc3NlZC1tZXNzYWdlLTM=", "OriginalSize": 20, "Offset": 14,
- "CreateTimestamp": "not-zero-number", "WriteTimestamp": "not-zero-number", "TimestampDiff": "not-zero-number", "ProducerId": "text"
- },
- {
- "Codec": 1, "StorageSize": 38, "SeqNo": 16, "Message": "Y29tcHJlc3NlZC1tZXNzYWdlLTQ=", "OriginalSize": 20, "Offset": 15,
- "CreateTimestamp": "not-zero-number", "WriteTimestamp": "not-zero-number", "TimestampDiff": "not-zero-number", "ProducerId": "text"
- }
- ],
- "StartOffset": 0,
- "EndOffset": 21,
- "Truncated": true
- },
- "response_not_truncated": {
- "Messages": [
- {
- "Codec": 1, "StorageSize": 38, "SeqNo": 21, "Message": "Y29tcHJlc3NlZC1tZXNzYWdlLTk=", "OriginalSize": 20, "Offset": 20,
- "CreateTimestamp": "not-zero-number", "WriteTimestamp": "not-zero-number", "TimestampDiff": "not-zero-number", "ProducerId": "text"
- }
- ],
- "StartOffset": 0,
- "EndOffset": 21,
- "Truncated": false
- },
- "no_partition": {
- "status_code": 400,
- "text": "Parameter 'partition' is necessary"
- },
- "both_offset_and_ts": {
- "status_code": 400,
- "text": "Only read_timestamp or offset parameter may be specified, not both"
- }
}
}
diff --git a/ydb/core/viewer/tests/test.py b/ydb/core/viewer/tests/test.py
index 7cc4b17c4d..7028e8523f 100644
--- a/ydb/core/viewer/tests/test.py
+++ b/ydb/core/viewer/tests/test.py
@@ -35,11 +35,21 @@ cluster.wait_tenant_up(serverless_db)
databases = [domain_name, dedicated_db, shared_db, serverless_db]
-def call_viewer_api(url):
+def call_viewer_api_get(url):
port = cluster.nodes[1].mon_port
return requests.get("http://localhost:%s%s" % (port, url))
+def call_viewer_api_post(url):
+ port = cluster.nodes[1].mon_port
+ return requests.post("http://localhost:%s%s" % (port, url))
+
+
+def call_viewer_api_delete(url):
+ port = cluster.nodes[1].mon_port
+ return requests.delete("http://localhost:%s%s" % (port, url))
+
+
def get_result(result):
if result.status_code == 200 and result.headers.get("Content-Type") == "application/json":
return result.json()
@@ -49,7 +59,19 @@ def get_result(result):
def call_viewer(url, params=None):
if params is None:
params = {}
- return get_result(call_viewer_api(url + '?' + urlencode(params)))
+ return get_result(call_viewer_api_get(url + '?' + urlencode(params)))
+
+
+def call_viewer_post(url, params=None):
+ if params is None:
+ params = {}
+ return get_result(call_viewer_api_post(url + '?' + urlencode(params)))
+
+
+def call_viewer_delete(url, params=None):
+ if params is None:
+ params = {}
+ return get_result(call_viewer_api_delete(url + '?' + urlencode(params)))
def call_viewer_db(url, params=None):
@@ -62,16 +84,28 @@ def call_viewer_db(url, params=None):
return result
+def get_viewer_db(url, params=None):
+ if params is None:
+ params = {}
+ return call_viewer_db(url, params)
+
+
def get_viewer(url, params=None):
if params is None:
params = {}
return call_viewer(url, params)
-def get_viewer_db(url, params=None):
+def post_viewer(url, params=None):
if params is None:
params = {}
- return call_viewer_db(url, params)
+ return call_viewer_post(url, params)
+
+
+def delete_viewer(url, params=None):
+ if params is None:
+ params = {}
+ return call_viewer_delete(url, params)
wait_good = False
@@ -614,6 +648,31 @@ def test_operations_list_page_bad():
})
+def test_scheme_directory():
+ result = {}
+ result["1-get"] = get_viewer_normalized("/scheme/directory", {
+ 'database': dedicated_db,
+ 'path': dedicated_db
+ })
+ result["2-post"] = post_viewer("/scheme/directory", {
+ 'database': dedicated_db,
+ 'path': dedicated_db + '/test_dir'
+ })
+ result["3-get"] = get_viewer_normalized("/scheme/directory", {
+ 'database': dedicated_db,
+ 'path': dedicated_db
+ })
+ result["4-delete"] = delete_viewer("/scheme/directory", {
+ 'database': dedicated_db,
+ 'path': dedicated_db + '/test_dir'
+ })
+ result["5-get"] = get_viewer_normalized("/scheme/directory", {
+ 'database': dedicated_db,
+ 'path': dedicated_db
+ })
+ return result
+
+
def test_topic_data():
grpc_port = cluster.nodes[1].grpc_port
@@ -711,23 +770,16 @@ def test_topic_data():
def test_transfer_describe():
grpc_port = cluster.nodes[1].grpc_port
endpoint = "grpc://localhost:{}/?database={}".format(grpc_port, dedicated_db)
- lambd = "($x) -> { RETURN <|Id:$x._offset|>; }"
-
- call_viewer("/viewer/query", {
- 'database': dedicated_db,
- 'query': 'CREATE TABLE `TransferTargetTable` ( `Id` Uint64 NOT NULL PRIMARY KEY (Id)) WITH (STORE = COLUMN)',
- 'schema': 'multi'
- })
call_viewer("/viewer/query", {
'database': dedicated_db,
- 'query': 'CREATE TRANSFER `TestTransfer` FROM `TopicNotExists` TO `Table` USING {} WITH (CONNECTION_STRING = "{}")'.format(lambd, endpoint),
+ 'query': 'CREATE ASYNC REPLICATION `TestAsyncReplication` FOR `TableNotExists` AS `TargetAsyncReplicationTable` WITH (CONNECTION_STRING = "{}")'.format(endpoint),
'schema': 'multi'
})
result = get_viewer_normalized("/viewer/describe_replication", {
'database': dedicated_db,
- 'path': '{}/TestTransfer'.format(dedicated_db),
+ 'path': '{}/TestAsyncReplication'.format(dedicated_db),
'include_stats': 'true',
'enums': 'true'
})
diff --git a/ydb/core/viewer/viewer_describe.h b/ydb/core/viewer/viewer_describe.h
index 2ec8952dc7..01eb62fdac 100644
--- a/ydb/core/viewer/viewer_describe.h
+++ b/ydb/core/viewer/viewer_describe.h
@@ -48,6 +48,9 @@ public:
}
void Bootstrap() override {
+ if (NeedToRedirect()) {
+ return;
+ }
const auto& params(Event->Get()->Request.GetParams());
JsonSettings.EnumAsNumbers = !FromStringWithDefault<bool>(params.Get("enums"), false);
JsonSettings.UI64AsString = !FromStringWithDefault<bool>(params.Get("ui64"), false);
diff --git a/ydb/core/viewer/viewer_topic_data.h b/ydb/core/viewer/viewer_topic_data.h
index df1578669e..5bc4a66179 100644
--- a/ydb/core/viewer/viewer_topic_data.h
+++ b/ydb/core/viewer/viewer_topic_data.h
@@ -95,10 +95,14 @@ public:
description: partition to read from
required: true
type: integer
+ - name: read_timestamp
+ in: query
+ description: min message timestamp to read from
+ required: false
- name: offset
in: query
description: start offset to read from
- required: true
+ required: false
type: integer
- name: limit
in: query
diff --git a/ydb/core/ydb_convert/table_description.cpp b/ydb/core/ydb_convert/table_description.cpp
index 94041248b9..a676d8f018 100644
--- a/ydb/core/ydb_convert/table_description.cpp
+++ b/ydb/core/ydb_convert/table_description.cpp
@@ -560,12 +560,17 @@ void FillColumnDescription(Ydb::Table::CreateTableRequest& out,
FillColumnDescriptionImpl(out, splitKeyType, in);
}
-void FillColumnDescription(Ydb::Table::DescribeTableResult& out, const NKikimrSchemeOp::TColumnTableDescription& in) {
+template <typename TYdbProto>
+void FillColumnDescriptionImpl(TYdbProto& out, const NKikimrSchemeOp::TColumnTableDescription& in) {
auto& schema = in.GetSchema();
for (const auto& column : schema.GetColumns()) {
auto newColumn = out.add_columns();
AddColumn(newColumn, column);
+
+ if (column.HasColumnFamilyName()) {
+ newColumn->set_family(column.GetColumnFamilyName());
+ }
}
for (auto& name : schema.GetKeyColumnNames()) {
@@ -592,6 +597,14 @@ void FillColumnDescription(Ydb::Table::DescribeTableResult& out, const NKikimrSc
out.set_store_type(Ydb::Table::StoreType::STORE_TYPE_COLUMN);
}
+void FillColumnDescription(Ydb::Table::DescribeTableResult& out, const NKikimrSchemeOp::TColumnTableDescription& in) {
+ FillColumnDescriptionImpl(out, in);
+}
+
+void FillColumnDescription(Ydb::Table::CreateTableRequest& out, const NKikimrSchemeOp::TColumnTableDescription& in) {
+ FillColumnDescriptionImpl(out, in);
+}
+
bool ExtractColumnTypeInfo(NScheme::TTypeInfo& outTypeInfo, TString& outTypeMod,
const Ydb::Type& inType, Ydb::StatusIds::StatusCode& status, TString& error)
{
@@ -1415,6 +1428,50 @@ void FillStorageSettings(Ydb::Table::CreateTableRequest& out,
FillStorageSettingsImpl(out, in);
}
+void FillColumnFamily(Ydb::Table::ColumnFamily& out, const NKikimrSchemeOp::TFamilyDescription& in, bool isColumnTable) {
+ if (in.HasName() && !in.GetName().empty()) {
+ out.set_name(in.GetName());
+ } else if (IsDefaultFamily(in)) {
+ out.set_name("default");
+ } else if (in.HasId()) {
+ out.set_name(TStringBuilder() << "<id: " << in.GetId() << ">");
+ } else {
+ out.set_name(in.GetName());
+ }
+
+ if (!isColumnTable && in.HasStorageConfig() && in.GetStorageConfig().HasData()) {
+ FillStoragePool(&out, &Ydb::Table::ColumnFamily::mutable_data, in.GetStorageConfig().GetData());
+ }
+
+ if (in.HasColumnCodec()) {
+ switch (in.GetColumnCodec()) {
+ case NKikimrSchemeOp::ColumnCodecPlain:
+ out.set_compression(Ydb::Table::ColumnFamily::COMPRESSION_NONE);
+ break;
+ case NKikimrSchemeOp::ColumnCodecLZ4:
+ out.set_compression(Ydb::Table::ColumnFamily::COMPRESSION_LZ4);
+ break;
+ case NKikimrSchemeOp::ColumnCodecZSTD: {
+ if (!isColumnTable) {
+ break; // FIXME: not supported
+ }
+ out.set_compression(Ydb::Table::ColumnFamily::COMPRESSION_ZSTD);
+ break;
+ }
+ }
+ } else if (in.GetCodec() == 1) {
+ // Legacy setting, see datashard
+ out.set_compression(Ydb::Table::ColumnFamily::COMPRESSION_LZ4);
+ } else {
+ out.set_compression(Ydb::Table::ColumnFamily::COMPRESSION_NONE);
+ }
+
+ // Check legacy settings for permanent in-memory cache
+ if (in.GetInMemory() || in.GetColumnCache() == NKikimrSchemeOp::ColumnCacheEver) {
+ out.set_keep_in_memory(Ydb::FeatureFlag::ENABLED);
+ }
+}
+
template <typename TYdbProto>
void FillColumnFamiliesImpl(TYdbProto& out,
const NKikimrSchemeOp::TTableDescription& in) {
@@ -1432,42 +1489,7 @@ void FillColumnFamiliesImpl(TYdbProto& out,
const auto& family = partConfig.GetColumnFamilies(i);
auto* r = out.add_column_families();
- if (family.HasName() && !family.GetName().empty()) {
- r->set_name(family.GetName());
- } else if (IsDefaultFamily(family)) {
- r->set_name("default");
- } else if (family.HasId()) {
- r->set_name(TStringBuilder() << "<id: " << family.GetId() << ">");
- } else {
- r->set_name(family.GetName());
- }
-
- if (family.HasStorageConfig() && family.GetStorageConfig().HasData()) {
- FillStoragePool(r, &Ydb::Table::ColumnFamily::mutable_data, family.GetStorageConfig().GetData());
- }
-
- if (family.HasColumnCodec()) {
- switch (family.GetColumnCodec()) {
- case NKikimrSchemeOp::ColumnCodecPlain:
- r->set_compression(Ydb::Table::ColumnFamily::COMPRESSION_NONE);
- break;
- case NKikimrSchemeOp::ColumnCodecLZ4:
- r->set_compression(Ydb::Table::ColumnFamily::COMPRESSION_LZ4);
- break;
- case NKikimrSchemeOp::ColumnCodecZSTD:
- break; // FIXME: not supported
- }
- } else if (family.GetCodec() == 1) {
- // Legacy setting, see datashard
- r->set_compression(Ydb::Table::ColumnFamily::COMPRESSION_LZ4);
- } else {
- r->set_compression(Ydb::Table::ColumnFamily::COMPRESSION_NONE);
- }
-
- // Check legacy settings for permanent in-memory cache
- if (family.GetInMemory() || family.GetColumnCache() == NKikimrSchemeOp::ColumnCacheEver) {
- r->set_keep_in_memory(Ydb::FeatureFlag::ENABLED);
- }
+ FillColumnFamily(*r, family, false);
}
}
@@ -1481,6 +1503,17 @@ void FillColumnFamilies(Ydb::Table::CreateTableRequest& out,
FillColumnFamiliesImpl(out, in);
}
+void FillColumnFamilies(Ydb::Table::CreateTableRequest& out,
+ const NKikimrSchemeOp::TColumnTableDescription& in) {
+ const auto& schema = in.GetSchema();
+ for (size_t i = 0; i < schema.ColumnFamiliesSize(); ++i) {
+ const auto& family = schema.GetColumnFamilies(i);
+ auto* r = out.add_column_families();
+
+ FillColumnFamily(*r, family, true);
+ }
+}
+
void FillAttributes(Ydb::Table::DescribeTableResult& out,
const NKikimrSchemeOp::TPathDescription& in) {
FillAttributesImpl(out, in);
diff --git a/ydb/core/ydb_convert/table_description.h b/ydb/core/ydb_convert/table_description.h
index 2b73db84ab..b403707168 100644
--- a/ydb/core/ydb_convert/table_description.h
+++ b/ydb/core/ydb_convert/table_description.h
@@ -57,6 +57,7 @@ void FillColumnDescription(Ydb::Table::DescribeTableResult& out,
void FillColumnDescription(Ydb::Table::CreateTableRequest& out,
NKikimrMiniKQL::TType& splitKeyType, const NKikimrSchemeOp::TTableDescription& in);
void FillColumnDescription(Ydb::Table::DescribeTableResult& out, const NKikimrSchemeOp::TColumnTableDescription& in);
+void FillColumnDescription(Ydb::Table::CreateTableRequest& out, const NKikimrSchemeOp::TColumnTableDescription& in);
// in
bool FillColumnDescription(NKikimrSchemeOp::TTableDescription& out,
const google::protobuf::RepeatedPtrField<Ydb::Table::ColumnMeta>& in, Ydb::StatusIds::StatusCode& status, TString& error);
@@ -108,6 +109,8 @@ void FillColumnFamilies(Ydb::Table::DescribeTableResult& out,
const NKikimrSchemeOp::TTableDescription& in);
void FillColumnFamilies(Ydb::Table::CreateTableRequest& out,
const NKikimrSchemeOp::TTableDescription& in);
+void FillColumnFamilies(Ydb::Table::CreateTableRequest& out,
+ const NKikimrSchemeOp::TColumnTableDescription& in);
// out
void FillAttributes(Ydb::Table::DescribeTableResult& out,
diff --git a/ydb/docs/en/core/concepts/federated_query/s3/_includes/date_formats.md b/ydb/docs/en/core/concepts/federated_query/s3/_includes/date_formats.md
new file mode 100644
index 0000000000..845e7f4068
--- /dev/null
+++ b/ydb/docs/en/core/concepts/federated_query/s3/_includes/date_formats.md
@@ -0,0 +1,7 @@
+|Name|Description|Example|
+|---|---|---|
+|`POSIX`|String in `%Y-%m-%d %H:%M:%S` format|2001-03-26 16:10:00|
+|`ISO`|Format, corresponding to the [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) standard|2001-03-26 16:10:00Z|
+|`UNIX_TIME_SECONDS`|Number of seconds that have elapsed since the 1st of January 1970 (00:00:00 UTC)|985623000|
+|`UNIX_TIME_MILLISECONDS`|Number of milliseconds that have elapsed since the 1st of January 1970 (00:00:00 UTC)|985623000000|
+|`UNIX_TIME_MICROSECONDS`|Number of microseconds that have elapsed since the 1st of January 1970 (00:00:00 UTC)|985623000000000| \ No newline at end of file
diff --git a/ydb/docs/en/core/concepts/federated_query/s3/_includes/format_settings.md b/ydb/docs/en/core/concepts/federated_query/s3/_includes/format_settings.md
new file mode 100644
index 0000000000..60f2fd1ac1
--- /dev/null
+++ b/ydb/docs/en/core/concepts/federated_query/s3/_includes/format_settings.md
@@ -0,0 +1,10 @@
+|Setting name|Description|Possible values|
+|----|----|---|
+|`file_pattern`|File name template|File name template string. Wildcards `*` are supported.|
+|`data.interval.unit`|Unit for parsing `Interval` type|`MICROSECONDS`, `MILLISECONDS`, `SECONDS`, `MINUTES`, `HOURS`, `DAYS`, `WEEKS`|
+|`data.datetime.format_name`|Predefined format in which `Datetime` data is stored|`POSIX`, `ISO`|
+|`data.datetime.format`|Strftime-like template which defines how `Datetime` data is stored|Formatting string, for example: `%Y-%m-%dT%H-%M`|
+|`data.timestamp.format_name`|Predefined format in which `Timestamp` data is stored|`POSIX`, `ISO`, `UNIX_TIME_SECONDS`, `UNIX_TIME_MILLISECONDS`, `UNIX_TIME_MICROSECONDS`|
+|`data.timestamp.format`|Strftime-like template which defines how `Timestamp` data is stored|Formatting string, for example: `%Y-%m-%dT%H-%M-%S`|
+|`data.date.format`|The format in which `Date` data is stored|Formatting string, for example: `%Y-%m-%d`|
+|`csv_delimiter`|Delimiter for `csv_with_names` format|Any character (UTF-8)|
diff --git a/ydb/docs/en/core/concepts/federated_query/s3/_includes/path_format.md b/ydb/docs/en/core/concepts/federated_query/s3/_includes/path_format.md
index be0d6abf3b..4b15b4ea52 100644
--- a/ydb/docs/en/core/concepts/federated_query/s3/_includes/path_format.md
+++ b/ydb/docs/en/core/concepts/federated_query/s3/_includes/path_format.md
@@ -1,5 +1,5 @@
|Path format|Description|Example|
|----|----|---|
-|Path ends with a `/`|Path to a directory|The path `/a` addresses all contents of the directory:<br/>`/a/b/c/d/1.txt`<br/>`/a/b/2.csv`|
+|Path ends with a `/`|Path to a directory|The path `/a/` addresses all contents of the directory:<br/>`/a/b/c/d/1.txt`<br/>`/a/b/2.csv`|
|Path contains a wildcard character `*`|Any files nested in the path|The path `/a/*.csv` addresses files in directories:<br/>`/a/b/c/1.csv`<br/>`/a/2.csv`<br/>`/a/b/c/d/e/f/g/2.csv`|
|Path does not end with `/` and does not contain wildcard characters|Path to a single file|The path `/a/b.csv` addresses the specific file `/a/b.csv`|
diff --git a/ydb/docs/en/core/concepts/federated_query/s3/external_data_source.md b/ydb/docs/en/core/concepts/federated_query/s3/external_data_source.md
index 2fb2fa2679..d97cfd2572 100644
--- a/ydb/docs/en/core/concepts/federated_query/s3/external_data_source.md
+++ b/ydb/docs/en/core/concepts/federated_query/s3/external_data_source.md
@@ -74,7 +74,8 @@ FROM
WITH(
FORMAT = "<file_format>",
COMPRESSION = "<compression>",
- SCHEMA = (<schema_definition>))
+ SCHEMA = (<schema_definition>),
+ <format_settings>)
WHERE
<filter>;
```
@@ -86,6 +87,7 @@ Where:
* `file_format` — the [data format](formats.md#formats) in the files.
* `compression` — the [compression format](formats.md#compression_formats) of the files.
* `schema_definition` — the [schema definition](#schema) of the data stored in the files.
+* `format_settings` — optional [format settings](#format_settings).
### Data schema description {#schema}
@@ -137,12 +139,22 @@ Where:
As a result of executing such a query, the names and types of fields will be inferred.
-### Data path formats {#path_format}
+### Data path formats specified in `file_path` {#path_format}
-In {{ ydb-full-name }}, the following data paths are supported:
+In {{ ydb-full-name }}, the following data paths are supported:
{% include [!](_includes/path_format.md) %}
+### Format settings {#format_settings}
+
+In {{ ydb-full-name }}, the following format settings are supported:
+
+{% include [!](_includes/format_settings.md) %}
+
+You can only specify `file_pattern` setting if `file_path` is a path to a directory. Any conversion specifiers supported by [`strftime`(C99)](https://en.cppreference.com/w/c/chrono/strftime) function can be used in formatting strings. In {{ ydb-full-name }}, the following `Datetime` and `Timestamp` formats are supported:
+
+{% include [!](_includes/date_formats.md) %}
+
## Example {#read_example}
Example query to read data from S3 ({{ objstorage-full-name }}):
@@ -151,21 +163,28 @@ Example query to read data from S3 ({{ objstorage-full-name }}):
SELECT
*
FROM
- connection.`folder/filename.csv`
+ connection.`folder/`
WITH(
FORMAT = "csv_with_names",
+ COMPRESSION="gzip",
SCHEMA =
(
- Year Int32,
- Manufacturer Utf8,
- Model Utf8,
- Price Double
- )
+ Id Int32 NOT NULL,
+ UserId Int32 NOT NULL,
+ TripDate Date NOT NULL,
+ TripDistance Double NOT NULL,
+ UserComment Utf8
+ ),
+ FILE_PATTERN="*.csv.gz",
+ `DATA.DATE.FORMAT`="%Y-%m-%d",
+ CSV_DELIMITER='/'
);
```
Where:
* `connection` — the name of the external data source leading to the S3 bucket ({{ objstorage-full-name }}).
-* `folder/filename.csv` — the path to the file in the S3 bucket ({{ objstorage-full-name }}).
-* `SCHEMA` — the data schema description in the file. \ No newline at end of file
+* `folder/` — the path to the directory in the S3 bucket ({{ objstorage-full-name }}).
+* `SCHEMA` — the data schema description in the file.
+* `*.csv.gz` — file name template.
+* `%Y-%m-%d` — format in which `Date` type is stored in S3.
diff --git a/ydb/docs/en/core/concepts/federated_query/s3/external_table.md b/ydb/docs/en/core/concepts/federated_query/s3/external_table.md
index f8456f2ef5..b3787c3500 100644
--- a/ydb/docs/en/core/concepts/federated_query/s3/external_table.md
+++ b/ydb/docs/en/core/concepts/federated_query/s3/external_table.md
@@ -37,6 +37,8 @@ Where:
- `csv_with_names` - one of the [permitted data storage formats](formats.md);
- `gzip` - one of the [permitted compression algorithms](formats.md#compression).
+You can also specify [format settings](external_data_source.md#format_settings).
+
## Data model {#data-model}
Reading data using external tables from S3 ({{ objstorage-name }}) is done with regular SQL queries as if querying a normal table.
diff --git a/ydb/docs/en/core/concepts/topic.md b/ydb/docs/en/core/concepts/topic.md
index cf8ecf98c3..c09ad6dd30 100644
--- a/ydb/docs/en/core/concepts/topic.md
+++ b/ydb/docs/en/core/concepts/topic.md
@@ -128,9 +128,9 @@ A source ID is an arbitrary string up to 2048 characters long. This is usually t
#### Sample source IDs {#source-id-examples}
-| Type | ID | Description |
---- | --- | ---
-| File | Server ID | Files are used to store application logs. In this case, it's convenient to use the server ID as a source ID. |
+| Type | ID | Description |
+|--------------| --- |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| File | Server ID | Files are used to store application logs. In this case, it's convenient to use the server ID as a source ID. |
| User actions | ID of the class of user actions, such as "viewing a page", "making a purchase", and so on. | It's important to handle user actions in the order they were performed by the user. At the same time, there is no need to handle every single user action in one application. In this case, it's convenient to group user actions by class. |
### Message group ID {#group-id}
@@ -139,9 +139,9 @@ A message group ID is an arbitrary string up to 2048 characters long. This is us
#### Sample message group IDs {#group-id-examples}
-| Type | ID | Description |
---- | --- | ---
-| File | Full file path | All data from the server and the file it hosts will be sent to the same partition. |
+| Type | ID | Description |
+|--------------| --- |------------------------------------------------------------------------------------------------------------------------------------------|
+| File | Full file path | All data from the server and the file it hosts will be sent to the same partition. |
| User actions | User ID | It's important to handle user actions in the order they were performed. In this case, it's convenient to use the user ID as a source ID. |
## Message sequence numbers {#seqno}
@@ -152,9 +152,9 @@ Sequence numbers are not used if [no-deduplication mode](#no-dedup) is enabled.
### Sample message sequence numbers {#seqno-examples}
-| Type | Example | Description |
---- | --- | ---
-| File | Offset of transferred data from the beginning of a file | You can't delete lines from the beginning of a file, since this will lead to skipping some data as duplicates or losing some data. |
+| Type | Example | Description |
+|----------| --- |------------------------------------------------------------------------------------------------------------------------------------|
+| File | Offset of transferred data from the beginning of a file | You can't delete lines from the beginning of a file, since this will lead to skipping some data as duplicates or losing some data. |
| DB table | Auto-increment record ID |
## Message retention period {#retention-time}
@@ -167,9 +167,9 @@ When transferring data, the producer app indicates that a message can be compres
Supported codecs are explicitly listed in each topic. When making an attempt to write data to a topic with a codec that is not supported, a write error occurs.
-| Codec | Description |
---- | ---
-| `raw` | No compression. |
+| Codec | Description |
+|--------|---------------------------------------------------------|
+| `raw` | No compression. |
| `gzip` | [Gzip](https://en.wikipedia.org/wiki/Gzip) compression. |
{% if audience != "external" %}
`lzop` | [lzop](https://en.wikipedia.org/wiki/Lzop) compression.
diff --git a/ydb/docs/en/core/contributor/load-actors-kqp.md b/ydb/docs/en/core/contributor/load-actors-kqp.md
index df17294c78..efb8a7aeab 100644
--- a/ydb/docs/en/core/contributor/load-actors-kqp.md
+++ b/ydb/docs/en/core/contributor/load-actors-kqp.md
@@ -13,16 +13,16 @@ Before this test, the necessary tables are created. After it's completed, they a
{% include [load-actors-params](../_includes/load-actors-params.md) %}
-| Parameter | Description |
---- | ---
-| `DurationSeconds` | Load duration in seconds. |
-| `WindowDuration` | Statistics aggregation window duration. |
-| `WorkingDir` | Path to the directory to create test tables in. |
-| `NumOfSessions` | The number of parallel threads creating the load. Each thread writes data to its own session. |
-| `DeleteTableOnFinish` | Set it to `False` if you do not want the created tables deleted after the load stops. This might be helpful when a large table is created upon the actor's first run, and then queries are made to that table. |
-| `UniformPartitionsCount` | The number of partitions created in test tables. |
-| `WorkloadType` | Type of load.<br/>For Stock:<ul><li>`0`: InsertRandomOrder.</li><li>`1`: SubmitRandomOrder.</li><li>`2`: SubmitSameOrder.</li><li>`3`: GetRandomCustomerHistory.</li><li>`4`: GetCustomerHistory.</li></ul>For Key-Value:<ul><li>`0`: UpsertRandom.</li><li>`1`: InsertRandom.</li><li>`2`: SelectRandom.</li></ul> |
-| `Workload` | Kind of load.<br/>`Stock`:<ul><li>`ProductCount`: Number of products.</li><li>`Quantity`: Quantity of each product in stock.</li><li>`OrderCount`: Initial number of orders in the database.</li><li>`Limit`: Minimum number of shards for tables.</li></ul>`Kv`:<ul><li>`InitRowCount`: Before load is generated, the load actor writes the specified number of rows to the table.</li><li>`StringLen`: Length of the `value` string.</li><li>`ColumnsCnt`: Number of columns to use in the table.</li><li>`RowsCnt`: Number of rows to insert or read per SQL query.</li></ul> |
+| Parameter | Description |
+|--------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `DurationSeconds` | Load duration in seconds. |
+| `WindowDuration` | Statistics aggregation window duration. |
+| `WorkingDir` | Path to the directory to create test tables in. |
+| `NumOfSessions` | The number of parallel threads creating the load. Each thread writes data to its own session. |
+| `DeleteTableOnFinish` | Set it to `False` if you do not want the created tables deleted after the load stops. This might be helpful when a large table is created upon the actor's first run, and then queries are made to that table. |
+| `UniformPartitionsCount` | The number of partitions created in test tables. |
+| `WorkloadType` | Type of load.<br/>For Stock:<ul><li>`0`: InsertRandomOrder.</li><li>`1`: SubmitRandomOrder.</li><li>`2`: SubmitSameOrder.</li><li>`3`: GetRandomCustomerHistory.</li><li>`4`: GetCustomerHistory.</li></ul>For Key-Value:<ul><li>`0`: UpsertRandom.</li><li>`1`: InsertRandom.</li><li>`2`: SelectRandom.</li></ul> |
+| `Workload` | Kind of load.<br/>`Stock`:<ul><li>`ProductCount`: Number of products.</li><li>`Quantity`: Quantity of each product in stock.</li><li>`OrderCount`: Initial number of orders in the database.</li><li>`Limit`: Minimum number of shards for tables.</li></ul>`Kv`:<ul><li>`InitRowCount`: Before load is generated, the load actor writes the specified number of rows to the table.</li><li>`StringLen`: Length of the `value` string.</li><li>`ColumnsCnt`: Number of columns to use in the table.</li><li>`RowsCnt`: Number of rows to insert or read per SQL query.</li></ul> |
## Examples {#example}
diff --git a/ydb/docs/en/core/contributor/load-actors-memory.md b/ydb/docs/en/core/contributor/load-actors-memory.md
index c1f6cec2e0..f14e91a164 100644
--- a/ydb/docs/en/core/contributor/load-actors-memory.md
+++ b/ydb/docs/en/core/contributor/load-actors-memory.md
@@ -10,11 +10,11 @@ This ad-hoc actor is used for testing specific functionality. This is not a load
## Actor parameters {#options}
-| Parameter | Description |
---- | ---
-| `DurationSeconds` | Load duration in seconds. |
-| `BlockSize` | Allocated block size in bytes. |
-| `IntervalUs` | Interval between block allocations in microseconds. |
+| Parameter | Description |
+|-------------------|-----------------------------------------------------|
+| `DurationSeconds` | Load duration in seconds. |
+| `BlockSize` | Allocated block size in bytes. |
+| `IntervalUs` | Interval between block allocations in microseconds. |
## Examples {#examples}
diff --git a/ydb/docs/en/core/contributor/load-actors-pdisk-log.md b/ydb/docs/en/core/contributor/load-actors-pdisk-log.md
index f2fa2be5ab..7416bac570 100644
--- a/ydb/docs/en/core/contributor/load-actors-pdisk-log.md
+++ b/ydb/docs/en/core/contributor/load-actors-pdisk-log.md
@@ -14,18 +14,18 @@ This ad-hoc actor is used for testing specific functionality. This is not a load
{% include [load-actors-params](../_includes/load-actors-params.md) %}
-| Parameter | Description |
---- | ---
-| `PDiskId` | ID of the Pdisk being loaded on the node. |
-| `PDiskGuid` | Globally unique ID of the PDisk being loaded. |
-| `VDiskId` | Parameters of the VDisk used to generate load.<ul><li>`GroupID`: Group ID.</li><li>`GroupGeneration`: Group generation.</li><li>`Ring`: Group ring ID.</li><li>`Domain`: Ring fail domain ID.</li><li>`VDisk`: Index of the VDisk in the fail domain.</li></ul> |
-| `MaxInFlight` | Number of simultaneously processed requests. |
-| `SizeIntervalMin` | Minimum size of log record in bytes. |
-| `SizeIntervalMax` | Maximum size of log record in bytes. |
-| `BurstInterval` | Interval between logging sessions in bytes. |
-| `BurstSize` | Total amount of data to log per session, in bytes. |
-| `StorageDuration` | Virtual time in bytes. Indicates how long the VDisk should store its data in the log. |
-| `IsWardenlessTest` | Set it to `False` in case the PDiskReadLoad actor is run on the cluster; otherwise, e.g. when it is run during unit tests, set it to `True`. |
+| Parameter | Description |
+|--------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `PDiskId` | ID of the Pdisk being loaded on the node. |
+| `PDiskGuid` | Globally unique ID of the PDisk being loaded. |
+| `VDiskId` | Parameters of the VDisk used to generate load.<ul><li>`GroupID`: Group ID.</li><li>`GroupGeneration`: Group generation.</li><li>`Ring`: Group ring ID.</li><li>`Domain`: Ring fail domain ID.</li><li>`VDisk`: Index of the VDisk in the fail domain.</li></ul> |
+| `MaxInFlight` | Number of simultaneously processed requests. |
+| `SizeIntervalMin` | Minimum size of log record in bytes. |
+| `SizeIntervalMax` | Maximum size of log record in bytes. |
+| `BurstInterval` | Interval between logging sessions in bytes. |
+| `BurstSize` | Total amount of data to log per session, in bytes. |
+| `StorageDuration` | Virtual time in bytes. Indicates how long the VDisk should store its data in the log. |
+| `IsWardenlessTest` | Set it to `False` in case the PDiskReadLoad actor is run on the cluster; otherwise, e.g. when it is run during unit tests, set it to `True`. |
## Examples {#example}
diff --git a/ydb/docs/en/core/contributor/load-actors-pdisk-read.md b/ydb/docs/en/core/contributor/load-actors-pdisk-read.md
index 55d427892c..85a06e62b3 100644
--- a/ydb/docs/en/core/contributor/load-actors-pdisk-read.md
+++ b/ydb/docs/en/core/contributor/load-actors-pdisk-read.md
@@ -11,17 +11,17 @@ You can generate two types of load:
{% include [load-actors-params](../_includes/load-actors-params.md) %}
-| Parameter | Description |
---- | ---
-| `PDiskId` | ID of the Pdisk being loaded on the node. |
-| `PDiskGuid` | Globally unique ID of the PDisk being loaded. |
-| `VDiskId` | The load is generated on behalf of a VDisk with the following parameters:<ul><li>`GroupID`: Group ID.</li><li>`GroupGeneration`: Group generation.</li><li>`Ring`: Group ring ID.</li><li>`Domain`: Ring fail domain ID.</li><li>`VDisk`: Index of the VDisk in the fail domain.</li></ul> |
-| `Chunks` | Chunk parameters.<br/>`Slots`: Number of slots per chunk, determines the write size.<br/>You can specify multiple `Chunks`, in which case a specific chunk to read data from is selected based on its `Weight`. |
-| `DurationSeconds` | Load duration in seconds. |
-| `IntervalMsMin`,<br/>`IntervalMsMax` | Minimum and maximum intervals between requests under interval load, in milliseconds. The interval value is selected randomly from the specified range. |
-| `InFlightReads` | Number of simultaneously processed read requests. |
-| `Sequential` | Type of reads.<ul><li>`True`: Sequential.</li><li>`False`: Random.</li></ul> |
-| `IsWardenlessTest` | Set it to `False` in case the PDiskReadLoad actor is run on the cluster; otherwise, e.g. when it is run during unit tests, set it to `True`. |
+| Parameter | Description |
+|--------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `PDiskId` | ID of the Pdisk being loaded on the node. |
+| `PDiskGuid` | Globally unique ID of the PDisk being loaded. |
+| `VDiskId` | The load is generated on behalf of a VDisk with the following parameters:<ul><li>`GroupID`: Group ID.</li><li>`GroupGeneration`: Group generation.</li><li>`Ring`: Group ring ID.</li><li>`Domain`: Ring fail domain ID.</li><li>`VDisk`: Index of the VDisk in the fail domain.</li></ul> |
+| `Chunks` | Chunk parameters.<br/>`Slots`: Number of slots per chunk, determines the write size.<br/>You can specify multiple `Chunks`, in which case a specific chunk to read data from is selected based on its `Weight`. |
+| `DurationSeconds` | Load duration in seconds. |
+| `IntervalMsMin`,<br/>`IntervalMsMax` | Minimum and maximum intervals between requests under interval load, in milliseconds. The interval value is selected randomly from the specified range. |
+| `InFlightReads` | Number of simultaneously processed read requests. |
+| `Sequential` | Type of reads.<ul><li>`True`: Sequential.</li><li>`False`: Random.</li></ul> |
+| `IsWardenlessTest` | Set it to `False` in case the PDiskReadLoad actor is run on the cluster; otherwise, e.g. when it is run during unit tests, set it to `True`. |
## Examples {#examples}
diff --git a/ydb/docs/en/core/contributor/load-actors-pdisk-write.md b/ydb/docs/en/core/contributor/load-actors-pdisk-write.md
index 5a96ef0525..d24e020ffa 100644
--- a/ydb/docs/en/core/contributor/load-actors-pdisk-write.md
+++ b/ydb/docs/en/core/contributor/load-actors-pdisk-write.md
@@ -11,18 +11,18 @@ You can generate two types of load:
{% include [load-actors-params](../_includes/load-actors-params.md) %}
-| Parameter | Description |
---- | ---
-| `PDiskId` | ID of the Pdisk being loaded on the node. |
-| `PDiskGuid` | Globally unique ID of the PDisk being loaded. |
-| `VDiskId` | The load is generated on behalf of a VDisk with the following parameters:<ul><li>`GroupID`: Group ID.</li><li>`GroupGeneration`: Group generation.</li><li>`Ring`: Group ring ID.</li><li>`Domain`: Ring fail domain ID.</li><li>`VDisk`: Index of the VDisk in the fail domain.</li></ul> |
-| `Chunks` | Chunk parameters.<br/>`Slots`: Number of slots per chunk, determines the write size.<br/>You can specify multiple `Chunks`, in which case a specific chunk to write data to is selected based on its `Weight`. |
-| `DurationSeconds` | Load duration in seconds. |
-| `IntervalMsMin`,<br/>`IntervalMsMax` | Minimum and maximum intervals between requests under interval load, in milliseconds. The interval value is selected randomly from the specified range. |
-| `InFlightWrites` | Number of simultaneously processed write requests. |
-| `LogMode` | Logging mode. In `LOG_SEQUENTIAL` mode, data is first written to a chunk and then, once the write is committed, to a log. |
-| `Sequential` | Type of writes.<ul><li>`True`: Sequential.</li><li>`False`: Random.</li></ul> |
-| `IsWardenlessTest` | Set it to `False` in case the PDiskReadLoad actor is run on the cluster; otherwise, e.g. when it is run during unit tests, set it to `True`. |
+| Parameter | Description |
+|--------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `PDiskId` | ID of the PDisk being loaded on the node. |
+| `PDiskGuid` | Globally unique ID of the PDisk being loaded. |
+| `VDiskId` | The load is generated on behalf of a VDisk with the following parameters:<ul><li>`GroupID`: Group ID.</li><li>`GroupGeneration`: Group generation.</li><li>`Ring`: Group ring ID.</li><li>`Domain`: Ring fail domain ID.</li><li>`VDisk`: Index of the VDisk in the fail domain.</li></ul> |
+| `Chunks` | Chunk parameters.<br/>`Slots`: Number of slots per chunk, determines the write size.<br/>You can specify multiple `Chunks`, in which case a specific chunk to write data to is selected based on its `Weight`. |
+| `DurationSeconds` | Load duration in seconds. |
+| `IntervalMsMin`,<br/>`IntervalMsMax` | Minimum and maximum intervals between requests under interval load, in milliseconds. The interval value is selected randomly from the specified range. |
+| `InFlightWrites` | Number of simultaneously processed write requests. |
+| `LogMode` | Logging mode. In `LOG_SEQUENTIAL` mode, data is first written to a chunk and then, once the write is committed, to a log. |
+| `Sequential` | Type of writes.<ul><li>`True`: Sequential.</li><li>`False`: Random.</li></ul> |
+| `IsWardenlessTest` | Set it to `False` in case the PDiskReadLoad actor is run on the cluster; otherwise, e.g. when it is run during unit tests, set it to `True`. |
## Examples {#example}
diff --git a/ydb/docs/en/core/contributor/load-actors-stop.md b/ydb/docs/en/core/contributor/load-actors-stop.md
index 21def5ea14..9f09fdd57b 100644
--- a/ydb/docs/en/core/contributor/load-actors-stop.md
+++ b/ydb/docs/en/core/contributor/load-actors-stop.md
@@ -4,10 +4,10 @@ Using this command, you can stop either entire load or only the specified part o
## Actor parameters {#options}
-| Parameter | Description |
---- | ---
-| `Tag` | Tag of the load actor to stop. You can view the tag in the cluster Embedded UI. |
-| `RemoveAllTags` | If this parameter value is set to `True`, all the load actors are stopped. |
+| Parameter | Description |
+|-----------------|---------------------------------------------------------------------------------|
+| `Tag` | Tag of the load actor to stop. You can view the tag in the cluster Embedded UI. |
+| `RemoveAllTags` | If this parameter value is set to `True`, all the load actors are stopped. |
## Examples {#examples}
diff --git a/ydb/docs/en/core/contributor/load-actors-storage.md b/ydb/docs/en/core/contributor/load-actors-storage.md
index 7428f20fbc..e526cb1bbb 100644
--- a/ydb/docs/en/core/contributor/load-actors-storage.md
+++ b/ydb/docs/en/core/contributor/load-actors-storage.md
@@ -12,39 +12,39 @@ You can generate three types of load:
{% include [load-actors-params](../_includes/load-actors-params.md) %}
-| Parameter | Description |
---- | ---
-| `DurationSeconds` | Load duration. The timer starts upon completion of the initial data allocation. |
-| `Tablets` | The load is generated on behalf of a tablet with the following parameters:<ul><li>`TabletId`: Tablet ID. It must be unique for each load actor across all the cluster nodes. This parameter and `TabletName` are mutually exclusive.</li><li>`TabletName`: Tablet name. If the parameter is set, tablets' IDs will be assigned automatically, tablets launched on the same node with the same name will be given the same ID, tablets launched on different nodes will be given different IDs.</li><li>`Channel`: Tablet channel.</li><li>`GroupId`: ID of the storage group to get loaded.</li><li>`Generation`: Tablet generation.</li></ul> |
-| `WriteSizes` | Size of the data to write. It is selected randomly for each request from the `Min`-`Max` range. You can set multiple `WriteSizes` ranges, in which case a value from a specific range will be selected based on its `Weight`. |
-| `WriteHardRateDispatcher` | Setting up the [parameters of load with hard rate](#hard-rate-dispatcher) for write requests. If this parameter is set than the value of `WriteIntervals` is ignored. |
-| `WriteIntervals` | Setting up the [parameters for probabilistic distribution](#params) of intervals between the records loaded at intervals (in milliseconds). You can set multiple `WriteIntervals` ranges, in which case a value from a specific range will be selected based on its `Weight`. |
-| `MaxInFlightWriteRequests` | The maximum number of write requests being processed simultaneously. |
-| `ReadSizes` | Size of the data to read. It is selected randomly for each request from the `Min`-`Max` range. You can set multiple `ReadSizes` ranges, in which case a value from a specific range will be selected based on its `Weight`. |
-| `WriteHardRateDispatcher` | Setting up the [parameters of load with hard rate](#hard-rate-dispatcher) for read requests. If this parameter is set than the value of `ReadIntervals` is ignored. |
-| `ReadIntervals` | Setting up the [parameters for probabilistic distribution](#params) of intervals between the queries loaded by intervals (in milliseconds). You can set multiple `ReadIntervals` ranges, in which case a value from a specific range will be selected based on its `Weight`. |
-| `MaxInFlightReadRequests` | The maximum number of read requests being processed simultaneously. |
-| `FlushIntervals` | Setting up the [parameters for probabilistic distribution](#params) of intervals (in microseconds) between the queries used to delete data written by the write requests in the main load cycle of the StorageLoad actor. You can set multiple `FlushIntervals` ranges, in which case a value from a specific range will be selected based on its `Weight`. Only one flush request will be processed concurrently. |
-| `PutHandleClass` | [Class of data writes](#write-class) to the disk subsystem. If the `TabletLog` value is set, the write operation has the highest priority. |
-| `GetHandleClass` | [Class of data reads](#read-class) from the disk subsystem. If the `FastRead` is set, the read operation is performed with the highest speed possible. |
-| `Initial allocation` | Setting up the [parameters for initial data allocation](#initial-allocation). It defines the amount of data to be written before the start of the main load cycle. This data can be read by read requests along with the data written in the main load cycle. |
+| Parameter | Description |
+|----------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `DurationSeconds` | Load duration. The timer starts upon completion of the initial data allocation. |
+| `Tablets` | The load is generated on behalf of a tablet with the following parameters:<ul><li>`TabletId`: Tablet ID. It must be unique for each load actor across all the cluster nodes. This parameter and `TabletName` are mutually exclusive.</li><li>`TabletName`: Tablet name. If the parameter is set, tablets' IDs will be assigned automatically, tablets launched on the same node with the same name will be given the same ID, tablets launched on different nodes will be given different IDs.</li><li>`Channel`: Tablet channel.</li><li>`GroupId`: ID of the storage group to get loaded.</li><li>`Generation`: Tablet generation.</li></ul> |
+| `WriteSizes` | Size of the data to write. It is selected randomly for each request from the `Min`-`Max` range. You can set multiple `WriteSizes` ranges, in which case a value from a specific range will be selected based on its `Weight`. |
+| `WriteHardRateDispatcher` | Setting up the [parameters of load with hard rate](#hard-rate-dispatcher) for write requests. If this parameter is set, then the value of `WriteIntervals` is ignored. |
+| `WriteIntervals` | Setting up the [parameters for probabilistic distribution](#params) of intervals between the records loaded at intervals (in milliseconds). You can set multiple `WriteIntervals` ranges, in which case a value from a specific range will be selected based on its `Weight`. |
+| `MaxInFlightWriteRequests` | The maximum number of write requests being processed simultaneously. |
+| `ReadSizes` | Size of the data to read. It is selected randomly for each request from the `Min`-`Max` range. You can set multiple `ReadSizes` ranges, in which case a value from a specific range will be selected based on its `Weight`. |
+| `ReadHardRateDispatcher` | Setting up the [parameters of load with hard rate](#hard-rate-dispatcher) for read requests. If this parameter is set, then the value of `ReadIntervals` is ignored. |
+| `ReadIntervals` | Setting up the [parameters for probabilistic distribution](#params) of intervals between the queries loaded by intervals (in milliseconds). You can set multiple `ReadIntervals` ranges, in which case a value from a specific range will be selected based on its `Weight`. |
+| `MaxInFlightReadRequests` | The maximum number of read requests being processed simultaneously. |
+| `FlushIntervals` | Setting up the [parameters for probabilistic distribution](#params) of intervals (in microseconds) between the queries used to delete data written by the write requests in the main load cycle of the StorageLoad actor. You can set multiple `FlushIntervals` ranges, in which case a value from a specific range will be selected based on its `Weight`. Only one flush request will be processed concurrently. |
+| `PutHandleClass` | [Class of data writes](#write-class) to the disk subsystem. If the `TabletLog` value is set, the write operation has the highest priority. |
+| `GetHandleClass` | [Class of data reads](#read-class) from the disk subsystem. If the `FastRead` is set, the read operation is performed with the highest speed possible. |
+| `Initial allocation` | Setting up the [parameters for initial data allocation](#initial-allocation). It defines the amount of data to be written before the start of the main load cycle. This data can be read by read requests along with the data written in the main load cycle. |
### Write requests class {#write-class}
-| Class | Description |
---- | ---
-| `TabletLog` | The highest priority of write operation. |
-| `AsyncBlob` | Used for writing SSTables and their parts. |
-| `UserData` | Used for writing user data as separate blobs. |
+| Class | Description |
+|-------------|-----------------------------------------------|
+| `TabletLog` | The highest priority of write operation. |
+| `AsyncBlob` | Used for writing SSTables and their parts. |
+| `UserData` | Used for writing user data as separate blobs. |
### Read requests class {#read-class}
-| Class | Description |
---- | ---
-| `AsyncRead` | Used for reading compacted tablets' data. |
-| `FastRead` | Used for fast reads initiated by user. |
-| `Discover` | Reads from Discover query. |
-| `LowRead` | Low priority reads executed on the background. |
+| Class | Description |
+|-------------|------------------------------------------------|
+| `AsyncRead` | Used for reading compacted tablets' data. |
+| `FastRead` | Used for fast reads initiated by user. |
+| `Discover` | Reads from Discover query. |
+| `LowRead` | Low priority reads executed on the background. |
### Parameters of probabilistic distribution {#params}
@@ -52,21 +52,21 @@ You can generate three types of load:
### Parameters of load with hard rate {#hard-rate-dispatcher}
-| Parameter | Description |
---- | ---
-| `RequestRateAtStart` | Requests per second at the moment of load start. If load duration limit is not set then the request rate will remain the same and equal to the value of this parameter. |
-| `RequestRateOnFinish` | Requests per second at the moment of load finish. |
+| Parameter | Description |
+|-----------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `RequestRateAtStart` | Requests per second at the moment of load start. If load duration limit is not set then the request rate will remain the same and equal to the value of this parameter. |
+| `RequestRateOnFinish` | Requests per second at the moment of load finish. |
### Parameters of initial data allocation {#initial-allocation}
-| Parameter | Description |
---- | ---
-| `TotalSize` | Total size of allocated data. This parameter and `BlobsNumber` are mutually exclusive. |
-| `BlobsNumber` | Total number of allocated blobs. |
-| `BlobSizes` | Size of the blobs to write. It is selected randomly for each request from the `Min`-`Max` range. You can set multiple `WriteSizes` ranges, in which case a value from a specific range will be selected based on its `Weight`. |
-| `MaxWritesInFlight` | Maximum number of simultaneously processed write requests. If this parameter is not set then the number of simultaneously processed requests is not limited. |
-| `MaxWriteBytesInFlight` | Maximum number of total amount of simultaneously processed write requests' data. If this parameter is not set then the total amount of data being written concurrently is unlimited. |
-| `PutHandleClass` | [Class of data writes](#write-class) to the disk subsystem. |
+| Parameter | Description |
+|---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `TotalSize` | Total size of allocated data. This parameter and `BlobsNumber` are mutually exclusive. |
+| `BlobsNumber` | Total number of allocated blobs. |
+| `BlobSizes` | Size of the blobs to write. It is selected randomly for each request from the `Min`-`Max` range. You can set multiple `WriteSizes` ranges, in which case a value from a specific range will be selected based on its `Weight`. |
+| `MaxWritesInFlight` | Maximum number of simultaneously processed write requests. If this parameter is not set then the number of simultaneously processed requests is not limited. |
+| `MaxWriteBytesInFlight` | Maximum number of total amount of simultaneously processed write requests' data. If this parameter is not set then the total amount of data being written concurrently is unlimited. |
+| `PutHandleClass` | [Class of data writes](#write-class) to the disk subsystem. |
| `DelayAfterCompletionSec` | The amount of time in seconds the actor will wait upon completing the initial data allocation before starting the main load cycle. If its value is `0` or not set the load will start immediately after the completion of the data allocaion. |
{% include [load-actors-params](../_includes/load-actors-interval.md) %}
diff --git a/ydb/docs/en/core/contributor/load-actors-vdisk.md b/ydb/docs/en/core/contributor/load-actors-vdisk.md
index 81663bc785..28cb29a37d 100644
--- a/ydb/docs/en/core/contributor/load-actors-vdisk.md
+++ b/ydb/docs/en/core/contributor/load-actors-vdisk.md
@@ -6,20 +6,20 @@ Generates a write-only load on the VDisk. Simulates a Distributed Storage Proxy.
{% include [load-actors-params](../_includes/load-actors-params.md) %}
-| Parameter | Description |
---- | ---
-| `VDiskId` | Parameters of the VDisk used to generate load.<ul><li>`GroupID`: Group ID.</li><li>`GroupGeneration`: Group generation.</li><li>`Ring`: Group ring ID.</li><li>`Domain`: Ring fail domain ID.</li><li>`VDisk`: Index of the VDisk in the fail domain.</li></ul> |
-| `GroupInfo` | Description of the group hosting the loaded VDisk (of the appropriate generation). |
-| `TabletId` | ID of the tablet that generates the load. It must be unique for each load actor. |
-| `Channel` | ID of the channel inside the tablet that will be specified in the BLOB write and garbage collection commands. |
-| `DurationSeconds` | The total test time in seconds; when it expires, the load stops automatically. |
-| `WriteIntervals` | Setting up the [parameters for probabilistic distribution](#params) of intervals between the records. |
-| `WriteSizes` | Size of the data to write. It is selected randomly for each request from the `Min`-`Max` range. You can set multiple `WriteSizes` ranges, in which case a value from a specific range will be selected based on its `Weight`. |
-| `InFlightPutsMax` | Maximum number of concurrent BLOB write queries against the VDisk (TEvVPut queries); if omitted, the number of queries is unlimited. |
-| `InFlightPutBytesMax` | Maximum number of bytes in the concurrent BLOB write queries against the VDisk (TEvVPut-requests). |
-| `PutHandleClass` | Class of data writes to the disk subsystem. If the `TabletLog` value is set, the write operation has the highest priority. |
-| `BarrierAdvanceIntervals` | Setting up the [parameters for probabilistic distribution](#params) of intervals between the advance of the garbage collection barrier and the write step. |
-| `StepDistance` | Distance between the currently written step `Gen:Step` of the BLOB and its currently collected step. The higher is the value, the more data is stored. Data is written from `Step = X` and deleted from all the BLOBs where `Step = X - StepDistance`. The `Step` is periodically incremented by one (with the `BarrierAdvanceIntervals` period). |
+| Parameter | Description |
+|---------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `VDiskId` | Parameters of the VDisk used to generate load.<ul><li>`GroupID`: Group ID.</li><li>`GroupGeneration`: Group generation.</li><li>`Ring`: Group ring ID.</li><li>`Domain`: Ring fail domain ID.</li><li>`VDisk`: Index of the VDisk in the fail domain.</li></ul> |
+| `GroupInfo` | Description of the group hosting the loaded VDisk (of the appropriate generation). |
+| `TabletId` | ID of the tablet that generates the load. It must be unique for each load actor. |
+| `Channel` | ID of the channel inside the tablet that will be specified in the BLOB write and garbage collection commands. |
+| `DurationSeconds` | The total test time in seconds; when it expires, the load stops automatically. |
+| `WriteIntervals` | Setting up the [parameters for probabilistic distribution](#params) of intervals between the records. |
+| `WriteSizes` | Size of the data to write. It is selected randomly for each request from the `Min`-`Max` range. You can set multiple `WriteSizes` ranges, in which case a value from a specific range will be selected based on its `Weight`. |
+| `InFlightPutsMax` | Maximum number of concurrent BLOB write queries against the VDisk (TEvVPut queries); if omitted, the number of queries is unlimited. |
+| `InFlightPutBytesMax` | Maximum number of bytes in the concurrent BLOB write queries against the VDisk (TEvVPut-requests). |
+| `PutHandleClass` | Class of data writes to the disk subsystem. If the `TabletLog` value is set, the write operation has the highest priority. |
+| `BarrierAdvanceIntervals` | Setting up the [parameters for probabilistic distribution](#params) of intervals between the advance of the garbage collection barrier and the write step. |
+| `StepDistance` | Distance between the currently written step `Gen:Step` of the BLOB and its currently collected step. The higher is the value, the more data is stored. Data is written from `Step = X` and deleted from all the BLOBs where `Step = X - StepDistance`. The `Step` is periodically incremented by one (with the `BarrierAdvanceIntervals` period). |
### Parameters of probabilistic distribution {#params}
diff --git a/ydb/docs/en/core/contributor/localdb-uncommitted-txs.md b/ydb/docs/en/core/contributor/localdb-uncommitted-txs.md
index b901f23bbd..40ad633ef7 100644
--- a/ydb/docs/en/core/contributor/localdb-uncommitted-txs.md
+++ b/ydb/docs/en/core/contributor/localdb-uncommitted-txs.md
@@ -28,44 +28,44 @@ Redo log (see [flat_redo_writer.h](https://github.com/ydb-platform/ydb/blob/main
[MemTable](../concepts/glossary.md#memtable) in LocalDB is a relatively small in-memory sorted tree that maps table keys to values. MemTable value is a chain of MVCC (partial) rows, each tagged with a row version (a pair of Step and TxId which is a global timestamp). Rows are normally pre-merged across the given MemTable. For example, let's suppose there have been the following operations for some key K:
-| Version | Operation |
---- | ---
+| Version | Operation |
+|------------|------------------------|
| `v1000/10` | `UPDATE ... SET A = 1` |
| `v2000/11` | `UPDATE ... SET B = 2` |
| `v3000/12` | `UPDATE ... SET C = 3` |
Then the chain of rows for key K in a single MemTable will look like this:
-| Version | Row |
---- | ---
+| Version | Row |
+|------------|---------------------------|
| `v3000/12` | `SET A = 1, B = 2, C = 3` |
-| `v2000/11` | `SET A = 1, B = 2` |
-| `v1000/10` | `SET A = 1` |
+| `v2000/11` | `SET A = 1, B = 2` |
+| `v1000/10` | `SET A = 1` |
However, if the MemTable was split between updates, it may look like this:
-| MemTable | Version | Row |
---- | --- | ---
-| Epoch 2 | `v3000/12` | `SET B = 2, C = 3` |
-| Epoch 2 | `v2000/11` | `SET B = 2` |
-| Epoch 1 | `v1000/10` | `SET A = 1` |
+| MemTable | Version | Row |
+|----------| --- |--------------------|
+| Epoch 2 | `v3000/12` | `SET B = 2, C = 3` |
+| Epoch 2 | `v2000/11` | `SET B = 2` |
+| Epoch 1 | `v1000/10` | `SET A = 1` |
Changes are applied to the current MemTable, and uncommitted changes are no exception. However, they are tagged with a special version (where Step is the maximum possible number, as if they are in some "distant" future, and TxId is their uncommitted TxId), without any pre-merging. For example, let's suppose we additionally performed the following operations:
-| TxId | Operation |
---- | ---
-| 15 | `UPDATE ... SET C = 10` |
-| 13 | `UPDATE ... SET B = 20` |
+| TxId | Operation |
+|------|-------------------------|
+| 15 | `UPDATE ... SET C = 10` |
+| 13 | `UPDATE ... SET B = 20` |
The update chain for our key K will look like this:
-| Version | Row |
---- | ---
-| `v{max}/13` | `SET B = 20` |
-| `v{max}/15` | `SET C = 10` |
-| `v3000/12` | `SET A = 1, B = 2, C = 3` |
-| `v2000/11` | `SET A = 1, B = 2` |
-| `v1000/10` | `SET A = 1` |
+| Version | Row |
+|-------------|---------------------------|
+| `v{max}/13` | `SET B = 20` |
+| `v{max}/15` | `SET C = 10` |
+| `v3000/12` | `SET A = 1, B = 2, C = 3` |
+| `v2000/11` | `SET A = 1, B = 2` |
+| `v1000/10` | `SET A = 1` |
When reading [iterator](https://github.com/ydb-platform/ydb/blob/main/ydb/core/tablet_flat/flat_mem_iter.h) performs a lookup for changes with `Step == max` into an [in-memory transaction map](https://github.com/ydb-platform/ydb/blob/0e69bf615395fdd48ecee032faaec81bc468b0b8/ydb/core/tablet_flat/flat_table.h#L359), which maps committed TxIds to their corresponding commit versions, and applies all committed deltas until it finds and applies a pre-merged row with `Step != max`.
@@ -73,14 +73,14 @@ Let's suppose we commit tx 13 at `v4000/20`. At that point transaction map is up
Let's suppose we now perform an `UPDATE ... SET A = 30` at version `v5000/21`, the resulting chain will look as follows:
-| Version | Row |
---- | ---
-| `v5000/21` | `SET A = 30, B = 20, C = 3` |
-| `v{max}/13` | `SET B = 20` |
-| `v{max}/15` | `SET C = 10` |
-| `v3000/12` | `SET A = 1, B = 2, C = 3` |
-| `v2000/11` | `SET A = 1, B = 2` |
-| `v1000/10` | `SET A = 1` |
+| Version | Row |
+|-------------|-----------------------------|
+| `v5000/21` | `SET A = 30, B = 20, C = 3` |
+| `v{max}/13` | `SET B = 20` |
+| `v{max}/15` | `SET C = 10` |
+| `v3000/12` | `SET A = 1, B = 2, C = 3` |
+| `v2000/11` | `SET A = 1, B = 2` |
+| `v1000/10` | `SET A = 1` |
Notice how the new record has its state pre-merged, including the previously committed delta for tx 13. Since tx 15 is not committed it was skipped and baked into a pre-merged state for `v5000/21`. It is important that tx 15 is not committed afterwards, and would result in a read anomaly otherwise: some versions would observe it as committed, and some won't.
@@ -92,58 +92,58 @@ Data pages (see [flat_page_data.h](https://github.com/ydb-platform/ydb/blob/main
One key may have several uncommitted delta records, as well as (optionally) the latest committed record data. Historically, data pages could only have one record (and one record pointer) per key, so the record pointer leads to the top of the delta chain, and other records are available via additional per-record offset table for other records:
-| Offset | Description |
---- | ---
-| -X*8 | offset of Main |
-| ... | ... |
-| -16 | offset of Delta 2 |
-| -8 | offset of Delta 1 |
-| 0 | header of Delta 0 |
-| ... | ... |
+| Offset | Description |
+|-------------------|-------------------|
+| -X*8 | offset of Main |
+| ... | ... |
+| -16 | offset of Delta 2 |
+| -8 | offset of Delta 1 |
+| 0 | header of Delta 0 |
+| ... | ... |
| offset of Delta 1 | header of Delta 1 |
-| ... | ... |
-| offset of Main | header of Main |
+| ... | ... |
+| offset of Main | header of Main |
Having a pointer to Delta 0, other records for the same key are available with the `GetAltRecord(size_t index)` method, where `index` is the record number (which is 1 for Delta 1). The chain of records ends either with a pointer to the record without an IsDelta flag (the Main record), or 0 (when there is no Main record for the key).
Let's suppose that after writing tx 13 above the MemTable was compacted. Entry for the 32-bit key K may look like this (offsets are relative to the record pointer on the table):
-| Offset | Value | Description |
---- | --- | ---
-| -16 | 58 | offset of Main |
-| -8 | 29 | offset of Delta 1 |
-| 0 | 0x21 | Delta 0: IsDelta + ERowOp::Upsert |
-| 1 | 0x00 | .. key column is not NULL |
-| 2 | K | .. key column (32-bit) |
-| 6 | 0x00 | .. column A is empty |
-| 7 | 0 | .. column A (32-bit) |
-| 11 | 0x01 | .. column B = ECellOp::Set |
-| 12 | 20 | .. column B (32-bit) |
-| 16 | 0x00 | .. column C is empty |
-| 17 | 0 | .. column C (32-bit) |
-| 21 | 13 | .. TDelta::TxId |
-| 29 | 0x21 | Delta 1: IsDelta + ERowOp::Upsert |
-| 30 | 0x00 | .. key column is not NULL |
-| 31 | K | .. key column (32-bit) |
-| 35 | 0x00 | .. column A is empty |
-| 36 | 0 | .. column A (32-bit) |
-| 40 | 0x00 | .. column B is empty |
-| 41 | 0 | .. column B (32-bit) |
-| 45 | 0x01 | .. column C = ECellOp::Set |
-| 46 | 10 | .. column C (32-bit) |
-| 50 | 15 | .. TDelta::TxId |
-| 58 | 0x61 | Main: HasHistory + IsVersioned + ERowOp::Upsert |
-| 59 | 0x00 | .. key column is not NULL |
-| 60 | K | .. key column (32-bit) |
-| 64 | 0x01 | .. column A = ECellOp::Set |
-| 65 | 1 | .. column A (32-bit) |
-| 69 | 0x01 | .. column B = ECellOp::Set |
-| 70 | 2 | .. column B (32-bit) |
-| 74 | 0x01 | .. column C = ECellOp::Set |
-| 75 | 3 | .. column C (32-bit) |
-| 79 | 3000 | .. RowVersion.Step |
-| 87 | 12 | .. RowVersion.TxId |
-| 95 | - | End of record |
+| Offset | Value | Description |
+|--------| --- |-------------------------------------------------|
+| -16 | 58 | offset of Main |
+| -8 | 29 | offset of Delta 1 |
+| 0 | 0x21 | Delta 0: IsDelta + ERowOp::Upsert |
+| 1 | 0x00 | .. key column is not NULL |
+| 2 | K | .. key column (32-bit) |
+| 6 | 0x00 | .. column A is empty |
+| 7 | 0 | .. column A (32-bit) |
+| 11 | 0x01 | .. column B = ECellOp::Set |
+| 12 | 20 | .. column B (32-bit) |
+| 16 | 0x00 | .. column C is empty |
+| 17 | 0 | .. column C (32-bit) |
+| 21 | 13 | .. TDelta::TxId |
+| 29 | 0x21 | Delta 1: IsDelta + ERowOp::Upsert |
+| 30 | 0x00 | .. key column is not NULL |
+| 31 | K | .. key column (32-bit) |
+| 35 | 0x00 | .. column A is empty |
+| 36 | 0 | .. column A (32-bit) |
+| 40 | 0x00 | .. column B is empty |
+| 41 | 0 | .. column B (32-bit) |
+| 45 | 0x01 | .. column C = ECellOp::Set |
+| 46 | 10 | .. column C (32-bit) |
+| 50 | 15 | .. TDelta::TxId |
+| 58 | 0x61 | Main: HasHistory + IsVersioned + ERowOp::Upsert |
+| 59 | 0x00 | .. key column is not NULL |
+| 60 | K | .. key column (32-bit) |
+| 64 | 0x01 | .. column A = ECellOp::Set |
+| 65 | 1 | .. column A (32-bit) |
+| 69 | 0x01 | .. column B = ECellOp::Set |
+| 70 | 2 | .. column B (32-bit) |
+| 74 | 0x01 | .. column C = ECellOp::Set |
+| 75 | 3 | .. column C (32-bit) |
+| 79 | 3000 | .. RowVersion.Step |
+| 87 | 12 | .. RowVersion.TxId |
+| 95 | - | End of record |
The HasHistory flag in the Main record shows that other two records are stored among history data with keys `(RowId, 2000, 11)` and `(RowId, 1000, 10)` respectively.
diff --git a/ydb/docs/en/core/public-materials/_includes/conferences/2025/fosdem.md b/ydb/docs/en/core/public-materials/_includes/conferences/2025/fosdem.md
index 11abc9458d..719412a7b3 100644
--- a/ydb/docs/en/core/public-materials/_includes/conferences/2025/fosdem.md
+++ b/ydb/docs/en/core/public-materials/_includes/conferences/2025/fosdem.md
@@ -1,4 +1,4 @@
-## Designing YDB: Constructing a Distributed cloud-native DBMS for OLTP and OLAP from the Ground Up {#2023-conf-hl-serbia-scale}
+## Designing YDB: Constructing a Distributed cloud-native DBMS for OLTP and OLAP from the Ground Up {#2025-conf-fosdem}
{% include notitle [database_internals_tag](../../tags.md#database_internals) %}
diff --git a/ydb/docs/en/core/public-materials/_includes/conferences/2025/fossasia.md b/ydb/docs/en/core/public-materials/_includes/conferences/2025/fossasia.md
new file mode 100644
index 0000000000..4ba08f5948
--- /dev/null
+++ b/ydb/docs/en/core/public-materials/_includes/conferences/2025/fossasia.md
@@ -0,0 +1,15 @@
+## Designing YDB: Constructing a Distributed cloud-native DBMS for OLTP and OLAP from the Ground Up {#2025-conf-fossasia}
+
+{% include notitle [database_internals_tag](../../tags.md#database_internals) %}
+
+Distributed systems are great in multiple aspects: they are built to be fault-tolerant and reliable, can scale almost infinitely, provide low latency in geo-distributed scenarios, and, finally, they are geeky and fun to explore. YDB is a distributed SQL database that has been running in production for years. There are installations with thousands of servers storing petabytes of data. To provide these capabilities, any distributed DBMS must achieve consistency and consensus while tolerating unreliable networks, faulty hardware, and the absence of a global clock.
+
+In this session, we will briefly introduce the problems, challenges, and fallacies of distributed computing, explaining why sharded systems like Citus are not always ACID and differ from truly distributed systems. Then, we will dive deep into the design decisions made by YDB to address these difficulties and outline YDB's architecture layer by layer, from the bare metal disks and distributed storage up to OLTP and OLAP functionalities. Ultimately, we will briefly compare our approach with Calvin's, which initially inspired YDB, and Spanner.
+
+[{{ team.ivanov.name }}]({{ team.ivanov.profile }}) ({{ team.ivanov.position }}) discussed the architecture of YDB, focusing on building a unified platform for fault-tolerant and reliable OLTP and OLAP processing.
+
+@[YouTube](https://youtu.be/kfI0r5OvYIk?si=ZVyS2OTtJxl3ZuWj)
+
+The presentation will be of interest to developers of high-load systems and platform developers for various purposes.
+
+[Slides](https://presentations.ydb.tech/2025/en/fossasia/designing_ydb/presentation.pdf)
diff --git a/ydb/docs/en/core/public-materials/_includes/conferences/2025/pgconfIndia.md b/ydb/docs/en/core/public-materials/_includes/conferences/2025/pgconfIndia.md
new file mode 100644
index 0000000000..47c758cb94
--- /dev/null
+++ b/ydb/docs/en/core/public-materials/_includes/conferences/2025/pgconfIndia.md
@@ -0,0 +1,13 @@
+## Sharded and Distributed Are Not the Same: What You Must Know When PostgreSQL Is Not Enough {#2025-conf-pgconf-India}
+
+{% include notitle [testing_tag](../../tags.md#testing) %}
+
+It's no secret that PostgreSQL is extremely efficient and scales vertically well. At the same time, it isn't a secret that PostgreSQL scales only vertically, meaning its performance is limited by the capabilities of a single server. Most Citus-like solutions allow the database to be sharded, but a sharded database is not distributed and does not provide ACID guarantees for distributed transactions. The common opinion about distributed DBMSs is diametrically opposed: they are believed to scale well horizontally and have ACID distributed transactions but have lower efficiency in smaller installations.
+
+When comparing monolithic and distributed DBMSs, discussions often focus on architecture but rarely provide specific performance metrics. This presentation, on the other hand, is entirely based on an empirical study of this issue. Our approach is simple: [{{ team.ivanov.name }}]({{ team.ivanov.profile }}) ({{ team.ivanov.position }}) installed PostgreSQL and distributed DBMSs on identical clusters of three physical servers and compared them using the popular TPC-C benchmark.
+
+@[YouTube](https://youtu.be/HR-vUI8mTVI?si=oenZT8mTr6czcZtS)
+
+The presentation will be of interest to developers of high-load systems and platform developers for various purposes.
+
+[Slides](https://presentations.ydb.tech/2025/en/pgconfin2025/sharded_and_distributed_are_not_the_same/presentation.pdf)
diff --git a/ydb/docs/en/core/public-materials/videos.md b/ydb/docs/en/core/public-materials/videos.md
index 7c3a771da3..1aa498f598 100644
--- a/ydb/docs/en/core/public-materials/videos.md
+++ b/ydb/docs/en/core/public-materials/videos.md
@@ -9,8 +9,11 @@ Video recordings from conferences and webinars. The materials are divided by cat
- 2025
- {% include [FOSDEM](./_includes/conferences/2025/fosdem.md) %}
+ {% include [FOSSASIA](./_includes/conferences/2025/fossasia.md) %}
+
+ {% include [PGConfIndia](./_includes/conferences/2025/pgconfIndia.md) %}
+ {% include [FOSDEM](./_includes/conferences/2025/fosdem.md) %}
- 2024
@@ -34,7 +37,7 @@ Video recordings from conferences and webinars. The materials are divided by cat
{% include [HighLoad](./_includes/conferences/2023/HighLoad.md) %}
- {% include [Fossasia](./_includes/conferences/2023/Fossasia.md) %}
+ {% include [FOSSASIA](./_includes/conferences/2023/Fossasia.md) %}
{% include [webinars](./_includes/webinars/2023/webinars.md) %}
diff --git a/ydb/docs/en/core/reference/configuration/index.md b/ydb/docs/en/core/reference/configuration/index.md
index 0f6d50bd14..dc8856de35 100644
--- a/ydb/docs/en/core/reference/configuration/index.md
+++ b/ydb/docs/en/core/reference/configuration/index.md
@@ -149,11 +149,11 @@ This section defines one or more types of storage pools available in the cluster
The following [fault tolerance modes](../../concepts/topology.md) are available:
-| Mode | Description |
---- | ---
-| `none` | There is no redundancy. Applies for testing. |
-| `block-4-2` | Redundancy factor of 1.5, applies to single data center clusters. |
-| `mirror-3-dc` | Redundancy factor of 3, applies to multi-data center clusters. |
+| Mode | Description |
+|---------------|-------------------------------------------------------------------|
+| `none` | There is no redundancy. Applies for testing. |
+| `block-4-2` | Redundancy factor of 1.5, applies to single data center clusters. |
+| `mirror-3-dc` | Redundancy factor of 3, applies to multi-data center clusters. |
### Syntax
@@ -237,8 +237,8 @@ domains_config:
...
```
-| Key | Description |
---- | ---
+| Key | Description |
+|----------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `enforce_user_token_requirement` | Require a user token.<br/>Acceptable values:<br/><ul><li>`false`: Anonymous authentication mode, no token needed (used by default if the parameter is omitted).</li><li>`true`: Username/password authentication mode. A valid user token is needed for authentication.</li></ul> |
### Examples {#domains-examples}
@@ -418,11 +418,11 @@ actor_system_config:
cpu_count: 10
```
-| Parameter | Description |
---- | ---
-| `use_auto_config` | Enabling automatic configuring of the actor system. |
-| `node_type` | Node type. Determines the expected workload and vCPU ratio between the pools. Possible values:<ul><li>`STORAGE`: The node interacts with network block store volumes and is responsible for managing the Distributed Storage.</li><li>`COMPUTE`: The node processes the workload generated by users.</li><li>`HYBRID`: The node is used for hybrid load or the usage of `System`, `User`, and `IC` for the node under load is about the same. |
-| `cpu_count` | Number of vCPUs allocated to the node. |
+| Parameter | Description |
+|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `use_auto_config` | Enabling automatic configuring of the actor system. |
+| `node_type`       | Node type. Determines the expected workload and vCPU ratio between the pools. Possible values:<ul><li>`STORAGE`: The node interacts with network block store volumes and is responsible for managing the Distributed Storage.</li><li>`COMPUTE`: The node processes the workload generated by users.</li><li>`HYBRID`: The node is used for hybrid load or the usage of `System`, `User`, and `IC` for the node under load is about the same.</li></ul> |
+| `cpu_count` | Number of vCPUs allocated to the node. |
### Manual configuring {#tuneconfig}
@@ -458,19 +458,19 @@ actor_system_config:
spin_threshold: 0
```
-| Parameter | Description |
---- | ---
-| `executor` | Pool configuration.<br/>You should only change the number of CPU cores (the `threads` parameter) in the pool configs. |
-| `name` | Pool name that indicates its purpose. Possible values:<ul><li>`System`: A pool that is designed for running quick internal operations in {{ ydb-full-name }} (it serves system tablets, state storage, distributed storage I/O, and erasure coding).</li><li>`User`: A pool that serves the user load (user tablets, queries run in the Query Processor).</li><li>`Batch`: A pool that serves tasks with no strict limit on the execution time, background operations like garbage collection and heavy queries run in the Query Processor.</li><li>`IO`: A pool responsible for performing any tasks with blocking operations (such as authentication or writing logs to a file).</li><li>`IC`: Interconnect, it serves the load related to internode communication (system calls to wait for sending and send data across the network, data serialization, as well as message splits and merges).</li></ul> |
-| `spin_threshold` | The number of CPU cycles before going to sleep if there are no messages. In sleep mode, there is less power consumption, but it may increase request latency under low loads. |
-| `threads` | The number of CPU cores allocated per pool.<br/>Make sure the total number of cores assigned to the System, User, Batch, and IC pools does not exceed the number of available system cores. |
-| `max_threads` | Maximum vCPU that can be allocated to the pool from idle cores of other pools. When you set this parameter, the system enables the mechanism of expanding the pool at full utilization, provided that idle vCPUs are available.<br/>The system checks the current utilization and reallocates vCPUs once per second. |
-| `max_avg_ping_deviation` | Additional condition to expand the pool's vCPU. When more than 90% of vCPUs allocated to the pool are utilized, you need to worsen SelfPing by more than `max_avg_ping_deviation` microseconds from 10 milliseconds expected. |
-| `time_per_mailbox_micro_secs` | The number of messages per actor to be handled before switching to a different actor. |
-| `type` | Pool type. Possible values:<ul><li>`IO` should be set for IO pools.</li><li>`BASIC` should be set for any other pool.</li></ul> |
-| `scheduler` | Scheduler configuration. The actor system scheduler is responsible for the delivery of deferred messages exchanged by actors.<br/>We do not recommend changing the default scheduler parameters. |
-| `progress_threshold` | The actor system supports requesting message sending scheduled for a later point in time. The system might fail to send all scheduled messages at some point. In this case, it starts sending them in "virtual time" by handling message sending in each loop over a period that doesn't exceed the `progress_threshold` value in microseconds and shifting the virtual time by the `progress_threshold` value until it reaches real time. |
-| `resolution` | When making a schedule for sending messages, discrete time slots are used. The slot duration is set by the `resolution` parameter in microseconds. |
+| Parameter | Description |
+|-------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `executor` | Pool configuration.<br/>You should only change the number of CPU cores (the `threads` parameter) in the pool configs. |
+| `name` | Pool name that indicates its purpose. Possible values:<ul><li>`System`: A pool that is designed for running quick internal operations in {{ ydb-full-name }} (it serves system tablets, state storage, distributed storage I/O, and erasure coding).</li><li>`User`: A pool that serves the user load (user tablets, queries run in the Query Processor).</li><li>`Batch`: A pool that serves tasks with no strict limit on the execution time, background operations like garbage collection and heavy queries run in the Query Processor.</li><li>`IO`: A pool responsible for performing any tasks with blocking operations (such as authentication or writing logs to a file).</li><li>`IC`: Interconnect, it serves the load related to internode communication (system calls to wait for sending and send data across the network, data serialization, as well as message splits and merges).</li></ul> |
+| `spin_threshold` | The number of CPU cycles before going to sleep if there are no messages. In sleep mode, there is less power consumption, but it may increase request latency under low loads. |
+| `threads` | The number of CPU cores allocated per pool.<br/>Make sure the total number of cores assigned to the System, User, Batch, and IC pools does not exceed the number of available system cores. |
+| `max_threads` | Maximum vCPU that can be allocated to the pool from idle cores of other pools. When you set this parameter, the system enables the mechanism of expanding the pool at full utilization, provided that idle vCPUs are available.<br/>The system checks the current utilization and reallocates vCPUs once per second. |
+| `max_avg_ping_deviation` | Additional condition to expand the pool's vCPU. When more than 90% of vCPUs allocated to the pool are utilized, you need to worsen SelfPing by more than `max_avg_ping_deviation` microseconds from 10 milliseconds expected. |
+| `time_per_mailbox_micro_secs` | The number of messages per actor to be handled before switching to a different actor. |
+| `type` | Pool type. Possible values:<ul><li>`IO` should be set for IO pools.</li><li>`BASIC` should be set for any other pool.</li></ul> |
+| `scheduler` | Scheduler configuration. The actor system scheduler is responsible for the delivery of deferred messages exchanged by actors.<br/>We do not recommend changing the default scheduler parameters. |
+| `progress_threshold` | The actor system supports requesting message sending scheduled for a later point in time. The system might fail to send all scheduled messages at some point. In this case, it starts sending them in "virtual time" by handling message sending in each loop over a period that doesn't exceed the `progress_threshold` value in microseconds and shifting the virtual time by the `progress_threshold` value until it reaches real time. |
+| `resolution` | When making a schedule for sending messages, discrete time slots are used. The slot duration is set by the `resolution` parameter in microseconds. |
## Memory controller {#memory-controller}
@@ -697,6 +697,34 @@ node_broker_config:
stable_node_name_prefix: <new prefix>
```
+## Configuring Health Check {#healthcheck-config}
+
+This section configures thresholds and timeout settings used by the {{ ydb-short-name }} [health check service](../ydb-sdk/health-check-api.md). These parameters help configure detection of potential [issues](../ydb-sdk/health-check-api.md#issues), such as excessive restarts or time drift between dynamic nodes.
+
+### Syntax
+
+```yaml
+healthcheck_config:
+ thresholds:
+ node_restarts_yellow: 10
+ node_restarts_orange: 30
+ nodes_time_difference_yellow: 5000
+ nodes_time_difference_orange: 25000
+ tablets_restarts_orange: 30
+ timeout: 20000
+```
+
+### Parameters
+
+| Parameter | Default | Description |
+|-------------------------------------------|---------|-------------------------------------------------------------------------------|
+| `thresholds.node_restarts_yellow` | `10` | Number of node restarts to trigger a `YELLOW` warning |
+| `thresholds.node_restarts_orange` | `30` | Number of node restarts to trigger an `ORANGE` alert |
+| `thresholds.nodes_time_difference_yellow` | `5000`  | Max allowed time difference (in µs) between dynamic nodes for `YELLOW` issue   |
+| `thresholds.nodes_time_difference_orange` | `25000` | Max allowed time difference (in µs) between dynamic nodes for `ORANGE` issue   |
+| `thresholds.tablets_restarts_orange` | `30` | Number of tablet restarts to trigger an `ORANGE` alert |
+| `timeout` | `20000` | Maximum health check response time (in ms) |
+
## Sample cluster configurations {#examples}
-You can find model cluster configurations for deployment in the [repository](https://github.com/ydb-platform/ydb/tree/main/ydb/deploy/yaml_config_examples/). Check them out before deploying a cluster.
+You can find model cluster configurations for deployment in the [repository](https://github.com/ydb-platform/ydb/tree/main/ydb/deploy/yaml_config_examples/). Check them out before deploying a cluster. \ No newline at end of file
diff --git a/ydb/docs/en/core/reference/kafka-api/examples.md b/ydb/docs/en/core/reference/kafka-api/examples.md
index 0974893378..8e139e47d0 100644
--- a/ydb/docs/en/core/reference/kafka-api/examples.md
+++ b/ydb/docs/en/core/reference/kafka-api/examples.md
@@ -28,7 +28,7 @@ Consider the following limitations of using the Kafka API for reading:
Therefore, in the consumer configuration, you must always specify the **consumer group name** and the parameters:
-- `check.crc=false`
+- `check.crcs=false`
- `partition.assignment.strategy=org.apache.kafka.clients.consumer.RoundRobinAssignor`
Below are examples of reading using the Kafka protocol for various applications, programming languages, and frameworks without authentication.
diff --git a/ydb/docs/en/core/reference/ydb-cli/_includes/commands.md b/ydb/docs/en/core/reference/ydb-cli/_includes/commands.md
index 923820df61..5a48b9b56a 100644
--- a/ydb/docs/en/core/reference/ydb-cli/_includes/commands.md
+++ b/ydb/docs/en/core/reference/ydb-cli/_includes/commands.md
@@ -36,6 +36,7 @@ Any command can be run from the command line with the `--help` option to get hel
| [import file tsv](../export-import/import-file.md) | Importing data from a TSV file |
| [import s3](../export-import/import-s3.md) | Importing data from S3 storage |
| [init](../profile/create.md) | Initializing the CLI, creating a [profile](../profile/index.md) |
+| [monitoring healthcheck](../commands/monitoring-healthcheck.md) | Health check |
| [operation cancel](../operation-cancel.md) | Aborting long-running operations |
| [operation forget](../operation-forget.md) | Deleting long-running operations from the list |
| [operation get](../operation-get.md) | Status of long-running operations |
diff --git a/ydb/docs/en/core/reference/ydb-cli/commands/monitoring-healthcheck.md b/ydb/docs/en/core/reference/ydb-cli/commands/monitoring-healthcheck.md
new file mode 100644
index 0000000000..d03ca723c6
--- /dev/null
+++ b/ydb/docs/en/core/reference/ydb-cli/commands/monitoring-healthcheck.md
@@ -0,0 +1,151 @@
+# Health check
+
+{{ ydb-short-name }} has a built-in self-diagnostic system that provides a brief report on the cluster status and information about existing issues. This report can be obtained via {{ ydb-short-name }} CLI using the command explained below.
+
+General command format:
+
+```bash
+ydb [global options...] monitoring healthcheck [options...]
+```
+
+* `global options` — [global options](global-options.md),
+* `options` — [subcommand options](#options).
+
+## Subcommand options {#options}
+
+#|
+|| Name | Description ||
+|| `--timeout` | The time, in milliseconds, within which the operation should be completed on the server. ||
+|| `--format` | Output format. Available options:
+
+* `pretty` — short, human-readable output
+* `json` — detailed JSON output
+
+Default: `pretty`. ||
+|#
+
+The response structure and description are provided in the [Health Check API](../../ydb-sdk/health-check-api.md#response-structure) documentation.
+
+## Examples {#examples}
+
+### Health check result in pretty format {#example-pretty}
+
+```bash
+{{ ydb-cli }} --profile quickstart monitoring healthcheck --format pretty
+```
+
+Database is in good condition:
+
+```bash
+Healthcheck status: GOOD
+```
+
+Database is degraded:
+
+```bash
+Healthcheck status: DEGRADED
+```
+
+### Health check result in JSON format {#example-json}
+
+
+```bash
+{{ ydb-cli }} --profile quickstart monitoring healthcheck --format json
+```
+
+Database is in good condition:
+
+```json
+{
+ "self_check_result": "GOOD",
+ "location": {
+ "id": 51059,
+ "host": "my-host.net",
+ "port": 19001
+ }
+}
+```
+
+Database is degraded:
+
+```json
+{
+ "self_check_result": "DEGRADED",
+ "issue_log": [
+ {
+ "id": "YELLOW-b3c0-70fb",
+ "status": "YELLOW",
+ "message": "Database has multiple issues",
+ "location": {
+ "database": {
+ "name": "/my-cluster/my-database"
+ }
+ },
+ "reason": [
+ "YELLOW-b3c0-1ba8",
+ "YELLOW-b3c0-1c83"
+ ],
+ "type": "DATABASE",
+ "level": 1
+ },
+ {
+ "id": "YELLOW-b3c0-1ba8",
+ "status": "YELLOW",
+ "message": "Compute is overloaded",
+ "location": {
+ "database": {
+ "name": "/my-cluster/my-database"
+ }
+ },
+ "reason": [
+ "YELLOW-b3c0-343a-51059-User"
+ ],
+ "type": "COMPUTE",
+ "level": 2
+ },
+ {
+ "id": "YELLOW-b3c0-343a-51059-User",
+ "status": "YELLOW",
+ "message": "Pool usage is over than 99%",
+ "location": {
+ "compute": {
+ "node": {
+ "id": 51059,
+ "host": "my-host.net",
+ "port": 31043
+ },
+ "pool": {
+ "name": "User"
+ }
+ },
+ "database": {
+ "name": "/my-cluster/my-database"
+ }
+ },
+ "type": "COMPUTE_POOL",
+ "level": 4
+ },
+ {
+ "id": "YELLOW-b3c0-1c83",
+ "status": "YELLOW",
+ "message": "Storage usage over 75%",
+ "location": {
+ "database": {
+ "name": "/my-cluster/my-database"
+ }
+ },
+ "type": "STORAGE",
+ "level": 2
+ }
+ ],
+ "location": {
+ "id": 117,
+ "host": "my-host.net",
+ "port": 19001
+ }
+}
+```
+
+
+
+
diff --git a/ydb/docs/en/core/reference/ydb-cli/export-import/_includes/s3_conn.md b/ydb/docs/en/core/reference/ydb-cli/export-import/_includes/s3_conn.md
index 649726fc34..82193871e3 100644
--- a/ydb/docs/en/core/reference/ydb-cli/export-import/_includes/s3_conn.md
+++ b/ydb/docs/en/core/reference/ydb-cli/export-import/_includes/s3_conn.md
@@ -16,8 +16,8 @@ Except when you import data from a public bucket, to connect, log in with an acc
You need two parameters to authenticate with S3:
-- ID of the access key (access_key_id).
-- Secret access key (secret_access_key).
+- Access key ID (`--access-key`).
+- Secret access key (`--secret-key`).
The YDB CLI takes values of these parameters from the following sources (listed in descending priority):
@@ -51,7 +51,7 @@ Below is an example of getting access keys for the [{{ yandex-cloud }} Object St
1. [Install and set up]{% if lang == "ru" %}(https://cloud.yandex.ru/docs/cli/quickstart){% endif %}{% if lang == "en" %}(https://cloud.yandex.com/docs/cli/quickstart){% endif %} the {{ yandex-cloud }} CLI.
-2. Use the following command to get the ID of your cloud folder (you'll need to add it to the below commands):
+2. Use the following command to get the ID of your cloud folder (`folder-id`) (you'll need to add it to the commands below):
```bash
yc config list
@@ -69,9 +69,17 @@ Below is an example of getting access keys for the [{{ yandex-cloud }} Object St
yc iam service-account create --name s3account
```
- You can indicate any account name except `s3account`, or use your existing account name (be sure to replace it when copying the commands below).
+ You can indicate any account name instead of `s3account`, or use your existing account name (be sure to replace it when copying the commands below).
-3. [Grant roles to your service account]{% if lang == "ru" %}(https://cloud.yandex.ru/docs/iam/operations/sa/assign-role-for-sa){% endif %}{% if lang == "en" %}(https://cloud.yandex.com/docs/iam/operations/sa/assign-role-for-sa){% endif %} according to your intended S3 access level by running the command:
+ The account ID will be printed upon creation.
+
+ To get the ID of an existing account, use this command:
+
+ ```bash
+ yc iam service-account get --name <account-name>
+ ```
+
+4. [Grant roles to your service account]{% if lang == "ru" %}(https://cloud.yandex.ru/docs/iam/operations/sa/assign-role-for-sa){% endif %}{% if lang == "en" %}(https://cloud.yandex.com/docs/iam/operations/sa/assign-role-for-sa){% endif %} according to your intended S3 access level by running the command:
{% list tabs %}
@@ -79,23 +87,23 @@ Below is an example of getting access keys for the [{{ yandex-cloud }} Object St
```bash
yc resource-manager folder add-access-binding <folder-id> \
- --role storage.viewer --subject serviceAccount:s3account
+ --role storage.viewer --subject serviceAccount:<s3-account-id>
```
- Write (to export data from the YDB database)
```bash
yc resource-manager folder add-access-binding <folder-id> \
- --role storage.editor --subject serviceAccount:s3account
+ --role storage.editor --subject serviceAccount:<s3-account-id>
```
{% endlist %}
- Where `<folder-id>` is the cloud folder ID that you retrieved at step 2.
+ Where `<folder-id>` is the cloud folder ID that you retrieved at step 2, and `<s3-account-id>` is the ID of the account you created at step 3.
You can also read a [full list]{% if lang == "ru" %}(https://cloud.yandex.ru/docs/iam/concepts/access-control/roles#object-storage){% endif %}{% if lang == "en" %}(https://cloud.yandex.com/docs/iam/concepts/access-control/roles#object-storage){% endif %} of {{ yandex-cloud }} roles.
-4. Get [static access keys]{% if lang == "ru" %}(https://cloud.yandex.ru/docs/iam/operations/sa/create-access-key){% endif %}{% if lang == "en" %}(https://cloud.yandex.com/docs/iam/operations/sa/create-access-key){% endif %} by running the command:
+5. Get [static access keys]{% if lang == "ru" %}(https://cloud.yandex.ru/docs/iam/operations/sa/create-access-key){% endif %}{% if lang == "en" %}(https://cloud.yandex.com/docs/iam/operations/sa/create-access-key){% endif %} by running the command:
```bash
yc iam access-key create --service-account-name s3account
@@ -114,7 +122,7 @@ Below is an example of getting access keys for the [{{ yandex-cloud }} Object St
In this result:
- - `access_key.key_id` is the access key ID
- - `secret` is the secret access key
+ - `access_key.key_id` is the access key ID (`--access-key`).
+ - `secret` is the secret access key (`--secret-key`).
{% include [s3_conn_procure_overlay.md](s3_conn_procure_overlay.md) %}
diff --git a/ydb/docs/en/core/reference/ydb-cli/interactive-cli.md b/ydb/docs/en/core/reference/ydb-cli/interactive-cli.md
index 47fa07b8f5..8644928e90 100644
--- a/ydb/docs/en/core/reference/ydb-cli/interactive-cli.md
+++ b/ydb/docs/en/core/reference/ydb-cli/interactive-cli.md
@@ -12,31 +12,31 @@ General format of the command:
## Hotkeys {#hotkeys}
-Hotkey | Description
----|---
-`CTRL + D` | Allows you to exit interactive mode.
-`Up arrow` | Scrolls through query history toward older queries.
-`Down arrow` | Scrolls through query history toward newer queries.
-`TAB` | Autocompletes the entered text to a suitable YQL command.
-`CTRL + R` | Allows searching for a query in history containing a specified substring.
+| Hotkey | Description |
+|---------------|---------------------------------------------------------------------------|
+| `CTRL + D` | Allows you to exit interactive mode. |
+| `Up arrow` | Scrolls through query history toward older queries. |
+| `Down arrow` | Scrolls through query history toward newer queries. |
+| `TAB` | Autocompletes the entered text to a suitable YQL command. |
+| `CTRL + R` | Allows searching for a query in history containing a specified substring. |
## Special commands {#spec-commands}
Special commands are CLI-specific commands and are not part of the YQL syntax. They are intended for performing various functions that cannot be accomplished through a YQL query.
-Command | Description
----|---
-| `SET param = value` | The `SET` command sets the value of the [internal variable](#internal-vars) `param` to `value`. |
-| `EXPLAIN query-text` | Outputs the query plan for `query-text`. Equivalent to the command [ydb table query explain](commands/explain-plan.md#explain-plan). |
+| Command | Description |
+|--------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `SET param = value` | The `SET` command sets the value of the [internal variable](#internal-vars) `param` to `value`. |
+| `EXPLAIN query-text` | Outputs the query plan for `query-text`. Equivalent to the command [ydb table query explain](commands/explain-plan.md#explain-plan). |
| `EXPLAIN AST query-text` | Outputs the query plan for `query-text` along with the [AST](commands/explain-plan.md). Equivalent to the command [ydb table query explain --ast](commands/explain-plan.md#ast). |
### List of internal variables {#internal-vars}
Internal variables determine the behavior of commands and are set using the [special command](#spec-commands) `SET`.
-Variable | Description
----|---
-| `stats` | The statistics collection mode for subsequent queries.<br/>Acceptable values:<ul><li>`none` (default): Do not collect.</li><li>`basic`: Collect statistics.</li><li>`full`: Collect statistics and query plan.</li></ul> |
+| Variable | Description |
+|----------|---|
+| `stats` | The statistics collection mode for subsequent queries.<br/>Acceptable values:<ul><li>`none` (default): Do not collect.</li><li>`basic`: Collect statistics.</li><li>`full`: Collect statistics and query plan.</li></ul> |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/operation-get.md b/ydb/docs/en/core/reference/ydb-cli/operation-get.md
index 2b43b4e304..9c4d4348df 100644
--- a/ydb/docs/en/core/reference/ydb-cli/operation-get.md
+++ b/ydb/docs/en/core/reference/ydb-cli/operation-get.md
@@ -20,8 +20,8 @@ View a description of the command to obtain the status of a long-running operati
## Parameters of the subcommand {#options}
-| Name | Description |
----|---
+| Name | Description |
+|------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `--format` | Input format.<br/>Default value: `pretty`.<br/>Acceptable values:<ul><li>`pretty`: A human-readable format.</li><li>`proto-json-base64`: Protobuf result in [JSON] format{% if lang == "ru" %}(https://ru.wikipedia.org/wiki/JSON){% endif %}{% if lang == "en" %}(https://en.wikipedia.org/wiki/JSON){% endif %}, binary strings are encoded in [Base64]{% if lang == "ru" %}(https://ru.wikipedia.org/wiki/Base64){% endif %}{% if lang == "en" %}(https://en.wikipedia.org/wiki/Base64){% endif %}.</li></ul> |
## Examples {examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/operation-list.md b/ydb/docs/en/core/reference/ydb-cli/operation-list.md
index 68b574d523..e28aaa89df 100644
--- a/ydb/docs/en/core/reference/ydb-cli/operation-list.md
+++ b/ydb/docs/en/core/reference/ydb-cli/operation-list.md
@@ -24,11 +24,11 @@ View a description of the command to get a list of long-running operations:
## Parameters of the subcommand {#options}
-| Name | Description |
----|---
-| `-s`, `--page-size` | Number of operations on one page. If the list of operations contains more strings than specified in the `--page-size` parameter, the result will be split into several pages. To get the next page, specify the `--page-token` parameter. |
-| `-t`, `--page-token` | Page token. |
-| `--format` | Input format.<br/>Default value: `pretty`.<br/>Acceptable values:<ul><li>`pretty`: A human-readable format.</li><li>`proto-json-base64`: Protobuf result in [JSON](https://en.wikipedia.org/wiki/JSON) format, binary strings are encoded in [Base64](https://en.wikipedia.org/wiki/Base64).</li></ul> |
+| Name | Description |
+|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `-s`, `--page-size` | Number of operations on one page. If the list of operations contains more strings than specified in the `--page-size` parameter, the result will be split into several pages. To get the next page, specify the `--page-token` parameter. |
+| `-t`, `--page-token` | Page token. |
+| `--format` | Input format.<br/>Default value: `pretty`.<br/>Acceptable values:<ul><li>`pretty`: A human-readable format.</li><li>`proto-json-base64`: Protobuf result in [JSON](https://en.wikipedia.org/wiki/JSON) format, binary strings are encoded in [Base64](https://en.wikipedia.org/wiki/Base64).</li></ul> |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/parameterized-queries-cli.md b/ydb/docs/en/core/reference/ydb-cli/parameterized-queries-cli.md
index cfcc57a5d8..73d3358727 100644
--- a/ydb/docs/en/core/reference/ydb-cli/parameterized-queries-cli.md
+++ b/ydb/docs/en/core/reference/ydb-cli/parameterized-queries-cli.md
@@ -22,13 +22,13 @@ Among the above commands, only the `table query execute` applies retry policies.
To provide parameters for a YQL query execution, you can use command line, JSON files, and `stdin`, using the following {{ ydb-short-name }} CLI options:
-| Name | Description |
----|---
-| `-p, --param` | An expression in the format `$name=value`, where `$name` is the name of the YQL query parameter and `value` is its value (a correct [JSON value](https://www.json.org/json-ru.html)). The option can be specified repeatedly.<br/><br/>All the specified parameters must be declared in the YQL query by the [DECLARE operator](../../yql/reference/syntax/declare.md); otherwise, you will get an error "Query does not contain parameter". If you specify the same parameter several times, you will get an error "Parameter value found in more than one source".<br/><br/>Depending on your operating system, you might need to escape the `$` character or enclose your expression in single quotes (`'`). |
-| `--param-file` | Name of a file in [JSON](https://en.wikipedia.org/wiki/JSON) format in [UTF-8](https://en.wikipedia.org/wiki/UTF-8) encoding that contains parameter values matched against the YQL query parameters by key names. The option can be specified repeatedly.<br/><br/>If values of the same parameter are found in multiple files or set by the `--param` command line option, you'll get an error "Parameter value found in more than one source".<br/><br/>Names of keys in the JSON file are expected without the leading `$` sign. Keys that are present in the file but aren't declared in the YQL query will be ignored without an error message. |
-| `--input-format` | Format of parameter values, applied to all sources of parameters (command line, file, or `stdin`).<br/>Available options:<ul><li>`json-unicode` (default):[JSON](https://en.wikipedia.org/wiki/JSON).</li><li>`json-base64`: [JSON](https://en.wikipedia.org/wiki/JSON) with values of binary string parameters (`DECLARE $par AS String`) are [Base64](https://en.wikipedia.org/wiki/Base64)-encoded. This feature enables you to process binary data, being decoded from Base64 by the {{ ydb-short-name }} CLI.</li></ul> |
-| `--stdin-format` | Format of parameter values for `stdin`.<br/>The {{ ydb-short-name }} CLI automatically detects that a file or an output of another shell command has been redirected to the standard input device `stdin`. In this case, the CLI interprets the incoming data based on the following available options:<ul><li>`json-unicode`: [JSON](https://en.wikipedia.org/wiki/JSON).</li><li>`json-base64`: [JSON](https://en.wikipedia.org/wiki/JSON) with values of binary string parameters (`DECLARE $par AS String`) are [Base64](https://en.wikipedia.org/wiki/Base64)-encoded.</li><li>`raw`: Binary data.</li></ul>If format of parameter values for `stdin` isn't specified, the `--input-format` is used. |
-| `--stdin-par` | Name of a parameter whose value is provided on `stdin`, without a `$` sign. This name is required when you use the `raw` format in `--stdin-format`.<br/><br/>When used with JSON formats, `stdin` is interpreted not as a JSON document but as a JSON value passed to the parameter with the specified name. |
+| Name | Description |
+|------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `-p, --param`    | An expression in the format `$name=value`, where `$name` is the name of the YQL query parameter and `value` is its value (a correct [JSON value](https://www.json.org/json-en.html)). The option can be specified repeatedly.<br/><br/>All the specified parameters must be declared in the YQL query by the [DECLARE operator](../../yql/reference/syntax/declare.md); otherwise, you will get an error "Query does not contain parameter". If you specify the same parameter several times, you will get an error "Parameter value found in more than one source".<br/><br/>Depending on your operating system, you might need to escape the `$` character or enclose your expression in single quotes (`'`). |
+| `--param-file` | Name of a file in [JSON](https://en.wikipedia.org/wiki/JSON) format in [UTF-8](https://en.wikipedia.org/wiki/UTF-8) encoding that contains parameter values matched against the YQL query parameters by key names. The option can be specified repeatedly.<br/><br/>If values of the same parameter are found in multiple files or set by the `--param` command line option, you'll get an error "Parameter value found in more than one source".<br/><br/>Names of keys in the JSON file are expected without the leading `$` sign. Keys that are present in the file but aren't declared in the YQL query will be ignored without an error message. |
+| `--input-format` | Format of parameter values, applied to all sources of parameters (command line, file, or `stdin`).<br/>Available options:<ul><li>`json-unicode` (default): [JSON](https://en.wikipedia.org/wiki/JSON).</li><li>`json-base64`: [JSON](https://en.wikipedia.org/wiki/JSON) where values of binary string parameters (`DECLARE $par AS String`) are [Base64](https://en.wikipedia.org/wiki/Base64)-encoded. This feature enables you to process binary data, being decoded from Base64 by the {{ ydb-short-name }} CLI.</li></ul> |
+| `--stdin-format` | Format of parameter values for `stdin`.<br/>The {{ ydb-short-name }} CLI automatically detects that a file or an output of another shell command has been redirected to the standard input device `stdin`. In this case, the CLI interprets the incoming data based on the following available options:<ul><li>`json-unicode`: [JSON](https://en.wikipedia.org/wiki/JSON).</li><li>`json-base64`: [JSON](https://en.wikipedia.org/wiki/JSON) with values of binary string parameters (`DECLARE $par AS String`) are [Base64](https://en.wikipedia.org/wiki/Base64)-encoded.</li><li>`raw`: Binary data.</li></ul>If format of parameter values for `stdin` isn't specified, the `--input-format` is used. |
+| `--stdin-par` | Name of a parameter whose value is provided on `stdin`, without a `$` sign. This name is required when you use the `raw` format in `--stdin-format`.<br/><br/>When used with JSON formats, `stdin` is interpreted not as a JSON document but as a JSON value passed to the parameter with the specified name. |
The query will be executed on the server once, provided that values are specified for all the parameters [in the `DECLARE` clause](../../yql/reference/syntax/declare.md). If a value is absent for at least one parameter, the command fails with the "Missing value for parameter" message.
@@ -253,15 +253,15 @@ The adaptive mode solves two basic issues of dynamic stream processing:
To use the batching capabilities, define the `List<...>` or `List<Struct<...>>` parameter in the YQL query's DECLARE clause, and use the following options:
-| Name | Description |
----|---
+| Name | Description |
+|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `--batch` | The batch mode applied to parameter sets on `stdin`.<br/>Available options:<ul><li>`iterative` (default): Batching is [disabled](#streaming-iterate).</li><li>`full`: Full batch mode. The YQL query runs only once when `stdin` is closed, with all the received sets of parameters wrapped into `List<>`, the parameter name is set by the `--stdin-par` option.</li><li>`adaptive`: Adaptive batch mode. The YQL query runs every time when limits are exceeded either on the number of parameter sets per query (`--batch-limit`) or on the batch processing delay (`--batch-max-delay`). All the sets of parameters received by that moment are wrapped into a `List<>`, the parameter name is set by the `--stdin-par` option. |
In the adaptive batch mode, you can use the following additional parameters:
-| Name | Description |
----|---
-| `--batch-limit` | The maximum number of sets of parameters per batch in the adaptive batch mode. The next batch will be sent to the YQL query if the number of parameter sets in it reaches the specified limit. When it's `0`, there's no limit.<br/><br/>Default value: `1000`.<br/><br/>Parameter values are sent to each YQL execution without streaming, so the total size per GRPC request that includes the parameter values has the upper limit of about 5 MB. |
+| Name | Description |
+|---------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--batch-limit` | The maximum number of sets of parameters per batch in the adaptive batch mode. The next batch will be sent to the YQL query if the number of parameter sets in it reaches the specified limit. When it's `0`, there's no limit.<br/><br/>Default value: `1000`.<br/><br/>Parameter values are sent to each YQL execution without streaming, so the total size per GRPC request that includes the parameter values has the upper limit of about 5 MB. |
| `--batch-max-delay` | The maximum delay to submit a received parameter set for processing in the adaptive batch mode. It's set as a number with a time unit - `s`, `ms`, `m`.<br/><br/>Default value: `1s` (1 second).<br/><br/>The {{ ydb-short-name }} CLI starts a timer when it receives a first set of parameters for the batch on `stdin`, and sends the whole accumulated batch for execution once the timer expires. With this parameter, you can batch efficiently when new parameter sets arrival rate on `stdin` is unpredictable. |
### Examples: Full batch processing {#example-batch-full}
diff --git a/ydb/docs/en/core/reference/ydb-cli/table-attribute-add.md b/ydb/docs/en/core/reference/ydb-cli/table-attribute-add.md
index 51ac796323..eaca80200b 100644
--- a/ydb/docs/en/core/reference/ydb-cli/table-attribute-add.md
+++ b/ydb/docs/en/core/reference/ydb-cli/table-attribute-add.md
@@ -20,8 +20,8 @@ Look up the description of the command to add a custom attribute:
## Parameters of the subcommand {#options}
-| Name | Description |
----|---
+| Name | Description |
+|---------------|------------------------------------------------------------------------------------------------------------------------------------------|
| `--attribute` | The custom attribute in the `<key>=<value>` format. You can use `--attribute` many times to add multiple attributes by a single command. |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/table-attribute-drop.md b/ydb/docs/en/core/reference/ydb-cli/table-attribute-drop.md
index e87194d79b..3f94aacd1b 100644
--- a/ydb/docs/en/core/reference/ydb-cli/table-attribute-drop.md
+++ b/ydb/docs/en/core/reference/ydb-cli/table-attribute-drop.md
@@ -20,8 +20,8 @@ Look up the description of the command to add a custom attribute:
## Parameters of the subcommand {#options}
-| Name | Description |
----|---
+| Name | Description |
+|----------------|-------------------------------------------------------------------------------------------------------|
| `--attributes` | The key of the custom attribute to be dropped. You can list multiple keys separated by a comma (`,`). |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/table-drop.md b/ydb/docs/en/core/reference/ydb-cli/table-drop.md
index 8f7df9486b..9a26ced7e6 100644
--- a/ydb/docs/en/core/reference/ydb-cli/table-drop.md
+++ b/ydb/docs/en/core/reference/ydb-cli/table-drop.md
@@ -20,8 +20,8 @@ To view a description of the table delete command:
## Parameters of the subcommand {#options}
-| Name | Description |
----|---
+| Name | Description |
+|-------------|------------------------------------------------------------------------|
| `--timeout` | The time within which the operation should be completed on the server. |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/table-ttl-set.md b/ydb/docs/en/core/reference/ydb-cli/table-ttl-set.md
index e84ec5b8ba..83cbaf966c 100644
--- a/ydb/docs/en/core/reference/ydb-cli/table-ttl-set.md
+++ b/ydb/docs/en/core/reference/ydb-cli/table-ttl-set.md
@@ -20,12 +20,12 @@ View a description of the TTL set command:
## Parameters of the subcommand {#options}
-| Name | Description |
----|---
-| `--column` | The name of the column that will be used to calculate the lifetime of the rows. The column must have the [numeric](../../yql/reference/types/primitive.md#numeric) or [date and time](../../yql/reference/types/primitive.md#datetime) type.<br/>In case of the numeric type, the value will be interpreted as the time elapsed since the beginning of the [Unix epoch](https://en.wikipedia.org/wiki/Unix_time). Measurement units must be specified in the `--unit` parameter. |
-| `--expire-after` | Additional time before deleting that must elapse after the lifetime of the row has expired. Specified in seconds.<br/>The default value is `0`. |
-| `--unit` | The value measurement units of the column specified in the `--column` parameter. It is mandatory if the column has the [numeric](../../yql/reference/types/primitive.md#numeric) type.<br/>Possible values:<ul><li>`seconds (s, sec)`: Seconds.</li><li>`milliseconds (ms, msec)`: Milliseconds.</li><li>`microseconds (us, usec)`: Microseconds.</li><li>`nanoseconds (ns, nsec)`: Nanoseconds.</li></ul> |
-| `--run-interval` | The interval for running the operation to delete rows with expired TTL. Specified in seconds. The default database settings do not allow an interval of less than 15 minutes (900 seconds).<br/>The default value is `3600`. |
+| Name | Description |
+|------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--column` | The name of the column that will be used to calculate the lifetime of the rows. The column must have the [numeric](../../yql/reference/types/primitive.md#numeric) or [date and time](../../yql/reference/types/primitive.md#datetime) type.<br/>In case of the numeric type, the value will be interpreted as the time elapsed since the beginning of the [Unix epoch](https://en.wikipedia.org/wiki/Unix_time). Measurement units must be specified in the `--unit` parameter. |
+| `--expire-after` | Additional time before deleting that must elapse after the lifetime of the row has expired. Specified in seconds.<br/>The default value is `0`. |
+| `--unit` | The value measurement units of the column specified in the `--column` parameter. It is mandatory if the column has the [numeric](../../yql/reference/types/primitive.md#numeric) type.<br/>Possible values:<ul><li>`seconds (s, sec)`: Seconds.</li><li>`milliseconds (ms, msec)`: Milliseconds.</li><li>`microseconds (us, usec)`: Microseconds.</li><li>`nanoseconds (ns, nsec)`: Nanoseconds.</li></ul> |
+| `--run-interval` | The interval for running the operation to delete rows with expired TTL. Specified in seconds. The default database settings do not allow an interval of less than 15 minutes (900 seconds).<br/>The default value is `3600`. |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/toc_i.yaml b/ydb/docs/en/core/reference/ydb-cli/toc_i.yaml
index a7e23c5c39..3dc4805a98 100644
--- a/ydb/docs/en/core/reference/ydb-cli/toc_i.yaml
+++ b/ydb/docs/en/core/reference/ydb-cli/toc_i.yaml
@@ -111,6 +111,8 @@ items:
href: commands/config-info.md
- name: Getting the YDB CLI version
href: version.md
+ - name: Health check
+ href: commands/monitoring-healthcheck.md
- name: Load testing
items:
- name: Overview
diff --git a/ydb/docs/en/core/reference/ydb-cli/tools-copy.md b/ydb/docs/en/core/reference/ydb-cli/tools-copy.md
index 1b57000862..d0dbe74d85 100644
--- a/ydb/docs/en/core/reference/ydb-cli/tools-copy.md
+++ b/ydb/docs/en/core/reference/ydb-cli/tools-copy.md
@@ -19,9 +19,9 @@ View a description of the command to copy a table:
## Parameters of the subcommand {#options}
-| Parameter name | Parameter description |
----|---
-| `--timeout` | The time within which the operation should be completed on the server. |
+| Parameter name | Parameter description |
+|---------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--timeout` | The time within which the operation should be completed on the server. |
| `--item <property>=<value>,...` | Operation properties. You can specify the parameter more than once to copy several tables in a single transaction.<br/>Required properties:<ul><li>`destination`, `dst`, `d`: Path to target table. If the destination path contains folders, they must be created in advance. No table with the destination name should exist.</li><li>`source`, `src`, `s`: Path to source table.</li></ul> |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/topic-alter.md b/ydb/docs/en/core/reference/ydb-cli/topic-alter.md
index 6d50344d4c..544a0ed278 100644
--- a/ydb/docs/en/core/reference/ydb-cli/topic-alter.md
+++ b/ydb/docs/en/core/reference/ydb-cli/topic-alter.md
@@ -22,14 +22,14 @@ View the description of the update topic command:
The command changes the values of parameters specified in the command line. The other parameter values remain unchanged.
-| Name | Description |
----|---
-| `--partitions-count` | The number of topic [partitions](../../concepts/topic.md#partitioning). You can only increase the number of partitions. |
-| `--retention-period-hours` | The retention period for topic data, in hours. |
-| `--partition-write-speed-kbps` | The maximum write speed to a [partition](../../concepts/topic.md#partitioning), specified in KB/s.<br/>The default value is `1024`. |
-| `--retention-storage-mb` | The maximum storage size, specified in MB. When the limit is reached, the oldest data will be deleted.<br/>The default value is `0` (no limit). |
-| `--supported-codecs` | Supported data compression methods.<br/>Possible values:<ul><li>`RAW`: No compression.</li><li>`ZSTD`: [zstd](https://en.wikipedia.org/wiki/Zstandard) compression.</li><li>`GZIP`: [gzip](https://en.wikipedia.org/wiki/Gzip) compression.</li><li>`LZOP`: [lzop](https://en.wikipedia.org/wiki/Lzop) compression.</li></ul> |
-| `--metering-mode` | The topic pricing method for a serverless database.<br/>Possible values:<ul><li>`request-units`: Based on actual usage.</li><li>`reserved-capacity`: Based on dedicated resources.</li></ul> |
+| Name | Description |
+|--------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--partitions-count` | The number of topic [partitions](../../concepts/topic.md#partitioning). You can only increase the number of partitions. |
+| `--retention-period-hours` | The retention period for topic data, in hours. |
+| `--partition-write-speed-kbps` | The maximum write speed to a [partition](../../concepts/topic.md#partitioning), specified in KB/s.<br/>The default value is `1024`. |
+| `--retention-storage-mb` | The maximum storage size, specified in MB. When the limit is reached, the oldest data will be deleted.<br/>The default value is `0` (no limit). |
+| `--supported-codecs` | Supported data compression methods.<br/>Possible values:<ul><li>`RAW`: No compression.</li><li>`ZSTD`: [zstd](https://en.wikipedia.org/wiki/Zstandard) compression.</li><li>`GZIP`: [gzip](https://en.wikipedia.org/wiki/Gzip) compression.</li><li>`LZOP`: [lzop](https://en.wikipedia.org/wiki/Lzop) compression.</li></ul> |
+| `--metering-mode` | The topic pricing method for a serverless database.<br/>Possible values:<ul><li>`request-units`: Based on actual usage.</li><li>`reserved-capacity`: Based on dedicated resources.</li></ul> |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/topic-consumer-add.md b/ydb/docs/en/core/reference/ydb-cli/topic-consumer-add.md
index effde854fd..06438f50fe 100644
--- a/ydb/docs/en/core/reference/ydb-cli/topic-consumer-add.md
+++ b/ydb/docs/en/core/reference/ydb-cli/topic-consumer-add.md
@@ -20,11 +20,11 @@ View the description of the add consumer command:
## Parameters of the subcommand {#options}
-| Name | Description |
----|---
-| `--consumer VAL` | Name of the consumer to be added. |
-| `--starting-message-timestamp VAL` | Time in [UNIX timestamp](https://en.wikipedia.org/wiki/Unix_time) format. Consumption starts as soon as the first [message](../../concepts/topic.md#message) is received after the specified time. If the time is not specified, consumption will start from the oldest message in the topic. |
-| `--supported-codecs` | Supported data compression methods.<br/>The default value is `raw`.<br/>Possible values:<ul><li>`RAW`: No compression.</li><li>`ZSTD`: [zstd](https://en.wikipedia.org/wiki/Zstandard) compression.</li><li>`GZIP`: [gzip](https://en.wikipedia.org/wiki/Gzip) compression.</li><li>`LZOP`: [lzop](https://en.wikipedia.org/wiki/Lzop) compression.</li></ul> |
+| Name | Description |
+|------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--consumer VAL` | Name of the consumer to be added. |
+| `--starting-message-timestamp VAL` | Time in [UNIX timestamp](https://en.wikipedia.org/wiki/Unix_time) format. Consumption starts as soon as the first [message](../../concepts/topic.md#message) is received after the specified time. If the time is not specified, consumption will start from the oldest message in the topic. |
+| `--supported-codecs` | Supported data compression methods.<br/>The default value is `raw`.<br/>Possible values:<ul><li>`RAW`: No compression.</li><li>`ZSTD`: [zstd](https://en.wikipedia.org/wiki/Zstandard) compression.</li><li>`GZIP`: [gzip](https://en.wikipedia.org/wiki/Gzip) compression.</li><li>`LZOP`: [lzop](https://en.wikipedia.org/wiki/Lzop) compression.</li></ul> |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/topic-consumer-drop.md b/ydb/docs/en/core/reference/ydb-cli/topic-consumer-drop.md
index cbb691fea5..f20378784d 100644
--- a/ydb/docs/en/core/reference/ydb-cli/topic-consumer-drop.md
+++ b/ydb/docs/en/core/reference/ydb-cli/topic-consumer-drop.md
@@ -19,8 +19,8 @@ View the description of the delete consumer command:
## Parameters of the subcommand {#options}
-| Name | Description |
----|---
+| Name | Description |
+|------------------|-------------------------------------|
| `--consumer VAL` | Name of the consumer to be deleted. |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/topic-consumer-offset-commit.md b/ydb/docs/en/core/reference/ydb-cli/topic-consumer-offset-commit.md
index c83a67e5c2..a832ff8f1f 100644
--- a/ydb/docs/en/core/reference/ydb-cli/topic-consumer-offset-commit.md
+++ b/ydb/docs/en/core/reference/ydb-cli/topic-consumer-offset-commit.md
@@ -22,11 +22,11 @@ Viewing the command description:
## Parameters of the subcommand {#options}
-| Name | Description |
----|---
-| `--consumer <value>` | Consumer name. |
-| `--partition <value>` | Partition number. |
-| `--offset <value>` | Offset value that you want to set. |
+| Name | Description |
+|-----------------------|------------------------------------|
+| `--consumer <value>` | Consumer name. |
+| `--partition <value>` | Partition number. |
+| `--offset <value>` | Offset value that you want to set. |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/topic-create.md b/ydb/docs/en/core/reference/ydb-cli/topic-create.md
index c45e2084e3..2c794edacc 100644
--- a/ydb/docs/en/core/reference/ydb-cli/topic-create.md
+++ b/ydb/docs/en/core/reference/ydb-cli/topic-create.md
@@ -20,14 +20,14 @@ View the description of the create topic command:
## Parameters of the subcommand {#options}
-| Name | Description |
----|---
-| `--partitions-count` | The number of topic [partitions](../../concepts/topic.md#partitioning).<br/>The default value is `1`. |
-| `--retention-period-hours` | Data retention time in a topic, set in hours.<br/>The default value is `18`. |
-| `--partition-write-speed-kbps` | The maximum write speed to a [partition](../../concepts/topic.md#partitioning), specified in KB/s.<br/>The default value is `1024`. |
-| `--retention-storage-mb` | The maximum storage size, specified in MB. When the limit is reached, the oldest data will be deleted.<br/>The default value is `0` (no limit). |
-| `--supported-codecs` | Supported data compression methods. Set with a comma.<br/>The default value is `raw`.<br/>Possible values:<ul><li>`RAW`: No compression.</li><li>`ZSTD`: [zstd](https://en.wikipedia.org/wiki/Zstandard) compression.</li><li>`GZIP`: [gzip](https://en.wikipedia.org/wiki/Gzip) compression.</li><li>`LZOP`: [lzop](https://en.wikipedia.org/wiki/Lzop) compression.</li></ul> |
-| `--metering-mode` | The topic pricing method for a serverless database.<br/>Possible values:<ul><li>`request-units`: Based on actual usage.</li><li>`reserved-capacity`: Based on dedicated resources.</li></ul> |
+| Name | Description |
+|--------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--partitions-count` | The number of topic [partitions](../../concepts/topic.md#partitioning).<br/>The default value is `1`. |
+| `--retention-period-hours` | Data retention time in a topic, set in hours.<br/>The default value is `18`. |
+| `--partition-write-speed-kbps` | The maximum write speed to a [partition](../../concepts/topic.md#partitioning), specified in KB/s.<br/>The default value is `1024`. |
+| `--retention-storage-mb` | The maximum storage size, specified in MB. When the limit is reached, the oldest data will be deleted.<br/>The default value is `0` (no limit). |
+| `--supported-codecs` | Supported data compression methods. Set with a comma.<br/>The default value is `raw`.<br/>Possible values:<ul><li>`RAW`: No compression.</li><li>`ZSTD`: [zstd](https://en.wikipedia.org/wiki/Zstandard) compression.</li><li>`GZIP`: [gzip](https://en.wikipedia.org/wiki/Gzip) compression.</li><li>`LZOP`: [lzop](https://en.wikipedia.org/wiki/Lzop) compression.</li></ul> |
+| `--metering-mode` | The topic pricing method for a serverless database.<br/>Possible values:<ul><li>`request-units`: Based on actual usage.</li><li>`reserved-capacity`: Based on dedicated resources.</li></ul> |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/topic-read.md b/ydb/docs/en/core/reference/ydb-cli/topic-read.md
index 7a9f5301fb..5b8fb3e95c 100644
--- a/ydb/docs/en/core/reference/ydb-cli/topic-read.md
+++ b/ydb/docs/en/core/reference/ydb-cli/topic-read.md
@@ -36,12 +36,12 @@ If consumer name is not specified, message consumption will start from the first
- Specifies how to format messages at the output. Some formats don't support streaming mode.
- List of supported formats:
- | Name | Description | Is<br/>streaming mode supported? |
- ---|---|---
- | `single-message`<br/>(default) | The contents of no more than one message are output without formatting. | - |
- | `pretty` | Output to a pseudo-graphic table with columns containing message metadata. The message itself is output to the `body` column. | No |
- | `newline-delimited` | Messages are output with a delimiter (`0x0A` newline character) added after each message. | Yes |
- | `concatenated` | Messages are output one after another with no delimiter added. | Yes |
+ | Name | Description | Is<br/>streaming mode supported? |
+ |--------------------------------|-------------------------------------------------------------------------------------------------------------------------------|----------------------------------|
+ | `single-message`<br/>(default) | The contents of no more than one message are output without formatting. | - |
+ | `pretty` | Output to a pseudo-graphic table with columns containing message metadata. The message itself is output to the `body` column. | No |
+ | `newline-delimited` | Messages are output with a delimiter (`0x0A` newline character) added after each message. | Yes |
+ | `concatenated` | Messages are output one after another with no delimiter added. | Yes |
`--wait` (`-w`): Waiting for new messages to arrive.
@@ -53,9 +53,9 @@ If consumer name is not specified, message consumption will start from the first
- The default and acceptable values depend on the selected output format:
| Does the format<br/>support streaming selection mode? | Default limit value | Acceptable values |
- ---|---|---
- | No | 10 | 1-500 |
- | Yes | 0 (no limit) | 0-500 |
+ |-------------------------------------------------------|---------------------|-------------------|
+ | No | 10 | 1-500 |
+ | Yes | 0 (no limit) | 0-500 |
`--transform VAL`: Method for transforming messages.
@@ -74,12 +74,12 @@ If consumer name is not specified, message consumption will start from the first
### Other optional parameters
-| Name | Description |
----|---
-| `--idle-timeout VAL` | Timeout for deciding if a topic is empty, meaning that it contains no messages for processing. <br/>The time is counted from the point when a connection is established once the command is run or when the last message is received. If no new messages arrive from the server during the specified timeout, the topic is considered to be empty.<br/>Defaults to `1s` (1 second). |
-| `--timestamp VAL` | Message consumption starts from the point in time specified in [UNIX timestamp](https://en.wikipedia.org/wiki/Unix_time) format.<br/>If not set, messages are consumed starting from the consumer's current offset in the topic.<br/>If set, consumption starts from the first [message](../../concepts/topic.md#message) received after the specified time. |
+| Name | Description |
+|-------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--idle-timeout VAL` | Timeout for deciding if a topic is empty, meaning that it contains no messages for processing. <br/>The time is counted from the point when a connection is established once the command is run or when the last message is received. If no new messages arrive from the server during the specified timeout, the topic is considered to be empty.<br/>Defaults to `1s` (1 second). |
+| `--timestamp VAL` | Message consumption starts from the point in time specified in [UNIX timestamp](https://en.wikipedia.org/wiki/Unix_time) format.<br/>If not set, messages are consumed starting from the consumer's current offset in the topic.<br/>If set, consumption starts from the first [message](../../concepts/topic.md#message) received after the specified time. |
| `--metadata-fields VAL` | List of [message attributes](../../concepts/topic.md#message) whose values should be output in columns with metadata in `pretty` format. If not set, columns with all attributes are output. <br/>Possible values:<ul><li>`write_time`: The time a message is written to the server in [UNIX timestamp](https://en.wikipedia.org/wiki/Unix_time) format.</li><li>`meta`: Message metadata.</li><li>`create_time`: The time a message is created by the source in [UNIX timestamp](https://en.wikipedia.org/wiki/Unix_time) format.</li><li>`seq_no`: Message [sequence number](../../concepts/topic.md#seqno).</li><li>`offset`: [Message sequence number within a partition](../../concepts/topic.md#offset).</li><li>`message_group_id`: [Message group ID](../../concepts/topic.md#producer-id).</li><li>`body`: Message body.</li></ul> |
-| `--partition-ids VAL` | Comma-separated list of [partition](../../concepts/topic.md#partitioning) identifiers to read from.<br/>If not specified, messages are read from all partitions. |
+| `--partition-ids VAL` | Comma-separated list of [partition](../../concepts/topic.md#partitioning) identifiers to read from.<br/>If not specified, messages are read from all partitions. |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/topic-write.md b/ydb/docs/en/core/reference/ydb-cli/topic-write.md
index 06d4ddddd3..59bf8109f0 100644
--- a/ydb/docs/en/core/reference/ydb-cli/topic-write.md
+++ b/ydb/docs/en/core/reference/ydb-cli/topic-write.md
@@ -20,10 +20,10 @@ The `topic write` command writes messages to a topic from a file or `stdin`:
`--format STR`: Format of the incoming message stream. Supported formats:
- | Name | Description |
- ---|---
- | `single-message`<br/>(default) | The entire input stream is treated as a single message to be written to the topic. |
- | `newline-delimited` | A stream at the input contains multiple messages delimited with the `0x0A` newline character. |
+ | Name | Description |
+ |--------------------------------|-----------------------------------------------------------------------------------------------|
+ | `single-message`<br/>(default) | The entire input stream is treated as a single message to be written to the topic. |
+ | `newline-delimited` | A stream at the input contains multiple messages delimited with the `0x0A` newline character. |
`--transform VAL`: Method for transforming messages.
@@ -35,11 +35,11 @@ The `topic write` command writes messages to a topic from a file or `stdin`:
### Additional parameters
-| Name | Description |
----|---
-| `--delimiter STR` | Delimiter byte. The input stream is delimited into messages with the specified byte. Specified only if no `--format` is set. Specified as an escaped string. |
-| `--message-group-id STR` | Message group string ID. If not set, all messages generated from the input stream are assigned the same ID value as a hexadecimal string representation of a random three-byte integer. |
-| `--codec STR` | Codec used for message compression on the client before sending them to the server. Possible values: `RAW` (no compression, default), `GZIP`, and `ZSTD`. Compression causes higher CPU utilization on the client when reading and writing messages, but usually lets you reduce the volume of data transferred over the network and stored. When consumers read messages, they're automatically decompressed with the codec used when writing them, without specifying any special options. Make sure the specified codec is listed in the [topic parameters](topic-create.md#create-options) as supported. |
+| Name | Description |
+|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--delimiter STR` | Delimiter byte. The input stream is delimited into messages with the specified byte. Specified only if no `--format` is set. Specified as an escaped string. |
+| `--message-group-id STR` | Message group string ID. If not set, all messages generated from the input stream are assigned the same ID value as a hexadecimal string representation of a random three-byte integer. |
+| `--codec STR` | Codec used for message compression on the client before sending them to the server. Possible values: `RAW` (no compression, default), `GZIP`, and `ZSTD`. Compression causes higher CPU utilization on the client when reading and writing messages, but usually lets you reduce the volume of data transferred over the network and stored. When consumers read messages, they're automatically decompressed with the codec used when writing them, without specifying any special options. Make sure the specified codec is listed in the [topic parameters](topic-create.md#create-options) as supported. |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/version.md b/ydb/docs/en/core/reference/ydb-cli/version.md
index 699db4a3f2..9288e30692 100644
--- a/ydb/docs/en/core/reference/ydb-cli/version.md
+++ b/ydb/docs/en/core/reference/ydb-cli/version.md
@@ -21,12 +21,12 @@ View a description of the command:
## Parameters of the subcommand {#options}
-| Parameter | Description |
----|---
-| `--semantic` | Get only the version number. |
-| `--check` | Check if a new version is available. |
+| Parameter | Description |
+|--------------------|------------------------------------------|
+| `--semantic` | Get only the version number. |
+| `--check` | Check if a new version is available. |
| `--disable-checks` | Disable new version availability checks. |
-| `--enable-checks` | Enable new version availability checks. |
+| `--enable-checks` | Enable new version availability checks. |
## Examples {#examples}
diff --git a/ydb/docs/en/core/reference/ydb-cli/workload-click-bench.md b/ydb/docs/en/core/reference/ydb-cli/workload-click-bench.md
index 43441dfedb..f21855393c 100644
--- a/ydb/docs/en/core/reference/ydb-cli/workload-click-bench.md
+++ b/ydb/docs/en/core/reference/ydb-cli/workload-click-bench.md
@@ -86,13 +86,13 @@ See the command description to run the load:
### ClickBench-specific options { #run_clickbench_options }
-Name | Description | Default value
----|---|---
-`--ext-queries <queries>` or `-q <queries>` | External queries to execute during the load, separated by semicolons. |
-`--ext-queries-file <name>` | Name of the file containing external queries to execute during the load, separated by semicolons. |
-`--ext-query-dir <name>` | Directory containing external queries for the load. Queries should be in files named `q[0-42].sql`. |
-`--ext-results-dir <name>` | Directory containing external query results for comparison. Results should be in files named `q[0-42].sql`. |
-`--check-canonical` or `-c` | Use special deterministic internal queries and compare the results against canonical ones. |
+| Name | Description | Default value |
+|---------------------------------------------|-------------------------------------------------------------------------------------------------------------|---------------|
+| `--ext-queries <queries>` or `-q <queries>` | External queries to execute during the load, separated by semicolons. | |
+| `--ext-queries-file <name>` | Name of the file containing external queries to execute during the load, separated by semicolons. | |
+| `--ext-query-dir <name>` | Directory containing external queries for the load. Queries should be in files named `q[0-42].sql`. | |
+| `--ext-results-dir <name>` | Directory containing external query results for comparison. Results should be in files named `q[0-42].sql`. | |
+| `--check-canonical` or `-c` | Use special deterministic internal queries and compare the results against canonical ones. | |
## Cleanup test data { #cleanup }
diff --git a/ydb/docs/en/core/reference/ydb-cli/workload-topic.md b/ydb/docs/en/core/reference/ydb-cli/workload-topic.md
index 080d9d18f1..399478bddc 100644
--- a/ydb/docs/en/core/reference/ydb-cli/workload-topic.md
+++ b/ydb/docs/en/core/reference/ydb-cli/workload-topic.md
@@ -27,12 +27,12 @@ Before executing the load, you need to initialize it. During initialization, you
Subcommand options:
-| Option name | Option description |
----|---
-| `--topic` | Topic name.<br/>Default value: `workload-topic`. |
-| `--partitions`, `-p` | Number of topic partitions.<br/>Default value: `128`. |
-| `--consumers`, `-c` | Number of topic consumers.<br/>Default value: `1`. |
-| `--consumer-prefix` | Consumer name prefix.<br/>Default value: `workload-consumer`.<br/>For example, if the number of consumers `--consumers` is `2` and the prefix `--consumer-prefix` is `workload-consumer`, then the following consumer names will be used: `workload-consumer-0`, `workload-consumer-1`. |
+| Option name | Option description |
+|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--topic` | Topic name.<br/>Default value: `workload-topic`. |
+| `--partitions`, `-p` | Number of topic partitions.<br/>Default value: `128`. |
+| `--consumers`, `-c` | Number of topic consumers.<br/>Default value: `1`. |
+| `--consumer-prefix` | Consumer name prefix.<br/>Default value: `workload-consumer`.<br/>For example, if the number of consumers `--consumers` is `2` and the prefix `--consumer-prefix` is `workload-consumer`, then the following consumer names will be used: `workload-consumer-0`, `workload-consumer-1`. |
> To create a topic with `256` partitions and `2` consumers, run this command:
>
@@ -61,23 +61,23 @@ View the description of the command that generates the write load:
Subcommand options:
-| Option name | Option description |
----|---
-| `--seconds`, `-s` | Test duration in seconds.<br/>Default value: `60`. |
-| `--window`, `-w` | Statistics window in seconds.<br/>Default value: `1`. |
-| `--quiet`, `-q` | Output only the final test result. |
-| `--print-timestamp` | Print the time together with the statistics of each time window. |
-| `--warmup` | Test warm-up period (in seconds).<br/>Within the period, no statistics are calculated. It's needed to eliminate the effect of transition processes at startup.<br/>Default value: `5`. |
-| `--percentile` | Percentile that is output in statistics.<br/>Default value: `50`. |
-| `--topic` | Topic name.<br/>Default value: `workload-topic`. |
-| `--threads`, `-t` | Number of producer threads. Each thread will write to all partitions of the specified topic.<br/>Default value: `1`. |
-| `--message-size`, `-m` | Message size in bytes. Use the `K`, `M`, or `G` suffix to set the size in KB, MB, or GB, respectively.<br/>Default value: `10K`. |
-| `--message-rate` | Total target write rate in messages per second. Can't be used together with the `--byte-rate` option.<br/>Default value: `0` (no limit). |
-| `--byte-rate` | Total target write rate in bytes per second. Can't be used together with the `--message-rate` option. Use the `K`, `M`, or `G` suffix to set the rate in KB/s, MB/s, or GB/s, respectively.<br/>Default value: `0` (no limit). |
-| `--codec` | Codec used to compress messages on the client before sending them to the server.<br/>Compression increases CPU usage on the client when reading and writing messages, but usually enables you to reduce the amounts of data stored and transmitted over the network. When consumers read messages, they decompress them by the codec that was used to write the messages, with no special options needed.<br/>Acceptable values: `RAW` - no compression (default), `GZIP`, `ZSTD`. |
-| `--use-tx` | Use transactions.<br/>Disabled by default. |
-| `--tx-commit-interval` | Transaction commit interval, in milliseconds. A transaction is committed if the time specified in the `--tx-commit-interval` parameter elapses or if the number of messages specified in the `--tx-commit-messages` parameter is written.<br/>Default value: `1000`. |
-| `--tx-commit-messages` | Number of messages required to commit a transaction. A transaction is committed if the time specified in the `--tx-commit-interval` parameter elapses or if the number of messages specified in the `--tx-commit-messages` parameter is written.<br/>Default value: `1 000 000`. |
+| Option name | Option description |
+|------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--seconds`, `-s` | Test duration in seconds.<br/>Default value: `60`. |
+| `--window`, `-w` | Statistics window in seconds.<br/>Default value: `1`. |
+| `--quiet`, `-q` | Output only the final test result. |
+| `--print-timestamp` | Print the time together with the statistics of each time window. |
+| `--warmup` | Test warm-up period (in seconds).<br/>Within the period, no statistics are calculated. It's needed to eliminate the effect of transition processes at startup.<br/>Default value: `5`. |
+| `--percentile` | Percentile that is output in statistics.<br/>Default value: `50`. |
+| `--topic` | Topic name.<br/>Default value: `workload-topic`. |
+| `--threads`, `-t` | Number of producer threads. Each thread will write to all partitions of the specified topic.<br/>Default value: `1`. |
+| `--message-size`, `-m` | Message size in bytes. Use the `K`, `M`, or `G` suffix to set the size in KB, MB, or GB, respectively.<br/>Default value: `10K`. |
+| `--message-rate` | Total target write rate in messages per second. Can't be used together with the `--byte-rate` option.<br/>Default value: `0` (no limit). |
+| `--byte-rate` | Total target write rate in bytes per second. Can't be used together with the `--message-rate` option. Use the `K`, `M`, or `G` suffix to set the rate in KB/s, MB/s, or GB/s, respectively.<br/>Default value: `0` (no limit). |
+| `--codec` | Codec used to compress messages on the client before sending them to the server.<br/>Compression increases CPU usage on the client when reading and writing messages, but usually enables you to reduce the amounts of data stored and transmitted over the network. When consumers read messages, they decompress them by the codec that was used to write the messages, with no special options needed.<br/>Acceptable values: `RAW` - no compression (default), `GZIP`, `ZSTD`. |
+| `--use-tx` | Use transactions.<br/>Disabled by default. |
+| `--tx-commit-interval` | Transaction commit interval, in milliseconds. A transaction is committed if the time specified in the `--tx-commit-interval` parameter elapses or if the number of messages specified in the `--tx-commit-messages` parameter is written.<br/>Default value: `1000`. |
+| `--tx-commit-messages` | Number of messages required to commit a transaction. A transaction is committed if the time specified in the `--tx-commit-interval` parameter elapses or if the number of messages specified in the `--tx-commit-messages` parameter is written.<br/>Default value: `1 000 000`. |
To write data to `100` producer threads at the target rate of `80` MB/s for `10` seconds, run this command:
@@ -133,18 +133,18 @@ View the description of the command to generate the read load:
Subcommand options:
-| Option name | Option description |
----|---
-| `--seconds`, `-s` | Test duration in seconds.<br/>Default value: `60`. |
-| `--window`, `-w` | Statistics window in seconds.<br/>Default value: `1`. |
-| `--quiet`, `-q` | Output only the final test result. |
-| `--print-timestamp` | Print the time together with the statistics of each time window. |
-| `--warmup` | Test warm-up period (in seconds).<br/>Within the period, no statistics are calculated. It's needed to eliminate the effect of transition processes at startup.<br/>Default value: `5`. |
-| `--percentile` | Percentile that is output in statistics.<br/>Default value: `50`. |
-| `--topic` | Topic name.<br/>Default value: `workload-topic`. |
-| `--consumers`, `-c` | Number of consumers.<br/>Default value: `1`. |
+| Option name | Option description |
+|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--seconds`, `-s` | Test duration in seconds.<br/>Default value: `60`. |
+| `--window`, `-w` | Statistics window in seconds.<br/>Default value: `1`. |
+| `--quiet`, `-q` | Output only the final test result. |
+| `--print-timestamp` | Print the time together with the statistics of each time window. |
+| `--warmup` | Test warm-up period (in seconds).<br/>Within the period, no statistics are calculated. It's needed to eliminate the effect of transition processes at startup.<br/>Default value: `5`. |
+| `--percentile` | Percentile that is output in statistics.<br/>Default value: `50`. |
+| `--topic` | Topic name.<br/>Default value: `workload-topic`. |
+| `--consumers`, `-c` | Number of consumers.<br/>Default value: `1`. |
| `--consumer-prefix` | Consumer name prefix.<br/>Default value: `workload-consumer`.<br/>For example, if the number of consumers `--consumers` is `2` and the prefix `--consumer-prefix` is `workload-consumer`, then the following consumer names will be used: `workload-consumer-0`, `workload-consumer-1`. |
-| `--threads`, `-t` | Number of consumer threads.<br/>Default value: `1`. |
+| `--threads`, `-t` | Number of consumer threads.<br/>Default value: `1`. |
To use `2` consumers to read data from the topic, with `100` threads per consumer, run the following command:
@@ -199,26 +199,26 @@ View the description of the command to run the read and write load:
Subcommand options:
-| Option name | Option description |
----|---
-| `--seconds`, `-s` | Test duration in seconds.<br/>Default value: `60`. |
-| `--window`, `-w` | Statistics window in seconds.<br/>Default value: `1`. |
-| `--quiet`, `-q` | Output only the final test result. |
-| `--print-timestamp` | Print the time together with the statistics of each time window. |
-| `--warmup` | Test warm-up period (in seconds).<br/>Within the period, no statistics are calculated. It's needed to eliminate the effect of transition processes at startup.<br/>Default value: `5`. |
-| `--percentile` | Percentile that is output in statistics.<br/>Default value: `50`. |
-| `--topic` | Topic name.<br/>Default value: `workload-topic`. |
-| `--producer-threads`, `-p` | Number of producer threads. Each thread will write to all partitions of the specified topic.<br/>Default value: `1`. |
-| `--message-size`, `-m` | Message size in bytes. Use the `K`, `M`, or `G` suffix to set the size in KB, MB, or GB, respectively.<br/>Default value: `10K`. |
-| `--message-rate` | Total target write rate in messages per second. Can't be used together with the `--message-rate` option.<br/>Default value: `0` (no limit). |
-| `--byte-rate` | Total target write rate in bytes per second. Can't be used together with the `--byte-rate` option. Use the `K`, `M`, or `G` suffix to set the rate in KB/s, MB/s, or GB/s, respectively.<br/>Default value: `0` (no limit). |
-| `--codec` | Codec used to compress messages on the client before sending them to the server.<br/>Compression increases CPU usage on the client when reading and writing messages, but usually enables you to reduce the amounts of data stored and transmitted over the network. When consumers read messages, they decompress them by the codec that was used to write the messages, with no special options needed.<br/>Acceptable values: `RAW` - no compression (default), `GZIP`, `ZSTD`. |
-| `--consumers`, `-c` | Number of consumers.<br/>Default value: `1`. |
-| `--consumer-prefix` | Consumer name prefix.<br/>Default value: `workload-consumer`.<br/>For example, if the number of consumers `--consumers` is `2` and the prefix `--consumer-prefix` is `workload-consumer`, then the following consumer names will be used: `workload-consumer-0`, `workload-consumer-1`. |
-| `--threads`, `-t` | Number of consumer threads.<br/>Default value: `1`. |
-| `--use-tx` | Use transactions.<br/>Disabled by default. |
-| `--tx-commit-interval` | Transaction commit interval, in milliseconds. A transaction is committed if the time specified in the `--tx-commit-interval` parameter elapses or if the number of messages specified in the `--tx-commit-messages` parameter is written.<br/>Default value: `1000`. |
-| `--tx-commit-messages` | Number of messages required to commit a transaction. A transaction is committed if the time specified in the `--tx-commit-interval` parameter elapses or if the number of messages specified in the `--tx-commit-messages` parameter is written.<br/>Default value: `1 000 000`. |
+| Option name | Option description |
+|----------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--seconds`, `-s` | Test duration in seconds.<br/>Default value: `60`. |
+| `--window`, `-w` | Statistics window in seconds.<br/>Default value: `1`. |
+| `--quiet`, `-q` | Output only the final test result. |
+| `--print-timestamp` | Print the time together with the statistics of each time window. |
+| `--warmup` | Test warm-up period (in seconds).<br/>Within the period, no statistics are calculated. It's needed to eliminate the effect of transition processes at startup.<br/>Default value: `5`. |
+| `--percentile` | Percentile that is output in statistics.<br/>Default value: `50`. |
+| `--topic` | Topic name.<br/>Default value: `workload-topic`. |
+| `--producer-threads`, `-p` | Number of producer threads. Each thread will write to all partitions of the specified topic.<br/>Default value: `1`. |
+| `--message-size`, `-m` | Message size in bytes. Use the `K`, `M`, or `G` suffix to set the size in KB, MB, or GB, respectively.<br/>Default value: `10K`. |
+| `--message-rate`           | Total target write rate in messages per second. Can't be used together with the `--byte-rate` option.<br/>Default value: `0` (no limit).                                                                                                                                                                                                                                                                                                                           |
+| `--byte-rate`              | Total target write rate in bytes per second. Can't be used together with the `--message-rate` option. Use the `K`, `M`, or `G` suffix to set the rate in KB/s, MB/s, or GB/s, respectively.<br/>Default value: `0` (no limit).                                                                                                                                                                                                                                      |
+| `--codec` | Codec used to compress messages on the client before sending them to the server.<br/>Compression increases CPU usage on the client when reading and writing messages, but usually enables you to reduce the amounts of data stored and transmitted over the network. When consumers read messages, they decompress them by the codec that was used to write the messages, with no special options needed.<br/>Acceptable values: `RAW` - no compression (default), `GZIP`, `ZSTD`. |
+| `--consumers`, `-c` | Number of consumers.<br/>Default value: `1`. |
+| `--consumer-prefix` | Consumer name prefix.<br/>Default value: `workload-consumer`.<br/>For example, if the number of consumers `--consumers` is `2` and the prefix `--consumer-prefix` is `workload-consumer`, then the following consumer names will be used: `workload-consumer-0`, `workload-consumer-1`. |
+| `--threads`, `-t` | Number of consumer threads.<br/>Default value: `1`. |
+| `--use-tx` | Use transactions.<br/>Disabled by default. |
+| `--tx-commit-interval` | Transaction commit interval, in milliseconds. A transaction is committed if the time specified in the `--tx-commit-interval` parameter elapses or if the number of messages specified in the `--tx-commit-messages` parameter is written.<br/>Default value: `1000`. |
+| `--tx-commit-messages` | Number of messages required to commit a transaction. A transaction is committed if the time specified in the `--tx-commit-interval` parameter elapses or if the number of messages specified in the `--tx-commit-messages` parameter is written.<br/>Default value: `1 000 000`. |
Example of a command that reads `50` threads by `2` consumers and writes data to `100` producer threads at the target rate of `80` MB/s and duration of `10` seconds:
@@ -268,9 +268,9 @@ When the work is complete, you can delete the test topic: General format of the
Subcommand options:
-| Option name | Option description |
----|---
-| `--topic` | Topic name.<br/>Default value: `workload-topic`. |
+| Option name | Option description |
+|-------------|--------------------------------------------------|
+| `--topic` | Topic name.<br/>Default value: `workload-topic`. |
To delete the `workload-topic` test topic, run the following command:
diff --git a/ydb/docs/en/core/reference/ydb-cli/workload-tpcds.md b/ydb/docs/en/core/reference/ydb-cli/workload-tpcds.md
index 21b7e0878c..91bd57cd37 100644
--- a/ydb/docs/en/core/reference/ydb-cli/workload-tpcds.md
+++ b/ydb/docs/en/core/reference/ydb-cli/workload-tpcds.md
@@ -14,9 +14,9 @@ All commands support the common option `--path`, which specifies the path to the
### Available options { #common_options }
-Name | Description | Default value
----|---|---
-`--path` or `-p` | Path to the directory with tables. | `/`
+| Name | Description | Default value |
+|-------------------|-------------------------------------|---------------|
+| `--path` or `-p` | Path to the directory with tables. | `/` |
## Initializing the load test {#init}
@@ -50,14 +50,14 @@ See the command description:
### Available options {#load_files_options}
-| Name | Description | Default value |
-|---------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
-| `--scale <value>` | Data scale. Typically, powers of ten are used. | |
-| `--tables <value>` | Comma-separated list of tables to generate. Available tables: `customer`, `nation`, `order_line`, `part_psupp`, `region`, `supplier`. | All tables |
-| `--process-count <value>` or `-C <value>` | Specifies the number of processes for parallel data generation. | `1` |
-| `--process-index <value>` or `-i <value>` | Specifies the process number when data generation is split into multiple processes. | `0` |
-| `--state <path>` | Path to the state file for resuming generation. If the generation is interrupted, it will resume from the same point when restarted. | |
-| `--clear-state` | Relevant if the `--state` parameter is specified. Clears the state file and restarts the download from the beginning. | |
+| Name | Description | Default value |
+|---------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
+| `--scale <value>` | Data scale. Typically, powers of ten are used. | |
+| `--tables <value>` | Comma-separated list of tables to generate. Available tables: `customer`, `nation`, `order_line`, `part_psupp`, `region`, `supplier`. | All tables |
+| `--process-count <value>` or `-C <value>` | Specifies the number of processes for parallel data generation. | `1` |
+| `--process-index <value>` or `-i <value>` | Specifies the process number when data generation is split into multiple processes. | `0` |
+| `--state <path>` | Path to the state file for resuming generation. If the generation is interrupted, it will resume from the same point when restarted. | |
+| `--clear-state` | Relevant if the `--state` parameter is specified. Clears the state file and restarts the download from the beginning. | |
{% include [load_options](./_includes/workload/load_options.md) %}
@@ -81,9 +81,9 @@ See the command description:
### TPC-DS-specific options { #run_tpcds_options }
-Name | Description | Default value
----|---|---
-`--ext-query-dir <name>` | Directory with external queries for load execution. Queries should be in files named `q[1-99].sql`. |
+| Name | Description | Default value |
+|----------------------------|-----------------------------------------------------------------------------------------------------|---------------|
+| `--ext-query-dir <name>` | Directory with external queries for load execution. Queries should be in files named `q[1-99].sql`. | |
## Test data cleanup { #cleanup }
diff --git a/ydb/docs/en/core/reference/ydb-cli/workload-tpch.md b/ydb/docs/en/core/reference/ydb-cli/workload-tpch.md
index ca4dd5d788..1319fe42b0 100644
--- a/ydb/docs/en/core/reference/ydb-cli/workload-tpch.md
+++ b/ydb/docs/en/core/reference/ydb-cli/workload-tpch.md
@@ -14,9 +14,9 @@ All commands support the common `--path` option, which specifies the path to the
### Available options {#common_options}
-Name | Description | Default value
----|---|---
-`--path` or `-p` | Path to the directory with tables. | `/`
+| Name | Description | Default value |
+|------------------|------------------------------------|---------------|
+| `--path` or `-p` | Path to the directory with tables. | `/` |
## Initializing a load test { #init }
@@ -50,14 +50,14 @@ See the command description:
### Available options { #load_files_options }
-Name | Description | Default value
----|---|---
-`--scale <value>` | Data scale. Powers of ten are usually used. |
-`--tables <value>` | Comma-separated list of tables to generate. Available tables: `customer`, `nation`, `order_line`, `part_psupp`, `region`, `supplier`. | All tables
-`--proccess-count <value>` or `-C <value>` | Data generation can be split into several processes, this parameter specifies the number of processes. | 1
-`--proccess-index <value>` or `-i <value>` | Data generation can be split into several processes, this parameter specifies the process number. | 0
-`--state <path>` | Path to the generation state file. If the generation was interrupted for some reason, the download will be continued from the same place when it is started again. |
-`--clear-state` | Relevant if the `--state` parameter is specified. Clear the state file and start the download from the beginning. |
+| Name | Description | Default value |
+|---------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
+| `--scale <value>` | Data scale. Powers of ten are usually used. | |
+| `--tables <value>` | Comma-separated list of tables to generate. Available tables: `customer`, `nation`, `order_line`, `part_psupp`, `region`, `supplier`. | All tables |
+| `--process-count <value>` or `-C <value>`   | Data generation can be split into several processes, this parameter specifies the number of processes.                                              | 1             |
+| `--process-index <value>` or `-i <value>`   | Data generation can be split into several processes, this parameter specifies the process number.                                                   | 0             |
+| `--state <path>` | Path to the generation state file. If the generation was interrupted for some reason, the download will be continued from the same place when it is started again. | |
+| `--clear-state` | Relevant if the `--state` parameter is specified. Clear the state file and start the download from the beginning. | |
{% include [load_options](./_includes/workload/load_options.md) %}
@@ -82,9 +82,9 @@ See the command description:
### TPC-H-specific options { #run_tpch_options }
-Name | Description | Default value
----|---|---
-`--ext-query-dir <name>` | Directory with external queries for load execution. Queries should be in files named `q[1-23].sql`. |
+| Name | Description | Default value |
+|----------------------------|-----------------------------------------------------------------------------------------------------|---------------|
+| `--ext-query-dir <name>` | Directory with external queries for load execution. Queries should be in files named `q[1-23].sql`. | |
## Test data cleaning { #cleanup }
diff --git a/ydb/docs/en/core/reference/ydb-sdk/feature-parity.md b/ydb/docs/en/core/reference/ydb-sdk/feature-parity.md
index f0eb9880c5..b3b7c677c2 100644
--- a/ydb/docs/en/core/reference/ydb-sdk/feature-parity.md
+++ b/ydb/docs/en/core/reference/ydb-sdk/feature-parity.md
@@ -1,147 +1,147 @@
# Comparison of SDK features
-| Feature | C\+\+ | Python | Go | Java | NodeJS | C# | Rust | PHP |
-|:---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
-| SSL/TLS support (system certificates) | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| SSL/TLS support (custom certificates) | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
-| Configure/enable GRPC KeepAlive (keeping the connection alive in the background) | \+ | \+ | \+ | ? | \- | \- | | \+ |
-| Regular SLO testing on the latest code version | \+ | \+/- | \+ | \+ | \+/- | \- | \- | \- |
-| Issue templates on GitHub | \- | ? | \+ | \- | \+ | \+ | | \+ |
-| **Client-side balancing** |
-| Load balancer initialization through Discovery/ListEndpoints | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| Disable client-side load balancing (all requests to the initial Endpoint) | \+/- | \- | \+ | \- | \- | \+ | | \+ |
-| Background Discovery/ListEndpoints (by default, once a minute) | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \- |
-| Support for multiple IP addresses in the initial Endpoint DNS record, some of which may not be available (DNS load balancing) | ? | \+ | \+ | ? | \- | ? | ? | ? |
-| Node pessimization on transport errors | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| Forced Discovery/ListEndpoints if more than half of the nodes are pessimized | \+ | \+ | \+ | \+ | \- | \+ | \+ |
-| Automatic detection of the nearest DC/availability zone by TCP pings | \- | \- | \+ | \- | \- | \- | \- |
-| Automatic detection of the nearest DC/availability zone by Discovery/ListEndpoints response\* | \+ | \+ | \- | \- | \- | \- | \- |
-| Uniform random selection of nodes (default) | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| Load balancing across all nodes of all DCs (default) | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| Load balancing across all nodes of a particular DC/availability zone (for example, “a”, “vla”) | \+ | \+ | \+ | ? | \- | \- | \- |
-| Load balancing across all nodes of all local DCs | \+ | \+ | \+ | ? | \- | \- | \- |
-| **Credentials providers** |
-| Anonymous (default) | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| Static (user - password) | \+ | \+ | \+ | \+ | \- | \- | \+ | \- |
-| Token: IAM, Access token | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| Service account (Yandex.Cloud specific) | \+ | \+ | \+ | \+ | \+ | \+ | \- | \+ |
-| Metadata (Yandex.Cloud specific) | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| **Working with Table service sessions** |
-| Session pool | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| Limit the number of concurrent sessions on the client) | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| Minimum number of sessions in the pool | \+ | \+ | \+ | \+ | \- | \- | \- |
-| Warm up the pool to the specified number of sessions when the pool is created | \- | \+ | \- | \- | \+ | \- | \- |
-| Background KeepAlive for idle sessions in the pool | \+ | \- | \- | \+ | \+ | \+ | \+ |
-| Background closing of idle sessions in the pool (redundant sessions) | \+ | \+ | \+ | \+ | \- | \- | \- |
-| Automatic dumping of a session from the pool in case of BAD_SESSION/SESSION_BUSY errors | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| Storage of sessions for possible future reuse\~ | \+ | \- | \- | \- | \- | \- | \- |
-| Retryer on the session pool (a repeat object is a session) | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| Retryer on the session pool (a repeat object is a transaction within a session) | \- | \- | \+ | \- | \- | \- | \+ |
-| Graceful session shutdown support ("session-close" in "x-ydb-server-hints" metadata means to "forget" a session and not use it again) | \+ | \+ | \+ | \+ | \- | \- |
-| Support for server-side load balancing of sessions (a CreateSession request must contain the "session-balancer" value in the "x-ydb-client-capabilities" metadata header) | \+ | \+ | \+ | \- | \- | \- |
-| **Support for YDB data types** |
-| Int/Uint(8,16,32,64) | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| Int128, UInt128 (not available publicly?) | \- | \- | \- | \- | \- | \- | \- | \- |
-| Float,Double | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| Bool | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| String, Bytes | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+/\- |
-| Utf8, Text | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| NULL,Optional,Void | \+ | \+ | \+ | \+ | \+ | \+/- | \+ | \+/\- |
-| Struct | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| List | \+ | \+ | \+ | \+ | \+ | \+/- | \+ | \+ |
-| Set | ? | ? | \- | ? | ? | \- | ? | ? |
-| Tuple | \+ | \+ | \+ | \+ | \+ | \+ | \+ | ?\+ |
-| Variant\<Struct\>,Variant\<Tuple\> | \+ | \+ | \+ | \+ | \+ | \- | \- | \- |
-| Date,DateTime,Timestamp,Interval | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \- |
-| TzDate,TzDateTime,TzTimestamp | \+ | \+ | \+ | \+ | \+ | \- | \- | \- |
-| DyNumber | \+ | \+ | \+ | \+ | \+ | \- | \- | \- |
-| Decimal (120 bits) | \+ | \+ | \+ | \+ | \+ | \+ | \- | \+ |
-| Json,JsonDocument,Yson | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+/\- |
-| **Scheme client** |
-| MakeDirectory | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
-| RemoveDirectory | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
-| ListDirectory | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| ModifyPermissions | \+ | \+ | \+ | \- | \+ | \- | | \+ |
-| DescribePath | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
-| **Table service** |
-| CreateSession | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| DeleteSession | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| KeepAlive | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \- |
-| CreateTable | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
-| DropTable | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
-| AlterTable | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
-| CopyTable | \+ | \+ | \+ | \+ | \- | \- | | \+ |
-| CopyTables | \+ | \+ | \+ | \- | \- | \- | | \+ |
-| DescribeTable | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
-| ExplainDataQuery | \+ | \+ | \+ | \+ | \- | \- | | \+ |
-| PrepareDataQuery | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
-| ExecuteDataQuery | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| \* By default, server cache for all parameter requests (KeepInCache) | \- | \+ | \+ | \+ | \+ | \+ | | \+ |
-| \* A separate option to enable/disable server cache for a specific request | \+ | \+ | \+ | \+ | \+ | \+ | | \+ |
-| \* Truncated result as an error (by default) | \- | \- | \+ | ? | \+ | \- | | \+ |
-| \* Truncated result as an error (as an opt-in, opt-out option) | \- | \- | \+ | ? | \+ | \- | | \- |
-| ExecuteSchemeQuery | \+ | \+ | \+ | \+ | \- | \+ | \+ | \+ |
-| BeginTransaction | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
-| CommitTransaction | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
-| RollbackTransaction | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
-| DescribeTableOptions | \+ | \+ | \+ | \- | \- | \- | | \-|
-| StreamExecuteScanQuery | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
-| StreamReadTable | \+ | \+ | \+ | \+ | \+ | \+ | \- | \+ |
-| BulkUpsert | \+ | \+ | \+ | \+ | \+ | \- | \- | \+ |
-| **Operation** |
-| Consumed Units from metadata of a response to a grpc-request request (for the user to obtain this) | \+ | \+ | \- | \+ | \+ | \- | \- | \- |
-| Obtaining OperationId of the operation for a long-polling status of operation execution | \+ | \+ | \+ | \- | \- | \+ | \- | \- |
-| **ScriptingYQL** |
-| ExecuteYql | \+ | ? | \+ | \- | \- | \- | | \+ |
-| ExplainYql | \+ | ? | \+ | \- | \- | \- | | \+ |
-| StreamExecuteYql | \+ | ? | \+ | \- | \- | \- | | \- |
-| **Coordination service** |
-| CreateNode | \+ | ? | \+ | \- | \- | \- | | \- |
-| AlterNode | \+ | ? | \+ | \- | \- | \- | | \- |
-| DropNode | \+ | ? | \+ | \- | \- | \- | | \- |
-| DescribeNode | \+ | ? | \+ | \- | \- | \- | | \- |
-| Session (leader election, distributed lock) | \+ | ? | \- | \- | \- | \- | | \- |
-| **Topic service** |
-| CreateTopic | \+ | \+ | \+ | \- | \- | \- | \- | \- |
-| DescribeTopic | \+ | \+ | \+ | \- | \- | \- | \- | \- |
-| AlterTopic | \+ | \- | \+ | \- | \- | \- | \- | \- |
-| DropTopic | \+ | \+ | \+ | \- | \- | \- | \- | \- |
-| StreamWrite | \+ | \+ | \+ | \- | \- | \- | \- | \- |
-| StreamRead | \+ | \+ | \+ | \- | \- | \- | \- | \- |
-| **Ratelimiter service** |
-| CreateResource | \+ | ? | \+ | \- | \- | \- | \- |
-| AlterResource | \+ | ? | \+ | \- | \- | \- | \- |
-| DropResource | \+ | ? | \+ | \- | \- | \- | \- |
-| ListResources | \+ | ? | \+ | \- | \- | \- | \- |
-| DescribeResource | \+ | ? | \+ | \- | \- | \- | \- |
-| AcquireResource | \+ | ? | \+ | \- | \- | \- | \- |
-| **Monitoring** (sending SDK metrics to the monitoring system) |
-| Solomon / Monitoring | \+ | ? | \+ | \- | \- | \- | \- | \- |
-| Prometheus | \- | ? | \+ | \- | \- | \- | \- | \- |
-| SDK event **logging** | \- | ? | \+ | \+ | \+ | \+ | \+ | \+ |
-| SDK event **tracing** |
-| in OpenTelemetry | \- | ? | \- | \- | \- | \- | \- | \- |
-| in OpenTracing | \- | ? | \+ | \- | \- | \- | \- | \- |
-| **Examples** |
-| Auth |
-| \* token | ? | ? | \+ | \+ | \+ | \+ | \+ | \+ |
-| \* anonymous | ? | ? | \+ | \+ | \+ | \+ | | \+ |
-| \* environ | ? | ? | \+ | \+ | \+ | \- | | \+ |
-| \* metadata | ? | ? | \+ | \+ | \+ | \+ | | \+ |
-| \* service_account | ? | ? | \+ | \+ | \+ | \- | | \+ |
-| \* static (username \+ password) | ? | ? | \+ | \+ | \+ | \+ | \+ | \- |
-| Basic (series) | \+ | ? | \+ | \+ | \+ | \+ | \+ | \+ |
-| Bulk Upsert | \+/- | ? | \+ | \+ | \+ | \- | | \- |
-| Containers (Struct,Variant,List,Tuple) | \- | ? | \+ | \- | \- | \- | | \- |
-| Pagination | \+ | ? | \+ | \+ | \- | \- | | \- |
-| Partition policies | \- | ? | \+ | \- | \- | \- | | \-|
-| Read table | ? | ? | \+ | \- | \+ | \- | | \+ |
-| Secondary index Workaround | \+ | ? | \- | \+ | \- | \- | | \- |
-| Secondary index builtin | \+ | ? | \- | \- | \- | \- | | \- |
-| TTL | \+ | ? | \+ | \- | \- | \- | | \- |
-| TTL Readtable | \+ | ? | \+ | \- | \- | \- | | \- |
-| URL Shortener (serverless yandex function) | ? | ? | \+ | ? | \+ | \- | | \- |
-| Topic reader | \+ | \+ | \- | \- | | \- | | \- |
-| Topic writer | \- | \+ | \- | \- | | \- | | \- |
+| Feature | C\+\+ | Python | Go | Java | NodeJS | C# | Rust | PHP |
+|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----:|:------:|:--:|:----:|:------:|:----:|:----:|:-----:|
+| SSL/TLS support (system certificates) | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| SSL/TLS support (custom certificates) | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
+| Configure/enable GRPC KeepAlive (keeping the connection alive in the background) | \+ | \+ | \+ | ? | \- | \- | | \+ |
+| Regular SLO testing on the latest code version | \+ | \+/- | \+ | \+ | \+/- | \- | \- | \- |
+| Issue templates on GitHub | \- | ? | \+ | \- | \+ | \+ | | \+ |
+| **Client-side balancing** | | | | | | | | |
+| Load balancer initialization through Discovery/ListEndpoints | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| Disable client-side load balancing (all requests to the initial Endpoint) | \+/- | \- | \+ | \- | \- | \+ | | \+ |
+| Background Discovery/ListEndpoints (by default, once a minute) | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \- |
+| Support for multiple IP addresses in the initial Endpoint DNS record, some of which may not be available (DNS load balancing) | ? | \+ | \+ | ? | \- | ? | ? | ? |
+| Node pessimization on transport errors | \+ | \+ | \+ | \+ | \+ | \+ | \+ | |
+| Forced Discovery/ListEndpoints if more than half of the nodes are pessimized | \+ | \+ | \+ | \+ | \- | \+ | \+ | |
+| Automatic detection of the nearest DC/availability zone by TCP pings | \- | \- | \+ | \- | \- | \- | \- | |
+| Automatic detection of the nearest DC/availability zone by Discovery/ListEndpoints response\* | \+ | \+ | \- | \- | \- | \- | \- | |
+| Uniform random selection of nodes (default) | \+ | \+ | \+ | \+ | \+ | \+ | \+ | |
+| Load balancing across all nodes of all DCs (default) | \+ | \+ | \+ | \+ | \+ | \+ | \+ | |
+| Load balancing across all nodes of a particular DC/availability zone (for example, “a”, “vla”) | \+ | \+ | \+ | ? | \- | \- | \- | |
+| Load balancing across all nodes of all local DCs | \+ | \+ | \+ | ? | \- | \- | \- | |
+| **Credentials providers** | | | | | | | | |
+| Anonymous (default) | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| Static (user - password) | \+ | \+ | \+ | \+ | \- | \- | \+ | \- |
+| Token: IAM, Access token | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| Service account (Yandex.Cloud specific) | \+ | \+ | \+ | \+ | \+ | \+ | \- | \+ |
+| Metadata (Yandex.Cloud specific) | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| **Working with Table service sessions** | | | | | | | | |
+| Session pool | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| Limit the number of concurrent sessions on the client                                                                                                                      | \+    | \+     | \+ | \+   | \+     | \+   | \+   |       |
+| Minimum number of sessions in the pool | \+ | \+ | \+ | \+ | \- | \- | \- | |
+| Warm up the pool to the specified number of sessions when the pool is created | \- | \+ | \- | \- | \+ | \- | \- | |
+| Background KeepAlive for idle sessions in the pool | \+ | \- | \- | \+ | \+ | \+ | \+ | |
+| Background closing of idle sessions in the pool (redundant sessions) | \+ | \+ | \+ | \+ | \- | \- | \- | |
+| Automatic dumping of a session from the pool in case of BAD_SESSION/SESSION_BUSY errors | \+ | \+ | \+ | \+ | \+ | \+ | \+ | |
+| Storage of sessions for possible future reuse\~ | \+ | \- | \- | \- | \- | \- | \- | |
+| Retryer on the session pool (a repeat object is a session) | \+ | \+ | \+ | \+ | \+ | \+ | \+ | |
+| Retryer on the session pool (a repeat object is a transaction within a session) | \- | \- | \+ | \- | \- | \- | \+ | |
+| Graceful session shutdown support ("session-close" in "x-ydb-server-hints" metadata means to "forget" a session and not use it again) | \+ | \+ | \+ | \+ | \- | \- | | |
+| Support for server-side load balancing of sessions (a CreateSession request must contain the "session-balancer" value in the "x-ydb-client-capabilities" metadata header) | \+ | \+ | \+ | \- | \- | \- | | |
+| **Support for YDB data types** | | | | | | | | |
+| Int/Uint(8,16,32,64) | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| Int128, UInt128 (not available publicly?) | \- | \- | \- | \- | \- | \- | \- | \- |
+| Float,Double | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| Bool | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| String, Bytes | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+/\- |
+| Utf8, Text | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| NULL,Optional,Void | \+ | \+ | \+ | \+ | \+ | \+/- | \+ | \+/\- |
+| Struct | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| List | \+ | \+ | \+ | \+ | \+ | \+/- | \+ | \+ |
+| Set | ? | ? | \- | ? | ? | \- | ? | ? |
+| Tuple | \+ | \+ | \+ | \+ | \+ | \+ | \+ | ?\+ |
+| Variant\<Struct\>,Variant\<Tuple\> | \+ | \+ | \+ | \+ | \+ | \- | \- | \- |
+| Date,DateTime,Timestamp,Interval | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \- |
+| TzDate,TzDateTime,TzTimestamp | \+ | \+ | \+ | \+ | \+ | \- | \- | \- |
+| DyNumber | \+ | \+ | \+ | \+ | \+ | \- | \- | \- |
+| Decimal (120 bits) | \+ | \+ | \+ | \+ | \+ | \+ | \- | \+ |
+| Json,JsonDocument,Yson | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+/\- |
+| **Scheme client** | | | | | | | | |
+| MakeDirectory | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
+| RemoveDirectory | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
+| ListDirectory | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| ModifyPermissions | \+ | \+ | \+ | \- | \+ | \- | | \+ |
+| DescribePath | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
+| **Table service** | | | | | | | | |
+| CreateSession | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| DeleteSession | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| KeepAlive | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \- |
+| CreateTable | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
+| DropTable | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
+| AlterTable | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
+| CopyTable | \+ | \+ | \+ | \+ | \- | \- | | \+ |
+| CopyTables | \+ | \+ | \+ | \- | \- | \- | | \+ |
+| DescribeTable | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
+| ExplainDataQuery | \+ | \+ | \+ | \+ | \- | \- | | \+ |
+| PrepareDataQuery | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
+| ExecuteDataQuery | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| \* By default, server cache for all parameter requests (KeepInCache) | \- | \+ | \+ | \+ | \+ | \+ | | \+ |
+| \* A separate option to enable/disable server cache for a specific request | \+ | \+ | \+ | \+ | \+ | \+ | | \+ |
+| \* Truncated result as an error (by default) | \- | \- | \+ | ? | \+ | \- | | \+ |
+| \* Truncated result as an error (as an opt-in, opt-out option) | \- | \- | \+ | ? | \+ | \- | | \- |
+| ExecuteSchemeQuery | \+ | \+ | \+ | \+ | \- | \+ | \+ | \+ |
+| BeginTransaction | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
+| CommitTransaction | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
+| RollbackTransaction | \+ | \+ | \+ | \+ | \+ | \- | | \+ |
+| DescribeTableOptions | \+ | \+ | \+ | \- | \- | \- | | \- |
+| StreamExecuteScanQuery | \+ | \+ | \+ | \+ | \+ | \+ | \+ | \+ |
+| StreamReadTable | \+ | \+ | \+ | \+ | \+ | \+ | \- | \+ |
+| BulkUpsert | \+ | \+ | \+ | \+ | \+ | \- | \- | \+ |
+| **Operation** | | | | | | | | |
+| Consumed Units from metadata of a response to a grpc-request request (for the user to obtain this) | \+ | \+ | \- | \+ | \+ | \- | \- | \- |
+| Obtaining OperationId of the operation for a long-polling status of operation execution | \+ | \+ | \+ | \- | \- | \+ | \- | \- |
+| **ScriptingYQL** | | | | | | | | |
+| ExecuteYql | \+ | ? | \+ | \- | \- | \- | | \+ |
+| ExplainYql | \+ | ? | \+ | \- | \- | \- | | \+ |
+| StreamExecuteYql | \+ | ? | \+ | \- | \- | \- | | \- |
+| **Coordination service** | | | | | | | | |
+| CreateNode | \+ | ? | \+ | \- | \- | \- | | \- |
+| AlterNode | \+ | ? | \+ | \- | \- | \- | | \- |
+| DropNode | \+ | ? | \+ | \- | \- | \- | | \- |
+| DescribeNode | \+ | ? | \+ | \- | \- | \- | | \- |
+| Session (leader election, distributed lock) | \+ | ? | \- | \- | \- | \- | | \- |
+| **Topic service** | | | | | | | | |
+| CreateTopic | \+ | \+ | \+ | \- | \- | \- | \- | \- |
+| DescribeTopic | \+ | \+ | \+ | \- | \- | \- | \- | \- |
+| AlterTopic | \+ | \- | \+ | \- | \- | \- | \- | \- |
+| DropTopic | \+ | \+ | \+ | \- | \- | \- | \- | \- |
+| StreamWrite | \+ | \+ | \+ | \- | \- | \- | \- | \- |
+| StreamRead | \+ | \+ | \+ | \- | \- | \- | \- | \- |
+| **Ratelimiter service** | | | | | | | | |
+| CreateResource | \+ | ? | \+ | \- | \- | \- | \- | |
+| AlterResource | \+ | ? | \+ | \- | \- | \- | \- | |
+| DropResource | \+ | ? | \+ | \- | \- | \- | \- | |
+| ListResources | \+ | ? | \+ | \- | \- | \- | \- | |
+| DescribeResource | \+ | ? | \+ | \- | \- | \- | \- | |
+| AcquireResource | \+ | ? | \+ | \- | \- | \- | \- | |
+| **Monitoring** (sending SDK metrics to the monitoring system) | | | | | | | | |
+| Solomon / Monitoring | \+ | ? | \+ | \- | \- | \- | \- | \- |
+| Prometheus | \- | ? | \+ | \- | \- | \- | \- | \- |
+| SDK event **logging** | \- | ? | \+ | \+ | \+ | \+ | \+ | \+ |
+| SDK event **tracing** | | | | | | | | |
+| in OpenTelemetry | \- | ? | \- | \- | \- | \- | \- | \- |
+| in OpenTracing | \- | ? | \+ | \- | \- | \- | \- | \- |
+| **Examples** | | | | | | | | |
+| Auth | | | | | | | | |
+| \* token | ? | ? | \+ | \+ | \+ | \+ | \+ | \+ |
+| \* anonymous | ? | ? | \+ | \+ | \+ | \+ | | \+ |
+| \* environ | ? | ? | \+ | \+ | \+ | \- | | \+ |
+| \* metadata | ? | ? | \+ | \+ | \+ | \+ | | \+ |
+| \* service_account | ? | ? | \+ | \+ | \+ | \- | | \+ |
+| \* static (username \+ password) | ? | ? | \+ | \+ | \+ | \+ | \+ | \- |
+| Basic (series) | \+ | ? | \+ | \+ | \+ | \+ | \+ | \+ |
+| Bulk Upsert | \+/- | ? | \+ | \+ | \+ | \- | | \- |
+| Containers (Struct,Variant,List,Tuple) | \- | ? | \+ | \- | \- | \- | | \- |
+| Pagination | \+ | ? | \+ | \+ | \- | \- | | \- |
+| Partition policies | \- | ? | \+ | \- | \- | \- | | \- |
+| Read table | ? | ? | \+ | \- | \+ | \- | | \+ |
+| Secondary index Workaround | \+ | ? | \- | \+ | \- | \- | | \- |
+| Secondary index builtin | \+ | ? | \- | \- | \- | \- | | \- |
+| TTL | \+ | ? | \+ | \- | \- | \- | | \- |
+| TTL Readtable | \+ | ? | \+ | \- | \- | \- | | \- |
+| URL Shortener (serverless Yandex function)                                                                                                                                  | ?     | ?      | \+ | ?    | \+     | \-   |      | \-    |
+| Topic reader | \+ | \+ | \- | \- | | \- | | \- |
+| Topic writer | \- | \+ | \- | \- | | \- | | \- |
{wide-content}
diff --git a/ydb/docs/en/core/reference/ydb-sdk/health-check-api.md b/ydb/docs/en/core/reference/ydb-sdk/health-check-api.md
index 1cc64fd923..a1c0063e59 100644
--- a/ydb/docs/en/core/reference/ydb-sdk/health-check-api.md
+++ b/ydb/docs/en/core/reference/ydb-sdk/health-check-api.md
@@ -190,6 +190,12 @@ The status (severity) of the current issue:
**Description:** This situation is not expected; it is an internal issue.
+#### Group layout is incorrect
+
+**Description:** The storage group was configured incorrectly.
+
+**Actions:** In the [Embedded UI](../embedded-ui/ydb-monitoring.md), navigate to the database page, select the `Storage` tab, and use the known group `id` to check the configuration of nodes and disks on the nodes.
+
#### Group degraded
**Description:** A number of disks allowed in the group are not available.
@@ -212,7 +218,7 @@ The status (severity) of the current issue:
**Logic of work:** `HealthCheck` monitors various parameters (fault tolerance mode, number of failed disks, disk status, etc.) and sets the appropriate status for the group accordingly.
-**Actions:** In [Embedded UI](../embedded-ui/ydb-monitoring.md), navigate to the database page, select the `Storage` tab, apply the `Groups` and `Degraded` filters, and use the known group `id` to check the availability of nodes and disks on those nodes.```
+**Actions:** In [Embedded UI](../embedded-ui/ydb-monitoring.md), navigate to the database page, select the `Storage` tab, apply the `Groups` and `Degraded` filters, and use the known group `id` to check the availability of nodes and disks on those nodes.
### VDISK
diff --git a/ydb/docs/en/core/reference/ydb-sdk/topic.md b/ydb/docs/en/core/reference/ydb-sdk/topic.md
index ea0136553f..3bc677f89a 100644
--- a/ydb/docs/en/core/reference/ydb-sdk/topic.md
+++ b/ydb/docs/en/core/reference/ydb-sdk/topic.md
@@ -127,13 +127,13 @@ Before performing the examples, [create a topic](../ydb-cli/topic-create.md) and
{
ProducerId = "ProducerId_Example"
}.Build();
-
+
await using var reader = new ReaderBuilder<string>(driver)
{
ConsumerName = "Consumer_Example",
SubscribeSettings = { new SubscribeSettings(topicName) }
}.Build();
- ```
+ ```
{% endlist %}
@@ -205,7 +205,7 @@ The topic path is mandatory. Other parameters are optional.
.build())
.build());
```
-
+
- С#
Example of creating a topic with a list of supported codecs and a minimum number of partitions:
@@ -788,7 +788,7 @@ Only connections with matching [producer and message group](../../concepts/topic
```c#
var writeCts = new CancellationTokenSource();
writeCts.CancelAfter(TimeSpan.FromSeconds(3));
-
+
await writer.WriteAsync("Hello, Example YDB Topics!", writeCts.Token);
```
@@ -1005,6 +1005,54 @@ All the metadata provided when writing a message is sent to a consumer with the
})
```
+- Python
+
+ To write to a topic within a transaction, create a transactional writer by calling `topic_client.tx_writer` with the `tx` argument. Once created, you can send messages as usual. There's no need to close the transactional writer manually, as it will be closed automatically when the transaction ends.
+
+ In the example below, there is no explicit call to `tx.commit()`; it occurs implicitly upon the successful execution of the `callee` lambda.
+
+ [Example on GitHub](https://github.com/ydb-platform/ydb-python-sdk/blob/main/examples/topic/topic_transactions_example.py)
+
+ ```python
+ with ydb.QuerySessionPool(driver) as session_pool:
+
+ def callee(tx: ydb.QueryTxContext):
+ tx_writer: ydb.TopicTxWriter = driver.topic_client.tx_writer(tx, topic)
+
+ for i in range(message_count):
+ result_stream = tx.execute(query=f"select {i} as res;")
+ for result_set in result_stream:
+ message = str(result_set.rows[0]["res"])
+ tx_writer.write(ydb.TopicWriterMessage(message))
+ print(f"Message {message} was written with tx.")
+
+ session_pool.retry_tx_sync(callee)
+ ```
+
+- Python (asyncio)
+
+ To write to a topic within a transaction, create a transactional writer by calling `topic_client.tx_writer` with the `tx` argument. Once created, you can send messages as usual. There's no need to close the transactional writer manually, as it will be closed automatically when the transaction ends.
+
+ In the example below, there is no explicit call to `tx.commit()`; it occurs implicitly upon the successful execution of the `callee` lambda.
+
+ [Example on GitHub](https://github.com/ydb-platform/ydb-python-sdk/blob/main/examples/topic/topic_transactions_async_example.py)
+
+ ```python
+ async with ydb.aio.QuerySessionPool(driver) as session_pool:
+
+ async def callee(tx: ydb.aio.QueryTxContext):
+ tx_writer: ydb.TopicTxWriterAsyncIO = driver.topic_client.tx_writer(tx, topic)
+
+ for i in range(message_count):
+ async with await tx.execute(query=f"select {i} as res;") as result_stream:
+ async for result_set in result_stream:
+ message = str(result_set.rows[0]["res"])
+ await tx_writer.write(ydb.TopicWriterMessage(message))
+                    print(f"Message {message} was written with tx.")
+
+ await session_pool.retry_tx_async(callee)
+ ```
+
- Java (sync)
[Example on GitHub](https://github.com/ydb-platform/ydb-java-examples/blob/develop/ydb-cookbook/src/main/java/tech/ydb/examples/topic/transactions/TransactionWriteSync.java)
@@ -1285,7 +1333,7 @@ Topic can have several Consumers and for each of them server stores its own read
{
ConsumerName = "Consumer_Example",
SubscribeSettings = { new SubscribeSettings(topicName) }
- }.Build();
+ }.Build();
```
{% endlist %}
@@ -1360,7 +1408,7 @@ To establish a connection to the `my-topic` and `my-specific-topic` topics using
}
}.Build();
```
-
+
{% endlist %}
### Reading messages {#reading-messages}
@@ -1465,7 +1513,7 @@ Data from topics can be read in the context of [transactions](#read-tx). In this
{
}
```
-
+
{% endlist %}
#### Reading message batches
@@ -1544,7 +1592,7 @@ Data from topics can be read in the context of [transactions](#read-tx). In this
foreach (var message in batchMessages.Batch)
{
- logger.LogInformation("Received message: [{MessageData}]", message.Data);
+ logger.LogInformation("Received message: [{MessageData}]", message.Data);
}
}
}
@@ -1644,7 +1692,7 @@ If a commit fails with an error, the application should log it and continue; it
{
}
```
-
+
{% endlist %}
#### Reading message batches with commits
@@ -1736,7 +1784,7 @@ If a commit fails with an error, the application should log it and continue; it
foreach (var message in batchMessages.Batch)
{
- logger.LogInformation("Received message: [{MessageData}]", message.Data);
+ logger.LogInformation("Received message: [{MessageData}]", message.Data);
}
try
@@ -1963,6 +2011,42 @@ Reading progress is usually saved on a server for each Consumer. However, such p
}
```
+- Python
+
+ To read messages from a topic within a transaction, use the `reader.receive_batch_with_tx` method. It reads a batch of messages and adds their commit to the transaction, so there's no need to commit them separately. The reader can be reused across different transactions. However, it's essential to commit transactions in the same order as the messages are read from the reader, as message commits in the topic must be performed strictly in order — otherwise the transaction will get an error during commit. The simplest way to ensure this is by using the reader within a loop.
+
+ [Example on GitHub](https://github.com/ydb-platform/ydb-python-sdk/blob/main/examples/topic/topic_transactions_example.py)
+
+ ```python
+ with driver.topic_client.reader(topic, consumer) as reader:
+ with ydb.QuerySessionPool(driver) as session_pool:
+ for _ in range(message_count):
+
+ def callee(tx: ydb.QueryTxContext):
+ batch = reader.receive_batch_with_tx(tx, max_messages=1)
+ print(f"Message {batch.messages[0].data.decode()} was read with tx.")
+
+ session_pool.retry_tx_sync(callee)
+ ```
+
+- Python (asyncio)
+
+ To read messages from a topic within a transaction, use the `reader.receive_batch_with_tx` method. It reads a batch of messages and adds their commit to the transaction, so there's no need to commit them separately. The reader can be reused across different transactions. However, it's essential to commit transactions in the same order as the messages are read from the reader, as message commits in the topic must be performed strictly in order — otherwise the transaction will get an error during commit. The simplest way to ensure this is by using the reader within a loop.
+
+ [Example on GitHub](https://github.com/ydb-platform/ydb-python-sdk/blob/main/examples/topic/topic_transactions_async_example.py)
+
+ ```python
+ async with driver.topic_client.reader(topic, consumer) as reader:
+ async with ydb.aio.QuerySessionPool(driver) as session_pool:
+ for _ in range(message_count):
+
+ async def callee(tx: ydb.aio.QueryTxContext):
+ batch = await reader.receive_batch_with_tx(tx, max_messages=1)
+ print(f"Message {batch.messages[0].data.decode()} was read with tx.")
+
+ await session_pool.retry_tx_async(callee)
+ ```
+
- Java (sync)
[Example on GitHub](https://github.com/ydb-platform/ydb-java-examples/blob/develop/ydb-cookbook/src/main/java/tech/ydb/examples/topic/transactions/TransactionReadSync.java)
diff --git a/ydb/docs/en/core/yql/reference/_includes/permissions_list.md b/ydb/docs/en/core/yql/reference/_includes/permissions_list.md
index 4c3a108b2d..edbbe71373 100644
--- a/ydb/docs/en/core/yql/reference/_includes/permissions_list.md
+++ b/ydb/docs/en/core/yql/reference/_includes/permissions_list.md
@@ -12,8 +12,8 @@ The possible names of rights are listed in the table below.
| `ydb.database.drop` | `DROP` | The right to delete databases in the cluster |
| **Elementary rights for database objects** | | |
| `ydb.granular.select_row` | `SELECT ROW` | The right to read rows from a table (select), read messages from topics |
-| `ydb.granular.update_row` | `UPDATE ROW` | The right to update rows in a table (insert, update, erase), write messages to topics |
-| `ydb.granular.erase_row` | `ERASE ROW` | The right to delete rows from a table |
+| `ydb.granular.update_row` | `UPDATE ROW` | The right to update rows in a table (insert, update, upsert, replace), write messages to topics |
+| `ydb.granular.erase_row` | `ERASE ROW` | The right to delete rows from a table (delete) |
| `ydb.granular.create_directory` | `CREATE DIRECTORY` | The right to create and delete directories, including existing and nested ones |
| `ydb.granular.create_table` | `CREATE TABLE` | The right to create tables (including index, external, columnar), views, sequences |
| `ydb.granular.create_queue` | `CREATE QUEUE` | The right to create topics |
diff --git a/ydb/docs/ru/core/concepts/federated_query/s3/_includes/date_formats.md b/ydb/docs/ru/core/concepts/federated_query/s3/_includes/date_formats.md
new file mode 100644
index 0000000000..ad2a6dd217
--- /dev/null
+++ b/ydb/docs/ru/core/concepts/federated_query/s3/_includes/date_formats.md
@@ -0,0 +1,7 @@
+|Имя|Описание|Пример|
+|---|---|---|
+|`POSIX`|Строка формата `%Y-%m-%d %H:%M:%S`|2001-03-26 16:10:00|
+|`ISO`|Формат, соответствующий стандарту [ISO 8601](https://ru.wikipedia.org/wiki/ISO_8601)|2001-03-26 16:10:00Z|
+|`UNIX_TIME_SECONDS`|Количество секунд прошедших с 1 января 1970 года (00:00:00 UTC)|985623000|
+|`UNIX_TIME_MILLISECONDS`|Количество миллисекунд прошедших с 1 января 1970 года (00:00:00 UTC)|985623000000|
+|`UNIX_TIME_MICROSECONDS`|Количество микросекунд прошедших с 1 января 1970 года (00:00:00 UTC)|985623000000000| \ No newline at end of file
diff --git a/ydb/docs/ru/core/concepts/federated_query/s3/_includes/format_settings.md b/ydb/docs/ru/core/concepts/federated_query/s3/_includes/format_settings.md
new file mode 100644
index 0000000000..0297de1fa7
--- /dev/null
+++ b/ydb/docs/ru/core/concepts/federated_query/s3/_includes/format_settings.md
@@ -0,0 +1,10 @@
+|Имя параметра|Описание|Принимаемые значения|
+|----|----|---|
+|`file_pattern`|Шаблон имени файла|Строка шаблона имени. Поддерживаются wildcards `*`.|
+|`data.interval.unit`|Единица измерения для парсинга типа `Interval`|`MICROSECONDS`, `MILLISECONDS`, `SECONDS`, `MINUTES`, `HOURS`, `DAYS`, `WEEKS`|
+|`data.datetime.format_name`|Предопределенный формат, в котором записаны данные типа `Datetime`|`POSIX`, `ISO`|
+|`data.datetime.format`|Шаблон, определяющий как записаны данные типа `Datetime`|Строка форматирования, например: `%Y-%m-%dT%H-%M`|
+|`data.timestamp.format_name`|Предопределенный формат, в котором записаны данные типа `Timestamp`|`POSIX`, `ISO`, `UNIX_TIME_SECONDS`, `UNIX_TIME_MILLISECONDS`, `UNIX_TIME_MICROSECONDS`|
+|`data.timestamp.format`|Шаблон, определяющий как записаны данные типа `Timestamp`|Строка форматирования, например: `%Y-%m-%dT%H-%M-%S`|
+|`data.date.format`|Формат, в котором записаны данные типа `Date`|Строка форматирования, например: `%Y-%m-%d`|
+|`csv_delimiter`|Разделитель данных в формате `csv_with_names`|Любой символ (UTF-8)|
diff --git a/ydb/docs/ru/core/concepts/federated_query/s3/_includes/path_format.md b/ydb/docs/ru/core/concepts/federated_query/s3/_includes/path_format.md
index ea21735d7b..abab7b53ba 100644
--- a/ydb/docs/ru/core/concepts/federated_query/s3/_includes/path_format.md
+++ b/ydb/docs/ru/core/concepts/federated_query/s3/_includes/path_format.md
@@ -1,5 +1,5 @@
|Формат пути|Описание|Пример|
|----|----|---|
-|Путь завершается символом `/`|Путь к каталогу|Путь `/a` адресует все содержимое каталога:<br/>`/a/b/c/d/1.txt`<br/>`/a/b/2.csv`|
+|Путь завершается символом `/`|Путь к каталогу|Путь `/a/` адресует все содержимое каталога:<br/>`/a/b/c/d/1.txt`<br/>`/a/b/2.csv`|
|Путь содержит символ макроподстановки `*`|Любые файлы, вложенные в путь|Путь `/a/*.csv` адресует файлы в каталогах:<br/>`/a/b/c/1.csv`<br/>`/a/2.csv`<br/>`/a/b/c/d/e/f/g/2.csv`|
|Путь не завершается символом `/` и не содержит символов макроподстановок|Путь к отдельному файлу|Путь `/a/b.csv` адресует конкретный файл `/a/b.csv`|
diff --git a/ydb/docs/ru/core/concepts/federated_query/s3/external_data_source.md b/ydb/docs/ru/core/concepts/federated_query/s3/external_data_source.md
index 5645f21bb4..bfbedbc4fd 100644
--- a/ydb/docs/ru/core/concepts/federated_query/s3/external_data_source.md
+++ b/ydb/docs/ru/core/concepts/federated_query/s3/external_data_source.md
@@ -74,7 +74,8 @@ FROM
WITH(
FORMAT = "<file_format>",
COMPRESSION = "<compression>",
- SCHEMA = (<schema_definition>))
+ SCHEMA = (<schema_definition>),
+ <format_settings>)
WHERE
<filter>;
```
@@ -86,6 +87,7 @@ WHERE
* `file_format` — [формат данных](formats.md#formats) в файлах.
* `compression` — [формат сжатия](formats.md#compression_formats) файлов.
* `schema_definition` — [описание схемы хранимых данных](#schema) в файлах.
+* `format_settings` — опциональные [параметры форматирования](#format_settings).
### Описание схемы данных {#schema}
@@ -137,12 +139,22 @@ WHERE
В результате выполнения такого запроса будут автоматически выведены названия и типы полей.
-### Форматы путей к данным {#path_format}
+### Форматы путей к данным, задаваемых в параметре `file_path` {#path_format}
В {{ ydb-full-name }} поддерживаются следующие пути к данным:
{% include [!](_includes/path_format.md) %}
+### Параметры форматирования {#format_settings}
+
+В {{ ydb-full-name }} поддерживаются следующие параметры форматирования:
+
+{% include [!](_includes/format_settings.md) %}
+
+Параметр `file_pattern` можно использовать только в том случае, если `file_path` – путь к каталогу. В строках форматирования можно использовать любые шаблонные переменные, поддерживаемые функцией [`strftime`(C99)](https://en.cppreference.com/w/c/chrono/strftime). В {{ ydb-full-name }} поддерживаются следующие форматы типов `Datetime` и `Timestamp`:
+
+{% include [!](_includes/date_formats.md) %}
+
## Пример {#read_example}
Пример запроса для чтения данных из S3 ({{ objstorage-full-name }}):
@@ -151,21 +163,28 @@ WHERE
SELECT
*
FROM
- connection.`folder/filename.csv`
+ connection.`folder/`
WITH(
FORMAT = "csv_with_names",
+ COMPRESSION = "gzip",
SCHEMA =
(
- Year Int32,
- Manufacturer Utf8,
- Model Utf8,
- Price Double
- )
+ Id Int32 NOT NULL,
+ UserId Int32 NOT NULL,
+ TripDate Date NOT NULL,
+ TripDistance Double NOT NULL,
+ UserComment Utf8
+ ),
+ FILE_PATTERN="*.csv.gz",
+ `DATA.DATE.FORMAT`="%Y-%m-%d",
+ CSV_DELIMITER='/'
);
```
Где:
* `connection` — название внешнего источника данных, ведущего на бакет S3 ({{ objstorage-full-name }}).
-* `folder/filename.csv` — путь к файлу в бакете S3 ({{ objstorage-full-name }}).
+* `folder/` — путь к папке с данными в бакете S3 ({{ objstorage-full-name }}).
* `SCHEMA` — описание схемы данных в файле.
+* `*.csv.gz` — шаблон имени файлов с данными.
+* `%Y-%m-%d` — формат записи данных типа `Date` в S3.
diff --git a/ydb/docs/ru/core/concepts/federated_query/s3/external_table.md b/ydb/docs/ru/core/concepts/federated_query/s3/external_table.md
index c55250c4b0..bde27c24c2 100644
--- a/ydb/docs/ru/core/concepts/federated_query/s3/external_table.md
+++ b/ydb/docs/ru/core/concepts/federated_query/s3/external_table.md
@@ -37,6 +37,7 @@ CREATE EXTERNAL TABLE `s3_test_data` (
- `csv_with_names` - один из [допустимых типов хранения данных](formats.md);
- `gzip` - один из [допустимых алгоритмов сжатия](formats.md#compression).
+Также при создании внешних таблиц поддерживаются [параметры форматирования](external_data_source.md#format_settings).
## Модель данных {#data-model}
diff --git a/ydb/docs/ru/core/concepts/federated_query/s3/formats.md b/ydb/docs/ru/core/concepts/federated_query/s3/formats.md
index 7a16c15641..29f89ddae6 100644
--- a/ydb/docs/ru/core/concepts/federated_query/s3/formats.md
+++ b/ydb/docs/ru/core/concepts/federated_query/s3/formats.md
@@ -6,15 +6,15 @@
Список поддерживаемых в {{ ydb-short-name }} форматов данных приведен в таблице ниже.
-|Формат|Чтение|Запись|
-|----|-----|------|
-|[`csv_with_names`](#csv_with_names)|✓|✓|
-|[`tsv_with_names`](#tsv_with_names)|✓||
-|[`json_list`](#json_list)|✓||
-|[`json_each_row`](#json_each_row)|✓||
-|[`json_as_string`](#json_as_string)|✓||
-|[`parquet`](#parquet)|✓|✓|
-|[`raw`](#raw)|✓||
+| Формат | Чтение | Запись |
+|-------------------------------------|--------|--------|
+| [`csv_with_names`](#csv_with_names) | ✓ | ✓ |
+| [`tsv_with_names`](#tsv_with_names) | ✓ | |
+| [`json_list`](#json_list) | ✓ | |
+| [`json_each_row`](#json_each_row) | ✓ | |
+| [`json_as_string`](#json_as_string) | ✓ | |
+| [`parquet`](#parquet) | ✓ | ✓ |
+| [`raw`](#raw) | ✓ | |
### Формат csv_with_names {#csv_with_names}
@@ -52,7 +52,7 @@ WITH
|#|Manufacturer|Model|Price|Year|
|-|-|-|-|-|
|1|Man_1|Model_1|3000|1997|
-|2|Man_2|Model_2|4900|1999
+|2|Man_2|Model_2|4900|1999|
{% endcut %}
@@ -93,7 +93,7 @@ WITH
|#|Manufacturer|Model|Price|Year|
|-|-|-|-|-|
|1|Man_1|Model_1|3000|1997|
-|2|Man_2|Model_2|4900|1999
+|2|Man_2|Model_2|4900|1999|
{% endcut %}
@@ -153,7 +153,7 @@ WITH
|#|Manufacturer|Model|Price|Year|
|-|-|-|-|-|
|1|Man_1|Model_1|3000|1997|
-|2|Man_2|Model_2|4900|1999
+|2|Man_2|Model_2|4900|1999|
{% endcut %}
diff --git a/ydb/docs/ru/core/concepts/glossary.md b/ydb/docs/ru/core/concepts/glossary.md
index a2e37d05f2..49951d4799 100644
--- a/ydb/docs/ru/core/concepts/glossary.md
+++ b/ydb/docs/ru/core/concepts/glossary.md
@@ -305,6 +305,10 @@
**[Право доступа](../security/authorization.md#right)** или **access right** — сущность, отражающая разрешение [субъекту доступа](#access-subject) выполнять конкретный набор операций в кластере или базе данных над конкретным [объектом доступа](#access-object).
+### Наследование прав доступа {#access-right-inheritance}
+
+**Наследование прав доступа** — механизм, при котором [права доступа](#access-right) автоматически передаются от родительских [объектов доступа](#access-object) дочерним объектам в структуре базы данных. Это гарантирует, что разрешения, предоставленные на более высоком уровне иерархии, применяются ко всем нижестоящим уровням, если они не [переопределены явно](../reference/ydb-cli/commands/scheme-permissions.md#clear-inheritance).
+
### Список прав {#access-control-list}
**[Список прав](../security/authorization.md#right)**, **access control list** или **ACL** — список всех [прав](#access-right), предоставленных [субъектам доступа](#access-subject) (пользователям и группам) на конкретный [объект доступа](#access-object).
diff --git a/ydb/docs/ru/core/contributor/documentation/toc_p.yaml b/ydb/docs/ru/core/contributor/documentation/toc_p.yaml
index 79f1b397e2..0f44dbe00b 100644
--- a/ydb/docs/ru/core/contributor/documentation/toc_p.yaml
+++ b/ydb/docs/ru/core/contributor/documentation/toc_p.yaml
@@ -1,9 +1,9 @@
items:
- name: Процесс ревью
- path: review.md
+ href: review.md
- name: Руководство по стилю
- path: style-guide.md
+ href: style-guide.md
- name: Структура
- path: structure.md
+ href: structure.md
- name: Жанры
- path: genres.md \ No newline at end of file
+ href: genres.md
diff --git a/ydb/docs/ru/core/contributor/load-actors-kqp.md b/ydb/docs/ru/core/contributor/load-actors-kqp.md
index 2c49fe588c..e6c1e723f6 100644
--- a/ydb/docs/ru/core/contributor/load-actors-kqp.md
+++ b/ydb/docs/ru/core/contributor/load-actors-kqp.md
@@ -13,16 +13,16 @@
{% include [load-actors-params](../_includes/load-actors-params.md) %}
-Параметр | Описание
---- | ---
-`DurationSeconds` | Продолжительность нагрузки в секундах.
-`WindowDuration` | Размер окна для агрегации статистики.
-`WorkingDir` | Путь директории, в которой будут созданы тестовые таблицы.
-`NumOfSessions` | Количество параллельных потоков, подающих нагрузку. Каждый поток пишет в свою сессию.
-`DeleteTableOnFinish` | Если `False`, то созданные таблицы не удаляются после завершения работы нагрузки. Может быть полезно в случае, когда при первом запуске актора создается большая таблица, а при последующих выполняются запросы к ней.
-`UniformPartitionsCount` | Количество партиций, создаваемых в тестовых таблицах.
-`WorkloadType` | Тип нагрузки.<br/>В случае Stoсk:<ul><li>`0` — InsertRandomOrder;</li><li>`1` — SubmitRandomOrder;</li><li>`2` — SubmitSameOrder;</li><li>`3` — GetRandomCustomerHistory;</li><li>`4` — GetCustomerHistory.</li></ul>В случае Key-Value:<ul><li>`0` — UpsertRandom;</li><li>`1` — InsertRandom;</li><li>`2` — SelectRandom.</li></ul>
-`Workload` | Вид нагрузки.<br/>`Stock`:<ul><li>`ProductCount` — количество видов товаров.</li><li>`Quantity` — количество товаров каждого вида на складе.</li><li>`OrderCount` — первоначальное количество заказов в БД.</li><li>`Limit` — минимальное количество шардов для таблиц.</li></ul>`Kv`:<ul><li>`InitRowCount` — до начала нагрузки нагружающий актор запишет в таблицу указанное количество строк.</li><li>`StringLen` — длина строки `value`.</li><li>`ColumnsCnt` — сколько столбцов использовать в таблице.</li><li>`RowsCnt` — сколько строк вставлять или читать в одном SQL запросе.</li></ul>
+| Параметр | Описание |
+|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `DurationSeconds` | Продолжительность нагрузки в секундах. |
+| `WindowDuration` | Размер окна для агрегации статистики. |
+| `WorkingDir` | Путь директории, в которой будут созданы тестовые таблицы. |
+| `NumOfSessions` | Количество параллельных потоков, подающих нагрузку. Каждый поток пишет в свою сессию. |
+| `DeleteTableOnFinish` | Если `False`, то созданные таблицы не удаляются после завершения работы нагрузки. Может быть полезно в случае, когда при первом запуске актора создается большая таблица, а при последующих выполняются запросы к ней. |
+| `UniformPartitionsCount` | Количество партиций, создаваемых в тестовых таблицах. |
+| `WorkloadType` | Тип нагрузки.<br/>В случае Stoсk:<ul><li>`0` — InsertRandomOrder;</li><li>`1` — SubmitRandomOrder;</li><li>`2` — SubmitSameOrder;</li><li>`3` — GetRandomCustomerHistory;</li><li>`4` — GetCustomerHistory.</li></ul>В случае Key-Value:<ul><li>`0` — UpsertRandom;</li><li>`1` — InsertRandom;</li><li>`2` — SelectRandom.</li></ul> |
+| `Workload` | Вид нагрузки.<br/>`Stock`:<ul><li>`ProductCount` — количество видов товаров.</li><li>`Quantity` — количество товаров каждого вида на складе.</li><li>`OrderCount` — первоначальное количество заказов в БД.</li><li>`Limit` — минимальное количество шардов для таблиц.</li></ul>`Kv`:<ul><li>`InitRowCount` — до начала нагрузки нагружающий актор запишет в таблицу указанное количество строк.</li><li>`StringLen` — длина строки `value`.</li><li>`ColumnsCnt` — сколько столбцов использовать в таблице.</li><li>`RowsCnt` — сколько строк вставлять или читать в одном SQL запросе.</li></ul> |
## Примеры {#example}
diff --git a/ydb/docs/ru/core/contributor/load-actors-memory.md b/ydb/docs/ru/core/contributor/load-actors-memory.md
index 290b5937b4..0456a53669 100644
--- a/ydb/docs/ru/core/contributor/load-actors-memory.md
+++ b/ydb/docs/ru/core/contributor/load-actors-memory.md
@@ -10,11 +10,11 @@
## Параметры актора {#options}
-Параметр | Описание
---- | ---
-`DurationSeconds` | Продолжительность нагрузки в секундах.
-`BlockSize` | Размер аллоцируемого блока в байтах.
-`IntervalUs` | Интервал времени между аллоцированиями блоков в микросекундах.
+| Параметр | Описание |
+|-------------------|----------------------------------------------------------------|
+| `DurationSeconds` | Продолжительность нагрузки в секундах. |
+| `BlockSize` | Размер аллоцируемого блока в байтах. |
+| `IntervalUs` | Интервал времени между аллоцированиями блоков в микросекундах. |
## Примеры {#examples}
diff --git a/ydb/docs/ru/core/contributor/load-actors-overview.md b/ydb/docs/ru/core/contributor/load-actors-overview.md
index 283dba84eb..bbea1ddd63 100644
--- a/ydb/docs/ru/core/contributor/load-actors-overview.md
+++ b/ydb/docs/ru/core/contributor/load-actors-overview.md
@@ -17,17 +17,17 @@
## Типы акторов {#load-actor-type}
-Тип | Описание
---- | ---
-[KqpLoad](load-actors-kqp.md) | Подает нагрузку на слой Query Processor и нагружает все компоненты кластера.
-[KeyValueLoad](load-actors-key-value.md) | Нагружает Key-value таблетку.
-[StorageLoad](load-actors-storage.md) | Нагружает Distributed Storage без задействования слоев таблеток и Query Processor.
-[VDiskLoad](load-actors-vdisk.md) | Тестирует производительность записи на VDisk.
-[PDiskWriteLoad](load-actors-pdisk-write.md) | Тестирует производительность записи на PDisk.
-[PDiskReadLoad](load-actors-pdisk-read.md) | Тестирует производительность чтения с PDisk.
-[PDiskLogLoad](load-actors-pdisk-log.md) | Тестирует корректность вырезания из середины лога PDisk.
-[MemoryLoad](load-actors-memory.md) | Аллоцирует память, полезен при тестировании логики.
-[Stop](load-actors-stop.md) | Останавливает все акторы, либо только указанные.
+| Тип | Описание |
+|-------------------------------------------------|------------------------------------------------------------------------------------|
+| [KqpLoad](load-actors-kqp.md) | Подает нагрузку на слой Query Processor и нагружает все компоненты кластера. |
+| [KeyValueLoad](load-actors-key-value.md) | Нагружает Key-value таблетку. |
+| [StorageLoad](load-actors-storage.md) | Нагружает Distributed Storage без задействования слоев таблеток и Query Processor. |
+| [VDiskLoad](load-actors-vdisk.md) | Тестирует производительность записи на VDisk. |
+| [PDiskWriteLoad](load-actors-pdisk-write.md) | Тестирует производительность записи на PDisk. |
+| [PDiskReadLoad](load-actors-pdisk-read.md) | Тестирует производительность чтения с PDisk. |
+| [PDiskLogLoad](load-actors-pdisk-log.md) | Тестирует корректность вырезания из середины лога PDisk. |
+| [MemoryLoad](load-actors-memory.md) | Аллоцирует память, полезен при тестировании логики. |
+| [Stop](load-actors-stop.md) | Останавливает все акторы, либо только указанные. |
## Запуск нагрузки {#load-actor-start}
diff --git a/ydb/docs/ru/core/contributor/load-actors-pdisk-log.md b/ydb/docs/ru/core/contributor/load-actors-pdisk-log.md
index 6b00855896..79804a1edb 100644
--- a/ydb/docs/ru/core/contributor/load-actors-pdisk-log.md
+++ b/ydb/docs/ru/core/contributor/load-actors-pdisk-log.md
@@ -14,18 +14,18 @@
{% include [load-actors-params](../_includes/load-actors-params.md) %}
-Параметр | Описание
---- | ---
-`PDiskId` | Идентификатор нагружаемого PDisk на узле.
-`PDiskGuid` | Глобально-уникальный идентификатор нагружаемого PDisk.
-`VDiskId` | Параметры VDisk, от имени которого подается нагрузка.<ul><li>`GroupID` — идентификатор группы.</li><li>`GroupGeneration` — поколение группы.</li><li>`Ring` — идентификатор кольца в группе.</li><li>`Domain` — идентификатор фэйл-домена в кольце.</li><li>`VDisk` — индекс VDisk в фэйл-домене.</li></ul>
-`MaxInFlight` | Количество одновременно обрабатываемых запросов.
-`SizeIntervalMin` | Минимальный размер записи в лог в байтах.
-`SizeIntervalMax` | Максимальный размер записи в лог в байтах.
-`BurstInterval` | Интервал между сеансами записи в лог в байтах.
-`BurstSize` | Общее количество данных, которое будет записано в одном сеансе, в байтах.
-`StorageDuration` | Виртуальное время в байтах. Показывает, как долго VDisk должен хранить свои данные в логе.
-`IsWardenlessTest` | Если PDiskReadLoad запускается на кластере, укажите `false`. Иначе (например, при запуске в юнит-тестах) укажите `true`.
+| Параметр | Описание |
+|---------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `PDiskId` | Идентификатор нагружаемого PDisk на узле. |
+| `PDiskGuid` | Глобально-уникальный идентификатор нагружаемого PDisk. |
+| `VDiskId` | Параметры VDisk, от имени которого подается нагрузка.<ul><li>`GroupID` — идентификатор группы.</li><li>`GroupGeneration` — поколение группы.</li><li>`Ring` — идентификатор кольца в группе.</li><li>`Domain` — идентификатор фэйл-домена в кольце.</li><li>`VDisk` — индекс VDisk в фэйл-домене.</li></ul> |
+| `MaxInFlight` | Количество одновременно обрабатываемых запросов. |
+| `SizeIntervalMin` | Минимальный размер записи в лог в байтах. |
+| `SizeIntervalMax` | Максимальный размер записи в лог в байтах. |
+| `BurstInterval` | Интервал между сеансами записи в лог в байтах. |
+| `BurstSize` | Общее количество данных, которое будет записано в одном сеансе, в байтах. |
+| `StorageDuration` | Виртуальное время в байтах. Показывает, как долго VDisk должен хранить свои данные в логе. |
+| `IsWardenlessTest` | Если PDiskReadLoad запускается на кластере, укажите `false`. Иначе (например, при запуске в юнит-тестах) укажите `true`. |
## Примеры {#example}
diff --git a/ydb/docs/ru/core/contributor/load-actors-pdisk-read.md b/ydb/docs/ru/core/contributor/load-actors-pdisk-read.md
index 29a326fd88..0b479095cf 100644
--- a/ydb/docs/ru/core/contributor/load-actors-pdisk-read.md
+++ b/ydb/docs/ru/core/contributor/load-actors-pdisk-read.md
@@ -11,17 +11,17 @@
{% include [load-actors-params](../_includes/load-actors-params.md) %}
-Параметр | Описание
---- | ---
-`PDiskId` | Идентификатор нагружаемого PDisk на узле.
-`PDiskGuid` | Глобально-уникальный идентификатор нагружаемого PDisk.
-`VDiskId` | Нагрузка подается от имени VDisk со следующими реквизитами:<ul><li>`GroupID` — идентификатор группы.</li><li>`GroupGeneration` — поколение группы.</li><li>`Ring` — идентификатор кольца в группе.</li><li>`Domain` — идентификатор фэйл-домена в кольце.</li><li>`VDisk` — индекс VDisk в фэйл-домене.</li></ul>
-`Chunks` | Параметры чанка.<br/>`Slots` — количество слотов в чанке, определяет размер записи.<br/>Вы можете указать несколько `Chunks`, и тогда выбор конкретного чанка для чтения будет определяться его `Weight`.
-`DurationSeconds` | Продолжительность нагрузки в секундах.
-`IntervalMsMin`,<br/>`IntervalMsMax` | Минимальный и максимальный промежутки времени между запросами при интервальной нагрузке в миллисекундах. Значение промежутка выбирается случайно из указанного диапазона.
-`InFlightReads` | Количество одновременно обрабатываемых запросов на чтение.
-`Sequential` | Тип чтения.<ul><li>`True` — последовательное.</li><li>`False` — случайное.</li></ul>
-`IsWardenlessTest` | Если PDiskReadLoad запускается на кластере, укажите `False`. Иначе (например, при запуске в юнит-тестах) укажите `True`.
+| Параметр | Описание |
+|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `PDiskId` | Идентификатор нагружаемого PDisk на узле. |
+| `PDiskGuid` | Глобально-уникальный идентификатор нагружаемого PDisk. |
+| `VDiskId` | Нагрузка подается от имени VDisk со следующими реквизитами:<ul><li>`GroupID` — идентификатор группы.</li><li>`GroupGeneration` — поколение группы.</li><li>`Ring` — идентификатор кольца в группе.</li><li>`Domain` — идентификатор фэйл-домена в кольце.</li><li>`VDisk` — индекс VDisk в фэйл-домене.</li></ul> |
+| `Chunks` | Параметры чанка.<br/>`Slots` — количество слотов в чанке, определяет размер записи.<br/>Вы можете указать несколько `Chunks`, и тогда выбор конкретного чанка для чтения будет определяться его `Weight`. |
+| `DurationSeconds` | Продолжительность нагрузки в секундах. |
+| `IntervalMsMin`,<br/>`IntervalMsMax` | Минимальный и максимальный промежутки времени между запросами при интервальной нагрузке в миллисекундах. Значение промежутка выбирается случайно из указанного диапазона. |
+| `InFlightReads` | Количество одновременно обрабатываемых запросов на чтение. |
+| `Sequential` | Тип чтения.<ul><li>`True` — последовательное.</li><li>`False` — случайное.</li></ul> |
+| `IsWardenlessTest` | Если PDiskReadLoad запускается на кластере, укажите `False`. Иначе (например, при запуске в юнит-тестах) укажите `True`. |
## Примеры {#examples}
diff --git a/ydb/docs/ru/core/contributor/load-actors-pdisk-write.md b/ydb/docs/ru/core/contributor/load-actors-pdisk-write.md
index 0779eb6286..a804c48f3c 100644
--- a/ydb/docs/ru/core/contributor/load-actors-pdisk-write.md
+++ b/ydb/docs/ru/core/contributor/load-actors-pdisk-write.md
@@ -11,18 +11,18 @@
{% include [load-actors-params](../_includes/load-actors-params.md) %}
-Параметр | Описание
---- | ---
-`PDiskId` | Идентификатор нагружаемого PDisk на узле.
-`PDiskGuid` | Глобально-уникальный идентификатор нагружаемого PDisk.
-`VDiskId` | Нагрузка подается от имени VDisk со следующими реквизитами:<ul><li>`GroupID` — идентификатор группы.</li><li>`GroupGeneration` — поколение группы.</li><li>`Ring` — идентификатор кольца в группе.</li><li>`Domain` — идентификатор фэйл-домена в кольце.</li><li>`VDisk` — индекс VDisk в фэйл-домене.</li></ul>
-`Chunks` | Параметры чанка.<br/>`Slots` — количество слотов в чанке, определяет размер записи.<br/>Вы можете указать несколько `Chunks`, и тогда выбор конкретного чанка для записи будет определяться его `Weight`.
-`DurationSeconds` | Продолжительность нагрузки в секундах.
-`IntervalMsMin`,<br/>`IntervalMsMax` | Минимальный и максимальный промежутки времени между запросами при интервальной нагрузке в миллисекундах. Значение промежутка выбирается случайно из указанного диапазона.
-`InFlightWrites` | Количество одновременно обрабатываемых запросов на запись.
-`LogMode` | Режим записи лога. В режиме `LOG_SEQUENTIAL` сначала происходит запись в чанк, а после ее подтверждения запись — в лог.
-`Sequential` | Тип записи.<ul><li>`True` — последовательная.</li><li>`False` — случайная.</li></ul>
-`IsWardenlessTest` | Если PDiskReadLoad запускается на кластере, укажите `False`. Иначе (например, при запуске в юнит-тестах) укажите `True`.
+| Параметр | Описание |
+|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `PDiskId` | Идентификатор нагружаемого PDisk на узле. |
+| `PDiskGuid` | Глобально-уникальный идентификатор нагружаемого PDisk. |
+| `VDiskId` | Нагрузка подается от имени VDisk со следующими реквизитами:<ul><li>`GroupID` — идентификатор группы.</li><li>`GroupGeneration` — поколение группы.</li><li>`Ring` — идентификатор кольца в группе.</li><li>`Domain` — идентификатор фэйл-домена в кольце.</li><li>`VDisk` — индекс VDisk в фэйл-домене.</li></ul> |
+| `Chunks` | Параметры чанка.<br/>`Slots` — количество слотов в чанке, определяет размер записи.<br/>Вы можете указать несколько `Chunks`, и тогда выбор конкретного чанка для записи будет определяться его `Weight`. |
+| `DurationSeconds` | Продолжительность нагрузки в секундах. |
+| `IntervalMsMin`,<br/>`IntervalMsMax` | Минимальный и максимальный промежутки времени между запросами при интервальной нагрузке в миллисекундах. Значение промежутка выбирается случайно из указанного диапазона. |
+| `InFlightWrites` | Количество одновременно обрабатываемых запросов на запись. |
+| `LogMode` | Режим записи лога. В режиме `LOG_SEQUENTIAL` сначала происходит запись в чанк, а после ее подтверждения запись — в лог. |
+| `Sequential` | Тип записи.<ul><li>`True` — последовательная.</li><li>`False` — случайная.</li></ul> |
+| `IsWardenlessTest` | Если PDiskReadLoad запускается на кластере, укажите `False`. Иначе (например, при запуске в юнит-тестах) укажите `True`. |
## Примеры {#example}
diff --git a/ydb/docs/ru/core/contributor/load-actors-stop.md b/ydb/docs/ru/core/contributor/load-actors-stop.md
index ad1e5bbf97..6b18bd7935 100644
--- a/ydb/docs/ru/core/contributor/load-actors-stop.md
+++ b/ydb/docs/ru/core/contributor/load-actors-stop.md
@@ -4,10 +4,10 @@
## Параметры актора {#options}
-Параметр | Описание
---- | ---
-`Tag` | Тег нагружающего актора, который нужно остановить. Тег можно посмотреть в Embedded UI кластера.
-`RemoveAllTags` | При значении параметра `True` будут остановлены все нагружающие акторы.
+| Параметр | Описание |
+|------------------|-------------------------------------------------------------------------------------------------|
+| `Tag` | Тег нагружающего актора, который нужно остановить. Тег можно посмотреть в Embedded UI кластера. |
+| `RemoveAllTags` | При значении параметра `True` будут остановлены все нагружающие акторы. |
## Примеры {#examples}
diff --git a/ydb/docs/ru/core/contributor/load-actors-storage.md b/ydb/docs/ru/core/contributor/load-actors-storage.md
index 9138ac85c8..644f4c8384 100644
--- a/ydb/docs/ru/core/contributor/load-actors-storage.md
+++ b/ydb/docs/ru/core/contributor/load-actors-storage.md
@@ -12,39 +12,39 @@
{% include [load-actors-params](../_includes/load-actors-params.md) %}
-Параметр | Описание
---- | ---
-`DurationSeconds` | Продолжительность нагрузки. Таймер запускается после завершения начальной записи данных.
-`Tablets` | Нагрузка подается от имени таблетки со следующими реквизитами:<ul><li>`TabletId` — идентификатор таблетки. Должен быть уникальным для каждого нагружающего актора в кластере. Этот параметр и `TabletName` – взаимоисключающие.</li><li>`TabletName` — имя таблетки. Если задан этот параметр, идентификаторы таблеток назначаются автоматически, таблеткам, запущенным на одной и той же ноде с одним и тем же именем, присваиваются одинаковые идентификаторы, таблеткам на разных нодах присваиваются различные идентификаторы.</li><li>`Channel` — канал таблетки.</li><li>`GroupId` — идентификатор группы хранения, на которую будет подана нагрузка.</li><li>`Generation` — поколение таблетки.</li></ul>
-`WriteSizes` | Размер записываемых данных. Для каждого запроса выбирается случайным образом из интервала `Min`-`Max`. Вы можете задать несколько диапазонов `WriteSizes`, и тогда выбор значения из конкретного диапазона будет определяться его `Weight`.
-`WriteHardRateDispatcher` | Описание [параметров нагрузки с фиксированной частотой](#hard-rate-dispatcher) для запросов записи. Если задан этот параметр, то значение `WriteIntervals` игнорируется.
-`WriteIntervals` | Описание [параметров вероятностного распределения](#params) временных интервалов между записями для интервальной нагрузки в микросекундах. Вы можете задать несколько диапазонов `WriteIntervals`, и тогда выбор значения из конкретного диапазона будет определяться его `Weight`.
-`MaxInFlightWriteRequests` | Максимальное количество одновременно обрабатываемых запросов на запись.
-`ReadSizes` | Размер читаемых данных. Для каждого запроса выбирается случайным образом из интервала `Min`-`Max`. Вы можете задать несколько диапазонов `ReadSizes`, и тогда выбор значения из конкретного диапазона будет определяться его `Weight`.
-`ReadIntervals` | Описание [параметров вероятностного распределения](#params) временных интервалов между запросами для интервальной нагрузки в микросекундах. Вы можете задать несколько диапазонов `ReadIntervals`, и тогда выбор значения из конкретного диапазона будет определяться его `Weight`.
-`ReadHardRateDispatcher` | Описание [параметров нагрузки с фиксированной частотой](#hard-rate-dispatcher) для запросов чтения. Если задан этот параметр, то значение `ReadIntervals` игнорируется.
-`MaxInFlightReadRequests` | Максимальное количество одновременно обрабатываемых запросов на чтение.
-`FlushIntervals` | Описание [параметров вероятностного распределения](#params) временных интервалов между запросами на удаление записанных основным циклом StorageLoad данных в микросекундах. Вы можете задать несколько диапазонов `FlushIntervals`, и тогда выбор значения из конкретного диапазона будет определяться его `Weight`. Одновременно будет обрабатываться только один запрос на удаление записанных данных.
-`PutHandleClass` | [Класс записи данных](#write-class) в дисковую подсистему. В случае `TabletLog` запись выполняется с максимальным приоритетом.
-`GetHandleClass` | [Класс чтения данных](#read-class) с дисковой подсистемы. В случае `FastRead` чтение выполняется с максимальной скоростью.
-`InitialAllocation` | Описание [параметров начальной записи данных](#initial-allocation). Определяет объем данных, который будет записан до старта основной нагрузки, и которые затем могут быть прочитаны запросами чтения, наряду с данными, записанными в основном цикле нагрузки.
+| Параметр | Описание |
+|----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `DurationSeconds` | Продолжительность нагрузки. Таймер запускается после завершения начальной записи данных. |
+| `Tablets` | Нагрузка подается от имени таблетки со следующими реквизитами:<ul><li>`TabletId` — идентификатор таблетки. Должен быть уникальным для каждого нагружающего актора в кластере. Этот параметр и `TabletName` – взаимоисключающие.</li><li>`TabletName` — имя таблетки. Если задан этот параметр, идентификаторы таблеток назначаются автоматически, таблеткам, запущенным на одной и той же ноде с одним и тем же именем, присваиваются одинаковые идентификаторы, таблеткам на разных нодах присваиваются различные идентификаторы.</li><li>`Channel` — канал таблетки.</li><li>`GroupId` — идентификатор группы хранения, на которую будет подана нагрузка.</li><li>`Generation` — поколение таблетки.</li></ul> |
+| `WriteSizes` | Размер записываемых данных. Для каждого запроса выбирается случайным образом из интервала `Min`-`Max`. Вы можете задать несколько диапазонов `WriteSizes`, и тогда выбор значения из конкретного диапазона будет определяться его `Weight`. |
+| `WriteHardRateDispatcher` | Описание [параметров нагрузки с фиксированной частотой](#hard-rate-dispatcher) для запросов записи. Если задан этот параметр, то значение `WriteIntervals` игнорируется. |
+| `WriteIntervals` | Описание [параметров вероятностного распределения](#params) временных интервалов между записями для интервальной нагрузки в микросекундах. Вы можете задать несколько диапазонов `WriteIntervals`, и тогда выбор значения из конкретного диапазона будет определяться его `Weight`. |
+| `MaxInFlightWriteRequests` | Максимальное количество одновременно обрабатываемых запросов на запись. |
+| `ReadSizes` | Размер читаемых данных. Для каждого запроса выбирается случайным образом из интервала `Min`-`Max`. Вы можете задать несколько диапазонов `ReadSizes`, и тогда выбор значения из конкретного диапазона будет определяться его `Weight`. |
+| `ReadIntervals` | Описание [параметров вероятностного распределения](#params) временных интервалов между запросами для интервальной нагрузки в микросекундах. Вы можете задать несколько диапазонов `ReadIntervals`, и тогда выбор значения из конкретного диапазона будет определяться его `Weight`. |
+| `ReadHardRateDispatcher` | Описание [параметров нагрузки с фиксированной частотой](#hard-rate-dispatcher) для запросов чтения. Если задан этот параметр, то значение `ReadIntervals` игнорируется. |
+| `MaxInFlightReadRequests` | Максимальное количество одновременно обрабатываемых запросов на чтение. |
+| `FlushIntervals` | Описание [параметров вероятностного распределения](#params) временных интервалов между запросами на удаление записанных основным циклом StorageLoad данных в микросекундах. Вы можете задать несколько диапазонов `FlushIntervals`, и тогда выбор значения из конкретного диапазона будет определяться его `Weight`. Одновременно будет обрабатываться только один запрос на удаление записанных данных. |
+| `PutHandleClass` | [Класс записи данных](#write-class) в дисковую подсистему. В случае `TabletLog` запись выполняется с максимальным приоритетом. |
+| `GetHandleClass` | [Класс чтения данных](#read-class) с дисковой подсистемы. В случае `FastRead` чтение выполняется с максимальной скоростью. |
+| `InitialAllocation` | Описание [параметров начальной записи данных](#initial-allocation). Определяет объем данных, который будет записан до старта основной нагрузки, и которые затем могут быть прочитаны запросами чтения, наряду с данными, записанными в основном цикле нагрузки. |
### Write requests class {#write-class}
-| Class | Description |
---- | ---
-| `TabletLog` | Самый высокий приоритет запросов записи. |
-| `AsyncBlob` | Используется для записи таблиц SSTable и их частей. |
-| `UserData` | Используется для записи пользовательских данных отдельными блобами. |
+| Class | Description |
+|-------------|---------------------------------------------------------------------|
+| `TabletLog` | Самый высокий приоритет запросов записи. |
+| `AsyncBlob` | Используется для записи таблиц SSTable и их частей. |
+| `UserData` | Используется для записи пользовательских данных отдельными блобами. |
### Read requests class {#read-class}
-| Class | Description |
---- | ---
+| Class | Description |
+|-------------|------------------------------------------------------------------------|
| `AsyncRead` | Используется для чтения данных таблеток, прошедших процесс компакшена. |
-| `FastRead` | Используется для быстрого чтения пользовательских данных. |
-| `Discover` | Чтения запросов Discover. |
-| `LowRead` | Низкоприоритетные чтения, выполняемые на фоне. |
+| `FastRead` | Используется для быстрого чтения пользовательских данных. |
+| `Discover` | Чтения запросов Discover. |
+| `LowRead` | Низкоприоритетные чтения, выполняемые на фоне. |
### Параметры вероятностного распределения {#params}
@@ -52,22 +52,22 @@
### Параметры нагрузки с фиксированной частотой {#hard-rate-dispatcher}
-Параметр | Описание
---- | ---
-`RequestsPerSecondAtStart` | Частота запросов в секунду в момент старта нагрузки. Если продолжительность нагрузки не задана, то частота запросов остается постоянной и равной величине этого параметра.
-`RequestsPerSecondOnFinish` | Частота запросов в секунду в момент завершения нагрузки.
+| Параметр | Описание |
+|------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `RequestsPerSecondAtStart` | Частота запросов в секунду в момент старта нагрузки. Если продолжительность нагрузки не задана, то частота запросов остается постоянной и равной величине этого параметра. |
+| `RequestsPerSecondOnFinish` | Частота запросов в секунду в момент завершения нагрузки. |
### Параметры начальной записи данных {#initial-allocation}
-Параметр | Описание
---- | ---
-`TotalSize` | Суммарный размер записанных данных. Этот параметр и `BlobsNumber` – взаимосключающие.
-`BlobsNumber` | Количество записанных блобов.
-`BlobSizes` | Размер записываемых блобов. Для каждого запроса выбирается случайным образом из интервала `Min`-`Max`. Вы можете задать несколько диапазонов `BlobSizes`, и тогда выбор значения из конкретного диапазона будет определяться его `Weight`.
-`MaxWritesInFlight` | Максимальное количество одновременно обрабатываемых запросов на запись. Если парамаетр не задан, то ограничения на количество одновременно обрабатываемых запросов нет.
-`MaxWriteBytesInFlight` | Максимальный суммарный объем данных одновременно обрабатываемых запросов на запись. Если парамаетр не задан, то ограничения на суммарный объем данных одновременно обрабатываемых запросов нет.
-`PutHandleClass` | [Класс записи данных](#write-class) в дисковую подсистему.
-`DelayAfterCompletionSec` | Время в секундах, которое актор ждет от момента завершения начальной записи данных до момента старта основной нагрузки. Если параметр не задан, нагрузка начнется сразу же после завершения начальной записи данных.
+| Параметр | Описание |
+|----------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `TotalSize`                | Суммарный размер записанных данных. Этот параметр и `BlobsNumber` – взаимоисключающие.                                                                                                                                                      |
+| `BlobsNumber` | Количество записанных блобов. |
+| `BlobSizes` | Размер записываемых блобов. Для каждого запроса выбирается случайным образом из интервала `Min`-`Max`. Вы можете задать несколько диапазонов `BlobSizes`, и тогда выбор значения из конкретного диапазона будет определяться его `Weight`. |
+| `MaxWritesInFlight`        | Максимальное количество одновременно обрабатываемых запросов на запись. Если параметр не задан, то ограничения на количество одновременно обрабатываемых запросов нет.                                                                      |
+| `MaxWriteBytesInFlight`    | Максимальный суммарный объем данных одновременно обрабатываемых запросов на запись. Если параметр не задан, то ограничения на суммарный объем данных одновременно обрабатываемых запросов нет.                                              |
+| `PutHandleClass` | [Класс записи данных](#write-class) в дисковую подсистему. |
+| `DelayAfterCompletionSec` | Время в секундах, которое актор ждет от момента завершения начальной записи данных до момента старта основной нагрузки. Если параметр не задан, нагрузка начнется сразу же после завершения начальной записи данных. |
## Примеры {#examples}
diff --git a/ydb/docs/ru/core/contributor/load-actors-vdisk.md b/ydb/docs/ru/core/contributor/load-actors-vdisk.md
index d0cc148826..aa41af5ffb 100644
--- a/ydb/docs/ru/core/contributor/load-actors-vdisk.md
+++ b/ydb/docs/ru/core/contributor/load-actors-vdisk.md
@@ -6,20 +6,20 @@
{% include [load-actors-params](../_includes/load-actors-params.md) %}
-Параметр | Описание
---- | ---
-`VDiskId` | Параметры VDisk, от имени которого подается нагрузка.<ul><li>`GroupID` — идентификатор группы.</li><li>`GroupGeneration` — поколение группы.</li><li>`Ring` — идентификатор кольца в группе.</li><li>`Domain` — идентификатор фэйл-домена в кольце.</li><li>`VDisk` — индекс VDisk в фэйл-домене.</li></ul>
-`GroupInfo` | Описание группы, в которую входит нагружаемый VDisk (в корректном поколении).
-`TabletId` | Идентификатор таблетки, от имени которой подается нагрузка. Должен быть уникальным для каждого нагружающего актора.
-`Channel` | Номер канала внутри таблетки, который будет указан в командах записи блобов и сборки мусора.
-`DurationSeconds` | Полная длительность теста в секундах, по достижению которой нагрузка автоматически прекращается.
-`WriteIntervals` | Описание [параметров вероятностного распределения](#params) временных интервалов между записями.
-`WriteSizes` | Размер записываемых данных. Для каждого запроса выбирается случайным образом из интервала `Min`-`Max`. Вы можете задать несколько диапазонов `WriteSizes`, и тогда выбор значения из конкретного диапазона будет определяться его `Weight`.
-`InFlightPutsMax` | Максимальное количество одновременно выполняемых запросов на запись блоба в VDisk (TEvVPut-запросы); если не указан, то число запросов не ограничивается.
-`InFlightPutBytesMax` | Максимальное количество байт в одновременно выполняемых запросах на запись блоба в VDisk (TEvVPut-запросы).
-`PutHandleClass` | Класс записи данных в дисковую подсистему. В случае `TabletLog` запись выполняется с максимальным приоритетом.
-`BarrierAdvanceIntervals` | Описание [параметров вероятностного распределения](#params) интервалов между передвижением барьера сборки мусора и шага записи.
-`StepDistance` | Расстояние между текущим записываемым шагом `Gen:Step` блоба и собираемым. Чем больше эта величина, тем больше данных хранится. Запись происходит с `Step = X`, удаление — всех блобов со `Step = X - StepDistance`. При этом периодически (с периодом `BarrierAdvanceIntervals`) `Step` увеличивается на единицу.
+| Параметр | Описание |
+|---------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `VDiskId` | Параметры VDisk, от имени которого подается нагрузка.<ul><li>`GroupID` — идентификатор группы.</li><li>`GroupGeneration` — поколение группы.</li><li>`Ring` — идентификатор кольца в группе.</li><li>`Domain` — идентификатор фэйл-домена в кольце.</li><li>`VDisk` — индекс VDisk в фэйл-домене.</li></ul> |
+| `GroupInfo` | Описание группы, в которую входит нагружаемый VDisk (в корректном поколении). |
+| `TabletId` | Идентификатор таблетки, от имени которой подается нагрузка. Должен быть уникальным для каждого нагружающего актора. |
+| `Channel` | Номер канала внутри таблетки, который будет указан в командах записи блобов и сборки мусора. |
+| `DurationSeconds` | Полная длительность теста в секундах, по достижению которой нагрузка автоматически прекращается. |
+| `WriteIntervals` | Описание [параметров вероятностного распределения](#params) временных интервалов между записями. |
+| `WriteSizes` | Размер записываемых данных. Для каждого запроса выбирается случайным образом из интервала `Min`-`Max`. Вы можете задать несколько диапазонов `WriteSizes`, и тогда выбор значения из конкретного диапазона будет определяться его `Weight`. |
+| `InFlightPutsMax` | Максимальное количество одновременно выполняемых запросов на запись блоба в VDisk (TEvVPut-запросы); если не указан, то число запросов не ограничивается. |
+| `InFlightPutBytesMax` | Максимальное количество байт в одновременно выполняемых запросах на запись блоба в VDisk (TEvVPut-запросы). |
+| `PutHandleClass` | Класс записи данных в дисковую подсистему. В случае `TabletLog` запись выполняется с максимальным приоритетом. |
+| `BarrierAdvanceIntervals` | Описание [параметров вероятностного распределения](#params) интервалов между передвижением барьера сборки мусора и шага записи. |
+| `StepDistance` | Расстояние между текущим записываемым шагом `Gen:Step` блоба и собираемым. Чем больше эта величина, тем больше данных хранится. Запись происходит с `Step = X`, удаление — всех блобов со `Step = X - StepDistance`. При этом периодически (с периодом `BarrierAdvanceIntervals`) `Step` увеличивается на единицу. |
### Параметры вероятностного распределения {#params}
diff --git a/ydb/docs/ru/core/contributor/localdb-uncommitted-txs.md b/ydb/docs/ru/core/contributor/localdb-uncommitted-txs.md
index fdd391cc2f..f9ac999aca 100644
--- a/ydb/docs/ru/core/contributor/localdb-uncommitted-txs.md
+++ b/ydb/docs/ru/core/contributor/localdb-uncommitted-txs.md
@@ -28,44 +28,44 @@
[MemTable](../concepts/glossary.md#memtable) в LocalDB — это небольшое отсортированное по ключу дерево недавно сделанных изменений, которое хранится в памяти. В качестве ключа в этом дереве используется ключ таблицы, а в качестве значения — указатель на цепочку изменений для соответствующего ключа. Для каждого изменения указывается MVCC версия этого изменения (пара `Step`/`TxId` с глобальным временем коммита). Строки в цепочке изменений смержены в рамках одного MemTable. Например, представим, что у нас были следующие операции для ключа K:
-| Версия | Операция |
---- | ---
+| Версия | Операция |
+|------------|------------------------|
| `v1000/10` | `UPDATE ... SET A = 1` |
| `v2000/11` | `UPDATE ... SET B = 2` |
| `v3000/12` | `UPDATE ... SET C = 3` |
Тогда цепочка строк с изменениями для этого ключа в единственном MemTable будет выглядеть следующим образом:
-| Версия | Строка |
---- | ---
+| Версия | Строка |
+|------------|---------------------------|
| `v3000/12` | `SET A = 1, B = 2, C = 3` |
-| `v2000/11` | `SET A = 1, B = 2` |
-| `v1000/10` | `SET A = 1` |
+| `v2000/11` | `SET A = 1, B = 2` |
+| `v1000/10` | `SET A = 1` |
В некоторых случаях MemTable может разделиться между операциями, что происходит, например, с началом компакшена. Тогда MemTable из примера выше выглядел бы так:
-| MemTable | Версия | Строка |
---- | --- | ---
-| Эпоха 2 | `v3000/12` | `SET B = 2, C = 3` |
-| Эпоха 2 | `v2000/11` | `SET B = 2` |
-| Эпоха 1 | `v1000/10` | `SET A = 1` |
+| MemTable | Версия | Строка |
+|----------| --- |--------------------|
+| Эпоха 2 | `v3000/12` | `SET B = 2, C = 3` |
+| Эпоха 2 | `v2000/11` | `SET B = 2` |
+| Эпоха 1 | `v1000/10` | `SET A = 1` |
Новые изменения применяются к текущему MemTable, даже если они ещё не были закомичены. При записи незакомиченные изменения помечаются специальной версией, с указанием максимального значения для `Step` (как бы подразумевая далёкое будущее) и `TxId` транзакции в качестве `TxId`. Мёрж нижестоящих изменений не производится. Например, представим, что далее мы выполнили следующие незакомиченные операции для ключа K:
-| TxId | Операция |
---- | ---
-| 15 | `UPDATE ... SET C = 10` |
-| 13 | `UPDATE ... SET B = 20` |
+| TxId | Операция |
+|------|-------------------------|
+| 15 | `UPDATE ... SET C = 10` |
+| 13 | `UPDATE ... SET B = 20` |
Цепочка изменений в MemTable теперь будет выглядеть так:
-| Версия | Строка |
---- | ---
-| `v{max}/13` | `SET B = 20` |
-| `v{max}/15` | `SET C = 10` |
-| `v3000/12` | `SET A = 1, B = 2, C = 3` |
-| `v2000/11` | `SET A = 1, B = 2` |
-| `v1000/10` | `SET A = 1` |
+| Версия | Строка |
+|-------------|---------------------------|
+| `v{max}/13` | `SET B = 20` |
+| `v{max}/15` | `SET C = 10` |
+| `v3000/12` | `SET A = 1, B = 2, C = 3` |
+| `v2000/11` | `SET A = 1, B = 2` |
+| `v1000/10` | `SET A = 1` |
Во время чтения для изменений с версией `Step == max`, [итератор](https://github.com/ydb-platform/ydb/blob/main/ydb/core/tablet_flat/flat_mem_iter.h) проверяет их `TxId` по [таблице транзакций](https://github.com/ydb-platform/ydb/blob/0e69bf615395fdd48ecee032faaec81bc468b0b8/ydb/core/tablet_flat/flat_table.h#L359). Для закомиченных транзакций там указана MVCC версия коммита. Далее он применяет все закомиченные изменения, пока не найдёт и не применит первую смерженную запись, у которой версия `Step != max`.
@@ -73,14 +73,14 @@
Теперь представим, что мы выполнили `SET A = 30` в версии `v5000/21`. Итоговая цепочка изменений будет выглядеть следующим образом:
-| Версия | Строка |
---- | ---
-| `v5000/21` | `SET A = 30, B = 20, C = 3` |
-| `v{max}/13` | `SET B = 20` |
-| `v{max}/15` | `SET C = 10` |
-| `v3000/12` | `SET A = 1, B = 2, C = 3` |
-| `v2000/11` | `SET A = 1, B = 2` |
-| `v1000/10` | `SET A = 1` |
+| Версия | Строка |
+|-------------|-----------------------------|
+| `v5000/21` | `SET A = 30, B = 20, C = 3` |
+| `v{max}/13` | `SET B = 20` |
+| `v{max}/15` | `SET C = 10` |
+| `v3000/12` | `SET A = 1, B = 2, C = 3` |
+| `v2000/11` | `SET A = 1, B = 2` |
+| `v1000/10` | `SET A = 1` |
Новая строка была добавлена в смёрженном состоянии, включая изменения для `TxId = 13`. Так как транзакция `TxId = 15` не закоммичена, её изменения были пропущены, что отразилось в смёрженном состоянии для `v5000/21`. Важно, чтобы вышестоящий код не закоммитил `TxId = 15` после этого, так как подобный коммит приведёт в аномалии: часть версий на чтении будут видеть транзакцию как закомиченную, а часть не будет.
@@ -92,58 +92,58 @@
По одному ключу может быть несколько дельта записей, а также опционально самая свежая закомиченная запись. Так как по одному ключу на странице данных может быть только одна запись, таблица смещений указывает на первую дельта запись, а остальные записи при этом доступны через таблицу альтернативных смещений для этой записи:
-| Смещение | Описание |
---- | ---
-| -X*8 | смещение Main |
-| ... | ... |
-| -16 | смещение Delta 2 |
-| -8 | смещение Delta 1 |
-| 0 | заголовок Delta 0 |
-| ... | ... |
+| Смещение | Описание |
+|------------------|-------------------|
+| -X*8 | смещение Main |
+| ... | ... |
+| -16 | смещение Delta 2 |
+| -8 | смещение Delta 1 |
+| 0 | заголовок Delta 0 |
+| ... | ... |
| смещение Delta 1 | заголовок Delta 1 |
-| ... | ... |
-| смещение Main | заголовок Main |
+| ... | ... |
+| смещение Main | заголовок Main |
Имея указатель на запись Delta 0, остальные записи для ключа можно получить через метод `GetAltRecord(size_t index)`, где `index` — номер записи в цепочке (1 для Delta 1). Цепочка заканчивается либо указателем на основную запись (без дельта флага), либо 0, если основная запись отсутствует.
Продолжая начатый выше пример, предположим, что после записи транзакции с `TxId = 13` текущий MemTable целиком скомпактили. Запись по 32-битному ключу K может выглядеть следующим образом (смещения указаны относительно указателя в списке записей на странице):
-| Смещение | Значение | Описание |
---- | --- | ---
-| -16 | 58 | смещение Main |
-| -8 | 29 | смещение Delta 1 |
-| 0 | 0x21 | Delta 0: IsDelta + ERowOp::Upsert |
-| 1 | 0x00 | .. ключевая колонка не-NULL |
-| 2 | K | .. ключевая колонка (32-bit) |
-| 6 | 0x00 | .. колонка A пустая |
-| 7 | 0 | .. колонка A (32-bit) |
-| 11 | 0x01 | .. колонка B = ECellOp::Set |
-| 12 | 20 | .. колонка B (32-bit) |
-| 16 | 0x00 | .. колонка C пустая |
-| 17 | 0 | .. колонка C (32-bit) |
-| 21 | 13 | .. TDelta::TxId |
-| 29 | 0x21 | Delta 1: IsDelta + ERowOp::Upsert |
-| 30 | 0x00 | .. ключевая колонка не-NULL |
-| 31 | K | .. ключевая колонка (32-bit) |
-| 35 | 0x00 | .. колонка A пустая |
-| 36 | 0 | .. колонка A (32-bit) |
-| 40 | 0x00 | .. колонка B пустая |
-| 41 | 0 | .. колонка B (32-bit) |
-| 45 | 0x01 | .. колонка C = ECellOp::Set |
-| 46 | 10 | .. колонка C (32-bit) |
-| 50 | 15 | .. TDelta::TxId |
-| 58 | 0x61 | Main: HasHistory + IsVersioned + ERowOp::Upsert |
-| 59 | 0x00 | .. ключевая колонка не-NULL |
-| 60 | K | .. ключевая колонка (32-bit) |
-| 64 | 0x01 | .. колонка A = ECellOp::Set |
-| 65 | 1 | .. колонка A (32-bit) |
-| 69 | 0x01 | .. колонка B = ECellOp::Set |
-| 70 | 2 | .. колонка B (32-bit) |
-| 74 | 0x01 | .. колонка C = ECellOp::Set |
-| 75 | 3 | .. колонка C (32-bit) |
-| 79 | 3000 | .. RowVersion.Step |
-| 87 | 12 | .. RowVersion.TxId |
-| 95 | - | Конец записи |
+| Смещение | Значение | Описание |
+|----------| --- |-------------------------------------------------|
+| -16 | 58 | смещение Main |
+| -8 | 29 | смещение Delta 1 |
+| 0 | 0x21 | Delta 0: IsDelta + ERowOp::Upsert |
+| 1 | 0x00 | .. ключевая колонка не-NULL |
+| 2 | K | .. ключевая колонка (32-bit) |
+| 6 | 0x00 | .. колонка A пустая |
+| 7 | 0 | .. колонка A (32-bit) |
+| 11 | 0x01 | .. колонка B = ECellOp::Set |
+| 12 | 20 | .. колонка B (32-bit) |
+| 16 | 0x00 | .. колонка C пустая |
+| 17 | 0 | .. колонка C (32-bit) |
+| 21 | 13 | .. TDelta::TxId |
+| 29 | 0x21 | Delta 1: IsDelta + ERowOp::Upsert |
+| 30 | 0x00 | .. ключевая колонка не-NULL |
+| 31 | K | .. ключевая колонка (32-bit) |
+| 35 | 0x00 | .. колонка A пустая |
+| 36 | 0 | .. колонка A (32-bit) |
+| 40 | 0x00 | .. колонка B пустая |
+| 41 | 0 | .. колонка B (32-bit) |
+| 45 | 0x01 | .. колонка C = ECellOp::Set |
+| 46 | 10 | .. колонка C (32-bit) |
+| 50 | 15 | .. TDelta::TxId |
+| 58 | 0x61 | Main: HasHistory + IsVersioned + ERowOp::Upsert |
+| 59 | 0x00 | .. ключевая колонка не-NULL |
+| 60 | K | .. ключевая колонка (32-bit) |
+| 64 | 0x01 | .. колонка A = ECellOp::Set |
+| 65 | 1 | .. колонка A (32-bit) |
+| 69 | 0x01 | .. колонка B = ECellOp::Set |
+| 70 | 2 | .. колонка B (32-bit) |
+| 74 | 0x01 | .. колонка C = ECellOp::Set |
+| 75 | 3 | .. колонка C (32-bit) |
+| 79 | 3000 | .. RowVersion.Step |
+| 87 | 12 | .. RowVersion.TxId |
+| 95 | - | Конец записи |
При этом оставшиеся две записи будут в исторических данных с ключами `(RowId, 2000, 11)` и `(RowId, 1000, 10)`, соответственно. Об их наличии говорит флаг `HasHistory` в основной записи.
diff --git a/ydb/docs/ru/core/dev/system-views.md b/ydb/docs/ru/core/dev/system-views.md
index 135ecd1af6..118d19c8e4 100644
--- a/ydb/docs/ru/core/dev/system-views.md
+++ b/ydb/docs/ru/core/dev/system-views.md
@@ -10,6 +10,8 @@
* [Топы запросов по определенным характеристикам](#top-queries).
* [Подробная информация о запросах](#query-metrics).
* [История перегруженных партиций](#top-overload-partitions).
+* [Информация о пулах ресурсов](#resource_pools).
+* [Сущности управления доступом](#auth).
{% note info %}
@@ -29,8 +31,8 @@
Структура представления:
-Поле | Описание
---- | ---
+Колонка | Описание
+------- | --------
`OwnerId` | Идентификатор SchemeShard, обслуживающего таблицу.<br/>Тип: `Uint64`.<br/>Ключ: `0`.
`PathId` | Идентификатор пути в SchemeShard.<br/>Тип: `Uint64`.<br/>Ключ: `1`.
`PartIdx` | Порядковый номер партиции.<br/>Тип: `Uint64`.<br/>Ключ: `2`.
@@ -99,10 +101,10 @@ GROUP BY Path
Текст запроса ограничен 10 килобайтами.
-Все представления содержат одинаковый набор полей:
+Все представления имеют одинаковую структуру:
-Поле | Описание
---- | ---
+Колонка | Описание
+------- | --------
`IntervalEnd` | Момент закрытия минутного или часового интервала.<br/>Тип: `Timestamp`.<br/>Ключ: `0`.
`Rank` | Ранг запроса в топе.<br/>Тип: `Uint32`.<br/>Ключ: `1`.
`QueryText` | Текст запроса.<br/>Тип: `Utf8`.
@@ -181,7 +183,7 @@ WHERE Rank = 1
Структура представления:
-Поле | Описание
+Колонка | Описание
---|---
`IntervalEnd` | Момент закрытия минутного интервала.<br/>Тип: `Timestamp`.<br/>Ключ: `0`.
`Rank` | Ранг запроса в пределах интервала (по полю SumCPUTime).<br/>Тип: `Uint32`.<br/>Ключ: `1`.
@@ -248,10 +250,10 @@ LIMIT 100
В представления попадают партиции с пиковой нагрузкой более 70 % (`CPUCores` > 0,7). В пределах одного интервала партиции ранжированы по пиковому значению нагрузки.
-Оба представления содержат одинаковый набор полей:
+Все представления имеют одинаковую структуру:
-Поле | Описание
---- | ---
+Колонка | Описание
+------- | --------
`IntervalEnd` | Момент закрытия минутного или часового интервала.<br/>Тип: `Timestamp`.<br/>Ключ: `0`.
`Rank` | Ранг партиции в пределах интервала (по CPUCores).<br/>Тип: `Uint32`.<br/>Ключ: `1`.
`TabletId` | Идентификатор таблетки, обслуживающей партицию.<br/>Тип: `Uint64`.
@@ -308,8 +310,8 @@ ORDER BY IntervalEnd desc, CPUCores desc
Структура системного представления:
-Поле | Описание
---- | ---
+Колонка | Описание
+------- | --------
`Name` | Имя пула ресурсов.<br/>Тип: `Utf8`.<br/>Ключ: `0`.
`ConcurrentQueryLimit` | Максимальное количество параллельно выполняющихся запросов в пуле ресурсов.<br/>Тип: `Int32`.
`QueueSize` | Максимальный размер очереди ожидания.<br/>Тип: `Int32`.
@@ -353,8 +355,8 @@ WHERE Name = "default";
Структура системного представления:
-Поле | Описание
---- | ---
+Колонка | Описание
+------- | --------
`Name` | Имя классификатора пула ресурсов.<br/>Тип: `Utf8`.<br/>Ключ: `0`.
`Rank` | Приоритет выбора классификатора пулов ресурсов.<br/>Тип: `Int64`.
`MemberName` | Пользователь или группа пользователей, которые будут отправлены в указанный пул ресурсов.<br/>Тип: `Utf8`.
@@ -380,4 +382,110 @@ WHERE Name = "olap";
--- | --- | --- | --- | ---
1 | olap | 1000 | olap_group@builtin | olap
-{% endif %} \ No newline at end of file
+{% endif %}
+
+## Сущности управления доступом {#auth}
+
+Следующие системные представления содержат информацию о различных [сущностях управления доступом](../security/authorization.md).
+
+### Информация о пользователях
+
+Представление `auth_users` содержит список внутренних [пользователей](../concepts/glossary.md#access-user) {{ ydb-short-name }}. В него не входят пользователи, аутентифицированные через внешние системы, такие как LDAP.
+
+Полный доступ к этому представлению имеют администраторы. Обычные пользователи могут просматривать только свои собственные данные.
+
+Структура представления:
+
+| Колонка | Описание |
+|---------|----------|
+| `Sid` | [SID](../concepts/glossary.md#sid) пользователя.<br />Тип: `Utf8`.<br />Ключ: `0`. |
+| `IsEnabled` | Указывает, разрешён ли вход данному пользователю; используется для явной блокировки администратором. Независим от `IsLockedOut`.<br />Тип: `Bool`. |
+| `IsLockedOut` | Автоматическая блокировка из-за превышения количества неудачных попыток входа. Независима от `IsEnabled`.<br />Тип: `Bool`. |
+| `CreatedAt` | Время создания пользователя.<br />Тип: `Timestamp`. |
+| `LastSuccessfulAttemptAt` | Время последней успешной попытки входа.<br />Тип: `Timestamp`. |
+| `LastFailedAttemptAt` | Время последней неудачной попытки входа.<br />Тип: `Timestamp`. |
+| `FailedAttemptCount` | Количество неудачных попыток входа.<br />Тип: `Uint32`. |
+| `PasswordHash` | JSON-строка, содержащая хеш пароля, соль и алгоритм хеширования.<br />Тип: `Utf8`. |
+
+### Информация о группах
+
+Представление `auth_groups` содержит список [групп доступа](../concepts/glossary.md#access-group).
+
+Доступ к этому представлению имеют только администраторы.
+
+Структура представления:
+
+| Колонка | Описание |
+|---------|----------|
+| `Sid` | [SID](../concepts/glossary.md#sid) группы.<br />Тип: `Utf8`.<br />Ключ: `0`. |
+
+### Информация о членстве в группах
+
+Представление `auth_group_members` содержит информацию о членстве в [группах доступа](../concepts/glossary.md#access-group).
+
+Доступ к этому представлению имеют только администраторы.
+
+Структура представления:
+
+| Колонка | Описание |
+|---------|----------|
+| `GroupSid` | SID группы.<br />Тип: `Utf8`.<br />Ключ: `0`. |
+| `MemberSid` | SID участника группы.<br />Тип: `Utf8`.<br />Ключ: `1`. |
+
+### Информация о правах доступа
+
+Представления содержат список выданных [прав доступа](../concepts/glossary.md#access-right).
+
+Включают два представления:
+
+* `auth_permissions`: Явно выданные права доступа.
+* `auth_effective_permissions`: Эффективные права доступа с учётом [наследования](../concepts/glossary.md#access-right-inheritance).
+
+Пользователь может видеть [объект доступа](../concepts/glossary.md#access-object) в результатах, если у него есть разрешение `ydb.granular.describe_schema` для этого объекта.
+
+Структура представления:
+
+| Колонка | Описание |
+|---------|----------|
+| `Path` | Путь к объекту доступа.<br />Тип: `Utf8`.<br />Ключ: `0`. |
+| `Sid` | SID [субъекта доступа](../concepts/glossary.md#access-subject).<br />Тип: `Utf8`.<br />Ключ: `1`. |
+| `Permission` | Название [права доступа](../yql/reference/syntax/grant.md#permissions-list) {{ ydb-short-name }}.<br />Тип: `Utf8`.<br />Ключ: `2`. |
+
+#### Примеры запросов
+
+Все явно выданные права для таблицы `my_table`:
+
+```yql
+SELECT *
+FROM `.sys/auth_permissions`
+WHERE Path = "my_table"
+```
+
+Все эффективные права для таблицы `my_table`, включая унаследованные:
+
+```yql
+SELECT *
+FROM `.sys/auth_effective_permissions`
+WHERE Path = "my_table"
+```
+
+Все права, явно выданные пользователю `user3`:
+
+```yql
+SELECT *
+FROM `.sys/auth_permissions`
+WHERE Sid = "user3"
+```
+
+### Информация о владельцах объектов доступа {#auth-owners}
+
+Представление `auth_owners` отображает информацию о [владельцах](../concepts/glossary.md#access-owner) [объектов доступа](../concepts/glossary.md#access-object).
+
+Пользователь может видеть [объект доступа](../concepts/glossary.md#access-object) в результатах, если у него есть право `ydb.granular.describe_schema` для этого объекта.
+
+Структура представления:
+
+| Колонка | Описание |
+|---------|----------|
+| `Path` | Путь к объекту доступа.<br />Тип: `Utf8`.<br />Ключ: `0`. |
+| `Sid` | SID владельца объекта доступа.<br />Тип: `Utf8`. |
diff --git a/ydb/docs/ru/core/postgresql/import.md b/ydb/docs/ru/core/postgresql/import.md
index 7df0ec2167..ffce54042e 100644
--- a/ydb/docs/ru/core/postgresql/import.md
+++ b/ydb/docs/ru/core/postgresql/import.md
@@ -62,10 +62,10 @@
### Параметры подкоманды {#options}
-Имя | Описание
----|---
-`-i` | Имя файла, в котором находится изначальный дамп. Если опция не указана, дамп считывается из stdin'a.
-`--ignore-unsupported` | При указании этой опции, неподдерживаемые конструкции будут закомментированы в итоговом дампе и продублированы в stderr. По умолчанию, при обнаружении неподдерживаемых конструкций, команда возвращает ошибку. Не относится к выражениям `ALTER TABLE`, задающим первичный ключ таблицы, они комментируются в любом случае.
+| Имя | Описание |
+|-------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `-i` | Имя файла, в котором находится изначальный дамп. Если опция не указана, дамп считывается из stdin'a. |
+| `--ignore-unsupported` | При указании этой опции, неподдерживаемые конструкции будут закомментированы в итоговом дампе и продублированы в stderr. По умолчанию, при обнаружении неподдерживаемых конструкций, команда возвращает ошибку. Не относится к выражениям `ALTER TABLE`, задающим первичный ключ таблицы, они комментируются в любом случае. |
{% note warning %}
diff --git a/ydb/docs/ru/core/public-materials/_includes/conferences/2024/SmartData.md b/ydb/docs/ru/core/public-materials/_includes/conferences/2024/SmartData.md
new file mode 100644
index 0000000000..77d6d79dad
--- /dev/null
+++ b/ydb/docs/ru/core/public-materials/_includes/conferences/2024/SmartData.md
@@ -0,0 +1,11 @@
+## Шардированный не значит распределенный: что важно знать, когда PostgreSQL мало {#2024-conf-smartdata}
+
+{% include notitle [testing_tag](../../tags.md#testing) %}
+
+[{{ team.ivanov.name }}]({{ team.ivanov.profile }}) ({{ team.ivanov.position }}) и [{{ team.bondar.name }}]({{ team.bondar.profile }}) ({{ team.bondar.position }}) рассказали, чем отличаются распределённые СУБД от шардированных. Особое внимание уделили тому, почему решения, подобные Citus, не являются ACID в случае широких транзакций. В конце выступления на примере бенчмарка TPC-C показали, что в PostgreSQL вертикальное масштабирование ограничено ботлнеком в синхронной репликации, и сравнили производительность PostgreSQL и распределённых СУБД CockroachDB и YDB.
+
+@[YouTube](https://youtu.be/BDpLLmV37hY)
+
+Доклад будет интересен как разработчикам приложений, которым требуется надёжная СУБД, так и людям, интересующимся распределёнными системами и базами данных.
+
+[Слайды](https://presentations.ydb.tech/2024/ru/smartdataconf/sharded_is_not_distributed/presentation.pdf) \ No newline at end of file
diff --git a/ydb/docs/ru/core/public-materials/videos.md b/ydb/docs/ru/core/public-materials/videos.md
index 759a136573..7537359111 100644
--- a/ydb/docs/ru/core/public-materials/videos.md
+++ b/ydb/docs/ru/core/public-materials/videos.md
@@ -10,6 +10,8 @@
- 2024
+ {% include [SmartData](./_includes/conferences/2024/SmartData.md) %}
+
{% include [HighLoad](./_includes/conferences/2024/HighLoad.md) %}
{% include [Saint_HighLoad](./_includes/conferences/2024/Saint_HighLoad.md) %}
diff --git a/ydb/docs/ru/core/reference/configuration/index.md b/ydb/docs/ru/core/reference/configuration/index.md
index b0ca531439..c3d52e8cf4 100644
--- a/ydb/docs/ru/core/reference/configuration/index.md
+++ b/ydb/docs/ru/core/reference/configuration/index.md
@@ -178,11 +178,11 @@ domains_config:
Доступны следующие [режимы отказоустойчивости](../../concepts/topology.md):
-Режим | Описание
---- | ---
-`none` | Избыточность отсутствует. Применяется для тестирования.
-`block-4-2` | Избыточность с коэффициентом 1,5, применяется для однодатацентровых кластеров.
-`mirror-3-dc` | Избыточность с коэффициентом 3, применяется для мультидатацентровых кластеров.
+| Режим | Описание |
+|---------------|--------------------------------------------------------------------------------|
+| `none` | Избыточность отсутствует. Применяется для тестирования. |
+| `block-4-2` | Избыточность с коэффициентом 1,5, применяется для однодатацентровых кластеров. |
+| `mirror-3-dc` | Избыточность с коэффициентом 3, применяется для мультидатацентровых кластеров. |
``` yaml
domains_config:
@@ -624,11 +624,11 @@ actor_system_config:
cpu_count: 10
```
-Параметр | Описание
---- | ---
-`use_auto_config` | Включение автоматического конфигурирования акторной системы.
-`node_type` | Тип узла. Определяет ожидаемую нагрузку и соотношение ядер CPU между пулами. Одно из значений:<ul><li>`STORAGE` — узел работает с блочными устройствами и отвечает за Distributed Storage;</li><li>`COMPUTE` — узел обслуживает пользовательскую нагрузку;</li><li>`HYBRID` — узел работает со смешанной нагрузкой или потребление `System`, `User` и `IC` узла под нагрузкой приблизительно одинаково.
-`cpu_count` | Количество ядер CPU, выделенных узлу.
+| Параметр | Описание |
+|------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `use_auto_config` | Включение автоматического конфигурирования акторной системы. |
+| `node_type`      | Тип узла. Определяет ожидаемую нагрузку и соотношение ядер CPU между пулами. Одно из значений:<ul><li>`STORAGE` — узел работает с блочными устройствами и отвечает за Distributed Storage;</li><li>`COMPUTE` — узел обслуживает пользовательскую нагрузку;</li><li>`HYBRID` — узел работает со смешанной нагрузкой или потребление `System`, `User` и `IC` узла под нагрузкой приблизительно одинаково.</li></ul> |
+| `cpu_count` | Количество ядер CPU, выделенных узлу. |
### Ручное конфигурирование {#tuneconfig}
@@ -664,19 +664,19 @@ actor_system_config:
spin_threshold: 0
```
-Параметр | Описание
---- | ---
-`executor` | Конфигурация пулов.<br/>В конфигах пулов рекомендуется менять только количество ядер процессора (параметр `threads`).
-`name` | Имя пула, определяет его назначение. Одно из значений:<ul><li>`System` — предназначен для выполнения быстрых внутренних операций {{ ydb-full-name }} (обслуживает системные таблетки, State Storage, ввод и вывод Distributed Storage, Erasure Сoding);</li><li>`User` — обслуживает пользовательскую нагрузку (пользовательские таблетки, выполнение запросов в Query Processor);</li><li>`Batch` — обслуживает задачи, которые не имеют строгого лимита на время выполнения, фоновых операции (сборка мусора, тяжелые запросы Query Processor);</li><li>`IO` — отвечает за выполнение всех задач с блокирующими операциями (аутентификация, запись логов в файл);</li><li>`IC` — Interconnect, включает нагрузку, связанную с коммуникацией между узлами (системные вызовы для ожидания и отправки по сети данных, сериализация данных, разрезание и склеивание сообщений).</li></ul>
-`spin_threshold` | Количество тактов процессора перед уходом в сон при отсутствии сообщений. Состояние сна снижает энергопотребление, но может увеличивать latency запросов во время слабой нагрузки.
-`threads` | Количество ядер процессора, выделенных пулу.<br/>Не рекомендуется суммарно назначать в пулы System, User, Batch, IC больше ядер, чем доступно в системе.
-`max_threads` | Максимальное количество ядер процессора, которые могут быть выданы пулу в случае использования простаивающих ядер из других пулов. При выставлении параметра включается механизм увеличения размера пула при полном потреблении пула и наличия свободных ядер.<br/>Проверка текущей нагрузки и перераспределение ядер происходит 1 раз в секунду.
-`max_avg_ping_deviation` | Дополнительное условие для расширения пула по количеству ядер. При потреблении более чем 90% ядер процессора, выделенных пулу, требуется ухудшение показателя SelfPing более чем на `max_avg_ping_deviation` микросекунд от ожидаемых 10 миллисекунд.
-`time_per_mailbox_micro_secs` | Количество сообщений в каждом акторе, которое будет обработано перед переключением на другой актор.
-`type` | Тип пула. Одно из значений:<ul><li>`IO` — укажите для пула IO;</li><li>`BASIC` — укажите для всех остальных пулов.</li></ul>
-`scheduler` | Конфигурация шедулера. Шедулер акторной системы отвечает за доставку отложенных сообщений между акторами.<br/>Не рекомендуется изменять параметры шедулера по умолчанию.
-`progress_threshold` | В акторной системе есть возможность запросить отправку сообщения в будущем по расписанию. Возможна ситуация, когда в определенный момент времени системе не удастся отправить все запланированные сообщения. В этом случае система начинает рассылать сообщения в «виртуальном времени», обрабатывая в каждом цикле отправку сообщений за период, не превышающий `progress_threshold` в микросекундах, и продвигая виртуальное время на `progress_threshold`, пока оно не догонит реальное.
-`resolution` | При составлении расписания отправки сообщений используются дискретные временные слоты. Длительность слота задается параметром `resolution` в микросекундах.
+| Параметр | Описание |
+|------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `executor` | Конфигурация пулов.<br/>В конфигах пулов рекомендуется менять только количество ядер процессора (параметр `threads`). |
+| `name` | Имя пула, определяет его назначение. Одно из значений:<ul><li>`System` — предназначен для выполнения быстрых внутренних операций {{ ydb-full-name }} (обслуживает системные таблетки, State Storage, ввод и вывод Distributed Storage, Erasure Сoding);</li><li>`User` — обслуживает пользовательскую нагрузку (пользовательские таблетки, выполнение запросов в Query Processor);</li><li>`Batch` — обслуживает задачи, которые не имеют строгого лимита на время выполнения, фоновых операции (сборка мусора, тяжелые запросы Query Processor);</li><li>`IO` — отвечает за выполнение всех задач с блокирующими операциями (аутентификация, запись логов в файл);</li><li>`IC` — Interconnect, включает нагрузку, связанную с коммуникацией между узлами (системные вызовы для ожидания и отправки по сети данных, сериализация данных, разрезание и склеивание сообщений).</li></ul> |
+| `spin_threshold` | Количество тактов процессора перед уходом в сон при отсутствии сообщений. Состояние сна снижает энергопотребление, но может увеличивать latency запросов во время слабой нагрузки. |
+| `threads` | Количество ядер процессора, выделенных пулу.<br/>Не рекомендуется суммарно назначать в пулы System, User, Batch, IC больше ядер, чем доступно в системе. |
+| `max_threads` | Максимальное количество ядер процессора, которые могут быть выданы пулу в случае использования простаивающих ядер из других пулов. При выставлении параметра включается механизм увеличения размера пула при полном потреблении пула и наличия свободных ядер.<br/>Проверка текущей нагрузки и перераспределение ядер происходит 1 раз в секунду. |
+| `max_avg_ping_deviation` | Дополнительное условие для расширения пула по количеству ядер. При потреблении более чем 90% ядер процессора, выделенных пулу, требуется ухудшение показателя SelfPing более чем на `max_avg_ping_deviation` микросекунд от ожидаемых 10 миллисекунд. |
+| `time_per_mailbox_micro_secs` | Количество сообщений в каждом акторе, которое будет обработано перед переключением на другой актор. |
+| `type` | Тип пула. Одно из значений:<ul><li>`IO` — укажите для пула IO;</li><li>`BASIC` — укажите для всех остальных пулов.</li></ul> |
+| `scheduler` | Конфигурация шедулера. Шедулер акторной системы отвечает за доставку отложенных сообщений между акторами.<br/>Не рекомендуется изменять параметры шедулера по умолчанию. |
+| `progress_threshold` | В акторной системе есть возможность запросить отправку сообщения в будущем по расписанию. Возможна ситуация, когда в определенный момент времени системе не удастся отправить все запланированные сообщения. В этом случае система начинает рассылать сообщения в «виртуальном времени», обрабатывая в каждом цикле отправку сообщений за период, не превышающий `progress_threshold` в микросекундах, и продвигая виртуальное время на `progress_threshold`, пока оно не догонит реальное. |
+| `resolution` | При составлении расписания отправки сообщений используются дискретные временные слоты. Длительность слота задается параметром `resolution` в микросекундах. |
## Контроллер памяти {#memory-controller}
@@ -860,24 +860,24 @@ auth_config:
...
```
-Параметр | Описание
---- | ---
-`hosts` | Список имен хостов, на котором работает LDAP сервер
-`port` | Порт для подключения к LDAP серверу
-`base_dn` | Корень поддерева в LDAP каталоге, начиная с которого будет производиться поиск записи пользователя
-`bind_dn` | Отличительное имя (Distinguished Name, DN) сервисного аккаунта, от имени которого выполняется поиск записи пользователя
-`bind_password` | Пароль сервисного аккаунта, от имени которого выполняется поиск записи пользователя
-`search_filter` | Фильтр для поиска записи пользователя в LDAP каталоге. В строке фильтра может встречаться последовательность символов *$username*, которая будет заменена на имя пользователя, запрошенное для аутентификации в базе данных
-`use_tls` | Настройки для конфигурирования TLS соединения между {{ ydb-short-name }} и LDAP сервером
-`enable` | Определяет, будет ли произведена попытка установить TLS-соединение с [использованием запроса `StartTls`](../../security/authentication.md#starttls). При установке значения этого параметра в `true`, необходимо отключить использование схемы соединения `ldaps`, присвоив параметру `ldap_authentication.scheme` значение `ldap`.
-`ca_cert_file` | Путь до файла сертификата удостоверяющего центра
-`cert_require` | Уровень требований к сертификату LDAP сервера.<br/>Возможные значения:<ul><li>`NEVER` - {{ ydb-short-name }} не запрашивает сертификат или проверку проходит любой сертификат.</li><li>`ALLOW` - {{ ydb-short-name }} требует, что бы LDAP сервер предоставил сертификат. Если предоставленному сертификату нельзя доверять, TLS сессия все равно установится.</li><li>`TRY` - {{ ydb-short-name }} требует, что бы LDAP сервер предоставил сертификат. Если предоставленному сертификату нельзя доверять, установление TLS соединения прекращается.</li><li>`DEMAND` и `HARD` - Эти требования эквивалентны параметру `TRY`. По умолчанию установлено значение `DEMAND`.</li></ul>
-`ldap_authentication_domain` | Идентификатор, прикрепляемый к имени пользователя, позволяющий отличать пользователей из LDAP каталога от пользователей аутентифицируемых с помощью других провайдеров. Значение по умолчанию `ldap`
-`scheme` | Схема соединения с LDAP-сервером.<br>Возможные значения:<ul><li>`ldap` — {{ ydb-short-name }} будет выполнять соединение с LDAP-сервером без какого-либо шифрования. Пароли будут отправляться на LDAP-сервер в открытом виде. Это значение установлено по умолчанию.</li><li>`ldaps` — {{ ydb-short-name }} будет выполнять зашифрованное соединение с LDAP-сервером по протоколу TLS с самого первого запроса. Для успешного установления соединения по схеме `ldaps` необходимо отключить использование [запроса `StartTls`](../../security/authentication.md#starttls) в секции `ldap_authentication.use_tls.enable: false` и заполнить информацию о сертификате `ldap_authentication.use_tls.ca_cert_file` и уровне требования сертификата `ldap_authentication.use_tls.cert_require`.</li><li>При использовании любого другого значения будет браться значение по умолчанию - `ldap`.</li></ul>
-`requested_group_attribute` | Атрибут обратного членства в группе. По умолчанию `memberOf`.
-`extended_settings.enable_nested_groups_search` | Флаг определяет, будет ли выполнятся запрос для получения всего дерева групп, в которые входят непосредственные группы пользователя.
-`host` | Имя хоста, на котором работает LDAP-сервер. Это устаревший параметр, вместо него должен использоваться параметр `hosts`.
-`refresh_time` | Определяет время, когда будет попытка обновить информацию о пользователе. Конкретное время обновления будет лежать в интервале от `refresh_time/2` до `refresh_time`
+| Параметр | Описание |
+|-------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `hosts` | Список имен хостов, на котором работает LDAP сервер |
+| `port` | Порт для подключения к LDAP серверу |
+| `base_dn` | Корень поддерева в LDAP каталоге, начиная с которого будет производиться поиск записи пользователя |
+| `bind_dn` | Отличительное имя (Distinguished Name, DN) сервисного аккаунта, от имени которого выполняется поиск записи пользователя |
+| `bind_password` | Пароль сервисного аккаунта, от имени которого выполняется поиск записи пользователя |
+| `search_filter` | Фильтр для поиска записи пользователя в LDAP каталоге. В строке фильтра может встречаться последовательность символов *$username*, которая будет заменена на имя пользователя, запрошенное для аутентификации в базе данных |
+| `use_tls` | Настройки для конфигурирования TLS соединения между {{ ydb-short-name }} и LDAP сервером |
+| `enable` | Определяет, будет ли произведена попытка установить TLS-соединение с [использованием запроса `StartTls`](../../security/authentication.md#starttls). При установке значения этого параметра в `true`, необходимо отключить использование схемы соединения `ldaps`, присвоив параметру `ldap_authentication.scheme` значение `ldap`. |
+| `ca_cert_file` | Путь до файла сертификата удостоверяющего центра |
+| `cert_require`                                  | Уровень требований к сертификату LDAP сервера.<br/>Возможные значения:<ul><li>`NEVER` - {{ ydb-short-name }} не запрашивает сертификат или проверку проходит любой сертификат.</li><li>`ALLOW` - {{ ydb-short-name }} требует, чтобы LDAP сервер предоставил сертификат. Если предоставленному сертификату нельзя доверять, TLS сессия все равно установится.</li><li>`TRY` - {{ ydb-short-name }} требует, чтобы LDAP сервер предоставил сертификат. Если предоставленному сертификату нельзя доверять, установление TLS соединения прекращается.</li><li>`DEMAND` и `HARD` - Эти требования эквивалентны параметру `TRY`. По умолчанию установлено значение `DEMAND`.</li></ul> |
+| `ldap_authentication_domain` | Идентификатор, прикрепляемый к имени пользователя, позволяющий отличать пользователей из LDAP каталога от пользователей аутентифицируемых с помощью других провайдеров. Значение по умолчанию `ldap` |
+| `scheme` | Схема соединения с LDAP-сервером.<br>Возможные значения:<ul><li>`ldap` — {{ ydb-short-name }} будет выполнять соединение с LDAP-сервером без какого-либо шифрования. Пароли будут отправляться на LDAP-сервер в открытом виде. Это значение установлено по умолчанию.</li><li>`ldaps` — {{ ydb-short-name }} будет выполнять зашифрованное соединение с LDAP-сервером по протоколу TLS с самого первого запроса. Для успешного установления соединения по схеме `ldaps` необходимо отключить использование [запроса `StartTls`](../../security/authentication.md#starttls) в секции `ldap_authentication.use_tls.enable: false` и заполнить информацию о сертификате `ldap_authentication.use_tls.ca_cert_file` и уровне требования сертификата `ldap_authentication.use_tls.cert_require`.</li><li>При использовании любого другого значения будет браться значение по умолчанию - `ldap`.</li></ul> |
+| `requested_group_attribute` | Атрибут обратного членства в группе. По умолчанию `memberOf`. |
+| `extended_settings.enable_nested_groups_search` | Флаг определяет, будет ли выполняться запрос для получения всего дерева групп, в которые входят непосредственные группы пользователя. |
+| `host` | Имя хоста, на котором работает LDAP-сервер. Это устаревший параметр, вместо него должен использоваться параметр `hosts`. |
+| `refresh_time`                                  | Определяет время, когда будет предпринята попытка обновить информацию о пользователе. Конкретное время обновления будет лежать в интервале от `refresh_time/2` до `refresh_time` |
## Настройка стабильных имен узлов кластера {#node-broker-config}
@@ -912,13 +912,13 @@ node_broker_config:
Разные виды активностей (фоновые операции, удаление данных по [TTL](../../concepts/ttl.md) и т.д.) запускаются в разных *очередях* брокера ресурсов. Каждая такая очередь имеет лимитированное число ресурсов:
-Название очереди | CPU | Memory | Описание
---- | --- | --- | ---
-`queue_ttl` | 2 | — | Операции удаления данных по [TTL](../../concepts/ttl.md).
-`queue_backup` | 2 | — | Операции [резервного копирования](../../devops/manual/backup-and-recovery.md#s3).
-`queue_restore` | 2 | — | Операции [восстановления из резервной копии](../../devops/manual/backup-and-recovery.md#s3).
-`queue_build_index` | 10 | — | Операции [онлайн-создания вторичного индекса](../../concepts/secondary_indexes.md#index-add).
-`queue_cdc_initial_scan` | 4 | — | [Первоначальное сканирование таблицы](../../concepts/cdc.md#initial-scan).
+| Название очереди | CPU | Memory | Описание |
+|---------------------------| --- | --- |----------------------------------------------------|
+| `queue_ttl` | 2 | — | Операции удаления данных по [TTL](../../concepts/ttl.md). |
+| `queue_backup` | 2 | — | Операции [резервного копирования](../../devops/manual/backup-and-recovery.md#s3). |
+| `queue_restore` | 2 | — | Операции [восстановления из резервной копии](../../devops/manual/backup-and-recovery.md#s3). |
+| `queue_build_index` | 10 | — | Операции [онлайн-создания вторичного индекса](../../concepts/secondary_indexes.md#index-add). |
+| `queue_cdc_initial_scan` | 4 | — | [Первоначальное сканирование таблицы](../../concepts/cdc.md#initial-scan). |
{% note info %}
@@ -936,6 +936,34 @@ resource_broker_config: !inherit
cpu: 4
```
+## Настройка Health Check {#healthcheck-config}
+
+В этом разделе настраиваются пороговые значения и таймауты, используемые [сервисом Health Check](../ydb-sdk/health-check-api.md) {{ ydb-short-name }}. Эти параметры помогают выявлять возможные [проблемы](../ydb-sdk/health-check-api.md#issues), такие как чрезмерные перезапуски или расхождение по времени между динамическими узлами.
+
+### Синтаксис
+
+```yaml
+healthcheck_config:
+ thresholds:
+ node_restarts_yellow: 10
+ node_restarts_orange: 30
+ nodes_time_difference_yellow: 5000
+ nodes_time_difference_orange: 25000
+ tablets_restarts_orange: 30
+ timeout: 20000
+```
+
+### Параметры
+
+| Параметр | Значение по умолчанию | Описание |
+|------------------------------------------|------------------------|---------------------------------------------------------------------------------|
+| `thresholds.node_restarts_yellow` | `10` | Количество перезапусков узлов для генерации предупреждения уровня `YELLOW` |
+| `thresholds.node_restarts_orange` | `30` | Количество перезапусков узлов для генерации предупреждения уровня `ORANGE` |
+| `thresholds.nodes_time_difference_yellow` | `5000` | Максимально допустимое расхождение по времени (в µs) между динамическими узлами для уровня `YELLOW` |
+| `thresholds.nodes_time_difference_orange` | `25000` | Максимально допустимое расхождение по времени (в µs) между динамическими узлами для уровня `ORANGE` |
+| `thresholds.tablets_restarts_orange` | `30` | Количество перезапусков таблеток для генерации предупреждения уровня `ORANGE` |
+| `timeout` | `20000` | Максимальное время ответа от healthcheck (в мс) |
+
## Примеры конфигураций кластеров {#examples}
В [репозитории](https://github.com/ydb-platform/ydb/tree/main/ydb/deploy/yaml_config_examples/) можно найти модельные примеры конфигураций кластеров для самостоятельного развертывания. Ознакомьтесь с ними перед развертыванием кластера.
diff --git a/ydb/docs/ru/core/reference/kafka-api/examples.md b/ydb/docs/ru/core/reference/kafka-api/examples.md
index 88762f37c7..1b70df9afd 100644
--- a/ydb/docs/ru/core/reference/kafka-api/examples.md
+++ b/ydb/docs/ru/core/reference/kafka-api/examples.md
@@ -27,7 +27,7 @@
Поэтому в конфигурации читателя всегда нужно указывать **имя группы читателей** и параметры:
-- `check.crc=false`
+- `check.crcs=false`
- `partition.assignment.strategy=org.apache.kafka.clients.consumer.RoundRobinAssignor`
Ниже даны примеры чтения по Kafka протоколу для разных приложений, языков программирования и фреймворков подключения без аутентификации.
diff --git a/ydb/docs/ru/core/reference/ydb-cli/_includes/commands.md b/ydb/docs/ru/core/reference/ydb-cli/_includes/commands.md
index 24d0147b30..ec8226b89c 100644
--- a/ydb/docs/ru/core/reference/ydb-cli/_includes/commands.md
+++ b/ydb/docs/ru/core/reference/ydb-cli/_includes/commands.md
@@ -36,6 +36,7 @@
[import file tsv](../export-import/import-file.md) | Импорт данных из TSV-файла
[import s3](../export-import/import-s3.md) | Импорт данных из хранилища S3
[init](../profile/create.md) | Инициализация CLI, создание [профиля](../profile/index.md)
+[monitoring healthcheck](../commands/monitoring-healthcheck.md) | Проверка состояния базы
[operation cancel](../operation-cancel.md) | Прерывание исполнения фоновой операции
[operation forget](../operation-forget.md) | Удаление фоновой операции из списка
[operation get](../operation-get.md) | Статус фоновой операции
diff --git a/ydb/docs/ru/core/reference/ydb-cli/commands/monitoring-healthcheck.md b/ydb/docs/ru/core/reference/ydb-cli/commands/monitoring-healthcheck.md
new file mode 100644
index 0000000000..6d3f92162d
--- /dev/null
+++ b/ydb/docs/ru/core/reference/ydb-cli/commands/monitoring-healthcheck.md
@@ -0,0 +1,147 @@
+# Проверка состояния базы данных
+
+{{ ydb-short-name }} имеет встроенную систему самодиагностики, с помощью которой можно получить краткий отчёт о состоянии базы данных и информацию о выявленных проблемах.
+
+Общий вид команды:
+
+```bash
+ydb [global options...] monitoring healthcheck [options...]
+```
+
+* `global options` — [глобальные параметры](global-options.md),
+* `options` — [параметры подкоманды](#options).
+
+## Параметры подкоманды {#options}
+
+#|
+|| Имя | Описание ||
+||`--timeout` | Время, в течение которого должна быть выполнена операция на сервере, мс.||
+||`--format` | Формат вывода. Возможные значения:
+
+* `pretty` — краткий ответ в человекочитаемом формате,
+* `json` — подробный ответ в формате JSON.
+
+Значение по умолчанию — `pretty`.||
+|#
+
+Структура и описание полей ответа приведены в статье [Health Check API](../../ydb-sdk/health-check-api.md#response-structure).
+
+## Примеры {#examples}
+
+### Краткий результат проверки {#example-pretty}
+
+```bash
+{{ ydb-cli }} --profile quickstart monitoring healthcheck --format pretty
+```
+
+Проблем с базой не обнаружено:
+
+```bash
+Healthcheck status: GOOD
+```
+
+Обнаружена деградация базы данных:
+
+```bash
+Healthcheck status: DEGRADED
+```
+
+### Подробный результат проверки {#example-json}
+
+
+```bash
+{{ ydb-cli }} --profile quickstart monitoring healthcheck --format json
+```
+
+Проблем с базой не обнаружено:
+
+```json
+{
+ "self_check_result": "GOOD",
+ "location": {
+ "id": 51059,
+ "host": "my-host.net",
+ "port": 19001
+ }
+}
+```
+
+Обнаружена деградация базы данных:
+
+```json
+{
+ "self_check_result": "DEGRADED",
+ "issue_log": [
+ {
+ "id": "YELLOW-b3c0-70fb",
+ "status": "YELLOW",
+ "message": "Database has multiple issues",
+ "location": {
+ "database": {
+ "name": "/my-cluster/my-database"
+ }
+ },
+ "reason": [
+ "YELLOW-b3c0-1ba8",
+ "YELLOW-b3c0-1c83"
+ ],
+ "type": "DATABASE",
+ "level": 1
+ },
+ {
+ "id": "YELLOW-b3c0-1ba8",
+ "status": "YELLOW",
+ "message": "Compute is overloaded",
+ "location": {
+ "database": {
+ "name": "/my-cluster/my-database"
+ }
+ },
+ "reason": [
+ "YELLOW-b3c0-343a-51059-User"
+ ],
+ "type": "COMPUTE",
+ "level": 2
+ },
+ {
+ "id": "YELLOW-b3c0-343a-51059-User",
+ "status": "YELLOW",
+ "message": "Pool usage is over than 99%",
+ "location": {
+ "compute": {
+ "node": {
+ "id": 51059,
+ "host": "my-host.net",
+ "port": 31043
+ },
+ "pool": {
+ "name": "User"
+ }
+ },
+ "database": {
+ "name": "/my-cluster/my-database"
+ }
+ },
+ "type": "COMPUTE_POOL",
+ "level": 4
+ },
+ {
+ "id": "YELLOW-b3c0-1c83",
+ "status": "YELLOW",
+ "message": "Storage usage over 75%",
+ "location": {
+ "database": {
+ "name": "/my-cluster/my-database"
+ }
+ },
+ "type": "STORAGE",
+ "level": 2
+ }
+ ],
+ "location": {
+ "id": 117,
+ "host": "my-host.net",
+ "port": 19001
+ }
+}
+```
diff --git a/ydb/docs/ru/core/reference/ydb-cli/export-import/_includes/auth-s3.md b/ydb/docs/ru/core/reference/ydb-cli/export-import/_includes/auth-s3.md
index c0ef1db0b9..a267f71ac7 100644
--- a/ydb/docs/ru/core/reference/ydb-cli/export-import/_includes/auth-s3.md
+++ b/ydb/docs/ru/core/reference/ydb-cli/export-import/_includes/auth-s3.md
@@ -16,8 +16,8 @@
Для аутентификации в S3 необходимы два параметра:
-- Идентификатор ключа доступа (access_key_id).
-- Секретный ключ доступа (secret_access_key).
+- Идентификатор ключа доступа (`--access-key`).
+- Секретный ключ доступа (`--secret-key`).
{{ ydb-short-name }} CLI определяет значения этих параметров из следующих источников (в порядке убывания приоритета):
@@ -51,7 +51,7 @@
1. [Установите и сконфигурируйте](https://cloud.yandex.ru/docs/cli/quickstart) {{ yandex-cloud }} CLI.
-2. Получите ID вашего каталога в облаке следующей командой, его понадобится указывать в командах ниже:
+2. Получите ID вашего каталога (`folder-id`) в облаке следующей командой, его понадобится указывать в командах ниже:
```bash
yc config list
@@ -70,7 +70,13 @@
yc iam service-account create --name s3account
```
- Вы можете указать любое имя аккаунта кроме `s3account` или использовать существующий, тогда вам понадобится его также заменять при копировании команд ниже через буфер обмена.
+ Здесь и ниже используется имя аккаунта `s3account`, но вы можете использовать любое другое. При создании аккаунта будет выведен его id, он потребуется далее.
+
+ Вы также можете использовать уже существующий сервисный аккаунт. Чтобы получить id существующего аккаунта по имени, используйте команду:
+
+ ```bash
+ yc iam service-account get --name <account-name>
+ ```
4. [Назначьте сервисному аккаунту](https://cloud.yandex.ru/docs/iam/operations/sa/assign-role-for-sa) роли в соответствии с необходимым уровнем доступа к S3, выполнив команду:
@@ -80,19 +86,19 @@
```bash
yc resource-manager folder add-access-binding <folder-id> \
- --role storage.viewer --subject serviceAccount:s3account
+ --role storage.viewer --subject serviceAccount:<s3-account-id>
```
- Запись (для выгрузки из базы данных {{ ydb-short-name }})
```bash
yc resource-manager folder add-access-binding <folder-id> \
- --role storage.editor --subject serviceAccount:s3account
+ --role storage.editor --subject serviceAccount:<s3-account-id>
```
{% endlist %}
- , где `<folder-id>` - это идентификатор каталога в облаке, полученный на шаге 2.
+ , где `<folder-id>` - это идентификатор каталога в облаке, полученный на шаге 2, а `<s3-account-id>` - идентификатор аккаунта, созданного на шаге 3.
Вы можете также ознакомиться с [полным перечнем](https://cloud.yandex.ru/docs/iam/concepts/access-control/roles#object-storage) ролей {{ yandex-cloud }}.
@@ -114,7 +120,7 @@
```
В данном выводе:
- - `access_key.key_id` - это идентификатор ключа доступа
- - `secret` - это секретный ключ доступа
+ - `access_key.key_id` - это идентификатор ключа доступа (`--access-key`).
+ - `secret` - это секретный ключ доступа (`--secret-key`).
{% include [s3_conn_procure_overlay.md](s3_conn_procure_overlay.md) %}
diff --git a/ydb/docs/ru/core/reference/ydb-cli/toc_i.yaml b/ydb/docs/ru/core/reference/ydb-cli/toc_i.yaml
index 5f8f63f89e..47f8c51ccf 100644
--- a/ydb/docs/ru/core/reference/ydb-cli/toc_i.yaml
+++ b/ydb/docs/ru/core/reference/ydb-cli/toc_i.yaml
@@ -113,6 +113,8 @@ items:
href: commands/config-info.md
- name: Вывод версии YDB CLI
href: version.md
+ - name: Проверка состояния базы данных
+ href: commands/monitoring-healthcheck.md
- name: Нагрузочное тестирование
items:
- name: Обзор
diff --git a/ydb/docs/ru/core/reference/ydb-cli/topic-read.md b/ydb/docs/ru/core/reference/ydb-cli/topic-read.md
index ebccf4d18c..ed8aa08ca5 100644
--- a/ydb/docs/ru/core/reference/ydb-cli/topic-read.md
+++ b/ydb/docs/ru/core/reference/ydb-cli/topic-read.md
@@ -36,12 +36,12 @@
- Задает правило оформления сообщений на выходе. Не все форматы могут работать в потоковом режиме.
- Перечень поддерживаемых форматов:
- Имя | Описание | Поддерживает<br/>потоковый режим?
- ---|---|---
- `single-message`<br/>(по умолчанию)|Вывод содержимого не более одного сообщения без оформления.|-|
- `pretty`|Вывод в псевдографическую таблицу с колонками, содержащими метаинформацию о сообщениях. В колонке `body` выводится само сообщение.|Нет
- `newline-delimited`|Вывод сообщений с добавлением после каждого сообщения разделителя - символа перевода строки `0x0A`|Да
- `concatenated`|Вывод сообщений одного за другим без добавления какого-либо разделителя|Да
+ | Имя | Описание | Поддерживает<br/>потоковый режим? |
+ |-------------------------------------|------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------|
+ | `single-message`<br/>(по умолчанию) | Вывод содержимого не более одного сообщения без оформления. | - |
+ | `pretty` | Вывод в псевдографическую таблицу с колонками, содержащими метаинформацию о сообщениях. В колонке `body` выводится само сообщение. | Нет |
+ | `newline-delimited` | Вывод сообщений с добавлением после каждого сообщения разделителя - символа перевода строки `0x0A` | Да |
+ | `concatenated` | Вывод сообщений одного за другим без добавления какого-либо разделителя | Да |
`--wait` (`-w`): Ожидание появления сообщений
@@ -52,10 +52,10 @@
- Значения по умолчанию и допустимые значения зависят от выбранного формата вывода:
- Поддерживает ли формат<br/>потоковый режим отбора | Значение лимита по умолчанию | Допустимые значения
- ---|---|---
- Нет|10|1-500
- Да|0 (без ограничений)|0-500
+ | Поддерживает ли формат<br/>потоковый режим отбора | Значение лимита по умолчанию | Допустимые значения |
+ |---------------------------------------------------|------------------------------|---------------------|
+ | Нет | 10 | 1-500 |
+ | Да | 0 (без ограничений) | 0-500 |
`--transform VAL`: Метод преобразования сообщений
@@ -74,12 +74,12 @@
### Другие опциональные параметры
-Имя | Описание
----|---
-`--idle-timeout VAL` | Таймаут принятия решения о том что топик пуст, то есть новые сообщения для обработки отсутствуют. <br/>Замеряется время с момента установки соединения при запуске команды, или получения последнего сообщения. Если в течение заданного таймаута с сервера не приходит новых сообщений, топик считается пустым.<br/>Значение по умолчанию — `1s` (1 секунда).
-`--timestamp VAL` | Чтение с заданного в формате [UNIX timestamp](https://ru.wikipedia.org/wiki/Unix-время) времени.<br/>Если параметр не указан, то чтение выполняется с текущей рабочей позиции читателя в топике.<br/>Если параметр указан, то чтение начнется с первого [сообщения](../../concepts/topic.md#message), полученного после указанного времени.
-`--metadata-fields VAL` | Список [атрибутов сообщения](../../concepts/topic.md#message), значения которых нужно выводить в колонках с метаинформацией в формате `pretty`. Если параметр не указан, то выводятся колонки со всеми атрибутами. <br/>Возможные значения:<ul><li>`write_time` — время записи сообщения на сервер в формате [UNIX timestamp](https://ru.wikipedia.org/wiki/Unix-время);</li><li>`meta` — метаданные сообщения;</li><li>`create_time` — время создания сообщения источником в формате [UNIX timestamp](https://ru.wikipedia.org/wiki/Unix-время);</li><li>`seq_no` — [порядковый номер](../../concepts/topic.md#seqno) сообщения;</li><li>`offset` — [порядковый номер сообщения внутри партиции](../../concepts/topic.md#offset);</li><li>`message_group_id` — [идентификатор группы сообщений](../../concepts/topic.md#producer-id);</li><li>`body` — тело сообщения.</li></ul>
-`--partition-ids VAL` | Идентификаторы (порядковые номера) [партиций](../../concepts/topic.md#partitioning), из которых будет производиться чтение.<br/>Если параметр не указан, то чтение производится из всех партиций.
+| Имя | Описание |
+|-------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--idle-timeout VAL` | Таймаут принятия решения о том что топик пуст, то есть новые сообщения для обработки отсутствуют. <br/>Замеряется время с момента установки соединения при запуске команды, или получения последнего сообщения. Если в течение заданного таймаута с сервера не приходит новых сообщений, топик считается пустым.<br/>Значение по умолчанию — `1s` (1 секунда). |
+| `--timestamp VAL` | Чтение с заданного в формате [UNIX timestamp](https://ru.wikipedia.org/wiki/Unix-время) времени.<br/>Если параметр не указан, то чтение выполняется с текущей рабочей позиции читателя в топике.<br/>Если параметр указан, то чтение начнется с первого [сообщения](../../concepts/topic.md#message), полученного после указанного времени. |
+| `--metadata-fields VAL` | Список [атрибутов сообщения](../../concepts/topic.md#message), значения которых нужно выводить в колонках с метаинформацией в формате `pretty`. Если параметр не указан, то выводятся колонки со всеми атрибутами. <br/>Возможные значения:<ul><li>`write_time` — время записи сообщения на сервер в формате [UNIX timestamp](https://ru.wikipedia.org/wiki/Unix-время);</li><li>`meta` — метаданные сообщения;</li><li>`create_time` — время создания сообщения источником в формате [UNIX timestamp](https://ru.wikipedia.org/wiki/Unix-время);</li><li>`seq_no` — [порядковый номер](../../concepts/topic.md#seqno) сообщения;</li><li>`offset` — [порядковый номер сообщения внутри партиции](../../concepts/topic.md#offset);</li><li>`message_group_id` — [идентификатор группы сообщений](../../concepts/topic.md#producer-id);</li><li>`body` — тело сообщения.</li></ul> |
+| `--partition-ids VAL` | Идентификаторы (порядковые номера) [партиций](../../concepts/topic.md#partitioning), из которых будет производиться чтение.<br/>Если параметр не указан, то чтение производится из всех партиций. |
## Примеры {#examples}
diff --git a/ydb/docs/ru/core/reference/ydb-cli/workload-click-bench.md b/ydb/docs/ru/core/reference/ydb-cli/workload-click-bench.md
index 113ff509bd..f8f9d497e9 100644
--- a/ydb/docs/ru/core/reference/ydb-cli/workload-click-bench.md
+++ b/ydb/docs/ru/core/reference/ydb-cli/workload-click-bench.md
@@ -16,9 +16,9 @@
### Доступные параметры { #common_options }
-Имя | Описание | Значение по умолчанию
----|---|---
-`--path` или `-p` | Путь к таблице | `clickbench/hits`
+| Имя | Описание | Значение по умолчанию |
+|--------------------|----------------|-----------------------|
+| `--path` или `-p` | Путь к таблице | `clickbench/hits` |
## Инициализация нагрузочного теста { #init }
@@ -36,14 +36,14 @@
### Доступные параметры { #init_options }
-Имя | Описание | Значение по умолчанию
----|---|---
-`--store <значение>` | Тип хранилища таблиц. Возможные значения: `row`, `column`, `external-s3 | `row`
-`--external-s3-prefix <значение>` | Актуально только для внешних таблиц. Корневой путь к набору данных в S3-хранилище |
-`--external-s3-endpoint <значение>` или `-e <значение>` | Актуально только для внешних таблиц. Ссылка на S3-Bucket с данными |
-`--string` | Использовать для текстовых полей тип `String` | `Utf8`
-`--datetime` | Использовать для полей, связанных со временем типа `Date`, `Datetime` и `Timestamp` | `Date32`, `Datetime64` и `Timestamp64`.
-`--clear` | Если по указанному пути таблица уже была создана, она будет удалена |
+| Имя | Описание | Значение по умолчанию |
+|---------------------------------------------------------|-------------------------------------------------------------------------------------|-----------------------------------------|
+| `--store <значение>`                                    | Тип хранилища таблиц. Возможные значения: `row`, `column`, `external-s3` | `row`                                   |
+| `--external-s3-prefix <значение>` | Актуально только для внешних таблиц. Корневой путь к набору данных в S3-хранилище | |
+| `--external-s3-endpoint <значение>` или `-e <значение>` | Актуально только для внешних таблиц. Ссылка на S3-Bucket с данными | |
+| `--string` | Использовать для текстовых полей тип `String` | `Utf8` |
+| `--datetime` | Использовать для полей, связанных со временем типа `Date`, `Datetime` и `Timestamp` | `Date32`, `Datetime64` и `Timestamp64`. |
+| `--clear` | Если по указанному пути таблица уже была создана, она будет удалена | |
## Загрузка данных в таблицу { #load }
@@ -58,11 +58,11 @@ wget https://datasets.clickhouse.com/hits_compatible/hits.csv.gz
### Доступные параметры { #load_files_options }
-Имя | Описание | Значение по умолчанию
----|---|---
-`--input <путь>` или `-i <путь>` | Путь к исходным файлам с данными. Поддерживаются как распакованные и запакованные csv и tsv файлы, так и директории с такими файлами. Данные могут быть загружены с официального сайта ClickBench: [csv.gz](https://datasets.clickhouse.com/hits_compatible/hits.csv.gz), [tsv.gz](https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz). Для ускорения загрузки можно разбить эти файлы на более мелкие части, в этом случае части буду загружаться параллельно. |
-`--state <путь>` | Путь к файлу состояния загрузки. Если загрузка была прервана по какой-то причине, при новом запуске загрузка будет продолжена с того же места. |
-`--clear-state` | Актуально, если задан параметр `--state`. Очистить файл состояния и начать загрузку сначала. |
+| Имя | Описание | Значение по умолчанию |
+|----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------|
+| `--input <путь>` или `-i <путь>` | Путь к исходным файлам с данными. Поддерживаются как распакованные и запакованные csv и tsv файлы, так и директории с такими файлами. Данные могут быть загружены с официального сайта ClickBench: [csv.gz](https://datasets.clickhouse.com/hits_compatible/hits.csv.gz), [tsv.gz](https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz). Для ускорения загрузки можно разбить эти файлы на более мелкие части, в этом случае части будут загружаться параллельно. | |
+| `--state <путь>` | Путь к файлу состояния загрузки. Если загрузка была прервана по какой-то причине, при новом запуске загрузка будет продолжена с того же места. | |
+| `--clear-state` | Актуально, если задан параметр `--state`. Очистить файл состояния и начать загрузку сначала. | |
{% include [load_options](./_includes/workload/load_options.md) %}
@@ -86,13 +86,13 @@ wget https://datasets.clickhouse.com/hits_compatible/hits.csv.gz
### Опции, специфичные для ClickBench { #run_clickbench_options }
-Имя | Описание | Значение по умолчанию
----|---|---
-`--ext-queries <запросы>` или `-q <запросы>` | Внешние запросы для выполнения нагрузки, разделенные точкой с запятой. |
-`--ext-queries-file <имя>` | Имя файла, в котором можно указать внешние запросы для выполнения нагрузки, разделенные точкой с запятой. |
-`--ext-query-dir <имя>` | Директория с внешними запросами для выполнения нагрузки. Запросы должны лежать в файлах с именами `q[0-42].sql`. |
-`--ext-results-dir <имя>` | Директория с внешними результатами запросов для сравнения. Результаты должны лежать в файлах с именами `q[0-42].sql`. |
-`--check-canonical` или `-c` | Использовать специальные детерминированные внутренние запросы и сверять результаты с каноническими. |
+| Имя | Описание | Значение по умолчанию |
+|----------------------------------------------|-----------------------------------------------------------------------------------------------------------------------|-----------------------|
+| `--ext-queries <запросы>` или `-q <запросы>` | Внешние запросы для выполнения нагрузки, разделенные точкой с запятой. | |
+| `--ext-queries-file <имя>` | Имя файла, в котором можно указать внешние запросы для выполнения нагрузки, разделенные точкой с запятой. | |
+| `--ext-query-dir <имя>` | Директория с внешними запросами для выполнения нагрузки. Запросы должны лежать в файлах с именами `q[0-42].sql`. | |
+| `--ext-results-dir <имя>` | Директория с внешними результатами запросов для сравнения. Результаты должны лежать в файлах с именами `q[0-42].sql`. | |
+| `--check-canonical` или `-c` | Использовать специальные детерминированные внутренние запросы и сверять результаты с каноническими. | |
## Очистка данных теста { #cleanup }
diff --git a/ydb/docs/ru/core/reference/ydb-cli/workload-tpcds.md b/ydb/docs/ru/core/reference/ydb-cli/workload-tpcds.md
index d216696de2..95e83a61c7 100644
--- a/ydb/docs/ru/core/reference/ydb-cli/workload-tpcds.md
+++ b/ydb/docs/ru/core/reference/ydb-cli/workload-tpcds.md
@@ -14,9 +14,9 @@
### Доступные параметры { #common_options }
-Имя | Описание | Значение по умолчанию
----|---|---
-`--path` или `-p` | Путь к каталогу с таблицами. | `/`
+| Имя | Описание | Значение по умолчанию |
+|-------------------|------------------------------|-----------------------|
+| `--path` или `-p` | Путь к каталогу с таблицами. | `/` |
## Инициализация нагрузочного теста {#init}
@@ -50,14 +50,14 @@
### Доступные параметры {#load_files_options}
-Имя | Описание | Значение по умолчанию
----|---|---
-`--scale <значение>` | Масштаб данных. Обычно используются степени десяти. |
-`--tables <значение>` | Список таблиц для генерации, разделенный запятыми. Доступные таблицы: `customer`, `nation`, `order_line`, `part_psupp`, `region`, `supplier`. | Все таблицы
-`--proccess-count <значение>` или `-C <значение>` | Генерация данных может быть на разбита на несколько процессов, этот параметр задает количество процессов. | 1
-`--proccess-index <значение>` или `-i <значение>` | Генерация данных может быть на разбита на несколько процессов, этот параметр задает номер процесса. | 0.
-`--state <путь>` | Путь к файлу состояния загрузки. Если загрузка была прервана по какой-то причине, при новом запуске загрузка будет продолжена с того же места. |
-`--clear-state` | Актуально, если задан параметр `--state`. Очистить файл состояния и начать загрузку сначала. |
+| Имя | Описание | Значение по умолчанию |
+|---------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------|
+| `--scale <значение>` | Масштаб данных. Обычно используются степени десяти. | |
+| `--tables <значение>` | Список таблиц для генерации, разделенный запятыми. Доступные таблицы: `customer`, `nation`, `order_line`, `part_psupp`, `region`, `supplier`. | Все таблицы |
+| `--proccess-count <значение>` или `-C <значение>` | Генерация данных может быть разбита на несколько процессов, этот параметр задает количество процессов.                                          | 1                     |
+| `--proccess-index <значение>` или `-i <значение>` | Генерация данных может быть разбита на несколько процессов, этот параметр задает номер процесса.                                                | 0                     |
+| `--state <путь>` | Путь к файлу состояния загрузки. Если загрузка была прервана по какой-то причине, при новом запуске загрузка будет продолжена с того же места. | |
+| `--clear-state` | Актуально, если задан параметр `--state`. Очистить файл состояния и начать загрузку сначала. | |
{% include [load_options](./_includes/workload/load_options.md) %}
@@ -81,9 +81,9 @@
### Опции, специфичные для TPC-DS { #run_tpcds_options }
-Имя | Описание | Значение по умолчанию
----|---|---
-`--ext-query-dir <имя>` | Директория с внешними запросами для выполнения нагрузки. Запросы должны лежать в файлах с именами `q[1-99].sql`. |
+| Имя | Описание | Значение по умолчанию |
+|--------------------------|------------------------------------------------------------------------------------------------------------------|-----------------------|
+| `--ext-query-dir <имя>` | Директория с внешними запросами для выполнения нагрузки. Запросы должны лежать в файлах с именами `q[1-99].sql`. | |
## Очистка данных теста { #cleanup }
diff --git a/ydb/docs/ru/core/reference/ydb-cli/workload-tpch.md b/ydb/docs/ru/core/reference/ydb-cli/workload-tpch.md
index 31100a5e6a..8a3aa7a41a 100644
--- a/ydb/docs/ru/core/reference/ydb-cli/workload-tpch.md
+++ b/ydb/docs/ru/core/reference/ydb-cli/workload-tpch.md
@@ -14,9 +14,9 @@
### Доступные параметры {#common_options}
-Имя | Описание | Значение по умолчанию
----|---|---
-`--path` или `-p` | Путь к каталогу с таблицами. | `/`
+| Имя | Описание | Значение по умолчанию |
+|--------------------|------------------------------|------------------------|
+| `--path` или `-p` | Путь к каталогу с таблицами. | `/` |
## Инициализация нагрузочного теста { #init }
@@ -50,14 +50,14 @@
### Доступные параметры { #load_files_options }
-Имя | Описание | Значение по умолчанию
----|---|---
-`--scale <значение>` | Масштаб данных. Обычно используются степени десяти. |
-`--tables <значение>` | Список таблиц для генерации, разделенный запятыми. Доступные таблицы: `customer`, `nation`, `order_line`, `part_psupp`, `region`, `supplier`. | Все таблицы.
-`--proccess-count <значение>` или `-C <значение>` | Генерация данных может быть на разбита на несколько процессов, этот параметр задает количество процессов. | 1
-`--proccess-index <значение>` или `-i <значение>` | Генерация данных может быть на разбита на несколько процессов, этот параметр задает номер процесса. | 0
-`--state <путь>` | Путь к файлу состояния загрузки. Если загрузка была прервана по какой-то причине, при новом запуске загрузка будет продолжена с того же места. |
-`--clear-state` | Актуально, если задан параметр `--state`. Очистить файл состояния и начать загрузку сначала. |
+| Имя | Описание | Значение по умолчанию |
+|---------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------|
+| `--scale <значение>` | Масштаб данных. Обычно используются степени десяти. | |
+| `--tables <значение>` | Список таблиц для генерации, разделенный запятыми. Доступные таблицы: `customer`, `nation`, `order_line`, `part_psupp`, `region`, `supplier`. | Все таблицы. |
+| `--proccess-count <значение>` или `-C <значение>` | Генерация данных может быть разбита на несколько процессов, этот параметр задает количество процессов.                                          | 1                     |
+| `--proccess-index <значение>` или `-i <значение>` | Генерация данных может быть разбита на несколько процессов, этот параметр задает номер процесса.                                                | 0                     |
+| `--state <путь>` | Путь к файлу состояния загрузки. Если загрузка была прервана по какой-то причине, при новом запуске загрузка будет продолжена с того же места. | |
+| `--clear-state` | Актуально, если задан параметр `--state`. Очистить файл состояния и начать загрузку сначала. | |
{% include [load_options](./_includes/workload/load_options.md) %}
@@ -81,9 +81,9 @@
### Опции, специфичные для TPC-H { #run_tpch_options }
-Имя | Описание | Значение по умолчанию
----|---|---
-`--ext-query-dir <имя>` | Директория с внешними запросами для выполнения нагрузки. Запросы должны лежать в файлах с именами `q[1-23].sql`. |
+| Имя | Описание | Значение по умолчанию |
+|-------------------------|------------------------------------------------------------------------------------------------------------------|-----------------------|
+| `--ext-query-dir <имя>` | Директория с внешними запросами для выполнения нагрузки. Запросы должны лежать в файлах с именами `q[1-23].sql`. | |
## Очистка данных теста { #cleanup }
diff --git a/ydb/docs/ru/core/reference/ydb-sdk/health-check-api.md b/ydb/docs/ru/core/reference/ydb-sdk/health-check-api.md
index 008f65c1f3..97eb972b06 100644
--- a/ydb/docs/ru/core/reference/ydb-sdk/health-check-api.md
+++ b/ydb/docs/ru/core/reference/ydb-sdk/health-check-api.md
@@ -188,6 +188,12 @@ message IssueLog {
**Описание:** Эта ошибка не ожидается. Внутренняя ошибка.
+#### Group layout is incorrect
+
+**Описание:** Группа хранилища была настроена некорректно.
+
+**Действия при срабатывании:** В [Embedded UI](../embedded-ui/ydb-monitoring.md) перейти на страницу базы данных, выбрать вкладку `Storage`, по известному `id` группы проверить конфигурацию узлов и дисков.
+
#### Group degraded
**Описание:** В группе недоступно допустимое число дисков.
diff --git a/ydb/docs/ru/core/reference/ydb-sdk/topic.md b/ydb/docs/ru/core/reference/ydb-sdk/topic.md
index 5c4025d82f..753d38630f 100644
--- a/ydb/docs/ru/core/reference/ydb-sdk/topic.md
+++ b/ydb/docs/ru/core/reference/ydb-sdk/topic.md
@@ -25,7 +25,7 @@
[Примеры на GitHub](https://github.com/ydb-platform/ydb-python-sdk/tree/main/examples/topic)
- C#
-
+
[Примеры на GitHub](https://github.com/ydb-platform/ydb-dotnet-sdk/tree/main/examples/src/Topic)
@@ -117,7 +117,7 @@
loggerFactory: loggerFactory
);
```
-
+
В этом примере используется анонимная аутентификация. Подробнее про [соединение с базой данных](../../concepts/connect.md) и [аутентификацию](../../security/authentication.md).
Фрагмент кода приложения для создания различных клиентов к топикам:
@@ -129,7 +129,7 @@
{
ProducerId = "ProducerId_Example"
}.Build();
-
+
await using var reader = new ReaderBuilder<string>(driver)
{
ConsumerName = "Consumer_Example",
@@ -209,7 +209,7 @@
.build())
.build());
```
-
+
- C#
Пример создания топика со списком поддерживаемых кодеков и минимальным количеством партиций:
@@ -385,7 +385,7 @@
```java
topicClient.dropTopic(topicPath);
```
-
+
- C#
```c#
@@ -503,7 +503,7 @@
return null;
});
```
-
+
- C#
```c#
@@ -770,7 +770,7 @@
}
});
```
-
+
- С#
Асинхронная запись сообщения в топик. В случае переполнения внутреннего буфера будет ожидать, когда буфер освободится для повторной отправки.
@@ -784,7 +784,7 @@
```c#
var writeCts = new CancellationTokenSource();
writeCts.CancelAfter(TimeSpan.FromSeconds(3));
-
+
await writer.WriteAsync("Hello, Example YDB Topics!", writeCts.Token);
```
@@ -1002,6 +1002,54 @@
})
```
+- Python
+
+ Для записи в топик в транзакции необходимо создать транзакционного писателя через вызов `topic_client.tx_writer`. После этого можно отправлять сообщения, как обычно. Закрывать транзакционного писателя не требуется — это происходит автоматически при завершении транзакции.
+
+ В примере ниже нет явного вызова `tx.commit()` — он происходит неявно при успешном завершении лямбды `callee`.
+
+ [Пример на GitHub](https://github.com/ydb-platform/ydb-python-sdk/blob/main/examples/topic/topic_transactions_example.py)
+
+ ```python
+ with ydb.QuerySessionPool(driver) as session_pool:
+
+ def callee(tx: ydb.QueryTxContext):
+ tx_writer: ydb.TopicTxWriter = driver.topic_client.tx_writer(tx, topic)
+
+ for i in range(message_count):
+ result_stream = tx.execute(query=f"select {i} as res;")
+ for result_set in result_stream:
+ message = str(result_set.rows[0]["res"])
+ tx_writer.write(ydb.TopicWriterMessage(message))
+ print(f"Message {message} was written with tx.")
+
+ session_pool.retry_tx_sync(callee)
+ ```
+
+- Python (asyncio)
+
+ Для записи в топик в транзакции необходимо создать транзакционного писателя через вызов `topic_client.tx_writer`. После этого можно отправлять сообщения, как обычно. Закрывать транзакционного писателя не требуется — это происходит автоматически при завершении транзакции.
+
+ В примере ниже нет явного вызова `tx.commit()` — он происходит неявно при успешном завершении лямбды `callee`.
+
+ [Пример на GitHub](https://github.com/ydb-platform/ydb-python-sdk/blob/main/examples/topic/topic_transactions_async_example.py)
+
+ ```python
+ async with ydb.aio.QuerySessionPool(driver) as session_pool:
+
+ async def callee(tx: ydb.aio.QueryTxContext):
+ tx_writer: ydb.TopicTxWriterAsyncIO = driver.topic_client.tx_writer(tx, topic)
+
+ for i in range(message_count):
+ async with await tx.execute(query=f"select {i} as res;") as result_stream:
+ async for result_set in result_stream:
+ message = str(result_set.rows[0]["res"])
+ await tx_writer.write(ydb.TopicWriterMessage(message))
+ print(f"Message {result_set.rows[0]['res']} was written with tx.")
+
+ await session_pool.retry_tx_async(callee)
+ ```
+
- Java (sync)
[Пример на GitHub](https://github.com/ydb-platform/ydb-java-examples/blob/develop/ydb-cookbook/src/main/java/tech/ydb/examples/topic/transactions/TransactionWriteSync.java)
@@ -1281,7 +1329,7 @@
{
ConsumerName = "Consumer_Example",
SubscribeSettings = { new SubscribeSettings(topicName) }
- }.Build();
+ }.Build();
```
{% endlist %}
@@ -1343,7 +1391,7 @@
.build())
.build();
```
-
+
- C#
```c#
@@ -1530,7 +1578,7 @@
}
}
```
-
+
- C#
```c#
@@ -1542,7 +1590,7 @@
foreach (var message in batchMessages.Batch)
{
- logger.LogInformation("Received message: [{MessageData}]", message.Data);
+ logger.LogInformation("Received message: [{MessageData}]", message.Data);
}
}
}
@@ -1620,7 +1668,7 @@
}
});
```
-
+
- C#
```c#
@@ -1726,7 +1774,7 @@
});
}
```
-
+
- С#
```c#
@@ -1738,7 +1786,7 @@
foreach (var message in batchMessages.Batch)
{
- logger.LogInformation("Received message: [{MessageData}]", message.Data);
+ logger.LogInformation("Received message: [{MessageData}]", message.Data);
}
try
@@ -1965,6 +2013,42 @@
}
```
+- Python
+
+ Для чтения сообщений в рамках транзакции следует использовать метод `reader.receive_batch_with_tx`. Он прочитает пакет сообщений и добавит их коммит в транзакцию, при этом отдельно коммитить эти сообщения не требуется. Читателя сообщений можно использовать повторно в разных транзакциях. При этом важно, чтобы порядок коммита транзакций соответствовал порядку получения сообщений от читателя, так как коммиты сообщений в топике должны выполняться строго по порядку - в противном случае транзакция получит ошибку на попытке сделать коммит. Проще всего это сделать, если использовать читателя в цикле.
+
+ [Пример на GitHub](https://github.com/ydb-platform/ydb-python-sdk/blob/main/examples/topic/topic_transactions_example.py)
+
+ ```python
+ with driver.topic_client.reader(topic, consumer) as reader:
+ with ydb.QuerySessionPool(driver) as session_pool:
+ for _ in range(message_count):
+
+ def callee(tx: ydb.QueryTxContext):
+ batch = reader.receive_batch_with_tx(tx, max_messages=1)
+ print(f"Message {batch.messages[0].data.decode()} was read with tx.")
+
+ session_pool.retry_tx_sync(callee)
+ ```
+
+- Python (asyncio)
+
+ Для чтения сообщений в рамках транзакции следует использовать метод `reader.receive_batch_with_tx`. Он прочитает пакет сообщений и добавит их коммит в транзакцию, при этом отдельно коммитить эти сообщения не требуется. Читателя сообщений можно использовать повторно в разных транзакциях. При этом важно, чтобы порядок коммита транзакций соответствовал порядку получения сообщений от читателя, так как коммиты сообщений в топике должны выполняться строго по порядку - в противном случае транзакция получит ошибку на попытке сделать коммит. Проще всего это сделать, если использовать читателя в цикле.
+
+ [Пример на GitHub](https://github.com/ydb-platform/ydb-python-sdk/blob/main/examples/topic/topic_transactions_async_example.py)
+
+ ```python
+ async with driver.topic_client.reader(topic, consumer) as reader:
+ async with ydb.aio.QuerySessionPool(driver) as session_pool:
+ for _ in range(message_count):
+
+ async def callee(tx: ydb.aio.QueryTxContext):
+ batch = await reader.receive_batch_with_tx(tx, max_messages=1)
+ print(f"Message {batch.messages[0].data.decode()} was read with tx.")
+
+ await session_pool.retry_tx_async(callee)
+ ```
+
- Java (sync)
[Пример на GitHub](https://github.com/ydb-platform/ydb-java-examples/blob/develop/ydb-cookbook/src/main/java/tech/ydb/examples/topic/transactions/TransactionReadSync.java)
diff --git a/ydb/docs/ru/core/security/audit-log.md b/ydb/docs/ru/core/security/audit-log.md
index 5c94302fc4..fd7df8ca92 100644
--- a/ydb/docs/ru/core/security/audit-log.md
+++ b/ydb/docs/ru/core/security/audit-log.md
@@ -24,28 +24,28 @@ _Аудитный лог_ — это поток, который содержит
Информация о каждой операции записывается в аудитный лог в виде отдельного события. Каждое событие содержит набор атрибутов. Одни атрибуты являются общими для любых событий, другие определяются компонентом {{ ydb-short-name }}, в котором произошло событие.
-| Атрибут | Описание |
-|:----|:----|
-| **Общие атрибуты** ||
-| `subject`| SID источника события (формат `<login>@<subsystem>`). Если обязательная аутентификация не включена, атрибут будет иметь значение `{none}`.<br/>Обязательный.
-| `operation`| Название операции или действия, сходны с синтаксисом YQL (например, `ALTER DATABASE`, `CREATE TABLE`).<br/>Обязательный.
-| `status` | Статус завершения операции.<br/>Возможные значения:<ul><li>`SUCCESS` — операция завершена успешно;</li><li>`ERROR` — операция завершена с ошибкой;</li><li>`IN-PROCESS` — операция выполняется.</li></ul>Обязательный.
-| `reason` | Сообщение об ошибке.<br/>Необязательный.
-| `component` | Имя компонента {{ ydb-short-name }} — источника события (например, `schemeshard`).<br/>Необязательный.
-| `request_id`| Уникальный идентификатор запроса, вызвавшего операцию. По `request_id` можно отличать события разных операций и связывать события в единый аудитный контекст операции.<br/>Необязательный.
-| `remote_address` | IP-адрес клиента, приславшего запрос.<br/>Необязательный.
-| `detailed_status` | Статус, который передает компонент {{ ydb-short-name }} (например `StatusAccepted`, `StatusInvalidParameter`, `StatusNameConflict`).<br/>Необязательный.
-| **Атрибуты владения и разрешений** ||
-| `new_owner` | SID нового владельца объекта при передаче владения.<br/>Необязательный.
-| `acl_add` | Список добавленных разрешений при создании объектов или изменении разрешений в [краткой записи](./short-access-control-notation.md) (например, `[+R:someuser]`).<br/>Необязательный.
-| `acl_remove` | Список удаленных разрешений при изменении разрешений в [краткой записи](./short-access-control-notation.md) (например, `[-R:somegroup]`).<br/>Необязательный.
-| **Пользовательские атрибуты** ||
-| `user_attrs_add` | Список добавленных пользовательских атрибутов при создании объектов или изменении атрибутов (например, `[attr_name1: A, attr_name2: B]`).<br/>Необязательный.
-| `user_attrs_remove` | Список удаленных пользовательских атрибутов при изменении атрибутов (например, `[attr_name1, attr_name2]`).<br/>Необязательный.
-| **Атрибуты компонента SchemeShard** ||
-| `tx_id`| Уникальный идентификатор транзакции. Как и `request_id`, может быть использован для различения событий разных операций.<br/>Обязательный.
-| `database` | Путь базы данных (например, `/my_dir/db`).<br/>Обязательный.
-| `paths` | Список путей внутри БД, которые изменяет операция (например, `[/my_dir/db/table-a, /my_dir/db/table-b]`).<br/>Обязательный.
+| Атрибут | Описание |
+|:------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **Общие атрибуты** | |
+| `subject` | SID источника события (формат `<login>@<subsystem>`). Если обязательная аутентификация не включена, атрибут будет иметь значение `{none}`.<br/>Обязательный. |
+| `operation` | Название операции или действия, сходны с синтаксисом YQL (например, `ALTER DATABASE`, `CREATE TABLE`).<br/>Обязательный. |
+| `status` | Статус завершения операции.<br/>Возможные значения:<ul><li>`SUCCESS` — операция завершена успешно;</li><li>`ERROR` — операция завершена с ошибкой;</li><li>`IN-PROCESS` — операция выполняется.</li></ul>Обязательный. |
+| `reason` | Сообщение об ошибке.<br/>Необязательный. |
+| `component` | Имя компонента {{ ydb-short-name }} — источника события (например, `schemeshard`).<br/>Необязательный. |
+| `request_id` | Уникальный идентификатор запроса, вызвавшего операцию. По `request_id` можно отличать события разных операций и связывать события в единый аудитный контекст операции.<br/>Необязательный. |
+| `remote_address` | IP-адрес клиента, приславшего запрос.<br/>Необязательный. |
+| `detailed_status` | Статус, который передает компонент {{ ydb-short-name }} (например `StatusAccepted`, `StatusInvalidParameter`, `StatusNameConflict`).<br/>Необязательный. |
+| **Атрибуты владения и разрешений** | |
+| `new_owner` | SID нового владельца объекта при передаче владения.<br/>Необязательный. |
+| `acl_add` | Список добавленных разрешений при создании объектов или изменении разрешений в [краткой записи](./short-access-control-notation.md) (например, `[+R:someuser]`).<br/>Необязательный. |
+| `acl_remove` | Список удаленных разрешений при изменении разрешений в [краткой записи](./short-access-control-notation.md) (например, `[-R:somegroup]`).<br/>Необязательный. |
+| **Пользовательские атрибуты** | |
+| `user_attrs_add` | Список добавленных пользовательских атрибутов при создании объектов или изменении атрибутов (например, `[attr_name1: A, attr_name2: B]`).<br/>Необязательный. |
+| `user_attrs_remove` | Список удаленных пользовательских атрибутов при изменении атрибутов (например, `[attr_name1, attr_name2]`).<br/>Необязательный. |
+| **Атрибуты компонента SchemeShard** | |
+| `tx_id` | Уникальный идентификатор транзакции. Как и `request_id`, может быть использован для различения событий разных операций.<br/>Обязательный. |
+| `database` | Путь базы данных (например, `/my_dir/db`).<br/>Обязательный. |
+| `paths` | Список путей внутри БД, которые изменяет операция (например, `[/my_dir/db/table-a, /my_dir/db/table-b]`).<br/>Обязательный. |
## Включение аудитного лога {#enabling-audit-log}
@@ -63,14 +63,14 @@ audit_config:
format: audit_log_format
```
-Ключ | Описание
---- | ---
-`file_backend` | Сохранять аудитный лог в файл на каждом узле кластера.</ul>Необязательный.
-`format` | Формат аудитного лога. Значение по умолчанию: `JSON`.<br/>Возможные значения:<ul><li>`JSON` — [JSON]{% if lang == "ru" %}(https://ru.wikipedia.org/wiki/JSON){% endif %}{% if lang == "en" %}(https://en.wikipedia.org/wiki/JSON){% endif %} в сериализованном виде;</li><li>`TXT` — текст.</ul>Необязательный.
-`file_path` | Путь к файлу, в который будет направлен аудитный лог. Путь и файл в случае их отсутствия будут созданы на каждом узле при старте кластера. Если файл существует, запись в него будет продолжена.<br/>Обязательный при использовании `file_backend`.
-`unified_agent_backend` | Направить аудитный лог в Unified Agent. Необходимо также описать секцию `uaclient_config` в [конфигурации кластера](../reference/configuration/index.md).</ul>Необязательный.
-`log_name` | Метаданные сессии, которые передаются вместе с сообщением. Позволяют направить логирующий поток в один или несколько дочерних каналов по условию `_log_name: "session_meta_log_name"`.<br/>Необязательный.
-`stderr_backend` | Направить аудитный лог в стандартный вывод ошибок `stderr`.</ul>Необязательный.
+| Ключ | Описание |
+|-------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `file_backend` | Сохранять аудитный лог в файл на каждом узле кластера.</ul>Необязательный. |
+| `format` | Формат аудитного лога. Значение по умолчанию: `JSON`.<br/>Возможные значения:<ul><li>`JSON` — [JSON]{% if lang == "ru" %}(https://ru.wikipedia.org/wiki/JSON){% endif %}{% if lang == "en" %}(https://en.wikipedia.org/wiki/JSON){% endif %} в сериализованном виде;</li><li>`TXT` — текст.</ul>Необязательный. |
+| `file_path` | Путь к файлу, в который будет направлен аудитный лог. Путь и файл в случае их отсутствия будут созданы на каждом узле при старте кластера. Если файл существует, запись в него будет продолжена.<br/>Обязательный при использовании `file_backend`. |
+| `unified_agent_backend` | Направить аудитный лог в Unified Agent. Необходимо также описать секцию `uaclient_config` в [конфигурации кластера](../reference/configuration/index.md).</ul>Необязательный. |
+| `log_name` | Метаданные сессии, которые передаются вместе с сообщением. Позволяют направить логирующий поток в один или несколько дочерних каналов по условию `_log_name: "session_meta_log_name"`.<br/>Необязательный. |
+| `stderr_backend` | Направить аудитный лог в стандартный вывод ошибок `stderr`.</ul>Необязательный. |
Пример конфигурации для сохранения аудитного лога в текстовом формате в файл `/var/log/ydb-audit.log`:
diff --git a/ydb/docs/ru/core/security/short-access-control-notation.md b/ydb/docs/ru/core/security/short-access-control-notation.md
index b79a2f2948..97534869bf 100644
--- a/ydb/docs/ru/core/security/short-access-control-notation.md
+++ b/ydb/docs/ru/core/security/short-access-control-notation.md
@@ -27,48 +27,49 @@
Группы разрешений — это объединения нескольких разрешений. По возможности в краткой записи будет указана одна из групп.
Например, `+R:subject` — разрешение на чтение.
-| Группа | Описание |
-|:----:|:----|
-| `L` | (list) перечисление. Состоит из разрешений на чтение аттрибутов ACL и описание объектов.
-| `R` | (read) чтение. Состоит из разрешений на чтение из таблиц, топиков и перечисления.
-| `W` | (write) запись. Состоит из разрешений на обновление и удаление записей из таблиц, запись атрибутов ACL, создание подкаталогов, создание таблиц, очередей, изменение и удаление объектов, изменении пользовательских атрибутов
-| `UL` | (use legacy) использование (устаревшее). Состоит из разрешений за чтение, запись и предоставления прав доступа
-| `U` | (use) использование. Состоит из разрешений за чтение, запись, предоставления прав доступа и отправку запросов к БД
-| `M` | (manage) управление. Состоит из разрешений на создание и удаление БД
-| `FL` | (full legacy) все права (устаревшее). Состоит из разрешений на использование (устаревшее) и управление
-| `F` | (full) все права. Состоит из разрешений на использование и управление
+
+| Группа | Описание |
+|:------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `L`    | (list) перечисление. Состоит из разрешений на чтение атрибутов ACL и описание объектов.                                                                                                                       |
+| `R` | (read) чтение. Состоит из разрешений на чтение из таблиц, топиков и перечисления. |
+| `W`    | (write) запись. Состоит из разрешений на обновление и удаление записей из таблиц, запись атрибутов ACL, создание подкаталогов, создание таблиц, очередей, изменение и удаление объектов, изменение пользовательских атрибутов  |
+| `UL`   | (use legacy) использование (устаревшее). Состоит из разрешений на чтение, запись и предоставление прав доступа                                                                                                                |
+| `U`    | (use) использование. Состоит из разрешений на чтение, запись, предоставление прав доступа и отправку запросов к БД                                                                                                             |
+| `M` | (manage) управление. Состоит из разрешений на создание и удаление БД |
+| `FL` | (full legacy) все права (устаревшее). Состоит из разрешений на использование (устаревшее) и управление |
+| `F` | (full) все права. Состоит из разрешений на использование и управление |
### Простые разрешения
Если нет другой возможности, в сокращенной записи будет приведен список разрешений в круглых скобках через символ вертикальной черты `|`.
Например, `+(SR|UR):subject` — разрешение на чтение и обновление записей в таблицах.
-| Разрешение | Описание |
-|:----:|:----|
-| `SR` | (select row) чтение из таблиц
-| `UR` | (update row) обновление записей таблиц
-| `ER` | (erase row) удаление записей из таблиц
-| `RA` | (read attributes) чтение атрибутов ACL
-| `WA` | (write attributes) запись атрибутов ACL
-| `CD` | (create directory) создание подкаталога
-| `CT` | (create table) создание таблиц
-| `CQ` | (create queue) создание очередей
-| `RS` | (remove schema) удаление объектов
-| `DS` | (describe schema) описание объектов, содержимое каталогов
-| `AS` | (alter schema) изменение объектов
-| `CDB` | (create database) создание БД
-| `DDB` | (drop database) удаление БД
-| `GAR` | (grant access rights) предоставление прав доступа (только из списка собственных)
-| `WUA` | (write user attributes) изменение пользовательских атрибутов
-| `ConnDB` | (connect database) подключение и отправка запросов в БД
+| Разрешение | Описание |
+|:----------:|:---------------------------------------------------------------------------------|
+| `SR` | (select row) чтение из таблиц |
+| `UR` | (update row) обновление записей таблиц |
+| `ER` | (erase row) удаление записей из таблиц |
+| `RA` | (read attributes) чтение атрибутов ACL |
+| `WA` | (write attributes) запись атрибутов ACL |
+| `CD` | (create directory) создание подкаталога |
+| `CT` | (create table) создание таблиц |
+| `CQ` | (create queue) создание очередей |
+| `RS` | (remove schema) удаление объектов |
+| `DS` | (describe schema) описание объектов, содержимое каталогов |
+| `AS` | (alter schema) изменение объектов |
+| `CDB` | (create database) создание БД |
+| `DDB` | (drop database) удаление БД |
+| `GAR` | (grant access rights) предоставление прав доступа (только из списка собственных) |
+| `WUA` | (write user attributes) изменение пользовательских атрибутов |
+| `ConnDB` | (connect database) подключение и отправка запросов в БД |
## Типы наследования {#inheritance-types}
Для описания передачи наследования дочерним объектам могут использоваться один или несколько флагов наследования.
-| Флаг | Описание |
-|:----:|:----|
-| `-` | без наследования
-| `O` | эта запись будут наследоваться дочерними объектами
-| `C` | эта запись будет наследоваться дочерними контейнерами
-| `+` | эта запись будет использоваться только для наследования и не будут использоваться для проверки доступа
+| Флаг | Описание |
+|:-----:|:-------------------------------------------------------------------------------------------------------|
+| `-` | без наследования |
+| `O`   | эта запись будет наследоваться дочерними объектами                                                       |
+| `C` | эта запись будет наследоваться дочерними контейнерами |
+| `+`   | эта запись будет использоваться только для наследования и не будет использоваться для проверки доступа   |
diff --git a/ydb/docs/ru/core/yql/reference/_includes/permissions_list.md b/ydb/docs/ru/core/yql/reference/_includes/permissions_list.md
index 08ceae13ba..6fc0552878 100644
--- a/ydb/docs/ru/core/yql/reference/_includes/permissions_list.md
+++ b/ydb/docs/ru/core/yql/reference/_includes/permissions_list.md
@@ -11,8 +11,8 @@
`ydb.database.drop` | `DROP` | Право удалять базы данных в кластере
Элементарные права на объекты базы данных
`ydb.granular.select_row` | `SELECT ROW` | Право читать строки из таблицы (select), читать сообщения из топиков
-`ydb.granular.update_row` | `UPDATE ROW` | Право обновлять строки в таблице (insert, update, insert, erase), писать сообщения в топики
-`ydb.granular.erase_row` | `ERASE ROW` | Право удалять строки из таблицы
+`ydb.granular.update_row` | `UPDATE ROW` | Право обновлять строки в таблице (insert, update, upsert, replace), писать сообщения в топики
+`ydb.granular.erase_row` | `ERASE ROW` | Право удалять строки из таблицы (delete)
`ydb.granular.create_directory` | `CREATE DIRECTORY` | Право создавать и удалять директории, в том числе существующие и вложенные
`ydb.granular.create_table` | `CREATE TABLE` | Право создавать таблицы (в том числе индексные, внешние, колоночные), представления, последовательности
`ydb.granular.create_queue` | `CREATE QUEUE` | Право создавать топики
diff --git a/ydb/docs/ru/core/yql/reference/syntax/lexer.md b/ydb/docs/ru/core/yql/reference/syntax/lexer.md
index e3770419ad..5b5bcce916 100644
--- a/ydb/docs/ru/core/yql/reference/syntax/lexer.md
+++ b/ydb/docs/ru/core/yql/reference/syntax/lexer.md
@@ -233,31 +233,31 @@ SELECT
### Целочисленные литералы {#intliterals}
-Суффикс | Тип | Комментарий
------ | ----- | -----
-`p` | `PgInt4` | 32-битное знаковое целое (в PostgreSQL нет беззнаковых типов)
-`ps`| `PgInt2` | 16-битное знаковое целое
-`pi`| `PgInt4` |
-`pb`| `PgInt8` | 64-битное знаковое цело
-`pn`| `PgNumeric` | знаковое целое произвольной точности (до 131072 цифр)
+| Суффикс | Тип | Комментарий |
+|---------|-------------|---------------------------------------------------------------|
+| `p` | `PgInt4` | 32-битное знаковое целое (в PostgreSQL нет беззнаковых типов) |
+| `ps` | `PgInt2` | 16-битное знаковое целое |
+| `pi` | `PgInt4` | |
+| `pb`    | `PgInt8`    | 64-битное знаковое целое                                       |
+| `pn` | `PgNumeric` | знаковое целое произвольной точности (до 131072 цифр) |
### Литералы с плавающей точкой {#floatliterals}
-Суффикс | Тип | Комментарий
------ | ----- | -----
-`p` | `PgFloat8` | число с плавающей точкой (64 бит double)
-`pf4`| `PgFloat4` | число с плавающей точкой (32 бит float)
-`pf8`| `PgFloat8` |
-`pn` | `PgNumeric` | число с плавающей точкой произвольной точности (до 131072 цифр перед запятой, до 16383 цифр после запятой)
+| Суффикс | Тип | Комментарий |
+|--------|-------------|------------------------------------------------------------------------------------------------------------|
+| `p` | `PgFloat8` | число с плавающей точкой (64 бит double) |
+| `pf4` | `PgFloat4` | число с плавающей точкой (32 бит float) |
+| `pf8` | `PgFloat8` | |
+| `pn` | `PgNumeric` | число с плавающей точкой произвольной точности (до 131072 цифр перед запятой, до 16383 цифр после запятой) |
### Строковые литералы {#stringliterals}
-Суффикс | Тип | Комментарий
------ | ----- | -----
-`p` | `PgText` | текстовая строка
-`pt`| `PgText` |
-`pv`| `PgVarchar` | текстовая строка
-`pb`| `PgBytea` | бинарная строка
+| Суффикс | Тип | Комментарий |
+|--------|-------------|------------------|
+| `p` | `PgText` | текстовая строка |
+| `pt` | `PgText` | |
+| `pv` | `PgVarchar` | текстовая строка |
+| `pb` | `PgBytea` | бинарная строка |
{% note warning "Внимание" %}
diff --git a/ydb/docs/ru/core/yql/reference/types/primitive.md b/ydb/docs/ru/core/yql/reference/types/primitive.md
index 8676cab095..8e6158cc41 100644
--- a/ydb/docs/ru/core/yql/reference/types/primitive.md
+++ b/ydb/docs/ru/core/yql/reference/types/primitive.md
@@ -6,35 +6,35 @@
## Числовые типы {#numeric}
-Тип | Описание | Примечания
------ | ----- | -----
-`Bool` | Логическое значение. |
-`Int8` | Целое число со знаком.<br/>Допустимые значения: от –2<sup>7</sup> до 2<sup>7</sup>–1. |
-`Int16` | Целое число со знаком.<br/>Допустимые значения: от –2<sup>15</sup> до 2<sup>15</sup>–1. |
-`Int32` | Целое число со знаком.<br/>Допустимые значения: от –2<sup>31</sup> до 2<sup>31</sup>–1. |
-`Int64` | Целое число со знаком.<br/>Допустимые значения: от –2<sup>63</sup> до 2<sup>63</sup>–1. |
-`Uint8` | Беззнаковое целое число.<br/>Допустимые значения: от 0 до 2<sup>8</sup>–1. |
-`Uint16` | Беззнаковое целое число.<br/>Допустимые значения: от 0 до 2<sup>16</sup>–1. |
-`Uint32` | Беззнаковое целое число.<br/>Допустимые значения: от 0 до 2<sup>32</sup>–1. |
-`Uint64` | Беззнаковое целое число.<br/>Допустимые значения: от 0 до 2<sup>64</sup>–1. |
-`Float` | Вещественное число с переменной точностью размером 4 байта. |{% if feature_map_tables %} Не может быть использован в первичном ключе{% endif %}
-`Double` | Вещественное число с переменной точностью размером 8 байт. |{% if feature_map_tables %} Не может быть использован в первичном ключе{% endif %}
-`Decimal` | Вещественное число с указанной точностью, до 35 десятичных знаков |{% if feature_map_tables %} При использовании в колонках таблиц точность фиксирована: Decimal (22,9).{% endif %}
+| Тип | Описание | Примечания |
+|------------|-----------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|
+| `Bool` | Логическое значение. | |
+| `Int8` | Целое число со знаком.<br/>Допустимые значения: от –2<sup>7</sup> до 2<sup>7</sup>–1. | |
+| `Int16` | Целое число со знаком.<br/>Допустимые значения: от –2<sup>15</sup> до 2<sup>15</sup>–1. | |
+| `Int32` | Целое число со знаком.<br/>Допустимые значения: от –2<sup>31</sup> до 2<sup>31</sup>–1. | |
+| `Int64` | Целое число со знаком.<br/>Допустимые значения: от –2<sup>63</sup> до 2<sup>63</sup>–1. | |
+| `Uint8` | Беззнаковое целое число.<br/>Допустимые значения: от 0 до 2<sup>8</sup>–1. | |
+| `Uint16` | Беззнаковое целое число.<br/>Допустимые значения: от 0 до 2<sup>16</sup>–1. | |
+| `Uint32` | Беззнаковое целое число.<br/>Допустимые значения: от 0 до 2<sup>32</sup>–1. | |
+| `Uint64` | Беззнаковое целое число.<br/>Допустимые значения: от 0 до 2<sup>64</sup>–1. | |
+| `Float` | Вещественное число с переменной точностью размером 4 байта. | {% if feature_map_tables %} Не может быть использован в первичном ключе{% endif %} |
+| `Double` | Вещественное число с переменной точностью размером 8 байт. | {% if feature_map_tables %} Не может быть использован в первичном ключе{% endif %} |
+| `Decimal` | Вещественное число с указанной точностью, до 35 десятичных знаков | {% if feature_map_tables %} При использовании в колонках таблиц точность фиксирована: Decimal (22,9).{% endif %} |
{% if feature_map_tables %}
-`DyNumber` | Бинарное представление вещественного числа точностью до 38 знаков.<br/>Допустимые значения: положительные от 1×10<sup>-130</sup> до 1×10<sup>126</sup>–1, отрицательные от -1×10<sup>126</sup>–1 до -1×10<sup>-130</sup> и 0.<br/>Совместим с типом `Number` AWS DynamoDB. Не рекомендуется для использования в {{ backend_name_lower }}-native приложениях. |
+| `DyNumber` | Бинарное представление вещественного числа точностью до 38 знаков.<br/>Допустимые значения: положительные от 1×10<sup>-130</sup> до 1×10<sup>126</sup>–1, отрицательные от -1×10<sup>126</sup>–1 до -1×10<sup>-130</sup> и 0.<br/>Совместим с типом `Number` AWS DynamoDB. Не рекомендуется для использования в {{ backend_name_lower }}-native приложениях. |
{% endif %}
## Строковые типы {#string}
-Тип | Описание | Примечания
------ | ----- | -----
-`String` | Строка, может содержать произвольные бинарные данные |
-`Utf8` | Текст в кодировке [UTF-8](https://en.wikipedia.org/wiki/UTF-8) |
-`Json` | [JSON](https://en.wikipedia.org/wiki/JSON) в текстовом представлении|Не поддерживает возможность сравнения{% if feature_map_tables %}, не может быть использован в первичном ключе{% endif %}
-`JsonDocument` | [JSON](https://en.wikipedia.org/wiki/JSON) в бинарном индексированном представлении | Не поддерживает возможность сравнения{% if feature_map_tables %}, не может быть использован в первичном ключе{% endif %}
-`Yson` | [YSON](yson.md) в текстовом или бинарном представлении | Не поддерживает возможность сравнения{% if feature_map_tables %}, не может быть использован в первичном ключе{% endif %}
-`Uuid` | Универсальный идентификатор [UUID](https://tools.ietf.org/html/rfc4122) |
+| Тип | Описание | Примечания |
+|----------------|-------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------|
+| `String` | Строка, может содержать произвольные бинарные данные |
+| `Utf8` | Текст в кодировке [UTF-8](https://en.wikipedia.org/wiki/UTF-8) |
+| `Json` | [JSON](https://en.wikipedia.org/wiki/JSON) в текстовом представлении | Не поддерживает возможность сравнения{% if feature_map_tables %}, не может быть использован в первичном ключе{% endif %} |
+| `JsonDocument` | [JSON](https://en.wikipedia.org/wiki/JSON) в бинарном индексированном представлении | Не поддерживает возможность сравнения{% if feature_map_tables %}, не может быть использован в первичном ключе{% endif %} |
+| `Yson` | [YSON](yson.md) в текстовом или бинарном представлении | Не поддерживает возможность сравнения{% if feature_map_tables %}, не может быть использован в первичном ключе{% endif %} |
+| `Uuid` | Универсальный идентификатор [UUID](https://tools.ietf.org/html/rfc4122) |
{% note info "Ограничения на размер" %}
@@ -56,15 +56,15 @@
## Дата и время {#datetime}
-Тип | Описание | Примечания
------ | ----- | -----
-`Date` | Дата, точность до дней | Диапазон значений для всех временных типов кроме `Interval` - от нуля часов 01.01.1970 до нуля часов 01.01.2106. Внутреннее представление `Date` – беззнаковое целое 16 бит |
-`Datetime` | Дата/время, точность до секунд | Внутреннее представление – беззнаковое целое 32 бит |
-`Timestamp` | Дата/время, точность до микросекунд | Внутреннее представление – беззнаковое целое 64 бит |
-`Interval` | Интервал времени (знаковый), точность до микросекунд | Диапазон значений – от -136 лет до +136 лет. Внутреннее представление – знаковое целое 64 бит.{% if feature_map_tables %} Не может быть использован в первичном ключе{% endif %}
-`TzDate` | Дата с меткой временной зоны, точность до дней | Не поддерживается в столбцах таблиц
-`TzDateTime` | Дата/время с меткой временной зоны, точность до секунд | Не поддерживается в столбцах таблиц
-`TzTimestamp` | Дата/время с меткой временной зоны, точность до микросекунд | Не поддерживается в столбцах таблиц
+| Тип | Описание | Примечания |
+|----------------|-------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `Date` | Дата, точность до дней | Диапазон значений для всех временных типов кроме `Interval` - от нуля часов 01.01.1970 до нуля часов 01.01.2106. Внутреннее представление `Date` – беззнаковое целое 16 бит |
+| `Datetime` | Дата/время, точность до секунд | Внутреннее представление – беззнаковое целое 32 бит |
+| `Timestamp` | Дата/время, точность до микросекунд | Внутреннее представление – беззнаковое целое 64 бит |
+| `Interval` | Интервал времени (знаковый), точность до микросекунд | Диапазон значений – от -136 лет до +136 лет. Внутреннее представление – знаковое целое 64 бит.{% if feature_map_tables %} Не может быть использован в первичном ключе{% endif %} |
+| `TzDate` | Дата с меткой временной зоны, точность до дней | Не поддерживается в столбцах таблиц |
+| `TzDateTime` | Дата/время с меткой временной зоны, точность до секунд | Не поддерживается в столбцах таблиц |
+| `TzTimestamp` | Дата/время с меткой временной зоны, точность до микросекунд | Не поддерживается в столбцах таблиц |
### Особенности поддержки типов с меткой временной зоны
@@ -94,29 +94,29 @@ SELECT --эти выражения всегда true для любых тайм�
#### Приведение к численным типам
-Тип | Bool | Int8 | Int16 | Int32 | Int64 | Uint8 | Uint16 | Uint32 | Uint64 | Float | Double | Decimal
---- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | ---
-**Bool** | — | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Нет
-**Int8** | Да<sup>2</sup> | — | Да | Да | Да | Да<sup>3</sup> | Да<sup>3</sup> | Да<sup>3</sup> | Да<sup>3</sup> | Да | Да | Да
-**Int16** | Да<sup>2</sup> | Да<sup>4</sup> | — | Да | Да | Да<sup>3,4</sup> | Да<sup>3</sup> | Да<sup>3</sup> | Да<sup>3</sup> | Да | Да | Да
-**Int32** | Да<sup>2</sup> | Да<sup>4</sup> | Да<sup>4</sup> | — | Да | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3</sup> | Да<sup>3</sup> | Да | Да | Да
-**Int64** | Да<sup>2</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | — | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3</sup> | Да | Да | Да
-**Uint8** | Да<sup>2</sup> | Да<sup>4</sup> | Да | Да | Да | — | Да | Да | Да | Да | Да | Да
-**Uint16** | Да<sup>2</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да | Да | Да<sup>4</sup> | — | Да | Да | Да | Да | Да
-**Uint32** | Да<sup>2</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да | Да<sup>4</sup> | Да<sup>4</sup> | — | Да | Да | Да | Да
-**Uint64** | Да<sup>2</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | — | Да | Да | Да
-**Float** | Да<sup>2</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | — | Да | Нет
-**Double** | Да<sup>2</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да | — | Нет
-**Decimal** | Нет | Да | Да | Да | Да | Да | Да | Да | Да | Да | Да | —
-**String** | Да | Да | Да | Да | Да | Да | Да | Да | Да | Да | Да | Да
-**Utf8** | Да | Да | Да | Да | Да | Да | Да | Да | Да | Да | Да | Да
-**Json** | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет
-**Yson** | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Нет
-**Uuid** | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет
-**Date** | Нет | Да<sup>4</sup> | Да<sup>4</sup> | Да | Да | Да<sup>4</sup> | Да | Да | Да | Да | Да | Да | Нет
-**Datetime** | Нет | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да | Да<sup>4</sup> | Да<sup>4</sup> | Да | Да | Да | Да | Нет
-**Timestamp** | Нет | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да | Да | Да | Нет
-**Interval** | Нет | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3</sup> | Да | Да | Нет
+| Тип | Bool | Int8 | Int16 | Int32 | Int64 | Uint8 | Uint16 | Uint32 | Uint64 | Float | Double | Decimal |
+|---------------|----------------|----------------|----------------|----------------|----------------|------------------|------------------|------------------|------------------|----------------|----------------|---------|
+| **Bool** | — | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Да<sup>1</sup> | Нет |
+| **Int8** | Да<sup>2</sup> | — | Да | Да | Да | Да<sup>3</sup> | Да<sup>3</sup> | Да<sup>3</sup> | Да<sup>3</sup> | Да | Да | Да |
+| **Int16** | Да<sup>2</sup> | Да<sup>4</sup> | — | Да | Да | Да<sup>3,4</sup> | Да<sup>3</sup> | Да<sup>3</sup> | Да<sup>3</sup> | Да | Да | Да |
+| **Int32** | Да<sup>2</sup> | Да<sup>4</sup> | Да<sup>4</sup> | — | Да | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3</sup> | Да<sup>3</sup> | Да | Да | Да |
+| **Int64** | Да<sup>2</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | — | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3</sup> | Да | Да | Да |
+| **Uint8** | Да<sup>2</sup> | Да<sup>4</sup> | Да | Да | Да | — | Да | Да | Да | Да | Да | Да |
+| **Uint16** | Да<sup>2</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да | Да | Да<sup>4</sup> | — | Да | Да | Да | Да | Да |
+| **Uint32** | Да<sup>2</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да | Да<sup>4</sup> | Да<sup>4</sup> | — | Да | Да | Да | Да |
+| **Uint64** | Да<sup>2</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | — | Да | Да | Да |
+| **Float** | Да<sup>2</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | — | Да | Нет |
+| **Double** | Да<sup>2</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да | — | Нет |
+| **Decimal** | Нет | Да | Да | Да | Да | Да | Да | Да | Да | Да | Да | — |
+| **String** | Да | Да | Да | Да | Да | Да | Да | Да | Да | Да | Да | Да |
+| **Utf8** | Да | Да | Да | Да | Да | Да | Да | Да | Да | Да | Да | Да |
+| **Json** | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет |
+| **Yson** | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Да<sup>5</sup> | Нет |
+| **Uuid** | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет | Нет |
+| **Date**      | Нет            | Да<sup>4</sup> | Да<sup>4</sup> | Да             | Да             | Да<sup>4</sup>   | Да               | Да               | Да               | Да             | Да             | Нет     |
+| **Datetime** | Нет | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да | Да<sup>4</sup> | Да<sup>4</sup> | Да | Да | Да | Да | Нет |
+| **Timestamp** | Нет | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да | Да | Да | Нет |
+| **Interval** | Нет | Да<sup>4</sup> | Да<sup>4</sup> | Да<sup>4</sup> | Да | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3,4</sup> | Да<sup>3</sup> | Да | Да | Нет |
<sup>1</sup> `True` преобразуется в `1`, `False` преобразуется в `0`.
<sup>2</sup> Любое значение кроме `0` преобразуется в `True`, `0` преобразуется в `False`.
@@ -126,55 +126,55 @@ SELECT --эти выражения всегда true для любых тайм�
#### Приведение к типам данных даты и времени
-Тип | Date | Datetime | Timestamp | Interval
---- | --- | --- | --- | ---
-**Bool** | Нет | Нет | Нет | Нет
-**Int8** | Да | Да | Да | Да
-**Int16** | Да | Да | Да | Да
-**Int32** | Да | Да | Да | Да
-**Int64** | Да | Да | Да | Да
-**Uint8** | Да | Да | Да | Да
-**Uint16** | Да | Да | Да | Да
-**Uint32** | Да | Да | Да | Да
-**Uint64** | Да | Да | Да | Да
-**Float** | Нет | Нет | Нет | Нет
-**Double** | Нет | Нет | Нет | Нет
-**Decimal** | Нет | Нет | Нет | Нет
-**String** | Да | Да | Да | Да
-**Utf8** | Да | Да | Да | Да
-**Json** | Нет | Нет | Нет | Нет
-**Yson** | Нет | Нет | Нет | Нет
-**Uuid** | Нет | Нет | Нет | Нет
-**Date** | — | Да | Да | Нет
-**Datetime** | Да | — | Да | Нет
-**Timestamp** | Да | Да | — | Нет
-**Interval** | Нет | Нет | Нет | — | —
+| Тип | Date | Datetime | Timestamp | Interval |
+|---------------|------|----------|-----------|----------|
+| **Bool** | Нет | Нет | Нет | Нет |
+| **Int8** | Да | Да | Да | Да |
+| **Int16** | Да | Да | Да | Да |
+| **Int32** | Да | Да | Да | Да |
+| **Int64** | Да | Да | Да | Да |
+| **Uint8** | Да | Да | Да | Да |
+| **Uint16** | Да | Да | Да | Да |
+| **Uint32** | Да | Да | Да | Да |
+| **Uint64** | Да | Да | Да | Да |
+| **Float** | Нет | Нет | Нет | Нет |
+| **Double** | Нет | Нет | Нет | Нет |
+| **Decimal** | Нет | Нет | Нет | Нет |
+| **String** | Да | Да | Да | Да |
+| **Utf8** | Да | Да | Да | Да |
+| **Json** | Нет | Нет | Нет | Нет |
+| **Yson** | Нет | Нет | Нет | Нет |
+| **Uuid** | Нет | Нет | Нет | Нет |
+| **Date** | — | Да | Да | Нет |
+| **Datetime** | Да | — | Да | Нет |
+| **Timestamp** | Да | Да | — | Нет |
+| **Interval**  | Нет  | Нет      | Нет       | —        |
#### Приведение к другим типам данных
-Тип | String | Utf8 | Json | Yson | Uuid
---- | --- | --- | --- | --- | ---
-**Bool** | Да | Нет | Нет | Нет | Нет |
-**Int8** | Да | Нет | Нет | Нет | Нет
-**Int16** | Да | Нет | Нет | Нет | Нет
-**Int32** | Да | Нет | Нет | Нет | Нет
-**Int64** | Да | Нет | Нет | Нет | Нет
-**Uint8** | Да | Нет | Нет | Нет | Нет
-**Uint16** | Да | Нет | Нет | Нет | Нет
-**Uint32** | Да | Нет | Нет | Нет | Нет
-**Uint64** | Да | Нет | Нет | Нет | Нет
-**Float** | Да | Нет | Нет | Нет | Нет
-**Double** | Да | Нет | Нет | Нет | Нет
-**Decimal** | Да | Нет | Нет | Нет | Нет
-**String** | — | Да | Да | Да | Да
-**Utf8** | Да | — | Нет | Нет | Нет
-**Json** | Да | Да | — | Нет | Нет
-**Yson** | Да<sup>1</sup> | Нет | Нет | Нет | Нет
-**Uuid** | Да | Да | Нет | Нет | —
-**Date** | Да | Да | Нет | Нет | Нет
-**Datetime** | Да | Да | Нет | Нет | Нет
-**Timestamp** | Да | Да | Нет | Нет | Нет
-**Interval** | Да | Да | Нет | Нет | Нет
+| Тип | String | Utf8 | Json | Yson | Uuid |
+|---------------|----------------|------|------|------|-------|
+| **Bool** | Да | Нет | Нет | Нет | Нет |
+| **Int8** | Да | Нет | Нет | Нет | Нет |
+| **Int16** | Да | Нет | Нет | Нет | Нет |
+| **Int32** | Да | Нет | Нет | Нет | Нет |
+| **Int64** | Да | Нет | Нет | Нет | Нет |
+| **Uint8** | Да | Нет | Нет | Нет | Нет |
+| **Uint16** | Да | Нет | Нет | Нет | Нет |
+| **Uint32** | Да | Нет | Нет | Нет | Нет |
+| **Uint64** | Да | Нет | Нет | Нет | Нет |
+| **Float** | Да | Нет | Нет | Нет | Нет |
+| **Double** | Да | Нет | Нет | Нет | Нет |
+| **Decimal** | Да | Нет | Нет | Нет | Нет |
+| **String** | — | Да | Да | Да | Да |
+| **Utf8** | Да | — | Нет | Нет | Нет |
+| **Json** | Да | Да | — | Нет | Нет |
+| **Yson** | Да<sup>1</sup> | Нет | Нет | Нет | Нет |
+| **Uuid** | Да | Да | Нет | Нет | — |
+| **Date** | Да | Да | Нет | Нет | Нет |
+| **Datetime** | Да | Да | Нет | Нет | Нет |
+| **Timestamp** | Да | Да | Нет | Нет | Нет |
+| **Interval** | Да | Да | Нет | Нет | Нет |
<sup>1</sup> При помощи встроенной функции [Yson::ConvertTo](../udf/list/yson.md#ysonconvertto).
@@ -190,28 +190,28 @@ SELECT --эти выражения всегда true для любых тайм�
При несовпадении численных типов сначала выполняется BitCast обоих аргументов к типу результата, а потом уже операция.
-Тип | Int8 | Int16 | Int32 | Int64 | Uint8 | Uint16 | Uint32 | Uint64 | Float | Double
---- | --- | --- | --- | --- | --- | --- | --- | --- | --- | ---
-**Int8** | — | `Int16` | `Int32` | `Int64` | `Int8` | `Uint16` | `Uint32` | `Uint64` | `Float` | `Double`
-**Int16** | `Int16` | — | `Int32` | `Int64` | `Int16` | `Int16` | `Uint32` | `Uint64` | `Float` | `Double`
-**Int32** | `Int32` | `Int32` | — | `Int64` | `Int32` | `Int32` | `Int32` | `Uint64` | `Float` | `Double`
-**Int64** | `Int64` | `Int64` | `Int64` | — | `Int64` | `Int64` | `Int64` | `Int64` | `Float` | `Double`
-**Uint8** | `Int8` | `Int16` | `Int32` | `Int64` | — | `Uint16` | `Uint32` | `Uint64` | `Float` | `Double`
-**Uint16** | `Uint16` | `Int16` | `Int32` | `Int64` | `Uint16` | — | `Uint32` | `Uint64` | `Float` | `Double`
-**Uint32** | `Uint32` | `Uint32` | `Int32` | `Int64` | `Uint32` | `Uint32` | — | `Uint64` | `Float` | `Double`
-**Uint64** | `Uint64` | `Uint64` | `Uint64` | `Int64` | `Uint64` | `Uint64` | `Uint64` | — | `Float` | `Double`
-**Float** | `Float` | `Float` | `Float` | `Float` | `Float` | `Float` | `Float` | `Float` | — | `Double`
-**Double** | `Double` | `Double` | `Double` | `Double` | `Double` | `Double` | `Double` | `Double` | `Double` | —
+| Тип | Int8 | Int16 | Int32 | Int64 | Uint8 | Uint16 | Uint32 | Uint64 | Float | Double |
+|------------|----------|----------|----------|----------|----------|----------|----------|----------|----------|----------|
+| **Int8** | — | `Int16` | `Int32` | `Int64` | `Int8` | `Uint16` | `Uint32` | `Uint64` | `Float` | `Double` |
+| **Int16** | `Int16` | — | `Int32` | `Int64` | `Int16` | `Int16` | `Uint32` | `Uint64` | `Float` | `Double` |
+| **Int32** | `Int32` | `Int32` | — | `Int64` | `Int32` | `Int32` | `Int32` | `Uint64` | `Float` | `Double` |
+| **Int64** | `Int64` | `Int64` | `Int64` | — | `Int64` | `Int64` | `Int64` | `Int64` | `Float` | `Double` |
+| **Uint8** | `Int8` | `Int16` | `Int32` | `Int64` | — | `Uint16` | `Uint32` | `Uint64` | `Float` | `Double` |
+| **Uint16** | `Uint16` | `Int16` | `Int32` | `Int64` | `Uint16` | — | `Uint32` | `Uint64` | `Float` | `Double` |
+| **Uint32** | `Uint32` | `Uint32` | `Int32` | `Int64` | `Uint32` | `Uint32` | — | `Uint64` | `Float` | `Double` |
+| **Uint64** | `Uint64` | `Uint64` | `Uint64` | `Int64` | `Uint64` | `Uint64` | `Uint64` | — | `Float` | `Double` |
+| **Float** | `Float` | `Float` | `Float` | `Float` | `Float` | `Float` | `Float` | `Float` | — | `Double` |
+| **Double** | `Double` | `Double` | `Double` | `Double` | `Double` | `Double` | `Double` | `Double` | `Double` | — |
#### Типы даты и времени
-Тип | Date | Datetime | Timestamp | Interval | TzDate | TzDatetime | TzTimestamp
---- | --- | --- | --- | --- | --- | --- | ---
-**Date** | — | — | — | `Date` | — | — | —
-**Datetime** | — | — | — | `Datetime` | — | — | —
-**Timestamp** | — | — | — | `Timestamp` | — | — | —
-**Interval** | `Date` | `Datetime` | `Timestamp` | — | `TzDate` | `TzDatetime` | `TzTimestamp`
-**TzDate** | — | — | — | `TzDate` | — | — | —
-**TzDatetime** | — | — | — | `TzDatetime` | — | — | —
-**TzTimestamp** | — | — | — | `TzTimestamp` | — | — | —
+| Тип | Date | Datetime | Timestamp | Interval | TzDate | TzDatetime | TzTimestamp |
+|-----------------|--------|------------|-------------|---------------|----------|--------------|---------------|
+| **Date** | — | — | — | `Date` | — | — | — |
+| **Datetime** | — | — | — | `Datetime` | — | — | — |
+| **Timestamp** | — | — | — | `Timestamp` | — | — | — |
+| **Interval** | `Date` | `Datetime` | `Timestamp` | — | `TzDate` | `TzDatetime` | `TzTimestamp` |
+| **TzDate** | — | — | — | `TzDate` | — | — | — |
+| **TzDatetime** | — | — | — | `TzDatetime` | — | — | — |
+| **TzTimestamp** | — | — | — | `TzTimestamp` | — | — | — |
diff --git a/ydb/docs/ru/core/yql/reference/types/serial.md b/ydb/docs/ru/core/yql/reference/types/serial.md
index 34a63b730b..64b6b1e7fc 100644
--- a/ydb/docs/ru/core/yql/reference/types/serial.md
+++ b/ydb/docs/ru/core/yql/reference/types/serial.md
@@ -24,11 +24,11 @@ REPLACE INTO users (name, email) VALUES ('John', 'john@example.com');
SELECT * FROM users;
```
-email | name | user_id
------ | ----- | -----
-`alice@example.com` | Alice | 1
-`bob@example.com` | Bob | 2
-`john@example.com` | John | 3
+| email | name | user_id |
+|---------------------|-------|---------|
+| `alice@example.com` | Alice | 1 |
+| `bob@example.com` | Bob | 2 |
+| `john@example.com` | John | 3 |
Можно самостоятельно указать значение `Serial` колонки при вставке, в этом случае вставка будет выполняться, как с обычной целочисленной колонкой, и `Sequence` затрагиваться при таком запросе никак не будет:
@@ -44,14 +44,14 @@ UPSERT INTO users (user_id, name, email) VALUES (4, 'Peter', 'peter@example.com'
Значения последовательности начинаются с единицы, выдаются с шагом, равным единице, и ограничены в зависимости от используемого типа.
-Тип | Максимальное значение | Тип значения
------ | ----- | -----
-`SmallSerial` | $2^{15}–1$ | `Int16`
-`Serial2` | $2^{15}–1$ | `Int16`
-`Serial` | $2^{31}–1$ | `Int32`
-`Serial4` | $2^{31}–1$ | `Int32`
-`Serial8` | $2^{63}–1$ | `Int64`
-`BigSerial` | $2^{63}–1$ | `Int64`
+| Тип | Максимальное значение | Тип значения |
+|---------------|-----------------------|--------------|
+| `SmallSerial` | $2^{15}-1$            | `Int16`      |
+| `Serial2`     | $2^{15}-1$            | `Int16`      |
+| `Serial`      | $2^{31}-1$            | `Int32`      |
+| `Serial4`     | $2^{31}-1$            | `Int32`      |
+| `Serial8`     | $2^{63}-1$            | `Int64`      |
+| `BigSerial`   | $2^{63}-1$            | `Int64`      |
При переполнении `Sequence` на вставке будет возвращаться ошибка:
diff --git a/ydb/docs/ru/core/yql/reference/types/special.md b/ydb/docs/ru/core/yql/reference/types/special.md
index b3f5cd264f..e8e9637da7 100644
--- a/ydb/docs/ru/core/yql/reference/types/special.md
+++ b/ydb/docs/ru/core/yql/reference/types/special.md
@@ -1,16 +1,16 @@
# Специальные типы данных
-Тип | Описание
------ | -----
-`Callable` | Вызываемое значение, которое можно исполнить, передав аргументы в круглых скобках в SQL-синтаксисе YQL{% if feature_mapreduce %}, либо с помощью функции `Apply` при использовании [s-expressions](/docs/s_expressions) синтаксиса{% endif %}.
-`Resource` | Непрозрачный указатель на ресурс, который можно передавать между пользовательскими функциями (UDF, user defined function). Тип возвращаемого и принимаемого ресурса объявляется внутри функции строковой меткой. При передаче ресурса YQL проверяет совпадение меток, чтобы предотвратить передачу ресурсов между несовместимыми функциями. В случае несовпадения меток происходит ошибка типизации.
-`Tagged` | Возможность дать прикладное имя какому-либо другому типу.
-`Generic` | Тип данных у типа данных.
-`Unit` | Тип данных у невычисляемых сущностей (источники и приемники данных, атомы и т.&nbsp;п.).
-`Null` | Сингулярный тип данных с единственным возможным значением null. Является типом литерала `NULL` и может преобразовываться к любому `Optional` типу.
-`Void` | Сингулярный тип данных с единственным возможным значением `null`.
-`EmptyList` | сингулярный тип данных с единственным возможным значением []; является типом литерала `[]` и может преобразовываться к любому `List` типу.
-`EmptyDict` | сингулярный тип данных с единственным возможным значением {}; является типом литерала `{}` и может преобразовываться к любому `Dict` или `Set` типу.
+| Тип | Описание |
+|-------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `Callable` | Вызываемое значение, которое можно исполнить, передав аргументы в круглых скобках в SQL-синтаксисе YQL{% if feature_mapreduce %}, либо с помощью функции `Apply` при использовании [s-expressions](/docs/s_expressions) синтаксиса{% endif %}. |
+| `Resource` | Непрозрачный указатель на ресурс, который можно передавать между пользовательскими функциями (UDF, user defined function). Тип возвращаемого и принимаемого ресурса объявляется внутри функции строковой меткой. При передаче ресурса YQL проверяет совпадение меток, чтобы предотвратить передачу ресурсов между несовместимыми функциями. В случае несовпадения меток происходит ошибка типизации. |
+| `Tagged` | Возможность дать прикладное имя какому-либо другому типу. |
+| `Generic` | Тип данных у типа данных. |
+| `Unit` | Тип данных у невычисляемых сущностей (источники и приемники данных, атомы и т.&nbsp;п.). |
+| `Null` | Сингулярный тип данных с единственным возможным значением null. Является типом литерала `NULL` и может преобразовываться к любому `Optional` типу. |
+| `Void` | Сингулярный тип данных с единственным возможным значением `null`. |
+| `EmptyList` | сингулярный тип данных с единственным возможным значением []; является типом литерала `[]` и может преобразовываться к любому `List` типу. |
+| `EmptyDict` | сингулярный тип данных с единственным возможным значением {}; является типом литерала `{}` и может преобразовываться к любому `Dict` или `Set` типу. |
diff --git a/ydb/docs/ru/core/yql/reference/udf/list/postgres.md b/ydb/docs/ru/core/yql/reference/udf/list/postgres.md
index 1a81354874..84d831b8d2 100644
--- a/ydb/docs/ru/core/yql/reference/udf/list/postgres.md
+++ b/ydb/docs/ru/core/yql/reference/udf/list/postgres.md
@@ -15,31 +15,31 @@
### Целочисленные литералы {#intliterals}
-Суффикс | Тип | Комментарий
------ | ----- | -----
-`p` | `PgInt4` | 32-битное знаковое целое (в PostgreSQL нет беззнаковых типов)
-`ps`| `PgInt2` | 16-битное знаковое целое
-`pi`| `PgInt4` |
-`pb`| `PgInt8` | 64-битное знаковое цело
-`pn`| `PgNumeric` | знаковое целое произвольной точности (до 131072 цифр)
+| Суффикс | Тип | Комментарий |
+|---------| ----- |------------------------------------------------------------------|
+| `p` | `PgInt4` | 32-битное знаковое целое (в PostgreSQL нет беззнаковых типов) |
+| `ps` | `PgInt2` | 16-битное знаковое целое |
+| `pi` | `PgInt4` | |
+| `pb`    | `PgInt8`    | 64-битное знаковое целое                                         |
+| `pn` | `PgNumeric` | знаковое целое произвольной точности (до 131072 цифр) |
### Литералы с плавающей точкой {#floatliterals}
-Суффикс | Тип | Комментарий
------ | ----- | -----
-`p` | `PgFloat8` | число с плавающей точкой (64 бит double)
-`pf4`| `PgFloat4` | число с плавающей точкой (32 бит float)
-`pf8`| `PgFloat8` |
-`pn` | `PgNumeric` | число с плавающей точкой произвольной точности (до 131072 цифр перед запятой, до 16383 цифр после запятой)
+| Суффикс | Тип | Комментарий |
+|--------|-------------|------------------------------------------------------------------------------------------------------------|
+| `p` | `PgFloat8` | число с плавающей точкой (64 бит double) |
+| `pf4` | `PgFloat4` | число с плавающей точкой (32 бит float) |
+| `pf8` | `PgFloat8` | |
+| `pn` | `PgNumeric` | число с плавающей точкой произвольной точности (до 131072 цифр перед запятой, до 16383 цифр после запятой) |
### Строковые литералы {#stringliterals}
-Суффикс | Тип | Комментарий
------ | ----- | -----
-`p` | `PgText` | текстовая строка
-`pt`| `PgText` |
-`pv`| `PgVarchar` | текстовая строка
-`pb`| `PgBytea` | бинарная строка
+| Суффикс | Тип | Комментарий |
+|---------|-------------|-------------------|
+| `p` | `PgText` | текстовая строка |
+| `pt` | `PgText` | |
+| `pv` | `PgVarchar` | текстовая строка |
+| `pb` | `PgBytea` | бинарная строка |
{% note warning "Внимание" %}
diff --git a/ydb/library/benchmarks/gen/tpcds-dbgen/tdefs.c b/ydb/library/benchmarks/gen/tpcds-dbgen/tdefs.c
index 3bc6bf2b9e..6448224509 100644
--- a/ydb/library/benchmarks/gen/tpcds-dbgen/tdefs.c
+++ b/ydb/library/benchmarks/gen/tpcds-dbgen/tdefs.c
@@ -172,21 +172,21 @@ getSimpleTdefsByNumber(int nTable)
tdef *
getTdefsByNumber(int nTable)
{
- if (is_set("UPDATE") && is_set("VALIDATE"))
+ if (is_set("UPDATE") && is_set("VALIDATE")
+ && nTable >= S_BRAND && (s_tdefs[nTable - S_BRAND].flags & FL_PASSTHRU))
{
- checkTdefsSize(nTable);
- if (s_tdefs[nTable].flags & FL_PASSTHRU)
+ checkTdefsSize(nTable - S_BRAND);
+ int wtdefsIndex = -1;
+ switch(nTable)
{
- switch(nTable + S_BRAND)
- {
- case S_CATALOG_PAGE: nTable = CATALOG_PAGE; break;
- case S_CUSTOMER_ADDRESS: nTable = CUSTOMER_ADDRESS; break;
- case S_PROMOTION: nTable = PROMOTION; break;
- }
+ case S_CATALOG_PAGE: wtdefsIndex = CATALOG_PAGE; break;
+ case S_CUSTOMER_ADDRESS: wtdefsIndex = CUSTOMER_ADDRESS; break;
+ case S_PROMOTION: wtdefsIndex = PROMOTION; break;
+ }
+ // Use w_tdefs only if we decreased the index
+ if (wtdefsIndex != -1) {
return(&w_tdefs[nTable]);
}
- else
- return(&s_tdefs[nTable]);
}
return(getSimpleTdefsByNumber(nTable));
diff --git a/ydb/library/benchmarks/gen/tpcds-dbgen/w_datetbl.c b/ydb/library/benchmarks/gen/tpcds-dbgen/w_datetbl.c
index abbbc00c3b..673e639b8c 100644
--- a/ydb/library/benchmarks/gen/tpcds-dbgen/w_datetbl.c
+++ b/ydb/library/benchmarks/gen/tpcds-dbgen/w_datetbl.c
@@ -100,7 +100,7 @@ mk_w_date (void * row, ds_key_t index)
mk_bkey(&r->d_date_id[0], nTemp, D_DATE_ID);
jtodt (&temp_date, nTemp);
r->d_year = temp_date.year;
- r->d_dow = set_dow (&temp_date);
+ r->d_dow = set_dow (&temp_date) % 7;
r->d_moy = temp_date.month;
r->d_dom = temp_date.day;
/* set the sequence counts; assumes that the date table starts on a year boundary */
@@ -113,10 +113,6 @@ mk_w_date (void * row, ds_key_t index)
r->d_fy_year = r->d_year;
r->d_fy_quarter_seq = r->d_quarter_seq;
r->d_fy_week_seq = r->d_week_seq;
- if (r->d_dow >= 7) {
- INTERNAL("weekday_names array overflow");
- exit(EXIT_FAILURE);
- }
r->d_day_name = weekday_names[r->d_dow + 1];
dist_member (&r->d_holiday, "calendar", day_index, 8);
if ((r->d_dow == 5) || (r->d_dow == 6))
@@ -281,7 +277,7 @@ vld_w_date(int nTable, ds_key_t kRow, int *Permutation)
mk_bkey(&r->d_date_id[0], nTemp, D_DATE_ID);
jtodt (&temp_date, nTemp);
r->d_year = temp_date.year;
- r->d_dow = set_dow (&temp_date);
+ r->d_dow = set_dow (&temp_date) % 7;
r->d_moy = temp_date.month;
r->d_dom = temp_date.day;
/* set the sequence counts; assumes that the date table starts on a year boundary */
@@ -294,10 +290,6 @@ vld_w_date(int nTable, ds_key_t kRow, int *Permutation)
r->d_fy_year = r->d_year;
r->d_fy_quarter_seq = r->d_quarter_seq;
r->d_fy_week_seq = r->d_week_seq;
- if (r->d_dow >= 7) {
- INTERNAL("weekday_names array overflow");
- exit(EXIT_FAILURE);
- }
r->d_day_name = weekday_names[r->d_dow + 1];
dist_member (&r->d_holiday, "calendar", day_index, 8);
if ((r->d_dow == 5) || (r->d_dow == 6))
diff --git a/ydb/library/formats/arrow/protos/accessor.proto b/ydb/library/formats/arrow/protos/accessor.proto
index b9edfe1be1..7cb298cf69 100644
--- a/ydb/library/formats/arrow/protos/accessor.proto
+++ b/ydb/library/formats/arrow/protos/accessor.proto
@@ -1,5 +1,23 @@
package NKikimrArrowAccessorProto;
+message TDataExtractor {
+ optional string ClassName = 1;
+
+ message TJsonScanner {
+ optional bool FirstLevelOnly = 1 [default = false];
+ optional bool ForceSIMDJsonParsing = 2 [default = false];
+ }
+
+ message TSIMDJsonScanner {
+ optional bool FirstLevelOnly = 1 [default = false];
+ }
+
+ oneof Implementation {
+ TJsonScanner JsonScanner = 20;
+ TSIMDJsonScanner SIMDJsonScanner = 21;
+ }
+}
+
message TRequestedConstructor {
optional string ClassName = 1;
@@ -15,6 +33,7 @@ message TRequestedConstructor {
optional uint32 ColumnsLimit = 2 [default = 1024];
optional uint32 ChunkMemoryLimit = 3 [default = 50000000];
optional double OthersAllowedFraction = 4 [default = 0.05];
+ optional TDataExtractor DataExtractor = 5;
}
optional TSettings Settings = 1;
}
@@ -41,6 +60,7 @@ message TConstructor {
optional uint32 ColumnsLimit = 2 [default = 1024];
optional uint32 ChunkMemoryLimit = 3 [default = 50000000];
optional double OthersAllowedFraction = 4 [default = 0.05];
+ optional TDataExtractor DataExtractor = 5;
}
optional TSettings Settings = 1;
}
diff --git a/ydb/library/grpc/server/grpc_server.h b/ydb/library/grpc/server/grpc_server.h
index f09dc3ecaf..6777271b45 100644
--- a/ydb/library/grpc/server/grpc_server.h
+++ b/ydb/library/grpc/server/grpc_server.h
@@ -104,6 +104,8 @@ struct TServerOptions {
// Mapping to particular compression algorithm depends on client.
DECLARE_FIELD(DefaultCompressionLevel, grpc_compression_level, GRPC_COMPRESS_LEVEL_NONE);
+ DECLARE_FIELD(DefaultCompressionAlgorithm, grpc_compression_algorithm, GRPC_COMPRESS_NONE);
+
//! Custom configurator for ServerBuilder.
DECLARE_FIELD(ServerBuilderMutator, std::function<void(grpc::ServerBuilder&)>, [](grpc::ServerBuilder&){});
diff --git a/ydb/library/login/login.cpp b/ydb/library/login/login.cpp
index 569b4651da..28e5d0a2fe 100644
--- a/ydb/library/login/login.cpp
+++ b/ydb/library/login/login.cpp
@@ -343,7 +343,7 @@ TLoginProvider::TRemoveGroupResponse TLoginProvider::RemoveGroup(const TString&
return response;
}
-std::vector<TString> TLoginProvider::GetGroupsMembership(const TString& member) {
+std::vector<TString> TLoginProvider::GetGroupsMembership(const TString& member) const {
std::vector<TString> groups;
std::unordered_set<TString> visited;
std::deque<TString> queue;
diff --git a/ydb/library/login/login.h b/ydb/library/login/login.h
index f124b47aaa..e5dcbd61eb 100644
--- a/ydb/library/login/login.h
+++ b/ydb/library/login/login.h
@@ -225,7 +225,7 @@ public:
const TCacheSettings& cacheSettings);
~TLoginProvider();
- std::vector<TString> GetGroupsMembership(const TString& member);
+ std::vector<TString> GetGroupsMembership(const TString& member) const;
static TString GetTokenAudience(const TString& token);
static std::chrono::system_clock::time_point GetTokenExpiresAt(const TString& token);
static TString SanitizeJwtToken(const TString& token);
diff --git a/ydb/library/workload/benchmark_base/workload.cpp b/ydb/library/workload/benchmark_base/workload.cpp
index 1b99fe6ab0..7e5d09aaff 100644
--- a/ydb/library/workload/benchmark_base/workload.cpp
+++ b/ydb/library/workload/benchmark_base/workload.cpp
@@ -82,13 +82,18 @@ void TWorkloadGeneratorBase::GenerateDDLForTable(IOutputStream& result, const NJ
}
result << "WITH (" << Endl;
- if (Params.GetStoreType() == TWorkloadBaseParams::EStoreType::ExternalS3) {
+ switch (Params.GetStoreType()) {
+ case TWorkloadBaseParams::EStoreType::ExternalS3:
result << " DATA_SOURCE = \""+ Params.GetFullTableName(nullptr) + "_s3_external_source\", FORMAT = \"parquet\", LOCATION = \"" << Params.GetS3Prefix()
<< "/" << (single ? TFsPath(Params.GetPath()).GetName() : (tableName + "/")) << "\"" << Endl;
- } else {
- if (Params.GetStoreType() == TWorkloadBaseParams::EStoreType::Column) {
- result << " STORE = COLUMN," << Endl;
- }
+ break;
+ case TWorkloadBaseParams::EStoreType::Column:
+ result << " STORE = COLUMN," << Endl;
+ result << " AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = " << table["partitioning"].GetUIntegerSafe(64) << Endl;
+ break;
+ case TWorkloadBaseParams::EStoreType::Row:
+ result << " STORE = ROW," << Endl;
+ result << " AUTO_PARTITIONING_PARTITION_SIZE_MB = " << Params.GetPartitionSizeMb() << ", " << Endl;
result << " AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = " << table["partitioning"].GetUIntegerSafe(64) << Endl;
}
result << ");" << Endl;
@@ -170,6 +175,8 @@ void TWorkloadBaseParams::ConfigureOpts(NLastGetopt::TOpts& opts, const ECommand
opts.AddLongOption("string", "Use String type in tables instead Utf8 one.").NoArgument().StoreValue(&StringType, "String");
opts.AddLongOption("datetime", "Use Date and Timestamp types in tables instead Date32 and Timestamp64 ones.").NoArgument()
.StoreValue(&DateType, "Date").StoreValue(&TimestampType, "Timestamp");
+ opts.AddLongOption("partition-size", "Maximum partition size in megabytes (AUTO_PARTITIONING_PARTITION_SIZE_MB) for row tables.")
+ .DefaultValue(PartitionSizeMb).StoreResult(&PartitionSizeMb);
break;
case TWorkloadParams::ECommandType::Root:
opts.AddLongOption('p', "path", "Path where benchmark tables are located")
diff --git a/ydb/library/workload/benchmark_base/workload.h b/ydb/library/workload/benchmark_base/workload.h
index 9809dd6866..52cb0c2774 100644
--- a/ydb/library/workload/benchmark_base/workload.h
+++ b/ydb/library/workload/benchmark_base/workload.h
@@ -32,6 +32,7 @@ public:
YDB_READONLY(TString, StringType, "Utf8");
YDB_READONLY(TString, DateType, "Date32");
YDB_READONLY(TString, TimestampType, "Timestamp64");
+ YDB_READONLY(ui64, PartitionSizeMb, 2000);
};
class TWorkloadGeneratorBase : public IWorkloadQueryGenerator {
diff --git a/ydb/library/workload/log/select_queries.sql b/ydb/library/workload/log/select_queries.sql
index 92d58e4aad..f8d3c66aff 100644
--- a/ydb/library/workload/log/select_queries.sql
+++ b/ydb/library/workload/log/select_queries.sql
@@ -6,8 +6,97 @@ SELECT COUNT(*) FROM `{table}` WHERE level <> 0;
SELECT SUM(level), COUNT(*), AVG(CAST(JSON_VALUE(metadata, "$.response_time") AS DOUBLE)) FROM `{table}`;
-- Q3: Средние значения
SELECT AVG(CAST(request_id AS Int64)) FROM `{table}`;
+-- Q4: Уникальные значения
+SELECT COUNT(DISTINCT request_id) FROM `{table}`;
+-- Q5: Уникальные сообщения об ошибках
+SELECT COUNT(DISTINCT message) FROM `{table}`;
-- Q6: Временной диапазон логов
SELECT MIN(timestamp), MAX(timestamp) FROM `{table}`;
+-- Q7: Группировка по уровню важности
+SELECT level, COUNT(*) as count
+FROM `{table}`
+WHERE level <> 0
+GROUP BY level
+ORDER BY count DESC;
+-- Q8: Топ-10 сервисов по уникальным request_id
+SELECT service_name, COUNT(DISTINCT request_id) AS u
+FROM `{table}`
+GROUP BY service_name
+ORDER BY u DESC
+LIMIT 10;
+-- Q9: Комплексная агрегация по сервисам
+SELECT
+ service_name,
+ SUM(level),
+ COUNT(*) AS c,
+ AVG(CAST(JSON_VALUE(metadata, "$.response_time") AS DOUBLE)),
+ COUNT(DISTINCT request_id)
+FROM `{table}`
+GROUP BY service_name
+ORDER BY c DESC
+LIMIT 10;
+-- Q10: Группировка по компонентам с подсчетом уникальных request_id
+SELECT component, COUNT(DISTINCT request_id) AS u
+FROM `{table}`
+WHERE component <> ''
+GROUP BY component
+ORDER BY u DESC
+LIMIT 10;
+-- Q11: Группировка по компоненту и сервису
+SELECT component, service_name, COUNT(DISTINCT request_id) AS u
+FROM `{table}`
+WHERE component <> ''
+GROUP BY component, service_name
+ORDER BY u DESC
+LIMIT 10;
+-- Q12: Топ сообщений об ошибках
+SELECT message, COUNT(*) AS c
+FROM `{table}`
+WHERE message <> ''
+GROUP BY message
+ORDER BY c DESC
+LIMIT 10;
+-- Q13: Уникальные request_id по сообщениям
+SELECT message, COUNT(DISTINCT request_id) AS u
+FROM `{table}`
+WHERE message <> ''
+GROUP BY message
+ORDER BY u DESC
+LIMIT 10;
+-- Q14: Группировка по уровню и сообщению
+SELECT level, message, COUNT(*) AS c
+FROM `{table}`
+WHERE message <> ''
+GROUP BY level, message
+ORDER BY c DESC
+LIMIT 10;
+-- Q15: Топ по request_id
+SELECT request_id, COUNT(*) as count
+FROM `{table}`
+GROUP BY request_id
+ORDER BY count DESC
+LIMIT 10;
+-- Q16: Группировка по request_id и сообщению
+SELECT request_id, message, COUNT(*) as count
+FROM `{table}`
+GROUP BY request_id, message
+ORDER BY count DESC
+LIMIT 10;
+-- Q17: Простая группировка без сортировки
+SELECT request_id, message, COUNT(*)
+FROM `{table}`
+GROUP BY request_id, message
+LIMIT 10;
+-- Q18: Группировка с извлечением минут
+SELECT
+ request_id,
+ m,
+ message,
+ COUNT(*) as count
+FROM `{table}`
+GROUP BY request_id, CAST(CAST(timestamp AS Uint64) / 60000000 * 60000000 AS DateTime) AS m, message
+ORDER BY count DESC
+LIMIT 10;
-- Q19: Поиск по конкретному request_id
SELECT request_id
FROM `{table}`
@@ -16,6 +105,31 @@ WHERE request_id = '435090932899640449';
SELECT COUNT(*)
FROM `{table}`
WHERE message LIKE '%error%';
+-- Q21: Анализ ошибок с URL
+SELECT
+ message,
+ MIN(JSON_VALUE(metadata, "$.url")),
+ COUNT(*) AS c
+FROM `{table}`
+WHERE JSON_VALUE(metadata, "$.url") LIKE '%api%'
+ AND message <> ''
+GROUP BY message
+ORDER BY c DESC
+LIMIT 10;
+-- Q22: Сложный поиск с несколькими условиями
+SELECT
+ message,
+ MIN(JSON_VALUE(metadata, "$.url")),
+ MIN(JSON_VALUE(metadata, "$.title")),
+ COUNT(*) AS c,
+ COUNT(DISTINCT request_id)
+FROM `{table}`
+WHERE JSON_VALUE(metadata, "$.title") LIKE '%Error%'
+ AND JSON_VALUE(metadata, "$.url") NOT LIKE '%api%'
+ AND message <> ''
+GROUP BY message
+ORDER BY c DESC
+LIMIT 10;
-- Q23: Выборка с сортировкой по времени
SELECT *
FROM `{table}`
@@ -40,3 +154,299 @@ FROM `{table}`
WHERE message <> ''
ORDER BY timestamp, message
LIMIT 10;
+-- Q27: Анализ длины сообщений
+SELECT
+ service_name,
+ AVG(Unicode::GetLength(message)) AS l,
+ COUNT(*) AS c
+FROM `{table}`
+WHERE message <> ''
+GROUP BY service_name
+HAVING COUNT(*) > 1000
+ORDER BY l DESC
+LIMIT 25;
+-- Q28: Анализ источников ошибок
+SELECT
+ component,
+ AVG(Unicode::GetLength(message)) AS l,
+ COUNT(*) AS c,
+ MIN(message)
+FROM `{table}`
+WHERE message <> ''
+GROUP BY component
+HAVING COUNT(*) > 1000
+ORDER BY l DESC
+LIMIT 25;
+-- Q29: Множественные суммы
+SELECT
+ SUM(level),
+ SUM(level + 1),
+ SUM(level + 2),
+ SUM(level + 3),
+ SUM(level + 4),
+ SUM(level + 5),
+ SUM(level + 6),
+ SUM(level + 7),
+ SUM(level + 8),
+ SUM(level + 9),
+ SUM(level + 10),
+ SUM(level + 11),
+ SUM(level + 12),
+ SUM(level + 13),
+ SUM(level + 14),
+ SUM(level + 15),
+ SUM(level + 16),
+ SUM(level + 17),
+ SUM(level + 18),
+ SUM(level + 19),
+ SUM(level + 20),
+ SUM(level + 21),
+ SUM(level + 22),
+ SUM(level + 23),
+ SUM(level + 24),
+ SUM(level + 25),
+ SUM(level + 26),
+ SUM(level + 27),
+ SUM(level + 28),
+ SUM(level + 29),
+ SUM(level + 30),
+ SUM(level + 31),
+ SUM(level + 32),
+ SUM(level + 33),
+ SUM(level + 34),
+ SUM(level + 35),
+ SUM(level + 36),
+ SUM(level + 37),
+ SUM(level + 38),
+ SUM(level + 39),
+ SUM(level + 40),
+ SUM(level + 41),
+ SUM(level + 42),
+ SUM(level + 43),
+ SUM(level + 44),
+ SUM(level + 45),
+ SUM(level + 46),
+ SUM(level + 47),
+ SUM(level + 48),
+ SUM(level + 49),
+ SUM(level + 50),
+ SUM(level + 51),
+ SUM(level + 52),
+ SUM(level + 53),
+ SUM(level + 54),
+ SUM(level + 55),
+ SUM(level + 56),
+ SUM(level + 57),
+ SUM(level + 58),
+ SUM(level + 59),
+ SUM(level + 60),
+ SUM(level + 61),
+ SUM(level + 62),
+ SUM(level + 63),
+ SUM(level + 64),
+ SUM(level + 65),
+ SUM(level + 66),
+ SUM(level + 67),
+ SUM(level + 68),
+ SUM(level + 69),
+ SUM(level + 70),
+ SUM(level + 71),
+ SUM(level + 72),
+ SUM(level + 73),
+ SUM(level + 74),
+ SUM(level + 75),
+ SUM(level + 76),
+ SUM(level + 77),
+ SUM(level + 78),
+ SUM(level + 79),
+ SUM(level + 80),
+ SUM(level + 81),
+ SUM(level + 82),
+ SUM(level + 83),
+ SUM(level + 84),
+ SUM(level + 85),
+ SUM(level + 86),
+ SUM(level + 87),
+ SUM(level + 88),
+ SUM(level + 89)
+FROM `{table}`;
+-- Q30: Группировка по нескольким полям
+SELECT
+ level,
+ client_ip,
+ COUNT(*) AS c,
+ SUM(CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32)),
+ AVG(CAST(JSON_VALUE(metadata, "$.response_time") AS DOUBLE))
+FROM `{table}`
+WHERE message <> ''
+GROUP BY level, JSON_VALUE(metadata, "$.client_ip") AS client_ip
+ORDER BY c DESC
+LIMIT 10;
+-- Q31: Аналогичная группировка с другими полями
+SELECT
+ request_id,
+ client_ip,
+COUNT(*) AS c,
+ SUM(CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32)),
+ AVG(CAST(JSON_VALUE(metadata, "$.response_time") AS DOUBLE))
+FROM `{table}`
+WHERE message <> ''
+GROUP BY request_id, JSON_VALUE(metadata, "$.client_ip") AS client_ip
+ORDER BY c DESC
+LIMIT 10;
+-- Q32: Похожая группировка без фильтра
+SELECT
+ request_id,
+ client_ip,
+ COUNT(*) AS c,
+ SUM(CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32)),
+ AVG(CAST(JSON_VALUE(metadata, "$.response_time") AS DOUBLE))
+FROM `{table}`
+GROUP BY request_id, JSON_VALUE(metadata, "$.client_ip") AS client_ip
+ORDER BY c DESC
+LIMIT 10;
+-- Q33: Группировка по URL
+SELECT
+ url,
+ COUNT(*) AS c
+FROM `{table}`
+GROUP BY JSON_VALUE(metadata, "$.url") AS url
+ORDER BY c DESC
+LIMIT 10;
+-- Q34: Группировка с константой
+SELECT
+ 1,
+ url,
+ COUNT(*) AS c
+FROM `{table}`
+GROUP BY JSON_VALUE(metadata, "$.url") AS url
+ORDER BY c DESC
+LIMIT 10;
+-- Q35: Группировка по IP с вычислениями
+SELECT
+ client_ip
+ client_ip_1,
+ client_ip_2,
+ client_ip_3,
+ COUNT(*) AS c
+FROM `{table}`
+GROUP BY
+ JSON_VALUE(metadata, "$.client_ip") AS client_ip,
+ CAST(JSON_VALUE(metadata, "$.client_ip") AS Int64) - 1 AS client_ip_1,
+ CAST(JSON_VALUE(metadata, "$.client_ip") AS Int64) - 2 AS client_ip_2,
+ CAST(JSON_VALUE(metadata, "$.client_ip") AS Int64) - 3 AS client_ip_3
+ORDER BY c DESC
+LIMIT 10;
+-- Q36: Сложная фильтрация по датам
+SELECT
+ url,
+ COUNT(*) AS views
+FROM `{table}`
+WHERE service_name = 'service1'
+ AND timestamp BETWEEN (CurrentUtcTimestamp() - Interval("P7D")) AND CurrentUtcTimestamp()
+ AND CAST(JSON_VALUE(metadata, "$.dont_count") AS Int32) = 0
+ AND CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32) = 0
+ AND JSON_VALUE(metadata, "$.url") <> ''
+GROUP BY JSON_VALUE(metadata, "$.url") as url
+ORDER BY views DESC
+LIMIT 10;
+-- Q37: Аналогичный запрос для заголовков
+SELECT
+ title,
+ COUNT(*) AS views
+FROM `{table}`
+WHERE service_name = 'service1'
+ AND timestamp BETWEEN (CurrentUtcTimestamp() - Interval("P7D")) AND CurrentUtcTimestamp()
+ AND CAST(JSON_VALUE(metadata, "$.dont_count") AS Int32) = 0
+ AND CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32) = 0
+ AND JSON_VALUE(metadata, "$.title") <> ''
+GROUP BY JSON_VALUE(metadata, "$.title") as title
+ORDER BY views DESC
+LIMIT 10;
+-- Q38: Сложная фильтрация с отступом
+SELECT
+ url,
+ COUNT(*) AS views
+FROM `{table}`
+WHERE service_name = 'service1'
+ AND timestamp BETWEEN (CurrentUtcTimestamp() - Interval("P7D")) AND CurrentUtcTimestamp()
+ AND CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32) = 0
+ AND CAST(JSON_VALUE(metadata, "$.is_link") AS Int32) <> 0
+ AND CAST(JSON_VALUE(metadata, "$.is_download") AS Int32) = 0
+GROUP BY JSON_VALUE(metadata, "$.url") AS url
+ORDER BY views DESC
+LIMIT 10
+OFFSET 1000;
+-- Q39: Сложная группировка с множеством условий
+SELECT
+ traffic_source_id,
+ search_engine_id,
+ adv_engine_id,
+ src,
+ dst,
+ COUNT(*) AS views
+FROM `{table}`
+WHERE service_name = 'service1'
+ AND timestamp BETWEEN (CurrentUtcTimestamp() - Interval("P7D")) AND CurrentUtcTimestamp()
+ AND CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32) = 0
+GROUP BY
+ JSON_VALUE(metadata, "$.traffic_source_id") AS traffic_source_id,
+ JSON_VALUE(metadata, "$.search_engine_id") AS search_engine_id,
+ JSON_VALUE(metadata, "$.adv_engine_id") AS adv_engine_id,
+ CASE
+ WHEN JSON_VALUE(metadata, "$.search_engine_id") = '0'
+ AND JSON_VALUE(metadata, "$.adv_engine_id") = '0'
+ THEN JSON_VALUE(metadata, "$.referer")
+ ELSE ''
+ END AS src,
+ JSON_VALUE(metadata, "$.url") AS dst
+ORDER BY views DESC
+LIMIT 10
+OFFSET 1000;
+-- Q40: Группировка по хешу URL
+SELECT
+ url_hash,
+ date,
+ COUNT(*) AS views
+FROM `{table}`
+WHERE service_name = 'service1'
+ AND timestamp BETWEEN (CurrentUtcTimestamp() - Interval("P7D")) AND CurrentUtcTimestamp()
+ AND CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32) = 0
+ AND JSON_VALUE(metadata, "$.traffic_source_id") IN ('-1', '6')
+ AND JSON_VALUE(metadata, "$.referer_hash") = '3594120000172545465'
+GROUP BY
+ JSON_VALUE(metadata, "$.url_hash") AS url_hash,
+ CAST(timestamp AS Date) AS date
+ORDER BY views DESC
+LIMIT 10
+OFFSET 100;
+-- Q41: Сложная группировка по размерам окна
+SELECT
+ window_client_width,
+ window_client_height,
+ COUNT(*) AS views
+FROM `{table}`
+WHERE service_name = 'service1'
+ AND timestamp BETWEEN (CurrentUtcTimestamp() - Interval("P7D")) AND CurrentUtcTimestamp()
+ AND CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32) = 0
+ AND CAST(JSON_VALUE(metadata, "$.dont_count") AS Int32) = 0
+ AND JSON_VALUE(metadata, "$.url_hash") = '2868770270353813622'
+GROUP BY
+ JSON_VALUE(metadata, "$.window_client_width") AS window_client_width,
+ JSON_VALUE(metadata, "$.window_client_height") AS window_client_height
+ORDER BY views DESC
+LIMIT 10
+OFFSET 10000;
+-- Q42: Группировка по минутам
+SELECT
+ M,
+ COUNT(*) AS views
+FROM `{table}`
+WHERE service_name = 'service1'
+ AND timestamp BETWEEN (CurrentUtcTimestamp() - Interval("P7D")) AND CurrentUtcTimestamp()
+ AND CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32) = 0
+ AND CAST(JSON_VALUE(metadata, "$.dont_count") AS Int32) = 0
+GROUP BY CAST(CAST(timestamp AS Uint64) / 60000000 * 60000000 AS DateTime) AS M
+ORDER BY M
+LIMIT 10
+OFFSET 1000; \ No newline at end of file
diff --git a/ydb/library/workload/log/select_queries_original.sql b/ydb/library/workload/log/select_queries_original.sql
deleted file mode 100644
index f8d3c66aff..0000000000
--- a/ydb/library/workload/log/select_queries_original.sql
+++ /dev/null
@@ -1,452 +0,0 @@
--- Q0: Базовый подсчет всех записей
-SELECT COUNT(*) FROM `{table}`;
--- Q1: Подсчет записей с определенным уровнем (аналог AdvEngineID <> 0)
-SELECT COUNT(*) FROM `{table}` WHERE level <> 0;
--- Q2: Агрегация нескольких метрик
-SELECT SUM(level), COUNT(*), AVG(CAST(JSON_VALUE(metadata, "$.response_time") AS DOUBLE)) FROM `{table}`;
--- Q3: Средние значения
-SELECT AVG(CAST(request_id AS Int64)) FROM `{table}`;
--- Q4: Уникальные значения
-SELECT COUNT(DISTINCT request_id) FROM `{table}`;
--- Q5: Уникальные сообщения об ошибках
-SELECT COUNT(DISTINCT message) FROM `{table}`;
--- Q6: Временной диапазон логов
-SELECT MIN(timestamp), MAX(timestamp) FROM `{table}`;
--- Q7: Группировка по уровню важности
-SELECT level, COUNT(*) as count
-FROM `{table}`
-WHERE level <> 0
-GROUP BY level
-ORDER BY count DESC;
--- Q8: Топ-10 сервисов по уникальным request_id
-SELECT service_name, COUNT(DISTINCT request_id) AS u
-FROM `{table}`
-GROUP BY service_name
-ORDER BY u DESC
-LIMIT 10;
--- Q9: Комплексная агрегация по сервисам
-SELECT
- service_name,
- SUM(level),
- COUNT(*) AS c,
- AVG(CAST(JSON_VALUE(metadata, "$.response_time") AS DOUBLE)),
- COUNT(DISTINCT request_id)
-FROM `{table}`
-GROUP BY service_name
-ORDER BY c DESC
-LIMIT 10;
--- Q10: Группировка по компонентам с подсчетом уникальных request_id
-SELECT component, COUNT(DISTINCT request_id) AS u
-FROM `{table}`
-WHERE component <> ''
-GROUP BY component
-ORDER BY u DESC
-LIMIT 10;
--- Q11: Группировка по компоненту и сервису
-SELECT component, service_name, COUNT(DISTINCT request_id) AS u
-FROM `{table}`
-WHERE component <> ''
-GROUP BY component, service_name
-ORDER BY u DESC
-LIMIT 10;
--- Q12: Топ сообщений об ошибках
-SELECT message, COUNT(*) AS c
-FROM `{table}`
-WHERE message <> ''
-GROUP BY message
-ORDER BY c DESC
-LIMIT 10;
--- Q13: Уникальные request_id по сообщениям
-SELECT message, COUNT(DISTINCT request_id) AS u
-FROM `{table}`
-WHERE message <> ''
-GROUP BY message
-ORDER BY u DESC
-LIMIT 10;
--- Q14: Группировка по уровню и сообщению
-SELECT level, message, COUNT(*) AS c
-FROM `{table}`
-WHERE message <> ''
-GROUP BY level, message
-ORDER BY c DESC
-LIMIT 10;
--- Q15: Топ по request_id
-SELECT request_id, COUNT(*) as count
-FROM `{table}`
-GROUP BY request_id
-ORDER BY count DESC
-LIMIT 10;
--- Q16: Группировка по request_id и сообщению
-SELECT request_id, message, COUNT(*) as count
-FROM `{table}`
-GROUP BY request_id, message
-ORDER BY count DESC
-LIMIT 10;
--- Q17: Простая группировка без сортировки
-SELECT request_id, message, COUNT(*)
-FROM `{table}`
-GROUP BY request_id, message
-LIMIT 10;
--- Q18: Группировка с извлечением минут
-SELECT
- request_id,
- m,
- message,
- COUNT(*) as count
-FROM `{table}`
-GROUP BY request_id, CAST(CAST(timestamp AS Uint64) / 60000000 * 60000000 AS DateTime) AS m, message
-ORDER BY count DESC
-LIMIT 10;
--- Q19: Поиск по конкретному request_id
-SELECT request_id
-FROM `{table}`
-WHERE request_id = '435090932899640449';
--- Q20: Поиск по подстроке в message
-SELECT COUNT(*)
-FROM `{table}`
-WHERE message LIKE '%error%';
--- Q21: Анализ ошибок с URL
-SELECT
- message,
- MIN(JSON_VALUE(metadata, "$.url")),
- COUNT(*) AS c
-FROM `{table}`
-WHERE JSON_VALUE(metadata, "$.url") LIKE '%api%'
- AND message <> ''
-GROUP BY message
-ORDER BY c DESC
-LIMIT 10;
--- Q22: Сложный поиск с несколькими условиями
-SELECT
- message,
- MIN(JSON_VALUE(metadata, "$.url")),
- MIN(JSON_VALUE(metadata, "$.title")),
- COUNT(*) AS c,
- COUNT(DISTINCT request_id)
-FROM `{table}`
-WHERE JSON_VALUE(metadata, "$.title") LIKE '%Error%'
- AND JSON_VALUE(metadata, "$.url") NOT LIKE '%api%'
- AND message <> ''
-GROUP BY message
-ORDER BY c DESC
-LIMIT 10;
--- Q23: Выборка с сортировкой по времени
-SELECT *
-FROM `{table}`
-WHERE message LIKE '%error%'
-ORDER BY timestamp
-LIMIT 10;
--- Q24: Простая выборка сообщений
-SELECT message
-FROM `{table}`
-WHERE message <> ''
-ORDER BY timestamp
-LIMIT 10;
--- Q25: Сортировка по сообщению
-SELECT message
-FROM `{table}`
-WHERE message <> ''
-ORDER BY message
-LIMIT 10;
--- Q26: Сортировка по времени и сообщению
-SELECT message
-FROM `{table}`
-WHERE message <> ''
-ORDER BY timestamp, message
-LIMIT 10;
--- Q27: Анализ длины сообщений
-SELECT
- service_name,
- AVG(Unicode::GetLength(message)) AS l,
- COUNT(*) AS c
-FROM `{table}`
-WHERE message <> ''
-GROUP BY service_name
-HAVING COUNT(*) > 1000
-ORDER BY l DESC
-LIMIT 25;
--- Q28: Анализ источников ошибок
-SELECT
- component,
- AVG(Unicode::GetLength(message)) AS l,
- COUNT(*) AS c,
- MIN(message)
-FROM `{table}`
-WHERE message <> ''
-GROUP BY component
-HAVING COUNT(*) > 1000
-ORDER BY l DESC
-LIMIT 25;
--- Q29: Множественные суммы
-SELECT
- SUM(level),
- SUM(level + 1),
- SUM(level + 2),
- SUM(level + 3),
- SUM(level + 4),
- SUM(level + 5),
- SUM(level + 6),
- SUM(level + 7),
- SUM(level + 8),
- SUM(level + 9),
- SUM(level + 10),
- SUM(level + 11),
- SUM(level + 12),
- SUM(level + 13),
- SUM(level + 14),
- SUM(level + 15),
- SUM(level + 16),
- SUM(level + 17),
- SUM(level + 18),
- SUM(level + 19),
- SUM(level + 20),
- SUM(level + 21),
- SUM(level + 22),
- SUM(level + 23),
- SUM(level + 24),
- SUM(level + 25),
- SUM(level + 26),
- SUM(level + 27),
- SUM(level + 28),
- SUM(level + 29),
- SUM(level + 30),
- SUM(level + 31),
- SUM(level + 32),
- SUM(level + 33),
- SUM(level + 34),
- SUM(level + 35),
- SUM(level + 36),
- SUM(level + 37),
- SUM(level + 38),
- SUM(level + 39),
- SUM(level + 40),
- SUM(level + 41),
- SUM(level + 42),
- SUM(level + 43),
- SUM(level + 44),
- SUM(level + 45),
- SUM(level + 46),
- SUM(level + 47),
- SUM(level + 48),
- SUM(level + 49),
- SUM(level + 50),
- SUM(level + 51),
- SUM(level + 52),
- SUM(level + 53),
- SUM(level + 54),
- SUM(level + 55),
- SUM(level + 56),
- SUM(level + 57),
- SUM(level + 58),
- SUM(level + 59),
- SUM(level + 60),
- SUM(level + 61),
- SUM(level + 62),
- SUM(level + 63),
- SUM(level + 64),
- SUM(level + 65),
- SUM(level + 66),
- SUM(level + 67),
- SUM(level + 68),
- SUM(level + 69),
- SUM(level + 70),
- SUM(level + 71),
- SUM(level + 72),
- SUM(level + 73),
- SUM(level + 74),
- SUM(level + 75),
- SUM(level + 76),
- SUM(level + 77),
- SUM(level + 78),
- SUM(level + 79),
- SUM(level + 80),
- SUM(level + 81),
- SUM(level + 82),
- SUM(level + 83),
- SUM(level + 84),
- SUM(level + 85),
- SUM(level + 86),
- SUM(level + 87),
- SUM(level + 88),
- SUM(level + 89)
-FROM `{table}`;
--- Q30: Группировка по нескольким полям
-SELECT
- level,
- client_ip,
- COUNT(*) AS c,
- SUM(CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32)),
- AVG(CAST(JSON_VALUE(metadata, "$.response_time") AS DOUBLE))
-FROM `{table}`
-WHERE message <> ''
-GROUP BY level, JSON_VALUE(metadata, "$.client_ip") AS client_ip
-ORDER BY c DESC
-LIMIT 10;
--- Q31: Аналогичная группировка с другими полями
-SELECT
- request_id,
- client_ip,
-COUNT(*) AS c,
- SUM(CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32)),
- AVG(CAST(JSON_VALUE(metadata, "$.response_time") AS DOUBLE))
-FROM `{table}`
-WHERE message <> ''
-GROUP BY request_id, JSON_VALUE(metadata, "$.client_ip") AS client_ip
-ORDER BY c DESC
-LIMIT 10;
--- Q32: Похожая группировка без фильтра
-SELECT
- request_id,
- client_ip,
- COUNT(*) AS c,
- SUM(CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32)),
- AVG(CAST(JSON_VALUE(metadata, "$.response_time") AS DOUBLE))
-FROM `{table}`
-GROUP BY request_id, JSON_VALUE(metadata, "$.client_ip") AS client_ip
-ORDER BY c DESC
-LIMIT 10;
--- Q33: Группировка по URL
-SELECT
- url,
- COUNT(*) AS c
-FROM `{table}`
-GROUP BY JSON_VALUE(metadata, "$.url") AS url
-ORDER BY c DESC
-LIMIT 10;
--- Q34: Группировка с константой
-SELECT
- 1,
- url,
- COUNT(*) AS c
-FROM `{table}`
-GROUP BY JSON_VALUE(metadata, "$.url") AS url
-ORDER BY c DESC
-LIMIT 10;
--- Q35: Группировка по IP с вычислениями
-SELECT
- client_ip
- client_ip_1,
- client_ip_2,
- client_ip_3,
- COUNT(*) AS c
-FROM `{table}`
-GROUP BY
- JSON_VALUE(metadata, "$.client_ip") AS client_ip,
- CAST(JSON_VALUE(metadata, "$.client_ip") AS Int64) - 1 AS client_ip_1,
- CAST(JSON_VALUE(metadata, "$.client_ip") AS Int64) - 2 AS client_ip_2,
- CAST(JSON_VALUE(metadata, "$.client_ip") AS Int64) - 3 AS client_ip_3
-ORDER BY c DESC
-LIMIT 10;
--- Q36: Сложная фильтрация по датам
-SELECT
- url,
- COUNT(*) AS views
-FROM `{table}`
-WHERE service_name = 'service1'
- AND timestamp BETWEEN (CurrentUtcTimestamp() - Interval("P7D")) AND CurrentUtcTimestamp()
- AND CAST(JSON_VALUE(metadata, "$.dont_count") AS Int32) = 0
- AND CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32) = 0
- AND JSON_VALUE(metadata, "$.url") <> ''
-GROUP BY JSON_VALUE(metadata, "$.url") as url
-ORDER BY views DESC
-LIMIT 10;
--- Q37: Аналогичный запрос для заголовков
-SELECT
- title,
- COUNT(*) AS views
-FROM `{table}`
-WHERE service_name = 'service1'
- AND timestamp BETWEEN (CurrentUtcTimestamp() - Interval("P7D")) AND CurrentUtcTimestamp()
- AND CAST(JSON_VALUE(metadata, "$.dont_count") AS Int32) = 0
- AND CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32) = 0
- AND JSON_VALUE(metadata, "$.title") <> ''
-GROUP BY JSON_VALUE(metadata, "$.title") as title
-ORDER BY views DESC
-LIMIT 10;
--- Q38: Сложная фильтрация с отступом
-SELECT
- url,
- COUNT(*) AS views
-FROM `{table}`
-WHERE service_name = 'service1'
- AND timestamp BETWEEN (CurrentUtcTimestamp() - Interval("P7D")) AND CurrentUtcTimestamp()
- AND CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32) = 0
- AND CAST(JSON_VALUE(metadata, "$.is_link") AS Int32) <> 0
- AND CAST(JSON_VALUE(metadata, "$.is_download") AS Int32) = 0
-GROUP BY JSON_VALUE(metadata, "$.url") AS url
-ORDER BY views DESC
-LIMIT 10
-OFFSET 1000;
--- Q39: Сложная группировка с множеством условий
-SELECT
- traffic_source_id,
- search_engine_id,
- adv_engine_id,
- src,
- dst,
- COUNT(*) AS views
-FROM `{table}`
-WHERE service_name = 'service1'
- AND timestamp BETWEEN (CurrentUtcTimestamp() - Interval("P7D")) AND CurrentUtcTimestamp()
- AND CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32) = 0
-GROUP BY
- JSON_VALUE(metadata, "$.traffic_source_id") AS traffic_source_id,
- JSON_VALUE(metadata, "$.search_engine_id") AS search_engine_id,
- JSON_VALUE(metadata, "$.adv_engine_id") AS adv_engine_id,
- CASE
- WHEN JSON_VALUE(metadata, "$.search_engine_id") = '0'
- AND JSON_VALUE(metadata, "$.adv_engine_id") = '0'
- THEN JSON_VALUE(metadata, "$.referer")
- ELSE ''
- END AS src,
- JSON_VALUE(metadata, "$.url") AS dst
-ORDER BY views DESC
-LIMIT 10
-OFFSET 1000;
--- Q40: Группировка по хешу URL
-SELECT
- url_hash,
- date,
- COUNT(*) AS views
-FROM `{table}`
-WHERE service_name = 'service1'
- AND timestamp BETWEEN (CurrentUtcTimestamp() - Interval("P7D")) AND CurrentUtcTimestamp()
- AND CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32) = 0
- AND JSON_VALUE(metadata, "$.traffic_source_id") IN ('-1', '6')
- AND JSON_VALUE(metadata, "$.referer_hash") = '3594120000172545465'
-GROUP BY
- JSON_VALUE(metadata, "$.url_hash") AS url_hash,
- CAST(timestamp AS Date) AS date
-ORDER BY views DESC
-LIMIT 10
-OFFSET 100;
--- Q41: Сложная группировка по размерам окна
-SELECT
- window_client_width,
- window_client_height,
- COUNT(*) AS views
-FROM `{table}`
-WHERE service_name = 'service1'
- AND timestamp BETWEEN (CurrentUtcTimestamp() - Interval("P7D")) AND CurrentUtcTimestamp()
- AND CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32) = 0
- AND CAST(JSON_VALUE(metadata, "$.dont_count") AS Int32) = 0
- AND JSON_VALUE(metadata, "$.url_hash") = '2868770270353813622'
-GROUP BY
- JSON_VALUE(metadata, "$.window_client_width") AS window_client_width,
- JSON_VALUE(metadata, "$.window_client_height") AS window_client_height
-ORDER BY views DESC
-LIMIT 10
-OFFSET 10000;
--- Q42: Группировка по минутам
-SELECT
- M,
- COUNT(*) AS views
-FROM `{table}`
-WHERE service_name = 'service1'
- AND timestamp BETWEEN (CurrentUtcTimestamp() - Interval("P7D")) AND CurrentUtcTimestamp()
- AND CAST(JSON_VALUE(metadata, "$.is_refresh") AS Int32) = 0
- AND CAST(JSON_VALUE(metadata, "$.dont_count") AS Int32) = 0
-GROUP BY CAST(CAST(timestamp AS Uint64) / 60000000 * 60000000 AS DateTime) AS M
-ORDER BY M
-LIMIT 10
-OFFSET 1000; \ No newline at end of file
diff --git a/ydb/library/workload/log/ya.make b/ydb/library/workload/log/ya.make
index 8f93fcec40..d02dca1851 100644
--- a/ydb/library/workload/log/ya.make
+++ b/ydb/library/workload/log/ya.make
@@ -14,9 +14,6 @@ PEERDIR(
RESOURCE(
select_queries.sql workload_logs_select_queries.sql
-
- # Temporary disabled GROUP BY and DISNINCT queries
- # select_queries_original.sql workload_logs_select_queries.sql
)
GENERATE_ENUM_SERIALIZATION(log.h)
diff --git a/ydb/library/yaml_config/ya.make b/ydb/library/yaml_config/ya.make
index 0916c245d9..597ca1bb81 100644
--- a/ydb/library/yaml_config/ya.make
+++ b/ydb/library/yaml_config/ya.make
@@ -18,6 +18,7 @@ PEERDIR(
library/cpp/protobuf/json
ydb/core/base
ydb/core/cms/console/util
+ ydb/core/config/validation
ydb/core/erasure
ydb/core/protos
ydb/core/protos/out
diff --git a/ydb/library/yaml_config/yaml_config.cpp b/ydb/library/yaml_config/yaml_config.cpp
index 25e987f51a..16978e2f3e 100644
--- a/ydb/library/yaml_config/yaml_config.cpp
+++ b/ydb/library/yaml_config/yaml_config.cpp
@@ -7,6 +7,7 @@
#include <library/cpp/protobuf/json/json2proto.h>
#include <ydb/core/protos/netclassifier.pb.h>
+#include <ydb/core/config/validation/validators.h>
namespace NKikimr::NYamlConfig {
@@ -97,8 +98,32 @@ void ReplaceUnmanagedKinds(const NKikimrConfig::TAppConfig& from, NKikimrConfig:
}
}
+class TLegacyValidators
+ : public IConfigValidator
+{
+public:
+ EValidationResult ValidateConfig(
+ const NKikimrConfig::TAppConfig& config,
+ std::vector<TString>& msg) const override
+ {
+ auto res = NKikimr::NConfig::ValidateConfig(config, msg);
+ switch (res) {
+ case NKikimr::NConfig::EValidationResult::Ok:
+ return EValidationResult::Ok;
+ case NKikimr::NConfig::EValidationResult::Warn:
+ return EValidationResult::Warn;
+ case NKikimr::NConfig::EValidationResult::Error:
+ return EValidationResult::Error;
+ }
+ }
+};
+
class TDefaultConfigSwissKnife : public IConfigSwissKnife {
public:
+ TDefaultConfigSwissKnife() {
+ Validators["LegacyValidators"] = MakeSimpleShared<TLegacyValidators>();
+ }
+
bool VerifyReplaceRequest(const Ydb::Config::ReplaceConfigRequest&, Ydb::StatusIds::StatusCode&, NYql::TIssues&) const override {
return true;
}
@@ -117,5 +142,22 @@ std::unique_ptr<IConfigSwissKnife> CreateDefaultConfigSwissKnife() {
return std::make_unique<TDefaultConfigSwissKnife>();
}
+EValidationResult IConfigSwissKnife::ValidateConfig(
+ const NKikimrConfig::TAppConfig& config,
+ std::vector<TString>& msg) const
+{
+ for (const auto& [name, validator] : GetValidators()) {
+ EValidationResult result = validator->ValidateConfig(config, msg);
+ if (result == EValidationResult::Error) {
+ return EValidationResult::Error;
+ }
+ }
+
+ if (msg.size() > 0) {
+ return EValidationResult::Warn;
+ }
+
+ return EValidationResult::Ok;
+}
} // namespace NKikimr::NYamlConfig
diff --git a/ydb/library/yaml_config/yaml_config.h b/ydb/library/yaml_config/yaml_config.h
index 72712d1b92..9ce22841f2 100644
--- a/ydb/library/yaml_config/yaml_config.h
+++ b/ydb/library/yaml_config/yaml_config.h
@@ -84,18 +84,44 @@ void ResolveAndParseYamlConfig(
TString* resolvedYamlConfig = nullptr,
TString* resolvedJsonConfig = nullptr);
+enum class EValidationResult {
+ Ok,
+ Warn,
+ Error,
+};
+
+class IConfigValidator {
+public:
+ virtual ~IConfigValidator() = default;
+
+ virtual EValidationResult ValidateConfig(
+ const NKikimrConfig::TAppConfig& config,
+ std::vector<TString>& msg) const = 0;
+};
+
/**
* Replaces kinds not managed by yaml config (e.g. NetClassifierConfig) from config 'from' in config 'to'
* if corresponding configs are presenet in 'from'
*/
void ReplaceUnmanagedKinds(const NKikimrConfig::TAppConfig& from, NKikimrConfig::TAppConfig& to);
+using TValidatorsMap = TMap<TString, TSimpleSharedPtr<IConfigValidator>>;
+
class IConfigSwissKnife {
public:
virtual ~IConfigSwissKnife() = default;
virtual bool VerifyReplaceRequest(const Ydb::Config::ReplaceConfigRequest& request, Ydb::StatusIds::StatusCode& status, NYql::TIssues& issues) const = 0;
virtual bool VerifyMainConfig(const TString& config) const = 0;
virtual bool VerifyStorageConfig(const TString& config) const = 0;
+ virtual EValidationResult ValidateConfig(
+ const NKikimrConfig::TAppConfig& config,
+ std::vector<TString>& msg) const;
+
+ const TMap<TString, TSimpleSharedPtr<IConfigValidator>>& GetValidators() const {
+ return Validators;
+ }
+protected:
+ TMap<TString, TSimpleSharedPtr<IConfigValidator>> Validators;
};
diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor.h b/ydb/library/yql/dq/actors/compute/dq_compute_actor.h
index 06f170f365..74c13ab0c5 100644
--- a/ydb/library/yql/dq/actors/compute/dq_compute_actor.h
+++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor.h
@@ -256,6 +256,8 @@ struct TComputeRuntimeSettings {
i64 AsyncInputPushLimit = std::numeric_limits<i64>::max();
+ bool WithProgressStats = false;
+
inline bool CollectNone() const {
return StatsMode <= NDqProto::DQ_STATS_MODE_NONE;
}
diff --git a/ydb/library/yql/dq/actors/compute/dq_compute_actor_impl.h b/ydb/library/yql/dq/actors/compute/dq_compute_actor_impl.h
index 205b87b00b..c079e0677a 100644
--- a/ydb/library/yql/dq/actors/compute/dq_compute_actor_impl.h
+++ b/ydb/library/yql/dq/actors/compute/dq_compute_actor_impl.h
@@ -1776,7 +1776,7 @@ public:
for (auto& [inputIndex, sourceInfo] : SourcesMap) {
if (auto* source = sourceInfo.AsyncInput) {
- source->FillExtraStats(protoTask, last, GetMeteringStats());
+ source->FillExtraStats(protoTask, RuntimeSettings.WithProgressStats || last, GetMeteringStats());
}
}
FillTaskRunnerStats(Task.GetId(), Task.GetStageId(), *taskStats, protoTask, RuntimeSettings.GetCollectStatsLevel());
@@ -1892,7 +1892,7 @@ public:
}
if (auto* source = sinkInfo.AsyncOutput) {
- source->FillExtraStats(protoTask, last, GetMeteringStats());
+ source->FillExtraStats(protoTask, RuntimeSettings.WithProgressStats || last, GetMeteringStats());
}
}
@@ -1917,7 +1917,7 @@ public:
}
if (auto* transform = transformInfo.AsyncInput) {
- transform->FillExtraStats(protoTask, last, GetMeteringStats());
+ transform->FillExtraStats(protoTask, RuntimeSettings.WithProgressStats || last, GetMeteringStats());
}
}
@@ -1980,7 +1980,7 @@ public:
// TODO: what should happen in this case?
}
- static_cast<TDerived*>(this)->FillExtraStats(dst, last);
+ static_cast<TDerived*>(this)->FillExtraStats(dst, RuntimeSettings.WithProgressStats || last);
if (last && MemoryQuota) {
MemoryQuota->ResetProfileStats();
diff --git a/ydb/library/yql/dq/actors/protos/dq_events.proto b/ydb/library/yql/dq/actors/protos/dq_events.proto
index df08f74140..1277b3da14 100644
--- a/ydb/library/yql/dq/actors/protos/dq_events.proto
+++ b/ydb/library/yql/dq/actors/protos/dq_events.proto
@@ -141,6 +141,7 @@ message TComputeRuntimeSettings {
optional bool UseSpilling = 6;
optional uint32 TasksOnNodeCount = 5; // approx
optional TRlPath RlPath = 7;
+ optional bool WithProgressStats = 9 [default = false];
}
message TEvNewCheckpointCoordinator {
diff --git a/ydb/library/yql/dq/opt/dq_opt_build.cpp b/ydb/library/yql/dq/opt/dq_opt_build.cpp
index ef6e8a85e6..220213e6a5 100644
--- a/ydb/library/yql/dq/opt/dq_opt_build.cpp
+++ b/ydb/library/yql/dq/opt/dq_opt_build.cpp
@@ -645,15 +645,6 @@ TDqPhyStage DqEnableWideChannelsInputForStage(const TDqPhyStage& stage, TExprCon
}
bool CanPullReplicateScalars(const TDqPhyStage& stage) {
- if constexpr (!NYql::NBlockStreamIO::ReplicateScalars) {
- auto maybeFromFlow = stage.Program().Body().Maybe<TCoFromFlow>();
- if (!maybeFromFlow) {
- return false;
- }
-
- return bool(maybeFromFlow.Cast().Input().Maybe<TCoReplicateScalars>());
- }
-
return bool(stage.Program().Body().Maybe<TCoReplicateScalars>());
}
@@ -698,55 +689,29 @@ TDqPhyStage DqPullReplicateScalarsFromInputs(const TDqPhyStage& stage, TExprCont
TDqPhyStage childStage = conn.Output().Stage().Cast<TDqPhyStage>();
TCoLambda childProgram(ctx.DeepCopyLambda(childStage.Program().Ref()));
- TMaybeNode<TExprBase> newChildStage;
- TExprNode::TPtr argReplace;
- TExprNode::TPtr newArgNode = newArg.Ptr();
- if constexpr (!NYql::NBlockStreamIO::ReplicateScalars) {
- TCoReplicateScalars childReplicateScalars = childProgram.Body().Cast<TCoFromFlow>().Input().Cast<TCoReplicateScalars>();
-
- // replace FromFlow(ReplicateScalars(x, ...)) with FromFlow(x)
- newChildStage = Build<TDqPhyStage>(ctx, childStage.Pos())
- .InitFrom(childStage)
- .Program()
- .Args(childProgram.Args())
- .Body(ctx.ChangeChild(childProgram.Body().Ref(), TCoFromFlow::idx_Input, childReplicateScalars.Input().Ptr()))
- .Build()
- .Done();
- argReplace = Build<TCoFromFlow>(ctx, arg.Pos())
- .Input<TCoReplicateScalars>()
- .Input<TCoToFlow>()
- .Input(newArgNode)
- .Build()
- .Indexes(childReplicateScalars.Indexes())
- .Build()
- .Done()
- .Ptr();
- } else {
- TCoReplicateScalars childReplicateScalars = childProgram.Body().Cast<TCoReplicateScalars>();
-
- // replace (ReplicateScalars(x, ...)) with (x)
- newChildStage = Build<TDqPhyStage>(ctx, childStage.Pos())
- .InitFrom(childStage)
- .Program()
- .Args(childProgram.Args())
- .Body(childReplicateScalars.Input())
- .Build()
- .Done();
-
- argReplace = Build<TCoReplicateScalars>(ctx, arg.Pos())
- .Input(newArgNode)
- .Indexes(childReplicateScalars.Indexes())
- .Done()
- .Ptr();
- }
+ TCoReplicateScalars childReplicateScalars = childProgram.Body().Cast<TCoReplicateScalars>();
+ // replace (ReplicateScalars(x, ...)) with (x)
+ auto newChildStage = Build<TDqPhyStage>(ctx, childStage.Pos())
+ .InitFrom(childStage)
+ .Program()
+ .Args(childProgram.Args())
+ .Body(childReplicateScalars.Input())
+ .Build()
+ .Done();
auto newOutput = Build<TDqOutput>(ctx, conn.Output().Pos())
.InitFrom(conn.Output())
- .Stage(newChildStage.Cast().Ptr())
+ .Stage(newChildStage)
.Done();
+ newInputs.push_back(ctx.ChangeChild(conn.Ref(), TDqConnection::idx_Output, newOutput.Ptr()));
+ TExprNode::TPtr newArgNode = newArg.Ptr();
+ TExprNode::TPtr argReplace = Build<TCoReplicateScalars>(ctx, arg.Pos())
+ .Input(newArgNode)
+ .Indexes(childReplicateScalars.Indexes())
+ .Done()
+ .Ptr();
argsMap.emplace(arg.Raw(), argReplace);
- newInputs.push_back(ctx.ChangeChild(conn.Ref(), TDqConnection::idx_Output, newOutput.Ptr()));
} else {
argsMap.emplace(arg.Raw(), newArg.Ptr());
newInputs.push_back(stage.Inputs().Item(i).Ptr());
diff --git a/ydb/library/yql/dq/opt/dq_opt_dphyp_solver.h b/ydb/library/yql/dq/opt/dq_opt_dphyp_solver.h
index 32fb2fd04f..9d58042fb1 100644
--- a/ydb/library/yql/dq/opt/dq_opt_dphyp_solver.h
+++ b/ydb/library/yql/dq/opt/dq_opt_dphyp_solver.h
@@ -827,7 +827,7 @@ template <typename TNodeSet> std::array<std::shared_ptr<IBaseOptimizerNode>, 2>
std::shared_ptr<TJoinOptimizerNodeInternal> tree;
auto shuffleRightSideBestJoin = PickBestJoin(left, right, edge, false, true, maybeCardHint, maybeAlgoHint);
if (mapJoinStatistics.Cost < shuffleRightSideBestJoin.Stats.Cost) {
- tree = MakeJoinInternal(std::move(mapJoinStatistics), left, right, edge.LeftJoinKeys, edge.RightJoinKeys, edge.JoinKind, EJoinAlgoType::MapJoin, edge.LeftAny, edge.RightAny, right->LogicalOrderings);
+ tree = MakeJoinInternal(std::move(mapJoinStatistics), left, right, edge.LeftJoinKeys, edge.RightJoinKeys, edge.JoinKind, EJoinAlgoType::MapJoin, edge.LeftAny, edge.RightAny, left->LogicalOrderings);
tree->LogicalOrderings.InduceNewOrderings(edge.FDs | left->LogicalOrderings.GetFDs());
} else {
if (!shuffleRightSideBestJoin.IsReversed) {
diff --git a/ydb/library/yql/dq/opt/dq_opt_join.cpp b/ydb/library/yql/dq/opt/dq_opt_join.cpp
index e5873e4afc..8a9f694894 100644
--- a/ydb/library/yql/dq/opt/dq_opt_join.cpp
+++ b/ydb/library/yql/dq/opt/dq_opt_join.cpp
@@ -123,7 +123,8 @@ TMaybe<TJoinInputDesc> BuildDqJoin(
TExprContext& ctx,
const TTypeAnnotationContext& typeCtx,
TVector<TString>& subtreeLabels,
- const NYql::TOptimizerHints& hints
+ const NYql::TOptimizerHints& hints,
+ bool useCBO
)
{
TMaybe<TJoinInputDesc> left;
@@ -133,7 +134,7 @@ TMaybe<TJoinInputDesc> BuildDqJoin(
left = inputs.at(joinTuple.LeftScope().Cast<TCoAtom>().Value());
YQL_ENSURE(left, "unknown scope " << joinTuple.LeftScope().Cast<TCoAtom>().Value());
} else {
- left = BuildDqJoin(joinTuple.LeftScope().Cast<TCoEquiJoinTuple>(), inputs, mode, ctx, typeCtx, lhsLabels, hints);
+ left = BuildDqJoin(joinTuple.LeftScope().Cast<TCoEquiJoinTuple>(), inputs, mode, ctx, typeCtx, lhsLabels, hints, useCBO);
if (!left) {
return {};
}
@@ -146,7 +147,7 @@ TMaybe<TJoinInputDesc> BuildDqJoin(
right = inputs.at(joinTuple.RightScope().Cast<TCoAtom>().Value());
YQL_ENSURE(right, "unknown scope " << joinTuple.RightScope().Cast<TCoAtom>().Value());
} else {
- right = BuildDqJoin(joinTuple.RightScope().Cast<TCoEquiJoinTuple>(), inputs, mode, ctx, typeCtx, rhsLabels, hints);
+ right = BuildDqJoin(joinTuple.RightScope().Cast<TCoEquiJoinTuple>(), inputs, mode, ctx, typeCtx, rhsLabels, hints, useCBO);
if (!right) {
return {};
}
@@ -260,7 +261,7 @@ TMaybe<TJoinInputDesc> BuildDqJoin(
.Build()
.JoinAlgo(joinAlgo);
- auto getShuffleByExprList = [&](const TVector<NDq::TJoinColumn>& shuffleBy) -> TExprNode::TListType {
+ auto getShuffleByExprListFromSettings = [&](const TVector<NDq::TJoinColumn>& shuffleBy) -> TExprNode::TListType {
TExprNode::TListType shuffleByExprList;
for (const auto& column: shuffleBy) {
@@ -278,13 +279,40 @@ TMaybe<TJoinInputDesc> BuildDqJoin(
return shuffleByExprList;
};
- TExprNode::TListType shuffleLhsBy = getShuffleByExprList(linkSettings.ShuffleLhsBy);
+ auto getShuffleByExprListFromJoinKeys = [&](const TVector<TCoAtom>& joinKeys) {
+ TExprNode::TListType shuffleByExprList;
+
+ for (const auto& column: joinKeys) {
+ auto node =
+ ctx.Builder(joinTuple.Pos())
+ .List()
+ .Atom(0, column.StringValue())
+ .Seal()
+ .Build();
+
+ shuffleByExprList.emplace_back(std::move(node));
+ }
+
+ return shuffleByExprList;
+ };
+
+ TExprNode::TListType shuffleLhsBy;
+ if (useCBO) {
+ shuffleLhsBy = getShuffleByExprListFromSettings(linkSettings.ShuffleLhsBy);
+ } else {
+ shuffleLhsBy = getShuffleByExprListFromJoinKeys(leftJoinKeys);
+ }
dqJoinBuilder
.ShuffleLeftSideBy()
.Add(std::move(shuffleLhsBy))
.Build();
- TExprNode::TListType shuffleRhsBy = getShuffleByExprList(linkSettings.ShuffleRhsBy);
+ TExprNode::TListType shuffleRhsBy;
+ if (useCBO) {
+ shuffleRhsBy = getShuffleByExprListFromSettings(linkSettings.ShuffleRhsBy);
+ } else {
+ shuffleRhsBy = getShuffleByExprListFromJoinKeys(rightJoinKeys);
+ }
dqJoinBuilder
.ShuffleRightSideBy()
.Add(std::move(shuffleRhsBy))
@@ -478,7 +506,7 @@ TExprBase DqRewriteEquiJoin(
TExprBase DqRewriteEquiJoin(
const TExprBase& node,
EHashJoinMode mode,
- bool /* useCBO */,
+ bool useCBO,
TExprContext& ctx,
const TTypeAnnotationContext& typeCtx,
int& joinCounter,
@@ -501,7 +529,7 @@ TExprBase DqRewriteEquiJoin(
auto joinTuple = equiJoin.Arg(equiJoin.ArgCount() - 2).Cast<TCoEquiJoinTuple>();
TVector<TString> dummy;
- auto result = BuildDqJoin(joinTuple, inputs, mode, ctx, typeCtx, dummy, hints);
+ auto result = BuildDqJoin(joinTuple, inputs, mode, ctx, typeCtx, dummy, hints, useCBO);
if (!result) {
return node;
}
diff --git a/ydb/library/yql/dq/opt/dq_opt_join_cost_based.cpp b/ydb/library/yql/dq/opt/dq_opt_join_cost_based.cpp
index 79a69b3469..b90565682b 100644
--- a/ydb/library/yql/dq/opt/dq_opt_join_cost_based.cpp
+++ b/ydb/library/yql/dq/opt/dq_opt_join_cost_based.cpp
@@ -350,7 +350,7 @@ private:
if (postEnumerationShuffleElimination) {
EliminateShuffles(hypergraph, bestJoinOrder, orderingsFSM);
}
- auto resTree = ConvertFromInternal(bestJoinOrder, fdStorage);
+ auto resTree = ConvertFromInternal(bestJoinOrder, fdStorage, EnableShuffleElimination);
AddMissingConditions(hypergraph, resTree);
return resTree;
}
diff --git a/ydb/library/yql/dq/opt/dq_opt_join_tree_node.cpp b/ydb/library/yql/dq/opt/dq_opt_join_tree_node.cpp
index 2a81ec3910..9e91df28a2 100644
--- a/ydb/library/yql/dq/opt/dq_opt_join_tree_node.cpp
+++ b/ydb/library/yql/dq/opt/dq_opt_join_tree_node.cpp
@@ -24,7 +24,8 @@ std::shared_ptr<TJoinOptimizerNodeInternal> MakeJoinInternal(
std::shared_ptr<TJoinOptimizerNode> ConvertFromInternal(
const std::shared_ptr<IBaseOptimizerNode>& internal,
- const TFDStorage& fdStorage
+ const TFDStorage& fdStorage,
+ bool enableShuffleElimination
) {
Y_ENSURE(internal->Kind == EOptimizerNodeKind::JoinNodeType);
@@ -38,20 +39,24 @@ std::shared_ptr<TJoinOptimizerNode> ConvertFromInternal(
auto right = join->RightArg;
if (left->Kind == EOptimizerNodeKind::JoinNodeType) {
- left = ConvertFromInternal(left, fdStorage);
+ left = ConvertFromInternal(left, fdStorage, enableShuffleElimination);
}
if (right->Kind == EOptimizerNodeKind::JoinNodeType) {
- right = ConvertFromInternal(right, fdStorage);
+ right = ConvertFromInternal(right, fdStorage, enableShuffleElimination);
}
auto newJoin = std::make_shared<TJoinOptimizerNode>(left, right, join->LeftJoinKeys, join->RightJoinKeys, join->JoinType, join->JoinAlgo, join->LeftAny, join->RightAny);
newJoin->Stats = std::move(join->Stats);
-
- if (join->ShuffleLeftSideByOrderingIdx != -1) {
+ if (!enableShuffleElimination && join->JoinAlgo == EJoinAlgoType::GraceJoin) {
+ left->Stats.ShuffledByColumns =
+ TIntrusivePtr<TOptimizerStatistics::TShuffledByColumns>(
+ new TOptimizerStatistics::TShuffledByColumns(join->LeftJoinKeys)
+ );
+ } else if (join->ShuffleLeftSideByOrderingIdx != -1) {
auto shuffledBy = fdStorage.GetInterestingOrderingsColumnNamesByIdx(join->ShuffleLeftSideByOrderingIdx);
- left->Stats.ShuffledByColumns =
+ left->Stats.ShuffledByColumns =
TIntrusivePtr<TOptimizerStatistics::TShuffledByColumns>(
new TOptimizerStatistics::TShuffledByColumns(std::move(shuffledBy))
);
@@ -59,10 +64,15 @@ std::shared_ptr<TJoinOptimizerNode> ConvertFromInternal(
left->Stats.ShuffledByColumns = nullptr;
}
- if (join->ShuffleRightSideByOrderingIdx != -1) {
+ if (!enableShuffleElimination && join->JoinAlgo == EJoinAlgoType::GraceJoin) {
+ right->Stats.ShuffledByColumns =
+ TIntrusivePtr<TOptimizerStatistics::TShuffledByColumns>(
+ new TOptimizerStatistics::TShuffledByColumns(join->RightJoinKeys)
+ );
+ } else if (join->ShuffleRightSideByOrderingIdx != -1) {
auto shuffledBy = fdStorage.GetInterestingOrderingsColumnNamesByIdx(join->ShuffleRightSideByOrderingIdx);
- right->Stats.ShuffledByColumns =
+ right->Stats.ShuffledByColumns =
TIntrusivePtr<TOptimizerStatistics::TShuffledByColumns>(
new TOptimizerStatistics::TShuffledByColumns(std::move(shuffledBy))
);
diff --git a/ydb/library/yql/dq/opt/dq_opt_join_tree_node.h b/ydb/library/yql/dq/opt/dq_opt_join_tree_node.h
index a30d2c8d71..2f7a17a3af 100644
--- a/ydb/library/yql/dq/opt/dq_opt_join_tree_node.h
+++ b/ydb/library/yql/dq/opt/dq_opt_join_tree_node.h
@@ -1,6 +1,6 @@
#pragma once
-#include <yql/essentials/core/cbo/cbo_optimizer_new.h>
+#include <yql/essentials/core/cbo/cbo_optimizer_new.h>
const TString& ToString(NYql::EJoinKind);
const TString& ToString(NYql::EJoinAlgoType);
@@ -19,15 +19,15 @@ namespace NYql::NDq {
*/
struct TJoinOptimizerNodeInternal : public IBaseOptimizerNode {
TJoinOptimizerNodeInternal(
- const std::shared_ptr<IBaseOptimizerNode>& left,
+ const std::shared_ptr<IBaseOptimizerNode>& left,
const std::shared_ptr<IBaseOptimizerNode>& right,
const TVector<TJoinColumn>& leftJoinKeys,
- const TVector<TJoinColumn>& rightJoinKeys,
- const EJoinKind joinType,
+ const TVector<TJoinColumn>& rightJoinKeys,
+ const EJoinKind joinType,
const EJoinAlgoType joinAlgo,
const bool leftAny,
const bool rightAny
- )
+ )
: IBaseOptimizerNode(JoinNodeType)
, LeftArg(left)
, RightArg(right)
@@ -55,9 +55,9 @@ struct TJoinOptimizerNodeInternal : public IBaseOptimizerNode {
stream << ToString(JoinType) << "," << ToString(JoinAlgo) << " ";
for (size_t i = 0; i < LeftJoinKeys.size(); ++i){
- stream
+ stream
<< LeftJoinKeys[i].RelName << "." << LeftJoinKeys[i].AttributeName
- << "="
+ << "="
<< RightJoinKeys[i].RelName << "." << RightJoinKeys[i].AttributeName << ",";
}
stream << "\n";
@@ -75,7 +75,7 @@ struct TJoinOptimizerNodeInternal : public IBaseOptimizerNode {
stream << " ";
}
stream << " ";
- stream << "Shuffled By: " << ShuffleRightSideByOrderingIdx << "\n";
+ stream << "Shuffled By: " << ShuffleRightSideByOrderingIdx << "\n";
RightArg->Print(stream, ntabs + 1);
}
@@ -90,7 +90,7 @@ struct TJoinOptimizerNodeInternal : public IBaseOptimizerNode {
// for interesting orderings framework
std::int64_t ShuffleLeftSideByOrderingIdx = -1;
- std::int64_t ShuffleRightSideByOrderingIdx = -1;
+ std::int64_t ShuffleRightSideByOrderingIdx = -1;
};
/**
@@ -118,7 +118,8 @@ std::shared_ptr<TJoinOptimizerNodeInternal> MakeJoinInternal(
*/
std::shared_ptr<TJoinOptimizerNode> ConvertFromInternal(
const std::shared_ptr<IBaseOptimizerNode>& internal,
- const TFDStorage& fdStorage
+ const TFDStorage& fdStorage,
+ bool enableShuffleElimination
);
} // namespace NYql::NDq
diff --git a/ydb/library/yql/dq/opt/dq_opt_phy.cpp b/ydb/library/yql/dq/opt/dq_opt_phy.cpp
index 43e17e51da..e3a707074d 100644
--- a/ydb/library/yql/dq/opt/dq_opt_phy.cpp
+++ b/ydb/library/yql/dq/opt/dq_opt_phy.cpp
@@ -698,6 +698,18 @@ TExprBase DqPushSkipNullMembersToStage(TExprBase node, TExprContext& ctx, IOptim
return DqPushInputBaseCallableToStage<TCoSkipNullMembers>(node, ctx, optCtx, parentsMap, allowStageMultiUsage);
}
+TExprBase DqPushPruneKeysToStage(TExprBase node, TExprContext& ctx, IOptimizationContext& optCtx,
+ const TParentsMap& parentsMap, bool allowStageMultiUsage)
+{
+ return DqPushInputBaseCallableToStage<TCoPruneKeys>(node, ctx, optCtx, parentsMap, allowStageMultiUsage);
+}
+
+TExprBase DqPushPruneAdjacentKeysToStage(TExprBase node, TExprContext& ctx, IOptimizationContext& optCtx,
+ const TParentsMap& parentsMap, bool allowStageMultiUsage)
+{
+ return DqPushInputBaseCallableToStage<TCoPruneAdjacentKeys>(node, ctx, optCtx, parentsMap, allowStageMultiUsage);
+}
+
TExprBase DqPushExtractMembersToStage(TExprBase node, TExprContext& ctx, IOptimizationContext& optCtx,
const TParentsMap& parentsMap, bool allowStageMultiUsage)
{
diff --git a/ydb/library/yql/dq/opt/dq_opt_phy.h b/ydb/library/yql/dq/opt/dq_opt_phy.h
index 693beec611..f60ee7f880 100644
--- a/ydb/library/yql/dq/opt/dq_opt_phy.h
+++ b/ydb/library/yql/dq/opt/dq_opt_phy.h
@@ -28,6 +28,12 @@ void DqPushLambdasToStagesUnionAll(std::vector<std::pair<NNodes::TDqCnUnionAll,
NNodes::TExprBase DqPushSkipNullMembersToStage(NNodes::TExprBase node, TExprContext& ctx, IOptimizationContext& optCtx,
const TParentsMap& parentsMap, bool allowStageMultiUsage = true);
+NNodes::TExprBase DqPushPruneKeysToStage(NNodes::TExprBase node, TExprContext& ctx, IOptimizationContext& optCtx,
+ const TParentsMap& parentsMap, bool allowStageMultiUsage = true);
+
+NNodes::TExprBase DqPushPruneAdjacentKeysToStage(NNodes::TExprBase node, TExprContext& ctx, IOptimizationContext& optCtx,
+ const TParentsMap& parentsMap, bool allowStageMultiUsage = true);
+
NNodes::TExprBase DqPushExtractMembersToStage(NNodes::TExprBase node, TExprContext& ctx, IOptimizationContext& optCtx,
const TParentsMap& parentsMap, bool allowStageMultiUsage = true);
diff --git a/ydb/library/yql/dq/runtime/dq_output_consumer.cpp b/ydb/library/yql/dq/runtime/dq_output_consumer.cpp
index 7788b1da53..d6220f2765 100644
--- a/ydb/library/yql/dq/runtime/dq_output_consumer.cpp
+++ b/ydb/library/yql/dq/runtime/dq_output_consumer.cpp
@@ -128,6 +128,28 @@ struct TColumnShardHashV1 {
return;
}
+ if (uv.IsBoxed()) {
+ if (auto list = uv.GetElements()) {
+ UpdateImpl(*list, keyIdx);
+ return;
+ }
+ }
+
+ UpdateImpl(uv, keyIdx);
+ }
+
+ ui64 Finish() {
+ ui64 hash = HashCalcer.Finish();
+ hash = std::min<ui32>(hash / (Max<ui64>() / ShardCount), ShardCount - 1);
+ return TaskIndexByHash[hash];
+ }
+
+ std::size_t ShardCount;
+ TVector<ui64> TaskIndexByHash;
+ TVector<NYql::NProto::TypeIds> KeyColumnTypes;
+private:
+ template <typename TValue>
+ void UpdateImpl(const TValue& uv, size_t keyIdx) {
switch (KeyColumnTypes[keyIdx]) {
case NYql::NProto::Bool: {
auto value = uv.template Get<bool>();
@@ -202,6 +224,11 @@ struct TColumnShardHashV1 {
HashCalcer.Update(reinterpret_cast<const ui8*>(value.Data()), value.Size());
break;
}
+ case NYql::NProto::Decimal: {
+ auto value = uv.GetInt128();
+ HashCalcer.Update(reinterpret_cast<const ui8*>(&value), sizeof(value));
+ break;
+ }
default: {
Y_ENSURE(false, TStringBuilder{} << "HashFunc for HashShuffle isn't supported with such type: " << static_cast<ui64>(KeyColumnTypes[keyIdx]));
break;
@@ -209,15 +236,6 @@ struct TColumnShardHashV1 {
}
}
- ui64 Finish() {
- ui64 hash = HashCalcer.Finish();
- hash = std::min<ui32>(hash / (Max<ui64>() / ShardCount), ShardCount - 1);
- return TaskIndexByHash[hash];
- }
-
- std::size_t ShardCount;
- TVector<ui64> TaskIndexByHash;
- TVector<NYql::NProto::TypeIds> KeyColumnTypes;
private:
NArrow::NHash::NXX64::TStreamStringHashCalcer HashCalcer;
};
diff --git a/ydb/library/yql/dq/runtime/dq_tasks_runner.cpp b/ydb/library/yql/dq/runtime/dq_tasks_runner.cpp
index 2d96335db6..afeeb211e1 100644
--- a/ydb/library/yql/dq/runtime/dq_tasks_runner.cpp
+++ b/ydb/library/yql/dq/runtime/dq_tasks_runner.cpp
@@ -259,6 +259,14 @@ public:
AllocatedHolder->SelfTypeEnv = std::make_unique<TTypeEnvironment>(alloc);
}
+ NUdf::TLogProviderFunc logProviderFunc = nullptr;
+ if (LogFunc) {
+ logProviderFunc = [log=LogFunc](const NUdf::TStringRef& component, NUdf::ELogLevel level, const NUdf::TStringRef& message) {
+ log(TStringBuilder() << "[" << component << "][" << level << "]: " << message << "\n");
+ };
+ }
+
+ ComputationLogProvider = NUdf::MakeLogProvider(std::move(logProviderFunc), NUdf::ELogLevel::Debug);
}
~TDqTaskRunner() {
@@ -318,7 +326,7 @@ public:
TComputationPatternOpts opts(alloc.Ref(), typeEnv, taskRunnerFactory,
Context.FuncRegistry, NUdf::EValidateMode::None, validatePolicy, optLLVM, EGraphPerProcess::Multi,
- AllocatedHolder->ProgramParsed.StatsRegistry.Get(), CollectFull() ? &CountersProvider : nullptr);
+ AllocatedHolder->ProgramParsed.StatsRegistry.Get(), CollectFull() ? &CountersProvider : nullptr, nullptr, ComputationLogProvider.Get());
if (!SecureParamsProvider) {
SecureParamsProvider = MakeSimpleSecureParamsProvider(Settings.SecureParams);
@@ -716,13 +724,6 @@ public:
}
auto prepareTime = TInstant::Now() - startTime;
- if (LogFunc) {
- TLogFunc logger = [taskId = TaskId, log = LogFunc](const TString& message) {
- log(TStringBuilder() << "Run task: " << taskId << ", " << message);
- };
- LogFunc = logger;
-
- }
LOG(TStringBuilder() << "Prepare task: " << TaskId << ", takes " << prepareTime.MicroSeconds() << " us");
if (Stats) {
@@ -985,6 +986,7 @@ private:
private:
std::shared_ptr<ISpillerFactory> SpillerFactory;
TIntrusivePtr<TSpillingTaskCounters> SpillingTaskCounters;
+ NUdf::TUniquePtr<NUdf::ILogProvider> ComputationLogProvider;
ui64 TaskId = 0;
TDqTaskRunnerContext Context;
diff --git a/ydb/library/yql/providers/dq/opt/physical_optimize.cpp b/ydb/library/yql/providers/dq/opt/physical_optimize.cpp
index e9578c7050..04b9925468 100644
--- a/ydb/library/yql/providers/dq/opt/physical_optimize.cpp
+++ b/ydb/library/yql/providers/dq/opt/physical_optimize.cpp
@@ -33,6 +33,8 @@ public:
AddHandler(0, &TDqSourceWrap::Match, HNDL(BuildStageWithSourceWrap));
AddHandler(0, &TDqReadWrap::Match, HNDL(BuildStageWithReadWrap));
AddHandler(0, &TCoSkipNullMembers::Match, HNDL(PushSkipNullMembersToStage<false>));
+ AddHandler(0, &TCoPruneKeys::Match, HNDL(PushPruneKeysToStage<false>));
+ AddHandler(0, &TCoPruneAdjacentKeys::Match, HNDL(PushPruneAdjacentKeysToStage<false>));
AddHandler(0, &TCoExtractMembers::Match, HNDL(PushExtractMembersToStage<false>));
AddHandler(0, &TCoAssumeUnique::Match, HNDL(PushAssumeUniqueToStage<false>));
AddHandler(0, &TCoAssumeDistinct::Match, HNDL(PushAssumeDistinctToStage<false>));
@@ -73,6 +75,8 @@ public:
}
AddHandler(1, &TCoSkipNullMembers::Match, HNDL(PushSkipNullMembersToStage<true>));
+ AddHandler(1, &TCoPruneKeys::Match, HNDL(PushPruneKeysToStage<true>));
+ AddHandler(1, &TCoPruneAdjacentKeys::Match, HNDL(PushPruneAdjacentKeysToStage<true>));
AddHandler(1, &TCoExtractMembers::Match, HNDL(PushExtractMembersToStage<true>));
AddHandler(1, &TCoAssumeUnique::Match, HNDL(PushAssumeUniqueToStage<true>));
AddHandler(1, &TCoAssumeDistinct::Match, HNDL(PushAssumeDistinctToStage<true>));
@@ -127,6 +131,16 @@ protected:
}
template <bool IsGlobal>
+ TMaybeNode<TExprBase> PushPruneKeysToStage(TExprBase node, TExprContext& ctx, IOptimizationContext& optCtx, const TGetParents& getParents) {
+ return DqPushPruneKeysToStage(node, ctx, optCtx, *getParents(), IsGlobal);
+ }
+
+ template <bool IsGlobal>
+ TMaybeNode<TExprBase> PushPruneAdjacentKeysToStage(TExprBase node, TExprContext& ctx, IOptimizationContext& optCtx, const TGetParents& getParents) {
+ return DqPushPruneAdjacentKeysToStage(node, ctx, optCtx, *getParents(), IsGlobal);
+ }
+
+ template <bool IsGlobal>
TMaybeNode<TExprBase> PushExtractMembersToStage(TExprBase node, TExprContext& ctx, IOptimizationContext& optCtx, const TGetParents& getParents) {
return DqPushExtractMembersToStage(node, ctx, optCtx, *getParents(), IsGlobal);
}
diff --git a/ydb/library/yql/providers/pq/async_io/dq_pq_rd_read_actor.cpp b/ydb/library/yql/providers/pq/async_io/dq_pq_rd_read_actor.cpp
index f6ce58194b..12e1c746c2 100644
--- a/ydb/library/yql/providers/pq/async_io/dq_pq_rd_read_actor.cpp
+++ b/ydb/library/yql/providers/pq/async_io/dq_pq_rd_read_actor.cpp
@@ -9,6 +9,7 @@
#include <ydb/library/yql/dq/actors/compute/dq_checkpoints_states.h>
#include <ydb/library/yql/dq/actors/compute/dq_source_watermark_tracker.h>
#include <ydb/library/yql/dq/actors/common/retry_queue.h>
+#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/federated_topic/federated_topic.h>
#include <yql/essentials/minikql/comp_nodes/mkql_saveload.h>
#include <yql/essentials/minikql/mkql_alloc.h>
@@ -18,6 +19,7 @@
#include <ydb/library/yql/providers/pq/async_io/dq_pq_meta_extractor.h>
#include <ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.h>
#include <ydb/library/yql/providers/pq/common/pq_meta_fields.h>
+#include <ydb/library/yql/providers/pq/common/pq_partition_key.h>
#include <ydb/library/yql/providers/pq/proto/dq_io_state.pb.h>
#include <yql/essentials/public/issue/yql_issue_message.h>
#include <yql/essentials/utils/log/log.h>
@@ -25,6 +27,7 @@
#include <ydb/core/fq/libs/events/events.h>
#include <ydb/core/fq/libs/row_dispatcher/events/data_plane.h>
+#include <ydb/public/sdk/cpp/adapters/issue/issue.h>
#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/topic/client.h>
#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/types/credentials/credentials.h>
@@ -78,6 +81,9 @@ struct TRowDispatcherReadActorMetrics {
explicit TRowDispatcherReadActorMetrics(const TTxId& txId, ui64 taskId, const ::NMonitoring::TDynamicCounterPtr& counters)
: TxId(std::visit([](auto arg) { return ToString(arg); }, txId))
, Counters(counters) {
+ if (!counters) {
+ return;
+ }
SubGroup = Counters->GetSubgroup("source", "RdPqRead");
auto source = SubGroup->GetSubgroup("tx_id", TxId);
auto task = source->GetSubgroup("task_id", ToString(taskId));
@@ -87,6 +93,9 @@ struct TRowDispatcherReadActorMetrics {
}
~TRowDispatcherReadActorMetrics() {
+ if (!Counters) {
+ return;
+ }
SubGroup->RemoveSubgroup("tx_id", TxId);
}
@@ -104,15 +113,45 @@ struct TEvPrivate {
EvPrintState = EvBegin + 20,
EvProcessState = EvBegin + 21,
EvNotifyCA = EvBegin + 22,
+ EvRefreshClusters = EvBegin + 23,
+ EvReceivedClusters = EvBegin + 24,
+ EvDescribeTopicResult = EvBegin + 25,
EvEnd
};
static_assert(EvEnd < EventSpaceEnd(NActors::TEvents::ES_PRIVATE), "expect EvEnd < EventSpaceEnd(NActors::TEvents::ES_PRIVATE)");
struct TEvPrintState : public NActors::TEventLocal<TEvPrintState, EvPrintState> {};
struct TEvProcessState : public NActors::TEventLocal<TEvProcessState, EvProcessState> {};
struct TEvNotifyCA : public NActors::TEventLocal<TEvNotifyCA, EvNotifyCA> {};
+ struct TEvRefreshClusters : public NActors::TEventLocal<TEvRefreshClusters, EvRefreshClusters> {};
+ struct TEvReceivedClusters : public NActors::TEventLocal<TEvReceivedClusters, EvReceivedClusters> {
+ explicit TEvReceivedClusters(
+ std::vector<NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo>&& federatedClusters)
+ : FederatedClusters(std::move(federatedClusters))
+ {}
+ explicit TEvReceivedClusters(const std::exception& ex)
+ : ExceptionMessage(ex.what())
+ {}
+ std::vector<NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo> FederatedClusters;
+ std::optional<std::string> ExceptionMessage;
+ };
+ struct TEvDescribeTopicResult : public NActors::TEventLocal<TEvDescribeTopicResult, EvDescribeTopicResult> {
+ TEvDescribeTopicResult(ui32 clusterIndex, ui32 partitionsCount)
+ : ClusterIndex(clusterIndex)
+ , PartitionsCount(partitionsCount)
+ {}
+ TEvDescribeTopicResult(ui32 clusterIndex, const NYdb::TStatus& status)
+ : ClusterIndex(clusterIndex)
+ , PartitionsCount(0)
+ , Status(status)
+ {}
+ ui32 ClusterIndex;
+ ui32 PartitionsCount;
+ TMaybe<NYdb::TStatus> Status;
+ };
};
class TDqPqRdReadActor : public NActors::TActor<TDqPqRdReadActor>, public NYql::NDq::NInternal::TDqPqReadActorBase {
+ static constexpr bool StaticDiscovery = true;
const ui64 PrintStatePeriodSec = 300;
const ui64 ProcessStatePeriodSec = 1;
@@ -121,8 +160,9 @@ class TDqPqRdReadActor : public NActors::TActor<TDqPqRdReadActor>, public NYql::
struct TReadyBatch {
public:
- TReadyBatch(ui64 partitionId, ui32 dataCapacity)
- : PartitionId(partitionId) {
+ TReadyBatch(const TPartitionKey& partitionKey, ui32 dataCapacity)
+ : PartitionKey(partitionKey)
+ {
Data.reserve(dataCapacity);
}
@@ -130,10 +170,12 @@ class TDqPqRdReadActor : public NActors::TActor<TDqPqRdReadActor>, public NYql::
TVector<TRope> Data;
i64 UsedSpace = 0;
ui64 NextOffset = 0;
- ui64 PartitionId;
+ TPartitionKey PartitionKey;
};
enum class EState {
+ START_CLUSTER_DISCOVERY,
+ WAIT_CLUSTER_DISCOVERY,
INIT,
WAIT_COORDINATOR_ID,
WAIT_PARTITIONS_ADDRES,
@@ -163,30 +205,54 @@ class TDqPqRdReadActor : public NActors::TActor<TDqPqRdReadActor>, public NYql::
};
private:
+ // There can be 2 kinds of actors:
+ // 1) Main (that CA interacts with)
+ // 2) Child (per-federated-cluster, interacts with row dispatcher)
+ // For single-cluster/non-federated topics main actor plays both roles.
+ // PartitionToOffset maintained only on main actor, row dispatcher state maintained only on children
+ // As main and child actors resides on same mailbox, their handlers are never concurrently executed
+ // and can modify other side state
+ TDqPqRdReadActor* Parent;
+ TString Cluster;
const TString Token;
TMaybe<NActors::TActorId> CoordinatorActorId;
NActors::TActorId LocalRowDispatcherActorId;
+ // Set on Children
std::queue<TReadyBatch> ReadyBuffer;
+ // Set on Parent
EState State = EState::INIT;
bool Inited = false;
ui64 CoordinatorRequestCookie = 0;
TRowDispatcherReadActorMetrics Metrics;
+ // Set on Parent
bool ProcessStateScheduled = false;
bool InFlyAsyncInputData = false;
+ // Set on Parent
TCounters Counters;
+ // Set on Child (except for NotifyCA)
// Parsing info
std::vector<std::optional<ui64>> ColumnIndexes; // Output column index in schema passed into RowDispatcher
const TType* InputDataType = nullptr; // Multi type (comes from Row Dispatcher)
std::unique_ptr<NKikimr::NMiniKQL::TValuePackerTransport<true>> DataUnpacker;
ui64 CpuMicrosec = 0;
-
- THashMap<ui32, TMaybe<ui64>> NextOffsetFromRD;
-
- struct TPartition {
- bool HasPendingData = false;
- bool IsWaitingMessageBatch = false;
+ // Set on both Parent (cumulative) and Childern (separate)
+
+ using TPartitionKey = ::NPq::TPartitionKey;
+ THashMap<ui64, ui64> NextOffsetFromRD;
+ // Set on Children
+ struct TClusterState {
+ TClusterState(NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo&& info, ui32 partitionsCount)
+ : Info(std::move(info))
+ , PartitionsCount(partitionsCount)
+ {}
+ NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo Info;
+ ITopicClient::TPtr TopicClient;
+ ui32 PartitionsCount;
+ TDqPqRdReadActor* Child = nullptr;
+ NActors::TActorId ChildId;
};
-
+ std::vector<TClusterState> Clusters;
+ // Set on Parent
struct TSession {
enum class ESessionStatus {
INIT,
@@ -217,22 +283,32 @@ private:
NYql::NDq::TRetryEventsQueue EventsQueue;
TActorId RowDispatcherActorId;
ui64 Generation = std::numeric_limits<ui64>::max();
- THashMap<ui32, TPartition> Partitions;
+ THashSet<ui32> HasPendingData;
+ THashSet<ui32> Partitions;
+
bool IsWaitingStartSessionAck = false;
ui64 QueuedBytes = 0;
ui64 QueuedRows = 0;
};
+ IPqGateway::TPtr PqGateway;
TMap<NActors::TActorId, TSession> Sessions;
THashMap<ui64, NActors::TActorId> ReadActorByEventQueueId;
+ // Set on Children
const THolderFactory& HolderFactory;
+ const TTypeEnvironment& TypeEnv;
+ NYdb::TDriver Driver;
+ std::shared_ptr<NYdb::ICredentialsProviderFactory> CredentialsProviderFactory;
+ IFederatedTopicClient::TPtr FederatedTopicClient;
const i64 MaxBufferSize;
i64 ReadyBufferSizeBytes = 0;
+ // Set on Parent
ui64 NextGeneration = 0;
ui64 NextEventQueueId = 0;
TMap<NActors::TActorId, TSet<ui32>> LastUsedPartitionDistribution;
TMap<NActors::TActorId, TSet<ui32>> LastReceivedPartitionDistribution;
+ // Set on Children
public:
TDqPqRdReadActor(
@@ -244,11 +320,16 @@ public:
const TTypeEnvironment& typeEnv,
NPq::NProto::TDqPqTopicSource&& sourceParams,
NPq::NProto::TDqReadTaskParams&& readParams,
+ NYdb::TDriver driver,
+ std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory,
const NActors::TActorId& computeActorId,
const NActors::TActorId& localRowDispatcherActorId,
const TString& token,
const ::NMonitoring::TDynamicCounterPtr& counters,
- i64 bufferSize);
+ i64 bufferSize,
+ const IPqGateway::TPtr& pqGateway,
+ TDqPqRdReadActor* parent = nullptr,
+ const TString& cluster = {});
void Handle(NFq::TEvRowDispatcher::TEvCoordinatorChanged::TPtr& ev);
void Handle(NFq::TEvRowDispatcher::TEvCoordinatorResult::TPtr& ev);
@@ -269,6 +350,9 @@ public:
void Handle(TEvPrivate::TEvPrintState::TPtr&);
void Handle(TEvPrivate::TEvProcessState::TPtr&);
void Handle(TEvPrivate::TEvNotifyCA::TPtr&);
+ void Handle(TEvPrivate::TEvRefreshClusters::TPtr&);
+ void Handle(TEvPrivate::TEvReceivedClusters::TPtr&);
+ void Handle(TEvPrivate::TEvDescribeTopicResult::TPtr&);
STRICT_STFUNC(StateFunc, {
hFunc(NFq::TEvRowDispatcher::TEvCoordinatorChanged, Handle);
@@ -290,6 +374,11 @@ public:
hFunc(TEvPrivate::TEvPrintState, Handle);
hFunc(TEvPrivate::TEvProcessState, Handle);
hFunc(TEvPrivate::TEvNotifyCA, Handle);
+ hFunc(TEvPrivate::TEvRefreshClusters, Handle);
+ hFunc(TEvPrivate::TEvReceivedClusters, Handle);
+ hFunc(TEvPrivate::TEvDescribeTopicResult, Handle);
+
+ cFunc(TEvents::TEvPoisonPill::EventType, PassAway);
})
static constexpr char ActorName[] = "DQ_PQ_READ_ACTOR";
@@ -313,13 +402,56 @@ public:
void NotifyCA();
void SendStartSession(TSession& sessionInfo);
void Init();
+ void InitChild();
void ScheduleProcessState();
void ProcessGlobalState();
void ProcessSessionsState();
void UpdateSessions();
void UpdateQueuedSize();
+ void StartClusterDiscovery();
+ void StartCluster(ui32 clusterIndex);
+ NYdb::NFederatedTopic::TFederatedTopicClientSettings GetFederatedTopicClientSettings() const;
+ IFederatedTopicClient& GetFederatedTopicClient();
+ NYdb::NTopic::TTopicClientSettings GetTopicClientSettings() const;
+ ITopicClient& GetTopicClient(TClusterState& clusterState);
};
+IFederatedTopicClient& TDqPqRdReadActor::GetFederatedTopicClient() {
+ if (!FederatedTopicClient) {
+ FederatedTopicClient = PqGateway->GetFederatedTopicClient(Driver, GetFederatedTopicClientSettings());
+ }
+ return *FederatedTopicClient;
+}
+
+ITopicClient& TDqPqRdReadActor::GetTopicClient(TClusterState& clusterState) {
+ if (!clusterState.TopicClient) {
+ auto settings = GetTopicClientSettings();
+ clusterState.Info.AdjustTopicClientSettings(settings);
+ clusterState.TopicClient = PqGateway->GetTopicClient(Driver, settings);
+ }
+ return *clusterState.TopicClient;
+}
+
+NYdb::NFederatedTopic::TFederatedTopicClientSettings TDqPqRdReadActor::GetFederatedTopicClientSettings() const {
+ NYdb::NFederatedTopic::TFederatedTopicClientSettings opts = PqGateway->GetFederatedTopicClientSettings();
+ opts.Database(SourceParams.GetDatabase())
+ .DiscoveryEndpoint(SourceParams.GetEndpoint())
+ .SslCredentials(NYdb::TSslCredentials(SourceParams.GetUseSsl()))
+ .CredentialsProviderFactory(CredentialsProviderFactory);
+
+ return opts;
+}
+
+NYdb::NTopic::TTopicClientSettings TDqPqRdReadActor::GetTopicClientSettings() const {
+ NYdb::NTopic::TTopicClientSettings opts = PqGateway->GetTopicClientSettings();
+ opts.Database(SourceParams.GetDatabase())
+ .DiscoveryEndpoint(SourceParams.GetEndpoint())
+ .SslCredentials(NYdb::TSslCredentials(SourceParams.GetUseSsl()))
+ .CredentialsProviderFactory(CredentialsProviderFactory);
+
+ return opts;
+}
+
TDqPqRdReadActor::TDqPqRdReadActor(
ui64 inputIndex,
TCollectStatsLevel statsLevel,
@@ -329,19 +461,33 @@ TDqPqRdReadActor::TDqPqRdReadActor(
const TTypeEnvironment& typeEnv,
NPq::NProto::TDqPqTopicSource&& sourceParams,
NPq::NProto::TDqReadTaskParams&& readParams,
+ NYdb::TDriver driver,
+ std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory,
const NActors::TActorId& computeActorId,
const NActors::TActorId& localRowDispatcherActorId,
const TString& token,
const ::NMonitoring::TDynamicCounterPtr& counters,
- i64 bufferSize)
+ i64 bufferSize,
+ const IPqGateway::TPtr& pqGateway,
+ TDqPqRdReadActor* parent,
+ const TString& cluster)
: TActor<TDqPqRdReadActor>(&TDqPqRdReadActor::StateFunc)
, TDqPqReadActorBase(inputIndex, taskId, this->SelfId(), txId, std::move(sourceParams), std::move(readParams), computeActorId)
+ , Parent(parent ? parent : this)
+ , Cluster(cluster)
, Token(token)
, LocalRowDispatcherActorId(localRowDispatcherActorId)
, Metrics(txId, taskId, counters)
+ , PqGateway(pqGateway)
, HolderFactory(holderFactory)
+ , TypeEnv(typeEnv)
+ , Driver(std::move(driver))
+ , CredentialsProviderFactory(std::move(credentialsProviderFactory))
, MaxBufferSize(bufferSize)
{
+ if (Parent == this) {
+ State = EState::START_CLUSTER_DISCOVERY;
+ }
const auto programBuilder = std::make_unique<TProgramBuilder>(typeEnv, *holderFactory.GetFunctionRegistry());
// Parse output schema (expected struct output type)
@@ -372,29 +518,46 @@ void TDqPqRdReadActor::Init() {
if (Inited) {
return;
}
- LogPrefix = (TStringBuilder() << "SelfId: " << SelfId() << ", TxId: " << TxId << ", task: " << TaskId << ". PQ source. ");
-
- auto partitionToRead = GetPartitionsToRead();
- for (auto partitionId : partitionToRead) {
- TPartitionKey partitionKey{TString{}, partitionId};
- const auto offsetIt = PartitionToOffset.find(partitionKey);
- auto& nextOffset = NextOffsetFromRD[partitionId];
- if (offsetIt != PartitionToOffset.end()) {
- nextOffset = offsetIt->second;
+ LogPrefix = (TStringBuilder() << "SelfId: " << SelfId() << ", TxId: " << TxId << ", task: " << TaskId << ", Cluster: " << Cluster << ". PQ source. ");
+
+ Inited = true;
+
+ if (Parent == this) {
+ ProcessState();
+ Schedule(TDuration::Seconds(NotifyCAPeriodSec), new TEvPrivate::TEvNotifyCA());
+ }
+}
+
+void TDqPqRdReadActor::InitChild() {
+ for (auto& [partitionKey, offset]: Parent->PartitionToOffset) {
+ if (Cluster == partitionKey.Cluster) {
+ NextOffsetFromRD[partitionKey.PartitionId] = offset;
}
}
SRC_LOG_I("Send TEvCoordinatorChangesSubscribe to local RD (" << LocalRowDispatcherActorId << ")");
Send(LocalRowDispatcherActorId, new NFq::TEvRowDispatcher::TEvCoordinatorChangesSubscribe());
-
Schedule(TDuration::Seconds(PrintStatePeriodSec), new TEvPrivate::TEvPrintState());
- Schedule(TDuration::Seconds(NotifyCAPeriodSec), new TEvPrivate::TEvNotifyCA());
- Inited = true;
}
void TDqPqRdReadActor::ProcessGlobalState() {
switch (State) {
+ case EState::START_CLUSTER_DISCOVERY:
+ State = EState::WAIT_CLUSTER_DISCOVERY;
+ StartClusterDiscovery();
+ break;
+
+ case EState::WAIT_CLUSTER_DISCOVERY:
+ for (auto& clusterState : Clusters) {
+ auto child = clusterState.Child;
+ if (child == nullptr) {
+ continue;
+ }
+ child->ProcessState();
+ }
+ break;
+
case EState::INIT:
- if (!ReadyBuffer.empty()) {
+ if (!Parent->ReadyBuffer.empty()) {
return;
}
if (!CoordinatorActorId) {
@@ -463,16 +626,16 @@ void TDqPqRdReadActor::SendStartSession(TSession& sessionInfo) {
std::set<ui32> partitions;
std::map<ui32, ui64> partitionOffsets;
- for (auto& [partitionId, partition] : sessionInfo.Partitions) {
+ for (auto partitionId : sessionInfo.Partitions) {
partitions.insert(partitionId);
- auto nextOffset = NextOffsetFromRD[partitionId];
+ auto itNextOffset = NextOffsetFromRD.find(partitionId);
str << "(" << partitionId << " / ";
- if (!nextOffset) {
+ if (itNextOffset == NextOffsetFromRD.end()) {
str << "<empty>),";
continue;
}
- partitionOffsets[partitionId] = *nextOffset;
- str << nextOffset << "),";
+ partitionOffsets[partitionId] = itNextOffset->second;
+ str << itNextOffset->second << "),";
}
SRC_LOG_I(str);
@@ -504,6 +667,14 @@ void TDqPqRdReadActor::PassAway() { // Is called from Compute Actor
for (auto& [rowDispatcherActorId, sessionInfo] : Sessions) {
StopSession(sessionInfo);
}
+ for (auto& clusterState : Clusters) {
+ if (clusterState.Child == this) {
+ continue;
+ }
+ Send(clusterState.ChildId, new NActors::TEvents::TEvPoison);
+ }
+ Clusters.clear();
+ FederatedTopicClient.Reset();
TActor<TDqPqRdReadActor>::PassAway();
// TODO: RetryQueue::Unsubscribe()
@@ -530,8 +701,7 @@ i64 TDqPqRdReadActor::GetAsyncInputData(NKikimr::NMiniKQL::TUnboxedValueBatch& b
usedSpace += readyBatch.UsedSpace;
freeSpace -= readyBatch.UsedSpace;
- TPartitionKey partitionKey{TString{}, readyBatch.PartitionId};
- PartitionToOffset[partitionKey] = readyBatch.NextOffset;
+ PartitionToOffset[readyBatch.PartitionKey] = readyBatch.NextOffset;
SRC_LOG_T("NextOffset " << readyBatch.NextOffset);
} while (freeSpace > 0 && !ReadyBuffer.empty());
@@ -541,8 +711,14 @@ i64 TDqPqRdReadActor::GetAsyncInputData(NKikimr::NMiniKQL::TUnboxedValueBatch& b
if (!ReadyBuffer.empty()) {
NotifyCA();
}
- for (auto& [rowDispatcherActorId, sessionInfo] : Sessions) {
- TrySendGetNextBatch(sessionInfo);
+ for (auto& clusterState : Clusters) {
+ auto child = clusterState.Child;
+ if (child == nullptr) {
+ continue;
+ }
+ for (auto& [rowDispatcherActorId, sessionInfo] : child->Sessions) {
+ child->TrySendGetNextBatch(sessionInfo);
+ }
}
return usedSpace;
}
@@ -553,11 +729,12 @@ TDuration TDqPqRdReadActor::GetCpuTime() {
std::vector<ui64> TDqPqRdReadActor::GetPartitionsToRead() const {
std::vector<ui64> res;
+ ui32 partitionsCount = ReadParams.GetPartitioningParams().GetTopicPartitionsCount();
ui64 currentPartition = ReadParams.GetPartitioningParams().GetEachTopicPartitionGroupId();
do {
res.emplace_back(currentPartition); // 0-based in topic API
currentPartition += ReadParams.GetPartitioningParams().GetDqPartitionsCount();
- } while (currentPartition < ReadParams.GetPartitioningParams().GetTopicPartitionsCount());
+ } while (currentPartition < partitionsCount);
return res;
}
@@ -592,29 +769,31 @@ void TDqPqRdReadActor::Handle(NFq::TEvRowDispatcher::TEvStatistics::TPtr& ev) {
SRC_LOG_T("Received TEvStatistics from " << ev->Sender << ", seqNo " << meta.GetSeqNo() << ", ConfirmedSeqNo " << meta.GetConfirmedSeqNo() << " generation " << ev->Cookie);
Counters.Statistics++;
CpuMicrosec += ev->Get()->Record.GetCpuMicrosec();
+ if (Parent != this) {
+ Parent->CpuMicrosec += ev->Get()->Record.GetCpuMicrosec();
+ }
auto* session = FindAndUpdateSession(ev);
if (!session) {
return;
}
- IngressStats.Bytes += ev->Get()->Record.GetReadBytes();
- IngressStats.FilteredBytes += ev->Get()->Record.GetFilteredBytes();
- IngressStats.FilteredRows += ev->Get()->Record.GetFilteredRows();
+ Parent->IngressStats.Bytes += ev->Get()->Record.GetReadBytes();
+ Parent->IngressStats.FilteredBytes += ev->Get()->Record.GetFilteredBytes();
+ Parent->IngressStats.FilteredRows += ev->Get()->Record.GetFilteredRows();
session->QueuedBytes = ev->Get()->Record.GetQueuedBytes();
session->QueuedRows = ev->Get()->Record.GetQueuedRows();
UpdateQueuedSize();
for (auto partition : ev->Get()->Record.GetPartition()) {
- ui64 partitionId = partition.GetPartitionId();
- auto& nextOffset = NextOffsetFromRD[partitionId];
- if (!nextOffset) {
- nextOffset = partition.GetNextMessageOffset();
- } else {
- nextOffset = std::max(*nextOffset, partition.GetNextMessageOffset());
+ auto partitionId = partition.GetPartitionId();
+ auto offset = partition.GetNextMessageOffset();
+ auto [itNextOffset, inserted] = NextOffsetFromRD.emplace(partitionId, offset);
+ if (!inserted) {
+ itNextOffset->second = std::max(itNextOffset->second, offset);
}
- SRC_LOG_T("NextOffsetFromRD [" << partitionId << "]= " << nextOffset);
- if (ReadyBuffer.empty()) {
- TPartitionKey partitionKey{TString{}, partitionId};
- PartitionToOffset[partitionKey] = *nextOffset;
+ SRC_LOG_T("NextOffsetFromRD [" << partitionId << "]= " << itNextOffset->second);
+ if (Parent->ReadyBuffer.empty()) {
+ auto partitionKey = TPartitionKey { Cluster, partitionId };
+ Parent->PartitionToOffset[partitionKey] = itNextOffset->second;
}
}
}
@@ -626,21 +805,22 @@ void TDqPqRdReadActor::Handle(NFq::TEvRowDispatcher::TEvGetInternalStateRequest:
}
void TDqPqRdReadActor::Handle(NFq::TEvRowDispatcher::TEvNewDataArrived::TPtr& ev) {
+ auto partitionId = ev->Get()->Record.GetPartitionId();
const NYql::NDqProto::TMessageTransportMeta& meta = ev->Get()->Record.GetTransportMeta();
- SRC_LOG_T("Received TEvNewDataArrived from " << ev->Sender << ", partition " << ev->Get()->Record.GetPartitionId() << ", seqNo " << meta.GetSeqNo() << ", ConfirmedSeqNo " << meta.GetConfirmedSeqNo() << " generation " << ev->Cookie);
+ SRC_LOG_T("Received TEvNewDataArrived from " << ev->Sender << ", partition " << partitionId << ", seqNo " << meta.GetSeqNo() << ", ConfirmedSeqNo " << meta.GetConfirmedSeqNo() << " generation " << ev->Cookie);
Counters.NewDataArrived++;
auto* session = FindAndUpdateSession(ev);
if (!session) {
return;
}
- auto partitionIt = session->Partitions.find(ev->Get()->Record.GetPartitionId());
+ auto partitionIt = session->Partitions.find(partitionId);
if (partitionIt == session->Partitions.end()) {
- SRC_LOG_E("Received TEvNewDataArrived from " << ev->Sender << " with wrong partition id " << ev->Get()->Record.GetPartitionId());
- Stop(NDqProto::StatusIds::INTERNAL_ERROR, {TIssue(TStringBuilder() << LogPrefix << "No partition with id " << ev->Get()->Record.GetPartitionId())});
+ SRC_LOG_E("Received TEvNewDataArrived from " << ev->Sender << " with wrong partition id " << partitionId);
+ Stop(NDqProto::StatusIds::INTERNAL_ERROR, {TIssue(TStringBuilder() << LogPrefix << "No partition with id " << partitionId )});
return;
}
- partitionIt->second.HasPendingData = true;
+ session->HasPendingData.insert(partitionId);
TrySendGetNextBatch(*session);
}
@@ -720,11 +900,11 @@ void TDqPqRdReadActor::ScheduleProcessState() {
void TDqPqRdReadActor::ReInit(const TString& reason) {
SRC_LOG_I("ReInit state, reason " << reason);
- Metrics.ReInit->Inc();
+ Parent->Metrics.ReInit->Inc();
State = EState::WAIT_COORDINATOR_ID;
- if (!ReadyBuffer.empty()) {
- NotifyCA();
+ if (!Parent->ReadyBuffer.empty()) {
+ Parent->NotifyCA();
}
}
@@ -744,6 +924,7 @@ void TDqPqRdReadActor::Handle(NFq::TEvRowDispatcher::TEvCoordinatorResult::TPtr&
TMap<NActors::TActorId, TSet<ui32>> distribution;
for (auto& p : ev->Get()->Record.GetPartitions()) {
TActorId rowDispatcherActorId = ActorIdFromProto(p.GetActorId());
+        // Note: in the federated case, all clusters are handled by the same row dispatcher
for (auto partitionId : p.GetPartitionIds()) {
LastReceivedPartitionDistribution[rowDispatcherActorId].insert(partitionId);
}
@@ -810,10 +991,13 @@ void TDqPqRdReadActor::Handle(NFq::TEvRowDispatcher::TEvMessageBatch::TPtr& ev)
Stop(NDqProto::StatusIds::INTERNAL_ERROR, {TIssue(TStringBuilder() << LogPrefix << "No partition with id " << partitionId)});
return;
}
- auto& partirtion = partitionIt->second;
- Metrics.InFlyGetNextBatch->Set(0);
- ReadyBuffer.emplace(partitionId, ev->Get()->Record.MessagesSize());
- TReadyBatch& activeBatch = ReadyBuffer.back();
+ Parent->Metrics.InFlyGetNextBatch->Set(0);
+ if (ev->Get()->Record.GetMessages().empty()) {
+ return;
+ }
+
+ TPartitionKey partitionKey { Cluster, partitionId };
+ TReadyBatch& activeBatch = Parent->ReadyBuffer.emplace(partitionKey, ev->Get()->Record.MessagesSize());
auto& nextOffset = NextOffsetFromRD[partitionId];
@@ -834,8 +1018,7 @@ void TDqPqRdReadActor::Handle(NFq::TEvRowDispatcher::TEvMessageBatch::TPtr& ev)
activeBatch.UsedSpace = bytes;
ReadyBufferSizeBytes += bytes;
activeBatch.NextOffset = ev->Get()->Record.GetNextMessageOffset();
- partirtion.IsWaitingMessageBatch = false;
- NotifyCA();
+ Parent->NotifyCA();
}
void TDqPqRdReadActor::AddMessageBatch(TRope&& messageBatch, NKikimr::NMiniKQL::TUnboxedValueBatch& buffer) {
@@ -885,25 +1068,30 @@ void TDqPqRdReadActor::PrintInternalState() {
TString TDqPqRdReadActor::GetInternalState() {
TStringStream str;
- str << LogPrefix << "State: used buffer size " << ReadyBufferSizeBytes << " ready buffer event size " << ReadyBuffer.size() << " state " << static_cast<ui64>(State) << " InFlyAsyncInputData " << InFlyAsyncInputData << "\n";
+ str << LogPrefix << "State: used buffer size " << Parent->ReadyBufferSizeBytes << " ready buffer event size " << Parent->ReadyBuffer.size() << " state " << static_cast<ui64>(State) << " InFlyAsyncInputData " << Parent->InFlyAsyncInputData << "\n";
str << "Counters: GetAsyncInputData " << Counters.GetAsyncInputData << " CoordinatorChanged " << Counters.CoordinatorChanged << " CoordinatorResult " << Counters.CoordinatorResult
<< " MessageBatch " << Counters.MessageBatch << " StartSessionAck " << Counters.StartSessionAck << " NewDataArrived " << Counters.NewDataArrived
<< " SessionError " << Counters.SessionError << " Statistics " << Counters.Statistics << " NodeDisconnected " << Counters.NodeDisconnected
<< " NodeConnected " << Counters.NodeConnected << " Undelivered " << Counters.Undelivered << " Retry " << Counters.Retry
<< " PrivateHeartbeat " << Counters.PrivateHeartbeat << " SessionClosed " << Counters.SessionClosed << " Pong " << Counters.Pong
<< " Heartbeat " << Counters.Heartbeat << " PrintState " << Counters.PrintState << " ProcessState " << Counters.ProcessState
- << " NotifyCA " << Counters.NotifyCA << "\n";
+ << " NotifyCA " << Parent->Counters.NotifyCA << "\n";
for (auto& [rowDispatcherActorId, sessionInfo] : Sessions) {
str << " " << rowDispatcherActorId << " status " << static_cast<ui64>(sessionInfo.Status)
<< " is waiting ack " << sessionInfo.IsWaitingStartSessionAck << " connection id " << sessionInfo.Generation << " ";
sessionInfo.EventsQueue.PrintInternalState(str);
- for (const auto& [partitionId, partition] : sessionInfo.Partitions) {
- const auto offsetIt = NextOffsetFromRD.find(partitionId);
- str << " partId " << partitionId
- << " next offset " << ((offsetIt != NextOffsetFromRD.end()) ? ToString(offsetIt->second) : TString("<empty>"))
- << " is waiting batch " << partition.IsWaitingMessageBatch
- << " has pending data " << partition.HasPendingData << "\n";
+ str << " partitions";
+ for (const auto partitionId : sessionInfo.Partitions) {
+ str << " " << partitionId;
+ }
+ str << " offsets";
+ for (const auto& [partitionId, offset] : NextOffsetFromRD) {
+ str << " " << partitionId << "=" << offset;
+ }
+ str << " has pending data";
+ for (const auto partitionId : sessionInfo.HasPendingData) {
+ str << " " << partitionId;
}
str << "\n";
}
@@ -917,20 +1105,17 @@ void TDqPqRdReadActor::Handle(TEvPrivate::TEvProcessState::TPtr&) {
}
void TDqPqRdReadActor::TrySendGetNextBatch(TSession& sessionInfo) {
- if (ReadyBufferSizeBytes > MaxBufferSize) {
+    // Called on a child (per-cluster) actor
+ if (Parent->ReadyBufferSizeBytes > MaxBufferSize) {
return;
}
- for (auto& [partitionId, partition] : sessionInfo.Partitions) {
- if (!partition.HasPendingData) {
- continue;
- }
- Metrics.InFlyGetNextBatch->Inc();
+ for (auto partitionId : sessionInfo.HasPendingData) {
+ Parent->Metrics.InFlyGetNextBatch->Inc();
auto event = std::make_unique<NFq::TEvRowDispatcher::TEvGetNextBatch>();
- partition.HasPendingData = false;
- partition.IsWaitingMessageBatch = true;
event->Record.SetPartitionId(partitionId);
sessionInfo.EventsQueue.Send(event.release(), sessionInfo.Generation);
}
+ sessionInfo.HasPendingData.clear();
}
template <class TEventPtr>
@@ -972,6 +1157,7 @@ void TDqPqRdReadActor::SendNoSession(const NActors::TActorId& recipient, ui64 co
}
void TDqPqRdReadActor::NotifyCA() {
+    // Called on the parent actor
Metrics.InFlyAsyncInputData->Set(1);
InFlyAsyncInputData = true;
Counters.NotifyCA++;
@@ -1002,9 +1188,7 @@ void TDqPqRdReadActor::UpdateSessions() {
std::forward_as_tuple(TxId, SelfId(), rowDispatcherActorId, queueId, ++NextGeneration));
auto& session = Sessions.at(rowDispatcherActorId);
SRC_LOG_I("Create session to " << rowDispatcherActorId << ", generation " << session.Generation);
- for (auto partitionId : partitions) {
- session.Partitions[partitionId];
- }
+ session.Partitions.insert(partitions.begin(), partitions.end());
ReadActorByEventQueueId[queueId] = rowDispatcherActorId;
}
LastUsedPartitionDistribution = LastReceivedPartitionDistribution;
@@ -1017,8 +1201,178 @@ void TDqPqRdReadActor::UpdateQueuedSize() {
queuedBytes += sessionInfo.QueuedBytes;
queuedRows += sessionInfo.QueuedRows;
}
- IngressStats.QueuedBytes = queuedBytes;
- IngressStats.QueuedRows = queuedRows;
+ Parent->IngressStats.QueuedBytes = queuedBytes;
+ Parent->IngressStats.QueuedRows = queuedRows;
+}
+
+void TDqPqRdReadActor::StartClusterDiscovery() {
+ if (StaticDiscovery) {
+ if (SourceParams.FederatedClustersSize()) {
+ for (auto& federatedCluster : SourceParams.GetFederatedClusters()) {
+ auto& cluster = Clusters.emplace_back(
+ NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo {
+ .Name = federatedCluster.GetName(),
+ .Endpoint = federatedCluster.GetEndpoint(),
+ .Path = federatedCluster.GetDatabase(),
+ },
+ federatedCluster.GetPartitionsCount()
+ );
+ if (cluster.PartitionsCount == 0) {
+ cluster.PartitionsCount = ReadParams.GetPartitioningParams().GetTopicPartitionsCount();
+ SRC_LOG_W("PartitionsCount for offline server assumed to be " << cluster.PartitionsCount);
+ }
+ }
+ } else { // old AST fallback
+ Clusters.emplace_back(
+ NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo {
+ .Endpoint = SourceParams.GetEndpoint(),
+ .Path = SourceParams.GetDatabase(),
+ },
+ ReadParams.GetPartitioningParams().GetTopicPartitionsCount()
+ );
+ }
+ for (ui32 clusterIndex = 0; clusterIndex < Clusters.size(); ++clusterIndex) {
+ StartCluster(clusterIndex);
+ }
+ return;
+ }
+ GetFederatedTopicClient()
+ .GetAllTopicClusters()
+ .Subscribe([
+ actorSystem = NActors::TActivationContext::ActorSystem(),
+ selfId = SelfId()](const auto& future)
+ {
+ try {
+ auto federatedClusters = future.GetValue();
+ actorSystem->Send(selfId, new TEvPrivate::TEvReceivedClusters(std::move(federatedClusters)));
+ } catch (const std::exception& ex) {
+ actorSystem->Send(selfId, new TEvPrivate::TEvReceivedClusters(ex));
+ }
+ });
+}
+
+void TDqPqRdReadActor::Handle(TEvPrivate::TEvReceivedClusters::TPtr& ev) {
+ SRC_LOG_D("Got cluster info");
+ auto& federatedClusters = ev->Get()->FederatedClusters;
+ if (federatedClusters.empty()) {
+ TStringBuilder message;
+ message << "Failed to discover clusters for topic \"" << SourceParams.GetTopicPath() << "\"";
+ if (ev->Get()->ExceptionMessage) {
+ message << ", got exception: " << *ev->Get()->ExceptionMessage;
+ } else {
+ message << ", empty clusters list";
+ }
+ SRC_LOG_E(message);
+ TIssue issue(message);
+ Send(ComputeActorId, new TEvAsyncInputError(InputIndex, TIssues({issue}), NYql::NDqProto::StatusIds::BAD_REQUEST));
+ return;
+ }
+ Y_ENSURE(!federatedClusters.empty());
+ Clusters.reserve(federatedClusters.size());
+ ui32 index = 0;
+ for (auto& cluster : federatedClusters) {
+ auto& clusterState = Clusters.emplace_back(std::move(cluster), 0);
+ SRC_LOG_D(index << " Name " << clusterState.Info.Name << " Endpoint " << clusterState.Info.Endpoint << " Path " << clusterState.Info.Path << " Status " << (int)clusterState.Info.Status);
+ std::string clusterTopicPath = SourceParams.GetTopicPath();
+ clusterState.Info.AdjustTopicPath(clusterTopicPath);
+ GetTopicClient(clusterState)
+ .DescribeTopic(TString(clusterTopicPath), {})
+ .Subscribe([
+ index,
+ actorSystem = NActors::TActivationContext::ActorSystem(),
+ selfId = SelfId()](const auto& describeTopicFuture)
+ {
+ try {
+ auto& describeTopic = describeTopicFuture.GetValue();
+ if (!describeTopic.IsSuccess()) {
+ actorSystem->Send(selfId, new TEvPrivate::TEvDescribeTopicResult(index, describeTopic));
+ return;
+ }
+ auto partitionsCount = describeTopic.GetTopicDescription().GetTotalPartitionsCount();
+ actorSystem->Send(selfId, new TEvPrivate::TEvDescribeTopicResult(index, partitionsCount));
+ } catch (const std::exception& ex) {
+ actorSystem->Send(selfId, new TEvPrivate::TEvDescribeTopicResult(index,
+ NYdb::TStatus(NYdb::EStatus::INTERNAL_ERROR,
+ NYdb::NIssue::TIssues({NYdb::NIssue::TIssue(ex.what())}))));
+ return;
+ }
+ });
+ index++;
+ }
+}
+
+void TDqPqRdReadActor::Handle(TEvPrivate::TEvDescribeTopicResult::TPtr& ev) {
+ auto clusterIndex = ev->Get()->ClusterIndex;
+ auto partitionsCount = ev->Get()->PartitionsCount;
+ if (auto status = ev->Get()->Status) {
+ TStringBuilder message;
+ message << "Failed to describe topic \"" << SourceParams.GetTopicPath() << "\"";
+ if (!Clusters[clusterIndex].Info.Name.empty()) {
+ message << " on cluster \"" << Clusters[clusterIndex].Info.Name << "\"";
+ }
+ SRC_LOG_E(message);
+ TIssue issue(message);
+ for (auto& subIssue : status->GetIssues()) {
+ TIssuePtr newIssue(new TIssue(NYdb::NAdapters::ToYqlIssue(subIssue)));
+ issue.AddSubIssue(newIssue);
+ }
+ Send(ComputeActorId, new TEvAsyncInputError(InputIndex, TIssues({issue}), NYql::NDqProto::StatusIds::BAD_REQUEST));
+ return;
+ }
+ SRC_LOG_D("Got partition info for cluster " << clusterIndex << ", partition count " << partitionsCount);
+ Y_ENSURE(Clusters[clusterIndex].PartitionsCount == 0); // TODO Handle refresh
+ Y_ENSURE(partitionsCount >= Clusters[clusterIndex].PartitionsCount);
+ Clusters[clusterIndex].PartitionsCount = partitionsCount;
+ StartCluster(clusterIndex);
+}
+
+void TDqPqRdReadActor::StartCluster(ui32 clusterIndex) {
+ if (Clusters.size() == 1 && Clusters[clusterIndex].Info.Name.empty()) {
+ SRC_LOG_D("Switch to single-cluster mode");
+ FederatedTopicClient.Reset();
+ Clusters[clusterIndex].Child = this;
+ Clusters[clusterIndex].ChildId = SelfId();
+ SourceParams.SetEndpoint(TString(Clusters[clusterIndex].Info.Endpoint));
+ SourceParams.SetDatabase(TString(Clusters[clusterIndex].Info.Path));
+ ReadParams.mutable_partitioningparams()->SetTopicPartitionsCount(Clusters[clusterIndex].PartitionsCount);
+ State = EState::INIT;
+ Init();
+ InitChild();
+ ProcessState();
+ return;
+ }
+ NPq::NProto::TDqPqTopicSource sourceParams = SourceParams;
+ sourceParams.SetEndpoint(TString(Clusters[clusterIndex].Info.Endpoint));
+ sourceParams.SetDatabase(TString(Clusters[clusterIndex].Info.Path));
+ NPq::NProto::TDqReadTaskParams readParams = ReadParams;
+ readParams.mutable_partitioningparams()->SetTopicPartitionsCount(Clusters[clusterIndex].PartitionsCount);
+ auto actor = new TDqPqRdReadActor(
+ InputIndex,
+ IngressStats.Level,
+ TxId,
+ TaskId,
+ HolderFactory,
+ TypeEnv,
+ std::move(sourceParams),
+ std::move(readParams),
+ Driver,
+ CredentialsProviderFactory,
+ ComputeActorId,
+ LocalRowDispatcherActorId,
+ Token,
+ {},
+ MaxBufferSize,
+ PqGateway,
+ this,
+ TString(Clusters[clusterIndex].Info.Name));
+ Clusters[clusterIndex].ChildId = RegisterWithSameMailbox(actor);
+ actor->Init();
+ actor->InitChild();
+ ProcessState();
+}
+
+void TDqPqRdReadActor::Handle(TEvPrivate::TEvRefreshClusters::TPtr&) {
+ Y_ENSURE(false); // TBD
}
void TDqPqRdReadActor::Handle(TEvPrivate::TEvNotifyCA::TPtr&) {
@@ -1035,11 +1389,14 @@ std::pair<IDqComputeActorAsyncInput*, NActors::IActor*> CreateDqPqRdReadActor(
ui64 taskId,
const THashMap<TString, TString>& secureParams,
const THashMap<TString, TString>& taskParams,
+ NYdb::TDriver driver,
+ ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory,
const NActors::TActorId& computeActorId,
const NActors::TActorId& localRowDispatcherActorId,
const NKikimr::NMiniKQL::THolderFactory& holderFactory,
const ::NMonitoring::TDynamicCounterPtr& counters,
- i64 bufferSize)
+ i64 bufferSize,
+ const IPqGateway::TPtr& pqGateway)
{
auto taskParamsIt = taskParams.find("pq");
YQL_ENSURE(taskParamsIt != taskParams.end(), "Failed to get pq task params");
@@ -1049,6 +1406,7 @@ std::pair<IDqComputeActorAsyncInput*, NActors::IActor*> CreateDqPqRdReadActor(
const TString& tokenName = settings.GetToken().GetName();
const TString token = secureParams.Value(tokenName, TString());
+ const bool addBearerToToken = settings.GetAddBearerToToken();
TDqPqRdReadActor* actor = new TDqPqRdReadActor(
inputIndex,
@@ -1059,12 +1417,14 @@ std::pair<IDqComputeActorAsyncInput*, NActors::IActor*> CreateDqPqRdReadActor(
typeEnv,
std::move(settings),
std::move(readTaskParamsMsg),
+ driver,
+ CreateCredentialsProviderFactoryForStructuredToken(credentialsFactory, token, addBearerToToken),
computeActorId,
localRowDispatcherActorId,
token,
counters,
- bufferSize
- );
+ bufferSize,
+ pqGateway);
return {actor, actor};
}
diff --git a/ydb/library/yql/providers/pq/async_io/dq_pq_rd_read_actor.h b/ydb/library/yql/providers/pq/async_io/dq_pq_rd_read_actor.h
index 1f3a4622de..7d7c1f340a 100644
--- a/ydb/library/yql/providers/pq/async_io/dq_pq_rd_read_actor.h
+++ b/ydb/library/yql/providers/pq/async_io/dq_pq_rd_read_actor.h
@@ -8,6 +8,7 @@
#include <ydb/library/yql/providers/pq/proto/dq_io.pb.h>
#include <ydb/library/yql/providers/pq/proto/dq_task_params.pb.h>
+#include <ydb/library/yql/providers/pq/provider/yql_pq_gateway.h>
#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/driver/driver.h>
@@ -31,10 +32,13 @@ std::pair<IDqComputeActorAsyncInput*, NActors::IActor*> CreateDqPqRdReadActor(
ui64 taskId,
const THashMap<TString, TString>& secureParams,
const THashMap<TString, TString>& taskParams,
+ NYdb::TDriver driver,
+ ISecuredServiceAccountCredentialsFactory::TPtr credentialsFactory,
const NActors::TActorId& computeActorId,
const NActors::TActorId& localRowDispatcherActorId,
const NKikimr::NMiniKQL::THolderFactory& holderFactory,
const ::NMonitoring::TDynamicCounterPtr& counters,
- i64 bufferSize = PQRdReadDefaultFreeSpace);
+ i64 bufferSize,
+ const IPqGateway::TPtr& pqGateway);
} // namespace NYql::NDq
diff --git a/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.cpp b/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.cpp
index 5237917a00..ed2fd535e2 100644
--- a/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.cpp
+++ b/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor.cpp
@@ -15,10 +15,12 @@
#include <ydb/library/yql/providers/pq/async_io/dq_pq_rd_read_actor.h>
#include <ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.h>
#include <ydb/library/yql/providers/pq/common/pq_meta_fields.h>
+#include <ydb/library/yql/providers/pq/common/pq_partition_key.h>
#include <ydb/library/yql/providers/pq/proto/dq_io_state.pb.h>
#include <yql/essentials/utils/log/log.h>
#include <yql/essentials/utils/yql_panic.h>
+#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/federated_topic/federated_topic.h>
#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/topic/client.h>
#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/types/credentials/credentials.h>
@@ -77,6 +79,8 @@ struct TEvPrivate {
EvSourceDataReady = EvBegin,
EvReconnectSession,
+ EvReceivedClusters,
+ EvDescribeTopicResult,
EvEnd
};
@@ -87,11 +91,36 @@ struct TEvPrivate {
struct TEvSourceDataReady : public TEventLocal<TEvSourceDataReady, EvSourceDataReady> {};
struct TEvReconnectSession : public TEventLocal<TEvReconnectSession, EvReconnectSession> {};
+ struct TEvReceivedClusters : public NActors::TEventLocal<TEvReceivedClusters, EvReceivedClusters> {
+ explicit TEvReceivedClusters(
+ std::vector<NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo>&& federatedClusters)
+ : FederatedClusters(std::move(federatedClusters))
+ {}
+ explicit TEvReceivedClusters(const std::exception& ex)
+ : ExceptionMessage(ex.what())
+ {}
+ std::vector<NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo> FederatedClusters;
+ std::optional<std::string> ExceptionMessage;
+ };
+ struct TEvDescribeTopicResult : public NActors::TEventLocal<TEvDescribeTopicResult, EvDescribeTopicResult> {
+ TEvDescribeTopicResult(ui32 clusterIndex, ui32 partitionsCount)
+ : ClusterIndex(clusterIndex)
+ , PartitionsCount(partitionsCount)
+ {}
+ TEvDescribeTopicResult(ui32 clusterIndex, const NYdb::TStatus& status)
+ : ClusterIndex(clusterIndex)
+ , PartitionsCount(0)
+ , Status(status)
+ {}
+ ui32 ClusterIndex;
+ ui32 PartitionsCount;
+ TMaybe<NYdb::TStatus> Status;
+ };
};
} // namespace
-
class TDqPqReadActor : public NActors::TActor<TDqPqReadActor>, public NYql::NDq::NInternal::TDqPqReadActorBase {
+ static constexpr bool StaticDiscovery = true;
struct TMetrics {
TMetrics(const TTxId& txId, ui64 taskId, const ::NMonitoring::TDynamicCounterPtr& counters)
: TxId(std::visit([](auto arg) { return ToString(arg); }, txId))
@@ -122,8 +151,24 @@ class TDqPqReadActor : public NActors::TActor<TDqPqReadActor>, public NYql::NDq:
NMonitoring::THistogramPtr WaitEventTimeMs;
};
+ struct TClusterState {
+ TClusterState(ui32 index, NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo&& info, ui32 partitionsCount)
+ : Index(index)
+ , Info(std::move(info))
+ , PartitionsCount(partitionsCount)
+ {}
+ ui32 Index;
+ NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo Info;
+ ITopicClient::TPtr TopicClient;
+ std::shared_ptr<NYdb::NTopic::IReadSession> ReadSession;
+ ui32 PartitionsCount;
+ NThreading::TFuture<void> EventFuture;
+ bool SubscribedOnEvent = false;
+ TMaybe<TInstant> WaitEventStartedAt;
+ };
+
public:
- using TPartitionKey = std::pair<TString, ui64>; // Cluster, partition id.
+ using TPartitionKey = ::NPq::TPartitionKey;
using TDebugOffsets = TMaybe<std::pair<ui64, ui64>>;
TDqPqReadActor(
@@ -160,12 +205,23 @@ public:
IngressStats.Level = statsLevel;
}
- NYdb::NTopic::TTopicClientSettings GetTopicClientSettings() const {
+ NYdb::NFederatedTopic::TFederatedTopicClientSettings GetFederatedTopicClientSettings() const {
+ NYdb::NFederatedTopic::TFederatedTopicClientSettings opts = PqGateway->GetFederatedTopicClientSettings();
+ opts.Database(SourceParams.GetDatabase())
+ .DiscoveryEndpoint(SourceParams.GetEndpoint())
+ .SslCredentials(NYdb::TSslCredentials(SourceParams.GetUseSsl()))
+ .CredentialsProviderFactory(CredentialsProviderFactory);
+
+ return opts;
+ }
+
+ NYdb::NTopic::TTopicClientSettings GetTopicClientSettings(TClusterState& state) const {
NYdb::NTopic::TTopicClientSettings opts = PqGateway->GetTopicClientSettings();
opts.Database(SourceParams.GetDatabase())
.DiscoveryEndpoint(SourceParams.GetEndpoint())
.SslCredentials(NYdb::TSslCredentials(SourceParams.GetUseSsl()))
.CredentialsProviderFactory(CredentialsProviderFactory);
+ state.Info.AdjustTopicClientSettings(opts);
return opts;
}
@@ -183,10 +239,9 @@ public:
TDqPqReadActorBase::LoadState(state);
InitWatermarkTracker();
- if (ReadSession) {
- ReadSession.reset();
- GetReadSession();
- }
+ Clusters.clear();
+ AsyncInit = {};
+ StartClusterDiscovery();
}
void CommitState(const NDqProto::TCheckpoint& checkpoint) override {
@@ -198,41 +253,69 @@ public:
}
}
- ITopicClient& GetTopicClient() {
- if (!TopicClient) {
- TopicClient = PqGateway->GetTopicClient(Driver, GetTopicClientSettings());
+ IFederatedTopicClient& GetFederatedTopicClient() {
+ if (!FederatedTopicClient) {
+ FederatedTopicClient = PqGateway->GetFederatedTopicClient(Driver, GetFederatedTopicClientSettings());
}
- return *TopicClient;
+ return *FederatedTopicClient;
}
- NYdb::NTopic::IReadSession& GetReadSession() {
- if (!ReadSession) {
- ReadSession = GetTopicClient().CreateReadSession(GetReadSessionSettings());
- SRC_LOG_I("SessionId: " << GetSessionId() << " CreateReadSession");
+ ITopicClient& GetTopicClient(TClusterState& clusterState) {
+ if (!clusterState.TopicClient) {
+ clusterState.TopicClient = PqGateway->GetTopicClient(Driver, GetTopicClientSettings(clusterState));
}
- return *ReadSession;
+ return *clusterState.TopicClient;
+ }
+
+ NYdb::NTopic::IReadSession& GetReadSession(TClusterState& clusterState) {
+ if (!clusterState.ReadSession) {
+ clusterState.ReadSession = GetTopicClient(clusterState).CreateReadSession(GetReadSessionSettings(clusterState));
+ SRC_LOG_I("SessionId: " << GetSessionId(clusterState.Index) << " CreateReadSession");
+ }
+ return *clusterState.ReadSession;
}
TString GetSessionId() const override {
- return ReadSession ? TString{ReadSession->GetSessionId()} : TString{"empty"};
+ if (Clusters.empty()) {
+ return TString{"empty"};
+ }
+ TStringBuilder str;
+ for (const auto& clusterState : Clusters) {
+ if (auto readSession = clusterState.ReadSession) {
+ str << readSession->GetSessionId();
+ } else {
+ str << TString{"empty"};
+ }
+ str << ',';
+ }
+ str.pop_back();
+ return str;
+ }
+
+ TString GetSessionId(ui32 index) const {
+ return !Clusters.empty() && Clusters[index].ReadSession ? TString{Clusters[index].ReadSession->GetSessionId()} : TString{"empty"};
}
private:
STRICT_STFUNC(StateFunc,
hFunc(TEvPrivate::TEvSourceDataReady, Handle);
hFunc(TEvPrivate::TEvReconnectSession, Handle);
+ hFunc(TEvPrivate::TEvReceivedClusters, Handle);
+ hFunc(TEvPrivate::TEvDescribeTopicResult, Handle);
)
void Handle(TEvPrivate::TEvSourceDataReady::TPtr& ev) {
- SRC_LOG_T("SessionId: " << GetSessionId() << " Source data ready");
- SubscribedOnEvent = false;
- if (ev.Get()->Cookie) {
+ if (ev.Get()->Cookie && !Clusters.empty()) {
+ auto index = ev.Get()->Cookie - 1;
+ auto& clusterState = Clusters[index];
+ SRC_LOG_T("SessionId: " << GetSessionId(index) << " Source data ready");
+ clusterState.SubscribedOnEvent = false;
Metrics.InFlySubscribe->Dec();
- }
- if (WaitEventStartedAt) {
- auto waitEventDurationMs = (TInstant::Now() - *WaitEventStartedAt).MilliSeconds();
- Metrics.WaitEventTimeMs->Collect(waitEventDurationMs);
- WaitEventStartedAt.Clear();
+ if (clusterState.WaitEventStartedAt) {
+ auto waitEventDurationMs = (TInstant::Now() - *clusterState.WaitEventStartedAt).MilliSeconds();
+ Metrics.WaitEventTimeMs->Collect(waitEventDurationMs);
+ clusterState.WaitEventStartedAt.Clear();
+ }
}
Metrics.InFlyAsyncInputData->Set(1);
Metrics.AsyncInputDataRate->Inc();
@@ -240,13 +323,15 @@ private:
}
void Handle(TEvPrivate::TEvReconnectSession::TPtr&) {
- SRC_LOG_D("SessionId: " << GetSessionId() << ", Reconnect epoch: " << Metrics.ReconnectRate->Val());
- Metrics.ReconnectRate->Inc();
- if (ReadSession) {
- ReadSession->Close(TDuration::Zero());
- ReadSession.reset();
- ReadyBuffer = std::queue<TReadyBatch>{}; // clear read buffer
+ for (auto& clusterState : Clusters) {
+ SRC_LOG_D("SessionId: " << GetSessionId(clusterState.Index) << ", Reconnect epoch: " << Metrics.ReconnectRate->Val());
+ if (clusterState.ReadSession) {
+ clusterState.ReadSession->Close(TDuration::Zero());
+ clusterState.ReadSession.reset();
+ }
}
+ ReadyBuffer = std::queue<TReadyBatch>{}; // clear read buffer
+ Metrics.ReconnectRate->Inc();
Schedule(ReconnectPeriod, new TEvPrivate::TEvReconnectSession());
}
@@ -256,11 +341,14 @@ private:
std::queue<TReadyBatch> empty;
ReadyBuffer.swap(empty);
- if (ReadSession) {
- ReadSession->Close(TDuration::Zero());
- ReadSession.reset();
+ for (auto& clusterState : Clusters) {
+ if (clusterState.ReadSession) {
+ clusterState.ReadSession->Close(TDuration::Zero());
+ clusterState.ReadSession.reset();
+ }
+ clusterState.TopicClient.Reset();
}
- TopicClient.Reset();
+ FederatedTopicClient.Reset();
TActor<TDqPqReadActor>::PassAway();
}
@@ -281,6 +369,127 @@ private:
}
}
+ void StartClusterDiscovery() {
+ Y_ENSURE (Clusters.empty());
+ if (StaticDiscovery) {
+ if (SourceParams.FederatedClustersSize()) {
+ for (auto& federatedCluster : SourceParams.GetFederatedClusters()) {
+ auto& cluster = Clusters.emplace_back(
+ 0, // Index
+ NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo {
+ .Name = federatedCluster.GetName(),
+ .Endpoint = federatedCluster.GetEndpoint(),
+ .Path = federatedCluster.GetDatabase(),
+ },
+ ReadParams.GetPartitioningParams().GetTopicPartitionsCount()
+ );
+ if (cluster.PartitionsCount == 0) {
+ cluster.PartitionsCount = ReadParams.GetPartitioningParams().GetTopicPartitionsCount();
+ SRC_LOG_W("PartitionsCount for offline server assumed to be " << cluster.PartitionsCount);
+ }
+ }
+ } else {
+ Clusters.emplace_back(
+ 0, // Index
+ NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo {
+ .Endpoint = SourceParams.GetEndpoint(),
+ .Path =SourceParams.GetDatabase()
+ },
+ ReadParams.GetPartitioningParams().GetTopicPartitionsCount()
+ );
+ }
+ Send(SelfId(), new TEvPrivate::TEvSourceDataReady());
+ return;
+ }
+ if (AsyncInit.Initialized()) {
+ return;
+ }
+ AsyncInit = GetFederatedTopicClient().GetAllTopicClusters();
+ AsyncInit.Subscribe([
+ actorSystem = NActors::TActivationContext::ActorSystem(),
+ selfId = SelfId()](const auto& future)
+ {
+ try {
+ auto federatedClusters = future.GetValue();
+ actorSystem->Send(selfId, new TEvPrivate::TEvReceivedClusters(std::move(federatedClusters)));
+ } catch (const std::exception& ex) {
+ actorSystem->Send(selfId, new TEvPrivate::TEvReceivedClusters(ex));
+ }
+ });
+ }
+
+ void Handle(TEvPrivate::TEvReceivedClusters::TPtr& ev) {
+ // TODO support refresh
+ SRC_LOG_D("Got cluster info");
+ auto& federatedClusters = ev->Get()->FederatedClusters;
+ if (federatedClusters.empty()) {
+ TStringBuilder message;
+ message << "Failed to get clusters topic \"" << SourceParams.GetTopicPath() << "\"";
+ if (ev->Get()->ExceptionMessage) {
+ message << ", got exception: " << *ev->Get()->ExceptionMessage;
+ } else {
+ message << ", empty clusters list";
+ }
+ TIssue issue(message);
+ Send(ComputeActorId, new TEvAsyncInputError(InputIndex, TIssues({issue}), NYql::NDqProto::StatusIds::BAD_REQUEST));
+ return;
+ }
+ Clusters.reserve(federatedClusters.size());
+ ui32 index = 0;
+ for (auto& cluster : federatedClusters) {
+ auto& clusterState = Clusters.emplace_back(index, std::move(cluster), 0u);
+ SRC_LOG_D(index << " Name " << clusterState.Info.Name << " Endpoint " << clusterState.Info.Endpoint << " Path " << clusterState.Info.Path << " Status " << (int)clusterState.Info.Status);
+ std::string clusterTopicPath = SourceParams.GetTopicPath();
+ clusterState.Info.AdjustTopicPath(clusterTopicPath);
+ GetTopicClient(clusterState)
+ .DescribeTopic(TString(clusterTopicPath), {})
+ .Subscribe([
+ index,
+ actorSystem = NActors::TActivationContext::ActorSystem(),
+ selfId = SelfId()](const auto& describeTopicFuture)
+ {
+ try {
+ auto& describeTopic = describeTopicFuture.GetValue();
+ if (!describeTopic.IsSuccess()) {
+ actorSystem->Send(selfId, new TEvPrivate::TEvDescribeTopicResult(index, describeTopic));
+ return;
+ }
+ auto partitionsCount = describeTopic.GetTopicDescription().GetTotalPartitionsCount();
+ actorSystem->Send(selfId, new TEvPrivate::TEvDescribeTopicResult(index, partitionsCount));
+ } catch (const std::exception& ex) {
+ actorSystem->Send(selfId, new TEvPrivate::TEvDescribeTopicResult(index,
+ NYdb::TStatus(NYdb::EStatus::INTERNAL_ERROR,
+ NYdb::NIssue::TIssues({NYdb::NIssue::TIssue(ex.what())}))));
+ return;
+ }
+ });
+ index++;
+ }
+ }
+
+ void Handle(TEvPrivate::TEvDescribeTopicResult::TPtr& ev) {
+ auto clusterIndex = ev->Get()->ClusterIndex;
+ auto partitionsCount = ev->Get()->PartitionsCount;
+ if (auto status = ev->Get()->Status) {
+ TStringBuilder message;
+ message << "Failed to describe topic \"" << SourceParams.GetTopicPath() << "\"";
+ if (!Clusters[clusterIndex].Info.Name.empty()) {
+ message << " on cluster \"" << Clusters[clusterIndex].Info.Name << "\"";
+ }
+ SRC_LOG_E(message);
+ TIssue issue(message);
+ for (auto& subIssue : status->GetIssues()) {
+ TIssuePtr newIssue(new TIssue(NYdb::NAdapters::ToYqlIssue(subIssue)));
+ issue.AddSubIssue(newIssue);
+ }
+ Send(ComputeActorId, new TEvAsyncInputError(InputIndex, TIssues({issue}), NYql::NDqProto::StatusIds::BAD_REQUEST));
+ return;
+ }
+ SRC_LOG_D("Got partition info for cluster " << clusterIndex << " = " << partitionsCount);
+ Clusters[clusterIndex].PartitionsCount = partitionsCount;
+ Send(SelfId(), new TEvPrivate::TEvSourceDataReady());
+ }
+
i64 GetAsyncInputData(NKikimr::NMiniKQL::TUnboxedValueBatch& buffer, TMaybe<TInstant>& watermark, bool&, i64 freeSpace) override {
Metrics.InFlyAsyncInputData->Set(0);
SRC_LOG_T("SessionId: " << GetSessionId() << " GetAsyncInputData freeSpace = " << freeSpace);
@@ -300,20 +509,30 @@ private:
}
bool recheckBatch = false;
-
if (freeSpace > 0) {
- auto events = GetReadSession().GetEvents(false, std::nullopt, static_cast<size_t>(freeSpace));
- recheckBatch = !events.empty();
+ if (Clusters.empty()) {
+ StartClusterDiscovery();
+ }
+ for (auto& clusterState : Clusters) {
+ if (clusterState.PartitionsCount == 0) {
+ continue;
+ }
+ auto events = GetReadSession(clusterState).GetEvents(false, std::nullopt, static_cast<size_t>(freeSpace));
+ if (!events.empty()) {
+ recheckBatch = true;
+ }
- ui32 batchItemsEstimatedCount = 0;
- for (auto& event : events) {
- if (const auto* val = std::get_if<NYdb::NTopic::TReadSessionEvent::TDataReceivedEvent>(&event)) {
- batchItemsEstimatedCount += val->GetMessages().size();
+ ui32 batchItemsEstimatedCount = 0;
+ for (auto& event : events) {
+ if (const auto* val = std::get_if<NYdb::NTopic::TReadSessionEvent::TDataReceivedEvent>(&event)) {
+ batchItemsEstimatedCount += val->GetMessages().size();
+ }
}
- }
- for (auto& event : events) {
- std::visit(TTopicEventProcessor{*this, batchItemsEstimatedCount, LogPrefix}, event);
+ TTopicEventProcessor topicEventProcessor {*this, batchItemsEstimatedCount, LogPrefix, TString(clusterState.Info.Name), clusterState.Index };
+ for (auto& event : events) {
+ std::visit(topicEventProcessor, event);
+ }
}
}
@@ -341,14 +560,14 @@ private:
}
private:
- std::vector<ui64> GetPartitionsToRead() const {
+ std::vector<ui64> GetPartitionsToRead(TClusterState& clusterState) const {
std::vector<ui64> res;
ui64 currentPartition = ReadParams.GetPartitioningParams().GetEachTopicPartitionGroupId();
- do {
+ while (currentPartition < clusterState.PartitionsCount) {
res.emplace_back(currentPartition); // 0-based in topic API
currentPartition += ReadParams.GetPartitioningParams().GetDqPartitionsCount();
- } while (currentPartition < ReadParams.GetPartitioningParams().GetTopicPartitionsCount());
+ }
return res;
}
@@ -369,36 +588,43 @@ private:
TInstant::Now());
}
- NYdb::NTopic::TReadSessionSettings GetReadSessionSettings() const {
+ NYdb::NTopic::TReadSessionSettings GetReadSessionSettings(TClusterState& clusterState) const {
NYdb::NTopic::TTopicReadSettings topicReadSettings;
- topicReadSettings.Path(SourceParams.GetTopicPath());
- auto partitionsToRead = GetPartitionsToRead();
- SRC_LOG_D("SessionId: " << GetSessionId() << " PartitionsToRead: " << JoinSeq(", ", partitionsToRead));
+ std::string topicPath = SourceParams.GetTopicPath();
+ clusterState.Info.AdjustTopicPath(topicPath);
+ topicReadSettings.Path(topicPath);
+ auto partitionsToRead = GetPartitionsToRead(clusterState);
+ SRC_LOG_D("SessionId: " << GetSessionId(clusterState.Index) << " PartitionsToRead: " << JoinSeq(", ", partitionsToRead));
for (const auto partitionId : partitionsToRead) {
topicReadSettings.AppendPartitionIds(partitionId);
}
- return NYdb::NTopic::TReadSessionSettings()
+ auto settings = NYdb::NTopic::TReadSessionSettings();
+ settings
.AppendTopics(topicReadSettings)
.ConsumerName(SourceParams.GetConsumerName())
.MaxMemoryUsageBytes(BufferSize)
.ReadFromTimestamp(StartingMessageTimestamp);
+ return settings;
}
- static TPartitionKey MakePartitionKey(const NYdb::NTopic::TPartitionSession::TPtr& partitionSession) {
- // auto cluster = partitionSession->GetDatabaseName() // todo: switch to federatedfTopicApi to support lb federation
- const TString cluster; // empty value is used in YDS
- return std::make_pair(cluster, partitionSession->GetPartitionId());
+ static TPartitionKey MakePartitionKey(const TString& cluster, const NYdb::NTopic::TPartitionSession::TPtr& partitionSession) {
+ return { cluster, partitionSession->GetPartitionId() };
}
void SubscribeOnNextEvent() {
- if (!SubscribedOnEvent) {
- SubscribedOnEvent = true;
+ for (auto& clusterState : Clusters) {
+ SubscribeOnNextEvent(clusterState);
+ }
+ }
+ void SubscribeOnNextEvent(TClusterState& clusterState) {
+ if (!clusterState.SubscribedOnEvent) {
+ clusterState.SubscribedOnEvent = true;
Metrics.InFlySubscribe->Inc();
NActors::TActorSystem* actorSystem = NActors::TActivationContext::ActorSystem();
- WaitEventStartedAt = TInstant::Now();
- EventFuture = GetReadSession().WaitEvent().Subscribe([actorSystem, selfId = SelfId()](const auto&){
- actorSystem->Send(selfId, new TEvPrivate::TEvSourceDataReady(), 0, 1);
+ clusterState.WaitEventStartedAt = TInstant::Now();
+ clusterState.EventFuture = GetReadSession(clusterState).WaitEvent().Subscribe([actorSystem, selfId = SelfId(), index = clusterState.Index](const auto&){
+ actorSystem->Send(selfId, new TEvPrivate::TEvSourceDataReady(), 0, 1 + index);
});
}
}
@@ -414,7 +640,7 @@ private:
TMaybe<TInstant> Watermark;
TUnboxedValueVector Data;
i64 UsedSpace = 0;
- THashMap<NYdb::NTopic::TPartitionSession::TPtr, TList<std::pair<ui64, ui64>>> OffsetRanges; // [start, end)
+ THashMap<NYdb::NTopic::TPartitionSession::TPtr, std::pair<std::string, TList<std::pair<ui64, ui64>>>> OffsetRanges; // [start, end)
};
bool MaybeReturnReadyBatch(NKikimr::NMiniKQL::TUnboxedValueBatch& buffer, TMaybe<TInstant>& watermark, i64& usedSpace) {
@@ -430,11 +656,12 @@ private:
usedSpace = readyBatch.UsedSpace;
Metrics.DataRate->Add(readyBatch.UsedSpace);
- for (const auto& [PartitionSession, ranges] : readyBatch.OffsetRanges) {
+ for (const auto& [partitionSession, clusterRanges] : readyBatch.OffsetRanges) {
+ const auto& [cluster, ranges] = clusterRanges;
for (const auto& [start, end] : ranges) {
- CurrentDeferredCommit.Add(PartitionSession, start, end);
+ CurrentDeferredCommit.Add(partitionSession, start, end);
}
- PartitionToOffset[MakePartitionKey(PartitionSession)] = ranges.back().second;
+ PartitionToOffset[MakePartitionKey(TString(cluster), partitionSession)] = ranges.back().second;
}
ReadyBuffer.pop();
@@ -464,21 +691,16 @@ private:
}
struct TTopicEventProcessor {
- static TString ToString(const TPartitionKey& key) {
- return TStringBuilder{} << "[" << key.first << ", " << key.second << "]";
- }
-
void operator()(NYdb::NTopic::TReadSessionEvent::TDataReceivedEvent& event) {
- const auto partitionKey = MakePartitionKey(event.GetPartitionSession());
- const auto partitionKeyStr = ToString(partitionKey);
+ const auto partitionKey = MakePartitionKey(Cluster, event.GetPartitionSession());
for (const auto& message : event.GetMessages()) {
const std::string& data = message.GetData();
Self.IngressStats.Bytes += data.size();
LWPROBE(PqReadDataReceived, TString(TStringBuilder() << Self.TxId), Self.SourceParams.GetTopicPath(), TString{data});
- SRC_LOG_T("SessionId: " << Self.GetSessionId() << " Key: " << partitionKeyStr << " Data received: " << message.DebugString(true));
+ SRC_LOG_T("SessionId: " << Self.GetSessionId(Index) << " Key: " << partitionKey << " Data received: " << message.DebugString(true));
if (message.GetWriteTime() < Self.StartingMessageTimestamp) {
- SRC_LOG_D("SessionId: " << Self.GetSessionId() << " Key: " << partitionKeyStr << " Skip data. StartingMessageTimestamp: " << Self.StartingMessageTimestamp << ". Write time: " << message.GetWriteTime());
+ SRC_LOG_D("SessionId: " << Self.GetSessionId(Index) << " Key: " << partitionKey << " Skip data. StartingMessageTimestamp: " << Self.StartingMessageTimestamp << ". Write time: " << message.GetWriteTime());
continue;
}
@@ -488,7 +710,7 @@ private:
curBatch.Data.emplace_back(std::move(item));
curBatch.UsedSpace += size;
- auto& offsets = curBatch.OffsetRanges[message.GetPartitionSession()];
+ auto& [cluster, offsets] = curBatch.OffsetRanges[message.GetPartitionSession()];
if (!offsets.empty() && offsets.back().second == message.GetOffset()) {
offsets.back().second = message.GetOffset() + 1;
} else {
@@ -500,7 +722,7 @@ private:
void operator()(NYdb::NTopic::TSessionClosedEvent& ev) {
const auto& LogPrefix = Self.LogPrefix;
TString message = (TStringBuilder() << "Read session to topic \"" << Self.SourceParams.GetTopicPath() << "\" was closed");
- SRC_LOG_E("SessionId: " << Self.GetSessionId() << " " << message << ": " << ev.DebugString());
+ SRC_LOG_E("SessionId: " << Self.GetSessionId(Index) << " " << message << ": " << ev.DebugString());
TIssue issue(message);
for (const auto& subIssue : ev.GetIssues()) {
TIssuePtr newIssue(new TIssue(NYdb::NAdapters::ToYqlIssue(subIssue)));
@@ -512,39 +734,34 @@ private:
void operator()(NYdb::NTopic::TReadSessionEvent::TCommitOffsetAcknowledgementEvent&) { }
void operator()(NYdb::NTopic::TReadSessionEvent::TStartPartitionSessionEvent& event) {
- const auto partitionKey = MakePartitionKey(event.GetPartitionSession());
- const auto partitionKeyStr = ToString(partitionKey);
-
- SRC_LOG_D("SessionId: " << Self.GetSessionId() << " Key: " << partitionKeyStr << " StartPartitionSessionEvent received");
+ const auto partitionKey = MakePartitionKey(Cluster, event.GetPartitionSession());
+ SRC_LOG_D("SessionId: " << Self.GetSessionId(Index) << " Key: " << partitionKey << " StartPartitionSessionEvent received");
std::optional<ui64> readOffset;
const auto offsetIt = Self.PartitionToOffset.find(partitionKey);
if (offsetIt != Self.PartitionToOffset.end()) {
readOffset = offsetIt->second;
}
- SRC_LOG_D("SessionId: " << Self.GetSessionId() << " Key: " << partitionKeyStr << " Confirm StartPartitionSession with offset " << readOffset);
+ SRC_LOG_D("SessionId: " << Self.GetSessionId(Index) << " Key: " << partitionKey << " Confirm StartPartitionSession with offset " << readOffset);
event.Confirm(readOffset);
}
void operator()(NYdb::NTopic::TReadSessionEvent::TStopPartitionSessionEvent& event) {
- const auto partitionKey = MakePartitionKey(event.GetPartitionSession());
- const auto partitionKeyStr = ToString(partitionKey);
- SRC_LOG_D("SessionId: " << Self.GetSessionId() << " Key: " << partitionKeyStr << " StopPartitionSessionEvent received");
+ const auto partitionKey = MakePartitionKey(Cluster, event.GetPartitionSession());
+ SRC_LOG_D("SessionId: " << Self.GetSessionId(Index) << " Key: " << partitionKey << " StopPartitionSessionEvent received");
event.Confirm();
}
void operator()(NYdb::NTopic::TReadSessionEvent::TEndPartitionSessionEvent& event) {
- const auto partitionKey = MakePartitionKey(event.GetPartitionSession());
- const auto partitionKeyStr = ToString(partitionKey);
- SRC_LOG_D("SessionId: " << Self.GetSessionId() << " Key: " << partitionKeyStr << " EndPartitionSessionEvent received");
+ const auto partitionKey = MakePartitionKey(Cluster, event.GetPartitionSession());
+ SRC_LOG_D("SessionId: " << Self.GetSessionId(Index) << " Key: " << partitionKey << " EndPartitionSessionEvent received");
}
void operator()(NYdb::NTopic::TReadSessionEvent::TPartitionSessionStatusEvent&) { }
void operator()(NYdb::NTopic::TReadSessionEvent::TPartitionSessionClosedEvent& event) {
- const auto partitionKey = MakePartitionKey(event.GetPartitionSession());
- const auto partitionKeyStr = ToString(partitionKey);
- SRC_LOG_D("SessionId: " << Self.GetSessionId() << " Key: " << partitionKeyStr << " PartitionSessionClosedEvent received");
+ const auto partitionKey = MakePartitionKey(Cluster, event.GetPartitionSession());
+ SRC_LOG_D("SessionId: " << Self.GetSessionId(Index) << " Key: " << partitionKey << " PartitionSessionClosedEvent received");
}
TReadyBatch& GetActiveBatch(const TPartitionKey& partitionKey, TInstant time) {
@@ -599,6 +816,8 @@ private:
TDqPqReadActor& Self;
ui32 BatchCapacity;
const TString& LogPrefix;
+ const TString& Cluster;
+ const ui32 Index;
};
private:
@@ -609,18 +828,16 @@ private:
const THolderFactory& HolderFactory;
NYdb::TDriver Driver;
std::shared_ptr<NYdb::ICredentialsProviderFactory> CredentialsProviderFactory;
- ITopicClient::TPtr TopicClient;
- std::shared_ptr<NYdb::NTopic::IReadSession> ReadSession;
- NThreading::TFuture<void> EventFuture;
+ IFederatedTopicClient::TPtr FederatedTopicClient;
+ std::vector<TClusterState> Clusters;
std::queue<std::pair<ui64, NYdb::NTopic::TDeferredCommit>> DeferredCommits;
NYdb::NTopic::TDeferredCommit CurrentDeferredCommit;
- bool SubscribedOnEvent = false;
std::vector<std::tuple<TString, TPqMetaExtractor::TPqMetaExtractorLambda>> MetadataFields;
std::queue<TReadyBatch> ReadyBuffer;
TMaybe<TDqSourceWatermarkTracker<TPartitionKey>> WatermarkTracker;
TMaybe<TInstant> NextIdlenesCheckAt;
IPqGateway::TPtr PqGateway;
- TMaybe<TInstant> WaitEventStartedAt;
+ NThreading::TFuture<std::vector<NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo>> AsyncInit;
};
std::pair<IDqComputeActorAsyncInput*, NActors::IActor*> CreateDqPqReadActor(
@@ -708,11 +925,14 @@ void RegisterDqPqReadActorFactory(TDqAsyncIoFactory& factory, NYdb::TDriver driv
args.TaskId,
args.SecureParams,
args.TaskParams,
+ driver,
+ credentialsFactory,
args.ComputeActorId,
NFq::RowDispatcherServiceActorId(),
args.HolderFactory,
counters,
- PQReadDefaultFreeSpace);
+ PQReadDefaultFreeSpace,
+ pqGateway);
});
}
diff --git a/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.cpp b/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.cpp
index f4c4162a9f..d4a3445c32 100644
--- a/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.cpp
+++ b/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.cpp
@@ -76,7 +76,7 @@ void TDqPqReadActorBase::LoadState(const TSourceState& state) {
TStringStream str;
str << "SessionId: " << GetSessionId() << " Restoring offset: ";
for (const auto& [key, value] : PartitionToOffset) {
- str << "{" << key.first << "," << key.second << "," << value << "},";
+ str << "{" << key << "," << value << "},";
}
SRC_LOG_D(str.Str());
StartingMessageTimestamp = minStartingMessageTs;
@@ -90,4 +90,4 @@ ui64 TDqPqReadActorBase::GetInputIndex() const {
const NYql::NDq::TDqAsyncStats& TDqPqReadActorBase::GetIngressStats() const {
return IngressStats;
-} \ No newline at end of file
+}
diff --git a/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.h b/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.h
index 7b965b7f94..f39235b612 100644
--- a/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.h
+++ b/ydb/library/yql/providers/pq/async_io/dq_pq_read_actor_base.h
@@ -1,6 +1,7 @@
#pragma once
#include <ydb/library/yql/dq/actors/compute/dq_compute_actor_async_io.h>
+#include <ydb/library/yql/providers/pq/common/pq_partition_key.h>
#include <ydb/library/yql/providers/pq/proto/dq_io.pb.h>
#include <ydb/library/yql/providers/pq/proto/dq_task_params.pb.h>
@@ -9,16 +10,16 @@ namespace NYql::NDq::NInternal {
class TDqPqReadActorBase : public IDqComputeActorAsyncInput {
public:
- using TPartitionKey = std::pair<TString, ui64>; // Cluster, partition id.
+ using TPartitionKey = ::NPq::TPartitionKey;
const ui64 InputIndex;
THashMap<TPartitionKey, ui64> PartitionToOffset; // {cluster, partition} -> offset of next event.
const TTxId TxId;
- const NPq::NProto::TDqPqTopicSource SourceParams;
+ NPq::NProto::TDqPqTopicSource SourceParams;
TDqAsyncStats IngressStats;
TInstant StartingMessageTimestamp;
TString LogPrefix;
- const NPq::NProto::TDqReadTaskParams ReadParams;
+ NPq::NProto::TDqReadTaskParams ReadParams;
const NActors::TActorId ComputeActorId;
ui64 TaskId;
diff --git a/ydb/library/yql/providers/pq/async_io/ya.make b/ydb/library/yql/providers/pq/async_io/ya.make
index 99b505adce..0b2d47cb8a 100644
--- a/ydb/library/yql/providers/pq/async_io/ya.make
+++ b/ydb/library/yql/providers/pq/async_io/ya.make
@@ -23,6 +23,7 @@ PEERDIR(
yql/essentials/public/types
yql/essentials/utils/log
ydb/public/sdk/cpp/adapters/issue
+ ydb/public/sdk/cpp/src/client/federated_topic
ydb/public/sdk/cpp/src/client/driver
ydb/public/sdk/cpp/src/client/topic
ydb/public/sdk/cpp/src/client/types/credentials
diff --git a/ydb/library/yql/providers/pq/common/pq_partition_key.cpp b/ydb/library/yql/providers/pq/common/pq_partition_key.cpp
new file mode 100644
index 0000000000..249004a739
--- /dev/null
+++ b/ydb/library/yql/providers/pq/common/pq_partition_key.cpp
@@ -0,0 +1,3 @@
+#include "pq_partition_key.h"
+namespace NPq {
+} // namespace NPq
diff --git a/ydb/library/yql/providers/pq/common/pq_partition_key.h b/ydb/library/yql/providers/pq/common/pq_partition_key.h
new file mode 100644
index 0000000000..a42641d46b
--- /dev/null
+++ b/ydb/library/yql/providers/pq/common/pq_partition_key.h
@@ -0,0 +1,29 @@
+#pragma once
+#include <util/generic/hash.h>
+#include <util/generic/string.h>
+#include <util/stream/output.h>
+namespace NPq {
+ struct TPartitionKey {
+ TString Cluster;
+ ui64 PartitionId;
+ bool operator==(const TPartitionKey& other) const = default;
+ ui64 Hash() const {
+ return CombineHashes<ui64>(
+ std::hash<TString>{} (Cluster),
+ std::hash<ui64>{} (PartitionId)
+ );
+ }
+ };
+}
+
+template<>
+struct THash<NPq::TPartitionKey> {
+ ui64 operator() (const NPq::TPartitionKey& x) const {
+ return x.Hash();
+ }
+};
+
+template <>
+inline void Out<NPq::TPartitionKey>(IOutputStream& stream, TTypeTraits<NPq::TPartitionKey>::TFuncParam& t) {
+ stream << t.PartitionId << '@' << t.Cluster;
+}
diff --git a/ydb/library/yql/providers/pq/common/ya.make b/ydb/library/yql/providers/pq/common/ya.make
index 691fbd9b18..e2dbc03f53 100644
--- a/ydb/library/yql/providers/pq/common/ya.make
+++ b/ydb/library/yql/providers/pq/common/ya.make
@@ -2,6 +2,7 @@ LIBRARY()
SRCS(
pq_meta_fields.cpp
+ pq_partition_key.cpp
yql_names.cpp
)
diff --git a/ydb/library/yql/providers/pq/common/yql_names.h b/ydb/library/yql/providers/pq/common/yql_names.h
index 66353ebe00..f1449baf68 100644
--- a/ydb/library/yql/providers/pq/common/yql_names.h
+++ b/ydb/library/yql/providers/pq/common/yql_names.h
@@ -5,6 +5,7 @@
namespace NYql {
constexpr TStringBuf PartitionsCountProp = "PartitionsCount";
+constexpr TStringBuf FederatedClustersProp = "FederatedClusters";
constexpr TStringBuf ConsumerSetting = "Consumer";
constexpr TStringBuf EndpointSetting = "Endpoint";
constexpr TStringBuf SharedReading = "SharedReading";
diff --git a/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.json b/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.json
index 9a6603bd66..93ee200599 100644
--- a/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.json
+++ b/ydb/library/yql/providers/pq/expr_nodes/yql_pq_expr_nodes.json
@@ -77,6 +77,21 @@
]
},
{
+ "Name": "TDqPqFederatedCluster",
+ "Base": "TCallable",
+ "Match": {"Type": "Callable", "Name": "DqPqFederatedCluster"},
+ "Children": [
+ {"Index": 0, "Name": "Name", "Type": "TCoAtom"},
+ {"Index": 1, "Name": "Endpoint", "Type": "TCoAtom"},
+ {"Index": 2, "Name": "Database", "Type": "TCoAtom"},
+ {"Index": 3, "Name": "PartitionsCount", "Type": "TCoAtom", "Optional": true}
+ ]
+ },
+ {
+ "Name": "TDqPqFederatedClusterList",
+ "ListBase": "TDqPqFederatedCluster"
+ },
+ {
"Name": "TDqPqTopicSink",
"Base": "TCallable",
"Match": {"Type": "Callable", "Name": "DqPqTopicSink"},
diff --git a/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.cpp b/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.cpp
index f4b56a3ea2..0999db7a10 100644
--- a/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.cpp
+++ b/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.cpp
@@ -8,6 +8,23 @@
namespace NYql {
+struct TDummyFederatedTopicClient : public IFederatedTopicClient {
+ TDummyFederatedTopicClient(const NYdb::NTopic::TFederatedTopicClientSettings& settings = {}):
+ FederatedClientSettings_(settings) {}
+
+ NThreading::TFuture<std::vector<NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo>> GetAllTopicClusters() override {
+ std::vector<NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo> dbInfo;
+ dbInfo.emplace_back(
+ "",
+ FederatedClientSettings_.DiscoveryEndpoint_.value_or(""),
+ FederatedClientSettings_.Database_.value_or(""),
+ NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo::EStatus::AVAILABLE);
+ return NThreading::MakeFuture(std::move(dbInfo));
+ }
+private:
+ NYdb::NFederatedTopic::TFederatedTopicClientSettings FederatedClientSettings_;
+};
+
NThreading::TFuture<void> TDummyPqGateway::OpenSession(const TString& sessionId, const TString& username) {
with_lock (Mutex) {
Y_ENSURE(sessionId);
@@ -45,6 +62,23 @@ NPq::NConfigurationManager::TAsyncDescribePathResult TDummyPqGateway::DescribePa
}
}
+IPqGateway::TAsyncDescribeFederatedTopicResult TDummyPqGateway::DescribeFederatedTopic(const TString& sessionId, const TString& cluster, const TString& database, const TString& path, const TString& token) {
+ Y_UNUSED(database);
+ Y_UNUSED(token);
+ with_lock (Mutex) {
+ Y_ENSURE(IsIn(OpenedSessions, sessionId), "Session " << sessionId << " is not opened in pq gateway");
+ const auto key = std::make_pair(cluster, path);
+ if (const auto* topic = Topics.FindPtr(key)) {
+ IPqGateway::TDescribeFederatedTopicResult result;
+ auto& cluster = result.emplace_back();
+ cluster.PartitionsCount = topic->PartitionsCount;
+ return NThreading::MakeFuture<TDescribeFederatedTopicResult>(result);
+ }
+ return NThreading::MakeErrorFuture<IPqGateway::TDescribeFederatedTopicResult>(
+ std::make_exception_ptr(yexception() << "Topic " << path << " is not found on cluster " << cluster));
+ }
+}
+
NThreading::TFuture<IPqGateway::TListStreams> TDummyPqGateway::ListStreams(const TString& sessionId, const TString& cluster, const TString& database, const TString& token, ui32 limit, const TString& exclusiveStartStreamName) {
Y_UNUSED(sessionId, cluster, database, token, limit, exclusiveStartStreamName);
return NThreading::MakeFuture<IPqGateway::TListStreams>();
@@ -68,6 +102,13 @@ ITopicClient::TPtr TDummyPqGateway::GetTopicClient(const NYdb::TDriver&, const N
return MakeIntrusive<TFileTopicClient>(Topics);
}
+IFederatedTopicClient::TPtr TDummyPqGateway::GetFederatedTopicClient(const NYdb::TDriver&, const NYdb::NFederatedTopic::TFederatedTopicClientSettings& settings) {
+ return MakeIntrusive<TDummyFederatedTopicClient>(settings);
+}
+NYdb::NFederatedTopic::TFederatedTopicClientSettings TDummyPqGateway::GetFederatedTopicClientSettings() const {
+ return {};
+}
+
void TDummyPqGateway::UpdateClusterConfigs(
const TString& clusterName,
const TString& endpoint,
diff --git a/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.h b/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.h
index 084dd07f57..9164d8277f 100644
--- a/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.h
+++ b/ydb/library/yql/providers/pq/gateway/dummy/yql_pq_dummy_gateway.h
@@ -45,6 +45,13 @@ public:
const TString& path,
const TString& token) override;
+ IPqGateway::TAsyncDescribeFederatedTopicResult DescribeFederatedTopic(
+ const TString& sessionId,
+ const TString& cluster,
+ const TString& database,
+ const TString& path,
+ const TString& token) override;
+
NThreading::TFuture<TListStreams> ListStreams(
const TString& sessionId,
const TString& cluster,
@@ -62,7 +69,9 @@ public:
void UpdateClusterConfigs(const TPqGatewayConfigPtr& config) override;
ITopicClient::TPtr GetTopicClient(const NYdb::TDriver& driver, const NYdb::NTopic::TTopicClientSettings& settings) override;
+ IFederatedTopicClient::TPtr GetFederatedTopicClient(const NYdb::TDriver& driver, const NYdb::NFederatedTopic::TFederatedTopicClientSettings& settings) override;
NYdb::NTopic::TTopicClientSettings GetTopicClientSettings() const override;
+ NYdb::NFederatedTopic::TFederatedTopicClientSettings GetFederatedTopicClientSettings() const override;
using TClusterNPath = std::pair<TString, TString>;
private:
diff --git a/ydb/library/yql/providers/pq/gateway/native/ya.make b/ydb/library/yql/providers/pq/gateway/native/ya.make
index 8bdb78b068..5b7e57e567 100644
--- a/ydb/library/yql/providers/pq/gateway/native/ya.make
+++ b/ydb/library/yql/providers/pq/gateway/native/ya.make
@@ -14,6 +14,7 @@ PEERDIR(
yql/essentials/utils
ydb/public/sdk/cpp/src/client/datastreams
ydb/public/sdk/cpp/src/client/driver
+ ydb/public/sdk/cpp/src/client/federated_topic
ydb/public/sdk/cpp/src/client/topic
)
diff --git a/ydb/library/yql/providers/pq/gateway/native/yql_pq_gateway.cpp b/ydb/library/yql/providers/pq/gateway/native/yql_pq_gateway.cpp
index 2fb5322481..8a51ce2161 100644
--- a/ydb/library/yql/providers/pq/gateway/native/yql_pq_gateway.cpp
+++ b/ydb/library/yql/providers/pq/gateway/native/yql_pq_gateway.cpp
@@ -34,6 +34,8 @@ public:
ui32 limit,
const TString& exclusiveStartStreamName = {}) override;
+ TAsyncDescribeFederatedTopicResult DescribeFederatedTopic(const TString& sessionId, const TString& cluster, const TString& database, const TString& path, const TString& token) override;
+
void UpdateClusterConfigs(
const TString& clusterName,
const TString& endpoint,
@@ -43,7 +45,9 @@ public:
void UpdateClusterConfigs(const TPqGatewayConfigPtr& config) override;
ITopicClient::TPtr GetTopicClient(const NYdb::TDriver& driver, const NYdb::NTopic::TTopicClientSettings& settings) override;
+ IFederatedTopicClient::TPtr GetFederatedTopicClient(const NYdb::TDriver& driver, const NYdb::NFederatedTopic::TFederatedTopicClientSettings& settings) override;
NYdb::NTopic::TTopicClientSettings GetTopicClientSettings() const override;
+ NYdb::NFederatedTopic::TFederatedTopicClientSettings GetFederatedTopicClientSettings() const override;
private:
TPqSession::TPtr GetExistingSession(const TString& sessionId) const;
@@ -140,6 +144,10 @@ NThreading::TFuture<IPqGateway::TListStreams> TPqNativeGateway::ListStreams(cons
return GetExistingSession(sessionId)->ListStreams(cluster, database, token, limit, exclusiveStartStreamName);
}
+IPqGateway::TAsyncDescribeFederatedTopicResult TPqNativeGateway::DescribeFederatedTopic(const TString& sessionId, const TString& cluster, const TString& database, const TString& path, const TString& token) {
+ return GetExistingSession(sessionId)->DescribeFederatedTopic(cluster, database, path, token);
+}
+
IPqGateway::TPtr CreatePqNativeGateway(const TPqGatewayServices& services) {
return MakeIntrusive<TPqNativeGateway>(services);
}
@@ -152,6 +160,31 @@ NYdb::NTopic::TTopicClientSettings TPqNativeGateway::GetTopicClientSettings() co
return CommonTopicClientSettings ? *CommonTopicClientSettings : NYdb::NTopic::TTopicClientSettings();
}
+IFederatedTopicClient::TPtr TPqNativeGateway::GetFederatedTopicClient(const NYdb::TDriver& driver, const NYdb::NFederatedTopic::TFederatedTopicClientSettings& settings) {
+ return MakeIntrusive<TNativeFederatedTopicClient>(driver, settings);
+}
+
+NYdb::NFederatedTopic::TFederatedTopicClientSettings TPqNativeGateway::GetFederatedTopicClientSettings() const {
+ NYdb::NFederatedTopic::TFederatedTopicClientSettings settings;
+
+ if (!CommonTopicClientSettings) {
+ return settings;
+ }
+
+ settings.DefaultCompressionExecutor(CommonTopicClientSettings->DefaultCompressionExecutor_);
+ settings.DefaultHandlersExecutor(CommonTopicClientSettings->DefaultHandlersExecutor_);
+#define COPY_OPTIONAL_SETTINGS(NAME) \
+ if (CommonTopicClientSettings->NAME##_) { \
+ settings.NAME(*CommonTopicClientSettings->NAME##_); \
+ }
+ COPY_OPTIONAL_SETTINGS(CredentialsProviderFactory);
+ COPY_OPTIONAL_SETTINGS(SslCredentials);
+ COPY_OPTIONAL_SETTINGS(DiscoveryMode);
+#undef COPY_OPTIONAL_SETTINGS
+
+ return settings;
+}
+
TPqNativeGateway::~TPqNativeGateway() {
Sessions.clear();
}
diff --git a/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.cpp b/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.cpp
index 36683abb47..65478d7ece 100644
--- a/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.cpp
+++ b/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.cpp
@@ -2,6 +2,8 @@
#include <yql/essentials/utils/yql_panic.h>
+#include <library/cpp/threading/future/wait/wait.h>
+
namespace NYql {
namespace {
@@ -15,6 +17,17 @@ NPq::NConfigurationManager::TClientOptions GetCmClientOptions(const NYql::TPqClu
return opts;
}
+NYdb::NFederatedTopic::TFederatedTopicClientSettings GetYdbFederatedPqClientOptions(const TString& database, const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory) {
+ NYdb::NFederatedTopic::TFederatedTopicClientSettings opts;
+ opts
+ .DiscoveryEndpoint(cfg.GetEndpoint())
+ .Database(database)
+ .SslCredentials(NYdb::TSslCredentials(cfg.GetUseSsl()))
+ .CredentialsProviderFactory(credentialsProviderFactory);
+
+ return opts;
+}
+
NYdb::NTopic::TTopicClientSettings GetYdbPqClientOptions(const TString& database, const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory) {
NYdb::NTopic::TTopicClientSettings opts;
opts
@@ -46,6 +59,14 @@ const NPq::NConfigurationManager::IClient::TPtr& TPqSession::GetConfigManagerCli
return client;
}
+NYdb::NFederatedTopic::TFederatedTopicClient& TPqSession::GetYdbFederatedPqClient(const TString& cluster, const TString& database, const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory) {
+ const auto clientIt = ClusterYdbFederatedPqClients.find(cluster);
+ if (clientIt != ClusterYdbFederatedPqClients.end()) {
+ return clientIt->second;
+ }
+ return ClusterYdbFederatedPqClients.emplace(cluster, NYdb::NFederatedTopic::TFederatedTopicClient(YdbDriver, GetYdbFederatedPqClientOptions(database, cfg, credentialsProviderFactory))).first->second;
+}
+
NYdb::NTopic::TTopicClient& TPqSession::GetYdbPqClient(const TString& cluster, const TString& database, const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory) {
const auto clientIt = ClusterYdbPqClients.find(cluster);
if (clientIt != ClusterYdbPqClients.end()) {
@@ -62,6 +83,91 @@ NYdb::NDataStreams::V1::TDataStreamsClient& TPqSession::GetDsClient(const TStrin
return ClusterDsClients.emplace(cluster, NYdb::NDataStreams::V1::TDataStreamsClient(YdbDriver, GetDsClientOptions(database, cfg, credentialsProviderFactory))).first->second;
}
+IPqGateway::TAsyncDescribeFederatedTopicResult TPqSession::DescribeFederatedTopic(const TString& cluster, const TString& database, const TString& path, const TString& token) {
+ const auto* config = ClusterConfigs->FindPtr(cluster);
+ if (!config) {
+ ythrow yexception() << "Pq cluster `" << cluster << "` does not exist";
+ }
+
+ YQL_ENSURE(config->GetEndpoint(), "Can't describe topic `" << cluster << "`.`" << path << "`: no endpoint");
+
+ std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory = CreateCredentialsProviderFactoryForStructuredToken(CredentialsFactory, token, config->GetAddBearerToToken());
+ with_lock (Mutex) {
+ return GetYdbFederatedPqClient(cluster, database, *config, credentialsProviderFactory)
+ .GetAllClusterInfo()
+ .Apply([
+ ydbDriver = YdbDriver, credentialsProviderFactory,
+ cluster, database, path,
+ topicSettings = GetYdbPqClientOptions(database, *config, credentialsProviderFactory)
+ ](const auto& futureClusterInfo) mutable {
+ auto allClustersInfo = futureClusterInfo.GetValue();
+ Y_ENSURE(!allClustersInfo.empty());
+ std::vector<NYdb::NTopic::TAsyncDescribeTopicResult> futures;
+ IPqGateway::TDescribeFederatedTopicResult results;
+ results.reserve(allClustersInfo.size());
+ futures.reserve(allClustersInfo.size());
+ std::vector<std::string> paths;
+ paths.reserve(allClustersInfo.size());
+ for (auto& clusterInfo: allClustersInfo) {
+ auto& clusterTopicPath = paths.emplace_back(path);
+ clusterInfo.AdjustTopicPath(clusterTopicPath);
+ if (!clusterInfo.IsAvailableForRead()) {
+ futures.emplace_back(NThreading::MakeErrorFuture<NYdb::NTopic::TDescribeTopicResult>(std::make_exception_ptr(NThreading::TFutureException() << "Cluster " << clusterInfo.Name << " is unavailable for read")));
+ } else {
+ clusterInfo.AdjustTopicClientSettings(topicSettings);
+ futures.emplace_back(NYdb::NTopic::TTopicClient(ydbDriver, topicSettings).DescribeTopic(clusterTopicPath));
+ }
+ results.emplace_back(std::move(clusterInfo));
+ }
+ Y_ENSURE(results.size() == allClustersInfo.size());
+ Y_ENSURE(paths.size() == allClustersInfo.size());
+ Y_ENSURE(futures.size() == allClustersInfo.size());
+ // XXX This produces circular dependency until the future is fired
+ // futures references allFutureDescribe
+ // lambda references futures[]
+ // allFutureDescribe contains lambda
+ auto allFutureDescribes = NThreading::WaitAll(futures);
+ return allFutureDescribes.Apply([futures = std::move(futures), paths = std::move(paths), results = std::move(results), cluster, database, path](const auto& ) mutable {
+ TStringBuilder ex;
+ auto addErrorHeader = [&]() {
+ if (ex.empty()) {
+ ex << "Failed to describe topic `" << cluster << "`.`" << path << "` in the database `" << database << "`: ";
+ } else {
+ ex << "; ";
+ }
+ };
+ bool gotAnyTopic = false;
+ for (size_t i = 0; i != results.size(); ++i) {
+ auto& futureDescribe = futures[i];
+ auto addErrorCluster = [&]() {
+ addErrorHeader();
+ ex << "#" << i << " (name '" << results[i].Info.Name << "' endpoint '" << results[i].Info.Endpoint << "' path `" << paths[i] << "`): ";
+ };
+ try {
+ auto describeTopicResult = futureDescribe.ExtractValue();
+ if (!describeTopicResult.IsSuccess()) {
+ addErrorCluster();
+ ex << describeTopicResult.GetIssues().ToString();
+ continue;
+ }
+ results[i].PartitionsCount = describeTopicResult.GetTopicDescription().GetTotalPartitionsCount();
+ gotAnyTopic = true;
+ } catch (...) {
+ addErrorCluster();
+ ex << "Got exception: " << FormatCurrentException();
+ }
+ }
+ if (!gotAnyTopic) {
+ addErrorHeader();
+ ex << "No working cluster found\n";
+ throw yexception() << ex;
+ }
+ return results;
+ });
+ });
+ }
+}
+
NPq::NConfigurationManager::TAsyncDescribePathResult TPqSession::DescribePath(const TString& cluster, const TString& database, const TString& path, const TString& token) {
const auto* config = ClusterConfigs->FindPtr(cluster);
if (!config) {
diff --git a/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.h b/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.h
index 533cde59cc..c7bc9443ff 100644
--- a/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.h
+++ b/ydb/library/yql/providers/pq/gateway/native/yql_pq_session.h
@@ -2,6 +2,7 @@
#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/datastreams/datastreams.h>
#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/topic/client.h>
+#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/federated_topic/federated_topic.h>
#include <yql/essentials/providers/common/proto/gateways_config.pb.h>
#include <ydb/library/yql/providers/common/token_accessor/client/factory.h>
@@ -39,11 +40,13 @@ public:
NPq::NConfigurationManager::TAsyncDescribePathResult DescribePath(const TString& cluster, const TString& database, const TString& path, const TString& token);
NThreading::TFuture<IPqGateway::TListStreams> ListStreams(const TString& cluster, const TString& database, const TString& token, ui32 limit, const TString& exclusiveStartStreamName);
+ IPqGateway::TAsyncDescribeFederatedTopicResult DescribeFederatedTopic(const TString& cluster, const TString& database, const TString& path, const TString& token);
private:
const NPq::NConfigurationManager::IClient::TPtr& GetConfigManagerClient(const TString& cluster, const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory);
- NYdb::NTopic::TTopicClient& GetYdbPqClient(const TString& cluster, const TString& database, const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory);
NYdb::NDataStreams::V1::TDataStreamsClient& GetDsClient(const TString& cluster, const TString& database, const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory);
+ NYdb::NFederatedTopic::TFederatedTopicClient& GetYdbFederatedPqClient(const TString& cluster, const TString& database, const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory);
+ NYdb::NTopic::TTopicClient& GetYdbPqClient(const TString& cluster, const TString& database, const NYql::TPqClusterConfig& cfg, std::shared_ptr<NYdb::ICredentialsProviderFactory> credentialsProviderFactory);
private:
const TString SessionId;
@@ -55,8 +58,9 @@ private:
TMutex Mutex;
THashMap<TString, NPq::NConfigurationManager::IClient::TPtr> ClusterCmClients; // Cluster -> CM Client.
- THashMap<TString, NYdb::NTopic::TTopicClient> ClusterYdbPqClients; // Cluster -> Topic Client.
THashMap<TString, NYdb::NDataStreams::V1::TDataStreamsClient> ClusterDsClients; // Cluster -> DS Client
+ THashMap<TString, NYdb::NTopic::TTopicClient> ClusterYdbPqClients; // Cluster -> Topic Client.
+ THashMap<TString, NYdb::NFederatedTopic::TFederatedTopicClient> ClusterYdbFederatedPqClients; // Cluster -> Federated Topic Client.
};
} // namespace NYql
diff --git a/ydb/library/yql/providers/pq/proto/dq_io.proto b/ydb/library/yql/providers/pq/proto/dq_io.proto
index 93b5d8a060..7fc6327910 100644
--- a/ydb/library/yql/providers/pq/proto/dq_io.proto
+++ b/ydb/library/yql/providers/pq/proto/dq_io.proto
@@ -21,6 +21,13 @@ message TWatermarks {
bool IdlePartitionsEnabled = 4;
}
+message TDqPqFederatedCluster {
+ string Name = 1;
+ string Endpoint = 2;
+ string Database = 3;
+ uint32 PartitionsCount = 4;
+}
+
message TDqPqTopicSource {
string TopicPath = 1;
string ConsumerName = 2;
@@ -42,6 +49,7 @@ message TDqPqTopicSource {
string ReadGroup = 18;
string Format = 19;
string RowType = 20; // Final row type with metadata columns
+ repeated TDqPqFederatedCluster FederatedClusters = 21;
}
message TDqPqTopicSink {
diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_datasource_type_ann.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_datasource_type_ann.cpp
index 82cebc19e0..5e5ef2b0fc 100644
--- a/ydb/library/yql/providers/pq/provider/yql_pq_datasource_type_ann.cpp
+++ b/ydb/library/yql/providers/pq/provider/yql_pq_datasource_type_ann.cpp
@@ -31,6 +31,38 @@ public:
AddHandler({TPqTopic::CallableName()}, Hndl(&TSelf::HandleTopic));
AddHandler({TDqPqTopicSource::CallableName()}, Hndl(&TSelf::HandleDqTopicSource));
AddHandler({TCoSystemMetadata::CallableName()}, Hndl(&TSelf::HandleMetadata));
+ AddHandler({TDqPqFederatedCluster::CallableName()}, Hndl(&TSelf::HandleFederatedCluster));
+ }
+
+ TStatus HandleFederatedCluster(TExprBase input, TExprContext& ctx) {
+ const auto cluster = input.Cast<NNodes::TDqPqFederatedCluster>();
+ if (!EnsureMinMaxArgsCount(input.Ref(), 3, 4, ctx)) {
+ return TStatus::Error;
+ }
+
+ if (!EnsureAtom(cluster.Name().Ref(), ctx)) {
+ return TStatus::Error;
+ }
+
+ if (!EnsureAtom(cluster.Endpoint().Ref(), ctx)) {
+ return TStatus::Error;
+ }
+
+ if (!EnsureAtom(cluster.Database().Ref(), ctx)) {
+ return TStatus::Error;
+ }
+
+ if (TDqPqFederatedCluster::idx_PartitionsCount < input.Ref().ChildrenSize()) {
+ if (!EnsureAtom(cluster.PartitionsCount().Ref(), ctx)) {
+ return TStatus::Error;
+ }
+ if (!TryFromString<ui32>(cluster.PartitionsCount().Cast().StringValue())) {
+ ctx.AddError(TIssue(ctx.GetPosition(cluster.PartitionsCount().Cast().Pos()), TStringBuilder() << "Expected integer, but got: " << cluster.PartitionsCount().Cast().StringValue()));
+ return TStatus::Error;
+ }
+ }
+ input.Ptr()->SetTypeAnn(ctx.MakeType<TUnitExprType>());
+ return TStatus::Ok;
}
TStatus HandleConfigure(const TExprNode::TPtr& input, TExprContext& ctx) {
diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_dq_integration.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_dq_integration.cpp
index a91029b656..0bb34a2de9 100644
--- a/ydb/library/yql/providers/pq/provider/yql_pq_dq_integration.cpp
+++ b/ydb/library/yql/providers/pq/provider/yql_pq_dq_integration.cpp
@@ -205,6 +205,23 @@ public:
srcDesc.MutableWatermarks()->SetIdlePartitionsEnabled(true);
}
}
+
+ for (auto prop : topic.Props()) {
+ const TStringBuf name = Name(prop);
+ if (name == FederatedClustersProp) {
+ auto clusterList = prop.Value().Cast<TDqPqFederatedClusterList>();
+ for (auto cluster : clusterList) {
+ auto federatedCluster = srcDesc.AddFederatedClusters();
+ federatedCluster->SetName(cluster.Name().StringValue());
+ federatedCluster->SetEndpoint(cluster.Endpoint().StringValue());
+ federatedCluster->SetDatabase(cluster.Database().StringValue());
+ if (cluster.PartitionsCount()) {
+ federatedCluster->SetPartitionsCount(FromString<ui32>(cluster.PartitionsCount().Cast().StringValue()));
+ }
+ }
+ }
+ }
+
srcDesc.SetFormat(format);
if (auto maybeToken = TMaybeNode<TCoSecureParam>(topicSource.Token().Raw())) {
diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_gateway.h b/ydb/library/yql/providers/pq/provider/yql_pq_gateway.h
index 4898be1347..9c1171ae0b 100644
--- a/ydb/library/yql/providers/pq/provider/yql_pq_gateway.h
+++ b/ydb/library/yql/providers/pq/provider/yql_pq_gateway.h
@@ -30,6 +30,13 @@ struct IPqGateway : public TThrRefBase {
TVector<TString> Names;
};
+ struct TClusterInfo {
+ NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo Info;
+ ui32 PartitionsCount = 0;
+ };
+ using TDescribeFederatedTopicResult = std::vector<TClusterInfo>;
+ using TAsyncDescribeFederatedTopicResult = NThreading::TFuture<IPqGateway::TDescribeFederatedTopicResult>;
+
virtual NThreading::TFuture<void> OpenSession(const TString& sessionId, const TString& username) = 0;
virtual NThreading::TFuture<void> CloseSession(const TString& sessionId) = 0;
@@ -38,8 +45,10 @@ struct IPqGateway : public TThrRefBase {
// DS API.
virtual NThreading::TFuture<TListStreams> ListStreams(const TString& sessionId, const TString& cluster, const TString& database, const TString& token, ui32 limit, const TString& exclusiveStartStreamName = {}) = 0;
+ virtual TAsyncDescribeFederatedTopicResult DescribeFederatedTopic(const TString& sessionId, const TString& cluster, const TString& database, const TString& path, const TString& token) = 0;
virtual ITopicClient::TPtr GetTopicClient(const NYdb::TDriver& driver, const NYdb::NTopic::TTopicClientSettings& settings) = 0;
+ virtual IFederatedTopicClient::TPtr GetFederatedTopicClient(const NYdb::TDriver& driver, const NYdb::NFederatedTopic::TFederatedTopicClientSettings& settings) = 0;
virtual void UpdateClusterConfigs(
const TString& clusterName,
@@ -50,6 +59,8 @@ struct IPqGateway : public TThrRefBase {
virtual void UpdateClusterConfigs(const TPqGatewayConfigPtr& config) = 0;
virtual NYdb::NTopic::TTopicClientSettings GetTopicClientSettings() const = 0;
+
+ virtual NYdb::NFederatedTopic::TFederatedTopicClientSettings GetFederatedTopicClientSettings() const = 0;
};
struct IPqGatewayFactory : public TThrRefBase {
diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_helpers.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_helpers.cpp
index de525c9336..a21164086a 100644
--- a/ydb/library/yql/providers/pq/provider/yql_pq_helpers.cpp
+++ b/ydb/library/yql/providers/pq/provider/yql_pq_helpers.cpp
@@ -19,7 +19,30 @@ void Add(TVector<TCoNameValueTuple>& settings, TStringBuf name, TStringBuf value
TCoNameValueTupleList BuildTopicPropsList(const TPqState::TTopicMeta& meta, TPositionHandle pos, TExprContext& ctx) {
TVector<TCoNameValueTuple> props;
- Add(props, PartitionsCountProp, ToString(meta.Description->PartitionsCount), pos, ctx);
+ ui32 maxPartitionsCount = 0;
+ if (meta.FederatedTopic) {
+ const auto& federatedTopics = *meta.FederatedTopic;
+ if (federatedTopics.size() == 1 && federatedTopics[0].Info.Name.empty()) {
+ // non-federated fallback, omit FederatedClusters
+ maxPartitionsCount = federatedTopics[0].PartitionsCount;
+ } else {
+ TVector<TDqPqFederatedCluster> clusters(Reserve(federatedTopics.size()));
+ for (const auto& topic: federatedTopics) {
+ clusters.push_back(Build<TDqPqFederatedCluster>(ctx, pos)
+ .Name().Build(topic.Info.Name)
+ .Endpoint().Build(topic.Info.Endpoint)
+ .Database().Build(topic.Info.Path)
+ .PartitionsCount().Build(ToString(topic.PartitionsCount))
+ .Done());
+ maxPartitionsCount = std::max(maxPartitionsCount, topic.PartitionsCount);
+ }
+ props.push_back(
+ Build<TCoNameValueTuple>(ctx, pos)
+ .Name().Build(FederatedClustersProp)
+ .Value<TDqPqFederatedClusterList>().Add(clusters).Build().Done());
+ }
+ }
+ Add(props, PartitionsCountProp, ToString(maxPartitionsCount), pos, ctx);
return Build<TCoNameValueTupleList>(ctx, pos)
.Add(props)
diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_load_meta.cpp b/ydb/library/yql/providers/pq/provider/yql_pq_load_meta.cpp
index fb0ab02356..514f13ccd7 100644
--- a/ydb/library/yql/providers/pq/provider/yql_pq_load_meta.cpp
+++ b/ydb/library/yql/providers/pq/provider/yql_pq_load_meta.cpp
@@ -123,13 +123,8 @@ private:
const TStructExprType* LoadTopicMeta(const TString& cluster, const TString& topic, TExprContext& ctx, TPqState::TTopicMeta& meta) {
// todo: return TFuture
try {
- auto future = State_->Gateway->DescribePath(State_->SessionId, cluster, State_->Configuration->GetDatabaseForTopic(cluster), topic, State_->Configuration->Tokens.at(cluster));
- NPq::NConfigurationManager::TDescribePathResult description = future.GetValueSync();
- if (!description.IsTopic()) {
- ctx.IssueManager.RaiseIssue(TIssue{TStringBuilder() << "Path '" << topic << "' is not a topic"});
- return {};
- }
- meta.Description = description.GetTopicDescription();
+ auto future = State_->Gateway->DescribeFederatedTopic(State_->SessionId, cluster, State_->Configuration->GetDatabaseForTopic(cluster), topic, State_->Configuration->Tokens.at(cluster));
+ meta.FederatedTopic = future.GetValueSync();
return CreateDefaultItemType(ctx);
} catch (const std::exception& ex) {
TIssues issues;
diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_provider.h b/ydb/library/yql/providers/pq/provider/yql_pq_provider.h
index 7f4c0245af..43056a765b 100644
--- a/ydb/library/yql/providers/pq/provider/yql_pq_provider.h
+++ b/ydb/library/yql/providers/pq/provider/yql_pq_provider.h
@@ -21,7 +21,7 @@ struct TPqState : public TThrRefBase {
bool RawFormat = true;
TExprNode::TPtr RowSpec;
TExprNode::TPtr ColumnOrder;
- TMaybe<::NPq::NConfigurationManager::TTopicDescription> Description;
+ TMaybe<IPqGateway::TDescribeFederatedTopicResult> FederatedTopic;
};
public:
diff --git a/ydb/library/yql/providers/pq/provider/yql_pq_topic_client.h b/ydb/library/yql/providers/pq/provider/yql_pq_topic_client.h
index 314784aa4d..ed4af81a40 100644
--- a/ydb/library/yql/providers/pq/provider/yql_pq_topic_client.h
+++ b/ydb/library/yql/providers/pq/provider/yql_pq_topic_client.h
@@ -1,5 +1,6 @@
#pragma once
#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/topic/client.h>
+#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/federated_topic/federated_topic.h>
namespace NYql {
class ITopicClient : public TThrRefBase {
@@ -31,6 +32,13 @@ public:
const NYdb::NTopic::TCommitOffsetSettings& settings = {}) = 0;
};
+class IFederatedTopicClient : public TThrRefBase {
+public:
+ using TPtr = TIntrusivePtr<IFederatedTopicClient>;
+
+ virtual NThreading::TFuture<std::vector<NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo>> GetAllTopicClusters() = 0;
+};
+
class TNativeTopicClient : public ITopicClient {
public:
TNativeTopicClient(const NYdb::TDriver& driver, const NYdb::NTopic::TTopicClientSettings& settings = {}):
@@ -86,4 +94,19 @@ private:
NYdb::TDriver Driver_;
NYdb::NTopic::TTopicClient Client_;
};
-} \ No newline at end of file
+
+class TNativeFederatedTopicClient : public IFederatedTopicClient {
+public:
+ TNativeFederatedTopicClient(const NYdb::TDriver& driver, const NYdb::NFederatedTopic::TFederatedTopicClientSettings& settings = {}):
+ Driver_(driver), FederatedClient_(Driver_, settings) {}
+
+ NThreading::TFuture<std::vector<NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo>> GetAllTopicClusters() override {
+ return FederatedClient_.GetAllClusterInfo();
+ }
+
+ ~TNativeFederatedTopicClient() {}
+private:
+ NYdb::TDriver Driver_;
+ NYdb::NFederatedTopic::TFederatedTopicClient FederatedClient_;
+};
+}
diff --git a/ydb/library/yql/tests/sql/dq_file.py b/ydb/library/yql/tests/sql/dq_file.py
index 48f2cb7265..62144f80a6 100644
--- a/ydb/library/yql/tests/sql/dq_file.py
+++ b/ydb/library/yql/tests/sql/dq_file.py
@@ -6,7 +6,8 @@ import re
import yatest.common
from yql_utils import get_supported_providers, yql_binary_path, is_xfail, is_skip_forceblocks, get_param, \
dump_table_yson, get_gateway_cfg_suffix, do_custom_query_check, normalize_result, \
- stable_result_file, stable_table_file, is_with_final_result_issues, log, is_unordered_result, is_sorted_table
+ stable_result_file, stable_table_file, is_with_final_result_issues, log, is_unordered_result, is_sorted_table, \
+ get_table_clusters
from test_utils import get_config
from test_file_common import run_file, run_file_no_cache
@@ -15,12 +16,23 @@ ASTDIFF_PATH = yql_binary_path('yql/essentials/tools/astdiff/astdiff')
DQRUN_PATH = yql_binary_path('ydb/library/yql/tools/dqrun/dqrun')
DATA_PATH = yatest.common.source_path('yt/yql/tests/sql/suites')
+def add_table_clusters(suite, config):
+ clusters = get_table_clusters(suite, config, DATA_PATH)
+ if not clusters:
+ return None
+ def patch(cfg_message):
+ for c in sorted(clusters):
+ mapping = cfg_message.Yt.ClusterMapping.add()
+ mapping.Name = c
+ return patch
+
def run_test(suite, case, cfg, tmpdir, what, yql_http_file_server):
if get_gateway_cfg_suffix() != '' and what != 'Results':
pytest.skip('non-trivial gateways.conf')
config = get_config(suite, case, cfg, data_path=DATA_PATH)
+ cfg_postprocess = add_table_clusters(suite, config)
program_sql = os.path.join(DATA_PATH, suite, '%s.sql' % case)
with codecs.open(program_sql, encoding='utf-8') as program_file_descr:
@@ -42,17 +54,20 @@ def run_test(suite, case, cfg, tmpdir, what, yql_http_file_server):
if is_with_final_result_issues(config):
extra_args += ["--with-final-issues"]
- (res, tables_res) = run_file('dq', suite, case, cfg, config, yql_http_file_server, DQRUN_PATH, extra_args=extra_args, data_path=DATA_PATH)
+ (res, tables_res) = run_file('dq', suite, case, cfg, config, yql_http_file_server, DQRUN_PATH, extra_args=extra_args, data_path=DATA_PATH,
+ cfg_postprocess=cfg_postprocess)
if what == 'Results' or force_blocks:
if not xfail:
if force_blocks:
yqlrun_res, yqlrun_tables_res = run_file_no_cache('dq', suite, case, cfg, config, yql_http_file_server, DQRUN_PATH, \
- extra_args=["--emulate-yt"], force_blocks=True, data_path=DATA_PATH)
+ extra_args=["--emulate-yt"], force_blocks=True, data_path=DATA_PATH, \
+ cfg_postprocess=cfg_postprocess)
dq_result_name = 'Scalar'
yqlrun_result_name = 'Block'
else:
- yqlrun_res, yqlrun_tables_res = run_file_no_cache('yt', suite, case, cfg, config, yql_http_file_server, data_path=DATA_PATH)
+ yqlrun_res, yqlrun_tables_res = run_file_no_cache('yt', suite, case, cfg, config, yql_http_file_server, data_path=DATA_PATH,
+ cfg_postprocess=cfg_postprocess)
dq_result_name = 'DQFILE'
yqlrun_result_name = 'YQLRUN'
diff --git a/ydb/library/yql/tests/sql/hybrid_file.py b/ydb/library/yql/tests/sql/hybrid_file.py
index 000925a14f..105941d364 100644
--- a/ydb/library/yql/tests/sql/hybrid_file.py
+++ b/ydb/library/yql/tests/sql/hybrid_file.py
@@ -7,7 +7,8 @@ import yatest.common
from yql_utils import replace_vals, yql_binary_path, is_xfail, get_param, \
get_gateway_cfg_suffix, normalize_result, stable_result_file, stable_table_file, \
- dump_table_yson, normalize_source_code_path, is_sorted_table, is_unordered_result
+ dump_table_yson, normalize_source_code_path, is_sorted_table, is_unordered_result, \
+ get_table_clusters
from test_utils import get_config
from test_file_common import run_file, run_file_no_cache
@@ -16,15 +17,27 @@ ASTDIFF_PATH = yql_binary_path('yql/essentials/tools/astdiff/astdiff')
DQRUN_PATH = yql_binary_path('ydb/library/yql/tools/dqrun/dqrun')
DATA_PATH = yatest.common.source_path('yt/yql/tests/sql/suites')
+def add_table_clusters(suite, config):
+ clusters = get_table_clusters(suite, config, DATA_PATH)
+ if not clusters:
+ return None
+ def patch(cfg_message):
+ for c in sorted(clusters):
+ mapping = cfg_message.Yt.ClusterMapping.add()
+ mapping.Name = c
+ return patch
+
def run_test(suite, case, cfg, tmpdir, what, yql_http_file_server):
if get_gateway_cfg_suffix() != '' and what != 'Results':
pytest.skip('non-trivial gateways.conf')
config = get_config(suite, case, cfg, data_path=DATA_PATH)
+ cfg_postprocess = add_table_clusters(suite, config)
xfail = is_xfail(config)
- (res, tables_res) = run_file('hybrid', suite, case, cfg, config, yql_http_file_server, DQRUN_PATH, extra_args=["--emulate-yt", "--no-force-dq"], data_path=DATA_PATH)
+ (res, tables_res) = run_file('hybrid', suite, case, cfg, config, yql_http_file_server, DQRUN_PATH, extra_args=["--emulate-yt", "--no-force-dq"],
+ data_path=DATA_PATH, cfg_postprocess=cfg_postprocess)
if what == 'Results':
if not xfail:
@@ -33,7 +46,8 @@ def run_test(suite, case, cfg, tmpdir, what, yql_http_file_server):
sql_query = program_file_descr.read()
# yqlrun run
- yqlrun_res, yqlrun_tables_res = run_file_no_cache('yt', suite, case, cfg, config, yql_http_file_server, data_path=DATA_PATH)
+ yqlrun_res, yqlrun_tables_res = run_file_no_cache('yt', suite, case, cfg, config, yql_http_file_server, data_path=DATA_PATH,
+ cfg_postprocess=cfg_postprocess)
hybrid_result_name = 'HYBRIDFILE'
yqlrun_result_name = 'YQLRUN'
diff --git a/ydb/library/yql/udfs/common/roaring/roaring.cpp b/ydb/library/yql/udfs/common/roaring/roaring.cpp
index 227917e9aa..ba1e559d3f 100644
--- a/ydb/library/yql/udfs/common/roaring/roaring.cpp
+++ b/ydb/library/yql/udfs/common/roaring/roaring.cpp
@@ -645,10 +645,21 @@ namespace {
return nullptr;
}
+ // Get the old allocation information.
+ auto oldAllocatedMemPointer = ((void**)oldPointer)[-1];
+ auto oldSizePointer = ((void**)oldPointer)[-2];
+
+ // Calculate the actual old data size (excluding the header).
+ size_t oldSize = (char*)oldSizePointer - (char*)oldAllocatedMemPointer - 2 * sizeof(void*);
+
+ // Allocate new memory.
auto reallocatedPointer = RoaringMallocUdf(newSize);
- auto oldAllocatedMemPointer = (char*)((void**)oldPointer)[-1];
- auto oldSizePointer = (char*)((void**)oldPointer)[-2];
- memcpy(reallocatedPointer, oldPointer, oldSizePointer - oldAllocatedMemPointer);
+
+ // Copy the minimum of old size and new size.
+ size_t copySize = oldSize < newSize ? oldSize : newSize;
+ memcpy(reallocatedPointer, oldPointer, copySize);
+
+ // Free the old memory.
RoaringFreeUdf(oldPointer);
return reallocatedPointer;
diff --git a/ydb/mvp/meta/meta_cp_databases.h b/ydb/mvp/meta/meta_cp_databases.h
index 8f525897cc..79991dbe77 100644
--- a/ydb/mvp/meta/meta_cp_databases.h
+++ b/ydb/mvp/meta/meta_cp_databases.h
@@ -230,7 +230,6 @@ public:
.SetMapAsObject(true)
.SetEnumMode(NProtobufJson::TProto2JsonConfig::EnumValueMode::EnumName);
-
std::unordered_map<TString, const yandex::cloud::priv::ydb::v1::Database*> indexDatabaseById;
std::unordered_map<TString, const yandex::cloud::priv::ydb::v1::Database*> indexDatabaseByName;
std::unordered_map<TString, NJson::TJsonValue*> indexJsonDatabaseById;
@@ -245,16 +244,23 @@ public:
databases.SetType(NJson::JSON_ARRAY);
NJson::TJsonValue::TArray tenantArray(TenantInfo["TenantInfo"].GetArray());
- std::sort(tenantArray.begin(), tenantArray.end(), [](const NJson::TJsonValue& a, const NJson::TJsonValue& b) -> bool {
- return a["Name"].GetStringRobust() < b["Name"].GetStringRobust();
- });
+ TString filterDatabase = Request.Parameters["database"];
+ if (!filterDatabase) {
+ std::sort(tenantArray.begin(), tenantArray.end(), [](const NJson::TJsonValue& a, const NJson::TJsonValue& b) -> bool {
+ return a["Name"].GetStringRobust() < b["Name"].GetStringRobust();
+ });
+ }
for (const NJson::TJsonValue& tenant : tenantArray) {
+ if (filterDatabase && tenant["Name"].GetStringRobust() != filterDatabase) {
+ continue;
+ }
NJson::TJsonValue& jsonDatabase = databases.AppendValue(NJson::TJsonValue());
jsonDatabase = std::move(tenant);
TString id = jsonDatabase["Id"].GetStringRobust();
if (!id.empty()) {
indexJsonDatabaseById[id] = &jsonDatabase;
}
+ bool foundDatabase = false;
NJson::TJsonValue* jsonUserAttributes;
if (jsonDatabase.GetValuePointer("UserAttributes", &jsonUserAttributes)) {
NJson::TJsonValue* jsonDatabaseId;
@@ -263,16 +269,26 @@ public:
auto itDatabase = indexDatabaseById.find(jsonDatabaseId->GetStringRobust());
if (itDatabase != indexDatabaseById.end()) {
NProtobufJson::Proto2Json(*itDatabase->second, jsonDatabase["ControlPlane"], proto2JsonConfig);
+ foundDatabase = true;
+ }
+ if (!foundDatabase) {
+ auto itDatabase = indexDatabaseByName.find(jsonDatabaseId->GetStringRobust());
+ if (itDatabase != indexDatabaseByName.end()) {
+ NProtobufJson::Proto2Json(*itDatabase->second, jsonDatabase["ControlPlane"], proto2JsonConfig);
+ foundDatabase = true;
+ }
}
}
}
}
- NJson::TJsonValue* jsonName;
- if (jsonDatabase.GetValuePointer("Name", &jsonName)) {
- if (jsonName->GetType() == NJson::JSON_STRING) {
- auto itDatabase = indexDatabaseByName.find(jsonName->GetStringRobust());
- if (itDatabase != indexDatabaseByName.end()) {
- NProtobufJson::Proto2Json(*itDatabase->second, jsonDatabase["ControlPlane"], proto2JsonConfig);
+ if (!foundDatabase) {
+ NJson::TJsonValue* jsonName;
+ if (jsonDatabase.GetValuePointer("Name", &jsonName)) {
+ if (jsonName->GetType() == NJson::JSON_STRING) {
+ auto itDatabase = indexDatabaseByName.find(jsonName->GetStringRobust());
+ if (itDatabase != indexDatabaseByName.end()) {
+ NProtobufJson::Proto2Json(*itDatabase->second, jsonDatabase["ControlPlane"], proto2JsonConfig);
+ }
}
}
}
diff --git a/ydb/public/lib/ydb_cli/commands/benchmark_utils.cpp b/ydb/public/lib/ydb_cli/commands/benchmark_utils.cpp
index eb025e495f..eeea746f37 100644
--- a/ydb/public/lib/ydb_cli/commands/benchmark_utils.cpp
+++ b/ydb/public/lib/ydb_cli/commands/benchmark_utils.cpp
@@ -131,23 +131,6 @@ TString FullTablePath(const TString& database, const TString& table) {
return prefixPathSplit.Reconstruct();
}
-
-TMaybe<TQueryBenchmarkResult> ResultByStatus(const TStatus& status, const TString& deadlineName) {
- if (status.IsSuccess()) {
- return Nothing();
- }
- TStringBuilder errorInfo;
- switch (status.GetStatus()) {
- case NYdb::EStatus::CLIENT_DEADLINE_EXCEEDED:
- errorInfo << deadlineName << " deadline expiried: " << status.GetIssues();
- break;
- default:
- errorInfo << "Operation failed with status " << status.GetStatus() << ": " << status.GetIssues().ToString();
- break;
- }
- return TQueryBenchmarkResult::Error(errorInfo, "", "");
-}
-
bool HasCharsInString(const TString& str) {
for(TStringBuf q(str), line; q.ReadLine(line);) {
line = line.NextTok("--");
@@ -184,7 +167,7 @@ public:
}
template <typename TIterator>
- bool Scan(TIterator& it, std::optional<TString> planFileName = std::nullopt) {
+ TStatus Scan(TIterator& it, std::optional<TString> planFileName = std::nullopt) {
TProgressIndication progressIndication(true);
TMaybe<NQuery::TExecStats> execStats;
@@ -254,7 +237,7 @@ public:
if (!streamPart.IsSuccess()) {
if (!streamPart.EOS()) {
OnError(streamPart.GetStatus(), streamPart.GetIssues().ToString());
- return false;
+ return streamPart;
}
break;
}
@@ -268,10 +251,39 @@ public:
QueryPlan = execStats->GetPlan().value_or("");
PlanAst = execStats->GetAst().value_or("");
}
- return true;
+ return TStatus(EStatus::SUCCESS, NIssue::TIssues());
}
};
+TQueryBenchmarkResult ConstructResultByStatus(const TStatus& status, const THolder<TQueryResultScanner>& scanner, const TQueryBenchmarkSettings& benchmarkSettings) {
+ if (status.IsSuccess()) {
+ Y_ENSURE(scanner);
+ return TQueryBenchmarkResult::Result(
+ scanner->ExtractRawResults(),
+ scanner->GetServerTiming(),
+ scanner->GetQueryPlan(),
+ scanner->GetPlanAst()
+ );
+ }
+ TStringBuilder errorInfo;
+ TString plan, ast;
+ switch (status.GetStatus()) {
+ case NYdb::EStatus::CLIENT_DEADLINE_EXCEEDED:
+ errorInfo << benchmarkSettings.Deadline.Name << " deadline expired: " << status.GetIssues();
+ break;
+ default:
+ if (scanner) {
+ errorInfo << scanner->GetErrorInfo();
+ plan = scanner->GetQueryPlan();
+ ast = scanner->GetPlanAst();
+ } else {
+ errorInfo << "Operation failed with status " << status.GetStatus() << ": " << status.GetIssues().ToString();
+ }
+ break;
+ }
+ return TQueryBenchmarkResult::Error(errorInfo, plan, ast);
+}
+
template<class TSettings>
TMaybe<TQueryBenchmarkResult> SetTimeoutSettings(TSettings& settings, const TQueryBenchmarkDeadline& deadline) {
if (deadline.Deadline != TInstant::Max()) {
@@ -284,39 +296,32 @@ TMaybe<TQueryBenchmarkResult> SetTimeoutSettings(TSettings& settings, const TQue
return Nothing();
}
-TQueryBenchmarkResult ExecuteImpl(const TString& query, NTable::TTableClient& client, const TQueryBenchmarkDeadline& deadline, bool explainOnly) {
+TQueryBenchmarkResult ExecuteImpl(const TString& query, NTable::TTableClient& client, const TQueryBenchmarkSettings& benchmarkSettings, bool explainOnly) {
TStreamExecScanQuerySettings settings;
settings.CollectQueryStats(ECollectQueryStatsMode::Full);
settings.Explain(explainOnly);
- if (const auto error = SetTimeoutSettings(settings, deadline)) {
+ if (const auto error = SetTimeoutSettings(settings, benchmarkSettings.Deadline)) {
return *error;
}
- auto it = client.StreamExecuteScanQuery(query, settings).GetValueSync();
- if (const auto error = ResultByStatus(it, deadline.Name)) {
- return *error;
- }
-
- TQueryResultScanner composite;
- composite.SetDeadlineName(deadline.Name);
- if (!composite.Scan(it)) {
- return TQueryBenchmarkResult::Error(
- composite.GetErrorInfo(), composite.GetQueryPlan(), composite.GetPlanAst());
- } else {
- return TQueryBenchmarkResult::Result(
- composite.ExtractRawResults(),
- composite.GetServerTiming(),
- composite.GetQueryPlan(),
- composite.GetPlanAst()
- );
- }
+ THolder<TQueryResultScanner> composite;
+ const auto resStatus = client.RetryOperationSync([&composite, &benchmarkSettings, &query, &settings](NTable::TTableClient& tc) -> TStatus {
+ auto it = tc.StreamExecuteScanQuery(query, settings).GetValueSync();
+ if (!it.IsSuccess()) {
+ return it;
+ }
+ composite = MakeHolder<TQueryResultScanner>();
+ composite->SetDeadlineName(benchmarkSettings.Deadline.Name);
+ return composite->Scan(it);
+ }, benchmarkSettings.RetrySettings);
+ return ConstructResultByStatus(resStatus, composite, benchmarkSettings);
}
TQueryBenchmarkResult Execute(const TString& query, NTable::TTableClient& client, const TQueryBenchmarkSettings& settings) {
- return ExecuteImpl(query, client, settings.Deadline, false);
+ return ExecuteImpl(query, client, settings, false);
}
-TQueryBenchmarkResult Explain(const TString& query, NTable::TTableClient& client, const TQueryBenchmarkDeadline& deadline) {
- return ExecuteImpl(query, client, deadline, true);
+TQueryBenchmarkResult Explain(const TString& query, NTable::TTableClient& client, const TQueryBenchmarkSettings& settings) {
+ return ExecuteImpl(query, client, settings, true);
}
TQueryBenchmarkResult ExecuteImpl(const TString& query, NQuery::TQueryClient& client, const TQueryBenchmarkSettings& benchmarkSettings, bool explainOnly) {
@@ -329,36 +334,27 @@ TQueryBenchmarkResult ExecuteImpl(const TString& query, NQuery::TQueryClient& cl
if (auto error = SetTimeoutSettings(settings, benchmarkSettings.Deadline)) {
return *error;
}
- auto it = client.StreamExecuteQuery(
- query,
- NYdb::NQuery::TTxControl::BeginTx().CommitTx(),
- settings).GetValueSync();
- if (auto error = ResultByStatus(it, benchmarkSettings.Deadline.Name)) {
- return *error;
- }
-
- TQueryResultScanner composite;
- composite.SetDeadlineName(benchmarkSettings.Deadline.Name);
- if (!composite.Scan(it, benchmarkSettings.PlanFileName)) {
- return TQueryBenchmarkResult::Error(
- composite.GetErrorInfo(), composite.GetQueryPlan(), composite.GetPlanAst());
- } else {
- return TQueryBenchmarkResult::Result(
- composite.ExtractRawResults(),
- composite.GetServerTiming(),
- composite.GetQueryPlan(),
- composite.GetPlanAst()
- );
- }
+ THolder<TQueryResultScanner> composite;
+ const auto resStatus = client.RetryQuerySync([&composite, &benchmarkSettings, &query, &settings](NQuery::TQueryClient& qc) -> TStatus {
+ auto it = qc.StreamExecuteQuery(
+ query,
+ NYdb::NQuery::TTxControl::BeginTx().CommitTx(),
+ settings).GetValueSync();
+ if (!it.IsSuccess()) {
+ return it;
+ }
+ composite = MakeHolder<TQueryResultScanner>();
+ composite->SetDeadlineName(benchmarkSettings.Deadline.Name);
+ return composite->Scan(it);
+ }, benchmarkSettings.RetrySettings);
+ return ConstructResultByStatus(resStatus, composite, benchmarkSettings);
}
TQueryBenchmarkResult Execute(const TString& query, NQuery::TQueryClient& client, const TQueryBenchmarkSettings& settings) {
return ExecuteImpl(query, client, settings, false);
}
-TQueryBenchmarkResult Explain(const TString& query, NQuery::TQueryClient& client, const TQueryBenchmarkDeadline& deadline) {
- TQueryBenchmarkSettings settings;
- settings.Deadline = deadline;
+TQueryBenchmarkResult Explain(const TString& query, NQuery::TQueryClient& client, const TQueryBenchmarkSettings& settings) {
return ExecuteImpl(query, client, settings, true);
}
diff --git a/ydb/public/lib/ydb_cli/commands/benchmark_utils.h b/ydb/public/lib/ydb_cli/commands/benchmark_utils.h
index 20d9713a16..67b775d1b9 100644
--- a/ydb/public/lib/ydb_cli/commands/benchmark_utils.h
+++ b/ydb/public/lib/ydb_cli/commands/benchmark_utils.h
@@ -82,14 +82,15 @@ struct TQueryBenchmarkSettings {
TQueryBenchmarkDeadline Deadline;
std::optional<TString> PlanFileName;
bool WithProgress = false;
+ NYdb::NRetry::TRetryOperationSettings RetrySettings;
};
TString FullTablePath(const TString& database, const TString& table);
bool HasCharsInString(const TString& str);
TQueryBenchmarkResult Execute(const TString & query, NTable::TTableClient & client, const TQueryBenchmarkSettings& settings);
TQueryBenchmarkResult Execute(const TString & query, NQuery::TQueryClient & client, const TQueryBenchmarkSettings& settings);
-TQueryBenchmarkResult Explain(const TString & query, NTable::TTableClient & client, const TQueryBenchmarkDeadline& deadline);
-TQueryBenchmarkResult Explain(const TString & query, NQuery::TQueryClient & client, const TQueryBenchmarkDeadline& deadline);
+TQueryBenchmarkResult Explain(const TString & query, NTable::TTableClient & client, const TQueryBenchmarkSettings& settings);
+TQueryBenchmarkResult Explain(const TString & query, NQuery::TQueryClient & client, const TQueryBenchmarkSettings& settings);
NJson::TJsonValue GetQueryLabels(ui32 queryId);
NJson::TJsonValue GetSensorValue(TStringBuf sensor, TDuration& value, ui32 queryId);
NJson::TJsonValue GetSensorValue(TStringBuf sensor, double value, ui32 queryId);
diff --git a/ydb/public/lib/ydb_cli/commands/interactive/complete/ya.make b/ydb/public/lib/ydb_cli/commands/interactive/complete/ya.make
index e2d8c56e93..ec285886bd 100644
--- a/ydb/public/lib/ydb_cli/commands/interactive/complete/ya.make
+++ b/ydb/public/lib/ydb_cli/commands/interactive/complete/ya.make
@@ -7,6 +7,8 @@ SRCS(
PEERDIR(
contrib/restricted/patched/replxx
yql/essentials/sql/v1/complete
+ yql/essentials/sql/v1/lexer/antlr4_pure
+ yql/essentials/sql/v1/lexer/antlr4_pure_ansi
)
END()
diff --git a/ydb/public/lib/ydb_cli/commands/interactive/complete/yql_completer.cpp b/ydb/public/lib/ydb_cli/commands/interactive/complete/yql_completer.cpp
index e2b48fa5ea..5b37a92413 100644
--- a/ydb/public/lib/ydb_cli/commands/interactive/complete/yql_completer.cpp
+++ b/ydb/public/lib/ydb_cli/commands/interactive/complete/yql_completer.cpp
@@ -1,6 +1,9 @@
#include "yql_completer.h"
#include <yql/essentials/sql/v1/complete/sql_complete.h>
+#include <yql/essentials/sql/v1/complete/name/static/name_service.h>
+#include <yql/essentials/sql/v1/lexer/antlr4_pure/lexer.h>
+#include <yql/essentials/sql/v1/lexer/antlr4_pure_ansi/lexer.h>
namespace NYdb::NConsoleClient {
@@ -38,8 +41,8 @@ namespace NYdb::NConsoleClient {
};
IYQLCompleter::TPtr MakeYQLCompleter() {
- return IYQLCompleter::TPtr(
- new TYQLCompleter(NSQLComplete::MakeSqlCompletionEngine()));
+ return IYQLCompleter::TPtr(new TYQLCompleter(
+ NSQLComplete::MakeSqlCompletionEngine()));
}
} // namespace NYdb::NConsoleClient
diff --git a/ydb/public/lib/ydb_cli/commands/ydb_benchmark.cpp b/ydb/public/lib/ydb_cli/commands/ydb_benchmark.cpp
index 6fa094c184..00ed403bd9 100644
--- a/ydb/public/lib/ydb_cli/commands/ydb_benchmark.cpp
+++ b/ydb/public/lib/ydb_cli/commands/ydb_benchmark.cpp
@@ -12,7 +12,7 @@ namespace NYdb::NConsoleClient {
TWorkloadCommandBenchmark::TWorkloadCommandBenchmark(NYdbWorkload::TWorkloadParams& params, const NYdbWorkload::IWorkloadQueryGenerator::TWorkloadType& workload)
: TWorkloadCommandBase(workload.CommandName, params, NYdbWorkload::TWorkloadParams::ECommandType::Run, workload.Description, workload.Type)
{
-
+ RetrySettings.MaxRetries(0);
}
@@ -43,6 +43,7 @@ void TWorkloadCommandBenchmark::Config(TConfig& config) {
config.Opts->AddLongOption("query-prefix", "Query prefix.\nEvery prefix is a line that will be added to the beginning of each query. For multiple prefixes lines use this option several times.")
.AppendTo(&QuerySettings);
config.Opts->MutuallyExclusive("query-prefix", "query-settings");
+ config.Opts->AddLongOption("retries", "Max retry count for every request.").StoreResult(&RetrySettings.MaxRetries_).DefaultValue(RetrySettings.MaxRetries_);
auto fillTestCases = [](TStringBuf line, std::function<void(ui32)>&& op) {
for (const auto& token : StringSplitter(line).Split(',').SkipEmpty()) {
TStringBuf part = token.Token();
@@ -354,7 +355,7 @@ int TWorkloadCommandBenchmark::RunBench(TClient* client, NYdbWorkload::IWorkload
TQueryBenchmarkResult res = TQueryBenchmarkResult::Error("undefined", "undefined", "undefined");
try {
if (client) {
- res = Explain(query, *client, GetDeadline());
+ res = Explain(query, *client, GetBenchmarkSettings(false));
} else {
res = TQueryBenchmarkResult::Result(TQueryBenchmarkResult::TRawResults(), TDuration::Zero(), "", "");
}
@@ -371,18 +372,13 @@ int TWorkloadCommandBenchmark::RunBench(TClient* client, NYdbWorkload::IWorkload
break;
}
TQueryBenchmarkResult res = TQueryBenchmarkResult::Error("undefined", "undefined", "undefined");
-
- TQueryBenchmarkSettings settings;
- settings.Deadline = GetDeadline();
- settings.WithProgress = true;
-
- if (PlanFileName) {
- settings.PlanFileName = TStringBuilder() << PlanFileName << "." << queryN << "." << ToString(i) << ".in_progress";
- }
-
try {
if (client) {
- res = Execute(query, *client, settings);
+ auto settings = GetBenchmarkSettings(true);
+ if (PlanFileName) {
+ settings.PlanFileName = TStringBuilder() << PlanFileName << "." << queryN << "." << ToString(i) << ".in_progress";
+ }
+ res = Execute(query, *client, settings);
} else {
res = TQueryBenchmarkResult::Result(TQueryBenchmarkResult::TRawResults(), TDuration::Zero(), "", "");
}
@@ -542,16 +538,18 @@ void TWorkloadCommandBenchmark::SavePlans(const BenchmarkUtils::TQueryBenchmarkR
}
}
-BenchmarkUtils::TQueryBenchmarkDeadline TWorkloadCommandBenchmark::GetDeadline() const {
- BenchmarkUtils::TQueryBenchmarkDeadline result;
+BenchmarkUtils::TQueryBenchmarkSettings TWorkloadCommandBenchmark::GetBenchmarkSettings(bool withProgress) const {
+ BenchmarkUtils::TQueryBenchmarkSettings result;
+ result.WithProgress = withProgress;
+ result.RetrySettings = RetrySettings;
if (GlobalDeadline != TInstant::Max()) {
- result.Deadline = GlobalDeadline;
- result.Name = "Global ";
+ result.Deadline.Deadline = GlobalDeadline;
+ result.Deadline.Name = "Global ";
}
TInstant requestDeadline = (RequestTimeout == TDuration::Zero()) ? TInstant::Max() : (Now() + RequestTimeout);
- if (requestDeadline < result.Deadline) {
- result.Deadline = requestDeadline;
- result.Name = "Request";
+ if (requestDeadline < result.Deadline.Deadline) {
+ result.Deadline.Deadline = requestDeadline;
+ result.Deadline.Name = "Request";
}
return result;
}
diff --git a/ydb/public/lib/ydb_cli/commands/ydb_benchmark.h b/ydb/public/lib/ydb_cli/commands/ydb_benchmark.h
index c09c77e304..f538e74741 100644
--- a/ydb/public/lib/ydb_cli/commands/ydb_benchmark.h
+++ b/ydb/public/lib/ydb_cli/commands/ydb_benchmark.h
@@ -6,7 +6,7 @@ namespace NYdb::NConsoleClient {
namespace BenchmarkUtils {
class TQueryBenchmarkResult;
- struct TQueryBenchmarkDeadline;
+ struct TQueryBenchmarkSettings;
}
class TWorkloadCommandBenchmark final: public TWorkloadCommandBase {
@@ -30,7 +30,7 @@ private:
int RunBench(TClient* client, NYdbWorkload::IWorkloadQueryGenerator& workloadGen);
void SavePlans(const BenchmarkUtils::TQueryBenchmarkResult& res, ui32 queryNum, const TStringBuf name) const;
void PrintResult(const BenchmarkUtils::TQueryBenchmarkResult& res, IOutputStream& out, const std::string& expected) const;
- BenchmarkUtils::TQueryBenchmarkDeadline GetDeadline() const;
+ BenchmarkUtils::TQueryBenchmarkSettings GetBenchmarkSettings(bool withProgress) const;
private:
EQueryExecutor QueryExecuterType = EQueryExecutor::Generic;
@@ -47,6 +47,7 @@ private:
TDuration GlobalTimeout = TDuration::Zero();
TDuration RequestTimeout = TDuration::Zero();
TInstant GlobalDeadline = TInstant::Max();
+ NYdb::NRetry::TRetryOperationSettings RetrySettings;
};
} \ No newline at end of file
diff --git a/ydb/public/lib/ydb_cli/commands/ydb_command.cpp b/ydb/public/lib/ydb_cli/commands/ydb_command.cpp
index 6c0295b922..81f8335374 100644
--- a/ydb/public/lib/ydb_cli/commands/ydb_command.cpp
+++ b/ydb/public/lib/ydb_cli/commands/ydb_command.cpp
@@ -29,6 +29,8 @@ TDriverConfig TYdbCommand::CreateDriverConfig(TConfig& config) {
driverConfig.UseSecureConnection(config.CaCerts);
if (config.IsNetworkIntensive)
driverConfig.SetNetworkThreadsNum(16);
+ if (config.SkipDiscovery)
+ driverConfig.SetDiscoveryMode(EDiscoveryMode::Off);
driverConfig.UseClientCertificate(config.ClientCert, config.ClientCertPrivateKey);
return driverConfig;
diff --git a/ydb/public/lib/ydb_cli/commands/ydb_root_common.cpp b/ydb/public/lib/ydb_cli/commands/ydb_root_common.cpp
index 44284a8481..f8332f0fa4 100644
--- a/ydb/public/lib/ydb_cli/commands/ydb_root_common.cpp
+++ b/ydb/public/lib/ydb_cli/commands/ydb_root_common.cpp
@@ -155,6 +155,16 @@ void TClientCommandRootCommon::Config(TConfig& config) {
.RequiredArgument("NAME").StoreResult(&ProfileName);
opts.AddLongOption('y', "assume-yes", "Automatic yes to prompts; assume \"yes\" as answer to all prompts and run non-interactively")
.Optional().StoreTrue(&config.AssumeYes);
+
+ if (config.HelpCommandVerbosiltyLevel >= 2) {
+ opts.AddLongOption("no-discovery", "Do not perform discovery (client balancing) for ydb cluster connection."
+ " If this option is set, the user-provided endpoint (by -e option) will be used to set up a connection")
+ .Optional().StoreTrue(&config.SkipDiscovery);
+ } else {
+ opts.AddLongOption("no-discovery")
+ .Optional().Hidden().StoreTrue(&config.SkipDiscovery);
+ }
+
TClientCommandRootBase::Config(config);
TAuthMethodOption* iamTokenAuth = nullptr;
@@ -439,12 +449,13 @@ void TClientCommandRootCommon::ExtractParams(TConfig& config) {
}
}
+ config.EnableSsl = EnableSsl;
+
ParseCaCerts(config);
ParseClientCert(config);
ParseStaticCredentials(config);
config.Address = Address;
- config.EnableSsl = EnableSsl;
config.Database = Database;
config.ChosenAuthMethod = ParseResult->GetChosenAuthMethod();
}
diff --git a/ydb/public/lib/ydb_cli/commands/ydb_sql.cpp b/ydb/public/lib/ydb_cli/commands/ydb_sql.cpp
index e872dd4a2f..b0081f8bbd 100644
--- a/ydb/public/lib/ydb_cli/commands/ydb_sql.cpp
+++ b/ydb/public/lib/ydb_cli/commands/ydb_sql.cpp
@@ -46,8 +46,11 @@ void TCommandSql::Config(TConfig& config) {
config.Opts->AddLongOption("diagnostics-file", "Path to file where the diagnostics will be saved.")
.RequiredArgument("[String]").StoreResult(&DiagnosticsFile);
config.Opts->AddLongOption("syntax", "Query syntax [yql, pg]")
- .RequiredArgument("[String]").DefaultValue("yql").StoreResult(&Syntax)
- .Hidden();
+ .RequiredArgument("[String]")
+ .Hidden()
+ .GetOpt().Handler1T<TString>("yql", [this](const TString& arg) {
+ SetSyntax(arg);
+ });
AddOutputFormats(config, {
EDataFormat::Pretty,
@@ -143,13 +146,8 @@ int TCommandSql::RunCommand(TConfig& config) {
auto defaultStatsMode = ExplainAnalyzeMode ? NQuery::EStatsMode::Full : NQuery::EStatsMode::None;
settings.StatsMode(ParseQueryStatsModeOrThrow(CollectStatsMode, defaultStatsMode));
}
- if (Syntax == "yql") {
- settings.Syntax(NQuery::ESyntax::YqlV1);
- } else if (Syntax == "pg") {
- settings.Syntax(NQuery::ESyntax::Pg);
- } else {
- throw TMisuseException() << "Unknow syntax option \"" << Syntax << "\"";
- }
+
+ settings.Syntax(SyntaxType);
if (!Parameters.empty() || InputParamStream) {
// Execute query with parameters
@@ -284,8 +282,14 @@ void TCommandSql::SetCollectStatsMode(TString&& collectStatsMode) {
CollectStatsMode = std::move(collectStatsMode);
}
-void TCommandSql::SetSyntax(TString&& syntax) {
- Syntax = std::move(syntax);
+void TCommandSql::SetSyntax(const TString& syntax) {
+ if (syntax == "yql") {
+ SyntaxType = NYdb::NQuery::ESyntax::YqlV1;
+ } else if (syntax == "pg") {
+ SyntaxType = NYdb::NQuery::ESyntax::Pg;
+ } else {
+ throw TMisuseException() << "Unknown syntax option \"" << syntax << "\"";
+ }
}
}
diff --git a/ydb/public/lib/ydb_cli/commands/ydb_sql.h b/ydb/public/lib/ydb_cli/commands/ydb_sql.h
index b0cfec0145..a623f8a311 100644
--- a/ydb/public/lib/ydb_cli/commands/ydb_sql.h
+++ b/ydb/public/lib/ydb_cli/commands/ydb_sql.h
@@ -19,9 +19,9 @@ public:
virtual void Config(TConfig& config) override;
virtual void Parse(TConfig& config) override;
virtual int Run(TConfig& config) override;
- void SetSyntax(TString&& syntax);
void SetCollectStatsMode(TString&& collectStatsMode);
void SetScript(TString&& script);
+ void SetSyntax(const TString& syntax);
private:
int RunCommand(TConfig& config);
@@ -31,7 +31,6 @@ private:
TString DiagnosticsFile;
TString Query;
TString QueryFile;
- TString Syntax;
bool ExplainMode = false;
bool ExplainAnalyzeMode = false;
bool ExplainAst = false;
diff --git a/ydb/public/lib/ydb_cli/common/client_command_options.h b/ydb/public/lib/ydb_cli/common/client_command_options.h
index 9f278b2753..22706ae279 100644
--- a/ydb/public/lib/ydb_cli/common/client_command_options.h
+++ b/ydb/public/lib/ydb_cli/common/client_command_options.h
@@ -351,6 +351,25 @@ private:
std::vector<TString> OptValues;
};
+class TCommandOptsParseResult: public NLastGetopt::TOptsParseResult {
+public:
+ TCommandOptsParseResult(const NLastGetopt::TOpts* options, int argc, const char* argv[])
+ : ThrowOnParseError(options->HasLongOption("throw-on-parse-error")) {
+ Init(options, argc, argv);
+ }
+
+ virtual ~TCommandOptsParseResult() = default;
+
+ void HandleError() const override {
+ if (ThrowOnParseError) {
+ throw;
+ }
+ NLastGetopt::TOptsParseResult::HandleError();
+ }
+private:
+ bool ThrowOnParseError;
+};
+
class TOptionsParseResult {
friend class TClientCommandOptions;
@@ -392,7 +411,7 @@ public:
private:
const TClientCommandOptions* ClientOptions = nullptr;
- NLastGetopt::TOptsParseResult ParseFromCommandLineResult; // First parsing stage
+ TCommandOptsParseResult ParseFromCommandLineResult; // First parsing stage
std::vector<TOptionParseResult> Opts;
std::vector<size_t> AuthMethodOpts; // indexes
TString ChosenAuthMethod;
diff --git a/ydb/public/lib/ydb_cli/common/command.h b/ydb/public/lib/ydb_cli/common/command.h
index 7858026bab..cb5318ca77 100644
--- a/ydb/public/lib/ydb_cli/common/command.h
+++ b/ydb/public/lib/ydb_cli/common/command.h
@@ -125,6 +125,7 @@ public:
TMap<TString, TVector<TConnectionParam>> ConnectionParams;
bool EnableSsl = false;
+ bool SkipDiscovery = false;
bool IsNetworkIntensive = false;
TString Oauth2KeyFile;
TString Oauth2KeyParams;
diff --git a/ydb/public/lib/ydb_cli/common/parameters.cpp b/ydb/public/lib/ydb_cli/common/parameters.cpp
index aa14137446..89ac5155a1 100644
--- a/ydb/public/lib/ydb_cli/common/parameters.cpp
+++ b/ydb/public/lib/ydb_cli/common/parameters.cpp
@@ -3,6 +3,7 @@
#include <ydb/public/lib/json_value/ydb_json_value.h>
#include <ydb/public/lib/ydb_cli/commands/ydb_common.h>
#include <ydb/public/lib/ydb_cli/common/interactive.h>
+#include <ydb/public/lib/ydb_cli/common/yql_parser/yql_parser.h>
#include <library/cpp/json/json_reader.h>
#include <library/cpp/threading/future/async.h>
@@ -333,26 +334,38 @@ void TCommandWithParameters::SetParamsInputFromFile(TString& file) {
SetParamsInput(InputFileHolder.Get());
}
-void TCommandWithParameters::GetParamTypes(const TDriver& driver, const TString& queryText) {
- NScripting::TScriptingClient client(driver);
+void TCommandWithParameters::InitParamTypes(const TDriver& driver, const TString& queryText) {
+ if (SyntaxType == NQuery::ESyntax::Pg) {
+ ParamTypes.clear();
+ return;
+ }
- NScripting::TExplainYqlRequestSettings explainSettings;
- explainSettings.Mode(NScripting::ExplainYqlRequestMode::Validate);
+ auto types = TYqlParamParser::GetParamTypes(queryText);
+ if (types.has_value()) {
+ ParamTypes = *types;
+ return;
+ }
+
+ // Fallback to ExplainYql
+ NScripting::TScriptingClient client(driver);
+ auto explainSettings = NScripting::TExplainYqlRequestSettings()
+ .Mode(NScripting::ExplainYqlRequestMode::Validate);
auto result = client.ExplainYqlScript(
queryText,
explainSettings
).GetValueSync();
+
NStatusHelpers::ThrowOnErrorOrPrintIssues(result);
ParamTypes = result.GetParameterTypes();
}
-bool TCommandWithParameters::GetNextParams(const TDriver& driver, const TString& queryText,
- THolder<TParamsBuilder>& paramBuilder) {
+bool TCommandWithParameters::GetNextParams(const TDriver& driver, const TString& queryText, THolder<TParamsBuilder>& paramBuilder) {
paramBuilder = MakeHolder<TParamsBuilder>();
if (IsFirstEncounter) {
IsFirstEncounter = false;
- GetParamTypes(driver, queryText);
+ InitParamTypes(driver, queryText);
+
if (!InputParamStream) {
AddParams(*paramBuilder);
return true;
diff --git a/ydb/public/lib/ydb_cli/common/parameters.h b/ydb/public/lib/ydb_cli/common/parameters.h
index 6bc151688f..cd841783e0 100644
--- a/ydb/public/lib/ydb_cli/common/parameters.h
+++ b/ydb/public/lib/ydb_cli/common/parameters.h
@@ -10,6 +10,7 @@
#include <ydb/public/lib/ydb_cli/common/parameter_stream.h>
#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/params/params.h>
#include <ydb/public/lib/json_value/ydb_json_value.h>
+#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/query/query.h>
namespace NYdb {
namespace NConsoleClient {
@@ -42,7 +43,7 @@ private:
void SetParamsInput(IInputStream* input);
void SetParamsInputFromFile(TString& file);
void SetParamsInputFromStdin();
- void GetParamTypes(const TDriver& driver, const TString& queryText);
+ void InitParamTypes(const TDriver& driver, const TString& queryText);
TMaybe<TString> ReadData();
@@ -69,6 +70,7 @@ protected:
TDuration BatchMaxDelay;
THolder<NScripting::TExplainYqlResult> ValidateResult;
bool ReadingSomethingFromStdin = false;
+ NQuery::ESyntax SyntaxType = NQuery::ESyntax::YqlV1;
};
}
diff --git a/ydb/public/lib/ydb_cli/common/ya.make b/ydb/public/lib/ydb_cli/common/ya.make
index 7905d3c428..13b2ea827e 100644
--- a/ydb/public/lib/ydb_cli/common/ya.make
+++ b/ydb/public/lib/ydb_cli/common/ya.make
@@ -58,6 +58,7 @@ PEERDIR(
ydb/public/sdk/cpp/src/client/types/credentials
ydb/public/sdk/cpp/src/client/types/credentials/oauth2_token_exchange
ydb/library/arrow_parquet
+ ydb/public/lib/ydb_cli/common/yql_parser
)
GENERATE_ENUM_SERIALIZATION(formats.h)
@@ -65,6 +66,10 @@ GENERATE_ENUM_SERIALIZATION(parameters.h)
END()
+RECURSE(
+ yql_parser
+)
+
RECURSE_FOR_TESTS(
ut
)
diff --git a/ydb/public/lib/ydb_cli/common/yql_parser/ut/ya.make b/ydb/public/lib/ydb_cli/common/yql_parser/ut/ya.make
new file mode 100644
index 0000000000..35bbe38af1
--- /dev/null
+++ b/ydb/public/lib/ydb_cli/common/yql_parser/ut/ya.make
@@ -0,0 +1,11 @@
+UNITTEST_FOR(ydb/public/lib/ydb_cli/common/yql_parser)
+
+SRCS(
+ yql_parser_ut.cpp
+)
+
+DATA(
+ arcadia/yql/essentials/data/language/types.json
+)
+
+END()
diff --git a/ydb/public/lib/ydb_cli/common/yql_parser/ya.make b/ydb/public/lib/ydb_cli/common/yql_parser/ya.make
new file mode 100644
index 0000000000..79e0487f0b
--- /dev/null
+++ b/ydb/public/lib/ydb_cli/common/yql_parser/ya.make
@@ -0,0 +1,20 @@
+LIBRARY()
+
+PEERDIR(
+ ydb/public/sdk/cpp/src/client/value
+ ydb/public/sdk/cpp/src/client/types
+ yql/essentials/sql/settings
+ yql/essentials/sql/v1/lexer
+ yql/essentials/sql/v1/lexer/antlr4
+ yql/essentials/sql/v1/lexer/antlr4_ansi
+)
+
+SRCS(
+ yql_parser.cpp
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ ut
+)
diff --git a/ydb/public/lib/ydb_cli/common/yql_parser/yql_parser.cpp b/ydb/public/lib/ydb_cli/common/yql_parser/yql_parser.cpp
new file mode 100644
index 0000000000..3a43861067
--- /dev/null
+++ b/ydb/public/lib/ydb_cli/common/yql_parser/yql_parser.cpp
@@ -0,0 +1,392 @@
+#include "yql_parser.h"
+
+#include <yql/essentials/public/issue/yql_issue.h>
+#include <yql/essentials/sql/settings/translation_settings.h>
+#include <yql/essentials/sql/v1/lexer/lexer.h>
+#include <yql/essentials/sql/v1/lexer/antlr4/lexer.h>
+#include <yql/essentials/sql/v1/lexer/antlr4_ansi/lexer.h>
+
+#include <util/generic/scope.h>
+#include <util/string/split.h>
+#include <util/string/strip.h>
+
+namespace NYdb {
+namespace NConsoleClient {
+
+namespace {
+
+TString ToLower(const TString& s) {
+ TString result = s;
+ for (char& c : result) {
+ c = std::tolower(c);
+ }
+ return result;
+}
+
+class TYqlTypeParser {
+public:
+ TYqlTypeParser(const TVector<NSQLTranslation::TParsedToken>& tokens)
+ : Tokens(tokens)
+ {}
+
+ std::optional<TType> Build(size_t& pos) {
+ auto node = Parse(SkipWS(pos));
+ if (!node || (pos < Tokens.size() && Tokens[pos].Content != ";")) {
+ return std::nullopt;
+ }
+
+ TTypeBuilder builder;
+ if (!BuildType(*node, builder)) {
+ return std::nullopt;
+ }
+ return builder.Build();
+ }
+
+private:
+ struct TypeNode {
+ TTypeParser::ETypeKind TypeKind;
+ std::vector<TypeNode> Children;
+
+ // For primitive type
+ EPrimitiveType PrimitiveType;
+
+ // For struct type
+ TString Name;
+
+ // For decimal type
+ ui32 precision = 0;
+ ui32 scale = 0;
+ };
+
+ std::optional<TypeNode> Parse(size_t& pos) {
+ TypeNode node;
+
+ TString lowerContent = ToLower(Tokens[pos].Content);
+
+ if (lowerContent == "struct" || lowerContent == "tuple") {
+ node.TypeKind = lowerContent == "struct" ? TTypeParser::ETypeKind::Struct :
+ TTypeParser::ETypeKind::Tuple;
+
+ if (SkipCurrentTokenAndWS(pos) >= Tokens.size() || Tokens[pos].Content != "<") {
+ return std::nullopt;
+ }
+
+ while (pos < Tokens.size() && Tokens[pos].Content != ">") {
+ if (lowerContent == "struct") {
+ SkipCurrentTokenAndWS(pos);
+ auto name = Tokens[pos].Content;
+
+ if (SkipCurrentTokenAndWS(pos) >= Tokens.size() || Tokens[pos].Content != ":") {
+ return std::nullopt;
+ }
+
+ auto parseResult = Parse(SkipCurrentTokenAndWS(pos));
+ if (!parseResult) {
+ return std::nullopt;
+ }
+ node.Children.push_back(*parseResult);
+ node.Children.back().Name = name;
+ } else {
+ auto parseResult = Parse(SkipCurrentTokenAndWS(pos));
+ if (!parseResult) {
+ return std::nullopt;
+ }
+ node.Children.push_back(*parseResult);
+ }
+
+ if (pos >= Tokens.size() || (Tokens[pos].Content != "," && Tokens[pos].Content != ">")) {
+ return std::nullopt;
+ }
+ }
+ } else if (lowerContent == "list" ||
+ lowerContent == "optional" ||
+ lowerContent == "dict") {
+ node.TypeKind = lowerContent == "list" ? TTypeParser::ETypeKind::List :
+ lowerContent == "optional" ? TTypeParser::ETypeKind::Optional :
+ TTypeParser::ETypeKind::Dict;
+
+ if (SkipCurrentTokenAndWS(pos) >= Tokens.size() || Tokens[pos].Content != "<") {
+ return std::nullopt;
+ }
+
+ auto parseResult = Parse(SkipCurrentTokenAndWS(pos));
+ if (!parseResult) {
+ return std::nullopt;
+ }
+ node.Children.push_back(*parseResult);
+
+ if (lowerContent == "dict") {
+ if (pos >= Tokens.size() || Tokens[pos].Content != ",") {
+ return std::nullopt;
+ }
+
+ parseResult = Parse(SkipCurrentTokenAndWS(pos));
+ if (!parseResult) {
+ return std::nullopt;
+ }
+ node.Children.push_back(*parseResult);
+ }
+
+ if (pos >= Tokens.size() || Tokens[pos].Content != ">") {
+ return std::nullopt;
+ }
+ } else if (lowerContent == "decimal") {
+ auto parseResult = ParseDecimal(pos);
+ if (!parseResult) {
+ return std::nullopt;
+ }
+ node = *parseResult;
+ } else {
+ auto parseResult = ParsePrimitive(lowerContent);
+ if (!parseResult) {
+ return std::nullopt;
+ }
+ node = *parseResult;
+ }
+
+ if (SkipCurrentTokenAndWS(pos) < Tokens.size() && Tokens[pos].Content == "?") {
+ TypeNode optionalNode;
+ optionalNode.TypeKind = TTypeParser::ETypeKind::Optional;
+ optionalNode.Children.push_back(node);
+ SkipCurrentTokenAndWS(pos);
+
+ return optionalNode;
+ }
+
+ return node;
+ }
+
+ std::optional<TypeNode> ParsePrimitive(const TString& content) {
+ TypeNode node;
+ node.TypeKind = TTypeParser::ETypeKind::Primitive;
+
+ if (content == "bool") {
+ node.PrimitiveType = EPrimitiveType::Bool;
+ } else if (content == "int8") {
+ node.PrimitiveType = EPrimitiveType::Int8;
+ } else if (content == "uint8") {
+ node.PrimitiveType = EPrimitiveType::Uint8;
+ } else if (content == "int16") {
+ node.PrimitiveType = EPrimitiveType::Int16;
+ } else if (content == "uint16") {
+ node.PrimitiveType = EPrimitiveType::Uint16;
+ } else if (content == "int32") {
+ node.PrimitiveType = EPrimitiveType::Int32;
+ } else if (content == "uint32") {
+ node.PrimitiveType = EPrimitiveType::Uint32;
+ } else if (content == "int64") {
+ node.PrimitiveType = EPrimitiveType::Int64;
+ } else if (content == "uint64") {
+ node.PrimitiveType = EPrimitiveType::Uint64;
+ } else if (content == "float") {
+ node.PrimitiveType = EPrimitiveType::Float;
+ } else if (content == "double") {
+ node.PrimitiveType = EPrimitiveType::Double;
+ } else if (content == "string") {
+ node.PrimitiveType = EPrimitiveType::String;
+ } else if (content == "utf8") {
+ node.PrimitiveType = EPrimitiveType::Utf8;
+ } else if (content == "json") {
+ node.PrimitiveType = EPrimitiveType::Json;
+ } else if (content == "yson") {
+ node.PrimitiveType = EPrimitiveType::Yson;
+ } else if (content == "date") {
+ node.PrimitiveType = EPrimitiveType::Date;
+ } else if (content == "datetime") {
+ node.PrimitiveType = EPrimitiveType::Datetime;
+ } else if (content == "timestamp") {
+ node.PrimitiveType = EPrimitiveType::Timestamp;
+ } else if (content == "interval") {
+ node.PrimitiveType = EPrimitiveType::Interval;
+ } else if (content == "date32") {
+ node.PrimitiveType = EPrimitiveType::Date32;
+ } else if (content == "datetime64") {
+ node.PrimitiveType = EPrimitiveType::Datetime64;
+ } else if (content == "timestamp64") {
+ node.PrimitiveType = EPrimitiveType::Timestamp64;
+ } else if (content == "interval64") {
+ node.PrimitiveType = EPrimitiveType::Interval64;
+ } else if (content == "tzdate") {
+ node.PrimitiveType = EPrimitiveType::TzDate;
+ } else if (content == "tzdatetime") {
+ node.PrimitiveType = EPrimitiveType::TzDatetime;
+ } else if (content == "tztimestamp") {
+ node.PrimitiveType = EPrimitiveType::TzTimestamp;
+ } else if (content == "uuid") {
+ node.PrimitiveType = EPrimitiveType::Uuid;
+ } else if (content == "jsondocument") {
+ node.PrimitiveType = EPrimitiveType::JsonDocument;
+ } else if (content == "dynumber") {
+ node.PrimitiveType = EPrimitiveType::DyNumber;
+ } else if (content == "emptylist") {
+ node.TypeKind = TTypeParser::ETypeKind::EmptyList;
+ } else if (content == "emptydict") {
+ node.TypeKind = TTypeParser::ETypeKind::EmptyDict;
+ } else if (content == "void") {
+ node.TypeKind = TTypeParser::ETypeKind::Void;
+ } else if (content == "null") {
+ node.TypeKind = TTypeParser::ETypeKind::Null;
+ } else {
+ return std::nullopt;
+ }
+
+ return node;
+ }
+
+ std::optional<TypeNode> ParseDecimal(size_t& pos) {
+ TypeNode node;
+ node.TypeKind = TTypeParser::ETypeKind::Decimal;
+
+ if (SkipCurrentTokenAndWS(pos) >= Tokens.size() || Tokens[pos].Content != "(") {
+ return std::nullopt;
+ }
+
+ if (SkipCurrentTokenAndWS(pos) >= Tokens.size() || !TryFromString<ui32>(Tokens[pos].Content, node.precision)) {
+ return std::nullopt;
+ }
+
+ if (SkipCurrentTokenAndWS(pos) >= Tokens.size() || Tokens[pos].Content != ",") {
+ return std::nullopt;
+ }
+
+ if (SkipCurrentTokenAndWS(pos) >= Tokens.size() || !TryFromString<ui32>(Tokens[pos].Content, node.scale)) {
+ return std::nullopt;
+ }
+
+ if (SkipCurrentTokenAndWS(pos) >= Tokens.size() || Tokens[pos].Content != ")") {
+ return std::nullopt;
+ }
+
+ return node;
+ }
+
+ size_t& SkipWS(size_t& pos) {
+ while (pos < Tokens.size() && Tokens[pos].Name == "WS") {
+ pos++;
+ }
+ return pos;
+ }
+
+ size_t& SkipCurrentTokenAndWS(size_t& pos) {
+ pos++;
+ return SkipWS(pos);
+ }
+
+ bool BuildType(const TypeNode& node, TTypeBuilder& builder) {
+ if (node.TypeKind == TTypeParser::ETypeKind::Optional) {
+ builder.BeginOptional();
+ BuildType(node.Children[0], builder);
+ builder.EndOptional();
+ } else if (node.TypeKind == TTypeParser::ETypeKind::List) {
+ builder.BeginList();
+ BuildType(node.Children[0], builder);
+ builder.EndList();
+ } else if (node.TypeKind == TTypeParser::ETypeKind::Struct) {
+ builder.BeginStruct();
+ for (const auto& field : node.Children) {
+ builder.AddMember(field.Name);
+ BuildType(field, builder);
+ }
+ builder.EndStruct();
+ } else if (node.TypeKind == TTypeParser::ETypeKind::Tuple) {
+ builder.BeginTuple();
+ for (const auto& element : node.Children) {
+ builder.AddElement();
+ BuildType(element, builder);
+ }
+ builder.EndTuple();
+ } else if (node.TypeKind == TTypeParser::ETypeKind::Dict) {
+ builder.BeginDict();
+ builder.DictKey();
+ BuildType(node.Children[0], builder);
+ builder.DictPayload();
+ BuildType(node.Children[1], builder);
+ builder.EndDict();
+ } else if (node.TypeKind == TTypeParser::ETypeKind::Decimal) {
+ builder.Decimal(TDecimalType(node.precision, node.scale));
+ } else if (node.TypeKind == TTypeParser::ETypeKind::Primitive) {
+ builder.Primitive(node.PrimitiveType);
+ } else {
+ return false;
+ }
+
+ return true;
+ }
+
+ const TVector<NSQLTranslation::TParsedToken>& Tokens;
+};
+
+}
+
+
+std::optional<std::map<std::string, TType>> TYqlParamParser::GetParamTypes(const TString& queryText) {
+ enum class EParseState {
+ Start,
+ Declare,
+ ParamName,
+ As,
+ Type
+ };
+
+ std::map<std::string, TType> result;
+
+ NSQLTranslationV1::TLexers lexers;
+ lexers.Antlr4 = NSQLTranslationV1::MakeAntlr4LexerFactory();
+ lexers.Antlr4Ansi = NSQLTranslationV1::MakeAntlr4AnsiLexerFactory();
+
+ auto lexer = MakeLexer(lexers, /* ansi = */ false, /* antlr4 = */ true);
+
+ TVector<NSQLTranslation::TParsedToken> tokens;
+ NYql::TIssues issues;
+ if (!NSQLTranslation::Tokenize(*lexer, queryText, "Query", tokens, issues, NSQLTranslation::SQL_MAX_PARSER_ERRORS) ||
+ !issues.Empty()) {
+ return std::nullopt;
+ }
+
+ EParseState state = EParseState::Start;
+ TString paramName;
+
+ for (size_t i = 0; i < tokens.size(); ++i) {
+ if (tokens[i].Name == "WS") {
+ continue;
+ }
+
+ if (state == EParseState::Start) {
+ if (ToLower(tokens[i].Content) != "declare") {
+ continue;
+ }
+
+ state = EParseState::Declare;
+ } else if (state == EParseState::Declare) {
+ if (tokens[i].Content != "$") {
+ return std::nullopt;
+ }
+
+ state = EParseState::ParamName;
+ } else if (state == EParseState::ParamName) {
+ paramName = "$" + tokens[i].Content;
+ state = EParseState::As;
+ } else if (state == EParseState::As) {
+ if (ToLower(tokens[i].Content) != "as") {
+ return std::nullopt;
+ }
+
+ state = EParseState::Type;
+ } else if (state == EParseState::Type) {
+ TYqlTypeParser parser(tokens);
+ auto parsedType = parser.Build(i);
+
+ if (!parsedType) {
+ return std::nullopt;
+ }
+
+ result.emplace(paramName, *parsedType);
+ state = EParseState::Start;
+ }
+ }
+
+ return result;
+}
+
+} // namespace NConsoleClient
+} // namespace NYdb
diff --git a/ydb/public/lib/ydb_cli/common/yql_parser/yql_parser.h b/ydb/public/lib/ydb_cli/common/yql_parser/yql_parser.h
new file mode 100644
index 0000000000..e022ac4443
--- /dev/null
+++ b/ydb/public/lib/ydb_cli/common/yql_parser/yql_parser.h
@@ -0,0 +1,17 @@
+#pragma once
+
+#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/value/value.h>
+
+#include <map>
+#include <optional>
+
+namespace NYdb {
+namespace NConsoleClient {
+
+class TYqlParamParser {
+public:
+ static std::optional<std::map<std::string, TType>> GetParamTypes(const TString& queryText);
+};
+
+} // namespace NConsoleClient
+} // namespace NYdb
diff --git a/ydb/public/lib/ydb_cli/common/yql_parser/yql_parser_ut.cpp b/ydb/public/lib/ydb_cli/common/yql_parser/yql_parser_ut.cpp
new file mode 100644
index 0000000000..08ab9d7e18
--- /dev/null
+++ b/ydb/public/lib/ydb_cli/common/yql_parser/yql_parser_ut.cpp
@@ -0,0 +1,928 @@
+#include <ydb/public/lib/ydb_cli/common/yql_parser/yql_parser.h>
+
+#include <library/cpp/json/json_reader.h>
+#include <library/cpp/testing/common/env.h>
+#include <library/cpp/testing/unittest/registar.h>
+
+#include <util/stream/file.h>
+
+using namespace NYdb;
+using namespace NYdb::NConsoleClient;
+
+Y_UNIT_TEST_SUITE(TYqlParamParserTest) {
+ Y_UNIT_TEST(TestBasicTypes) {
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $id AS Uint64;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$id");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $name AS Utf8;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$name");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+ }
+ }
+
+ Y_UNIT_TEST(TestListType) {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $values AS List<Uint64>;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$values");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::List);
+
+ parser.OpenList();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ parser.CloseList();
+ }
+
+ Y_UNIT_TEST(TestStructType) {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $user AS Struct<id:Uint64,name:Utf8>;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$user");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Struct);
+
+ parser.OpenStruct();
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "id");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "name");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ UNIT_ASSERT(!parser.TryNextMember());
+ parser.CloseStruct();
+ }
+
+ Y_UNIT_TEST(TestMultipleParams) {
+ TString query = R"(
+ DECLARE $id AS Uint64;
+ DECLARE $name AS Utf8;
+ DECLARE $age AS Uint32;
+ )";
+ auto types = TYqlParamParser::GetParamTypes(query);
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 3);
+
+ {
+ auto it = types->find("$id");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ }
+ {
+ auto it = types->find("$name");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+ }
+ {
+ auto it = types->find("$age");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint32);
+ }
+ }
+
+ Y_UNIT_TEST(TestDecimalType) {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $price AS Decimal(22,9);");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$price");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Decimal);
+ auto decimal = parser.GetDecimal();
+ UNIT_ASSERT_VALUES_EQUAL(decimal.Precision, 22);
+ UNIT_ASSERT_VALUES_EQUAL(decimal.Scale, 9);
+ }
+
+ Y_UNIT_TEST(TestDictType) {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $dict AS Dict<Utf8,Uint64>;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$dict");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Dict);
+
+ parser.OpenDict();
+
+ parser.DictKey();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ parser.DictPayload();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+
+ parser.CloseDict();
+ }
+
+ Y_UNIT_TEST(TestTupleType) {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $tuple AS Tuple<Uint64,Utf8,Bool>;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$tuple");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Tuple);
+
+ parser.OpenTuple();
+
+ UNIT_ASSERT(parser.TryNextElement());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+
+ UNIT_ASSERT(parser.TryNextElement());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ UNIT_ASSERT(parser.TryNextElement());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Bool);
+
+ UNIT_ASSERT(!parser.TryNextElement());
+ parser.CloseTuple();
+ }
+
+ Y_UNIT_TEST(TestNestedTypes) {
+ TString query = R"(
+ DECLARE $nested AS List<Struct<
+ id: Uint64,
+ name: Utf8,
+ tags: List<Utf8>,
+ meta: Dict<Utf8, List<Uint32>>
+ >>;
+ )";
+ auto types = TYqlParamParser::GetParamTypes(query);
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$nested");
+ UNIT_ASSERT(it != types->end());
+
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::List);
+
+ parser.OpenList();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Struct);
+
+ parser.OpenStruct();
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "id");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "name");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "tags");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::List);
+ parser.OpenList();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+ parser.CloseList();
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "meta");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Dict);
+
+ parser.OpenDict();
+
+ parser.DictKey();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ parser.DictPayload();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::List);
+ parser.OpenList();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint32);
+ parser.CloseList();
+
+ parser.CloseDict();
+
+ UNIT_ASSERT(!parser.TryNextMember());
+ parser.CloseStruct();
+ parser.CloseList();
+ }
+
+ Y_UNIT_TEST(TestCaseInsensitiveTypes) {
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $id AS UINT64;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$id");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $list AS LIST<UINT32>;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$list");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::List);
+ parser.OpenList();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint32);
+ parser.CloseList();
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $struct AS STRUCT<ID:UINT64,NAME:UTF8>;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$struct");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Struct);
+
+ parser.OpenStruct();
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "ID");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "NAME");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ UNIT_ASSERT(!parser.TryNextMember());
+ parser.CloseStruct();
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $dict AS DICT<UTF8,UINT64>;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$dict");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Dict);
+
+ parser.OpenDict();
+
+ parser.DictKey();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ parser.DictPayload();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+
+ parser.CloseDict();
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $tuple AS TUPLE<UINT64,UTF8,BOOL>;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$tuple");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Tuple);
+
+ parser.OpenTuple();
+
+ UNIT_ASSERT(parser.TryNextElement());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+
+ UNIT_ASSERT(parser.TryNextElement());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ UNIT_ASSERT(parser.TryNextElement());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Bool);
+
+ UNIT_ASSERT(!parser.TryNextElement());
+ parser.CloseTuple();
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $price AS DECIMAL(22,9);");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$price");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Decimal);
+ auto decimal = parser.GetDecimal();
+ UNIT_ASSERT_VALUES_EQUAL(decimal.Precision, 22);
+ UNIT_ASSERT_VALUES_EQUAL(decimal.Scale, 9);
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("declare $id as UINT64;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$id");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ }
+ }
+
+ Y_UNIT_TEST(TestOptionalTypes) {
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $id AS Uint64?;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$id");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Optional);
+ parser.OpenOptional();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ parser.CloseOptional();
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $id AS Optional<Uint64>;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$id");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Optional);
+ parser.OpenOptional();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ parser.CloseOptional();
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $list AS Optional<List<Uint64>>;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$list");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Optional);
+ parser.OpenOptional();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::List);
+ parser.OpenList();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ parser.CloseList();
+ parser.CloseOptional();
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $struct AS Struct<id:Uint64,name:Utf8>?;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$struct");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Optional);
+ parser.OpenOptional();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Struct);
+ parser.OpenStruct();
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "id");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "name");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ UNIT_ASSERT(!parser.TryNextMember());
+ parser.CloseStruct();
+ parser.CloseOptional();
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $dict AS Optional<Dict<Utf8,Uint64>>;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$dict");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Optional);
+ parser.OpenOptional();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Dict);
+
+ parser.OpenDict();
+ parser.DictKey();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ parser.DictPayload();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ parser.CloseDict();
+
+ parser.CloseOptional();
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $tuple AS Optional<Tuple<Uint64,Utf8>>;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$tuple");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Optional);
+ parser.OpenOptional();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Tuple);
+
+ parser.OpenTuple();
+ UNIT_ASSERT(parser.TryNextElement());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+
+ UNIT_ASSERT(parser.TryNextElement());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ UNIT_ASSERT(!parser.TryNextElement());
+ parser.CloseTuple();
+
+ parser.CloseOptional();
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $decimal AS Optional<Decimal(10,2)>;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$decimal");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Optional);
+ parser.OpenOptional();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Decimal);
+ auto decimal = parser.GetDecimal();
+ UNIT_ASSERT_VALUES_EQUAL(decimal.Precision, 10);
+ UNIT_ASSERT_VALUES_EQUAL(decimal.Scale, 2);
+ parser.CloseOptional();
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $nested AS Optional<List<Struct<id:Uint64,name:Utf8>>>;");
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ auto it = types->find("$nested");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Optional);
+ parser.OpenOptional();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::List);
+
+ parser.OpenList();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Struct);
+ parser.OpenStruct();
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "id");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "name");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ UNIT_ASSERT(!parser.TryNextMember());
+ parser.CloseStruct();
+ parser.CloseList();
+
+ parser.CloseOptional();
+ }
+ }
+
+ Y_UNIT_TEST(TestInvalidQuery) {
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $id AS @#$%^;");
+ UNIT_ASSERT(!types.has_value());
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $id AS");
+ UNIT_ASSERT(!types.has_value());
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE AS $id Uint64;");
+ UNIT_ASSERT(!types.has_value());
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $invalid AS Optional;");
+ UNIT_ASSERT(!types.has_value());
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $invalid AS Optional<>;");
+ UNIT_ASSERT(!types.has_value());
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $invalid AS Optional<Uint64,Utf8>;");
+ UNIT_ASSERT(!types.has_value());
+ }
+
+ {
+ auto types = TYqlParamParser::GetParamTypes("DECLARE $invalid AS SomeUnknownType;");
+ UNIT_ASSERT(!types.has_value());
+ }
+
+ auto types = *TYqlParamParser::GetParamTypes(R"(
+ DECLARE $id AS Uint64;
+ abacaba abacaba;
+ lol lol lol
+ DECLARE $name AS Utf8;
+ )");
+
+ UNIT_ASSERT(types.size() == 2);
+
+ {
+ auto it = types.find("$id");
+ UNIT_ASSERT(it != types.end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ }
+
+ {
+ auto it = types.find("$name");
+ UNIT_ASSERT(it != types.end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+ }
+ }
+
+ Y_UNIT_TEST(TestWhitespace) {
+ {
+ auto types = *TYqlParamParser::GetParamTypes("DECLARE $id AS Uint64;");
+ UNIT_ASSERT_VALUES_EQUAL(types.size(), 1);
+ auto it = types.find("$id");
+ UNIT_ASSERT(it != types.end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ }
+
+ {
+ auto types = *TYqlParamParser::GetParamTypes("DECLARE\t$id\tAS\tUint64;");
+ UNIT_ASSERT_VALUES_EQUAL(types.size(), 1);
+ auto it = types.find("$id");
+ UNIT_ASSERT(it != types.end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ }
+
+ {
+ auto types = *TYqlParamParser::GetParamTypes("DECLARE\n$id\nAS\nUint64;");
+ UNIT_ASSERT_VALUES_EQUAL(types.size(), 1);
+ auto it = types.find("$id");
+ UNIT_ASSERT(it != types.end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ }
+
+ {
+ auto types = *TYqlParamParser::GetParamTypes("DECLARE $id AS List< Uint64 >;");
+ UNIT_ASSERT_VALUES_EQUAL(types.size(), 1);
+ auto it = types.find("$id");
+ UNIT_ASSERT(it != types.end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::List);
+ parser.OpenList();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ parser.CloseList();
+ }
+
+ {
+ auto types = *TYqlParamParser::GetParamTypes("DECLARE $id AS Struct< id : Uint64, name : Utf8 >;");
+ UNIT_ASSERT_VALUES_EQUAL(types.size(), 1);
+ auto it = types.find("$id");
+ UNIT_ASSERT(it != types.end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Struct);
+ parser.OpenStruct();
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "id");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "name");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ UNIT_ASSERT(!parser.TryNextMember());
+ parser.CloseStruct();
+ }
+
+ {
+ auto types = *TYqlParamParser::GetParamTypes("DECLARE $id AS Tuple< Uint64, Utf8 >;");
+ UNIT_ASSERT_VALUES_EQUAL(types.size(), 1);
+ auto it = types.find("$id");
+ UNIT_ASSERT(it != types.end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Tuple);
+ parser.OpenTuple();
+
+ UNIT_ASSERT(parser.TryNextElement());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+
+ UNIT_ASSERT(parser.TryNextElement());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ UNIT_ASSERT(!parser.TryNextElement());
+ parser.CloseTuple();
+ }
+
+ {
+ auto types = *TYqlParamParser::GetParamTypes("DECLARE $id AS Dict< Utf8, Uint64 >;");
+ UNIT_ASSERT_VALUES_EQUAL(types.size(), 1);
+ auto it = types.find("$id");
+ UNIT_ASSERT(it != types.end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Dict);
+ parser.OpenDict();
+
+ parser.DictKey();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ parser.DictPayload();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ parser.CloseDict();
+ }
+
+ {
+ auto types = *TYqlParamParser::GetParamTypes("DECLARE $id AS Decimal( 10, 2 );");
+ UNIT_ASSERT_VALUES_EQUAL(types.size(), 1);
+ auto it = types.find("$id");
+ UNIT_ASSERT(it != types.end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Decimal);
+ auto decimal = parser.GetDecimal();
+ UNIT_ASSERT_VALUES_EQUAL(decimal.Precision, 10);
+ UNIT_ASSERT_VALUES_EQUAL(decimal.Scale, 2);
+ }
+
+ {
+ auto types = *TYqlParamParser::GetParamTypes("DECLARE $id AS Uint64 ?;");
+ UNIT_ASSERT_VALUES_EQUAL(types.size(), 1);
+ auto it = types.find("$id");
+ UNIT_ASSERT(it != types.end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Optional);
+ parser.OpenOptional();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ parser.CloseOptional();
+ }
+
+ {
+ auto types = *TYqlParamParser::GetParamTypes("DECLARE $id AS Optional < Uint64 >;");
+ UNIT_ASSERT_VALUES_EQUAL(types.size(), 1);
+ auto it = types.find("$id");
+ UNIT_ASSERT(it != types.end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Optional);
+ parser.OpenOptional();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ parser.CloseOptional();
+ }
+
+ {
+ auto types = *TYqlParamParser::GetParamTypes(R"(
+ DECLARE $id AS Uint64;
+ DECLARE $name AS Utf8;
+ DECLARE $age AS Uint32;
+ )");
+ UNIT_ASSERT_VALUES_EQUAL(types.size(), 3);
+ auto it = types.find("$id");
+ UNIT_ASSERT(it != types.end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ }
+ }
+
+ Y_UNIT_TEST(TestComplexQuery) {
+ TString query = R"(
+ DECLARE $user_id AS Uint64;
+ DECLARE $start_date AS Date;
+
+ SELECT
+ user_id,
+ name,
+ email,
+ created_at,
+ last_login
+ FROM users
+ WHERE user_id = $user_id
+ AND created_at >= $start_date;
+
+ DECLARE $user_data AS Struct<
+ name: Utf8,
+ email: Utf8,
+ age: Uint32?,
+ preferences: Dict<Utf8, Json>
+ >;
+
+ DECLARE $user_tags AS List<Utf8>;
+
+ UPDATE users
+ SET
+ name = $user_data.name,
+ email = $user_data.email,
+ age = $user_data.age,
+ preferences = $user_data.preferences,
+ tags = $user_tags,
+ updated_at = CurrentUtcTimestamp()
+ WHERE user_id = $user_id;
+
+ DECLARE $stats AS Struct<
+ total_users: Uint64,
+ active_users: Uint64,
+ avg_age: Decimal(5,2)
+ >;
+
+ SELECT
+ COUNT(*) as total_users,
+ COUNT(CASE WHEN last_login >= $start_date THEN 1 END) as active_users,
+ AVG(CAST(age AS Decimal(5,2))) as avg_age
+ FROM users;
+ )";
+
+ auto types = TYqlParamParser::GetParamTypes(query);
+ UNIT_ASSERT(types.has_value());
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 5);
+
+ {
+ auto it = types->find("$user_id");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+ }
+
+ {
+ auto it = types->find("$start_date");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Date);
+ }
+
+ {
+ auto it = types->find("$user_data");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Struct);
+ parser.OpenStruct();
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "name");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "email");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "age");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Optional);
+ parser.OpenOptional();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint32);
+ parser.CloseOptional();
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "preferences");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Dict);
+ parser.OpenDict();
+ parser.DictKey();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+ parser.DictPayload();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Json);
+ parser.CloseDict();
+
+ UNIT_ASSERT(!parser.TryNextMember());
+ parser.CloseStruct();
+ }
+
+ {
+ auto it = types->find("$user_tags");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::List);
+ parser.OpenList();
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Utf8);
+ parser.CloseList();
+ }
+
+ {
+ auto it = types->find("$stats");
+ UNIT_ASSERT(it != types->end());
+ TTypeParser parser(it->second);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Struct);
+ parser.OpenStruct();
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "total_users");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "active_users");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Primitive);
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetPrimitive(), EPrimitiveType::Uint64);
+
+ UNIT_ASSERT(parser.TryNextMember());
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetMemberName(), "avg_age");
+ UNIT_ASSERT_VALUES_EQUAL(parser.GetKind(), TTypeParser::ETypeKind::Decimal);
+ auto decimal = parser.GetDecimal();
+ UNIT_ASSERT_VALUES_EQUAL(decimal.Precision, 5);
+ UNIT_ASSERT_VALUES_EQUAL(decimal.Scale, 2);
+
+ UNIT_ASSERT(!parser.TryNextMember());
+ parser.CloseStruct();
+ }
+ }
+
+ Y_UNIT_TEST(TestAllTypes) {
+ TString jsonContent;
+ TFileInput file(TStringBuilder() << ArcadiaSourceRoot() << "/yql/essentials/data/language/types.json");
+ jsonContent = file.ReadAll();
+
+ NJson::TJsonValue jsonValue;
+ UNIT_ASSERT(NJson::ReadJsonTree(jsonContent, &jsonValue));
+
+ for (const auto& [typeName, typeInfo] : jsonValue.GetMap()) {
+ if (typeName == "Generic" || typeName == "Unit" || typeName == "Void" ||
+ typeName == "EmptyList" || typeName == "EmptyDict" || typeName == "Null" ||
+ typeName == "TzDate32" || typeName == "TzDatetime64" || typeName == "TzTimestamp64" ||
+ typeInfo.GetMap().at("kind").GetString() == "Pg") {
+ continue;
+ }
+
+ TString query = TStringBuilder() << "DECLARE $param AS " << typeName << ";";
+
+ auto types = TYqlParamParser::GetParamTypes(query);
+ UNIT_ASSERT_C(types.has_value(), "Unknown type: " << typeName);
+ UNIT_ASSERT_VALUES_EQUAL(types->size(), 1);
+ UNIT_ASSERT(types->contains("$param"));
+ }
+ }
+}
diff --git a/ydb/public/lib/ydb_cli/dump/util/view_utils.cpp b/ydb/public/lib/ydb_cli/dump/util/view_utils.cpp
index 8bd3a51324..c039c0f874 100644
--- a/ydb/public/lib/ydb_cli/dump/util/view_utils.cpp
+++ b/ydb/public/lib/ydb_cli/dump/util/view_utils.cpp
@@ -15,26 +15,6 @@ using namespace NSQLv1Generated;
namespace {
-struct TViewQuerySplit {
- TString ContextRecreation;
- TString Select;
-};
-
-TViewQuerySplit SplitViewQuery(TStringInput query) {
- // to do: make the implementation more versatile
- TViewQuerySplit split;
-
- TString line;
- while (query.ReadLine(line)) {
- (line.StartsWith("--") || line.StartsWith("PRAGMA ")
- ? split.ContextRecreation
- : split.Select
- ) += line;
- }
-
- return split;
-}
-
bool ValidateViewQuery(const TString& query, NYql::TIssues& issues) {
TRule_sql_query queryProto;
if (!SqlToProtoAst(query, queryProto, issues)) {
@@ -104,6 +84,21 @@ bool RewriteTablePathPrefix(TString& query, TStringBuf backupRoot, TStringBuf re
} // anonymous
+TViewQuerySplit SplitViewQuery(TStringInput query) {
+ // to do: make the implementation more versatile
+ TViewQuerySplit split;
+
+ TString line;
+ while (query.ReadLine(line)) {
+ (line.StartsWith("--") || line.StartsWith("PRAGMA ")
+ ? split.ContextRecreation
+ : split.Select
+ ) += line;
+ }
+
+ return split;
+}
+
TString BuildCreateViewQuery(
const TString& name, const TString& dbPath, const TString& viewQuery, const TString& backupRoot,
NYql::TIssues& issues
diff --git a/ydb/public/lib/ydb_cli/dump/util/view_utils.h b/ydb/public/lib/ydb_cli/dump/util/view_utils.h
index b75074f35f..7f60c107d0 100644
--- a/ydb/public/lib/ydb_cli/dump/util/view_utils.h
+++ b/ydb/public/lib/ydb_cli/dump/util/view_utils.h
@@ -1,6 +1,7 @@
#pragma once
#include <util/generic/string.h>
+#include <util/stream/str.h>
namespace NYql {
class TIssues;
@@ -8,6 +9,13 @@ namespace NYql {
namespace NYdb::NDump {
+struct TViewQuerySplit {
+ TString ContextRecreation;
+ TString Select;
+};
+
+TViewQuerySplit SplitViewQuery(TStringInput query);
+
TString BuildCreateViewQuery(
const TString& name, const TString& dbPath, const TString& viewQuery, const TString& backupRoot,
NYql::TIssues& issues
diff --git a/ydb/public/sdk/cpp/client/ydb_topic/impl/read_session_impl.ipp b/ydb/public/sdk/cpp/client/ydb_topic/impl/read_session_impl.ipp
index e11dadfa02..58ead30d30 100644
--- a/ydb/public/sdk/cpp/client/ydb_topic/impl/read_session_impl.ipp
+++ b/ydb/public/sdk/cpp/client/ydb_topic/impl/read_session_impl.ipp
@@ -71,6 +71,9 @@ void TPartitionStreamImpl<UseMigrationProtocol>::RequestStatus() {
template<bool UseMigrationProtocol>
void TPartitionStreamImpl<UseMigrationProtocol>::ConfirmCreate(TMaybe<ui64> readOffset, TMaybe<ui64> commitOffset) {
if (auto sessionShared = CbContext->LockShared()) {
+ if (commitOffset.Defined()) {
+ SetFirstNotReadOffset(*commitOffset);
+ }
sessionShared->ConfirmPartitionStreamCreate(this, readOffset, commitOffset);
}
}
diff --git a/ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/rate_limiter/rate_limiter.h b/ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/rate_limiter/rate_limiter.h
index 8214c76c13..1dd328956d 100644
--- a/ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/rate_limiter/rate_limiter.h
+++ b/ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/rate_limiter/rate_limiter.h
@@ -176,6 +176,8 @@ struct TCreateResourceSettings
: public TOperationRequestSettings<TCreateResourceSettings>
, public THierarchicalDrrSettings<TCreateResourceSettings>
{
+ using TSelf = TCreateResourceSettings;
+
TCreateResourceSettings() = default;
TCreateResourceSettings(const Ydb::RateLimiter::CreateResourceRequest&);
@@ -187,6 +189,8 @@ struct TAlterResourceSettings
: public TOperationRequestSettings<TAlterResourceSettings>
, public THierarchicalDrrSettings<TAlterResourceSettings>
{
+ using TSelf = TAlterResourceSettings;
+
FLUENT_SETTING_OPTIONAL(TMeteringConfig, MeteringConfig);
};
diff --git a/ydb/public/sdk/cpp/src/client/resources/ya.make b/ydb/public/sdk/cpp/src/client/resources/ya.make
index a281190cd7..636a7e45bf 100644
--- a/ydb/public/sdk/cpp/src/client/resources/ya.make
+++ b/ydb/public/sdk/cpp/src/client/resources/ya.make
@@ -6,7 +6,6 @@ SRCS(
)
RESOURCE(
- ydb/public/sdk/cpp/src/client/resources/ydb_sdk_version.txt ydb_sdk_version_dev.txt
ydb/public/sdk/cpp/src/client/resources/ydb_root_ca.pem ydb_root_ca_dev.pem
)
diff --git a/ydb/public/sdk/cpp/src/client/resources/ydb_ca.cpp b/ydb/public/sdk/cpp/src/client/resources/ydb_ca.cpp
index d88d5de59a..54ed427355 100644
--- a/ydb/public/sdk/cpp/src/client/resources/ydb_ca.cpp
+++ b/ydb/public/sdk/cpp/src/client/resources/ydb_ca.cpp
@@ -2,10 +2,12 @@
#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/resources/ydb_ca.h>
+#include <ydb/public/sdk/cpp/src/version.h>
+
namespace NYdb::inline Dev {
std::string GetRootCertificate() {
- return NResource::Find("ydb_root_ca_dev.pem");
+ return NResource::Find(YDB_CERTIFICATE_FILE_KEY);
}
} // namespace NYdb
diff --git a/ydb/public/sdk/cpp/src/client/resources/ydb_resources.cpp b/ydb/public/sdk/cpp/src/client/resources/ydb_resources.cpp
index 8f1ee52fd4..c56ee5f07e 100644
--- a/ydb/public/sdk/cpp/src/client/resources/ydb_resources.cpp
+++ b/ydb/public/sdk/cpp/src/client/resources/ydb_resources.cpp
@@ -2,6 +2,8 @@
#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/resources/ydb_resources.h>
+#include <ydb/public/sdk/cpp/src/version.h>
+
namespace NYdb::inline Dev {
const char* YDB_AUTH_TICKET_HEADER = "x-ydb-auth-ticket";
@@ -29,7 +31,7 @@ const char* YDB_CLIENT_CAPABILITY_SESSION_BALANCER = "session-balancer";
std::string GetSdkSemver() {
- return NResource::Find("ydb_sdk_version_dev.txt");
+ return YDB_SDK_VERSION;
}
} // namespace NYdb
diff --git a/ydb/public/sdk/cpp/src/client/resources/ydb_sdk_version.txt b/ydb/public/sdk/cpp/src/client/resources/ydb_sdk_version.txt
deleted file mode 100644
index acf9bf09db..0000000000
--- a/ydb/public/sdk/cpp/src/client/resources/ydb_sdk_version.txt
+++ /dev/null
@@ -1 +0,0 @@
-3.2.2 \ No newline at end of file
diff --git a/ydb/public/sdk/cpp/src/client/topic/proto_accessor.cpp b/ydb/public/sdk/cpp/src/client/topic/impl/proto_accessor.cpp
index 421ed72d1f..421ed72d1f 100644
--- a/ydb/public/sdk/cpp/src/client/topic/proto_accessor.cpp
+++ b/ydb/public/sdk/cpp/src/client/topic/impl/proto_accessor.cpp
diff --git a/ydb/public/sdk/cpp/src/client/topic/impl/read_session_impl.ipp b/ydb/public/sdk/cpp/src/client/topic/impl/read_session_impl.ipp
index 5c62522633..55f7b11803 100644
--- a/ydb/public/sdk/cpp/src/client/topic/impl/read_session_impl.ipp
+++ b/ydb/public/sdk/cpp/src/client/topic/impl/read_session_impl.ipp
@@ -73,6 +73,9 @@ void TPartitionStreamImpl<UseMigrationProtocol>::RequestStatus() {
template<bool UseMigrationProtocol>
void TPartitionStreamImpl<UseMigrationProtocol>::ConfirmCreate(std::optional<ui64> readOffset, std::optional<ui64> commitOffset) {
if (auto sessionShared = CbContext->LockShared()) {
+ if (commitOffset.has_value()) {
+ SetFirstNotReadOffset(commitOffset.value());
+ }
sessionShared->ConfirmPartitionStreamCreate(this, readOffset, commitOffset);
}
}
diff --git a/ydb/public/sdk/cpp/src/client/topic/impl/ya.make b/ydb/public/sdk/cpp/src/client/topic/impl/ya.make
index 98d1745b15..597b5f4abe 100644
--- a/ydb/public/sdk/cpp/src/client/topic/impl/ya.make
+++ b/ydb/public/sdk/cpp/src/client/topic/impl/ya.make
@@ -7,6 +7,7 @@ SRCS(
deferred_commit.cpp
event_handlers.cpp
offsets_collector.cpp
+ proto_accessor.cpp
read_session_event.cpp
read_session_impl.ipp
read_session.h
diff --git a/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp b/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp
index 37f2d20b5e..3e5b9485b6 100644
--- a/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp
+++ b/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp
@@ -3,7 +3,7 @@
#include <ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/ut_utils.h>
#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/topic/client.h>
-
+
#include <ydb/public/sdk/cpp/src/client/persqueue_public/persqueue.h>
#include <ydb/public/sdk/cpp/src/client/topic/impl/common.h>
@@ -661,6 +661,64 @@ Y_UNIT_TEST_SUITE(BasicUsage) {
Sleep(TDuration::Seconds(5));
}
+ Y_UNIT_TEST(ConfirmPartitionSessionWithCommitOffset) {
+ // TStartPartitionSessionEvent::Confirm(readOffset, commitOffset) should work,
+ // if commitOffset passed to Confirm is greater than the offset committed previously by the consumer.
+ // https://st.yandex-team.ru/KIKIMR-23015
+
+ auto setup = TTopicSdkTestSetup(TEST_CASE_NAME);
+
+ {
+ // Write 2 messages:
+ auto settings = NTopic::TWriteSessionSettings()
+ .Path(setup.GetTopicPath())
+ .MessageGroupId(TEST_MESSAGE_GROUP_ID)
+ .ProducerId(TEST_MESSAGE_GROUP_ID);
+ auto client = setup.MakeClient();
+ auto writer = client.CreateSimpleBlockingWriteSession(settings);
+ writer->Write("message");
+ writer->Write("message");
+ writer->Close();
+ }
+
+ {
+ // Read messages:
+ auto settings = NTopic::TReadSessionSettings()
+ .ConsumerName(TEST_CONSUMER)
+ .AppendTopics(std::string(setup.GetTopicPath()));
+
+ auto client = setup.MakeClient();
+ auto reader = client.CreateReadSession(settings);
+
+ {
+ // Start partition session and request to read from offset 1 and commit offset 1:
+ auto event = reader->GetEvent(true);
+ UNIT_ASSERT(event.has_value());
+ UNIT_ASSERT(std::holds_alternative<TReadSessionEvent::TStartPartitionSessionEvent>(*event));
+ auto& startPartitionSession = std::get<TReadSessionEvent::TStartPartitionSessionEvent>(*event);
+ startPartitionSession.Confirm(/*readOffset=*/ 1, /*commitOffset=*/ 1);
+ }
+
+ {
+ // Receive a message with offset 1 and commit it:
+ auto event = reader->GetEvent(true);
+ UNIT_ASSERT(event.has_value());
+ UNIT_ASSERT(std::holds_alternative<TReadSessionEvent::TDataReceivedEvent>(*event));
+ auto& dataReceived = std::get<TReadSessionEvent::TDataReceivedEvent>(*event);
+
+ // Here we should commit range [1, 2), not [0, 2):
+ dataReceived.Commit();
+ }
+
+ {
+ // And then get a TCommitOffsetAcknowledgementEvent:
+ auto event = reader->GetEvent(true);
+ UNIT_ASSERT(event.has_value());
+ UNIT_ASSERT(std::holds_alternative<TReadSessionEvent::TCommitOffsetAcknowledgementEvent>(*event));
+ }
+ }
+ }
+
Y_UNIT_TEST(ConflictingWrites) {
TTopicSdkTestSetup setup(TEST_CASE_NAME);
diff --git a/ydb/public/sdk/cpp/src/client/topic/ya.make b/ydb/public/sdk/cpp/src/client/topic/ya.make
index 3867d37301..edd8aff0aa 100644
--- a/ydb/public/sdk/cpp/src/client/topic/ya.make
+++ b/ydb/public/sdk/cpp/src/client/topic/ya.make
@@ -2,7 +2,6 @@ LIBRARY()
SRCS(
out.cpp
- proto_accessor.cpp
)
PEERDIR(
diff --git a/ydb/public/sdk/cpp/src/version.h b/ydb/public/sdk/cpp/src/version.h
new file mode 100644
index 0000000000..af5ef8048e
--- /dev/null
+++ b/ydb/public/sdk/cpp/src/version.h
@@ -0,0 +1,8 @@
+#pragma once
+
+namespace NYdb {
+
+inline const char* YDB_SDK_VERSION = "dev";
+inline const char* YDB_CERTIFICATE_FILE_KEY = "ydb_root_ca_dev.pem";
+
+} // namespace NYdb
diff --git a/ydb/services/metadata/ds_table/accessor_snapshot_base.cpp b/ydb/services/metadata/ds_table/accessor_snapshot_base.cpp
index 6e90bbe904..0ff4eeb56e 100644
--- a/ydb/services/metadata/ds_table/accessor_snapshot_base.cpp
+++ b/ydb/services/metadata/ds_table/accessor_snapshot_base.cpp
@@ -32,8 +32,8 @@ void TDSAccessorBase::Handle(NRequest::TEvRequestResult<NRequest::TDialogYQLRequ
for (auto&& i : managers) {
auto it = CurrentExistence.find(i->GetStorageTablePath());
Y_ABORT_UNLESS(it != CurrentExistence.end());
- Y_ABORT_UNLESS(it->second);
- if (it->second == 1) {
+ Y_ABORT_UNLESS(it->second.State != EState::UNKNOWN);
+ if (it->second.State == EState::EXISTS) {
Y_ABORT_UNLESS((int)replyIdx < qResult.result_sets().size());
*qResultFull.add_result_sets() = std::move(qResult.result_sets()[replyIdx]);
++replyIdx;
@@ -66,7 +66,13 @@ void TDSAccessorBase::Handle(TEvRecheckExistence::TPtr& ev) {
}
void TDSAccessorBase::Handle(TTableExistsActor::TEvController::TEvError::TPtr& ev) {
- AFL_ERROR(NKikimrServices::METADATA_PROVIDER)("action", "cannot detect path existence")("path", ev->Get()->GetPath())("error", ev->Get()->GetErrorMessage());
+ auto it = ExistenceChecks.find(ev->Get()->GetPath());
+ if (it == ExistenceChecks.end() || it->second.RetryCount == 0) {
+ AFL_ERROR(NKikimrServices::METADATA_PROVIDER)("action", "cannot detect path existence")("path", ev->Get()->GetPath())("error", ev->Get()->GetErrorMessage());
+ Schedule(TDuration::Seconds(1), new TEvRecheckExistence(ev->Get()->GetPath()));
+ return;
+ }
+ ++it->second.RetryCount;
Schedule(TDuration::Seconds(1), new TEvRecheckExistence(ev->Get()->GetPath()));
}
@@ -74,16 +80,16 @@ void TDSAccessorBase::Handle(TTableExistsActor::TEvController::TEvResult::TPtr&
auto it = ExistenceChecks.find(ev->Get()->GetTablePath());
Y_ABORT_UNLESS(it != ExistenceChecks.end());
if (ev->Get()->IsTableExists()) {
- it->second = 1;
+ it->second.State = EState::EXISTS;
} else {
- it->second = -1;
+ it->second.State = EState::NON_EXISTS;
}
bool hasExists = false;
for (auto&& i : ExistenceChecks) {
- if (i.second == 0) {
+ if (i.second.State == EState::UNKNOWN) {
return;
}
- if (i.second == 1) {
+ if (i.second.State == EState::EXISTS) {
hasExists = true;
}
}
@@ -101,11 +107,11 @@ void TDSAccessorBase::StartSnapshotsFetching() {
bool hasExistsCheckers = false;
for (auto&& i : managers) {
auto it = ExistenceChecks.find(i->GetStorageTablePath());
- if (it == ExistenceChecks.end() || it->second == -1) {
+ if (it == ExistenceChecks.end() || it->second.State == EState::NON_EXISTS) {
Register(new TTableExistsActor(InternalController, i->GetStorageTablePath(), TDuration::Seconds(5)));
hasExistsCheckers = true;
- ExistenceChecks[i->GetStorageTablePath()] = 0;
- } else if (it->second == 0) {
+ ExistenceChecks[i->GetStorageTablePath()].State = EState::UNKNOWN;
+ } else if (it->second.State == EState::UNKNOWN) {
hasExistsCheckers = true;
}
}
@@ -123,8 +129,8 @@ void TDSAccessorBase::StartSnapshotsFetchingImpl() {
for (auto&& i : managers) {
auto it = CurrentExistence.find(i->GetStorageTablePath());
Y_ABORT_UNLESS(it != CurrentExistence.end());
- Y_ABORT_UNLESS(it->second);
- if (it->second == 1) {
+ Y_ABORT_UNLESS(it->second.State != EState::UNKNOWN);
+ if (it->second.State == EState::EXISTS) {
sb << "SELECT * FROM `" + EscapeC(i->GetStorageTablePath()) + "`;" << Endl;
}
}
diff --git a/ydb/services/metadata/ds_table/accessor_snapshot_base.h b/ydb/services/metadata/ds_table/accessor_snapshot_base.h
index 39c48f7bfd..1b9f4dc33c 100644
--- a/ydb/services/metadata/ds_table/accessor_snapshot_base.h
+++ b/ydb/services/metadata/ds_table/accessor_snapshot_base.h
@@ -68,11 +68,20 @@ public:
class TDSAccessorBase: public NActors::TActorBootstrapped<TDSAccessorBase> {
private:
+ enum class EState {
+ UNKNOWN,
+ EXISTS,
+ NON_EXISTS
+ };
+ struct TTableInfo {
+ EState State = EState::UNKNOWN;
+ int64_t RetryCount = 0;
+ };
using TBase = NActors::TActorBootstrapped<TDSAccessorBase>;
YDB_READONLY(TInstant, RequestedActuality, TInstant::Zero());
const NRequest::TConfig Config;
- std::map<TString, i32> ExistenceChecks;
- std::map<TString, i32> CurrentExistence;
+ std::map<TString, TTableInfo> ExistenceChecks;
+ std::map<TString, TTableInfo> CurrentExistence;
void StartSnapshotsFetchingImpl();
protected:
std::shared_ptr<TRefreshInternalController> InternalController;
diff --git a/ydb/services/persqueue_v1/actors/partition_actor.cpp b/ydb/services/persqueue_v1/actors/partition_actor.cpp
index 6fe140ab5f..a44de8029a 100644
--- a/ydb/services/persqueue_v1/actors/partition_actor.cpp
+++ b/ydb/services/persqueue_v1/actors/partition_actor.cpp
@@ -906,10 +906,6 @@ void TPartitionActor::Handle(TEvTabletPipe::TEvClientConnected::TPtr& ev, const
TabletGeneration = msg->Generation;
NodeId = msg->ServerId.NodeId();
-
- if (InitDone) {
- ctx.Send(ParentId, new TEvPQProxy::TEvUpdateSession(Partition, NodeId, TabletGeneration));
- }
}
void TPartitionActor::Handle(TEvTabletPipe::TEvClientDestroyed::TPtr& ev, const TActorContext& ctx) {
@@ -1160,6 +1156,10 @@ void TPartitionActor::OnDirectReadsRestored() {
const auto& ctx = ActorContext();
LOG_DEBUG_S(ctx, NKikimrServices::PQ_READ_PROXY, PQ_LOG_PREFIX << " " << Partition
<< "Restore direct reads done, continue working");
+
+ if (InitDone) {
+ ctx.Send(ParentId, new TEvPQProxy::TEvUpdateSession(Partition, NodeId, TabletGeneration));
+ }
ResendRecentRequests();
}
@@ -1320,7 +1320,7 @@ void TPartitionActor::Handle(TEvPQProxy::TEvRead::TPtr& ev, const TActorContext&
auto request = MakeReadRequest(ReadOffset, 0, req->MaxCount, req->MaxSize, req->MaxTimeLagMs, req->ReadTimestampMs, DirectReadId);
RequestInfly = true;
CurrentRequest = request;
-
+
if (!PipeClient) //Pipe will be recreated soon
return;
diff --git a/ydb/services/persqueue_v1/topic_yql_ut.cpp b/ydb/services/persqueue_v1/topic_yql_ut.cpp
index be0185301e..4a3339b934 100644
--- a/ydb/services/persqueue_v1/topic_yql_ut.cpp
+++ b/ydb/services/persqueue_v1/topic_yql_ut.cpp
@@ -186,6 +186,54 @@ Y_UNIT_TEST_SUITE(TTopicYqlTest) {
//1609462861
}
+ Y_UNIT_TEST(AlterAutopartitioning) {
+ NKikimrConfig::TFeatureFlags ff;
+ ff.SetEnableTopicSplitMerge(true);
+ auto settings = NKikimr::NPersQueueTests::PQSettings();
+ settings.SetFeatureFlags(ff);
+
+ NPersQueue::TTestServer server(settings);
+
+ {
+ const char *query = R"(
+ CREATE TOPIC `/Root/PQ/rt3.dc1--legacy--topic1`
+ )";
+
+ server.AnnoyingClient->RunYqlSchemeQuery(query);
+ }
+
+ {
+ const char *query = R"__(
+ ALTER TOPIC `/Root/PQ/rt3.dc1--legacy--topic1`
+ SET (
+ min_active_partitions = 7,
+ max_active_partitions = 100,
+ auto_partitioning_stabilization_window = Interval('PT1S'),
+ auto_partitioning_up_utilization_percent = 2,
+ auto_partitioning_down_utilization_percent = 1,
+ partition_write_speed_bytes_per_second = 3,
+ auto_partitioning_strategy = 'up'
+ );
+ )__";
+
+ server.AnnoyingClient->RunYqlSchemeQuery(query);
+ }
+
+ {
+ auto pqGroup = server.AnnoyingClient->Ls("/Root/PQ/rt3.dc1--legacy--topic1")->Record.GetPathDescription().GetPersQueueGroup();
+ const auto& describe = pqGroup.GetPQTabletConfig();
+
+ Cerr <<"=== PATH DESCRIPTION: \n" << pqGroup.DebugString();
+ UNIT_ASSERT_VALUES_EQUAL(describe.GetPartitionConfig().GetWriteSpeedInBytesPerSecond(), 3);
+ UNIT_ASSERT_VALUES_EQUAL(describe.GetPartitionStrategy().GetMinPartitionCount(), 7);
+ UNIT_ASSERT_VALUES_EQUAL(describe.GetPartitionStrategy().GetMaxPartitionCount(), 100);
+ UNIT_ASSERT_VALUES_EQUAL(describe.GetPartitionStrategy().GetScaleThresholdSeconds(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(describe.GetPartitionStrategy().GetScaleUpPartitionWriteSpeedThresholdPercent(), 2);
+ UNIT_ASSERT_VALUES_EQUAL(describe.GetPartitionStrategy().GetScaleDownPartitionWriteSpeedThresholdPercent(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(static_cast<int>(describe.GetPartitionStrategy().GetPartitionStrategyType()), static_cast<int>(::NKikimrPQ::TPQTabletConfig_TPartitionStrategyType::TPQTabletConfig_TPartitionStrategyType_CAN_SPLIT));
+ }
+ }
+
Y_UNIT_TEST(BadRequests) {
NPersQueue::TTestServer server;
{
diff --git a/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp b/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp
index 77e545feb9..9344467832 100644
--- a/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp
+++ b/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp
@@ -33,6 +33,7 @@
using namespace NYdb;
using namespace NYdb::NOperation;
using namespace NYdb::NRateLimiter;
+using namespace NYdb::NReplication;
using namespace NYdb::NScheme;
using namespace NYdb::NTable;
using namespace NYdb::NView;
@@ -1071,7 +1072,7 @@ void TestCoordinationNodeResourcesArePreserved(
}
}
-void WaitReplicationInit(NReplication::TReplicationClient& client, const TString& path) {
+void WaitReplicationInit(TReplicationClient& client, const TString& path) {
int retry = 0;
do {
auto result = client.DescribeReplication(path).ExtractValueSync();
@@ -1088,7 +1089,7 @@ void WaitReplicationInit(NReplication::TReplicationClient& client, const TString
void TestReplicationSettingsArePreserved(
const TString& endpoint,
NQuery::TSession& session,
- NReplication::TReplicationClient& client,
+ TReplicationClient& client,
TBackupFunction&& backup,
TRestoreFunction&& restore)
{
@@ -1647,7 +1648,7 @@ Y_UNIT_TEST_SUITE(BackupRestore) {
NQuery::TQueryClient queryClient(driver);
auto session = queryClient.GetSession().ExtractValueSync().GetSession();
- NReplication::TReplicationClient replicationClient(driver);
+ TReplicationClient replicationClient(driver);
TTempDir tempDir;
const auto& pathToBackup = tempDir.Path();
@@ -1667,7 +1668,7 @@ Y_UNIT_TEST_SUITE(BackupRestore) {
NQuery::TQueryClient queryClient(driver);
auto session = queryClient.GetSession().ExtractValueSync().GetSession();
- NReplication::TReplicationClient replicationClient(driver);
+ TReplicationClient replicationClient(driver);
TTempDir tempDir;
const auto& pathToBackup = tempDir.Path();
diff --git a/ydb/tests/fq/pq_async_io/mock_pq_gateway.cpp b/ydb/tests/fq/pq_async_io/mock_pq_gateway.cpp
index df85e8d7e2..9cbec0779d 100644
--- a/ydb/tests/fq/pq_async_io/mock_pq_gateway.cpp
+++ b/ydb/tests/fq/pq_async_io/mock_pq_gateway.cpp
@@ -78,7 +78,12 @@ class TMockPqGateway : public IMockPqGateway {
NYdb::TAsyncStatus AlterTopic(const TString& /*path*/, const NYdb::NTopic::TAlterTopicSettings& /*settings*/ = {}) override {return NYdb::TAsyncStatus{};}
NYdb::TAsyncStatus DropTopic(const TString& /*path*/, const NYdb::NTopic::TDropTopicSettings& /*settings*/ = {}) override {return NYdb::TAsyncStatus{};}
NYdb::NTopic::TAsyncDescribeTopicResult DescribeTopic(const TString& /*path*/,
- const NYdb::NTopic::TDescribeTopicSettings& /*settings*/ = {}) override {return NYdb::NTopic::TAsyncDescribeTopicResult{};}
+ const NYdb::NTopic::TDescribeTopicSettings& /*settings*/ = {}) override {
+ NYdb::TStatus success(NYdb::EStatus::SUCCESS, {});
+ Ydb::Topic::DescribeTopicResult describe;
+ describe.Addpartitions();
+ return NThreading::MakeFuture(NYdb::NTopic::TDescribeTopicResult(std::move(success), std::move(describe)));
+ }
NYdb::NTopic::TAsyncDescribeConsumerResult DescribeConsumer(const TString& /*path*/, const TString& /*consumer*/,
const NYdb::NTopic::TDescribeConsumerSettings& /*settings*/ = {}) override {return NYdb::NTopic::TAsyncDescribeConsumerResult{};}
@@ -106,6 +111,17 @@ class TMockPqGateway : public IMockPqGateway {
TMockPqGateway* Self;
};
+
+ struct TMockFederatedTopicClient : public IFederatedTopicClient {
+ NThreading::TFuture<std::vector<NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo>> GetAllTopicClusters() override {
+ std::vector<NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo> dbInfo;
+ dbInfo.emplace_back(
+ "", "dummy", "/Root",
+ NYdb::NFederatedTopic::TFederatedTopicClient::TClusterInfo::EStatus::AVAILABLE);
+ return NThreading::MakeFuture(std::move(dbInfo));
+ }
+ };
+
public:
TMockPqGateway(
@@ -142,6 +158,18 @@ public:
return NThreading::TFuture<TListStreams>{};
}
+ IPqGateway::TAsyncDescribeFederatedTopicResult DescribeFederatedTopic(
+ const TString& /*sessionId*/,
+ const TString& /*cluster*/,
+ const TString& /*database*/,
+ const TString& /*path*/,
+ const TString& /*token*/) override {
+ TDescribeFederatedTopicResult result;
+ auto& cluster = result.emplace_back();
+ cluster.PartitionsCount = 1;
+ return NThreading::MakeFuture(result);
+ }
+
void UpdateClusterConfigs(
const TString& /*clusterName*/,
const TString& /*endpoint*/,
@@ -154,6 +182,14 @@ public:
return MakeIntrusive<TMockTopicClient>(this);
}
+ IFederatedTopicClient::TPtr GetFederatedTopicClient(const NYdb::TDriver& /*driver*/, const NYdb::NFederatedTopic::TFederatedTopicClientSettings& /*settings*/) override {
+ return MakeIntrusive<TMockFederatedTopicClient>();
+ }
+
+ NYdb::NFederatedTopic::TFederatedTopicClientSettings GetFederatedTopicClientSettings() const override {
+ return {};
+ }
+
std::shared_ptr<TQueue> GetEventQueue(const TString& topic) {
if (!Queues.contains(topic)) {
Queues[topic] = std::make_shared<TQueue>(4_MB);
diff --git a/ydb/tests/fq/pq_async_io/ut/dq_pq_rd_read_actor_ut.cpp b/ydb/tests/fq/pq_async_io/ut/dq_pq_rd_read_actor_ut.cpp
index 34ef1eacb7..37a08d4129 100644
--- a/ydb/tests/fq/pq_async_io/ut/dq_pq_rd_read_actor_ut.cpp
+++ b/ydb/tests/fq/pq_async_io/ut/dq_pq_rd_read_actor_ut.cpp
@@ -11,6 +11,7 @@
#include <ydb/library/yql/dq/common/rope_over_buffer.h>
#include <library/cpp/testing/unittest/gtest.h>
#include <library/cpp/testing/unittest/registar.h>
+#include <ydb/tests/fq/pq_async_io/mock_pq_gateway.h>
#include <thread>
@@ -47,6 +48,7 @@ struct TFixture : public TPqIoTestFixture {
const THashMap<TString, TString> taskParams { {"pq", serializedParams} };
NYql::NPq::NProto::TDqPqTopicSource copySettings = settings;
+ TPqIoTestFixture setup;
auto [dqSource, dqSourceAsActor] = CreateDqPqRdReadActor(
actor.TypeEnv,
std::move(copySettings),
@@ -56,11 +58,14 @@ struct TFixture : public TPqIoTestFixture {
0,
secureParams,
taskParams,
+ setup.Driver,
+ {},
actor.SelfId(), // computeActorId
LocalRowDispatcherId,
actor.GetHolderFactory(),
MakeIntrusive<NMonitoring::TDynamicCounters>(),
- freeSpace
+ freeSpace,
+ CreateMockPqGateway(*CaSetup->Runtime, {}) // XXX Is this correct XXX
);
actor.InitAsyncInput(dqSource, dqSourceAsActor);
diff --git a/ydb/tests/fq/pq_async_io/ut/dq_pq_read_actor_ut.cpp b/ydb/tests/fq/pq_async_io/ut/dq_pq_read_actor_ut.cpp
index b03c795128..b6b89f399d 100644
--- a/ydb/tests/fq/pq_async_io/ut/dq_pq_read_actor_ut.cpp
+++ b/ydb/tests/fq/pq_async_io/ut/dq_pq_read_actor_ut.cpp
@@ -106,7 +106,9 @@ Y_UNIT_TEST_SUITE(TDqPqReadActorTest) {
while (Now() < deadline) {
SourceRead<TString>(UVParser);
if (future.HasValue()) {
- UNIT_ASSERT_STRING_CONTAINS(future.GetValue().ToOneLineString(), "Read session to topic \"NonExistentTopic\" was closed");
+ auto message = future.GetValue().ToOneLineString();
+ UNIT_ASSERT_STRING_CONTAINS(message, "Error: ");
+ UNIT_ASSERT_STRING_CONTAINS(message, " \"NonExistentTopic\" ");
failured = true;
break;
}
diff --git a/ydb/tests/functional/benchmarks_init/canondata/test_init.TestClickbenchInit.test_s1_row/s1_row b/ydb/tests/functional/benchmarks_init/canondata/test_init.TestClickbenchInit.test_s1_row/s1_row
index 2d3dd3b06a..0a0a1bb1b4 100644
--- a/ydb/tests/functional/benchmarks_init/canondata/test_init.TestClickbenchInit.test_s1_row/s1_row
+++ b/ydb/tests/functional/benchmarks_init/canondata/test_init.TestClickbenchInit.test_s1_row/s1_row
@@ -110,6 +110,8 @@ CREATE TABLE `/Root/db/Root/db/clickbench/s1` (
PRIMARY KEY (CounterID, EventDate, UserID, EventTime, WatchID)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 128
);
diff --git a/ydb/tests/functional/benchmarks_init/canondata/test_init.TestTpcdsInit.test_s1_row/s1_row b/ydb/tests/functional/benchmarks_init/canondata/test_init.TestTpcdsInit.test_s1_row/s1_row
index e20e3f4b40..8aa44ea5cc 100644
--- a/ydb/tests/functional/benchmarks_init/canondata/test_init.TestTpcdsInit.test_s1_row/s1_row
+++ b/ydb/tests/functional/benchmarks_init/canondata/test_init.TestTpcdsInit.test_s1_row/s1_row
@@ -18,6 +18,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/customer_address` (
PRIMARY KEY (ca_address_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -34,6 +36,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/customer_demographics` (
PRIMARY KEY (cd_demo_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -69,6 +73,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/date_dim` (
PRIMARY KEY (d_date_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -90,6 +96,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/warehouse` (
PRIMARY KEY (w_warehouse_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -103,6 +111,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/ship_mode` (
PRIMARY KEY (sm_ship_mode_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -120,6 +130,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/time_dim` (
PRIMARY KEY (t_time_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -130,6 +142,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/reason` (
PRIMARY KEY (r_reason_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -140,6 +154,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/income_band` (
PRIMARY KEY (ib_income_band_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -169,6 +185,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/item` (
PRIMARY KEY (i_item_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -205,6 +223,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/store` (
PRIMARY KEY (s_store_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -243,6 +263,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/call_center` (
PRIMARY KEY (cc_call_center_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -268,6 +290,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/customer` (
PRIMARY KEY (c_customer_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -301,6 +325,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/web_site` (
PRIMARY KEY (web_site_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -328,6 +354,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/store_returns` (
PRIMARY KEY (sr_item_sk, sr_ticket_number)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -340,6 +368,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/household_demographics` (
PRIMARY KEY (hd_demo_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -361,6 +391,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/web_page` (
PRIMARY KEY (wp_web_page_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -387,6 +419,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/promotion` (
PRIMARY KEY (p_promo_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -403,6 +437,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/catalog_page` (
PRIMARY KEY (cp_catalog_page_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -414,6 +450,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/inventory` (
PRIMARY KEY (inv_date_sk, inv_item_sk, inv_warehouse_sk)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -448,6 +486,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/catalog_returns` (
PRIMARY KEY (cr_item_sk, cr_order_number)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -479,6 +519,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/web_returns` (
PRIMARY KEY (wr_item_sk, wr_order_number)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -520,6 +562,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/web_sales` (
PRIMARY KEY (ws_item_sk, ws_order_number)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -561,6 +605,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/catalog_sales` (
PRIMARY KEY (cs_item_sk, cs_order_number)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -591,6 +637,8 @@ CREATE TABLE `/Root/db/Root/db/tpcds/s1/store_sales` (
PRIMARY KEY (ss_item_sk, ss_ticket_number)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
diff --git a/ydb/tests/functional/benchmarks_init/canondata/test_init.TestTpchInit.test_s1_row/s1_row b/ydb/tests/functional/benchmarks_init/canondata/test_init.TestTpchInit.test_s1_row/s1_row
index b8ec455d9e..1aa9d381d7 100644
--- a/ydb/tests/functional/benchmarks_init/canondata/test_init.TestTpchInit.test_s1_row/s1_row
+++ b/ydb/tests/functional/benchmarks_init/canondata/test_init.TestTpchInit.test_s1_row/s1_row
@@ -13,6 +13,8 @@ CREATE TABLE `/Root/db/Root/db/tpch/s1/customer` (
PRIMARY KEY (c_custkey)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -36,6 +38,8 @@ CREATE TABLE `/Root/db/Root/db/tpch/s1/lineitem` (
PRIMARY KEY (l_orderkey, l_linenumber)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -47,6 +51,8 @@ CREATE TABLE `/Root/db/Root/db/tpch/s1/nation` (
PRIMARY KEY (n_nationkey)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1
);
@@ -63,6 +69,8 @@ CREATE TABLE `/Root/db/Root/db/tpch/s1/orders` (
PRIMARY KEY (o_orderkey)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -79,6 +87,8 @@ CREATE TABLE `/Root/db/Root/db/tpch/s1/part` (
PRIMARY KEY (p_partkey)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -91,6 +101,8 @@ CREATE TABLE `/Root/db/Root/db/tpch/s1/partsupp` (
PRIMARY KEY (ps_partkey, ps_suppkey)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
@@ -101,6 +113,8 @@ CREATE TABLE `/Root/db/Root/db/tpch/s1/region` (
PRIMARY KEY (r_regionkey)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1
);
@@ -115,6 +129,8 @@ CREATE TABLE `/Root/db/Root/db/tpch/s1/supplier` (
PRIMARY KEY (s_suppkey)
)
WITH (
+ STORE = ROW,
+ AUTO_PARTITIONING_PARTITION_SIZE_MB = 2000,
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 64
);
diff --git a/ydb/tests/functional/canonical/canondata/test_sql.TestCanonicalFolder1.test_case_write_multi_usage.script-script_/write_multi_usage.script.plan b/ydb/tests/functional/canonical/canondata/test_sql.TestCanonicalFolder1.test_case_write_multi_usage.script-script_/write_multi_usage.script.plan
index 8c8f361d05..3707f7e969 100644
--- a/ydb/tests/functional/canonical/canondata/test_sql.TestCanonicalFolder1.test_case_write_multi_usage.script-script_/write_multi_usage.script.plan
+++ b/ydb/tests/functional/canonical/canondata/test_sql.TestCanonicalFolder1.test_case_write_multi_usage.script-script_/write_multi_usage.script.plan
@@ -65,6 +65,7 @@
"Name (-\u221e, +\u221e)"
],
"ReadRangesPointPrefixLen": "0",
+ "Reverse": false,
"Scan": "Sequential",
"Table": "base_write_multi_usage_script_script/Temp"
}
@@ -144,6 +145,7 @@
"Name (-\u221e, +\u221e)"
],
"ReadRangesPointPrefixLen": "0",
+ "Reverse": false,
"Scan": "Sequential",
"Table": "base_write_multi_usage_script_script/Input1"
}
diff --git a/ydb/tests/functional/clickbench/canondata/test.test_plans_column_/queries-original-plan-column-0 b/ydb/tests/functional/clickbench/canondata/test.test_plans_column_/queries-original-plan-column-0
index 2626baeb22..55e2200cc8 100644
--- a/ydb/tests/functional/clickbench/canondata/test.test_plans_column_/queries-original-plan-column-0
+++ b/ydb/tests/functional/clickbench/canondata/test.test_plans_column_/queries-original-plan-column-0
@@ -79,7 +79,9 @@
"Inputs": [],
"Name": "TableFullScan",
"Path": "/local/clickbench/plans/column/hits",
- "ReadColumns": null,
+ "ReadColumns": [
+ "CounterID"
+ ],
"ReadRanges": [
"CounterID (-\u221e, +\u221e)",
"EventDate (-\u221e, +\u221e)",
@@ -141,6 +143,9 @@
"name": "/local/clickbench/plans/column/hits",
"reads": [
{
+ "columns": [
+ "CounterID"
+ ],
"scan_by": [
"CounterID (-\u221e, +\u221e)",
"EventDate (-\u221e, +\u221e)",
diff --git a/ydb/tests/functional/compatibility/test_stress.py b/ydb/tests/functional/compatibility/test_stress.py
index a6e6ca2500..6fef8a189b 100644
--- a/ydb/tests/functional/compatibility/test_stress.py
+++ b/ydb/tests/functional/compatibility/test_stress.py
@@ -56,21 +56,6 @@ class TestStress(object):
+ ["--path", path]
)
- def set_auto_partitioning_size_mb(self, path, size_mb):
- yatest.common.execute(
- [
- yatest.common.binary_path(os.getenv("YDB_CLI_BINARY")),
- "--verbose",
- "--endpoint",
- "grpc://localhost:%d" % self.cluster.nodes[1].grpc_port,
- "--database=/Root",
- "sql", "-s",
- "ALTER TABLE `{}` SET (AUTO_PARTITIONING_PARTITION_SIZE_MB={})".format(path, size_mb),
- ],
- stdout=self.output_f,
- stderr=self.output_f,
- )
-
@pytest.mark.parametrize("store_type", ["row"])
def test_log(self, store_type):
timeout_scale = 60
@@ -228,6 +213,7 @@ class TestStress(object):
"init",
"--store={}".format(store_type),
"--datetime", # use 32 bit dates instead of 64 (not supported in 24-4)
+ "--partition-size=25",
]
import_command = [
yatest.common.binary_path(os.getenv("YDB_CLI_BINARY")),
@@ -256,26 +242,13 @@ class TestStress(object):
"run",
"--scale=1",
"--exclude",
- # not working for row tables
- "17",
+ "17", # not working for row tables
"--check-canonical",
+ "--retries",
+ "5", # in row tables we have to retry query by design
]
yatest.common.execute(init_command, wait=True, stdout=self.output_f, stderr=self.output_f)
-
- # make tables distributed across nodes
- tables = [
- "lineitem",
- "nation",
- "orders",
- "part",
- "partsupp",
- "region",
- "supplier",
- ]
- for table in tables:
- self.set_auto_partitioning_size_mb("tpch/{}".format(table), 25)
-
yatest.common.execute(import_command, wait=True, stdout=self.output_f, stderr=self.output_f)
yatest.common.execute(run_command, wait=True, stdout=self.output_f, stderr=self.output_f)
@@ -296,6 +269,7 @@ class TestStress(object):
"init",
"--store={}".format(store_type),
"--datetime", # use 32 bit dates instead of 64 (not supported in 24-4)
+ "--partition-size=25",
]
import_command = [
yatest.common.binary_path(os.getenv("YDB_CLI_BINARY")),
@@ -327,39 +301,10 @@ class TestStress(object):
"--exclude",
# not working for row tables
"5,7,14,18,22,23,24,26,27,31,33,39,46,51,54,56,58,60,61,64,66,67,68,72,75,77,78,79,80,93",
+ "--retries",
+ "5", # in row tables we have to retry query by design
]
yatest.common.execute(init_command, wait=True, stdout=self.output_f, stderr=self.output_f)
-
- # make table distributed across nodes
- tables = [
- "call_center",
- "catalog_page",
- "catalog_returns",
- "catalog_sales",
- "customer",
- "customer_demographics",
- "date_dim",
- "household_demographics",
- "income_band",
- "inventory",
- "item",
- "promotion",
- "reason",
- "ship_mode",
- "store",
- "store_returns",
- "store_sales",
- "time_dim",
- "warehouse",
- "web_page",
- "web_returns",
- "web_sales",
- "web_site",
- ]
-
- for table in tables:
- self.set_auto_partitioning_size_mb("tpcds/{}".format(table), 25)
-
yatest.common.execute(import_command, wait=True, stdout=self.output_f, stderr=self.output_f)
yatest.common.execute(run_command, wait=True, stdout=self.output_f, stderr=self.output_f)
diff --git a/ydb/tests/functional/replication/main.cpp b/ydb/tests/functional/replication/main.cpp
deleted file mode 100644
index 2766ea519e..0000000000
--- a/ydb/tests/functional/replication/main.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-#include <util/system/env.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/driver/driver.h>
-#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/table/table.h>
-#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/proto/accessor.h>
-#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/draft/ydb_scripting.h>
-
-#include <library/cpp/threading/local_executor/local_executor.h>
-
-using namespace NYdb;
-using namespace NYdb::NTable;
-
-namespace {
-
-std::pair<ui64, Ydb::ResultSet> DoRead(TSession& s, const TString& table) {
- auto res = s.ExecuteDataQuery(
- Sprintf("SELECT * FROM `/local/%s`; SELECT COUNT(*) AS __count FROM `/local/%s`;",
- table.data(), table.data()), TTxControl::BeginTx().CommitTx()).GetValueSync();
- UNIT_ASSERT_C(res.IsSuccess(), res.GetIssues().ToString());
- auto rs = NYdb::TResultSetParser(res.GetResultSet(1));
- UNIT_ASSERT(rs.TryNextRow());
- auto count = rs.ColumnParser("__count").GetUint64();
-
- const auto proto = NYdb::TProtoAccessor::GetProto(res.GetResultSet(0));
- return {count, proto};
-}
-
-} // namespace
-
-Y_UNIT_TEST_SUITE(Replication)
-{
- Y_UNIT_TEST(Types)
- {
- TString connectionString = GetEnv("YDB_ENDPOINT") + "/?database=" + GetEnv("YDB_DATABASE");
- auto config = TDriverConfig(connectionString);
- auto driver = TDriver(config);
- auto tableClient = TTableClient(driver);
- auto session = tableClient.GetSession().GetValueSync().GetSession();
-
- {
- auto res = session.ExecuteSchemeQuery(R"(
- CREATE TABLE `/local/ProducerUuidValue` (
- Key Uint32,
- Key2 Uuid,
- v01 Uuid,
- v02 Uuid NOT NULL,
- v03 Double,
- PRIMARY KEY (Key, Key2)
- );
- )").GetValueSync();
- UNIT_ASSERT_C(res.IsSuccess(), res.GetIssues().ToString());
- }
-
- {
- auto sessionResult = tableClient.GetSession().GetValueSync();
- UNIT_ASSERT_C(sessionResult.IsSuccess(), sessionResult.GetIssues().ToString());
- auto s = sessionResult.GetSession();
-
- {
- const TString query = "UPSERT INTO ProducerUuidValue (Key,Key2,v01,v02,v03) VALUES"
- "(1, "
- "CAST(\"00078af5-0000-0000-6c0b-040000000000\" as Uuid), "
- "CAST(\"00078af5-0000-0000-6c0b-040000000001\" as Uuid), "
- "UNWRAP(CAST(\"00078af5-0000-0000-6c0b-040000000002\" as Uuid)), "
- "CAST(\"311111111113.222222223\" as Double) "
- ");";
- auto res = s.ExecuteDataQuery(query, TTxControl::BeginTx().CommitTx()).GetValueSync();
- UNIT_ASSERT_C(res.IsSuccess(), res.GetIssues().ToString());
- }
-
- {
- const TString query = "GRANT ALL ON `/local` TO `user@builtin`";
- auto res = s.ExecuteSchemeQuery(query).GetValueSync();
- UNIT_ASSERT_C(res.IsSuccess(), res.GetIssues().ToString());
- }
-
- {
- const TString query = Sprintf("CREATE ASYNC REPLICATION `replication` FOR"
- "`ProducerUuidValue` AS `ConsumerUuidValue`"
- "WITH ("
- "CONNECTION_STRING = 'grpc://%s',"
- "TOKEN = 'user@builtin'"
- ");", connectionString.data());
- auto res = s.ExecuteSchemeQuery(query).GetValueSync();
- UNIT_ASSERT_C(res.IsSuccess(), res.GetIssues().ToString());
- }
- // TODO: Make CREATE ASYNC REPLICATION to be a sync call
- Sleep(TDuration::Seconds(10));
- }
-
- NYdb::NTable::TExecDataQuerySettings execSettings;
- execSettings.KeepInQueryCache(true);
-
- auto sessionResult = tableClient.GetSession().GetValueSync();
- UNIT_ASSERT_C(sessionResult.IsSuccess(), sessionResult.GetIssues().ToString());
-
- auto s = sessionResult.GetSession();
- TUuidValue expectedKey2("00078af5-0000-0000-6c0b-040000000000");
- TUuidValue expectedV1("00078af5-0000-0000-6c0b-040000000001");
- TUuidValue expectedV2("00078af5-0000-0000-6c0b-040000000002");
- double expectedV3 = 311111111113.222222223;
- ui32 attempt = 10;
- while (--attempt) {
- auto res = DoRead(s, "ConsumerUuidValue");
- if (res.first == 1) {
- const Ydb::ResultSet& proto = res.second;
- UNIT_ASSERT_VALUES_EQUAL(proto.rows(0).items(0).uint32_value(), 1);
- UNIT_ASSERT_VALUES_EQUAL(proto.rows(0).items(1).low_128(), expectedKey2.Buf_.Halfs[0]);
- UNIT_ASSERT_VALUES_EQUAL(proto.rows(0).items(1).high_128(), expectedKey2.Buf_.Halfs[1]);
- UNIT_ASSERT_VALUES_EQUAL(proto.rows(0).items(2).low_128(), expectedV1.Buf_.Halfs[0]);
- UNIT_ASSERT_VALUES_EQUAL(proto.rows(0).items(2).high_128(), expectedV1.Buf_.Halfs[1]);
- UNIT_ASSERT_VALUES_EQUAL(proto.rows(0).items(3).low_128(), expectedV2.Buf_.Halfs[0]);
- UNIT_ASSERT_VALUES_EQUAL(proto.rows(0).items(3).high_128(), expectedV2.Buf_.Halfs[1]);
- UNIT_ASSERT_DOUBLES_EQUAL(proto.rows(0).items(4).double_value(), expectedV3, 0.0001);
- break;
- }
- Sleep(TDuration::Seconds(1));
- }
-
- UNIT_ASSERT_C(attempt, "Unable to wait replication result");
- }
-}
-
diff --git a/ydb/tests/functional/replication/replication.cpp b/ydb/tests/functional/replication/replication.cpp
new file mode 100644
index 0000000000..504ed965f0
--- /dev/null
+++ b/ydb/tests/functional/replication/replication.cpp
@@ -0,0 +1,104 @@
+#include "utils.h"
+
+using namespace NReplicationTest;
+
+Y_UNIT_TEST_SUITE(Replication)
+{
+ Y_UNIT_TEST(Types)
+ {
+ MainTestCase testCase;
+ testCase.CreateSourceTable(R"(
+ CREATE TABLE `%s` (
+ Key Uint32,
+ Key2 Uuid,
+ v01 Uuid,
+ v02 Uuid NOT NULL,
+ v03 Double,
+ PRIMARY KEY (Key, Key2)
+ );
+ )");
+
+ testCase.ExecuteSourceTableQuery(R"(
+ UPSERT INTO `%s` (Key,Key2,v01,v02,v03) VALUES
+ (
+ 1,
+ CAST("00078af5-0000-0000-6c0b-040000000000" as Uuid),
+ CAST("00078af5-0000-0000-6c0b-040000000001" as Uuid),
+ UNWRAP(CAST("00078af5-0000-0000-6c0b-040000000002" as Uuid)),
+ CAST("311111111113.222222223" as Double)
+ );
+ )");
+
+ testCase.CreateReplication();
+
+ testCase.CheckResult({{
+ _C("Key2", TUuidValue("00078af5-0000-0000-6c0b-040000000000")),
+ _C("v01", TUuidValue("00078af5-0000-0000-6c0b-040000000001")),
+ _C("v02", TUuidValue("00078af5-0000-0000-6c0b-040000000002")),
+ _C("v03", 311111111113.222222223)
+ }});
+
+ testCase.DropReplication();
+ testCase.DropSourceTable();
+ }
+
+ Y_UNIT_TEST(PauseAndResumeReplication)
+ {
+ MainTestCase testCase;
+ testCase.CreateSourceTable(R"(
+ CREATE TABLE `%s` (
+ Key Uint64 NOT NULL,
+ Message Utf8,
+ PRIMARY KEY (Key)
+ );
+ )");
+
+ testCase.CreateReplication();
+
+ testCase.ExecuteSourceTableQuery("INSERT INTO `%s` (`Key`, `Message`) VALUES (1, 'Message-1');");
+
+ testCase.CheckResult({{
+ _C("Message", TString("Message-1"))
+ }});
+
+ testCase.CheckReplicationState(TReplicationDescription::EState::Running);
+
+ Cerr << "State: Paused" << Endl << Flush;
+
+ testCase.PauseReplication();
+
+ Sleep(TDuration::Seconds(1));
+ testCase.CheckReplicationState(TReplicationDescription::EState::Paused);
+
+ testCase.ExecuteSourceTableQuery("INSERT INTO `%s` (`Key`, `Message`) VALUES (2, 'Message-2');");
+
+ // Replication is paused. New messages aren`t added to the table.
+ Sleep(TDuration::Seconds(3));
+ testCase.CheckResult({{
+ _C("Message", TString("Message-1"))
+ }});
+
+ Cerr << "State: StandBy" << Endl << Flush;
+
+ testCase.ResumeReplication();
+
+ // Replication is resumed. New messages are added to the table.
+ testCase.CheckReplicationState(TReplicationDescription::EState::Running);
+ testCase.CheckResult({{
+ _C("Message", TString("Message-1"))
+ }, {
+ _C("Message", TString("Message-2")),
+ }});
+
+ // More cycles for pause/resume
+ testCase.PauseReplication();
+ testCase.CheckReplicationState(TReplicationDescription::EState::Paused);
+
+ testCase.ResumeReplication();
+ testCase.CheckReplicationState(TReplicationDescription::EState::Running);
+
+ testCase.DropReplication();
+ testCase.DropSourceTable();
+ }
+}
+
diff --git a/ydb/tests/functional/replication/transfer.cpp b/ydb/tests/functional/replication/transfer.cpp
new file mode 100644
index 0000000000..59a9b8c231
--- /dev/null
+++ b/ydb/tests/functional/replication/transfer.cpp
@@ -0,0 +1,27 @@
+#include "utils.h"
+
+using namespace NReplicationTest;
+
+Y_UNIT_TEST_SUITE(Transfer)
+{
+ Y_UNIT_TEST(CreateTransfer)
+ {
+ MainTestCase testCase;
+ testCase.ExecuteDDL(Sprintf(R"(
+ $l = ($x) -> {
+ return [
+ <|
+ Key:CAST($x._offset AS Uint64)
+ |>
+ ];
+ };
+
+ CREATE TRANSFER `%s`
+ FROM `SourceTopic` TO `TargetTable` USING $l
+ WITH (
+ CONNECTION_STRING = 'grpc://localhost'
+ );
+ )", testCase.TransferName.data()), true, "The transfer is only available in the Enterprise version");
+ }
+}
+
diff --git a/ydb/tests/functional/replication/utils.h b/ydb/tests/functional/replication/utils.h
new file mode 100644
index 0000000000..03c4d54be6
--- /dev/null
+++ b/ydb/tests/functional/replication/utils.h
@@ -0,0 +1,572 @@
+#pragma once
+
+#include <util/system/env.h>
+#include <library/cpp/testing/unittest/registar.h>
+
+#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/driver/driver.h>
+#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/query/client.h>
+#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/topic/client.h>
+#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/proto/accessor.h>
+#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/draft/ydb_scripting.h>
+#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/draft/ydb_replication.h>
+
+#include <library/cpp/threading/local_executor/local_executor.h>
+
+using namespace NYdb;
+using namespace NYdb::NQuery;
+using namespace NYdb::NTopic;
+using namespace NYdb::NReplication;
+
+namespace NUnitTest::NPrivate {
+ template<>
+ inline bool CompareEqual<TUuidValue>(const TUuidValue& x, const TUuidValue& y) {
+ return x.ToString() == y.ToString();
+ }
+}
+
+template<>
+inline void Out<NYdb::Dev::TUuidValue>(IOutputStream& os, const NYdb::Dev::TUuidValue& value) {
+ os << value.ToString();
+}
+
+namespace NReplicationTest {
+
+struct IChecker {
+ virtual void Assert(const TString& msg, const ::Ydb::Value& value) = 0;
+ virtual ~IChecker() = default;
+};
+
+
+template<typename T>
+struct Checker : public IChecker {
+ Checker(T&& expected)
+ : Expected(std::move(expected))
+ {}
+
+ void Assert(const TString& msg, const ::Ydb::Value& value) override {
+ UNIT_ASSERT_VALUES_EQUAL_C(Get(value), Expected, msg);
+ }
+
+ T Get(const ::Ydb::Value& value);
+
+ T Expected;
+};
+
+template<>
+inline bool Checker<bool>::Get(const ::Ydb::Value& value) {
+ return value.bool_value();
+}
+
+template<>
+inline ui32 Checker<ui32>::Get(const ::Ydb::Value& value) {
+ return value.uint32_value();
+}
+
+template<>
+inline ui64 Checker<ui64>::Get(const ::Ydb::Value& value) {
+ return value.uint64_value();
+}
+
+template<>
+inline double Checker<double>::Get(const ::Ydb::Value& value) {
+ return value.double_value();
+}
+
+template<>
+inline TString Checker<TString>::Get(const ::Ydb::Value& value) {
+ return value.text_value();
+}
+
+template<>
+inline TInstant Checker<TInstant>::Get(const ::Ydb::Value& value) {
+ return TInstant::Days(value.uint32_value());
+}
+
+template<>
+inline TUuidValue Checker<TUuidValue>::Get(const ::Ydb::Value& value) {
+ return TUuidValue(value);
+}
+
+template<typename T>
+std::pair<TString, std::shared_ptr<IChecker>> _C(TString&& name, T&& expected) {
+ return {
+ std::move(name),
+ std::make_shared<Checker<T>>(std::move(expected))
+ };
+}
+
+struct TMessage {
+ TString Message;
+ std::optional<ui32> Partition = std::nullopt;
+ std::optional<TString> ProducerId = std::nullopt;
+ std::optional<TString> MessageGroupId = std::nullopt;
+ std::optional<ui64> SeqNo = std::nullopt;
+};
+
+inline TMessage _withSeqNo(ui64 seqNo) {
+ return {
+ .Message = TStringBuilder() << "Message-" << seqNo,
+ .Partition = 0,
+ .ProducerId = std::nullopt,
+ .MessageGroupId = std::nullopt,
+ .SeqNo = seqNo
+ };
+}
+
+inline TMessage _withProducerId(const TString& producerId) {
+ return {
+ .Message = TStringBuilder() << "Message-" << producerId,
+ .Partition = 0,
+ .ProducerId = producerId,
+ .MessageGroupId = std::nullopt,
+ .SeqNo = std::nullopt
+ };
+}
+
+inline TMessage _withMessageGroupId(const TString& messageGroupId) {
+ return {
+ .Message = TStringBuilder() << "Message-" << messageGroupId,
+ .Partition = 0,
+ .ProducerId = messageGroupId,
+ .MessageGroupId = messageGroupId,
+ .SeqNo = std::nullopt
+ };
+}
+
+using TExpectations = TVector<TVector<std::pair<TString, std::shared_ptr<IChecker>>>>;
+
+struct TConfig {
+ const TString TableDDL;
+ const TString Lambda;
+ const TVector<TMessage> Messages;
+ const TExpectations Expectations;
+ const TVector<TString> AlterLambdas;
+};
+
+struct MainTestCase {
+
+ MainTestCase()
+ : Id(RandomNumber<size_t>())
+ , ConnectionString(GetEnv("YDB_ENDPOINT") + "/?database=" + GetEnv("YDB_DATABASE"))
+ , TopicName(TStringBuilder() << "Topic_" << Id)
+ , SourceTableName(TStringBuilder() << "SourceTable_" << Id)
+ , TableName(TStringBuilder() << "Table_" << Id)
+ , ReplicationName(TStringBuilder() << "Replication_" << Id)
+ , TransferName(TStringBuilder() << "Transfer_" << Id)
+ , Driver(TDriverConfig(ConnectionString))
+ , TableClient(Driver)
+ , Session(TableClient.GetSession().GetValueSync().GetSession())
+ , TopicClient(Driver)
+ {
+ }
+
+ ~MainTestCase() {
+ Driver.Stop(true);
+ }
+
+ void ExecuteDDL(const TString& ddl, bool checkResult = true, const TString& expectedMessage = "") {
+ Cerr << "DDL: " << ddl << Endl << Flush;
+ auto res = Session.ExecuteQuery(ddl, TTxControl::NoTx()).GetValueSync();
+ if (checkResult) {
+ if (expectedMessage) {
+ UNIT_ASSERT(!res.IsSuccess());
+ Cerr << ">>>>> ACTUAL: " << res.GetIssues().ToOneLineString() << Endl << Flush;
+ Cerr << ">>>>> EXPECTED: " << expectedMessage << Endl << Flush;
+ UNIT_ASSERT(res.GetIssues().ToOneLineString().contains(expectedMessage));
+ } else {
+ UNIT_ASSERT_C(res.IsSuccess(), res.GetIssues().ToString());
+ }
+ }
+ }
+
+ auto ExecuteSourceTableQuery(const TString& query) {
+ for (size_t i = 10; i--;) {
+ auto q = Sprintf(query.data(), SourceTableName.data());
+ Cerr << ">>>>> Query: " << q << Endl << Flush;
+ auto res = Session.ExecuteQuery(q, TTxControl::NoTx()).GetValueSync();
+ if (res.IsSuccess()) {
+ return;
+ }
+
+ UNIT_ASSERT_C(i, res.GetIssues().ToString());
+ Sleep(TDuration::Seconds(1));
+ }
+ }
+
+ void CreateTable(const TString& tableDDL) {
+ ExecuteDDL(Sprintf(tableDDL.data(), TableName.data()));
+ }
+
+ void DropTable() {
+ ExecuteDDL(Sprintf("DROP TABLE `%s`", TableName.data()));
+ }
+
+ void CreateSourceTable(const TString& tableDDL) {
+ ExecuteDDL(Sprintf(tableDDL.data(), SourceTableName.data()));
+ }
+
+ void DropSourceTable() {
+ ExecuteDDL(Sprintf("DROP TABLE `%s`", SourceTableName.data()));
+ }
+
+ void CreateTopic(size_t partitionCount = 10) {
+ ExecuteDDL(Sprintf(R"(
+ CREATE TOPIC `%s`
+ WITH (
+ min_active_partitions = %d
+ );
+ )", TopicName.data(), partitionCount));
+ }
+
+ void DropTopic() {
+ ExecuteDDL(Sprintf("DROP TOPIC `%s`", TopicName.data()));
+ }
+
+ void CreateConsumer(const TString& consumerName) {
+ ExecuteDDL(Sprintf(R"(
+ ALTER TOPIC `%s`
+ ADD CONSUMER `%s`;
+ )", TopicName.data(), consumerName.data()));
+ }
+
+ struct CreateTransferSettings {
+ std::optional<TString> ConsumerName = std::nullopt;
+ std::optional<TDuration> FlushInterval;
+ std::optional<ui64> BatchSizeBytes;
+
+ CreateTransferSettings()
+ : ConsumerName(std::nullopt)
+ , FlushInterval(TDuration::Seconds(1))
+ , BatchSizeBytes(8_MB) {}
+
+ static CreateTransferSettings WithConsumerName(const TString& consumerName) {
+ CreateTransferSettings result;
+ result.ConsumerName = consumerName;
+ return result;
+ }
+
+ static CreateTransferSettings WithBatching(const TDuration& flushInterval, const ui64 batchSize) {
+ CreateTransferSettings result;
+ result.FlushInterval = flushInterval;
+ result.BatchSizeBytes = batchSize;
+ return result;
+ }
+ };
+
+ void CreateTransfer(const TString& lambda, const CreateTransferSettings& settings = CreateTransferSettings()) {
+ TStringBuilder sb;
+ if (settings.ConsumerName) {
+ sb << ", CONSUMER = '" << *settings.ConsumerName << "'" << Endl;
+ }
+ if (settings.FlushInterval) {
+ sb << ", FLUSH_INTERVAL = Interval('PT" << settings.FlushInterval->Seconds() << "S')" << Endl;
+ }
+ if (settings.BatchSizeBytes) {
+ sb << ", BATCH_SIZE_BYTES = " << *settings.BatchSizeBytes << Endl;
+ }
+
+ auto ddl = Sprintf(R"(
+ %s;
+
+ CREATE TRANSFER `%s`
+ FROM `%s` TO `%s` USING $l
+ WITH (
+ CONNECTION_STRING = 'grpc://%s'
+ %s
+ );
+ )", lambda.data(), TransferName.data(), TopicName.data(), TableName.data(), ConnectionString.data(), sb.data());
+
+ ExecuteDDL(ddl);
+ }
+
+ struct AlterTransferSettings {
+ std::optional<TString> TransformLambda;
+ std::optional<TDuration> FlushInterval;
+ std::optional<ui64> BatchSizeBytes;
+
+ AlterTransferSettings()
+ : FlushInterval(std::nullopt)
+ , BatchSizeBytes(std::nullopt) {}
+
+ static AlterTransferSettings WithBatching(const TDuration& flushInterval, const ui64 batchSize) {
+ AlterTransferSettings result;
+ result.FlushInterval = flushInterval;
+ result.BatchSizeBytes = batchSize;
+ return result;
+ }
+
+ static AlterTransferSettings WithTransformLambda(const TString& lambda) {
+ AlterTransferSettings result;
+ result.TransformLambda = lambda;
+ return result;
+ }
+ };
+
+ void AlterTransfer(const TString& lambda) {
+ AlterTransfer(AlterTransferSettings::WithTransformLambda(lambda));
+ }
+
+ void AlterTransfer(const AlterTransferSettings& settings, bool success = true) {
+ TString lambda = settings.TransformLambda ? *settings.TransformLambda : "";
+ TString setLambda = settings.TransformLambda ? "SET USING $l" : "";
+
+ TStringBuilder sb;
+ if (settings.FlushInterval) {
+ sb << "FLUSH_INTERVAL = Interval('PT" << settings.FlushInterval->Seconds() << "S')" << Endl;
+ }
+ if (settings.BatchSizeBytes) {
+ sb << ", BATCH_SIZE_BYTES = " << *settings.BatchSizeBytes << Endl;
+ }
+
+ TString setOptions;
+ if (!sb.empty()) {
+ setOptions = TStringBuilder() << "SET (" << sb << " )";
+ }
+
+ auto res = Session.ExecuteQuery(Sprintf(R"(
+ %s;
+
+ ALTER TRANSFER `%s`
+ %s
+ %s;
+ )", lambda.data(), TransferName.data(), setLambda.data(), setOptions.data()), TTxControl::NoTx()).GetValueSync();
+ UNIT_ASSERT_VALUES_EQUAL_C(success, res.IsSuccess(), res.GetIssues().ToString());
+ }
+
+ void DropTransfer() {
+ ExecuteDDL(Sprintf("DROP TRANSFER `%s`;", TransferName.data()));
+ }
+
+ void PauseTransfer() {
+ ExecuteDDL(Sprintf(R"(
+ ALTER TRANSFER `%s`
+ SET (
+ STATE = "Paused"
+ );
+ )", TransferName.data()));
+ }
+
+ void ResumeTransfer() {
+ ExecuteDDL(Sprintf(R"(
+ ALTER TRANSFER `%s`
+ SET (
+ STATE = "StandBy"
+ );
+ )", TransferName.data()));
+ }
+
+ auto DescribeTransfer() {
+ TReplicationClient client(Driver);
+
+ TDescribeReplicationSettings settings;
+ settings.IncludeStats(true);
+
+ return client.DescribeReplication(TString("/") + GetEnv("YDB_DATABASE") + "/" + TransferName, settings).ExtractValueSync();
+ }
+
+ void CreateReplication() {
+ auto ddl = Sprintf(R"(
+ CREATE ASYNC REPLICATION `%s`
+ FOR `%s` AS `%s`
+ WITH (
+ CONNECTION_STRING = 'grpc://%s'
+ );
+ )", ReplicationName.data(), SourceTableName.data(), TableName.data(), ConnectionString.data());
+
+ ExecuteDDL(ddl);
+ }
+
+ void DropReplication() {
+ ExecuteDDL(Sprintf("DROP ASYNC REPLICATION `%s`;", ReplicationName.data()));
+ }
+
+ auto DescribeReplication() {
+ TReplicationClient client(Driver);
+
+ TDescribeReplicationSettings settings;
+ settings.IncludeStats(true);
+
+ return client.DescribeReplication(TString("/") + GetEnv("YDB_DATABASE") + "/" + ReplicationName, settings).ExtractValueSync();
+ }
+
+ TReplicationDescription CheckReplicationState(TReplicationDescription::EState expected) {
+ for (size_t i = 20; i--;) {
+ auto result = DescribeReplication().GetReplicationDescription();
+ if (expected == result.GetState()) {
+ return result;
+ }
+
+ UNIT_ASSERT_C(i, "Unable to wait replication state. Expected: " << expected << ", actual: " << result.GetState());
+ Sleep(TDuration::Seconds(1));
+ }
+
+ Y_UNREACHABLE();
+ }
+
+ void PauseReplication() {
+ ExecuteDDL(Sprintf(R"(
+ ALTER ASYNC REPLICATION `%s`
+ SET (
+ STATE = "Paused"
+ );
+ )", ReplicationName.data()));
+ }
+
+ void ResumeReplication() {
+ ExecuteDDL(Sprintf(R"(
+ ALTER ASYNC REPLICATION `%s`
+ SET (
+ STATE = "StandBy"
+ );
+ )", ReplicationName.data()));
+ }
+
+ auto DescribeTopic() {
+ TDescribeTopicSettings settings;
+ settings.IncludeLocation(true);
+ settings.IncludeStats(true);
+
+ return TopicClient.DescribeTopic(TopicName, settings).ExtractValueSync();
+ }
+
+ void Write(const TMessage& message) {
+ TWriteSessionSettings writeSettings;
+ writeSettings.Path(TopicName);
+ writeSettings.DeduplicationEnabled(message.SeqNo);
+ if (message.Partition) {
+ writeSettings.PartitionId(message.Partition);
+ }
+ if (message.ProducerId) {
+ writeSettings.ProducerId(*message.ProducerId);
+ }
+ if (message.MessageGroupId) {
+ writeSettings.MessageGroupId(*message.MessageGroupId);
+ }
+ auto writeSession = TopicClient.CreateSimpleBlockingWriteSession(writeSettings);
+
+ UNIT_ASSERT(writeSession->Write(message.Message, message.SeqNo));
+ writeSession->Close(TDuration::Seconds(1));
+ }
+
+ std::pair<i64, Ydb::ResultSet> DoRead(const TExpectations& expectations) {
+ auto& e = expectations.front();
+
+ TStringBuilder columns;
+ for (size_t i = 0; i < e.size(); ++i) {
+ if (i) {
+ columns << ", ";
+ }
+ columns << "`" << e[i].first << "`";
+ }
+
+
+ auto query = Sprintf("SELECT %s FROM `%s` ORDER BY %s", columns.data(), TableName.data(), columns.data());
+ Cerr << ">>>>> Query: " << query << Endl << Flush;
+ auto res = Session.ExecuteQuery(query, TTxControl::NoTx()).GetValueSync();
+ if (!res.IsSuccess()) {
+ Cerr << ">>>>> Query error: " << res.GetIssues().ToString() << Endl << Flush;
+ TResultSet r{Ydb::ResultSet()};
+ return {-1, NYdb::TProtoAccessor::GetProto(r)};
+ }
+
+ const auto proto = NYdb::TProtoAccessor::GetProto(res.GetResultSet(0));
+ return {proto.rowsSize(), proto};
+ }
+
+ void CheckResult(const TExpectations& expectations) {
+ for (size_t attempt = 20; attempt--; ) {
+ auto res = DoRead(expectations);
+ Cerr << "Attempt=" << attempt << " count=" << res.first << Endl << Flush;
+ if (res.first == (ssize_t)expectations.size()) {
+ const Ydb::ResultSet& proto = res.second;
+ for (size_t i = 0; i < expectations.size(); ++i) {
+ auto& row = proto.rows(i);
+ auto& rowExpectations = expectations[i];
+ for (size_t i = 0; i < rowExpectations.size(); ++i) {
+ auto& c = rowExpectations[i];
+ TString msg = TStringBuilder() << "Row " << i << " column '" << c.first << "': ";
+ c.second->Assert(msg, row.items(i));
+ }
+ }
+
+ break;
+ }
+
+ UNIT_ASSERT_C(attempt, "Unable to wait transfer result");
+ Sleep(TDuration::Seconds(1));
+ }
+ }
+
+ TReplicationDescription CheckTransferState(TReplicationDescription::EState expected) {
+ for (size_t i = 20; i--;) {
+ auto result = DescribeTransfer().GetReplicationDescription();
+ if (expected == result.GetState()) {
+ return result;
+ }
+
+ UNIT_ASSERT_C(i, "Unable to wait transfer state. Expected: " << expected << ", actual: " << result.GetState());
+ Sleep(TDuration::Seconds(1));
+ }
+
+ Y_UNREACHABLE();
+ }
+
+ void CheckTransferStateError(const TString& expectedMessage) {
+ auto result = CheckTransferState(TReplicationDescription::EState::Error);
+ Cerr << ">>>>> ACTUAL: " << result.GetErrorState().GetIssues().ToOneLineString() << Endl << Flush;
+ Cerr << ">>>>> EXPECTED: " << expectedMessage << Endl << Flush;
+ UNIT_ASSERT(result.GetErrorState().GetIssues().ToOneLineString().contains(expectedMessage));
+ }
+
+ void Run(const TConfig& config) {
+
+ CreateTable(config.TableDDL);
+ CreateTopic();
+
+ TVector<TString> lambdas;
+ lambdas.insert(lambdas.end(), config.AlterLambdas.begin(), config.AlterLambdas.end());
+ lambdas.push_back(config.Lambda);
+
+ for (size_t i = 0; i < lambdas.size(); ++i) {
+ auto lambda = lambdas[i];
+ if (!i) {
+ CreateTransfer(lambda);
+ } else {
+ Sleep(TDuration::Seconds(1));
+
+ AlterTransfer(lambda);
+
+ if (i == lambdas.size() - 1) {
+ Sleep(TDuration::Seconds(1));
+ }
+ }
+ }
+
+ for (const auto& m : config.Messages) {
+ Write(m);
+ }
+
+ CheckResult(config.Expectations);
+
+ DropTransfer();
+ DropTable();
+ DropTopic();
+ }
+
+ const size_t Id;
+ const TString ConnectionString;
+
+ const TString TopicName;
+ const TString SourceTableName;
+ const TString TableName;
+ const TString ReplicationName;
+ const TString TransferName;
+
+ TDriver Driver;
+ TQueryClient TableClient;
+ TSession Session;
+ TTopicClient TopicClient;
+};
+
+
+} // namespace NReplicationTest
diff --git a/ydb/tests/functional/replication/ya.make b/ydb/tests/functional/replication/ya.make
index 8b9f1c9609..c6b70829d5 100644
--- a/ydb/tests/functional/replication/ya.make
+++ b/ydb/tests/functional/replication/ya.make
@@ -4,15 +4,20 @@ ENV(YDB_USE_IN_MEMORY_PDISKS=true)
ENV(YDB_ERASURE=block_4-2)
+ENV(YDB_FEATURE_FLAGS="enable_topic_transfer")
+ENV(YDB_GRPC_SERVICES="replication")
+
PEERDIR(
library/cpp/threading/local_executor
ydb/public/sdk/cpp/src/client/table
+ ydb/public/sdk/cpp/src/client/topic
ydb/public/sdk/cpp/src/client/proto
ydb/public/sdk/cpp/src/client/draft
)
SRCS(
- main.cpp
+ replication.cpp
+ transfer.cpp
)
INCLUDE(${ARCADIA_ROOT}/ydb/public/tools/ydb_recipe/recipe.inc)
@@ -20,7 +25,9 @@ INCLUDE(${ARCADIA_ROOT}/ydb/public/tools/ydb_recipe/recipe.inc)
SIZE(MEDIUM)
IF (SANITIZER_TYPE)
- REQUIREMENTS(ram:16 cpu:4)
+ REQUIREMENTS(ram:24 cpu:4)
+ELSE()
+ REQUIREMENTS(ram:16 cpu:2)
ENDIF()
END()
diff --git a/ydb/tests/functional/scheme_tests/canondata/tablet_scheme_tests.TestTabletSchemes.test_tablet_schemes_flat_schemeshard_/flat_schemeshard.schema b/ydb/tests/functional/scheme_tests/canondata/tablet_scheme_tests.TestTabletSchemes.test_tablet_schemes_flat_schemeshard_/flat_schemeshard.schema
index d9dea01aab..f76ea2e948 100644
--- a/ydb/tests/functional/scheme_tests/canondata/tablet_scheme_tests.TestTabletSchemes.test_tablet_schemes_flat_schemeshard_/flat_schemeshard.schema
+++ b/ydb/tests/functional/scheme_tests/canondata/tablet_scheme_tests.TestTabletSchemes.test_tablet_schemes_flat_schemeshard_/flat_schemeshard.schema
@@ -6259,6 +6259,16 @@
"ColumnId": 16,
"ColumnName": "NextChangefeedIdx",
"ColumnType": "Uint32"
+ },
+ {
+ "ColumnId": 17,
+ "ColumnName": "SrcPrefix",
+ "ColumnType": "Utf8"
+ },
+ {
+ "ColumnId": 18,
+ "ColumnName": "EncryptionIV",
+ "ColumnType": "String"
}
],
"ColumnsDropped": [],
@@ -6280,7 +6290,9 @@
13,
14,
15,
- 16
+ 16,
+ 17,
+ 18
],
"RoomID": 0,
"Codec": 0,
diff --git a/ydb/tests/functional/tenants/conftest.py b/ydb/tests/functional/tenants/conftest.py
index bcbae56495..a663e73450 100644
--- a/ydb/tests/functional/tenants/conftest.py
+++ b/ydb/tests/functional/tenants/conftest.py
@@ -7,9 +7,7 @@ from ydb.tests.library.clients.kikimr_http_client import HiveClient
# but somehow it does not
#
# for ydb_{cluster, database, ...} fixture family
-COMMON_FIXTURES = 'ydb.tests.library.fixtures'
-FLAVOURS_FIXTURE = 'ydb.tests.library.flavours'
-pytest_plugins = [COMMON_FIXTURES, FLAVOURS_FIXTURE]
+pytest_plugins = ['ydb.tests.library.fixtures', 'ydb.tests.library.flavours']
@pytest.fixture(scope='module')
diff --git a/ydb/tests/functional/tenants/test_user_administration.py b/ydb/tests/functional/tenants/test_user_administration.py
index 21497c81f8..8e62e13a26 100644
--- a/ydb/tests/functional/tenants/test_user_administration.py
+++ b/ydb/tests/functional/tenants/test_user_administration.py
@@ -96,8 +96,10 @@ def prepared_tenant_db(ydb_cluster, ydb_endpoint, ydb_database_module_scope):
session.execute_scheme("create group ordinarygroup")
session.execute_scheme("create user dbadmin2 password '1234'")
+ session.execute_scheme("create user dbadmin3 password '1234' nologin")
+ session.execute_scheme("create user dbadmin4 password '1234'")
session.execute_scheme("create group dbsubadmins")
- session.execute_scheme('alter group dbadmins add user dbadmin2, dbsubadmins')
+ session.execute_scheme('alter group dbadmins add user dbadmin2, dbadmin3, dbadmin4, dbsubadmins')
# setup for database admins, second
# make dbadmin the real admin of the database
@@ -114,10 +116,14 @@ def login_user(endpoint, database, user, password):
return credentials._make_token_request()['access_token']
-def test_ordinaryuser_can_change_password_for_himself(ydb_endpoint, prepared_root_db, prepared_tenant_db, ydb_client):
+@pytest.mark.parametrize('subject_user', [
+ 'ordinaryuser',
+ pytest.param('dbadmin4', id='dbadmin')
+])
+def test_user_can_change_password_for_himself(ydb_endpoint, prepared_root_db, prepared_tenant_db, ydb_client, subject_user):
database_path = prepared_tenant_db
- user_auth_token = login_user(ydb_endpoint, database_path, 'ordinaryuser', '1234')
+ user_auth_token = login_user(ydb_endpoint, database_path, subject_user, '1234')
credentials = ydb.AuthTokenCredentials(user_auth_token)
with ydb_client(database_path, credentials=credentials) as driver:
@@ -125,9 +131,9 @@ def test_ordinaryuser_can_change_password_for_himself(ydb_endpoint, prepared_roo
pool = ydb.SessionPool(driver)
with pool.checkout() as session:
- session.execute_scheme("alter user ordinaryuser password '4321'")
+ session.execute_scheme(f"alter user {subject_user} password '4321'")
- user_auth_token = login_user(ydb_endpoint, database_path, 'ordinaryuser', '4321')
+ user_auth_token = login_user(ydb_endpoint, database_path, subject_user, '4321')
def test_database_admin_cant_change_database_owner(ydb_endpoint, prepared_root_db, prepared_tenant_db, ydb_client):
@@ -154,7 +160,6 @@ def test_database_admin_cant_change_database_owner(ydb_endpoint, prepared_root_d
pytest.param('alter group dbadmins drop user dbsubadmins', id='remove-subgroup'),
pytest.param('drop group dbadmins', id='remove-admin-group'),
pytest.param('alter group dbadmins rename to dbadminsdemoted', id='rename-admin-group'),
-
])
def test_database_admin_cant_change_database_admin_group(ydb_endpoint, prepared_root_db, prepared_tenant_db, ydb_client, query):
database_path = prepared_tenant_db
@@ -174,6 +179,30 @@ def test_database_admin_cant_change_database_admin_group(ydb_endpoint, prepared_
assert 'Access denied.' in exc_info.value.message
+@pytest.mark.parametrize('query', [
+ pytest.param('alter user dbadmin2 password "4321"', id='change-password'),
+ pytest.param('alter user dbadmin2 nologin', id='block'),
+ pytest.param('alter user dbadmin3 login', id='unblock'),
+])
+def test_database_admin_cant_change_database_admin_user(ydb_endpoint, prepared_root_db, prepared_tenant_db, ydb_client, query):
+ database_path = prepared_tenant_db
+
+ user_auth_token = login_user(ydb_endpoint, database_path, 'dbadmin', '1234')
+ credentials = ydb.AuthTokenCredentials(user_auth_token)
+
+ with ydb_client(database_path, credentials=credentials) as driver:
+ driver.wait()
+
+ pool = ydb.SessionPool(driver)
+ with pool.checkout() as session:
+ with pytest.raises(ydb.issues.Error) as exc_info:
+ session.execute_scheme(query)
+
+ assert exc_info.type is ydb.issues.Unauthorized
+ logger.debug(exc_info.value.message)
+ assert 'Access denied.' in exc_info.value.message
+
+
def test_database_admin_can_create_user(ydb_endpoint, prepared_root_db, prepared_tenant_db, ydb_client):
database_path = prepared_tenant_db
diff --git a/ydb/tests/functional/tpc/large/ya.make b/ydb/tests/functional/tpc/large/ya.make
index 7816b50d72..c362745c87 100644
--- a/ydb/tests/functional/tpc/large/ya.make
+++ b/ydb/tests/functional/tpc/large/ya.make
@@ -2,9 +2,15 @@ PY3TEST()
TEST_SRCS(
test_tpcds.py
- test_tpch_spilling.py
)
+# https://github.com/ydb-platform/ydb/issues/15726
+IF (SANITIZER_TYPE != "memory" AND SANITIZER_TYPE != "thread")
+ TEST_SRCS(
+ test_tpch_spilling.py
+ )
+ENDIF()
+
SIZE(LARGE)
TAG(ya:fat)
diff --git a/ydb/tests/functional/transfer/main.cpp b/ydb/tests/functional/transfer/main.cpp
deleted file mode 100644
index abf1dae7d7..0000000000
--- a/ydb/tests/functional/transfer/main.cpp
+++ /dev/null
@@ -1,1467 +0,0 @@
-#include <util/system/env.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/driver/driver.h>
-#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/query/client.h>
-#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/topic/client.h>
-#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/proto/accessor.h>
-#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/draft/ydb_scripting.h>
-#include <ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/draft/ydb_replication.h>
-
-#include <library/cpp/threading/local_executor/local_executor.h>
-
-using namespace NYdb;
-using namespace NYdb::NQuery;
-using namespace NYdb::NTopic;
-using namespace NYdb::NReplication;
-
-namespace {
-
-volatile size_t TestCaseCounter = RandomNumber<size_t>();
-
-struct IChecker {
- virtual void Assert(const TString& msg, const ::Ydb::Value& value) = 0;
- virtual ~IChecker() = default;
-};
-
-template<typename T>
-struct Checker : public IChecker {
- Checker(T&& expected)
- : Expected(std::move(expected))
- {}
-
- void Assert(const TString& msg, const ::Ydb::Value& value) override {
- UNIT_ASSERT_VALUES_EQUAL_C(Get(value), Expected, msg);
- }
-
- T Get(const ::Ydb::Value& value);
-
- T Expected;
-};
-
-template<>
-bool Checker<bool>::Get(const ::Ydb::Value& value) {
- return value.bool_value();
-}
-
-template<>
-ui32 Checker<ui32>::Get(const ::Ydb::Value& value) {
- return value.uint32_value();
-}
-
-template<>
-ui64 Checker<ui64>::Get(const ::Ydb::Value& value) {
- return value.uint64_value();
-}
-
-template<>
-double Checker<double>::Get(const ::Ydb::Value& value) {
- return value.double_value();
-}
-
-template<>
-TString Checker<TString>::Get(const ::Ydb::Value& value) {
- return value.text_value();
-}
-
-template<>
-TInstant Checker<TInstant>::Get(const ::Ydb::Value& value) {
- return TInstant::Days(value.uint32_value());
-}
-
-template<typename T>
-std::pair<TString, std::shared_ptr<IChecker>> _C(TString&& name, T&& expected) {
- return {
- std::move(name),
- std::make_shared<Checker<T>>(std::move(expected))
- };
-}
-
-struct TMessage {
- TString Message;
- std::optional<ui32> Partition = std::nullopt;
- std::optional<TString> ProducerId = std::nullopt;
- std::optional<TString> MessageGroupId = std::nullopt;
- std::optional<ui64> SeqNo = std::nullopt;
-};
-
-TMessage _withSeqNo(ui64 seqNo) {
- return {
- .Message = TStringBuilder() << "Message-" << seqNo,
- .Partition = 0,
- .ProducerId = std::nullopt,
- .MessageGroupId = std::nullopt,
- .SeqNo = seqNo
- };
-}
-
-TMessage _withProducerId(const TString& producerId) {
- return {
- .Message = TStringBuilder() << "Message-" << producerId,
- .Partition = 0,
- .ProducerId = producerId,
- .MessageGroupId = std::nullopt,
- .SeqNo = std::nullopt
- };
-}
-
-TMessage _withMessageGroupId(const TString& messageGroupId) {
- return {
- .Message = TStringBuilder() << "Message-" << messageGroupId,
- .Partition = 0,
- .ProducerId = messageGroupId,
- .MessageGroupId = messageGroupId,
- .SeqNo = std::nullopt
- };
-}
-
-using TExpectations = TVector<TVector<std::pair<TString, std::shared_ptr<IChecker>>>>;
-
-struct TConfig {
- const TString TableDDL;
- const TString Lambda;
- const TVector<TMessage> Messages;
- const TExpectations Expectations;
- const TVector<TString> AlterLambdas;
-};
-
-struct MainTestCase {
-
- MainTestCase()
- : Id(TestCaseCounter++)
- , ConnectionString(GetEnv("YDB_ENDPOINT") + "/?database=" + GetEnv("YDB_DATABASE"))
- , TopicName(TStringBuilder() << "Topic_" << Id)
- , TableName(TStringBuilder() << "Table_" << Id)
- , TransferName(TStringBuilder() << "Transfer_" << Id)
- , Driver(TDriverConfig(ConnectionString))
- , TableClient(Driver)
- , Session(TableClient.GetSession().GetValueSync().GetSession())
- , TopicClient(Driver)
- {
- }
-
- void ExecuteDDL(const TString& ddl) {
- auto res = Session.ExecuteQuery(ddl, TTxControl::NoTx()).GetValueSync();
- UNIT_ASSERT_C(res.IsSuccess(), res.GetIssues().ToString());
- }
-
- void CreateTable(const TString& tableDDL) {
- ExecuteDDL(Sprintf(tableDDL.data(), TableName.data()));
- }
-
- void CreateTopic(size_t partitionCount = 10) {
- ExecuteDDL(Sprintf(R"(
- CREATE TOPIC `%s`
- WITH (
- min_active_partitions = %d
- );
- )", TopicName.data(), partitionCount));
- }
-
- void CreateConsumer(const TString& consumerName) {
- ExecuteDDL(Sprintf(R"(
- ALTER TOPIC `%s`
- ADD CONSUMER `%s`;
- )", TopicName.data(), consumerName.data()));
- }
-
- struct CreateTransferSettings {
- std::optional<TString> ConsumerName = std::nullopt;
- std::optional<TDuration> FlushInterval;
- std::optional<ui64> BatchSizeBytes;
-
- CreateTransferSettings()
- : ConsumerName(std::nullopt)
- , FlushInterval(TDuration::Seconds(1))
- , BatchSizeBytes(8_MB) {}
-
- static CreateTransferSettings WithConsumerName(const TString& consumerName) {
- CreateTransferSettings result;
- result.ConsumerName = consumerName;
- return result;
- }
-
- static CreateTransferSettings WithBatching(const TDuration& flushInterval, const ui64 batchSize) {
- CreateTransferSettings result;
- result.FlushInterval = flushInterval;
- result.BatchSizeBytes = batchSize;
- return result;
- }
- };
-
- void CreateTransfer(const TString& lambda, const CreateTransferSettings& settings = CreateTransferSettings()) {
- TStringBuilder sb;
- if (settings.ConsumerName) {
- sb << ", CONSUMER = '" << *settings.ConsumerName << "'" << Endl;
- }
- if (settings.FlushInterval) {
- sb << ", FLUSH_INTERVAL = Interval('PT" << settings.FlushInterval->Seconds() << "S')" << Endl;
- }
- if (settings.BatchSizeBytes) {
- sb << ", BATCH_SIZE_BYTES = " << *settings.BatchSizeBytes << Endl;
- }
-
- auto ddl = Sprintf(R"(
- %s;
-
- CREATE TRANSFER `%s`
- FROM `%s` TO `%s` USING $l
- WITH (
- CONNECTION_STRING = 'grpc://%s'
- %s
- );
- )", lambda.data(), TransferName.data(), TopicName.data(), TableName.data(), ConnectionString.data(), sb.data());
-
- ExecuteDDL(ddl);
- }
-
- struct AlterTransferSettings {
- std::optional<TString> TransformLambda;
- std::optional<TDuration> FlushInterval;
- std::optional<ui64> BatchSizeBytes;
-
- AlterTransferSettings()
- : FlushInterval(std::nullopt)
- , BatchSizeBytes(std::nullopt) {}
-
- static AlterTransferSettings WithBatching(const TDuration& flushInterval, const ui64 batchSize) {
- AlterTransferSettings result;
- result.FlushInterval = flushInterval;
- result.BatchSizeBytes = batchSize;
- return result;
- }
-
- static AlterTransferSettings WithTransformLambda(const TString& lambda) {
- AlterTransferSettings result;
- result.TransformLambda = lambda;
- return result;
- }
- };
-
- void AlterTransfer(const TString& lambda) {
- AlterTransfer(AlterTransferSettings::WithTransformLambda(lambda));
- }
-
- void AlterTransfer(const AlterTransferSettings& settings, bool success = true) {
- TString lambda = settings.TransformLambda ? *settings.TransformLambda : "";
- TString setLambda = settings.TransformLambda ? "SET USING $l" : "";
-
- TStringBuilder sb;
- if (settings.FlushInterval) {
- sb << "FLUSH_INTERVAL = Interval('PT" << settings.FlushInterval->Seconds() << "S')" << Endl;
- }
- if (settings.BatchSizeBytes) {
- sb << ", BATCH_SIZE_BYTES = " << *settings.BatchSizeBytes << Endl;
- }
-
- TString setOptions;
- if (!sb.empty()) {
- setOptions = TStringBuilder() << "SET (" << sb << " )";
- }
-
- auto res = Session.ExecuteQuery(Sprintf(R"(
- %s;
-
- ALTER TRANSFER `%s`
- %s
- %s;
- )", lambda.data(), TransferName.data(), setLambda.data(), setOptions.data()), TTxControl::NoTx()).GetValueSync();
- UNIT_ASSERT_VALUES_EQUAL_C(success, res.IsSuccess(), res.GetIssues().ToString());
- }
-
- void DropTransfer() {
- auto res = Session.ExecuteQuery(Sprintf(R"(
- DROP TRANSFER `%s`;
- )", TransferName.data()), TTxControl::NoTx()).GetValueSync();
- UNIT_ASSERT_C(res.IsSuccess(), res.GetIssues().ToString());
- }
-
- void PauseTransfer() {
- ExecuteDDL(Sprintf(R"(
- ALTER TRANSFER `%s`
- SET (
- STATE = "Paused"
- );
- )", TransferName.data()));
- }
-
- void ResumeTransfer() {
- ExecuteDDL(Sprintf(R"(
- ALTER TRANSFER `%s`
- SET (
- STATE = "StandBy"
- );
- )", TransferName.data()));
- }
-
- auto DescribeTransfer() {
- TReplicationClient client(Driver);
-
- TDescribeReplicationSettings settings;
- settings.IncludeStats(true);
-
- return client.DescribeReplication(TString("/") + GetEnv("YDB_DATABASE") + "/" + TransferName, settings).ExtractValueSync();
- }
-
- auto DescribeTopic() {
- TDescribeTopicSettings settings;
- settings.IncludeLocation(true);
- settings.IncludeStats(true);
-
- return TopicClient.DescribeTopic(TopicName, settings).ExtractValueSync();
- }
-
- void Write(const TMessage& message) {
- TWriteSessionSettings writeSettings;
- writeSettings.Path(TopicName);
- writeSettings.DeduplicationEnabled(message.SeqNo);
- if (message.Partition) {
- writeSettings.PartitionId(message.Partition);
- }
- if (message.ProducerId) {
- writeSettings.ProducerId(*message.ProducerId);
- }
- if (message.MessageGroupId) {
- writeSettings.MessageGroupId(*message.MessageGroupId);
- }
- auto writeSession = TopicClient.CreateSimpleBlockingWriteSession(writeSettings);
-
- UNIT_ASSERT(writeSession->Write(message.Message, message.SeqNo));
- writeSession->Close(TDuration::Seconds(1));
- }
-
- std::pair<ui64, Ydb::ResultSet> DoRead(const TExpectations& expectations) {
- auto& e = expectations.front();
-
- TStringBuilder columns;
- for (size_t i = 0; i < e.size(); ++i) {
- if (i) {
- columns << ", ";
- }
- columns << "`" << e[i].first << "`";
- }
-
-
- auto query = Sprintf("SELECT %s FROM `%s` ORDER BY %s", columns.data(), TableName.data(), columns.data());
- Cerr << ">>>>> Query: " << query << Endl << Flush;
- auto res = Session.ExecuteQuery(query, TTxControl::NoTx()).GetValueSync();
- UNIT_ASSERT_C(res.IsSuccess(), res.GetIssues().ToString());
-
- const auto proto = NYdb::TProtoAccessor::GetProto(res.GetResultSet(0));
- return {proto.rowsSize(), proto};
- }
-
- void CheckResult(const TExpectations& expectations) {
- for (size_t attempt = 20; attempt--; ) {
- auto res = DoRead(expectations);
- Cerr << "Attempt=" << attempt << " count=" << res.first << Endl << Flush;
- if (res.first == expectations.size()) {
- const Ydb::ResultSet& proto = res.second;
- for (size_t i = 0; i < expectations.size(); ++i) {
- auto& row = proto.rows(i);
- auto& rowExpectations = expectations[i];
- for (size_t i = 0; i < rowExpectations.size(); ++i) {
- auto& c = rowExpectations[i];
- TString msg = TStringBuilder() << "Row " << i << " column '" << c.first << "': ";
- c.second->Assert(msg, row.items(i));
- }
- }
-
- break;
- }
-
- UNIT_ASSERT_C(attempt, "Unable to wait transfer result");
- Sleep(TDuration::Seconds(1));
- }
- }
-
- TReplicationDescription CheckTransferState(TReplicationDescription::EState expected) {
- for (size_t i = 20; i--;) {
- auto result = DescribeTransfer().GetReplicationDescription();
- if (expected == result.GetState()) {
- return result;
- }
-
- UNIT_ASSERT_C(i, "Unable to wait transfer state. Expected: " << expected << ", actual: " << result.GetState());
- Sleep(TDuration::Seconds(1));
- }
-
- Y_UNREACHABLE();
- }
-
- void CheckTransferStateError(const TString& expectedMessage) {
- auto result = CheckTransferState(TReplicationDescription::EState::Error);
- Cerr << ">>>>> ACTUAL: " << result.GetErrorState().GetIssues().ToOneLineString() << Endl << Flush;
- Cerr << ">>>>> EXPECTED: " << expectedMessage << Endl << Flush;
- UNIT_ASSERT(result.GetErrorState().GetIssues().ToOneLineString().contains(expectedMessage));
- }
-
- void Run(const TConfig& config) {
-
- CreateTable(config.TableDDL);
- CreateTopic();
-
- TVector<TString> lambdas;
- lambdas.insert(lambdas.end(), config.AlterLambdas.begin(), config.AlterLambdas.end());
- lambdas.push_back(config.Lambda);
-
- for (size_t i = 0; i < lambdas.size(); ++i) {
- auto lambda = lambdas[i];
- if (!i) {
- CreateTransfer(lambda);
- } else {
- Sleep(TDuration::Seconds(1));
-
- AlterTransfer(lambda);
-
- if (i == lambdas.size() - 1) {
- Sleep(TDuration::Seconds(1));
- }
- }
- }
-
- for (const auto& m : config.Messages) {
- Write(m);
- }
-
- CheckResult(config.Expectations);
- }
-
- const size_t Id;
- const TString ConnectionString;
-
- const TString TopicName;
- const TString TableName;
- const TString TransferName;
-
- TDriver Driver;
- TQueryClient TableClient;
- TSession Session;
- TTopicClient TopicClient;
-
- std::vector<std::string> ColumnNames;
-};
-
-
-} // namespace
-
-Y_UNIT_TEST_SUITE(Transfer)
-{
- Y_UNIT_TEST(Main_ColumnTable_KeyColumnFirst)
- {
- MainTestCase().Run({
- .TableDDL = R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8 NOT NULL,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )",
-
- .Lambda = R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )",
-
- .Messages = {{"Message-1"}},
-
- .Expectations = {{
- _C("Key", ui64(0)),
- _C("Message", TString("Message-1")),
- }}
- });
- }
-
- Y_UNIT_TEST(Main_ColumnTable_KeyColumnLast)
- {
- MainTestCase().Run({
- .TableDDL = R"(
- CREATE TABLE `%s` (
- Message Utf8 NOT NULL,
- Key Uint64 NOT NULL,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )",
-
- .Lambda = R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )",
-
- .Messages = {{"Message-1"}},
-
- .Expectations = {{
- _C("Key", ui64(0)),
- _C("Message", TString("Message-1")),
- }}
- });
- }
-
- Y_UNIT_TEST(Main_ColumnTable_ComplexKey)
- {
- MainTestCase().Run({
- .TableDDL = R"(
- CREATE TABLE `%s` (
- Key1 Uint64 NOT NULL,
- Key3 Uint64 NOT NULL,
- Value1 Utf8,
- Key2 Uint64 NOT NULL,
- Value2 Utf8,
- Key4 Uint64 NOT NULL,
- PRIMARY KEY (Key3, Key2, Key1, Key4)
- ) WITH (
- STORE = COLUMN
- );
- )",
-
- .Lambda = R"(
- $l = ($x) -> {
- return [
- <|
- Key1:CAST(1 AS Uint64),
- Key2:CAST(2 AS Uint64),
- Value2:CAST("value-2" AS Utf8),
- Key4:CAST(4 AS Uint64),
- Key3:CAST(3 AS Uint64),
- Value1:CAST("value-1" AS Utf8),
- |>
- ];
- };
- )",
-
- .Messages = {{"Message-1"}},
-
- .Expectations = {{
- _C("Key1", ui64(1)),
- _C("Key2", ui64(2)),
- _C("Key3", ui64(3)),
- _C("Key4", ui64(4)),
- _C("Value1", TString("value-1")),
- _C("Value2", TString("value-2")),
- }}
- });
- }
-
- Y_UNIT_TEST(Main_ColumnTable_JsonMessage)
- {
- MainTestCase().Run({
- .TableDDL = R"(
- CREATE TABLE `%s` (
- Id Uint64 NOT NULL,
- FirstName Utf8 NOT NULL,
- LastName Utf8 NOT NULL,
- Salary Uint64 NOT NULL,
- PRIMARY KEY (Id)
- ) WITH (
- STORE = COLUMN
- );
- )",
-
- .Lambda = R"(
- $l = ($x) -> {
- $input = CAST($x._data AS JSON);
-
- return [
- <|
- Id: Yson::ConvertToUint64($input.id),
- FirstName: CAST(Yson::ConvertToString($input.first_name) AS Utf8),
- LastName: CAST(Yson::ConvertToString($input.last_name) AS Utf8),
- Salary: CAST(Yson::ConvertToString($input.salary) AS UInt64)
- |>
- ];
- };
- )",
-
- .Messages = {{R"({
- "id": 1,
- "first_name": "Vasya",
- "last_name": "Pupkin",
- "salary": "123"
- })"}},
-
- .Expectations = {{
- _C("Id", ui64(1)),
- _C("FirstName", TString("Vasya")),
- _C("LastName", TString("Pupkin")),
- _C("Salary", ui64(123)),
- }}
- });
- }
-
- Y_UNIT_TEST(Main_ColumnTable_NullableColumn)
- {
- MainTestCase().Run({
- .TableDDL = R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )",
-
- .Lambda = R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )",
-
- .Messages = {{"Message-1"}},
-
- .Expectations = {{
- _C("Key", ui64(0)),
- _C("Message", TString("Message-1")),
- }}
- });
- }
-
- Y_UNIT_TEST(Main_ColumnTable_Date)
- {
- MainTestCase().Run({
- .TableDDL = R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Date,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )",
-
- .Lambda = R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message: CAST($x._data AS Date)
- |>
- ];
- };
- )",
-
- .Messages = {{"2025-02-21"}},
-
- .Expectations = {{
- _C("Key", ui64(0)),
- _C("Message", TInstant::ParseIso8601("2025-02-21")),
- }}
- });
- }
-
- Y_UNIT_TEST(Main_ColumnTable_Double)
- {
- MainTestCase().Run({
- .TableDDL = R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Double,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )",
-
- .Lambda = R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message: CAST($x._data AS Double)
- |>
- ];
- };
- )",
-
- .Messages = {{"1.23"}},
-
- .Expectations = {{
- _C("Key", ui64(0)),
- _C("Message", 1.23),
- }}
- });
- }
-
- Y_UNIT_TEST(Main_ColumnTable_Utf8_Long)
- {
- MainTestCase().Run({
- .TableDDL = R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8 NOT NULL,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )",
-
- .Lambda = R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )",
-
- .Messages = {{"Message-1 long value 0 1234567890 1 1234567890 2 1234567890 3 1234567890 4 1234567890 5 1234567890 6 1234567890"}},
-
- .Expectations = {{
- _C("Key", ui64(0)),
- _C("Message", TString("Message-1 long value 0 1234567890 1 1234567890 2 1234567890 3 1234567890 4 1234567890 5 1234567890 6 1234567890")),
- }}
- });
- }
-
- Y_UNIT_TEST(Main_MessageField_Partition)
- {
- MainTestCase().Run({
- .TableDDL = R"(
- CREATE TABLE `%s` (
- Partition Uint32 NOT NULL,
- Message Utf8,
- PRIMARY KEY (Partition)
- ) WITH (
- STORE = COLUMN
- );
- )",
-
- .Lambda = R"(
- $l = ($x) -> {
- return [
- <|
- Partition:CAST($x._partition AS Uint32),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )",
-
- .Messages = {{"Message-1", 7}},
-
- .Expectations = {{
- _C("Partition", ui32(7)),
- _C("Message", TString("Message-1")),
- }}
- });
- }
-
- Y_UNIT_TEST(Main_MessageField_SeqNo)
- {
- MainTestCase().Run({
- .TableDDL = R"(
- CREATE TABLE `%s` (
- SeqNo Uint64 NOT NULL,
- Message Utf8,
- PRIMARY KEY (SeqNo)
- ) WITH (
- STORE = COLUMN
- );
- )",
-
- .Lambda = R"(
- $l = ($x) -> {
- return [
- <|
- SeqNo:CAST($x._seq_no AS Uint32),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )",
-
- .Messages = {_withSeqNo(13)},
-
- .Expectations = {{
- _C("SeqNo", ui64(13)),
- }}
- });
- }
-
- Y_UNIT_TEST(Main_MessageField_ProducerId)
- {
- MainTestCase().Run({
- .TableDDL = R"(
- CREATE TABLE `%s` (
- Offset Uint64 NOT NULL,
- ProducerId Utf8,
- PRIMARY KEY (Offset)
- ) WITH (
- STORE = COLUMN
- );
- )",
-
- .Lambda = R"(
- $l = ($x) -> {
- return [
- <|
- Offset:CAST($x._offset AS Uint64),
- ProducerId:CAST($x._producer_id AS Utf8)
- |>
- ];
- };
- )",
-
- .Messages = {_withProducerId("Producer-13")},
-
- .Expectations = {{
- _C("ProducerId", TString("Producer-13")),
- }}
- });
- }
-
- Y_UNIT_TEST(Main_MessageField_MessageGroupId)
- {
- MainTestCase().Run({
- .TableDDL = R"(
- CREATE TABLE `%s` (
- Offset Uint64 NOT NULL,
- MessageGroupId Utf8,
- PRIMARY KEY (Offset)
- ) WITH (
- STORE = COLUMN
- );
- )",
-
- .Lambda = R"(
- $l = ($x) -> {
- return [
- <|
- Offset:CAST($x._offset AS Uint64),
- MessageGroupId:CAST($x._message_group_id AS Utf8)
- |>
- ];
- };
- )",
-
- .Messages = {_withMessageGroupId("MessageGroupId-13")},
-
- .Expectations = {{
- _C("MessageGroupId", TString("MessageGroupId-13")),
- }}
- });
- }
-
- Y_UNIT_TEST(AlterLambda)
- {
- MainTestCase().Run({
- .TableDDL = R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8 NOT NULL,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )",
-
- .Lambda = R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data || " new lambda" AS Utf8)
- |>
- ];
- };
- )",
-
- .Messages = {{"Message-1"}},
-
- .Expectations = {{
- _C("Message", TString("Message-1 new lambda")),
- }},
-
- .AlterLambdas = {
- R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data || " 1 lambda" AS Utf8)
- |>
- ];
- };
- )",
- R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data || " 2 lambda" AS Utf8)
- |>
- ];
- };
- )",
- }
-
- });
- }
-
- Y_UNIT_TEST(DropTransfer)
- {
- MainTestCase testCase;
- testCase.Run({
- .TableDDL = R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8 NOT NULL,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )",
-
- .Lambda = R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )",
-
- .Messages = {{"Message-1"}},
-
- .Expectations = {{
- _C("Key", ui64(0)),
- _C("Message", TString("Message-1")),
- }}
- });
-
- {
- auto result = testCase.DescribeTransfer();
- UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToOneLineString());
- }
-
- testCase.DropTransfer();
-
- {
- auto result = testCase.DescribeTransfer();
- UNIT_ASSERT_C(!result.IsSuccess(), result.GetIssues().ToOneLineString());
- UNIT_ASSERT_VALUES_EQUAL(EStatus::SCHEME_ERROR, result.GetStatus());
- }
- }
-
- Y_UNIT_TEST(CreateAndDropConsumer)
- {
- MainTestCase testCase;
- testCase.CreateTable(R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8 NOT NULL,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )");
-
- testCase.CreateTopic();
- testCase.CreateTransfer(R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )");
-
- for (size_t i = 20; i--; ) {
- auto result = testCase.DescribeTopic();
- UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToOneLineString());
- auto& consumers = result.GetTopicDescription().GetConsumers();
- if (1 == consumers.size()) {
- UNIT_ASSERT_VALUES_EQUAL(1, consumers.size());
- Cerr << "Consumer name is '" << consumers[0].GetConsumerName() << "'" << Endl << Flush;
- UNIT_ASSERT_C("replicationConsumer" != consumers[0].GetConsumerName(), "Consumer name is random uuid");
- break;
- }
-
- UNIT_ASSERT_C(i, "Unable to wait consumer has been created");
- Sleep(TDuration::Seconds(1));
- }
-
- testCase.DropTransfer();
-
- for (size_t i = 20; i--; ) {
- auto result = testCase.DescribeTopic();
- UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToOneLineString());
- auto& consumers = result.GetTopicDescription().GetConsumers();
- if (0 == consumers.size()) {
- UNIT_ASSERT_VALUES_EQUAL(0, consumers.size());
- break;
- }
-
- UNIT_ASSERT_C(i, "Unable to wait consumer has been removed");
- Sleep(TDuration::Seconds(1));
- }
- }
-
- Y_UNIT_TEST(DescribeError_OnLambdaCompilation)
- {
- MainTestCase testCase;
- testCase.CreateTable(R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8 NOT NULL,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )");
-
- testCase.CreateTopic(1);
- testCase.CreateTransfer(R"(
- $l = ($x) -> {
- return $x._unknown_field_for_lambda_compilation_error;
- };
- )");
-
- testCase.CheckTransferStateError("_unknown_field_for_lambda_compilation_error");
- }
-/*
- Y_UNIT_TEST(DescribeError_OnWriteToShard)
- {
- MainTestCase testCase;
- testCase.CreateTable(R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )");
-
- testCase.CreateTopic(1);
- testCase.CreateTransfer(R"(
- $l = ($x) -> {
- return [
- <|
- Key:null,
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )");
-
- testCase.Write({"message-1"});
-
- testCase.CheckTransferStateError("Cannot write data into shard");
- }
-*/
-
- Y_UNIT_TEST(CustomConsumer)
- {
- MainTestCase testCase;
- testCase.CreateTable(R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )");
-
- testCase.CreateTopic(1);
- testCase.CreateConsumer("PredefinedConsumer");
- testCase.CreateTransfer(R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )", MainTestCase::CreateTransferSettings::WithConsumerName("PredefinedConsumer"));
-
- Sleep(TDuration::Seconds(3));
-
- { // Check that consumer is reused
- auto result = testCase.DescribeTopic();
- UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToOneLineString());
- auto& consumers = result.GetTopicDescription().GetConsumers();
- UNIT_ASSERT_VALUES_EQUAL(1, consumers.size());
- UNIT_ASSERT_VALUES_EQUAL("PredefinedConsumer", consumers[0].GetConsumerName());
- }
-
- testCase.DropTransfer();
-
- Sleep(TDuration::Seconds(3));
-
- { // Check that consumer is not removed
- auto result = testCase.DescribeTopic();
- UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToOneLineString());
- auto& consumers = result.GetTopicDescription().GetConsumers();
- UNIT_ASSERT_VALUES_EQUAL(1, consumers.size());
- UNIT_ASSERT_VALUES_EQUAL("PredefinedConsumer", consumers[0].GetConsumerName());
- }
- }
-
- Y_UNIT_TEST(CustomFlushInterval)
- {
- TDuration flushInterval = TDuration::Seconds(5);
-
- MainTestCase testCase;
- testCase.CreateTable(R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )");
-
- testCase.CreateTopic(1);
- testCase.CreateTransfer(R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )", MainTestCase::CreateTransferSettings::WithBatching(flushInterval, 13_MB));
-
-
- TInstant expectedEnd = TInstant::Now() + flushInterval;
- testCase.Write({"Message-1"});
-
- // check that data in the table only after flush interval
- for (size_t attempt = 20; attempt--; ) {
- auto res = testCase.DoRead({{
- _C("Key", ui64(0)),
- }});
- Cerr << "Attempt=" << attempt << " count=" << res.first << Endl << Flush;
- if (res.first == 1) {
- UNIT_ASSERT_C(expectedEnd <= TInstant::Now(), "Expected: " << expectedEnd << " Now: " << TInstant::Now());
- break;
- }
-
- UNIT_ASSERT_C(attempt, "Unable to wait transfer result");
- Sleep(TDuration::Seconds(1));
- }
- }
-
- Y_UNIT_TEST(AlterFlushInterval)
- {
- MainTestCase testCase;
- testCase.CreateTable(R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )");
-
- testCase.CreateTopic(1);
- testCase.CreateTransfer(R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )", MainTestCase::CreateTransferSettings::WithBatching(TDuration::Hours(1), 1_GB));
-
-
- testCase.Write({"Message-1"});
-
- // check if there isn`t data in the table (flush_interval is big)
- for (size_t attempt = 5; attempt--; ) {
- auto res = testCase.DoRead({{
- _C("Key", ui64(0)),
- }});
- Cerr << "Attempt=" << attempt << " count=" << res.first << Endl << Flush;
- UNIT_ASSERT_VALUES_EQUAL_C(0, res.first, "Flush has not been happened");
- Sleep(TDuration::Seconds(1));
- }
-
- // flush interval is small
- testCase.AlterTransfer(MainTestCase::AlterTransferSettings::WithBatching(TDuration::MilliSeconds(1), 1_GB), false);
- // flush interval is big
- testCase.AlterTransfer(MainTestCase::AlterTransferSettings::WithBatching(TDuration::Days(1) + TDuration::Seconds(1), 1_GB), false);
-
- testCase.AlterTransfer(MainTestCase::AlterTransferSettings::WithBatching(TDuration::Seconds(1), 1_GB));
-
- // check if there is data in the table
- testCase.CheckResult({{
- _C("Message", TString("Message-1"))
- }});
- }
-
- Y_UNIT_TEST(AlterBatchSize)
- {
- MainTestCase testCase;
- testCase.CreateTable(R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )");
-
- testCase.CreateTopic(1);
- testCase.CreateTransfer(R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )", MainTestCase::CreateTransferSettings::WithBatching(TDuration::Hours(1), 512_MB));
-
-
- testCase.Write({"Message-1"});
-
- // batch size is big. alter is not success
- testCase.AlterTransfer(MainTestCase::AlterTransferSettings::WithBatching(TDuration::Hours(1), 1_GB + 1), false);
-
- // batch size is top valid value. alter is success
- testCase.AlterTransfer(MainTestCase::AlterTransferSettings::WithBatching(TDuration::Hours(1), 1_GB));
-
- // batch size is small. alter is success. after flush will
- testCase.AlterTransfer(MainTestCase::AlterTransferSettings::WithBatching(TDuration::Hours(1), 1));
-
- // check if there is data in the table
- testCase.CheckResult({{
- _C("Message", TString("Message-1"))
- }});
- }
-
- Y_UNIT_TEST(CreateTransferSourceNotExists)
- {
- MainTestCase testCase;
- testCase.CreateTable(R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8 NOT NULL,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )");
-
- testCase.CreateTransfer(R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )");
-
- testCase.CheckTransferStateError("Discovery error: local/Topic_");
- }
-
- Y_UNIT_TEST(CreateTransferSourceIsNotTopic)
- {
- MainTestCase testCase;
- testCase.CreateTable(R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8 NOT NULL,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )");
-
- testCase.ExecuteDDL(Sprintf(R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- PRIMARY KEY (Key)
- );
- )", testCase.TopicName.data()));
-
- testCase.CreateTransfer(R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )");
-
- testCase.CheckTransferStateError("Discovery error: local/Topic_");
- }
-
- Y_UNIT_TEST(CreateTransferRowTable)
- {
- MainTestCase testCase;
- testCase.CreateTable(R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8 NOT NULL,
- PRIMARY KEY (Key)
- );
- )");
- testCase.CreateTopic();
-
- testCase.CreateTransfer(R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )");
-
- testCase.CheckTransferStateError("Only column tables are supported as transfer targets");
- }
-
- Y_UNIT_TEST(CreateTransferTargetIsNotTable)
- {
- MainTestCase testCase;
- testCase.CreateTable(R"(
- CREATE TOPIC `%s`;
- )");
- testCase.CreateTopic();
-
- testCase.CreateTransfer(R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )");
-
- testCase.CheckTransferStateError("Only column tables are supported as transfer targets");
- }
-
- Y_UNIT_TEST(CreateTransferTargetNotExists)
- {
- MainTestCase testCase;
- testCase.CreateTopic();
-
- testCase.CreateTransfer(R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )");
-
- testCase.CheckTransferStateError(TStringBuilder() << "The target table `/local/" << testCase.TableName << "` does not exist");
- }
-
- Y_UNIT_TEST(PauseAndResumeTransfer)
- {
- MainTestCase testCase;
- testCase.CreateTable(R"(
- CREATE TABLE `%s` (
- Key Uint64 NOT NULL,
- Message Utf8,
- PRIMARY KEY (Key)
- ) WITH (
- STORE = COLUMN
- );
- )");
- testCase.CreateTopic(1);
-
- testCase.CreateTransfer(R"(
- $l = ($x) -> {
- return [
- <|
- Key:CAST($x._offset AS Uint64),
- Message:CAST($x._data AS Utf8)
- |>
- ];
- };
- )", MainTestCase::CreateTransferSettings::WithBatching(TDuration::Seconds(1), 1));
-
- testCase.Write({"Message-1"});
-
- testCase.CheckResult({{
- _C("Message", TString("Message-1"))
- }});
-
- testCase.CheckTransferState(TReplicationDescription::EState::Running);
-
- Cerr << "State: Paused" << Endl << Flush;
-
- testCase.PauseTransfer();
-
- Sleep(TDuration::Seconds(1));
- testCase.CheckTransferState(TReplicationDescription::EState::Paused);
-
- testCase.Write({"Message-2"});
-
- // Transfer is paused. New messages aren`t added to the table.
- Sleep(TDuration::Seconds(3));
- testCase.CheckResult({{
- _C("Message", TString("Message-1"))
- }});
-
- Cerr << "State: StandBy" << Endl << Flush;
-
- testCase.ResumeTransfer();
-
- // Transfer is resumed. New messages are added to the table.
- testCase.CheckTransferState(TReplicationDescription::EState::Running);
- testCase.CheckResult({{
- _C("Message", TString("Message-1"))
- }, {
- _C("Message", TString("Message-2")),
- }});
-
- // More cycles for pause/resume
- testCase.PauseTransfer();
- testCase.CheckTransferState(TReplicationDescription::EState::Paused);
-
- testCase.ResumeTransfer();
- testCase.CheckTransferState(TReplicationDescription::EState::Running);
- }
-}
-
diff --git a/ydb/tests/functional/transfer/ya.make b/ydb/tests/functional/transfer/ya.make
deleted file mode 100644
index 152a3fde51..0000000000
--- a/ydb/tests/functional/transfer/ya.make
+++ /dev/null
@@ -1,30 +0,0 @@
-UNITTEST()
-
-ENV(YDB_USE_IN_MEMORY_PDISKS=true)
-
-ENV(YDB_ERASURE=block_4-2)
-
-ENV(YDB_FEATURE_FLAGS="enable_topic_transfer")
-ENV(YDB_GRPC_SERVICES="replication")
-
-PEERDIR(
- library/cpp/threading/local_executor
- ydb/public/sdk/cpp/src/client/table
- ydb/public/sdk/cpp/src/client/topic
- ydb/public/sdk/cpp/src/client/proto
- ydb/public/sdk/cpp/src/client/draft
-)
-
-SRCS(
- main.cpp
-)
-
-INCLUDE(${ARCADIA_ROOT}/ydb/public/tools/ydb_recipe/recipe.inc)
-
-SIZE(MEDIUM)
-
-IF (SANITIZER_TYPE)
- REQUIREMENTS(ram:16 cpu:4)
-ENDIF()
-
-END()
diff --git a/ydb/tests/functional/ya.make b/ydb/tests/functional/ya.make
index 955c45a1f8..7dc41aefdc 100644
--- a/ydb/tests/functional/ya.make
+++ b/ydb/tests/functional/ya.make
@@ -31,7 +31,6 @@ RECURSE(
suite_tests
tpc
tenants
- transfer
ttl
wardens
ydb_cli
diff --git a/ydb/tests/functional/ydb_cli/canondata/result.json b/ydb/tests/functional/ydb_cli/canondata/result.json
index 8a050cd0f4..7ce7a736df 100644
--- a/ydb/tests/functional/ydb_cli/canondata/result.json
+++ b/ydb/tests/functional/ydb_cli/canondata/result.json
@@ -611,6 +611,9 @@
"test_ydb_sql.TestExecuteSqlWithParamsFromStdin.test_stdin_par_tsv[sql]": {
"uri": "file://test_ydb_sql.TestExecuteSqlWithParamsFromStdin.test_stdin_par_tsv_sql_/result.output"
},
+ "test_ydb_sql.TestExecuteSqlWithPgSyntax.test_pg_syntax": {
+ "uri": "file://test_ydb_sql.TestExecuteSqlWithPgSyntax.test_pg_syntax/result.output"
+ },
"test_ydb_table.TestExecuteQueryWithFormats.test_data_query_csv": {
"uri": "file://test_ydb_table.TestExecuteQueryWithFormats.test_data_query_csv/result.output"
},
diff --git a/ydb/tests/functional/ydb_cli/canondata/test_ydb_sql.TestExecuteSqlWithPgSyntax.test_pg_syntax/result.output b/ydb/tests/functional/ydb_cli/canondata/test_ydb_sql.TestExecuteSqlWithPgSyntax.test_pg_syntax/result.output
new file mode 100644
index 0000000000..53e755e635
--- /dev/null
+++ b/ydb/tests/functional/ydb_cli/canondata/test_ydb_sql.TestExecuteSqlWithPgSyntax.test_pg_syntax/result.output
@@ -0,0 +1,5 @@
+┌─────┬────────┬─────────────┐
+│ key │ id │ value │
+├─────┼────────┼─────────────┤
+│ "1" │ "1111" │ "\\x6f6e65" │
+└─────┴────────┴─────────────┘
diff --git a/ydb/tests/functional/ydb_cli/test_ydb_recursive_remove.py b/ydb/tests/functional/ydb_cli/test_ydb_recursive_remove.py
index e7ca6ba794..74c2f9a314 100644
--- a/ydb/tests/functional/ydb_cli/test_ydb_recursive_remove.py
+++ b/ydb/tests/functional/ydb_cli/test_ydb_recursive_remove.py
@@ -4,7 +4,11 @@ import pytest
import yatest
-CLUSTER_CONFIG = dict(extra_feature_flags=["enable_external_data_sources"])
+CLUSTER_CONFIG = dict(
+ extra_feature_flags=["enable_external_data_sources"],
+ query_service_config=dict(
+ available_external_data_sources=["ObjectStorage"]
+ ))
def bin_from_env(var):
diff --git a/ydb/tests/functional/ydb_cli/test_ydb_sql.py b/ydb/tests/functional/ydb_cli/test_ydb_sql.py
index 3e86a1bd86..26d8e029dc 100644
--- a/ydb/tests/functional/ydb_cli/test_ydb_sql.py
+++ b/ydb/tests/functional/ydb_cli/test_ydb_sql.py
@@ -664,3 +664,21 @@ class TestExecuteSqlFromStdinWithWideOutput(BaseTestSqlWithDatabase):
script = "SELECT * FROM `{}`;".format(self.table_path)
output = self.execute_ydb_cli_command_with_db(["sql", "-s", script])
return self.canonical_result(output, self.tmp_path)
+
+
+class TestExecuteSqlWithPgSyntax(BaseTestSqlWithDatabase):
+ @classmethod
+ def setup_class(cls):
+ BaseTestSqlWithDatabase.setup_class()
+ cls.session = cls.driver.table_client.session().create()
+
+ @pytest.fixture(autouse=True, scope='function')
+ def init_test(self, tmp_path):
+ self.tmp_path = tmp_path
+ self.table_path = self.tmp_path.name
+ create_table_with_data(self.session, self.root_dir + "/" + self.table_path)
+
+ def test_pg_syntax(self):
+ script = "SELECT * FROM \"{}\" WHERE key = 1;".format(self.table_path)
+ output = self.execute_ydb_cli_command_with_db(["sql", "-s", script, "--syntax", "pg"])
+ return self.canonical_result(output, self.tmp_path)
diff --git a/ydb/tests/library/fixtures/__init__.py_ b/ydb/tests/library/fixtures/__init__.py_
deleted file mode 100644
index 0e4ecceffc..0000000000
--- a/ydb/tests/library/fixtures/__init__.py_
+++ /dev/null
@@ -1,168 +0,0 @@
-# -*- coding: utf-8 -*-
-import contextlib
-import logging
-import os
-
-import pytest
-
-from ydb import Driver, DriverConfig, SessionPool
-
-from ydb.tests.library.common.types import Erasure
-from ydb.tests.library.harness.kikimr_runner import KiKiMR
-from ydb.tests.library.harness.kikimr_config import KikimrConfigGenerator
-from ydb.tests.library.harness.util import LogLevels
-
-logger = logging.getLogger(__name__)
-
-
-DEFAULT_CLUSTER_CONFIG = dict(
- erasure=Erasure.NONE,
- nodes=1,
- additional_log_configs={
- 'FLAT_TX_SCHEMESHARD': LogLevels.DEBUG,
- 'SCHEME_BOARD_POPULATOR': LogLevels.WARN,
- 'SCHEME_BOARD_SUBSCRIBER': LogLevels.WARN,
- 'TX_DATASHARD': LogLevels.DEBUG,
- 'CHANGE_EXCHANGE': LogLevels.DEBUG,
- },
-)
-
-
-@pytest.fixture(scope='module')
-def ydb_cluster_configuration(request):
- conf = getattr(request.module, 'CLUSTER_CONFIG', DEFAULT_CLUSTER_CONFIG)
- return conf
-
-
-@pytest.fixture(scope='module')
-def ydb_configurator(ydb_cluster_configuration):
- return KikimrConfigGenerator(**ydb_cluster_configuration)
-
-
-@pytest.fixture(scope='module')
-def ydb_cluster(ydb_configurator, request):
- module_name = request.module.__name__
-
- logger.info("setup ydb_cluster for %s", module_name)
-
- logger.info("setup ydb_cluster as local")
- cluster = KiKiMR(
- configurator=ydb_configurator,
- )
- cluster.is_local_test = True
-
- cluster.start()
-
- yield cluster
-
- logger.info("destroy ydb_cluster for %s", module_name)
- cluster.stop()
-
-
-@pytest.fixture(scope='module')
-def ydb_root(ydb_cluster):
- return os.path.join("/", ydb_cluster.domain_name)
-
-
-@pytest.fixture(scope='module')
-def ydb_private_client(ydb_cluster):
- return ydb_cluster.client
-
-
-@pytest.fixture(scope='function')
-def ydb_safe_test_name(request):
- return request.node.name.replace("[", "_").replace("]", "_")
-
-
-@contextlib.contextmanager
-def ydb_database_ctx(ydb_cluster, database_path, node_count=1, timeout_seconds=20, storage_pools={'hdd': 1}):
- '''???'''
- assert os.path.abspath(database_path), 'database_path should be an (absolute) path, not a database name'
-
- token = ydb_cluster.config.default_clusteradmin
-
- ydb_cluster.remove_database(database_path, timeout_seconds=timeout_seconds, token=token)
-
- logger.debug("create database %s: create path and declare internals", database_path)
-
- ydb_cluster.create_database(database_path, storage_pool_units_count=storage_pools, timeout_seconds=timeout_seconds, token=token)
-
- logger.debug("create database %s: start nodes and construct internals", database_path)
- database_nodes = ydb_cluster.register_and_start_slots(database_path, node_count)
-
- logger.debug("create database %s: wait construction done", database_path)
- ydb_cluster.wait_tenant_up(database_path, token=token)
-
- logger.debug("create database %s: database up", database_path)
- yield database_path
-
- logger.debug("destroy database %s: remove path and dismantle internals", database_path)
- ydb_cluster.remove_database(database_path, timeout_seconds=timeout_seconds, token=token)
-
- logger.debug("destroy database %s: stop nodes", database_path)
- ydb_cluster.unregister_and_stop_slots(database_nodes)
-
- logger.debug("destroy database %s: database down", database_path)
-
-
-def _ydb_database(cluster, database_path_base, unique_name):
- database = os.path.join(database_path_base, unique_name)
-
- with ydb_database_ctx(cluster, database):
- yield database
-
-@pytest.fixture(scope='function')
-def ydb_database(ydb_cluster, ydb_root, ydb_safe_test_name):
- yield from _ydb_database(ydb_cluster, ydb_root, ydb_safe_test_name)
-
-
-@pytest.fixture(scope='module')
-def ydb_database_module_scope(ydb_cluster, ydb_root, request):
- # make unique database name from the test module name, ensuring that
- # it does not contains the dots
- unique_name = request.module.__name__.split('.')[-1]
- yield from _ydb_database(ydb_cluster, ydb_root, unique_name)
-
-
-@pytest.fixture(scope='module')
-def ydb_endpoint(ydb_cluster):
- return "%s:%s" % (ydb_cluster.nodes[1].host, ydb_cluster.nodes[1].port)
-
-
-@pytest.fixture(scope='function')
-def ydb_client(ydb_endpoint, request):
- def _make_driver(database_path, **kwargs):
- driver_config = DriverConfig(ydb_endpoint, database_path, **kwargs)
- driver = Driver(driver_config)
-
- def stop_driver():
- driver.stop()
-
- request.addfinalizer(stop_driver)
- return driver
-
- return _make_driver
-
-
-@pytest.fixture(scope='function')
-def ydb_client_session(ydb_client, request):
- def _make_pool(database_path, **kwargs):
- driver = ydb_client(database_path, **kwargs)
- pool = SessionPool(driver)
-
- def stop_pool():
- pool.stop()
-
- request.addfinalizer(stop_pool)
- return pool
-
- return _make_pool
-
-
-# possible replacement for both ydb_client and ydb_client_session
-# @pytest.fixture(scope='function')
-# def ydb_database_and_client(ydb_database, ydb_endpoint):
-# database_path = ydb_database
-# with Driver(DriverConfig(ydb_endpoint, database_path)) as driver:
-# with SessionPool(driver) as pool:
-# yield database_path, pool
diff --git a/ydb/tests/library/harness/kikimr_config.py b/ydb/tests/library/harness/kikimr_config.py
index 711e14856d..a2c0d5b37f 100644
--- a/ydb/tests/library/harness/kikimr_config.py
+++ b/ydb/tests/library/harness/kikimr_config.py
@@ -163,6 +163,8 @@ class KikimrConfigGenerator(object):
separate_node_configs=False,
default_clusteradmin=None,
enable_resource_pools=None,
+ grouped_memory_limiter_config=None,
+ query_service_config=None,
):
if extra_feature_flags is None:
extra_feature_flags = []
@@ -258,6 +260,16 @@ class KikimrConfigGenerator(object):
self.yaml_config["local_pg_wire_config"] = {}
self.yaml_config["local_pg_wire_config"]["listening_port"] = os.getenv('PGWIRE_LISTENING_PORT')
+ # dirty hack for internal ydbd flavour
+ if "cert" in self.get_binary_path(0):
+ # Hardcoded feature flags. Should be hardcoded in binary itself
+ self.yaml_config["feature_flags"]["enable_strict_acl_check"] = True
+ self.yaml_config["feature_flags"]["enable_strict_user_management"] = True
+ self.yaml_config["feature_flags"]["enable_database_admin"] = True
+ self.yaml_config["feature_flags"]["database_yaml_config_allowed"] = True
+ self.yaml_config["feature_flags"]["enable_resource_pools"] = False
+ self.yaml_config["feature_flags"]["check_database_access_permission"] = True
+
self.yaml_config["feature_flags"]["enable_public_api_external_blobs"] = enable_public_api_external_blobs
# for faster shutdown: there is no reason to wait while tablets are drained before whole cluster is stopping
@@ -354,6 +366,12 @@ class KikimrConfigGenerator(object):
if column_shard_config:
self.yaml_config["column_shard_config"] = column_shard_config
+ if query_service_config:
+ self.yaml_config["query_service_config"] = query_service_config
+
+ if grouped_memory_limiter_config:
+ self.yaml_config["grouped_memory_limiter_config"] = grouped_memory_limiter_config
+
self.__build()
if self.grpc_ssl_enable:
diff --git a/ydb/tests/library/harness/resources/default_yaml.yml b/ydb/tests/library/harness/resources/default_yaml.yml
index 96dad3c6b3..fc15717150 100644
--- a/ydb/tests/library/harness/resources/default_yaml.yml
+++ b/ydb/tests/library/harness/resources/default_yaml.yml
@@ -260,16 +260,3 @@ federated_query_config:
uri: ""
pinger:
ping_period: "30s"
-query_service_config:
- available_external_data_sources:
- - ObjectStorage
- - ClickHouse
- - PostgreSQL
- - MySQL
- - Ydb
- - YT
- - Greenplum
- - MsSQLServer
- - Oracle
- - Logging
- - Solomon
diff --git a/ydb/tests/library/ut/ya.make b/ydb/tests/library/ut/ya.make
index c5ed005960..7a314eb1f9 100644
--- a/ydb/tests/library/ut/ya.make
+++ b/ydb/tests/library/ut/ya.make
@@ -1,5 +1,11 @@
PY3TEST()
+ENV(YDB_DRIVER_BINARY="ydb/apps/ydbd/ydbd")
+
+DEPENDS(
+ ydb/apps/ydbd
+)
+
PEERDIR(
ydb/tests/library
yql/essentials/providers/common/proto
diff --git a/ydb/tests/olap/load/lib/tpch.py b/ydb/tests/olap/load/lib/tpch.py
index 1f76724037..8b64960dc5 100644
--- a/ydb/tests/olap/load/lib/tpch.py
+++ b/ydb/tests/olap/load/lib/tpch.py
@@ -91,3 +91,17 @@ class TestTpch10000(TpchSuiteBase):
iterations: int = 1
check_canonical: bool = CheckCanonicalPolicy.WARNING
timeout = max(TpchSuiteBase.timeout, 14400.)
+
+
+class TestTpch30000(TpchSuiteBase):
+ scale: int = 30000
+ iterations: int = 1
+ check_canonical: bool = CheckCanonicalPolicy.WARNING
+ timeout = max(TpchSuiteBase.timeout, 14400.)
+
+
+class TestTpch100000(TpchSuiteBase):
+ scale: int = 100000
+ iterations: int = 1
+ check_canonical: bool = CheckCanonicalPolicy.WARNING
+ timeout = max(TpchSuiteBase.timeout, 14400.)
diff --git a/ydb/tests/olap/oom/overlapping_portions.py b/ydb/tests/olap/oom/overlapping_portions.py
new file mode 100644
index 0000000000..08fe8f2653
--- /dev/null
+++ b/ydb/tests/olap/oom/overlapping_portions.py
@@ -0,0 +1,104 @@
+import datetime
+import logging
+import os
+import pytest
+import random
+import yatest.common
+import ydb
+
+from ydb.tests.library.harness.kikimr_config import KikimrConfigGenerator
+from ydb.tests.library.harness.kikimr_runner import KiKiMR
+from ydb.tests.library.test_meta import link_test_case
+from ydb.tests.olap.common.ydb_client import YdbClient
+
+logger = logging.getLogger(__name__)
+
+
+class TestOverlappingPortions(object):
+ test_name = "overlapping_portions"
+
+ @classmethod
+ def setup_class(cls):
+ ydb_path = yatest.common.build_path(os.environ.get("YDB_DRIVER_BINARY"))
+ logger.info(yatest.common.execute([ydb_path, "-V"], wait=True).stdout.decode("utf-8"))
+ config = KikimrConfigGenerator(
+ column_shard_config={"compaction_enabled": False},
+ grouped_memory_limiter_config={
+ "enabled": True,
+ "memory_limit": 100 * 1024 * 1024,
+ "hard_memory_limit": 100 * 1024 * 1024,
+ },
+ )
+ cls.cluster = KiKiMR(config)
+ cls.cluster.start()
+ node = cls.cluster.nodes[1]
+ cls.ydb_client = YdbClient(database=f"/{config.domain_name}", endpoint=f"grpc://{node.host}:{node.port}")
+ cls.ydb_client.wait_connection()
+
+ def write_data(
+ self,
+ table: str,
+ timestamp_from_ms: int,
+ rows: int,
+ value: int = 1,
+ ):
+ column_types = ydb.BulkUpsertColumns()
+ column_types.add_column("ts", ydb.PrimitiveType.Timestamp)
+ column_types.add_column("s", ydb.PrimitiveType.String)
+ column_types.add_column("val", ydb.PrimitiveType.Uint64)
+
+ chunk_size = 100
+ while rows:
+ current_chunk_size = min(chunk_size, rows)
+ data = [
+ {
+ "ts": timestamp_from_ms + i,
+ "s": random.randbytes(1024 * 10),
+ "val": value,
+ }
+ for i in range(current_chunk_size)
+ ]
+ self.ydb_client.bulk_upsert(
+ table,
+ column_types,
+ data,
+ )
+ timestamp_from_ms += current_chunk_size
+ rows -= current_chunk_size
+ assert rows >= 0
+
+ def write_and_check(self, table_path, count):
+ ts_start = int(datetime.datetime.now().timestamp() * 1000000)
+ for value in range(count):
+ self.write_data(table_path, ts_start, 100, value)
+
+ self.ydb_client.query(
+ f"""
+ select * from `{table_path}`
+ """
+ )
+
+ @link_test_case("#15512")
+ def test(self):
+ test_dir = f"{self.ydb_client.database}/{self.test_name}"
+ table_path = f"{test_dir}/table"
+
+ self.ydb_client.query(
+ f"""
+ CREATE TABLE `{table_path}` (
+ ts Timestamp NOT NULL,
+ s String,
+ val Uint64,
+ PRIMARY KEY(ts),
+ )
+ WITH (
+ STORE = COLUMN,
+ AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1
+ )
+ """
+ )
+
+ self.write_and_check(table_path, 1)
+
+ with pytest.raises(ydb.issues.GenericError, match=r'.*cannot allocate memory.*'):
+ self.write_and_check(table_path, 300)
diff --git a/ydb/tests/olap/oom/ya.make b/ydb/tests/olap/oom/ya.make
new file mode 100644
index 0000000000..6b340046c1
--- /dev/null
+++ b/ydb/tests/olap/oom/ya.make
@@ -0,0 +1,26 @@
+PY3TEST()
+ENV(YDB_DRIVER_BINARY="ydb/apps/ydbd/ydbd")
+
+FORK_TEST_FILES()
+
+TEST_SRCS(
+ overlapping_portions.py
+)
+
+SIZE(MEDIUM)
+
+PEERDIR(
+ ydb/tests/library
+ ydb/tests/library/test_meta
+ ydb/public/sdk/python
+ ydb/public/sdk/python/enable_v3_new_behavior
+ library/recipes/common
+ ydb/tests/olap/common
+)
+
+DEPENDS(
+ ydb/apps/ydbd
+)
+
+END()
+
diff --git a/ydb/tests/olap/scenario/test_read_update_write_load.py b/ydb/tests/olap/scenario/test_read_update_write_load.py
index d30f076cb6..6aa9d05cc3 100644
--- a/ydb/tests/olap/scenario/test_read_update_write_load.py
+++ b/ydb/tests/olap/scenario/test_read_update_write_load.py
@@ -84,9 +84,9 @@ class TestReadUpdateWriteLoad(BaseTestSet):
def scenario_read_update_write_load(self, ctx: TestContext):
sth = ScenarioTestHelper(ctx)
- table_size_mib = int(get_external_param("table_size_mib", "64"))
+ table_size_mib = int(get_external_param("table_size_mib", "1"))
- assert table_size_mib >= 64, "invalid table_size_mib parameter"
+ assert table_size_mib >= 1, "invalid table_size_mib parameter"
sth.execute_scheme_query(CreateTable(self.big_table_name).with_schema(self.big_table_schema))
@@ -97,8 +97,6 @@ class TestReadUpdateWriteLoad(BaseTestSet):
print("Step 1. only write", file=sys.stderr)
- math.ceil(table_size_mib * 1024 / 10 / 64)
-
upsert_only_threads: TestThreads = TestThreads()
for i in range(64):
upsert_only_threads.append(
diff --git a/ydb/tests/olap/test_log_scenario.py b/ydb/tests/olap/test_log_scenario.py
new file mode 100644
index 0000000000..9921b14ffe
--- /dev/null
+++ b/ydb/tests/olap/test_log_scenario.py
@@ -0,0 +1,151 @@
+import datetime
+import os
+import random
+
+import logging
+import time
+import yatest.common
+
+from ydb.tests.olap.lib.utils import get_external_param
+from ydb.tests.library.harness.kikimr_config import KikimrConfigGenerator
+from ydb.tests.library.harness.kikimr_runner import KiKiMR
+from ydb.tests.olap.common.thread_helper import TestThread
+from ydb.tests.olap.common.ydb_client import YdbClient
+
+from enum import Enum
+
+
+logger = logging.getLogger(__name__)
+
+
+class YdbWorkloadLog:
+ def __init__(self, endpoint: str, database: str, table_name: str):
+ self.path: str = yatest.common.binary_path(os.environ["YDB_CLI_BINARY"])
+ self.endpoint: str = endpoint
+ self.database: str = database
+ self.begin_command: list[str] = [self.path, "-e", self.endpoint, "-d", self.database, "workload", "log", "--path", table_name]
+
+ def _call(self, command: list[str], wait=False):
+        logging.info(f"YdbWorkloadLog execute {' '.join(command)} with wait = {wait}")
+ yatest.common.execute(command=command, wait=wait)
+
+ def create_table(self, table_name: str):
+ logging.info('YdbWorkloadLog init table')
+ command = self.begin_command + ["init", "--path", table_name, "--store", "column"]
+ self._call(command=command, wait=True)
+
+ def _insert_rows(self, operation_name: str, seconds: int, threads: int, rows: int, wait: bool):
+ logging.info(f'YdbWorkloadLog {operation_name}')
+ command = self.begin_command + [
+ "run",
+ str(operation_name),
+ "--seconds",
+ str(seconds),
+ "--threads",
+ str(threads),
+ "--rows",
+ str(rows),
+ "--timestamp_deviation",
+ "180"
+ ]
+ self._call(command=command, wait=wait)
+
+ # seconds - Seconds to run workload
+ # threads - Number of parallel threads in workload
+ # rows - Number of rows to upsert
+ def bulk_upsert(self, seconds: int, threads: int, rows: int, wait: bool = False):
+ self._insert_rows(operation_name="bulk_upsert", seconds=seconds, threads=threads, rows=rows, wait=wait)
+
+ def upsert(self, seconds: int, threads: int, rows: int, wait: bool = False):
+ self._insert_rows(operation_name="upsert", seconds=seconds, threads=threads, rows=rows, wait=wait)
+
+ def insert(self, seconds: int, threads: int, rows: int, wait: bool = False):
+ self._insert_rows(operation_name="insert", seconds=seconds, threads=threads, rows=rows, wait=wait)
+
+ def __del__(self):
+ command: list[str] = self.begin_command + ["clean"]
+ try:
+ yatest.common.execute(command=command, wait=True)
+ except Exception:
+ pass
+
+
+class TestLogScenario(object):
+ class InsertMode(Enum):
+ BULK_UPSERT = 1
+ INSERT = 2
+ UPSERT = 3
+
+ @classmethod
+ def setup_class(cls):
+ cls._setup_ydb()
+ pass
+
+ @classmethod
+ def teardown_class(cls):
+ cls.ydb_client.stop()
+ cls.cluster.stop()
+
+ @classmethod
+ def _setup_ydb(cls):
+ ydb_path = yatest.common.build_path(os.environ.get("YDB_DRIVER_BINARY"))
+ logger.info(yatest.common.execute([ydb_path, "-V"], wait=True).stdout.decode("utf-8"))
+ config = KikimrConfigGenerator(
+ extra_feature_flags={
+ "enable_immediate_writing_on_bulk_upsert": True
+ },
+ )
+ cls.cluster = KiKiMR(config)
+ cls.cluster.start()
+ node = cls.cluster.nodes[1]
+ cls.ydb_client = YdbClient(endpoint=f"grpc://{node.host}:{node.port}", database=f"/{config.domain_name}")
+ cls.ydb_client.wait_connection()
+
+ def get_row_count(self) -> int:
+ return self.ydb_client.query(f"select count(*) as Rows from `{self.table_name}`")[0].rows[0]["Rows"]
+
+ def aggregation_query(self, duration: datetime.timedelta):
+        deadline: datetime.datetime = datetime.datetime.now() + duration
+ while datetime.datetime.now() < deadline:
+ hours: int = random.randint(1, 10)
+ self.ydb_client.query(f"SELECT COUNT(*) FROM `{self.table_name}` ")
+ self.ydb_client.query(f"SELECT * FROM `{self.table_name}` WHERE timestamp < CurrentUtcTimestamp() - DateTime::IntervalFromHours({hours})")
+ self.ydb_client.query(f"SELECT COUNT(*) FROM `{self.table_name}` WHERE timestamp < CurrentUtcTimestamp() - DateTime::IntervalFromHours({hours})")
+ self.ydb_client.query(f"SELECT COUNT(*) FROM `{self.table_name}` WHERE " +
+ f"(timestamp >= CurrentUtcTimestamp() - DateTime::IntervalFromHours({hours + 1})) AND " +
+ f"(timestamp <= CurrentUtcTimestamp() - DateTime::IntervalFromHours({hours}))")
+
+ def check_insert(self, duration: int):
+ prev_count: int = self.get_row_count()
+ time.sleep(duration)
+ current_count: int = self.get_row_count()
+ logging.info(f'check insert: {current_count} {prev_count}')
+ assert current_count != prev_count
+
+ def test(self):
+ """As per https://github.com/ydb-platform/ydb/issues/13530"""
+
+ wait_time: int = int(get_external_param("wait_seconds", "30"))
+ self.table_name: str = "log"
+
+ ydb_workload: YdbWorkloadLog = YdbWorkloadLog(endpoint=self.ydb_client.endpoint, database=self.ydb_client.database, table_name=self.table_name)
+ ydb_workload.create_table(self.table_name)
+ ydb_workload.bulk_upsert(seconds=10, threads=10, rows=500, wait=True)
+ logging.info(f"Count rows after insert {self.get_row_count()} before wait")
+
+ assert self.get_row_count() != 0
+
+ threads: list[TestThread] = []
+ threads.append(TestThread(target=ydb_workload.bulk_upsert, args=[wait_time, 10, 500, True]))
+ threads.append(TestThread(target=ydb_workload.insert, args=[wait_time, 10, 500, True]))
+ threads.append(TestThread(target=ydb_workload.upsert, args=[wait_time, 10, 500, True]))
+
+ for _ in range(10):
+ threads.append(TestThread(target=self.aggregation_query, args=[datetime.timedelta(seconds=int(wait_time))]))
+ threads.append(TestThread(target=self.check_insert, args=[wait_time + 10]))
+
+ for thread in threads:
+ thread.start()
+
+ for thread in threads:
+ thread.join()
diff --git a/ydb/tests/olap/ttl_tiering/base.py b/ydb/tests/olap/ttl_tiering/base.py
index 3a31d2cf83..e0ab5e034d 100644
--- a/ydb/tests/olap/ttl_tiering/base.py
+++ b/ydb/tests/olap/ttl_tiering/base.py
@@ -46,6 +46,9 @@ class TllTieringTestBase(object):
"TX_COLUMNSHARD_ACTUALIZATION": LogLevels.TRACE,
"TX_COLUMNSHARD_BLOBS_TIER": LogLevels.DEBUG,
},
+ query_service_config=dict(
+ available_external_data_sources=["ObjectStorage"]
+ )
)
cls.cluster = KiKiMR(config)
cls.cluster.start()
diff --git a/ydb/tests/olap/ya.make b/ydb/tests/olap/ya.make
index 2cf6448aff..147450c033 100644
--- a/ydb/tests/olap/ya.make
+++ b/ydb/tests/olap/ya.make
@@ -5,6 +5,8 @@ PY3TEST()
TEST_SRCS(
test_quota_exhaustion.py
+ test_log_scenario.py
+ zip_bomb.py
)
IF (SANITIZER_TYPE OR WITH_VALGRIND)
@@ -20,8 +22,10 @@ PY3TEST()
)
PEERDIR(
- ydb/tests/library
- ydb/tests/library/test_meta
+ ydb/tests/library
+ ydb/tests/library/test_meta
+ ydb/tests/olap/common
+ ydb/tests/olap/lib
)
END()
@@ -32,6 +36,7 @@ RECURSE(
high_load
lib
load
+ oom
s3_import
scenario
ttl_tiering
diff --git a/ydb/tests/olap/zip_bomb.py b/ydb/tests/olap/zip_bomb.py
new file mode 100644
index 0000000000..a124c3a7ab
--- /dev/null
+++ b/ydb/tests/olap/zip_bomb.py
@@ -0,0 +1,125 @@
+import sys
+import time
+
+import ydb
+from threading import Thread
+from ydb.tests.library.harness.kikimr_config import KikimrConfigGenerator
+from ydb.tests.library.harness.kikimr_runner import KiKiMR
+from ydb.tests.library.harness.util import LogLevels
+
+ROWS_CHUNK_SIZE = 100000
+ROWS_CHUNKS_COUNT = 2
+
+
+class TestZipBomb(object):
+ @classmethod
+ def setup_class(cls):
+ cls.cluster = KiKiMR(KikimrConfigGenerator(
+ column_shard_config={},
+ additional_log_configs={'MEMORY_CONTROLLER': LogLevels.INFO, "TX_COLUMNSHARD": LogLevels.DEBUG},
+ extra_feature_flags={'enable_write_portions_on_insert': True},
+ static_pdisk_size=10 * 1024 * 1024,
+ dynamic_pdisk_size=5 * 1024 * 1024
+ ))
+ cls.cluster.start()
+
+ @classmethod
+ def teardown_class(cls):
+ cls.cluster.stop()
+
+ def make_session(self):
+ driver = ydb.Driver(endpoint=f'grpc://localhost:{self.cluster.nodes[1].grpc_port}', database=self.database_name)
+ session = ydb.QuerySessionPool(driver)
+ driver.wait(5, fail_fast=True)
+ return session
+
+ def create_test_str_table(self, session, table):
+ return session.execute_with_retries(f"""
+ CREATE TABLE `{table}` (
+ k Int32 NOT NULL,
+ v1 String,
+ v2 String,
+ v3 String,
+ v4 String,
+ v5 String,
+ PRIMARY KEY (k)
+ ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT=1)
+ """)
+
+ def upsert_test_str_chunk(self, session, table, chunk_id, retries=10):
+ long_string = 'x' * 5000
+ return session.execute_with_retries(f"""
+ $n = {ROWS_CHUNK_SIZE};
+ $values_list = ListReplicate(42ul, $n);
+ $value = '{long_string}';
+ $rows_list = ListFoldMap($values_list, {chunk_id * ROWS_CHUNK_SIZE}, ($val, $i)->((<|k:$i, v1:$value||'1', v2:$value||'2', v3:$value||'3', v4:$value||'4', v5:$value||'5'|>, $i + 1)));
+
+ UPSERT INTO `{table}`
+ SELECT * FROM AS_TABLE($rows_list);
+ """, None, ydb.retries.RetrySettings(max_retries=retries))
+
+ def upsert_str(self, session, table):
+ for i in range(ROWS_CHUNKS_COUNT):
+ res = self.upsert_test_str_chunk(session, table, i, retries=0)
+ print(f"upsert #{i} ok, result:", res, file=sys.stderr)
+
+ def select(self, table, session):
+ result = session.execute_with_retries("""
+ SELECT
+ MAX(v1),
+ MAX(v2),
+ MAX(v3),
+ MAX(v4),
+ MAX(v5)
+ FROM huge
+ """)
+ print(result[0].rows, file=sys.stderr)
+
+ def get_rss(self, pid):
+ with open(f"/proc/{pid}/status", "r") as f:
+ for line in f:
+ if line.startswith("RssAnon:"):
+ return int(line.split()[1])
+
+ def watch_rss(self, pid, rss):
+ maxrss = 0
+ try:
+ while rss[1] == 0:
+ rss_kb = self.get_rss(pid)
+ if rss_kb > maxrss:
+ maxrss = rss_kb
+ time.sleep(1)
+ except FileNotFoundError:
+ return
+ rss[0] = maxrss
+
+ def test(self):
+ """As per https://github.com/ydb-platform/ydb/issues/13529"""
+ pid = self.cluster.nodes[1].pid
+ maxrss = [0, 0]
+ watch_thread = Thread(target=self.watch_rss, args=[pid, maxrss])
+ watch_thread.start()
+ print('Pid {}'.format(pid), file=sys.stderr)
+ self.database_name = '/Root'
+ session = self.make_session()
+
+ # Overflow the database
+ self.create_test_str_table(session, 'huge')
+ self.upsert_str(session, 'huge')
+ rss = self.get_rss(pid)
+ print('Rss after upsert {}'.format(rss), file=sys.stderr)
+ threads = []
+ for c in range(20):
+ thread = Thread(target=self.select, args=['huge', session])
+ threads.append(thread)
+
+ for thread in threads:
+ thread.start()
+
+ for thread in threads:
+ thread.join()
+
+ maxrss[1] = 1
+ watch_thread.join()
+        print('Max rss {}'.format(maxrss[0]), file=sys.stderr)
+ assert maxrss[0] < 12 * 1024 * 1024, "Too high memory consumption"
diff --git a/ydb/tests/tools/fqrun/src/fq_runner.cpp b/ydb/tests/tools/fqrun/src/fq_runner.cpp
index db57e99600..f54ad05e6d 100644
--- a/ydb/tests/tools/fqrun/src/fq_runner.cpp
+++ b/ydb/tests/tools/fqrun/src/fq_runner.cpp
@@ -220,6 +220,7 @@ private:
ast = CanonizeAstLogicalId(ast);
}
Options.AstOutput->Write(ast);
+ Options.AstOutput->Flush();
}
void PrintQueryPlan(TString plan) const {
@@ -242,6 +243,7 @@ private:
}
Options.PlanOutput->Write(plan);
+ Options.PlanOutput->Flush();
}
private:
diff --git a/ydb/tests/tools/fqrun/src/fq_setup.cpp b/ydb/tests/tools/fqrun/src/fq_setup.cpp
index 54c0e48db2..dc135030cb 100644
--- a/ydb/tests/tools/fqrun/src/fq_setup.cpp
+++ b/ydb/tests/tools/fqrun/src/fq_setup.cpp
@@ -73,7 +73,7 @@ private:
if (Settings.MonitoringEnabled) {
serverSettings.InitKikimrRunConfig();
- serverSettings.SetMonitoringPortOffset(Settings.MonitoringPortOffset, true);
+ serverSettings.SetMonitoringPortOffset(Settings.FirstMonitoringPort, true);
serverSettings.SetNeedStatsCollectors(true);
}
@@ -192,7 +192,7 @@ public:
explicit TImpl(const TFqSetupSettings& settings)
: Settings(settings)
{
- const ui32 grpcPort = Settings.GrpcPort ? Settings.GrpcPort : PortManager.GetPort();
+ const ui32 grpcPort = Settings.FirstGrpcPort ? Settings.FirstGrpcPort : PortManager.GetPort();
InitializeYqlLogger();
InitializeServer(grpcPort);
InitializeFqProxy(grpcPort);
diff --git a/ydb/tests/tools/kqprun/README.md b/ydb/tests/tools/kqprun/README.md
index e014905b5f..56f73238bc 100644
--- a/ydb/tests/tools/kqprun/README.md
+++ b/ydb/tests/tools/kqprun/README.md
@@ -71,14 +71,14 @@ For profiling memory allocations build kqprun with ya make flag `-D PROFILE_MEMO
### Cluster
-* Embedded UI:
+* Embedded UI, flag `-M` means the beginning of the range of ports used for different nodes:
```(bash)
./kqprun -M 32000
```
Monitoring endpoint: http://localhost:32000
-* gRPC endpoint:
+* gRPC endpoint, flag `-G` means the beginning of the range of ports used for different tenants:
```(bash)
./kqprun -G 32000
```
diff --git a/ydb/tests/tools/kqprun/configuration/app_config.conf b/ydb/tests/tools/kqprun/configuration/app_config.conf
index d6cdedab50..84711fee73 100644
--- a/ydb/tests/tools/kqprun/configuration/app_config.conf
+++ b/ydb/tests/tools/kqprun/configuration/app_config.conf
@@ -55,6 +55,8 @@ FeatureFlags {
EnableExternalSourceSchemaInference: true
EnableTempTables: true
EnableReplaceIfExistsForExternalEntities: true
+ EnableResourcePools: true
+ EnableResourcePoolsOnServerless: true
EnableResourcePoolsCounters: true
}
diff --git a/ydb/tests/tools/kqprun/runlib/application.cpp b/ydb/tests/tools/kqprun/runlib/application.cpp
index dac91ddb9d..d4524e99cf 100644
--- a/ydb/tests/tools/kqprun/runlib/application.cpp
+++ b/ydb/tests/tools/kqprun/runlib/application.cpp
@@ -81,21 +81,21 @@ void TMainBase::RegisterKikimrOptions(NLastGetopt::TOpts& options, TServerSettin
.RequiredArgument("file")
.StoreMappedResultT<TString>(&ProfileAllocationsOutput, &GetDefaultOutput);
- options.AddLongOption('M', "monitoring", "Embedded UI port (use 0 to start on random free port), if used will be run as daemon")
+ options.AddLongOption('M', "monitoring", "Embedded UI ports range start (use 0 to start on random free port), if used will be run as daemon")
.RequiredArgument("uint")
.Handler1([&settings](const NLastGetopt::TOptsParser* option) {
if (const TString& port = option->CurVal()) {
settings.MonitoringEnabled = true;
- settings.MonitoringPortOffset = FromString(port);
+ settings.FirstMonitoringPort = FromString(port);
}
});
- options.AddLongOption('G', "grpc", "gRPC port (use 0 to start on random free port), if used will be run as daemon")
+ options.AddLongOption('G', "grpc", "gRPC ports range start (use 0 to start on random free port), if used will be run as daemon")
.RequiredArgument("uint")
.Handler1([&settings](const NLastGetopt::TOptsParser* option) {
if (const TString& port = option->CurVal()) {
settings.GrpcEnabled = true;
- settings.GrpcPort = FromString(port);
+ settings.FirstGrpcPort = FromString(port);
}
});
diff --git a/ydb/tests/tools/kqprun/runlib/settings.h b/ydb/tests/tools/kqprun/runlib/settings.h
index 874ec2b21e..dbb4d25ccd 100644
--- a/ydb/tests/tools/kqprun/runlib/settings.h
+++ b/ydb/tests/tools/kqprun/runlib/settings.h
@@ -18,10 +18,10 @@ struct TServerSettings {
TString DomainName = "Root";
bool MonitoringEnabled = false;
- ui16 MonitoringPortOffset = 0;
+ ui16 FirstMonitoringPort = 0;
bool GrpcEnabled = false;
- ui16 GrpcPort = 0;
+ ui16 FirstGrpcPort = 0;
TString LogOutputFile;
};
diff --git a/ydb/tests/tools/kqprun/runlib/utils.cpp b/ydb/tests/tools/kqprun/runlib/utils.cpp
index c507374b4c..716c114cb3 100644
--- a/ydb/tests/tools/kqprun/runlib/utils.cpp
+++ b/ydb/tests/tools/kqprun/runlib/utils.cpp
@@ -93,6 +93,7 @@ void TStatsPrinter::PrintPlan(const TString& plan, IOutputStream& output) const
NYdb::NConsoleClient::TQueryPlanPrinter printer(PlanFormat, true, output);
printer.Print(plan);
+ output.Flush();
}
void TStatsPrinter::PrintInProgressStatistics(const TString& plan, IOutputStream& output) const {
diff --git a/ydb/tests/tools/kqprun/src/kqp_runner.cpp b/ydb/tests/tools/kqprun/src/kqp_runner.cpp
index 4a385d7737..c2eda1a3e6 100644
--- a/ydb/tests/tools/kqprun/src/kqp_runner.cpp
+++ b/ydb/tests/tools/kqprun/src/kqp_runner.cpp
@@ -273,6 +273,7 @@ private:
Cout << CoutColors_.Cyan() << "Writing scheme query ast" << CoutColors_.Default() << Endl;
}
Options_.SchemeQueryAstOutput->Write(ast);
+ Options_.SchemeQueryAstOutput->Flush();
}
}
@@ -282,6 +283,7 @@ private:
Cout << CoutColors_.Cyan() << "Writing script query ast" << CoutColors_.Default() << Endl;
}
output->Write(ast);
+ output->Flush();
}
}
diff --git a/ydb/tests/tools/kqprun/src/ydb_setup.cpp b/ydb/tests/tools/kqprun/src/ydb_setup.cpp
index 004edb6827..2d6e3aad58 100644
--- a/ydb/tests/tools/kqprun/src/ydb_setup.cpp
+++ b/ydb/tests/tools/kqprun/src/ydb_setup.cpp
@@ -140,6 +140,25 @@ class TYdbSetup::TImpl {
using EVerbose = TYdbSetupSettings::EVerbose;
using EHealthCheck = TYdbSetupSettings::EHealthCheck;
+ class TPortGenerator {
+ public:
+ TPortGenerator(TPortManager& portManager, ui32 firstPort)
+ : PortManager_(portManager)
+ , Port_(firstPort)
+ {}
+
+ ui32 GetPort() {
+ if (!Port_) {
+ return PortManager_.GetPort();
+ }
+ return Port_++;
+ }
+
+ private:
+ TPortManager& PortManager_;
+ ui32 Port_;
+ };
+
private:
TAutoPtr<TLogBackend> CreateLogBackend() const {
if (Settings_.LogOutputFile) {
@@ -230,7 +249,9 @@ private:
serverSettings.SetEnableMockOnSingleNode(!Settings_.DisableDiskMock && !Settings_.PDisksPath);
serverSettings.SetCustomDiskParams(storage);
- serverSettings.SetStorageGeneration(StorageMeta_.GetStorageGeneration());
+
+ const auto storageGeneration = StorageMeta_.GetStorageGeneration();
+ serverSettings.SetStorageGeneration(storageGeneration, storageGeneration > 0);
}
NKikimr::Tests::TServerSettings GetServerSettings(ui32 grpcPort) {
@@ -239,7 +260,7 @@ private:
NKikimr::Tests::TServerSettings serverSettings(msgBusPort, Settings_.AppConfig.GetAuthConfig(), Settings_.AppConfig.GetPQConfig());
serverSettings.SetNodeCount(Settings_.NodeCount);
- serverSettings.SetDomainName(Settings_.DomainName);
+ serverSettings.SetDomainName(TString(NKikimr::ExtractDomain(NKikimr::CanonizePath(Settings_.DomainName))));
serverSettings.SetAppConfig(Settings_.AppConfig);
serverSettings.SetFeatureFlags(Settings_.AppConfig.GetFeatureFlags());
serverSettings.SetControls(Settings_.AppConfig.GetImmediateControlsConfig());
@@ -264,7 +285,7 @@ private:
if (Settings_.MonitoringEnabled) {
serverSettings.InitKikimrRunConfig();
- serverSettings.SetMonitoringPortOffset(Settings_.MonitoringPortOffset, true);
+ serverSettings.SetMonitoringPortOffset(Settings_.FirstMonitoringPort, true);
serverSettings.SetNeedStatsCollectors(true);
}
@@ -279,7 +300,7 @@ private:
ui32 dynNodesCount = 0;
for (const auto& [tenantPath, tenantInfo] : Settings_.Tenants) {
if (tenantInfo.GetType() != TStorageMeta::TTenant::SERVERLESS) {
- serverSettings.AddStoragePoolType(tenantPath);
+ serverSettings.AddStoragePool(tenantPath, TStringBuilder() << GetTenantPath(tenantPath) << ":" << tenantPath);
dynNodesCount += tenantInfo.GetNodesCount();
}
}
@@ -288,7 +309,7 @@ private:
return serverSettings;
}
- void CreateTenant(Ydb::Cms::CreateDatabaseRequest&& request, const TString& relativePath, const TString& type, TStorageMeta::TTenant tenantInfo) {
+ void CreateTenant(Ydb::Cms::CreateDatabaseRequest&& request, const TString& relativePath, const TString& type, TStorageMeta::TTenant tenantInfo, ui32 grpcPort) {
const auto absolutePath = request.path();
const auto [it, inserted] = StorageMeta_.MutableTenants()->emplace(relativePath, tenantInfo);
if (inserted || it->second.GetCreationInProgress()) {
@@ -322,10 +343,14 @@ private:
}
}
- if (Settings_.MonitoringEnabled) {
- ui32 nodeIndex = GetNodeIndexForDatabase(absolutePath);
- NActors::TActorId edgeActor = GetRuntime()->AllocateEdgeActor(nodeIndex);
- GetRuntime()->Register(NKikimr::CreateBoardPublishActor(NKikimr::MakeEndpointsBoardPath(absolutePath), "", edgeActor, 0, true), nodeIndex, GetRuntime()->GetAppData(nodeIndex).UserPoolId);
+ if (tenantInfo.GetType() != TStorageMeta::TTenant::SERVERLESS) {
+ if (Settings_.GrpcEnabled) {
+ Server_->EnableGRpc(grpcPort, GetNodeIndexForDatabase(absolutePath), absolutePath);
+ } else if (Settings_.MonitoringEnabled) {
+ ui32 nodeIndex = GetNodeIndexForDatabase(absolutePath);
+ NActors::TActorId edgeActor = GetRuntime()->AllocateEdgeActor(nodeIndex);
+ GetRuntime()->Register(NKikimr::CreateBoardPublishActor(NKikimr::MakeEndpointsBoardPath(absolutePath), "", edgeActor, 0, true), nodeIndex, GetRuntime()->GetAppData(nodeIndex).UserPoolId);
+ }
}
}
@@ -334,7 +359,7 @@ private:
storage->set_count(1);
}
- void CreateTenants() {
+ void CreateTenants(TPortGenerator& grpcPortGen) {
std::set<TString> sharedTenants;
std::map<TString, TStorageMeta::TTenant> serverlessTenants;
for (const auto& [tenantPath, tenantInfo] : Settings_.Tenants) {
@@ -344,13 +369,13 @@ private:
switch (tenantInfo.GetType()) {
case TStorageMeta::TTenant::DEDICATED:
AddTenantStoragePool(request.mutable_resources()->add_storage_units(), tenantPath);
- CreateTenant(std::move(request), tenantPath, "dedicated", tenantInfo);
+ CreateTenant(std::move(request), tenantPath, "dedicated", tenantInfo, grpcPortGen.GetPort());
break;
case TStorageMeta::TTenant::SHARED:
sharedTenants.emplace(tenantPath);
AddTenantStoragePool(request.mutable_shared_resources()->add_storage_units(), tenantPath);
- CreateTenant(std::move(request), tenantPath, "shared", tenantInfo);
+ CreateTenant(std::move(request), tenantPath, "shared", tenantInfo, grpcPortGen.GetPort());
break;
case TStorageMeta::TTenant::SERVERLESS:
@@ -378,12 +403,14 @@ private:
request.set_path(GetTenantPath(tenantPath));
request.mutable_serverless_resources()->set_shared_database_path(GetTenantPath(tenantInfo.GetSharedTenant()));
ServerlessToShared_[request.path()] = request.serverless_resources().shared_database_path();
- CreateTenant(std::move(request), tenantPath, "serverless", tenantInfo);
+ CreateTenant(std::move(request), tenantPath, "serverless", tenantInfo, 0);
}
}
- void InitializeServer(ui32 grpcPort) {
- NKikimr::Tests::TServerSettings serverSettings = GetServerSettings(grpcPort);
+ void InitializeServer() {
+ TPortGenerator grpcPortGen(PortManager_, Settings_.FirstGrpcPort);
+ const ui32 domainGrpcPort = grpcPortGen.GetPort();
+ NKikimr::Tests::TServerSettings serverSettings = GetServerSettings(domainGrpcPort);
Server_ = MakeIntrusive<NKikimr::Tests::TServer>(serverSettings);
@@ -393,14 +420,14 @@ private:
Server_->GetRuntime()->SetDispatchTimeout(TDuration::Max());
if (Settings_.GrpcEnabled) {
- Server_->EnableGRpc(grpcPort);
+ Server_->EnableGRpc(domainGrpcPort);
}
Client_ = MakeHolder<NKikimr::Tests::TClient>(serverSettings);
Client_->InitRootScheme();
Tenants_ = MakeHolder<NKikimr::Tests::TTenants>(Server_);
- CreateTenants();
+ CreateTenants(grpcPortGen);
}
void InitializeYqlLogger() {
@@ -452,10 +479,8 @@ public:
: Settings_(settings)
, CoutColors_(NColorizer::AutoColors(Cout))
{
- const ui32 grpcPort = Settings_.GrpcPort ? Settings_.GrpcPort : PortManager_.GetPort();
-
InitializeYqlLogger();
- InitializeServer(grpcPort);
+ InitializeServer();
WaitResourcesPublishing();
if (Settings_.MonitoringEnabled && Settings_.VerboseLevel >= EVerbose::Info) {
@@ -475,7 +500,12 @@ public:
}
if (Settings_.GrpcEnabled && Settings_.VerboseLevel >= EVerbose::Info) {
- Cout << CoutColors_.Cyan() << "Domain gRPC port: " << CoutColors_.Default() << grpcPort << Endl;
+ Cout << CoutColors_.Cyan() << "Domain gRPC port: " << CoutColors_.Default() << Server_->GetGRpcServer().GetPort() << Endl;
+ for (const auto& [tenantPath, tenantInfo] : Settings_.Tenants) {
+ if (tenantInfo.GetType() != TStorageMeta::TTenant::SERVERLESS) {
+ Cout << CoutColors_.Cyan() << "Tenant [" << tenantPath << "] gRPC port: " << CoutColors_.Default() << Server_->GetTenantGRpcServer(GetTenantPath(tenantPath)).GetPort() << Endl;
+ }
+ }
}
}
@@ -658,11 +688,7 @@ private:
}
TString GetDatabasePath(const TString& database) const {
- const TString& result = NKikimr::CanonizePath(database ? database : GetDefaultDatabase());
- if (StorageMeta_.TenantsSize() > 0 && result == NKikimr::CanonizePath(Settings_.DomainName)) {
- ythrow yexception() << "Cannot use root domain '" << result << "' as request database then created additional tenants";
- }
- return result;
+ return NKikimr::CanonizePath(database ? database : GetDefaultDatabase());
}
ui32 GetNodeIndexForDatabase(const TString& path) const {
diff --git a/ydb/tests/tools/kqprun/ya.make b/ydb/tests/tools/kqprun/ya.make
index b39159f0ea..2c501ed994 100644
--- a/ydb/tests/tools/kqprun/ya.make
+++ b/ydb/tests/tools/kqprun/ya.make
@@ -22,6 +22,7 @@ PEERDIR(
PEERDIR(
yql/essentials/udfs/common/datetime2
+ yql/essentials/udfs/common/digest
yql/essentials/udfs/common/re2
yql/essentials/udfs/common/string
yql/essentials/udfs/common/yson2
diff --git a/ydb/tools/cfg/base.py b/ydb/tools/cfg/base.py
index 680af5965e..3811c1e2f9 100644
--- a/ydb/tools/cfg/base.py
+++ b/ydb/tools/cfg/base.py
@@ -311,6 +311,7 @@ class ClusterDetailsProvider(object):
self.immediate_controls_config = self.__cluster_description.get("immediate_controls_config")
self.cms_config = self.__cluster_description.get("cms_config")
self.pdisk_key_config = self.__cluster_description.get("pdisk_key_config", {})
+ self.selector_config = self.__cluster_description.get("selector_config", {})
if not self.need_txt_files and not self.use_new_style_kikimr_cfg:
assert "cannot remove txt files without new style kikimr cfg!"
diff --git a/ydb/tools/cfg/bin/__main__.py b/ydb/tools/cfg/bin/__main__.py
index 88f0bdaddd..c8666be7e8 100644
--- a/ydb/tools/cfg/bin/__main__.py
+++ b/ydb/tools/cfg/bin/__main__.py
@@ -5,12 +5,10 @@ import os
import sys
from logging import config as logging_config
-import yaml
-
from ydb.tools.cfg.configurator_setup import get_parser, parse_optional_arguments
from ydb.tools.cfg.dynamic import DynamicConfigGenerator
from ydb.tools.cfg.static import StaticConfigGenerator
-from ydb.tools.cfg.utils import write_to_file, backport
+from ydb.tools.cfg.utils import write_to_file, backport, load_yaml
from ydb.tools.cfg.walle import NopHostsInformationProvider, WalleHostsInformationProvider
from ydb.tools.cfg.k8s_api import K8sApiHostsInformationProvider
@@ -46,8 +44,7 @@ def cfg_generate(args):
else:
cfg_cls = StaticConfigGenerator
- with open(args.cluster_description, "r") as yaml_template:
- cluster_template = yaml.safe_load(yaml_template)
+ cluster_template = load_yaml(args.cluster_description)
host_info_provider = NopHostsInformationProvider()
diff --git a/ydb/tools/cfg/static.py b/ydb/tools/cfg/static.py
index 22e0719c7f..b9a80ab3f9 100644
--- a/ydb/tools/cfg/static.py
+++ b/ydb/tools/cfg/static.py
@@ -8,7 +8,6 @@ import logging
import subprocess
import tempfile
-import yaml
from ydb.core.fq.libs.config.protos.fq_config_pb2 import TConfig as TFederatedQueryConfig
from ydb.core.protos import blobstorage_pdisk_config_pb2 as pdisk_config_pb
from google.protobuf import json_format
@@ -19,6 +18,8 @@ from ydb.core.protos import (
bootstrap_pb2,
cms_pb2,
config_pb2,
+ blobstorage_config_pb2,
+ blobstorage_base3_pb2,
feature_flags_pb2,
key_pb2,
netclassifier_pb2,
@@ -396,7 +397,9 @@ class StaticConfigGenerator(object):
all_configs["kikimr.cfg"] = self.kikimr_cfg
all_configs["dynamic_server.cfg"] = self.dynamic_server_common_args
normalized_config = self.get_normalized_config()
+
all_configs["config.yaml"] = self.get_yaml_format_config(normalized_config)
+
all_configs["dynconfig.yaml"] = self.get_yaml_format_dynconfig(normalized_config)
return all_configs
@@ -612,7 +615,7 @@ class StaticConfigGenerator(object):
return normalized_config
def get_yaml_format_config(self, normalized_config):
- return yaml.safe_dump(normalized_config, sort_keys=True, default_flow_style=False, indent=2)
+ return utils.dump_yaml(normalized_config)
def get_yaml_format_dynconfig(self, normalized_config):
cluster_uuid = normalized_config.get('nameservice_config', {}).get('cluster_uuid', '')
@@ -631,7 +634,7 @@ class StaticConfigGenerator(object):
'selector_config': [],
}
- if self.__cluster_details.use_auto_config:
+ if self.__cluster_details.use_auto_config or normalized_config.get('actor_system_config', {}).get('use_auto_config', False):
dynconfig['selector_config'].append({
'description': 'actor system config for dynnodes',
'selector': {
@@ -645,11 +648,18 @@ class StaticConfigGenerator(object):
}
}
})
+
+ # copy all selector_config elements without validation (for now) to dynconfig
+ for elem in self.__cluster_details.selector_config:
+ dynconfig['selector_config'].append(elem)
+
# emulate dumping ordered dict to yaml
lines = []
for key in ['metadata', 'config', 'allowed_labels', 'selector_config']:
lines.append(key + ':')
- substr = yaml.safe_dump(dynconfig[key], sort_keys=True, default_flow_style=False, indent=2)
+
+ substr = utils.dump_yaml(dynconfig[key])
+
for line in substr.split('\n'):
lines.append(' ' + line)
return '\n'.join(lines)
@@ -1104,8 +1114,61 @@ class StaticConfigGenerator(object):
else:
self.__generate_domains_from_proto(domains_config)
+ def __generate_default_pool_with_kind(self, pool_kind):
+ pool = config_pb2.TDomainsConfig.TStoragePoolType()
+ pool.Kind = pool_kind
+ pool_config = blobstorage_config_pb2.TDefineStoragePool()
+
+ pool_config.BoxId = 1
+ pool_config.Kind = pool_kind
+ pool_config.VDiskKind = "Default"
+ pdisk_filter = pool_config.PDiskFilter.add()
+ property = pdisk_filter.Property.add()
+ diskTypeToProto = {
+ 'ssd': blobstorage_base3_pb2.EPDiskType.SSD,
+ 'rot': blobstorage_base3_pb2.EPDiskType.ROT,
+ 'ssdencrypted': blobstorage_base3_pb2.EPDiskType.SSD,
+ 'rotencrypted': blobstorage_base3_pb2.EPDiskType.ROT,
+ }
+
+ property.Type = diskTypeToProto[pool_kind]
+
+ pool.PoolConfig.CopyFrom(pool_config)
+ return pool
+
def __generate_domains_from_proto(self, domains_config):
- self.__configure_security_config(domains_config)
+ domains = domains_config.Domain
+ if len(domains) > 1:
+ raise ValueError('Multiple domains specified: len(domains_config.domain) > 1. This is unsupported')
+
+ domain = domains[0]
+ pool_kinds = []
+ if not domain.StoragePoolTypes:
+ pool_kinds = ['ssd', 'rot', 'ssdencrypted', 'rotencrypted']
+ for pool_kind in pool_kinds:
+ storage_pool_type = domain.StoragePoolTypes.add()
+ default_storage_pool_type = self.__generate_default_pool_with_kind(pool_kind)
+ storage_pool_type.MergeFrom(default_storage_pool_type)
+ else:
+ for pool in domain.StoragePoolTypes:
+ # do a little dance to keep the specified fields prioritized
+ # while filling the remaining defaults (MergeFrom overwrites)
+ defaultPool = self.__generate_default_pool_with_kind(pool.Kind)
+ defaultPool.MergeFrom(pool)
+ pool.CopyFrom(defaultPool)
+
+ if not domain.DomainId:
+ domain.DomainId = 1
+ if not domain.PlanResolution:
+ domain.PlanResolution = base.DEFAULT_PLAN_RESOLUTION
+ if not domain.SchemeRoot:
+ domain.SchemeRoot = self.__tablet_types.FLAT_SCHEMESHARD.tablet_id_for(0)
+ if not domain.SSId:
+ domain.SSId.append(domain.DomainId)
+
+ if not domains_config.StateStorage:
+ self._configure_default_state_storage(domains_config, domain.DomainId)
+
self.__proto_configs["domains.txt"] = domains_config
def __generate_domains_from_old_domains_key(self):
diff --git a/ydb/tools/cfg/utils.py b/ydb/tools/cfg/utils.py
index f0345f569b..4b53995675 100644
--- a/ydb/tools/cfg/utils.py
+++ b/ydb/tools/cfg/utils.py
@@ -3,12 +3,15 @@
import os
import random
import string
-import yaml
+import yaml as pyyaml
import six
from google.protobuf import text_format, json_format
from google.protobuf.pyext._message import FieldDescriptor
+from ruamel.yaml import YAML
+from io import StringIO
+
from library.python import resource
from ydb.tools.cfg import types
@@ -191,7 +194,7 @@ def wrap_parse_dict(dictionary, proto):
# template file and dump it again, there will be a lot of meaningless diff
# in formatting, which is undesirable.
def backport(template_path, config_yaml, backported_sections):
- config_data = yaml.safe_load(config_yaml)
+ config_data = pyyaml.safe_load(config_yaml)
with open(template_path, 'r') as file:
lines = file.readlines()
@@ -202,7 +205,7 @@ def backport(template_path, config_yaml, backported_sections):
if section is None:
raise KeyError(f"The key '{section_key}' was not found in config_yaml")
- new_section_yaml = yaml.safe_dump({section_key: section}, default_flow_style=False).splitlines(True)
+ new_section_yaml = pyyaml.safe_dump({section_key: section}, default_flow_style=False).splitlines(True)
new_section_yaml.append(os.linesep)
start_index = None
@@ -237,3 +240,57 @@ def need_generate_bs_config(template_bs_config):
return True
return template_bs_config.get("service_set", {}).get("groups") is None
+
+
+use_alternative_yaml_handler = False
+
+
+def determine_yaml_parsing(yaml_template_file):
+ ruamel_yaml = YAML()
+ with open(yaml_template_file, "r") as yaml_template:
+ data = ruamel_yaml.load(yaml_template)
+ return data.get("use_alternative_yaml_parser", False)
+
+
+def load_yaml(yaml_template_file):
+ global use_alternative_yaml_handler
+ use_alternative_yaml_handler = determine_yaml_parsing(yaml_template_file)
+
+ if use_alternative_yaml_handler:
+ ruamel_yaml = YAML()
+
+ with open(yaml_template_file, "r") as yaml_template:
+ data = ruamel_yaml.load(yaml_template)
+
+ return data
+ else:
+ with open(yaml_template_file, "r") as yaml_template:
+ return pyyaml.safe_load(yaml_template)
+
+
+def sort_dict_recursively(obj):
+ if isinstance(obj, dict):
+ return dict(sorted((k, sort_dict_recursively(v)) for k, v in obj.items()))
+ elif isinstance(obj, list):
+ return [sort_dict_recursively(i) for i in obj]
+ else:
+ return obj
+
+
+def dump_yaml(data):
+ global use_alternative_yaml_handler
+
+ if use_alternative_yaml_handler:
+ yaml = YAML()
+
+ yaml.default_flow_style = False
+ yaml.indent(mapping=2, sequence=4, offset=2)
+ yaml.sort_base_mapping_type_on_output = True
+
+ sorted_data = sort_dict_recursively(data)
+
+ stream = StringIO()
+ yaml.dump(sorted_data, stream)
+ return stream.getvalue()
+ else:
+ return pyyaml.safe_dump(data, sort_keys=True, default_flow_style=False, indent=2)
diff --git a/ydb/tools/cfg/ya.make b/ydb/tools/cfg/ya.make
index e088dc8b9e..5ebb919467 100644
--- a/ydb/tools/cfg/ya.make
+++ b/ydb/tools/cfg/ya.make
@@ -22,6 +22,7 @@ PEERDIR(
contrib/python/jsonschema
contrib/python/requests
contrib/python/six
+ contrib/python/ruamel.yaml
ydb/tools/cfg/walle
ydb/tools/cfg/k8s_api
library/cpp/resource
diff --git a/ydb/tools/stress_tool/device_test_tool.h b/ydb/tools/stress_tool/device_test_tool.h
index 5cad171551..5784ecfac7 100644
--- a/ydb/tools/stress_tool/device_test_tool.h
+++ b/ydb/tools/stress_tool/device_test_tool.h
@@ -116,23 +116,34 @@ public:
}
void PrintResultsHumanFormat() {
- const ui32 screenWidth = 140;
- const ui32 columnWidth = screenWidth / Results.size() - 3; // 3 symbols is additional 2 spaces and | sign
- TStringStream formatStr;
- formatStr << " %" << columnWidth << "s |";
- const char *format = formatStr.Str().c_str();
+ const ui32 screenWidth = 250;
+ const ui32 maxColumnWidthLimit = screenWidth / Results.size() - 3; // 3 symbols is additional 2 spaces and | sign
+
+ // calculate max width per column
+ TVector<ui32> columnWidths;
+ columnWidths.reserve(Results.size());
+ for (const auto& counter : Results) {
+ ui32 maxWidth = std::max(counter.Name.size(), counter.Value.size());
+ columnWidths.push_back(std::min<ui32>(maxWidth, maxColumnWidthLimit));
+ }
+
if (!HeaderPrinted) {
HeaderPrinted = true;
PrintGlobalParams();
TEST_COUT("|");
- for (const auto& counter : Results) {
- TEST_COUT(Sprintf(format, counter.Name.c_str()));
+ for (size_t i = 0; i < Results.size(); ++i) {
+ const auto& name = Results[i].Name;
+ ui32 width = columnWidths[i];
+ TEST_COUT(std::format(" {:>{}.{}} |", name.c_str(), width, width));
}
TEST_COUT_LN("");
}
+
TEST_COUT("|");
- for (const auto& counter : Results) {
- TEST_COUT(Sprintf(format, counter.Value.c_str()));
+ for (size_t i = 0; i < Results.size(); ++i) {
+ const auto& value = Results[i].Value;
+ ui32 width = columnWidths[i];
+ TEST_COUT(std::format(" {:>{}.{}} |", value.c_str(), width, width));
}
TEST_COUT_LN("");
}
diff --git a/ydb/tools/stress_tool/device_test_tool_pdisk_test.h b/ydb/tools/stress_tool/device_test_tool_pdisk_test.h
index 0700ca8ba5..4fee1a37ca 100644
--- a/ydb/tools/stress_tool/device_test_tool_pdisk_test.h
+++ b/ydb/tools/stress_tool/device_test_tool_pdisk_test.h
@@ -141,7 +141,7 @@ protected:
TIntrusivePtr<TEvLoad::TLoadReport> report = ev->Get()->Report;
if (report) {
Printer->AddResult("Name", Cfg.Name);
- Printer->AddResult("Test duration, sec", report->Duration.Seconds());
+ Printer->AddResult("Duration, sec", report->Duration.Seconds());
Printer->AddResult("Load", report->LoadTypeName());
Printer->AddResult("Size", ToString(HumanReadableSize(report->Size, SF_BYTES)));
Printer->AddResult("InFlight", report->InFlight);
@@ -152,7 +152,7 @@ protected:
Printer->AddResult("IOPS", TString("N/A"));
}
for (double perc : {1.0, 0.9999, 0.999, 0.99, 0.95, 0.9, 0.5, 0.1}) {
- TString perc_name = Sprintf("%.2f perc", perc * 100);
+ TString perc_name = Sprintf("p%.2f", perc * 100);
size_t val = report->LatencyUs.GetPercentile(perc);
Printer->AddResult(perc_name, Sprintf("%zu us", val));
}
diff --git a/ydb/tools/ydbd_slice/__init__.py b/ydb/tools/ydbd_slice/__init__.py
index bb701808f7..56e294d7ed 100644
--- a/ydb/tools/ydbd_slice/__init__.py
+++ b/ydb/tools/ydbd_slice/__init__.py
@@ -471,7 +471,7 @@ def binaries_args():
"--kikimr",
metavar="BIN",
default=None,
- help="explicit path to ydbd. Can be url: 'rbtorrent:<torrent>' for rbtorrent, 'sbr:<id>' for sandbox resource or 'http(s)://<url>' for other."
+ help="explicit path to ydbd. Can be url: 'rbtorrent:<torrent>' for rbtorrent, 'sbr:<id>' for sandbox resource or 'http(s)://<url>' for http or 'script:' for custom script."
)
args.add_argument(
"--binary-lz4",
diff --git a/ydb/tools/ydbd_slice/nodes.py b/ydb/tools/ydbd_slice/nodes.py
index 20b3e09f6f..21f6563783 100644
--- a/ydb/tools/ydbd_slice/nodes.py
+++ b/ydb/tools/ydbd_slice/nodes.py
@@ -3,6 +3,7 @@ import sys
import logging
import subprocess
import queue
+import random
logger = logging.getLogger(__name__)
@@ -178,6 +179,19 @@ class Nodes(object):
running_jobs = self.execute_async_ret(script)
self._check_async_execution(running_jobs, retry_attemps=2)
+ def _download_script(self, script, remote_path):
+ user_script = script[len('script:'):]
+ self._logger.info(f"download by script '{user_script}' to '{remote_path}'")
+ tmp_path = f'tmp_{random.randint(0, 100500)}'
+ full_script = (
+ f'mkdir -p {tmp_path} && cd {tmp_path} && '
+ f'( {user_script} ) && cd - && '
+ f'for FILE in `find {tmp_path} -name *.tgz -or -name *.tar`; do tar -C {tmp_path} -xf $FILE && rm $FILE; done && '
+ f'sudo mv {tmp_path}/* {remote_path} && rm -rf {tmp_path}'
+ )
+ running_jobs = self.execute_async_ret(full_script)
+ self._check_async_execution(running_jobs, retry_attemps=2)
+
def _download_http(self, url, remote_path):
self._logger.info(f"download from '{url}' to '{remote_path}'")
running_jobs = self.execute_async_ret(f'sudo curl --output {remote_path} {url}')
@@ -215,6 +229,8 @@ class Nodes(object):
self.execute_async("sudo mkdir -p {}".format(os.path.dirname(remote_path)))
if local_path.startswith('rbtorrent:') or local_path.startswith('sbr:'):
self._download_sky(local_path, remote_path)
+ elif local_path.startswith('script:'):
+ self._download_script(local_path, remote_path)
elif local_path.startswith('http:') or local_path.startswith('https:'):
self._download_http(local_path, remote_path)
else:
diff --git a/yql/essentials/cfg/tests/gateways-experimental.conf b/yql/essentials/cfg/tests/gateways-experimental.conf
index fe6fa97660..a68414581b 100644
--- a/yql/essentials/cfg/tests/gateways-experimental.conf
+++ b/yql/essentials/cfg/tests/gateways-experimental.conf
@@ -33,6 +33,26 @@ Yt {
Name: "UseColumnGroupsFromInputTables"
Value: "true"
}
+
+ DefaultSettings {
+ Name: "JobBlockInput"
+ Value: "true"
+ }
+
+ DefaultSettings {
+ Name: "JobBlockInputSupportedTypes"
+ Value: "tuple"
+ }
+
+ DefaultSettings {
+ Name: "JobBlockInputSupportedDataTypes"
+ Value: "Int8,Uint8,Int16,Uint16,Int32,Uint32,Int64,Uint64,Bool,Double,String,Utf8,Yson,Float"
+ }
+
+ DefaultSettings {
+ Name: "ReportEquiJoinStats"
+ Value: "true"
+ }
}
Dq {
diff --git a/yql/essentials/core/expr_nodes/yql_expr_nodes.json b/yql/essentials/core/expr_nodes/yql_expr_nodes.json
index 7eb01b6114..c051517eee 100644
--- a/yql/essentials/core/expr_nodes/yql_expr_nodes.json
+++ b/yql/essentials/core/expr_nodes/yql_expr_nodes.json
@@ -2557,6 +2557,11 @@
"Match": {"Type": "Callable", "Name": "WideFromBlocks"}
},
{
+ "Name": "TCoListFromBlocks",
+ "Base": "TCoInputBase",
+ "Match": {"Type": "Callable", "Name": "ListFromBlocks"}
+ },
+ {
"Name": "TCoReplicateScalars",
"Base": "TCoInputBase",
"Match": {"Type": "Callable", "Name": "ReplicateScalars"},
@@ -2570,6 +2575,11 @@
"Match": {"Type": "Callable", "Name": "WideToBlocks"}
},
{
+ "Name": "TCoListToBlocks",
+ "Base": "TCoInputBase",
+ "Match": {"Type": "Callable", "Name": "ListToBlocks"}
+ },
+ {
"Name": "TCoPgSelect",
"Base": "TCallable",
"Match": {"Type": "Callable", "Name": "PgSelect"},
diff --git a/yql/essentials/core/histogram/eq_width_histogram.cpp b/yql/essentials/core/histogram/eq_width_histogram.cpp
new file mode 100644
index 0000000000..3c5a452fdb
--- /dev/null
+++ b/yql/essentials/core/histogram/eq_width_histogram.cpp
@@ -0,0 +1,73 @@
+#include "eq_width_histogram.h"
+
+namespace NKikimr {
+
+TEqWidthHistogram::TEqWidthHistogram(ui32 numBuckets, EHistogramValueType valueType)
+ : ValueType(valueType), Buckets(numBuckets) {
+ // Expected at least one bucket for the histogram.
+ Y_ASSERT(numBuckets >= 1);
+}
+
+TEqWidthHistogram::TEqWidthHistogram(const char *str, ui64 size) {
+ Y_ASSERT(str && size);
+ const ui32 numBuckets = *reinterpret_cast<const ui32 *>(str);
+ Y_ABORT_UNLESS(GetBinarySize(numBuckets) == size);
+ ui32 offset = sizeof(ui32);
+ ValueType = *reinterpret_cast<const EHistogramValueType *>(str + offset);
+ offset += sizeof(EHistogramValueType);
+ Buckets = TVector<TBucket>(numBuckets);
+ for (ui32 i = 0; i < numBuckets; ++i) {
+ std::memcpy(&Buckets[i], reinterpret_cast<const char *>(str + offset), sizeof(TBucket));
+ offset += sizeof(TBucket);
+ }
+}
+
+ui64 TEqWidthHistogram::GetBinarySize(ui32 nBuckets) const {
+ return sizeof(ui32) + sizeof(EHistogramValueType) + sizeof(TBucket) * nBuckets;
+}
+
+// Binary layout:
+// [4 byte: number of buckets][1 byte: value type]
+// [sizeof(Bucket)[0]... sizeof(Bucket)[n]].
+std::unique_ptr<char> TEqWidthHistogram::Serialize(ui64 &binarySize) const {
+ binarySize = GetBinarySize(GetNumBuckets());
+ std::unique_ptr<char> binaryData(new char[binarySize]);
+ ui32 offset = 0;
+ const ui32 numBuckets = GetNumBuckets();
+ // 4 byte - number of buckets.
+ std::memcpy(binaryData.get(), &numBuckets, sizeof(ui32));
+ offset += sizeof(ui32);
+ // 1 byte - values type.
+ std::memcpy(binaryData.get() + offset, &ValueType, sizeof(EHistogramValueType));
+ offset += sizeof(EHistogramValueType);
+ // Buckets.
+ for (ui32 i = 0; i < numBuckets; ++i) {
+ std::memcpy(binaryData.get() + offset, &Buckets[i], sizeof(TBucket));
+ offset += sizeof(TBucket);
+ }
+ return binaryData;
+}
+
+TEqWidthHistogramEstimator::TEqWidthHistogramEstimator(std::shared_ptr<TEqWidthHistogram> histogram)
+ : Histogram(histogram) {
+ const auto numBuckets = Histogram->GetNumBuckets();
+ PrefixSum = TVector<ui64>(numBuckets);
+ SuffixSum = TVector<ui64>(numBuckets);
+ CreatePrefixSum(numBuckets);
+ CreateSuffixSum(numBuckets);
+}
+
+void TEqWidthHistogramEstimator::CreatePrefixSum(ui32 numBuckets) {
+ PrefixSum[0] = Histogram->GetNumElementsInBucket(0);
+ for (ui32 i = 1; i < numBuckets; ++i) {
+ PrefixSum[i] = PrefixSum[i - 1] + Histogram->GetNumElementsInBucket(i);
+ }
+}
+
+void TEqWidthHistogramEstimator::CreateSuffixSum(ui32 numBuckets) {
+ SuffixSum[numBuckets - 1] = Histogram->GetNumElementsInBucket(numBuckets - 1);
+ for (i32 i = static_cast<i32>(numBuckets) - 2; i >= 0; --i) {
+ SuffixSum[i] = SuffixSum[i + 1] + Histogram->GetNumElementsInBucket(i);
+ }
+}
+} // namespace NKikimr
diff --git a/yql/essentials/core/histogram/eq_width_histogram.h b/yql/essentials/core/histogram/eq_width_histogram.h
new file mode 100644
index 0000000000..97c660af76
--- /dev/null
+++ b/yql/essentials/core/histogram/eq_width_histogram.h
@@ -0,0 +1,228 @@
+#pragma once
+
+#include <util/generic/strbuf.h>
+#include <util/generic/vector.h>
+#include <util/stream/output.h>
+#include <util/system/types.h>
+#include <cmath>
+
+namespace NKikimr {
+
+ // Helper functions to work with histogram values.
+template <typename T>
+inline T LoadFrom(const ui8 *storage) {
+ T val;
+ std::memcpy(&val, storage, sizeof(T));
+ return val;
+}
+template <typename T>
+inline void StoreTo(ui8 *storage, T value) {
+ std::memcpy(storage, &value, sizeof(T));
+}
+template <typename T>
+inline bool CmpEqual(T left, T right) {
+ return left == right;
+}
+template <>
+inline bool CmpEqual(double left, double right) {
+ return std::fabs(left - right) < std::numeric_limits<double>::epsilon();
+}
+template <typename T>
+inline bool CmpLess(T left, T right) {
+ return left < right;
+}
+
+// Represents value types supported by histogram.
+enum class EHistogramValueType : ui8 { Int16, Int32, Int64, Uint16, Uint32, Uint64, Double, NotSupported };
+
+// Bucket storage size for Equal width histogram.
+constexpr const ui32 EqWidthHistogramBucketStorageSize = 8;
+
+// This class represents an `Equal-width` histogram.
+// Each bucket represents a range of contiguous values of equal width, and the
+// aggregate summary stored in the bucket is the number of rows whose value lies
+// within that range.
+class TEqWidthHistogram {
+ public:
+#pragma pack(push, 1)
+ struct TBucket {
+ // The number of values in a bucket.
+ ui64 Count{0};
+ // The `start` value of a bucket, the `end` of the bucket is a next start.
+ // [start = start[i], end = start[i + 1])
+ ui8 Start[EqWidthHistogramBucketStorageSize];
+ };
+ struct TBucketRange {
+ ui8 Start[EqWidthHistogramBucketStorageSize];
+ ui8 End[EqWidthHistogramBucketStorageSize];
+ };
+#pragma pack(pop)
+
+ // Have to specify the number of buckets and type of the values.
+ TEqWidthHistogram(ui32 numBuckets = 1, EHistogramValueType type = EHistogramValueType::Int32);
+ // From serialized data.
+ TEqWidthHistogram(const char *str, ui64 size);
+
+ // Adds the given `val` to a histogram.
+ template <typename T>
+ void AddElement(T val) {
+ const auto index = FindBucketIndex(val);
+ // The given `index` in range [0, numBuckets - 1].
+ const T bucketValue = LoadFrom<T>(Buckets[index].Start);
+ if (!index || ((CmpEqual<T>(bucketValue, val) || CmpLess<T>(bucketValue, val)))) {
+ Buckets[index].Count++;
+ } else {
+ Buckets[index - 1].Count++;
+ }
+ }
+
+ // Returns an index of the bucket which stores the given `val`.
+ // Returned index in range [0, numBuckets - 1].
+ // Not using `std::lower_bound()` here because we need an index to map into the `suffix` and `prefix` sums.
+ template <typename T>
+ ui32 FindBucketIndex(T val) const {
+ ui32 start = 0;
+ ui32 end = GetNumBuckets() - 1;
+ while (start < end) {
+ auto it = start + (end - start) / 2;
+ if (CmpLess<T>(LoadFrom<T>(Buckets[it].Start), val)) {
+ start = it + 1;
+ } else {
+ end = it;
+ }
+ }
+ return start;
+ }
+
+ // Returns a number of buckets in a histogram.
+ ui32 GetNumBuckets() const { return Buckets.size(); }
+
+ template <typename T>
+ ui32 GetBucketWidth() const {
+ Y_ASSERT(GetNumBuckets());
+ if (GetNumBuckets() == 1) {
+ return std::max(static_cast<ui32>(LoadFrom<T>(Buckets.front().Start)), 1U);
+ } else {
+ return std::max(static_cast<ui32>(LoadFrom<T>(Buckets[1].Start) - LoadFrom<T>(Buckets[0].Start)), 1U);
+ }
+ }
+
+ template <>
+ ui32 GetBucketWidth<double>() const {
+ return 1;
+ }
+
+ // Returns histogram type.
+ EHistogramValueType GetType() const { return ValueType; }
+ // Returns a number of elements in a bucket by the given `index`.
+ ui64 GetNumElementsInBucket(ui32 index) const { return Buckets[index].Count; }
+
+ // Initializes buckets with a given `range`.
+ template <typename T>
+ void InitializeBuckets(const TBucketRange &range) {
+ Y_ASSERT(CmpLess<T>(LoadFrom<T>(range.Start), LoadFrom<T>(range.End)));
+ T rangeLen = LoadFrom<T>(range.End) - LoadFrom<T>(range.Start);
+ std::memcpy(Buckets[0].Start, range.Start, sizeof(range.Start));
+ for (ui32 i = 1; i < GetNumBuckets(); ++i) {
+ const T prevStart = LoadFrom<T>(Buckets[i - 1].Start);
+ StoreTo<T>(Buckets[i].Start, prevStart + rangeLen);
+ }
+ }
+
+ // Serializes to a binary representation.
+ std::unique_ptr<char> Serialize(ui64 &binSize) const;
+ // Returns buckets.
+ const TVector<TBucket> &GetBuckets() const { return Buckets; }
+
+ template <typename T>
+ void Aggregate(const TEqWidthHistogram &other) {
+ if ((this->ValueType != other.GetType()) || (!BucketsEqual<T>(other))) {
+ // Should we fail?
+ return;
+ }
+ for (ui32 i = 0; i < Buckets.size(); ++i) {
+ Buckets[i].Count += other.GetBuckets()[i].Count;
+ }
+ }
+
+ private:
+ template <typename T>
+ bool BucketsEqual(const TEqWidthHistogram &other) {
+ if (Buckets.size() != other.GetNumBuckets()) {
+ return false;
+ }
+ for (ui32 i = 0; i < Buckets.size(); ++i) {
+ // Compare our bucket boundaries against the OTHER histogram's; comparing
+ // against our own GetBuckets() would be vacuously true for every bucket.
+ if (!CmpEqual<T>(LoadFrom<T>(Buckets[i].Start), LoadFrom<T>(other.GetBuckets()[i].Start))) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // Returns binary size of the histogram.
+ ui64 GetBinarySize(ui32 nBuckets) const;
+ EHistogramValueType ValueType;
+ TVector<TBucket> Buckets;
+};
+
+// This class represents a machinery to estimate a value in a histogram.
+class TEqWidthHistogramEstimator {
+ public:
+ TEqWidthHistogramEstimator(std::shared_ptr<TEqWidthHistogram> histogram);
+
+ // Methods to estimate values.
+ template <typename T>
+ ui64 EstimateLessOrEqual(T val) const {
+ return EstimateOrEqual<T>(val, PrefixSum);
+ }
+
+ template <typename T>
+ ui64 EstimateGreaterOrEqual(T val) const {
+ return EstimateOrEqual<T>(val, SuffixSum);
+ }
+
+ template <typename T>
+ ui64 EstimateLess(T val) const {
+ return EstimateNotEqual<T>(val, PrefixSum);
+ }
+
+ template <typename T>
+ ui64 EstimateGreater(T val) const {
+ return EstimateNotEqual<T>(val, SuffixSum);
+ }
+
+ template <typename T>
+ ui64 EstimateEqual(T val) const {
+ const auto index = Histogram->FindBucketIndex(val);
+ // Assuming uniform distribution.
+ return std::max(1U, static_cast<ui32>(Histogram->GetNumElementsInBucket(index) / Histogram->template GetBucketWidth<T>()));
+ }
+
+ // Returns the total number elements in histogram.
+ // Could be used to adjust scale.
+ ui64 GetNumElements() const { return PrefixSum.back(); }
+
+ private:
+ template <typename T>
+ ui64 EstimateOrEqual(T val, const TVector<ui64> &sumArray) const {
+ const auto index = Histogram->FindBucketIndex(val);
+ return sumArray[index];
+ }
+
+ template <typename T>
+ ui64 EstimateNotEqual(T val, const TVector<ui64> &sumArray) const {
+ const auto index = Histogram->FindBucketIndex(val);
+ // Take the previous bucket if it's not the first one.
+ if (!index) {
+ return sumArray[index];
+ }
+ return sumArray[index - 1];
+ }
+
+ void CreatePrefixSum(ui32 numBuckets);
+ void CreateSuffixSum(ui32 numBuckets);
+ std::shared_ptr<TEqWidthHistogram> Histogram;
+ TVector<ui64> PrefixSum;
+ TVector<ui64> SuffixSum;
+};
+} // namespace NKikimr
diff --git a/yql/essentials/core/histogram/ut/eq_width_histogram_ut.cpp b/yql/essentials/core/histogram/ut/eq_width_histogram_ut.cpp
new file mode 100644
index 0000000000..9c1b1d969f
--- /dev/null
+++ b/yql/essentials/core/histogram/ut/eq_width_histogram_ut.cpp
@@ -0,0 +1,127 @@
+#include <library/cpp/testing/unittest/registar.h>
+
+#include "eq_width_histogram.h"
+
+namespace NKikimr {
+
+template <typename T>
+bool EqualHistograms(const std::shared_ptr<TEqWidthHistogram> &left, const std::shared_ptr<TEqWidthHistogram> &right) {
+ // Not expecting any nullptr.
+ if (!left || !right) return false;
+
+ if (left->GetNumBuckets() != right->GetNumBuckets()) {
+ return false;
+ }
+ if (left->GetType() != right->GetType()) {
+ return false;
+ }
+
+ for (ui32 i = 0; i < left->GetNumBuckets(); ++i) {
+ const auto &leftBucket = left->GetBuckets()[i];
+ const auto &rightBucket = right->GetBuckets()[i];
+ if (leftBucket.Count != rightBucket.Count) {
+ return false;
+ }
+ if (!CmpEqual<T>(LoadFrom<T>(leftBucket.Start), LoadFrom<T>(rightBucket.Start))) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <typename T>
+std::shared_ptr<TEqWidthHistogram> CreateHistogram(ui32 numBuckets, T start, T range, EHistogramValueType valueType) {
+ std::shared_ptr<TEqWidthHistogram> histogram(std::make_shared<TEqWidthHistogram>(numBuckets, valueType));
+ TEqWidthHistogram::TBucketRange bucketRange;
+ StoreTo<T>(bucketRange.Start, start);
+ StoreTo<T>(bucketRange.End, range);
+ histogram->InitializeBuckets<T>(bucketRange);
+ return histogram;
+}
+
+template <typename T>
+void PopulateHistogram(std::shared_ptr<TEqWidthHistogram> histogram, const std::pair<ui32, ui32> &range) {
+ for (ui32 i = range.first; i < range.second; ++i) {
+ histogram->AddElement<T>(i);
+ }
+}
+
+template <typename T>
+void TestHistogramBasic(ui32 numBuckets, std::pair<ui32, ui32> range, std::pair<T, T> bucketRange,
+ EHistogramValueType valueType, std::pair<T, ui64> less, std::pair<T, ui64> greater) {
+ auto histogram = CreateHistogram<T>(numBuckets, bucketRange.first, bucketRange.second, valueType);
+ UNIT_ASSERT_VALUES_EQUAL(histogram->GetNumBuckets(), numBuckets);
+ PopulateHistogram<T>(histogram, range);
+ TEqWidthHistogramEstimator estimator(histogram);
+ UNIT_ASSERT_VALUES_EQUAL(estimator.EstimateLessOrEqual<T>(less.first), less.second);
+ UNIT_ASSERT_VALUES_EQUAL(estimator.EstimateGreaterOrEqual<T>(greater.first), greater.second);
+}
+
+template <typename T>
+void TestHistogramSerialization(ui32 numBuckets, std::pair<ui32, ui32> range, std::pair<T, T> bucketRange,
+ EHistogramValueType valueType) {
+ auto histogram = CreateHistogram<T>(numBuckets, bucketRange.first, bucketRange.second, valueType);
+ UNIT_ASSERT(histogram);
+ PopulateHistogram<T>(histogram, range);
+ ui64 binarySize = 0;
+ auto binaryData = histogram->Serialize(binarySize);
+ UNIT_ASSERT(binaryData && binarySize);
+ TString hString(binaryData.get(), binarySize);
+ auto histogramFromString = std::make_shared<TEqWidthHistogram>(hString.data(), hString.size());
+ UNIT_ASSERT(histogramFromString);
+ UNIT_ASSERT(EqualHistograms<T>(histogram, histogramFromString));
+}
+
+template <typename T>
+void TestHistogramAggregate(ui32 numBuckets, std::pair<ui32, ui32> range, std::pair<T, T> bucketRange,
+ EHistogramValueType valueType, ui32 numCombine, const TVector<ui64> &resultCount) {
+ auto histogram = CreateHistogram<T>(numBuckets, bucketRange.first, bucketRange.second, valueType);
+ UNIT_ASSERT(histogram);
+ PopulateHistogram<T>(histogram, range);
+ auto histogramToAdd = CreateHistogram<T>(numBuckets, bucketRange.first, bucketRange.second, valueType);
+ PopulateHistogram<T>(histogramToAdd, range);
+ UNIT_ASSERT(histogram);
+ for (ui32 i = 0; i < numCombine; ++i) histogram->template Aggregate<T>(*histogramToAdd);
+ for (ui32 i = 0; i < histogram->GetNumBuckets(); ++i) {
+ UNIT_ASSERT(histogram->GetBuckets()[i].Count == resultCount[i]);
+ }
+}
+
+Y_UNIT_TEST_SUITE(EqWidthHistogram) {
+ Y_UNIT_TEST(Basic) {
+ TestHistogramBasic<ui32>(10, /*values range=*/{0, 10}, /*bucket range=*/{0, 2}, EHistogramValueType::Uint32,
+ /*{value, result}=*/{9, 10},
+ /*{value, result}=*/{10, 0});
+ TestHistogramBasic<ui64>(10, /*values range=*/{0, 10}, /*bucket range=*/{0, 2}, EHistogramValueType::Uint64,
+ /*{value, result}=*/{9, 10},
+ /*{value, result}=*/{10, 0});
+ TestHistogramBasic<i32>(10, /*values range=*/{0, 10}, /*bucket range=*/{0, 2}, EHistogramValueType::Int32,
+ /*{value, result}=*/{9, 10},
+ /*{value, result}=*/{10, 0});
+ TestHistogramBasic<i64>(10, /*values range=*/{0, 10}, /*bucket range=*/{0, 2}, EHistogramValueType::Int64,
+ /*{value, result}=*/{9, 10},
+ /*{value, result}=*/{10, 0});
+ TestHistogramBasic<double>(10, /*values range=*/{0.0, 10.0}, /*bucket range=*/{0.0, 2.0},
+ EHistogramValueType::Double,
+ /*{value, result}=*/{9.0, 10},
+ /*{value, result}=*/{10.0, 0});
+ }
+
+ Y_UNIT_TEST(Serialization) {
+ TestHistogramSerialization<ui32>(10, /*values range=*/{0, 10}, /*bucket range=*/{0, 2},
+ EHistogramValueType::Uint32);
+ TestHistogramSerialization<ui64>(10, /*values range=*/{0, 10}, /*bucket range=*/{0, 2},
+ EHistogramValueType::Uint64);
+ TestHistogramSerialization<i32>(10, /*values range=*/{0, 10}, /*bucket range=*/{0, 2}, EHistogramValueType::Int32);
+ TestHistogramSerialization<i64>(10, /*values range=*/{0, 10}, /*bucket range=*/{0, 2}, EHistogramValueType::Int64);
+ TestHistogramSerialization<double>(10, /*values range=*/{0.0, 10.0}, /*bucket range=*/{0.0, 2.0},
+ EHistogramValueType::Double);
+ }
+ Y_UNIT_TEST(AggregateHistogram) {
+ TVector<ui64> resultCount{20, 20, 20, 20, 20, 0, 0, 0, 0, 0};
+ TestHistogramAggregate<ui32>(10, /*values range=*/{0, 10}, /*bucket range=*/{0, 2}, EHistogramValueType::Uint32, 9,
+ resultCount);
+ }
+}
+} // namespace NKikimr
diff --git a/yql/essentials/core/histogram/ut/ya.make b/yql/essentials/core/histogram/ut/ya.make
new file mode 100644
index 0000000000..17e420ad07
--- /dev/null
+++ b/yql/essentials/core/histogram/ut/ya.make
@@ -0,0 +1,8 @@
+UNITTEST_FOR(yql/essentials/core/histogram)
+
+SIZE(MEDIUM)
+SRCS(
+ eq_width_histogram_ut.cpp
+)
+
+END()
diff --git a/yql/essentials/core/histogram/ya.make b/yql/essentials/core/histogram/ya.make
new file mode 100644
index 0000000000..bcc309c379
--- /dev/null
+++ b/yql/essentials/core/histogram/ya.make
@@ -0,0 +1,12 @@
+LIBRARY()
+
+SRCS(
+ eq_width_histogram.h
+ eq_width_histogram.cpp
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ ut
+)
diff --git a/yql/essentials/core/minsketch/ut/ya.make b/yql/essentials/core/minsketch/ut/ya.make
index 30f60c7229..d951550fac 100644
--- a/yql/essentials/core/minsketch/ut/ya.make
+++ b/yql/essentials/core/minsketch/ut/ya.make
@@ -1,15 +1,6 @@
UNITTEST_FOR(yql/essentials/core/minsketch)
-FORK_SUBTESTS()
-IF (WITH_VALGRIND)
- SPLIT_FACTOR(30)
- TIMEOUT(1200)
- SIZE(LARGE)
- TAG(ya:fat)
-ELSE()
- TIMEOUT(600)
- SIZE(MEDIUM)
-ENDIF()
+SIZE(MEDIUM)
SRCS(
count_min_sketch_ut.cpp
diff --git a/yql/essentials/core/peephole_opt/yql_opt_peephole_physical.cpp b/yql/essentials/core/peephole_opt/yql_opt_peephole_physical.cpp
index 1b39f7dd86..19c55dbfcf 100644
--- a/yql/essentials/core/peephole_opt/yql_opt_peephole_physical.cpp
+++ b/yql/essentials/core/peephole_opt/yql_opt_peephole_physical.cpp
@@ -126,33 +126,33 @@ TExprNode::TPtr RebuildArgumentsOnlyLambdaForBlocks(const TExprNode& lambda, TEx
return ctx.NewLambda(lambda.Pos(), ctx.NewArguments(lambda.Pos(), std::move(newArgs)), std::move(newRoots));
}
+TExprNode::TPtr SwapFlowNodeWithStreamNode(const TExprNode::TPtr& flowNode, const TExprNode::TPtr& streamNode, TExprContext& ctx) {
+ const auto streamInput = streamNode->HeadPtr();
+ // If streamInput is FromFlow, its input is WideFlow and can
+ // be used intact; Otherwise the input is WideStream, so the
+ // new input should be converted to WideFlow.
+ auto flowInput = streamInput->IsCallable("FromFlow") ? streamInput->HeadPtr()
+ : ctx.NewCallable(streamInput->Pos(), "ToFlow", { streamInput });
+ // XXX: ChangeChild has to be used here and below, since
+ // the callable might have more than one input, but only
+ // the first one should be substituted.
+ const auto newFlowNode = ctx.ChangeChild(*flowNode, 0, std::move(flowInput));
+ const auto newStreamNode = ctx.ChangeChild(*streamNode, 0, {
+ ctx.NewCallable(newFlowNode->Pos(), "FromFlow", { newFlowNode })
+ });
+ return ctx.Builder(flowNode->Pos())
+ .Callable("ToFlow")
+ .Add(0, newStreamNode)
+ .Seal()
+ .Build();
+}
+
TExprNode::TPtr OptimizeWideToBlocks(const TExprNode::TPtr& node, TExprContext& ctx, TTypeAnnotationContext& types) {
Y_UNUSED(types);
const auto& input = node->Head();
if (input.IsCallable("WideFromBlocks")) {
YQL_CLOG(DEBUG, CorePeepHole) << "Drop " << node->Content() << " over " << input.Content();
- // If tail is FromFlow, its input is WideFlow and can be
- // used intact; Otherwise the input is WideStream, so the
- // new input should be converted to WideFlow.
- const auto tail = input.HeadPtr();
- const auto flowInput = tail->IsCallable("FromFlow") ? tail->HeadPtr()
- : ctx.NewCallable(tail->Pos(), "ToFlow", { tail });
-
- // Static assert to ensure backward compatible change: if the
- // constant below is true, both input and output types of
- // ReplicateScalars callable have to be WideStream; otherwise,
- // both input and output types have to be WideFlow.
- // FIXME: When all spots using ReplicateScalars are adjusted
- // to work with WideStream, drop the assertion below.
- static_assert(!NYql::NBlockStreamIO::ReplicateScalars);
-
- return ctx.Builder(node->Pos())
- .Callable("FromFlow")
- .Callable(0, "ReplicateScalars")
- .Add(0, flowInput)
- .Seal()
- .Seal()
- .Build();
+ return ctx.NewCallable(node->Pos(), "ReplicateScalars", { input.HeadPtr() });
}
if (input.IsCallable("FromFlow") && input.Head().IsCallable({"Extend", "OrderedExtend"})) {
@@ -199,30 +199,9 @@ TExprNode::TPtr OptimizeWideFromBlocks(const TExprNode::TPtr& node, TExprContext
return input.HeadPtr();
}
- // Static assert to ensure backward compatible change: if the
- // constant below is true, both input and output types of
- // ReplicateScalars callable have to be WideStream; otherwise,
- // both input and output types have to be WideFlow.
- // FIXME: When all spots using ReplicateScalars are adjusted
- // to work with WideStream, drop the assertion below.
- static_assert(!NYql::NBlockStreamIO::ReplicateScalars);
-
- if (input.IsCallable("FromFlow") && input.Head().IsCallable("ReplicateScalars")) {
- const auto& replicateScalars = input.Head();
- // Technically, the code below rewrites the following sequence
- // (WideFromBlocks (FromFlow (ReplicateScalars (<input>))))
- // into (WideFromBlocks (FromFlow (<input>))), but ToFlow/FromFlow
- // wrappers will be removed when all other nodes in block
- // pipeline start using WideStream instead of the WideFlow.
- // Hence, the logging is left intact.
- YQL_CLOG(DEBUG, CorePeepHole) << "Drop " << replicateScalars.Content() << " as input of " << node->Content();
- return ctx.Builder(node->Pos())
- .Callable(node->Content())
- .Callable(0, input.Content())
- .Add(0, replicateScalars.HeadPtr())
- .Seal()
- .Seal()
- .Build();
+ if (input.IsCallable("ReplicateScalars")) {
+ YQL_CLOG(DEBUG, CorePeepHole) << "Drop " << input.Content() << " as input of " << node->Content();
+ return ctx.ChangeChild(*node, 0, input.HeadPtr());
}
return node;
@@ -230,18 +209,17 @@ TExprNode::TPtr OptimizeWideFromBlocks(const TExprNode::TPtr& node, TExprContext
TExprNode::TPtr OptimizeWideTakeSkipBlocks(const TExprNode::TPtr& node, TExprContext& ctx, TTypeAnnotationContext& types) {
Y_UNUSED(types);
-
- // Static assert to ensure backward compatible change: if the
- // constant below is true, both input and output types of
- // ReplicateScalars callable have to be WideStream; otherwise,
- // both input and output types have to be WideFlow.
- // FIXME: When all spots using ReplicateScalars are adjusted
- // to work with WideStream, drop the assertion below.
- static_assert(!NYql::NBlockStreamIO::ReplicateScalars);
-
- if (node->Head().IsCallable("ReplicateScalars")) {
- YQL_CLOG(DEBUG, CorePeepHole) << "Swap " << node->Content() << " with " << node->Head().Content();
- return ctx.SwapWithHead(*node);
+ const auto& input = node->HeadPtr();
+ if (input->IsCallable("ToFlow") && input->Head().IsCallable("ReplicateScalars")) {
+ const auto& replicateScalars = input->HeadPtr();
+ // Technically, the code below rewrites the following sequence
+ // (Wide{Skip,Take}Blocks (ToFlow (ReplicateScalars (<input>))))
+ // into (ToFlow (ReplicateScalars (FromFlow (Wide{Skip,Take}Blocks (<input>))))),
+ // but ToFlow/FromFlow wrappers will be removed when all other
+ // nodes in block pipeline start using WideStream instead of the
+ // WideFlow. Hence, the logging is left intact.
+ YQL_CLOG(DEBUG, CorePeepHole) << "Swap " << node->Content() << " with " << replicateScalars->Content();
+ return SwapFlowNodeWithStreamNode(node, replicateScalars, ctx);
}
return node;
@@ -249,35 +227,31 @@ TExprNode::TPtr OptimizeWideTakeSkipBlocks(const TExprNode::TPtr& node, TExprCon
TExprNode::TPtr OptimizeBlockCompress(const TExprNode::TPtr& node, TExprContext& ctx, TTypeAnnotationContext& types) {
Y_UNUSED(types);
-
- // Static assert to ensure backward compatible change: if the
- // constant below is true, both input and output types of
- // ReplicateScalars callable have to be WideStream; otherwise,
- // both input and output types have to be WideFlow.
- // FIXME: When all spots using ReplicateScalars are adjusted
- // to work with WideStream, drop the assertion below.
- static_assert(!NYql::NBlockStreamIO::ReplicateScalars);
-
- if (node->Head().IsCallable("ReplicateScalars")) {
- YQL_CLOG(DEBUG, CorePeepHole) << "Swap " << node->Content() << " with " << node->Head().Content();
- if (node->Head().ChildrenSize() == 1) {
- return ctx.SwapWithHead(*node);
+ const auto& input = node->HeadPtr();
+ if (input->IsCallable("ToFlow") && input->Head().IsCallable("ReplicateScalars")) {
+ const auto& replicateScalars = input->HeadPtr();
+ // Technically, the code below rewrites the following sequence
+ // (BlockCompress (ToFlow (ReplicateScalars (<input>))))
+ // into (ToFlow (ReplicateScalars (FromFlow (BlockCompress (<input>))))),
+ // but ToFlow/FromFlow wrappers will be removed when all other
+ // nodes in block pipeline start using WideStream instead of the
+ // WideFlow. Hence, the logging is left intact.
+ YQL_CLOG(DEBUG, CorePeepHole) << "Swap " << node->Content() << " with " << replicateScalars->Content();
+ if (replicateScalars->ChildrenSize() == 1) {
+ return SwapFlowNodeWithStreamNode(node, replicateScalars, ctx);
}
const ui32 compressIndex = FromString<ui32>(node->Child(1)->Content());
TExprNodeList newReplicateIndexes;
- for (auto atom : node->Head().Child(1)->ChildrenList()) {
+ for (auto atom : replicateScalars->Child(1)->ChildrenList()) {
ui32 idx = FromString<ui32>(atom->Content());
if (idx != compressIndex) {
newReplicateIndexes.push_back((idx < compressIndex) ? atom : ctx.NewAtom(atom->Pos(), idx - 1));
}
}
- return ctx.Builder(node->Pos())
- .Callable("ReplicateScalars")
- .Add(0, ctx.ChangeChild(*node, 0, node->Head().HeadPtr()))
- .Add(1, ctx.NewList(node->Head().Child(1)->Pos(), std::move(newReplicateIndexes)))
- .Seal()
- .Build();
+ const auto& newReplicateScalars = ctx.ChangeChild(*replicateScalars, 1,
+ ctx.NewList(replicateScalars->Child(1)->Pos(), std::move(newReplicateIndexes)));
+ return SwapFlowNodeWithStreamNode(node, newReplicateScalars, ctx);
}
return node;
@@ -285,18 +259,17 @@ TExprNode::TPtr OptimizeBlockCompress(const TExprNode::TPtr& node, TExprContext&
TExprNode::TPtr OptimizeBlocksTopOrSort(const TExprNode::TPtr& node, TExprContext& ctx, TTypeAnnotationContext& types) {
Y_UNUSED(types);
-
- // Static assert to ensure backward compatible change: if the
- // constant below is true, both input and output types of
- // ReplicateScalars callable have to be WideStream; otherwise,
- // both input and output types have to be WideFlow.
- // FIXME: When all spots using ReplicateScalars are adjusted
- // to work with WideStream, drop the assertion below.
- static_assert(!NYql::NBlockStreamIO::ReplicateScalars);
-
- if (node->Head().IsCallable("ReplicateScalars")) {
- YQL_CLOG(DEBUG, CorePeepHole) << "Swap " << node->Content() << " with " << node->Head().Content();
- return ctx.SwapWithHead(*node);
+ const auto& input = node->HeadPtr();
+ if (input->IsCallable("ToFlow") && input->Head().IsCallable("ReplicateScalars")) {
+ const auto& replicateScalars = input->HeadPtr();
+ // Technically, the code below rewrites the following sequence
+ // (Wide{Top,TopSort,Sort}Blocks (ToFlow (ReplicateScalars (<input>))))
+ // into (ToFlow (ReplicateScalars (FromFlow (Wide{Top,TopSort,Sort}Blocks (<input>))))),
+ // but ToFlow/FromFlow wrappers will be removed when all other
+ // nodes in block pipeline start using WideStream instead of the
+ // WideFlow. Hence, the logging is left intact.
+ YQL_CLOG(DEBUG, CorePeepHole) << "Swap " << node->Content() << " with " << replicateScalars->Content();
+ return SwapFlowNodeWithStreamNode(node, replicateScalars, ctx);
}
return node;
@@ -307,18 +280,15 @@ TExprNode::TPtr OptimizeBlockExtend(const TExprNode::TPtr& node, TExprContext& c
TExprNodeList inputs = node->ChildrenList();
bool hasReplicateScalars = false;
for (auto& input : inputs) {
-
- // Static assert to ensure backward compatible change: if the
- // constant below is true, both input and output types of
- // ReplicateScalars callable have to be WideStream; otherwise,
- // both input and output types have to be WideFlow.
- // FIXME: When all spots using ReplicateScalars are adjusted
- // to work with WideStream, drop the assertion below.
- static_assert(!NYql::NBlockStreamIO::ReplicateScalars);
-
- if (input->IsCallable("ReplicateScalars")) {
+ if (input->IsCallable("ToFlow") && input->Head().IsCallable("ReplicateScalars")) {
+ const auto& replicateScalars = input->Head();
hasReplicateScalars = true;
- input = input->HeadPtr();
+ // If tail is FromFlow, its input is WideFlow and can be
+ // used intact; Otherwise the input is WideStream, so the
+ // new input should be converted to WideFlow.
+ const auto tail = replicateScalars.HeadPtr();
+ input = tail->IsCallable("FromFlow") ? tail->HeadPtr()
+ : ctx.NewCallable(tail->Pos(), "ToFlow", { tail });
}
}
@@ -332,15 +302,6 @@ TExprNode::TPtr OptimizeBlockExtend(const TExprNode::TPtr& node, TExprContext& c
TExprNode::TPtr OptimizeReplicateScalars(const TExprNode::TPtr& node, TExprContext& ctx, TTypeAnnotationContext& types) {
Y_UNUSED(types);
-
- // Static assert to ensure backward compatible change: if the
- // constant below is true, both input and output types of
- // ReplicateScalars callable have to be WideStream; otherwise,
- // both input and output types have to be WideFlow.
- // FIXME: When all spots using ReplicateScalars are adjusted
- // to work with WideStream, drop the assertion below.
- static_assert(!NYql::NBlockStreamIO::ReplicateScalars);
-
if (node->Head().IsCallable("ReplicateScalars")) {
if (node->ChildrenSize() == 1) {
YQL_CLOG(DEBUG, CorePeepHole) << "Drop " << node->Head().Content() << " as input of " << node->Content();
@@ -384,16 +345,20 @@ TExprNode::TPtr ExpandBlockExtend(const TExprNode::TPtr& node, TExprContext& ctx
const bool hasScalars = AnyOf(items.begin(), items.end() - 1, [](const auto& item) { return item->IsScalar(); });
seenScalars = seenScalars || hasScalars;
+ TExprNode::TPtr newChild = child;
+ if (hasScalars) {
+ newChild = ctx.Builder(child->Pos())
+ .Callable("ToFlow")
+ .Callable(0, "ReplicateScalars")
+ .Callable(0, "FromFlow")
+ .Add(0, std::move(child))
+ .Seal()
+ .Seal()
+ .Seal()
+ .Build();
+ }
- // Static assert to ensure backward compatible change: if the
- // constant below is true, both input and output types of
- // ReplicateScalars callable have to be WideStream; otherwise,
- // both input and output types have to be WideFlow.
- // FIXME: When all spots using ReplicateScalars are adjusted
- // to work with WideStream, drop the assertion below.
- static_assert(!NYql::NBlockStreamIO::ReplicateScalars);
-
- newChildren.push_back(ctx.WrapByCallableIf(hasScalars, "ReplicateScalars", std::move(child)));
+ newChildren.push_back(newChild);
}
const TStringBuf newName = node->IsCallable("BlockExtend") ? "Extend" : "OrderedExtend";
@@ -405,17 +370,8 @@ TExprNode::TPtr ExpandBlockExtend(const TExprNode::TPtr& node, TExprContext& ctx
TExprNode::TPtr ExpandReplicateScalars(const TExprNode::TPtr& node, TExprContext& ctx, TTypeAnnotationContext& types) {
Y_UNUSED(types);
-
- // Static assert to ensure backward compatible change: if the
- // constant below is true, both input and output types of
- // ReplicateScalars callable have to be WideStream; otherwise,
- // both input and output types have to be WideFlow.
- // FIXME: When all spots using ReplicateScalars are adjusted
- // to work with WideStream, drop the assertion below.
- static_assert(!NYql::NBlockStreamIO::ReplicateScalars);
-
YQL_CLOG(DEBUG, CorePeepHole) << "Expand " << node->Content();
- const auto& items = node->Head().GetTypeAnn()->Cast<TFlowExprType>()->GetItemType()->Cast<TMultiExprType>()->GetItems();
+ const auto& items = node->Head().GetTypeAnn()->Cast<TStreamExprType>()->GetItemType()->Cast<TMultiExprType>()->GetItems();
const ui32 width = items.size();
TExprNodeList args;
@@ -447,9 +403,13 @@ TExprNode::TPtr ExpandReplicateScalars(const TExprNode::TPtr& node, TExprContext
}
return ctx.Builder(node->Pos())
- .Callable("WideMap")
- .Add(0, node->HeadPtr())
- .Add(1, ctx.NewLambda(node->Pos(), ctx.NewArguments(node->Pos(), std::move(args)), std::move(bodyItems)))
+ .Callable("FromFlow")
+ .Callable(0, "WideMap")
+ .Callable(0, "ToFlow")
+ .Add(0, node->HeadPtr())
+ .Seal()
+ .Add(1, ctx.NewLambda(node->Pos(), ctx.NewArguments(node->Pos(), std::move(args)), std::move(bodyItems)))
+ .Seal()
.Seal()
.Build();
}
@@ -6402,7 +6362,15 @@ bool CanRewriteToBlocksWithInput(const TExprNode& input, const TTypeAnnotationCo
case NYql::EBlockEngineMode::Disable:
return false;
case NYql::EBlockEngineMode::Auto:
- return input.IsCallable("WideFromBlocks");
+ // The code below matches, whether the input is one of
+ // the following:
+ // * (WideFromBlocks (...))
+ // * (ToFlow (WideFromBlocks (...)))
+ // FIXME: The latter option can be removed when
+ // WideStream overloads are implemented for all nodes,
+ // using this helper.
+ return input.IsCallable("WideFromBlocks") ||
+ input.IsCallable("ToFlow") && input.Head().IsCallable("WideFromBlocks");
case NYql::EBlockEngineMode::Force:
return true;
}
@@ -6744,8 +6712,9 @@ TExprNode::TPtr UpdateBlockCombineColumns(const TExprNode::TPtr& node, std::opti
TExprNode::TPtr OptimizeBlockCombine(const TExprNode::TPtr& node, TExprContext& ctx, TTypeAnnotationContext& types) {
Y_UNUSED(types);
- if (node->Head().IsCallable("WideMap")) {
- const auto& lambda = node->Head().Tail();
+ const auto& input = node->Head();
+ if (input.IsCallable("WideMap")) {
+ const auto& lambda = input.Tail();
TVector<ui32> argIndices;
bool onlyArguments = IsArgumentsOnlyLambda(lambda, argIndices);
if (onlyArguments) {
@@ -6754,10 +6723,10 @@ TExprNode::TPtr OptimizeBlockCombine(const TExprNode::TPtr& node, TExprContext&
}
}
- if (node->Head().IsCallable("BlockCompress") && node->Child(1)->IsCallable("Void")) {
- auto filterIndex = FromString<ui32>(node->Head().Child(1)->Content());
+ if (input.IsCallable("BlockCompress") && node->Child(1)->IsCallable("Void")) {
+ auto filterIndex = FromString<ui32>(input.Child(1)->Content());
TVector<ui32> argIndices;
- argIndices.resize(node->Head().GetTypeAnn()->Cast<TFlowExprType>()->GetItemType()->Cast<TMultiExprType>()->GetSize());
+ argIndices.resize(input.GetTypeAnn()->Cast<TFlowExprType>()->GetItemType()->Cast<TMultiExprType>()->GetSize());
for (ui32 i = 0; i < argIndices.size(); ++i) {
argIndices[i] = (i < filterIndex) ? i : i + 1;
}
@@ -6766,17 +6735,22 @@ TExprNode::TPtr OptimizeBlockCombine(const TExprNode::TPtr& node, TExprContext&
return UpdateBlockCombineColumns(node, filterIndex, argIndices, ctx);
}
- // Static assert to ensure backward compatible change: if the
- // constant below is true, both input and output types of
- // ReplicateScalars callable have to be WideStream; otherwise,
- // both input and output types have to be WideFlow.
- // FIXME: When all spots using ReplicateScalars are adjusted
- // to work with WideStream, drop the assertion below.
- static_assert(!NYql::NBlockStreamIO::ReplicateScalars);
-
- if (node->Head().IsCallable("ReplicateScalars")) {
- YQL_CLOG(DEBUG, CorePeepHole) << "Drop " << node->Head().Content() << " as input of " << node->Content();
- return ctx.ChangeChild(*node, 0, node->Head().HeadPtr());
+ if (input.IsCallable("ToFlow") && input.Head().IsCallable("ReplicateScalars")) {
+ const auto& replicateScalars = input.Head();
+ // Technically, the code below rewrites the following sequence
+ // (BlockCombine{All,Hashed} (ToFlow (ReplicateScalars (<input>))))
+ // into (BlockCombine{All,Hashed} (<input>)), but ToFlow/FromFlow
+ // wrappers will be removed when all other nodes in block pipeline
+ // start using WideStream instead of the WideFlow. Hence, the
+ // logging is left intact.
+ YQL_CLOG(DEBUG, CorePeepHole) << "Drop " << replicateScalars.Content() << " as input of " << node->Content();
+ // If tail is FromFlow, its input is WideFlow and can be
+ // used intact; Otherwise the input is WideStream, so the
+ // new input should be converted to WideFlow.
+ const auto tail = replicateScalars.HeadPtr();
+ auto flowInput = tail->IsCallable("FromFlow") ? tail->HeadPtr()
+ : ctx.NewCallable(tail->Pos(), "ToFlow", { tail });
+ return ctx.ChangeChild(*node, 0, std::move(flowInput));
}
return node;
@@ -6784,31 +6758,37 @@ TExprNode::TPtr OptimizeBlockCombine(const TExprNode::TPtr& node, TExprContext&
TExprNode::TPtr OptimizeBlockMerge(const TExprNode::TPtr& node, TExprContext& ctx, TTypeAnnotationContext& types) {
Y_UNUSED(types);
-
- // Static assert to ensure backward compatible change: if the
- // constant below is true, both input and output types of
- // ReplicateScalars callable have to be WideStream; otherwise,
- // both input and output types have to be WideFlow.
- // FIXME: When all spots using ReplicateScalars are adjusted
- // to work with WideStream, drop the assertion below.
- static_assert(!NYql::NBlockStreamIO::ReplicateScalars);
-
- if (node->Head().IsCallable("ReplicateScalars")) {
- YQL_CLOG(DEBUG, CorePeepHole) << "Drop " << node->Head().Content() << " as input of " << node->Content();
- return ctx.ChangeChild(*node, 0, node->Head().HeadPtr());
+ const auto& input = node->Head();
+ if (input.IsCallable("ToFlow") && input.Head().IsCallable("ReplicateScalars")) {
+ const auto& replicateScalars = input.Head();
+ // Technically, the code below rewrites the following sequence
+ // (BlockMerge{,Many}FinalizeHashed (ToFlow (ReplicateScalars (<input>))))
+ // into (BlockMerge{,Many}FinalizeHashed (<input>)), but
+ // ToFlow/FromFlow wrappers will be removed when all other nodes
+ // in block pipeline start using WideStream instead of the WideFlow.
+ // Hence, the logging is left intact.
+ YQL_CLOG(DEBUG, CorePeepHole) << "Drop " << replicateScalars.Content() << " as input of " << node->Content();
+ // If tail is FromFlow, its input is WideFlow and can be
+ // used intact; Otherwise the input is WideStream, so the
+ // new input should be converted to WideFlow.
+ const auto tail = replicateScalars.HeadPtr();
+ auto flowInput = tail->IsCallable("FromFlow") ? tail->HeadPtr()
+ : ctx.NewCallable(tail->Pos(), "ToFlow", { tail });
+ return ctx.ChangeChild(*node, 0, std::move(flowInput));
}
return node;
}
TExprNode::TPtr SwapReplicateScalarsWithWideMap(const TExprNode::TPtr& wideMap, TExprContext& ctx) {
- YQL_ENSURE(wideMap->IsCallable("WideMap") && wideMap->Head().IsCallable("ReplicateScalars"));
- const auto& input = wideMap->Head();
- auto inputTypes = input.GetTypeAnn()->Cast<TFlowExprType>()->GetItemType()->Cast<TMultiExprType>()->GetItems();
+ const auto& child = wideMap->Head();
+ YQL_ENSURE(wideMap->IsCallable("WideMap") && child.IsCallable("ToFlow") && child.Head().IsCallable("ReplicateScalars"));
+ const auto& input = child.Head();
+ auto inputTypes = input.GetTypeAnn()->Cast<TStreamExprType>()->GetItemType()->Cast<TMultiExprType>()->GetItems();
YQL_ENSURE(inputTypes.size() > 0);
THashSet<ui32> replicatedInputIndexes;
- auto replicateScalarsInputTypes = input.Head().GetTypeAnn()->Cast<TFlowExprType>()->GetItemType()->Cast<TMultiExprType>()->GetItems();
+ auto replicateScalarsInputTypes = input.Head().GetTypeAnn()->Cast<TStreamExprType>()->GetItemType()->Cast<TMultiExprType>()->GetItems();
YQL_ENSURE(replicateScalarsInputTypes.size() > 0);
if (input.ChildrenSize() == 1) {
for (ui32 i = 0; i + 1 < replicateScalarsInputTypes.size(); ++i) {
@@ -6865,21 +6845,19 @@ TExprNode::TPtr SwapReplicateScalarsWithWideMap(const TExprNode::TPtr& wideMap,
}
}
- // Static assert to ensure backward compatible change: if the
- // constant below is true, both input and output types of
- // ReplicateScalars callable have to be WideStream; otherwise,
- // both input and output types have to be WideFlow.
- // FIXME: When all spots using ReplicateScalars are adjusted
- // to work with WideStream, drop the assertion below.
- static_assert(!NYql::NBlockStreamIO::ReplicateScalars);
-
return ctx.Builder(wideMap->Pos())
- .Callable("ReplicateScalars")
- .Callable(0, "WideMap")
- .Add(0, input.HeadPtr())
- .Add(1, ctx.DeepCopyLambda(lambda))
+ .Callable("ToFlow")
+ .Callable(0, "ReplicateScalars")
+ .Callable(0, "FromFlow")
+ .Callable(0, "WideMap")
+ .Callable(0, "ToFlow")
+ .Add(0, input.HeadPtr())
+ .Seal()
+ .Add(1, ctx.DeepCopyLambda(lambda))
+ .Seal()
+ .Seal()
+ .Add(1, ctx.NewList(input.Pos(), std::move(replicatedOutputIndexes)))
.Seal()
- .Add(1, ctx.NewList(input.Pos(), std::move(replicatedOutputIndexes)))
.Seal()
.Build();
}
@@ -7053,17 +7031,8 @@ TExprNode::TPtr OptimizeWideMaps(const TExprNode::TPtr& node, TExprContext& ctx)
.Add(0, ctx.ChangeChildren(input, std::move(children)))
.Add(1, DropUnusedArgs(node->Tail(), unused, ctx))
.Seal().Build();
- } else if (node->IsCallable("WideMap") && input.IsCallable("ReplicateScalars")) {
-
- // Static assert to ensure backward compatible change: if the
- // constant below is true, both input and output types of
- // ReplicateScalars callable have to be WideStream; otherwise,
- // both input and output types have to be WideFlow.
- // FIXME: When all spots using ReplicateScalars are adjusted
- // to work with WideStream, drop the assertion below.
- static_assert(!NYql::NBlockStreamIO::ReplicateScalars);
-
- YQL_CLOG(DEBUG, CorePeepHole) << node->Content() << " over " << input.Content();
+ } else if (node->IsCallable("WideMap") && input.IsCallable("ToFlow") && input.Head().IsCallable("ReplicateScalars")) {
+ YQL_CLOG(DEBUG, CorePeepHole) << node->Content() << " over " << input.Head().Content();
return SwapReplicateScalarsWithWideMap(node, ctx);
}
}
diff --git a/yql/essentials/core/type_ann/type_ann_blocks.cpp b/yql/essentials/core/type_ann/type_ann_blocks.cpp
index c674d61ed5..c988284b2b 100644
--- a/yql/essentials/core/type_ann/type_ann_blocks.cpp
+++ b/yql/essentials/core/type_ann/type_ann_blocks.cpp
@@ -76,26 +76,17 @@ IGraphTransformer::TStatus ReplicateScalarWrapper(const TExprNode::TPtr& input,
}
IGraphTransformer::TStatus ReplicateScalarsWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TContext& ctx) {
-
- // Static assert to ensure backward compatible change: if the
- // constant below is true, both input and output types of
- // ReplicateScalars callable have to be WideStream; otherwise,
- // both input and output types have to be WideFlow.
- // FIXME: When all spots using ReplicateScalars are adjusted
- // to work with WideStream, drop the assertion below.
- static_assert(!NYql::NBlockStreamIO::ReplicateScalars);
-
if (!EnsureMinArgsCount(*input, 1, ctx.Expr) || !EnsureMaxArgsCount(*input, 2, ctx.Expr)) {
return IGraphTransformer::TStatus::Error;
}
TTypeAnnotationNode::TListType blockItemTypes;
- if (!EnsureWideFlowBlockType(input->Head(), blockItemTypes, ctx.Expr)) {
+ if (!EnsureWideStreamBlockType(input->Head(), blockItemTypes, ctx.Expr)) {
return IGraphTransformer::TStatus::Error;
}
- auto flowItemTypes = input->Head().GetTypeAnn()->Cast<TFlowExprType>()->GetItemType()->Cast<TMultiExprType>()->GetItems();
- YQL_ENSURE(flowItemTypes.size() > 0);
+ auto streamItemTypes = input->Head().GetTypeAnn()->Cast<TStreamExprType>()->GetItemType()->Cast<TMultiExprType>()->GetItems();
+ YQL_ENSURE(streamItemTypes.size() > 0);
TMaybe<THashSet<ui32>> replicateIndexes;
if (input->ChildrenSize() == 2) {
@@ -110,16 +101,16 @@ IGraphTransformer::TStatus ReplicateScalarsWrapper(const TExprNode::TPtr& input,
TStringBuilder() << "Expecting integer as replicate index, got: " << atom->Content()));
return IGraphTransformer::TStatus::Error;
}
- if (idx >= flowItemTypes.size() - 1) {
+ if (idx >= streamItemTypes.size() - 1) {
ctx.Expr.AddError(TIssue(ctx.Expr.GetPosition(atom->Pos()),
- TStringBuilder() << "Replicate index too big: " << idx << ", should be less than " << (flowItemTypes.size() - 1)));
+ TStringBuilder() << "Replicate index too big: " << idx << ", should be less than " << (streamItemTypes.size() - 1)));
return IGraphTransformer::TStatus::Error;
}
if (!replicateIndexes->insert(idx).second) {
ctx.Expr.AddError(TIssue(ctx.Expr.GetPosition(atom->Pos()), TStringBuilder() << "Duplicate replicate index " << idx));
return IGraphTransformer::TStatus::Error;
}
- if (flowItemTypes[idx]->GetKind() != ETypeAnnotationKind::Scalar) {
+ if (streamItemTypes[idx]->GetKind() != ETypeAnnotationKind::Scalar) {
ctx.Expr.AddError(TIssue(ctx.Expr.GetPosition(atom->Pos()), TStringBuilder() << "Invalid replicate index " << idx << ": input item is not scalar"));
return IGraphTransformer::TStatus::Error;
}
@@ -128,8 +119,8 @@ IGraphTransformer::TStatus ReplicateScalarsWrapper(const TExprNode::TPtr& input,
bool hasScalarsToConvert = false;
size_t inputScalarsCount = 0;
- for (size_t i = 0; i + 1 < flowItemTypes.size(); ++i) {
- auto& itemType = flowItemTypes[i];
+ for (size_t i = 0; i + 1 < streamItemTypes.size(); ++i) {
+ auto& itemType = streamItemTypes[i];
if (itemType->IsScalar()) {
++inputScalarsCount;
if (!replicateIndexes.Defined() || replicateIndexes->contains(i)) {
@@ -151,7 +142,7 @@ IGraphTransformer::TStatus ReplicateScalarsWrapper(const TExprNode::TPtr& input,
return IGraphTransformer::TStatus::Repeat;
}
- input->SetTypeAnn(ctx.Expr.MakeType<TFlowExprType>(ctx.Expr.MakeType<TMultiExprType>(flowItemTypes)));
+ input->SetTypeAnn(ctx.Expr.MakeType<TStreamExprType>(ctx.Expr.MakeType<TMultiExprType>(streamItemTypes)));
return IGraphTransformer::TStatus::Ok;
}
@@ -906,6 +897,43 @@ IGraphTransformer::TStatus WideToBlocksWrapper(const TExprNode::TPtr& input, TEx
return IGraphTransformer::TStatus::Ok;
}
+IGraphTransformer::TStatus ListToBlocksWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TExtContext& ctx) {
+ Y_UNUSED(output);
+ if (!EnsureArgsCount(*input, 1U, ctx.Expr)) {
+ return IGraphTransformer::TStatus::Error;
+ }
+
+ if (!EnsureListType(input->Head(), ctx.Expr)) {
+ return IGraphTransformer::TStatus::Error;
+ }
+
+ const auto listItemType = input->Head().GetTypeAnn()->Cast<TListExprType>()->GetItemType();
+ if (!EnsureStructType(input->Head().Pos(), *listItemType, ctx.Expr)) {
+ return IGraphTransformer::TStatus::Error;
+ }
+ const auto structType = listItemType->Cast<TStructExprType>();
+
+ TVector<const TItemExprType*> outputStructItems;
+ for (auto item : structType->GetItems()) {
+ auto itemType = item->GetItemType();
+ if (itemType->IsBlockOrScalar()) {
+ ctx.Expr.AddError(TIssue(ctx.Expr.GetPosition(input->Pos()), "Input type should not be a block or scalar"));
+ return IGraphTransformer::TStatus::Error;
+ }
+
+ if (!EnsureSupportedAsBlockType(input->Pos(), *itemType, ctx.Expr, ctx.Types)) {
+ return IGraphTransformer::TStatus::Error;
+ }
+
+ outputStructItems.push_back(ctx.Expr.MakeType<TItemExprType>(item->GetName(), ctx.Expr.MakeType<TBlockExprType>(itemType)));
+ }
+ outputStructItems.push_back(ctx.Expr.MakeType<TItemExprType>(BlockLengthColumnName, ctx.Expr.MakeType<TScalarExprType>(ctx.Expr.MakeType<TDataExprType>(EDataSlot::Uint64))));
+
+ auto outputStructType = ctx.Expr.MakeType<TStructExprType>(outputStructItems);
+ input->SetTypeAnn(ctx.Expr.MakeType<TListExprType>(outputStructType));
+ return IGraphTransformer::TStatus::Ok;
+}
+
IGraphTransformer::TStatus WideFromBlocksWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TContext& ctx) {
Y_UNUSED(output);
if (!EnsureArgsCount(*input, 1U, ctx.Expr)) {
@@ -924,6 +952,22 @@ IGraphTransformer::TStatus WideFromBlocksWrapper(const TExprNode::TPtr& input, T
return IGraphTransformer::TStatus::Ok;
}
+IGraphTransformer::TStatus ListFromBlocksWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TContext& ctx) {
+ Y_UNUSED(output);
+ if (!EnsureArgsCount(*input, 1U, ctx.Expr)) {
+ return IGraphTransformer::TStatus::Error;
+ }
+
+ TVector<const TItemExprType*> outputStructItems;
+ if (!EnsureBlockListType(input->Head(), outputStructItems, ctx.Expr)) {
+ return IGraphTransformer::TStatus::Error;
+ }
+
+ auto outputStructType = ctx.Expr.MakeType<TStructExprType>(outputStructItems);
+ input->SetTypeAnn(ctx.Expr.MakeType<TListExprType>(outputStructType));
+ return IGraphTransformer::TStatus::Ok;
+}
+
IGraphTransformer::TStatus WideSkipTakeBlocksWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TContext& ctx) {
if (!EnsureArgsCount(*input, 2U, ctx.Expr)) {
return IGraphTransformer::TStatus::Error;
diff --git a/yql/essentials/core/type_ann/type_ann_blocks.h b/yql/essentials/core/type_ann/type_ann_blocks.h
index 8a16376f9b..223758ef7f 100644
--- a/yql/essentials/core/type_ann/type_ann_blocks.h
+++ b/yql/essentials/core/type_ann/type_ann_blocks.h
@@ -28,7 +28,9 @@ namespace NTypeAnnImpl {
IGraphTransformer::TStatus BlockCombineHashedWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TExtContext& ctx);
IGraphTransformer::TStatus BlockMergeFinalizeHashedWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TExtContext& ctx);
IGraphTransformer::TStatus WideToBlocksWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TExtContext& ctx);
+ IGraphTransformer::TStatus ListToBlocksWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TExtContext& ctx);
IGraphTransformer::TStatus WideFromBlocksWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TContext& ctx);
+ IGraphTransformer::TStatus ListFromBlocksWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TContext& ctx);
IGraphTransformer::TStatus WideSkipTakeBlocksWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TContext& ctx);
IGraphTransformer::TStatus WideTopBlocksWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TContext& ctx);
IGraphTransformer::TStatus WideSortBlocksWrapper(const TExprNode::TPtr& input, TExprNode::TPtr& output, TContext& ctx);
diff --git a/yql/essentials/core/type_ann/type_ann_core.cpp b/yql/essentials/core/type_ann/type_ann_core.cpp
index 75e07ab3a7..cd6bd71608 100644
--- a/yql/essentials/core/type_ann/type_ann_core.cpp
+++ b/yql/essentials/core/type_ann/type_ann_core.cpp
@@ -12983,6 +12983,7 @@ template <NKikimr::NUdf::EDataSlot DataSlot>
Functions["NarrowMultiMap"] = &NarrowMultiMapWrapper;
Functions["WideFromBlocks"] = &WideFromBlocksWrapper;
+ Functions["ListFromBlocks"] = &ListFromBlocksWrapper;
Functions["WideSkipBlocks"] = &WideSkipTakeBlocksWrapper;
Functions["WideTakeBlocks"] = &WideSkipTakeBlocksWrapper;
Functions["BlockCompress"] = &BlockCompressWrapper;
@@ -13018,6 +13019,7 @@ template <NKikimr::NUdf::EDataSlot DataSlot>
ExtFunctions["AsScalar"] = &AsScalarWrapper;
ExtFunctions["WideToBlocks"] = &WideToBlocksWrapper;
+ ExtFunctions["ListToBlocks"] = &ListToBlocksWrapper;
ExtFunctions["BlockCombineAll"] = &BlockCombineAllWrapper;
ExtFunctions["BlockCombineHashed"] = &BlockCombineHashedWrapper;
ExtFunctions["BlockMergeFinalizeHashed"] = &BlockMergeFinalizeHashedWrapper;
diff --git a/yql/essentials/core/ya.make b/yql/essentials/core/ya.make
index d1142fe083..8626132b6f 100644
--- a/yql/essentials/core/ya.make
+++ b/yql/essentials/core/ya.make
@@ -71,6 +71,7 @@ PEERDIR(
yql/essentials/minikql
yql/essentials/minikql/jsonpath/parser
yql/essentials/core/minsketch
+ yql/essentials/core/histogram
yql/essentials/protos
yql/essentials/public/udf
yql/essentials/public/udf/tz
diff --git a/yql/essentials/core/yql_expr_constraint.cpp b/yql/essentials/core/yql_expr_constraint.cpp
index 32cba8f9d2..fdfc21946b 100644
--- a/yql/essentials/core/yql_expr_constraint.cpp
+++ b/yql/essentials/core/yql_expr_constraint.cpp
@@ -245,7 +245,9 @@ public:
Functions["WideTopSortBlocks"] = &TCallableConstraintTransformer::WideTopWrap<true>;
Functions["WideSortBlocks"] = &TCallableConstraintTransformer::WideTopWrap<true>;
Functions["WideToBlocks"] = &TCallableConstraintTransformer::CopyAllFrom<0>;
+ Functions["ListToBlocks"] = &TCallableConstraintTransformer::CopyAllFrom<0>;
Functions["WideFromBlocks"] = &TCallableConstraintTransformer::CopyAllFrom<0>;
+ Functions["ListFromBlocks"] = &TCallableConstraintTransformer::CopyAllFrom<0>;
Functions["ReplicateScalars"] = &TCallableConstraintTransformer::CopyAllFrom<0>;
Functions["BlockMergeFinalizeHashed"] = &TCallableConstraintTransformer::AggregateWrap<true>;
Functions["BlockMergeManyFinalizeHashed"] = &TCallableConstraintTransformer::AggregateWrap<true>;
diff --git a/yql/essentials/core/yql_expr_type_annotation.cpp b/yql/essentials/core/yql_expr_type_annotation.cpp
index 80207d35bc..16e29a60c8 100644
--- a/yql/essentials/core/yql_expr_type_annotation.cpp
+++ b/yql/essentials/core/yql_expr_type_annotation.cpp
@@ -10,6 +10,7 @@
#include <yql/essentials/minikql/dom/json.h>
#include <yql/essentials/minikql/dom/yson.h>
#include <yql/essentials/minikql/jsonpath/parser/parser.h>
+#include <yql/essentials/core/sql_types/block.h>
#include <yql/essentials/core/sql_types/simple_types.h>
#include "yql/essentials/parser/pg_catalog/catalog.h"
#include <yql/essentials/parser/pg_wrapper/interface/utils.h>
@@ -3269,6 +3270,52 @@ bool EnsureWideBlockType(TPositionHandle position, const TTypeAnnotationNode& ty
return true;
}
+bool EnsureBlockStructType(TPositionHandle position, const TTypeAnnotationNode& type, TVector<const TItemExprType*>& structItems, TExprContext& ctx) {
+ if (HasError(&type, ctx)) {
+ return false;
+ }
+
+ if (type.GetKind() != ETypeAnnotationKind::Struct) {
+ ctx.AddError(TIssue(ctx.GetPosition(position), TStringBuilder() << "Expected struct, but got: " << type));
+ return false;
+ }
+
+ auto& items = type.Cast<TStructExprType>()->GetItems();
+ if (items.empty()) {
+ ctx.AddError(TIssue(ctx.GetPosition(position), "Expected at least one column"));
+ return false;
+ }
+
+ bool hasBlockLengthColumn = false;
+ for (auto item : items) {
+ auto blockType = item->GetItemType();
+ if (!EnsureBlockOrScalarType(position, *blockType, ctx)) {
+ return false;
+ }
+
+ bool isScalar = false;
+ auto itemType = GetBlockItemType(*blockType, isScalar);
+
+ if (item->GetName() == BlockLengthColumnName) {
+ if (!isScalar) {
+ ctx.AddError(TIssue(ctx.GetPosition(position), "Block length column should be a scalar"));
+ return false;
+ }
+ if (!EnsureSpecificDataType(position, *itemType, EDataSlot::Uint64, ctx)) {
+ return false;
+ }
+ hasBlockLengthColumn = true;
+ } else {
+ structItems.push_back(ctx.MakeType<TItemExprType>(item->GetName(), itemType));
+ }
+ }
+ if (!hasBlockLengthColumn) {
+ ctx.AddError(TIssue(ctx.GetPosition(position), "Block struct must contain block length column"));
+ return false;
+ }
+ return true;
+}
+
bool EnsureWideFlowBlockType(const TExprNode& node, TTypeAnnotationNode::TListType& blockItemTypes, TExprContext& ctx, bool allowScalar) {
if (!EnsureWideFlowType(node, ctx)) {
return false;
@@ -3285,6 +3332,14 @@ bool EnsureWideStreamBlockType(const TExprNode& node, TTypeAnnotationNode::TList
return EnsureWideBlockType(node.Pos(), *node.GetTypeAnn()->Cast<TStreamExprType>()->GetItemType(), blockItemTypes, ctx, allowScalar);
}
+bool EnsureBlockListType(const TExprNode& node, TVector<const TItemExprType*>& structItems, TExprContext& ctx) {
+ if (!EnsureListType(node, ctx)) {
+ return false;
+ }
+
+ return EnsureBlockStructType(node.Pos(), *node.GetTypeAnn()->Cast<TListExprType>()->GetItemType(), structItems, ctx);
+}
+
bool EnsureOptionalType(const TExprNode& node, TExprContext& ctx) {
if (!node.GetTypeAnn()) {
YQL_ENSURE(node.Type() == TExprNode::Lambda);
diff --git a/yql/essentials/core/yql_expr_type_annotation.h b/yql/essentials/core/yql_expr_type_annotation.h
index b429f46f39..d23ce163e3 100644
--- a/yql/essentials/core/yql_expr_type_annotation.h
+++ b/yql/essentials/core/yql_expr_type_annotation.h
@@ -134,8 +134,10 @@ bool IsWideSequenceBlockType(const TTypeAnnotationNode& type);
bool IsSupportedAsBlockType(TPositionHandle pos, const TTypeAnnotationNode& type, TExprContext& ctx, TTypeAnnotationContext& types, bool reportUnspported = false);
bool EnsureSupportedAsBlockType(TPositionHandle pos, const TTypeAnnotationNode& type, TExprContext& ctx, TTypeAnnotationContext& types);
bool EnsureWideBlockType(TPositionHandle position, const TTypeAnnotationNode& type, TTypeAnnotationNode::TListType& blockItemTypes, TExprContext& ctx, bool allowScalar = true);
+bool EnsureBlockStructType(TPositionHandle position, const TTypeAnnotationNode& type, TVector<const TItemExprType*>& structItems, TExprContext& ctx);
bool EnsureWideFlowBlockType(const TExprNode& node, TTypeAnnotationNode::TListType& blockItemTypes, TExprContext& ctx, bool allowScalar = true);
bool EnsureWideStreamBlockType(const TExprNode& node, TTypeAnnotationNode::TListType& blockItemTypes, TExprContext& ctx, bool allowScalar = true);
+bool EnsureBlockListType(const TExprNode& node, TVector<const TItemExprType*>& structItems, TExprContext& ctx);
bool EnsureOptionalType(const TExprNode& node, TExprContext& ctx);
bool EnsureOptionalType(TPositionHandle position, const TTypeAnnotationNode& type, TExprContext& ctx);
bool EnsureType(const TExprNode& node, TExprContext& ctx);
@@ -354,7 +356,7 @@ TStringBuf NormalizeCallableName(TStringBuf name);
void CheckExpectedTypeAndColumnOrder(const TExprNode& node, TExprContext& ctx, TTypeAnnotationContext& typesCtx);
namespace NBlockStreamIO {
- constexpr bool ReplicateScalars = false;
+ constexpr bool ReplicateScalars = true;
} // namespace NBlockStreamIO
}
diff --git a/yql/essentials/core/yql_join.cpp b/yql/essentials/core/yql_join.cpp
index 84bce2d913..7cca45604d 100644
--- a/yql/essentials/core/yql_join.cpp
+++ b/yql/essentials/core/yql_join.cpp
@@ -340,14 +340,18 @@ namespace {
else if (option.IsAtom("join_algo")) {
//do nothing
}
- else if (option.IsAtom("shuffle_lhs_by") || option.IsAtom("shuffle_rhs_by")) {
- //do nothing
- }
else if (option.IsAtom("compact")) {
if (!EnsureTupleSize(*child, 1, ctx)) {
return IGraphTransformer::TStatus::Error;
}
}
+ else if (IsCachedJoinLinkOption(option.Content())) {
+ if (option.IsAtom("shuffle_lhs_by") || option.IsAtom("shuffle_rhs_by")) {
+ //do nothing
+ } else {
+ YQL_ENSURE(false, "Cached join link option '" << option.Content() << "' not handled");
+ }
+ }
else {
ctx.AddError(TIssue(ctx.GetPosition(option.Pos()), TStringBuilder() <<
"Unknown option name: " << option.Content()));
@@ -787,40 +791,38 @@ IGraphTransformer::TStatus ValidateEquiJoinOptions(TPositionHandle positionHandl
options.Flatten = true;
} else if (optionName == "strict_keys") {
options.StrictKeys = true;
- } else if (optionName == "preferred_sort") {
- THashSet<TStringBuf> sortBySet;
- TVector<TStringBuf> sortBy;
- if (!EnsureTupleSize(*child, 2, ctx)) {
- return IGraphTransformer::TStatus::Error;
- }
- if (!EnsureTupleMinSize(*child->Child(1), 1, ctx)) {
- return IGraphTransformer::TStatus::Error;
- }
- for (auto column : child->Child(1)->Children()) {
- if (!EnsureAtom(*column, ctx)) {
+ } else if (IsCachedJoinOption(optionName)) {
+ if (optionName == "preferred_sort") {
+ THashSet<TStringBuf> sortBySet;
+ TVector<TStringBuf> sortBy;
+ if (!EnsureTupleSize(*child, 2, ctx)) {
return IGraphTransformer::TStatus::Error;
}
- if (!sortBySet.insert(column->Content()).second) {
- ctx.AddError(TIssue(ctx.GetPosition(column->Pos()), TStringBuilder() <<
- "Duplicated preferred_sort column: " << column->Content()));
+ if (!EnsureTupleMinSize(*child->Child(1), 1, ctx)) {
return IGraphTransformer::TStatus::Error;
}
- sortBy.push_back(column->Content());
- }
- if (!options.PreferredSortSets.insert(sortBy).second) {
- ctx.AddError(TIssue(ctx.GetPosition(child->Child(1)->Pos()), TStringBuilder() <<
- "Duplicated preferred_sort set: " << JoinSeq(", ", sortBy)));
+ for (auto column : child->Child(1)->Children()) {
+ if (!EnsureAtom(*column, ctx)) {
+ return IGraphTransformer::TStatus::Error;
+ }
+ if (!sortBySet.insert(column->Content()).second) {
+ ctx.AddError(TIssue(ctx.GetPosition(column->Pos()), TStringBuilder() <<
+ "Duplicated preferred_sort column: " << column->Content()));
+ return IGraphTransformer::TStatus::Error;
+ }
+ sortBy.push_back(column->Content());
+ }
+ if (!options.PreferredSortSets.insert(sortBy).second) {
+ ctx.AddError(TIssue(ctx.GetPosition(child->Child(1)->Pos()), TStringBuilder() <<
+ "Duplicated preferred_sort set: " << JoinSeq(", ", sortBy)));
+ }
+ } else if (optionName == "cbo_passed") {
+ // do nothing
+ } else if (optionName == "multiple_joins") {
+ // do nothing
+ } else {
+ YQL_ENSURE(false, "Cached join option '" << optionName << "' not handled");
}
- } else if (optionName == "cbo_passed") {
- // do nothing
- } else if (optionName == "join_algo") {
- // do nothing
- } else if (optionName == "shuffle_lhs_by" || optionName == "shuffle_rhs_by") {
- // do nothing
- } else if (optionName == "multiple_joins") {
- // do nothing
- } else if (optionName == "compact") {
- options.Compact = true;
} else {
ctx.AddError(TIssue(position, TStringBuilder() <<
"Unknown option name: " << optionName));
@@ -2004,4 +2006,14 @@ void GatherJoinInputs(const TExprNode::TPtr& expr, const TExprNode& row,
}
}
+bool IsCachedJoinOption(TStringBuf name) {
+ static THashSet<TStringBuf> CachedJoinOptions = {"preferred_sort", "cbo_passed", "multiple_joins"};
+ return CachedJoinOptions.contains(name);
+}
+
+bool IsCachedJoinLinkOption(TStringBuf name) {
+ static THashSet<TStringBuf> CachedJoinLinkOptions = {"shuffle_lhs_by", "shuffle_rhs_by"};
+ return CachedJoinLinkOptions.contains(name);
+}
+
} // namespace NYql
diff --git a/yql/essentials/core/yql_join.h b/yql/essentials/core/yql_join.h
index f556a46724..a313aceefa 100644
--- a/yql/essentials/core/yql_join.h
+++ b/yql/essentials/core/yql_join.h
@@ -60,7 +60,6 @@ struct TJoinOptions {
bool Flatten = false;
bool StrictKeys = false;
- bool Compact = false;
};
IGraphTransformer::TStatus ValidateEquiJoinOptions(
@@ -178,5 +177,7 @@ void GatherJoinInputs(const TExprNode::TPtr& expr, const TExprNode& row,
const TParentsMap& parentsMap, const THashMap<TString, TString>& backRenameMap,
const TJoinLabels& labels, TSet<ui32>& inputs, TSet<TStringBuf>& usedFields);
+bool IsCachedJoinOption(TStringBuf name);
+bool IsCachedJoinLinkOption(TStringBuf name);
}
diff --git a/yql/essentials/core/yql_statistics.cpp b/yql/essentials/core/yql_statistics.cpp
index f0958b8976..cf70ad2cf5 100644
--- a/yql/essentials/core/yql_statistics.cpp
+++ b/yql/essentials/core/yql_statistics.cpp
@@ -189,6 +189,16 @@ std::shared_ptr<TOptimizerStatistics> NYql::OverrideStatistics(const NYql::TOpti
Base64StrictDecode(countMinBase64, countMinRaw);
cStat.CountMinSketch.reset(NKikimr::TCountMinSketch::FromString(countMinRaw.data(), countMinRaw.size()));
}
+ if (auto eqWidthHistogram = colMap.find("histogram"); eqWidthHistogram != colMap.end()) {
+ TString histogramBase64 = eqWidthHistogram->second.GetStringSafe();
+
+ TString histogramBinary{};
+ Base64StrictDecode(histogramBase64, histogramBinary);
+ auto histogram = std::make_shared<NKikimr::TEqWidthHistogram>(
+ histogramBinary.data(), histogramBinary.size());
+ cStat.EqWidthHistogramEstimator =
+ std::make_shared<NKikimr::TEqWidthHistogramEstimator>(histogram);
+ }
res->ColumnStatistics->Data[columnName] = cStat;
}
diff --git a/yql/essentials/core/yql_statistics.h b/yql/essentials/core/yql_statistics.h
index f3875138f3..e96e8b9f50 100644
--- a/yql/essentials/core/yql_statistics.h
+++ b/yql/essentials/core/yql_statistics.h
@@ -2,6 +2,7 @@
#include "yql_cost_function.h"
#include <yql/essentials/core/minsketch/count_min_sketch.h>
+#include <yql/essentials/core/histogram/eq_width_histogram.h>
#include <library/cpp/json/json_reader.h>
@@ -36,6 +37,7 @@ struct TColumnStatistics {
std::optional<double> NumUniqueVals;
std::optional<double> HyperLogLog;
std::shared_ptr<NKikimr::TCountMinSketch> CountMinSketch;
+ std::shared_ptr<NKikimr::TEqWidthHistogramEstimator> EqWidthHistogramEstimator;
TString Type;
TColumnStatistics() {}
diff --git a/yql/essentials/core/yql_type_annotation.cpp b/yql/essentials/core/yql_type_annotation.cpp
index 8934949d3f..6de6ecf8bd 100644
--- a/yql/essentials/core/yql_type_annotation.cpp
+++ b/yql/essentials/core/yql_type_annotation.cpp
@@ -303,7 +303,7 @@ IGraphTransformer::TStatus TTypeAnnotationContext::SetColumnOrder(const TExprNod
allColumns.erase(it);
}
- if (!allColumns.empty()) {
+ if (!allColumns.empty() && !(allColumns.size() == 1 && *allColumns.begin() == BlockLengthColumnName)) {
ctx.AddError(TIssue(ctx.GetPosition(node.Pos()),
TStringBuilder() << "Some columns are left unordered with column order " << FormatColumnOrder(columnOrder) << " for node "
<< node.Content() << " with type: " << *node.GetTypeAnn()));
diff --git a/yql/essentials/data/language/pragmas_opensource.json b/yql/essentials/data/language/pragmas_opensource.json
new file mode 100644
index 0000000000..4e24ec9b5a
--- /dev/null
+++ b/yql/essentials/data/language/pragmas_opensource.json
@@ -0,0 +1 @@
+[{"name":"yt.Annotations"},{"name":"yt.ApplyStoredConstraints"},{"name":"yt.Auth"},{"name":"yt.AutoMerge"},{"name":"yt.BatchListFolderConcurrency"},{"name":"yt.BinaryExpirationInterval"},{"name":"yt.BinaryTmpFolder"},{"name":"yt.BlockMapJoin"},{"name":"yt.BlockReaderSupportedDataTypes"},{"name":"yt.BlockReaderSupportedTypes"},{"name":"yt.BufferRowCount"},{"name":"yt.ClientMapTimeout"},{"name":"yt.ColumnGroupMode"},{"name":"yt.CombineCoreLimit"},{"name":"yt.CommonJoinCoreLimit"},{"name":"yt.CompactForDistinct"},{"name":"yt.CoreDumpPath"},{"name":"yt.DQRPCReaderInflight"},{"name":"yt.DQRPCReaderTimeout"},{"name":"yt.DataSizePerJob"},{"name":"yt.DataSizePerMapJob"},{"name":"yt.DataSizePerPartition"},{"name":"yt.DataSizePerSortJob"},{"name":"yt.DefaultCalcMemoryLimit"},{"name":"yt.DefaultCluster"},{"name":"yt.DefaultLocalityTimeout"},{"name":"yt.DefaultMapSelectivityFactor"},{"name":"yt.DefaultMaxJobFails"},{"name":"yt.DefaultMemoryDigestLowerBound"},{"name":"yt.DefaultMemoryLimit"},{"name":"yt.DefaultMemoryReserveFactor"},{"name":"yt.DefaultOperationWeight"},{"name":"yt.DefaultRuntimeCluster"},{"name":"yt.Description"},{"name":"yt.DisableFuseOperations"},{"name":"yt.DisableJobSplitting"},{"name":"yt.DisableOptimizers"},{"name":"yt.DockerImage"},{"name":"yt.DqPruneKeyFilterLambda"},{"name":"yt.DropUnusedKeysFromKeyFilter"},{"name":"yt.EnableDynamicStoreReadInDQ"},{"name":"yt.EnableFuseMapToMapReduce"},{"name":"yt.EnforceJobUtc"},{"name":"yt.ErasureCodecCpu"},{"name":"yt.ErasureCodecCpuForDq"},{"name":"yt.EvaluationTableSizeLimit"},{"name":"yt.ExpirationDeadline"},{"name":"yt.ExpirationInterval"},{"name":"yt.ExtendTableLimit"},{"name":"yt.ExtendedStatsMaxChunkCount"},{"name":"yt.ExternalTx"},{"name":"yt.ExtraTmpfsSize"},{"name":"yt.FileCacheTtl"},{"name":"yt.FmrOperationSpec"},{"name":"yt.FolderInlineDataLimit"},{"name":"yt.FolderInlineItemsLimit"},{"name":"yt.ForceInferSchema"},{"name":"yt.ForceJobSizeAdjuster"},{"name":"yt.ForceTmpSecurity"},{"name":"yt.GeobaseDownloa
dUrl"},{"name":"yt.HybridDqDataSizeLimitForOrdered"},{"name":"yt.HybridDqDataSizeLimitForUnordered"},{"name":"yt.HybridDqExecution"},{"name":"yt.HybridDqExecutionFallback"},{"name":"yt.IgnoreTypeV3"},{"name":"yt.IgnoreWeakSchema"},{"name":"yt.IgnoreYamrDsv"},{"name":"yt.InferSchema"},{"name":"yt.InferSchemaMode"},{"name":"yt.InferSchemaTableCountThreshold"},{"name":"yt.InflightTempTablesLimit"},{"name":"yt.IntermediateAccount"},{"name":"yt.IntermediateDataMedium"},{"name":"yt.IntermediateReplicationFactor"},{"name":"yt.JavascriptCpu"},{"name":"yt.JobBlockInput"},{"name":"yt.JobBlockInputSupportedDataTypes"},{"name":"yt.JobBlockInputSupportedTypes"},{"name":"yt.JobBlockOutput"},{"name":"yt.JobBlockOutputSupportedDataTypes"},{"name":"yt.JobBlockOutputSupportedTypes"},{"name":"yt.JobBlockTableContent"},{"name":"yt.JobEnv"},{"name":"yt.JoinAllowColumnRenames"},{"name":"yt.JoinCollectColumnarStatistics"},{"name":"yt.JoinColumnarStatisticsFetcherMode"},{"name":"yt.JoinCommonUseMapMultiOut"},{"name":"yt.JoinEnableStarJoin"},{"name":"yt.JoinMergeForce"},{"name":"yt.JoinMergeReduceJobMaxSize"},{"name":"yt.JoinMergeSetTopLevelFullSort"},{"name":"yt.JoinMergeTablesLimit"},{"name":"yt.JoinMergeUnsortedFactor"},{"name":"yt.JoinMergeUseSmallAsPrimary"},{"name":"yt.JoinUseColumnarStatistics"},{"name":"yt.JoinWaitAllInputs"},{"name":"yt.KeepTempTables"},{"name":"yt.KeyFilterForStartsWith"},{"name":"yt.LLVMMemSize"},{"name":"yt.LLVMNodeCountLimit"},{"name":"yt.LLVMPerNodeMemSize"},{"name":"yt.LayerPaths"},{"name":"yt.LocalCalcLimit"},{"name":"yt.LookupJoinLimit"},{"name":"yt.LookupJoinMaxRows"},{"name":"yt.MapJoinLimit"},{"name":"yt.MapJoinShardCount"},{"name":"yt.MapJoinShardMinRows"},{"name":"yt.MapJoinUseFlow"},{"name":"yt.MapLocalityTimeout"},{"name":"yt.MaxChunksForDqRead"},{"name":"yt.MaxColumnGroups"},{"name":"yt.MaxCpuUsageToFuseMultiOuts"},{"name":"yt.MaxExtraJobMemoryToFuseOperations"},{"name":"yt.MaxInputTables"},{"name":"yt.MaxInputTablesForSortedMerge"},{"name":"yt.MaxJ
obCount"},{"name":"yt.MaxKeyRangeCount"},{"name":"yt.MaxKeyWeight"},{"name":"yt.MaxOperationFiles"},{"name":"yt.MaxOutputTables"},{"name":"yt.MaxReplicationFactorToFuseMultiOuts"},{"name":"yt.MaxReplicationFactorToFuseOperations"},{"name":"yt.MaxRowWeight"},{"name":"yt.MaxSpeculativeJobCountPerTask"},{"name":"yt.MergeAdjacentPointRanges"},{"name":"yt.MinColumnGroupSize"},{"name":"yt.MinLocalityInputDataWeight"},{"name":"yt.MinPublishedAvgChunkSize"},{"name":"yt.MinTempAvgChunkSize"},{"name":"yt.NativeYtTypeCompatibility"},{"name":"yt.NetworkProject"},{"name":"yt.NightlyCompress"},{"name":"yt.OperationReaders"},{"name":"yt.OperationSpec"},{"name":"yt.OptimizeFor"},{"name":"yt.Owners"},{"name":"yt.ParallelOperationsLimit"},{"name":"yt.PartitionByConstantKeysViaMap"},{"name":"yt.Pool"},{"name":"yt.PoolTrees"},{"name":"yt.PrimaryMedium"},{"name":"yt.PruneKeyFilterLambda"},{"name":"yt.PruneQLFilterLambda"},{"name":"yt.PublishedAutoMerge"},{"name":"yt.PublishedCompressionCodec"},{"name":"yt.PublishedErasureCodec"},{"name":"yt.PublishedMedia"},{"name":"yt.PublishedPrimaryMedium"},{"name":"yt.PublishedReplicationFactor"},{"name":"yt.PythonCpu"},{"name":"yt.QueryCacheChunkLimit"},{"name":"yt.QueryCacheIgnoreTableRevision"},{"name":"yt.QueryCacheMode"},{"name":"yt.QueryCacheSalt"},{"name":"yt.QueryCacheTtl"},{"name":"yt.QueryCacheUseExpirationTimeout"},{"name":"yt.QueryCacheUseForCalc"},{"name":"yt.ReduceLocalityTimeout"},{"name":"yt.ReleaseTempData"},{"name":"yt.ReportEquiJoinStats"},{"name":"yt.RuntimeCluster"},{"name":"yt.RuntimeClusterSelection"},{"name":"yt.SamplingIoBlockSize"},{"name":"yt.SchedulingTag"},{"name":"yt.SchedulingTagFilter"},{"name":"yt.ScriptCpu"},{"name":"yt.SortLocalityTimeout"},{"name":"yt.StartedBy"},{"name":"yt.StaticPool"},{"name":"yt.SuspendIfAccountLimitExceeded"},{"name":"yt.SwitchLimit"},{"name":"yt.TableContentColumnarStatistics"},{"name":"yt.TableContentCompressLevel"},{"name":"yt.TableContentDeliveryMode"},{"name":"yt.TableContentLocalExecuti
on"},{"name":"yt.TableContentMaxChunksForNativeDelivery"},{"name":"yt.TableContentMaxInputTables"},{"name":"yt.TableContentMinAvgChunkSize"},{"name":"yt.TableContentTmpFolder"},{"name":"yt.TableContentUseSkiff"},{"name":"yt.TablesTmpFolder"},{"name":"yt.TempTablesTtl"},{"name":"yt.TemporaryAutoMerge"},{"name":"yt.TemporaryCompressionCodec"},{"name":"yt.TemporaryErasureCodec"},{"name":"yt.TemporaryMedia"},{"name":"yt.TemporaryPrimaryMedium"},{"name":"yt.TemporaryReplicationFactor"},{"name":"yt.TentativePoolTrees"},{"name":"yt.TentativeTreeEligibilityMaxJobDurationRatio"},{"name":"yt.TentativeTreeEligibilityMinJobDuration"},{"name":"yt.TentativeTreeEligibilitySampleJobCount"},{"name":"yt.TmpFolder"},{"name":"yt.TopSortMaxLimit"},{"name":"yt.TopSortRowMultiplierPerJob"},{"name":"yt.TopSortSizePerJob"},{"name":"yt.UseAggPhases"},{"name":"yt.UseColumnGroupsFromInputTables"},{"name":"yt.UseColumnarStatistics"},{"name":"yt.UseDefaultTentativePoolTrees"},{"name":"yt.UseFlow"},{"name":"yt.UseIntermediateSchema"},{"name":"yt.UseIntermediateStreams"},{"name":"yt.UseNativeDescSort"},{"name":"yt.UseNativeYtTypes"},{"name":"yt.UseNewPredicateExtraction"},{"name":"yt.UsePartitionsByKeysForFinalAgg"},{"name":"yt.UseQLFilter"},{"name":"yt.UseRPCReaderInDQ"},{"name":"yt.UseSkiff"},{"name":"yt.UseSystemColumns"},{"name":"yt.UseTmpfs"},{"name":"yt.UseTypeV2"},{"name":"yt.UseYqlRowSpecCompactForm"},{"name":"yt.UserSlots"},{"name":"yt.ViewIsolation"},{"name":"yt.WideFlowLimit"},{"name":"dq.AggregateStatsByStage"},{"name":"dq.AnalyticsHopping"},{"name":"dq.AnalyzeQuery"},{"name":"dq.ChannelBufferSize"},{"name":"dq.ChunkSizeLimit"},{"name":"dq.CollectCoreDumps"},{"name":"dq.ComputeActorType"},{"name":"dq.DataSizePerJob"},{"name":"dq.DisableCheckpoints"},{"name":"dq.DisableLLVMForBlockStages"},{"name":"dq.EnableChannelStats"},{"name":"dq.EnableComputeActor"},{"name":"dq.EnableDqReplicate"},{"name":"dq.EnableFullResultWrite"},{"name":"dq.EnableInsert"},{"name":"dq.EnableSpillingInChannels"},
{"name":"dq.EnableSpillingNodes"},{"name":"dq.EnableStrip"},{"name":"dq.ExportStats"},{"name":"dq.FallbackPolicy"},{"name":"dq.HashJoinMode"},{"name":"dq.HashShuffleMaxTasks"},{"name":"dq.HashShuffleTasksRatio"},{"name":"dq.MaxDataSizePerJob"},{"name":"dq.MaxDataSizePerQuery"},{"name":"dq.MaxNetworkRetries"},{"name":"dq.MaxRetries"},{"name":"dq.MaxTasksPerOperation"},{"name":"dq.MaxTasksPerStage"},{"name":"dq.MemoryLimit"},{"name":"dq.OptLLVM"},{"name":"dq.OutputChunkMaxSize"},{"name":"dq.ParallelOperationsLimit"},{"name":"dq.PingTimeoutMs"},{"name":"dq.PullRequestTimeoutMs"},{"name":"dq.QueryTimeout"},{"name":"dq.RetryBackoffMs"},{"name":"dq.Scheduler"},{"name":"dq.SpillingEngine"},{"name":"dq.SplitStageOnDqReplicate"},{"name":"dq.TaskRunnerStats"},{"name":"dq.UseAggPhases"},{"name":"dq.UseBlockReader"},{"name":"dq.UseFastPickleTransport"},{"name":"dq.UseFinalizeByKey"},{"name":"dq.UseGraceJoinCoreForMap"},{"name":"dq.UseOOBTransport"},{"name":"dq.UseSimpleYtReader"},{"name":"dq.UseWideBlockChannels"},{"name":"dq.UseWideChannels"},{"name":"dq.WatermarksEnableIdlePartitions"},{"name":"dq.WatermarksGranularityMs"},{"name":"dq.WatermarksLateArrivalDelayMs"},{"name":"dq.WatermarksMode"},{"name":"dq.WorkerFilter"},{"name":"dq.WorkersPerOperation"},{"name":"AllowDotInAlias"},{"name":"AllowUnnamedColumns"},{"name":"AnsiCurrentRow"},{"name":"AnsiImplicitCrossJoin"},{"name":"AnsiInForEmptyOrNullableItemsCollections"},{"name":"AnsiLike"},{"name":"AnsiOptionalAs"},{"name":"AnsiRankForNullableKeys"},{"name":"AutoCommit"},{"name":"BlockEngine"},{"name":"BlockEngineEnable"},{"name":"BlockEngineForce"},{"name":"BogousStarInGroupByOverJoin"},{"name":"CheckedOps"},{"name":"ClassicDivision"},{"name":"CoalesceJoinKeysOnQualifiedAll"},{"name":"CompactGroupBy"},{"name":"CompactNamedExprs"},{"name":"CostBasedOptimizer"},{"name":"DataWatermarks"},{"name":"DirectRead"},{"name":"DisableAnsiCurrentRow"},{"name":"DisableAnsiImplicitCrossJoin"},{"name":"DisableAnsiInForEmptyOrNullableItemsCol
lections"},{"name":"DisableAnsiLike"},{"name":"DisableAnsiOptionalAs"},{"name":"DisableAnsiRankForNullableKeys"},{"name":"DisableBlockEngineEnable"},{"name":"DisableBlockEngineForce"},{"name":"DisableBogousStarInGroupByOverJoin"},{"name":"DisableCoalesceJoinKeysOnQualifiedAll"},{"name":"DisableCompactGroupBy"},{"name":"DisableCompactNamedExprs"},{"name":"DisableDistinctOverWindow"},{"name":"DisableDqEngineEnable"},{"name":"DisableDqEngineForce"},{"name":"DisableEmitAggApply"},{"name":"DisableEmitStartsWith"},{"name":"DisableEmitTableSource"},{"name":"DisableEmitUnionMerge"},{"name":"DisableFilterPushdownOverJoinOptionalSide"},{"name":"DisableFlexibleTypes"},{"name":"DisableJsonQueryReturnsJsonDocument"},{"name":"DisableOrderedColumns"},{"name":"DisablePullUpFlatMapOverJoin"},{"name":"DisableRegexUseRe2"},{"name":"DisableRotateJoinTree"},{"name":"DisableSeqMode"},{"name":"DisableSimpleColumns"},{"name":"DisableStrictJoinKeyTypes"},{"name":"DisableUnicodeLiterals"},{"name":"DisableUnorderedResult"},{"name":"DisableUnorderedSubqueries"},{"name":"DisableUseBlocks"},{"name":"DisableValidateUnusedExprs"},{"name":"DisableWarnOnAnsiAliasShadowing"},{"name":"DisableWarnUntypedStringLiterals"},{"name":"DiscoveryMode"},{"name":"DistinctOverWindow"},{"name":"DqEngine"},{"name":"DqEngineEnable"},{"name":"DqEngineForce"},{"name":"EmitAggApply"},{"name":"EmitStartsWith"},{"name":"EmitTableSource"},{"name":"EmitUnionMerge"},{"name":"EnableSystemColumns"},{"name":"Engine"},{"name":"ErrorMsg"},{"name":"FeatureR010"},{"name":"File"},{"name":"FileOption"},{"name":"FilterPushdownOverJoinOptionalSide"},{"name":"FlexibleTypes"},{"name":"Folder"},{"name":"Greetings"},{"name":"GroupByCubeLimit"},{"name":"GroupByLimit"},{"name":"JsonQueryReturnsJsonDocument"},{"name":"Library"},{"name":"OrderedColumns"},{"name":"OverrideLibrary"},{"name":"Package"},{"name":"PackageVersion"},{"name":"PathPrefix"},{"name":"PositionalUnionAll"},{"name":"PqReadBy"},{"name":"PullUpFlatMapOverJoin"},{"name":"RefSe
lect"},{"name":"RegexUseRe2"},{"name":"ResultRowsLimit"},{"name":"ResultSizeLimit"},{"name":"RotateJoinTree"},{"name":"RuntimeLogLevel"},{"name":"SampleSelect"},{"name":"SeqMode"},{"name":"SimpleColumns"},{"name":"StrictJoinKeyTypes"},{"name":"Udf"},{"name":"UnicodeLiterals"},{"name":"UnorderedResult"},{"name":"UnorderedSubqueries"},{"name":"UseBlocks"},{"name":"UseTablePrefixForEach"},{"name":"ValidateUnusedExprs"},{"name":"WarnOnAnsiAliasShadowing"},{"name":"WarnUnnamedColumns"},{"name":"WarnUntypedStringLiterals"},{"name":"Warning"},{"name":"WarningMsg"},{"name":"yson.AutoConvert"},{"name":"yson.CastToString"},{"name":"yson.DisableCastToString"},{"name":"yson.DisableStrict"},{"name":"yson.Strict"}]
diff --git a/yql/essentials/data/language/rules_corr_basic.json b/yql/essentials/data/language/rules_corr_basic.json
index 08839c98f7..738d88fc61 100644
--- a/yql/essentials/data/language/rules_corr_basic.json
+++ b/yql/essentials/data/language/rules_corr_basic.json
@@ -1 +1 @@
-[{"parent":"FUNC","rule":"ABC","sum":1},{"parent":"FUNC","rule":"ABS","sum":3087662},{"parent":"FUNC","rule":"ABs","sum":1},{"parent":"FUNC","rule":"ADAPTIVE_WARD_HISTOGRAM","sum":1},{"parent":"FUNC","rule":"ADDTIMEZONE","sum":383},{"parent":"FUNC","rule":"AGGLIST","sum":1185},{"parent":"FUNC","rule":"AGGList","sum":1},{"parent":"FUNC","rule":"AGGREATE_LIST","sum":2},{"parent":"FUNC","rule":"AGGREGATELIST","sum":72},{"parent":"FUNC","rule":"AGGREGATE_BY","sum":1546765},{"parent":"FUNC","rule":"AGGREGATE_LIST","sum":19689134},{"parent":"FUNC","rule":"AGGREGATE_LIST_","sum":2},{"parent":"FUNC","rule":"AGGREGATE_LIST_DISTINCT","sum":12601412},{"parent":"FUNC","rule":"AGGREGATE_LIST_DISTINCt","sum":2},{"parent":"FUNC","rule":"AGGREGATE_LIST_DISTNCT","sum":1},{"parent":"FUNC","rule":"AGGREGATE_LIST_DiSTINCT","sum":4},{"parent":"FUNC","rule":"AGGREGATE_LIST_Distinct","sum":804},{"parent":"FUNC","rule":"AGGREGATE_LIST_distINCT","sum":4},{"parent":"FUNC","rule":"AGGREGATE_LIST_distinct","sum":18880},{"parent":"FUNC","rule":"AGGREGATE_LISt","sum":6},{"parent":"FUNC","rule":"AGGREGATE_LiST","sum":106},{"parent":"FUNC","rule":"AGGREGATE_List","sum":75},{"parent":"FUNC","rule":"AGGREGATE_lIST","sum":59},{"parent":"FUNC","rule":"AGGREGATE_lIST_DISTINCT","sum":6},{"parent":"FUNC","rule":"AGGREGATE_liST_DISTINCT","sum":9},{"parent":"FUNC","rule":"AGGREGATE_list","sum":310},{"parent":"FUNC","rule":"AGGREGATE_list_distinct","sum":1},{"parent":"FUNC","rule":"AGGREGATIONFACTORY","sum":57},{"parent":"FUNC","rule":"AGGREGATION_FACTORY","sum":65919},{"parent":"FUNC","rule":"AGGR_LIST","sum":2938},{"parent":"FUNC","rule":"AGGR_LIST_DISTINCT","sum":44719},{"parent":"FUNC","rule":"AGGReGATE_LIST","sum":3},{"parent":"FUNC","rule":"AGGReGate_list","sum":1},{"parent":"FUNC","rule":"AGG_LIST","sum":5086855},{"parent":"FUNC","rule":"AGG_LIST_","sum":5},{"parent":"FUNC","rule":"AGG_LIST_DISTINCT","sum":2853522},{"parent":"FUNC","rule":"AGG_LIST_DISTINCt","sum":9},{"parent":"FUNC","rule":"AGG_LIS
T_DIStiNCT","sum":10},{"parent":"FUNC","rule":"AGG_LIST_DiSTINCT","sum":4},{"parent":"FUNC","rule":"AGG_LIST_Distinct","sum":47},{"parent":"FUNC","rule":"AGG_LIST_distinct","sum":647},{"parent":"FUNC","rule":"AGG_LISt","sum":2},{"parent":"FUNC","rule":"AGG_LIst","sum":11},{"parent":"FUNC","rule":"AGG_LiST","sum":11},{"parent":"FUNC","rule":"AGG_LiST_DIStiNCT","sum":2},{"parent":"FUNC","rule":"AGG_List","sum":6275},{"parent":"FUNC","rule":"AGG_List_DISTINCT","sum":1},{"parent":"FUNC","rule":"AGG_List_Distinct","sum":949},{"parent":"FUNC","rule":"AGG_List_distinct","sum":103},{"parent":"FUNC","rule":"AGG_lIST_DISTINCT","sum":5},{"parent":"FUNC","rule":"AGG_list","sum":17870},{"parent":"FUNC","rule":"AGG_list_DISTINCT","sum":3116},{"parent":"FUNC","rule":"AGG_list_distinct","sum":769},{"parent":"FUNC","rule":"AGGrEGATE_LIST","sum":2},{"parent":"FUNC","rule":"AGGreGATE_LIST_DISTINCT","sum":3},{"parent":"FUNC","rule":"AGGregateList","sum":19},{"parent":"FUNC","rule":"AGGregate_LIST","sum":3},{"parent":"FUNC","rule":"AGGregate_LIST_DISTINCT","sum":1},{"parent":"FUNC","rule":"AGGregate_List_Distinct","sum":4},{"parent":"FUNC","rule":"AGGregate_list","sum":6},{"parent":"FUNC","rule":"AGGregate_list_distinct","sum":65},{"parent":"FUNC","rule":"AND","sum":46},{"parent":"FUNC","rule":"ARRAY_AGG","sum":2},{"parent":"FUNC","rule":"ASDICT","sum":187},{"parent":"FUNC","rule":"ASDict","sum":640},{"parent":"FUNC","rule":"ASEnum","sum":78},{"parent":"FUNC","rule":"ASIN","sum":2},{"parent":"FUNC","rule":"ASLIST","sum":27913},{"parent":"FUNC","rule":"ASLIst","sum":1},{"parent":"FUNC","rule":"ASList","sum":23309},{"parent":"FUNC","rule":"ASSET","sum":17},{"parent":"FUNC","rule":"ASSTRUCT","sum":2847},{"parent":"FUNC","rule":"ASSet","sum":5},{"parent":"FUNC","rule":"ASStruct","sum":2108},{"parent":"FUNC","rule":"ASTAGGED","sum":15},{"parent":"FUNC","rule":"ASTAgged","sum":3},{"parent":"FUNC","rule":"ASTUPLE","sum":1603},{"parent":"FUNC","rule":"ASTagged","sum":62},{"parent":"FUNC","rule"
:"ASTuple","sum":5590},{"parent":"FUNC","rule":"AS_DICT","sum":6},{"parent":"FUNC","rule":"AS_LIST","sum":119},{"parent":"FUNC","rule":"AS_STRUCT","sum":13241},{"parent":"FUNC","rule":"AS_TABLE","sum":32},{"parent":"FUNC","rule":"AS_TUPLE","sum":832},{"parent":"FUNC","rule":"ASdict","sum":4},{"parent":"FUNC","rule":"ASlist","sum":996},{"parent":"FUNC","rule":"ASstruct","sum":41},{"parent":"FUNC","rule":"AStagged","sum":3},{"parent":"FUNC","rule":"AStuple","sum":422},{"parent":"FUNC","rule":"AVG","sum":9007449},{"parent":"FUNC","rule":"AVGIF","sum":15},{"parent":"FUNC","rule":"AVG_IF","sum":728455},{"parent":"FUNC","rule":"AVG_If","sum":8},{"parent":"FUNC","rule":"AVG_if","sum":14767},{"parent":"FUNC","rule":"AVg","sum":4},{"parent":"FUNC","rule":"Abs","sum":322850},{"parent":"FUNC","rule":"AdaptiveDistanceHistogramCDF","sum":1},{"parent":"FUNC","rule":"AdaptiveWardHistogram","sum":1},{"parent":"FUNC","rule":"AdaptiveWardHistogramCDF","sum":3},{"parent":"FUNC","rule":"AdaptiveWeightHistogram","sum":57716},{"parent":"FUNC","rule":"AddMember","sum":875107},{"parent":"FUNC","rule":"AddTimeZone","sum":510670},{"parent":"FUNC","rule":"AddTimezone","sum":12113370},{"parent":"FUNC","rule":"Addtimezone","sum":60},{"parent":"FUNC","rule":"AggLIst","sum":1},{"parent":"FUNC","rule":"AggList","sum":18445},{"parent":"FUNC","rule":"AggListDistinct","sum":69},{"parent":"FUNC","rule":"Agg_LIST","sum":60},{"parent":"FUNC","rule":"Agg_LIST_DISTINCT","sum":3},{"parent":"FUNC","rule":"Agg_LIST_Distinct","sum":1},{"parent":"FUNC","rule":"Agg_LIst","sum":2},{"parent":"FUNC","rule":"Agg_List","sum":128583},{"parent":"FUNC","rule":"Agg_List_","sum":8},{"parent":"FUNC","rule":"Agg_List_DISTINCT","sum":730},{"parent":"FUNC","rule":"Agg_List_Distinct","sum":2541},{"parent":"FUNC","rule":"Agg_List_distinct","sum":4},{"parent":"FUNC","rule":"Agg_list","sum":35301},{"parent":"FUNC","rule":"Agg_list_distinct","sum":8200},{"parent":"FUNC","rule":"Aggergate_List","sum":1},{"parent":"FUNC","rule":"Ag
grList","sum":6},{"parent":"FUNC","rule":"Aggr_List","sum":1},{"parent":"FUNC","rule":"AggreGate_List","sum":6},{"parent":"FUNC","rule":"AggregateBy","sum":42097},{"parent":"FUNC","rule":"AggregateFlatten","sum":52405},{"parent":"FUNC","rule":"AggregateList","sum":40540},{"parent":"FUNC","rule":"AggregateListDistinct","sum":7726},{"parent":"FUNC","rule":"AggregateTransformInput","sum":25439},{"parent":"FUNC","rule":"AggregateTransformOutput","sum":45216},{"parent":"FUNC","rule":"Aggregate_BY","sum":248},{"parent":"FUNC","rule":"Aggregate_By","sum":138311},{"parent":"FUNC","rule":"Aggregate_LIST","sum":835},{"parent":"FUNC","rule":"Aggregate_LIST_DISTINCT","sum":2},{"parent":"FUNC","rule":"Aggregate_LIst","sum":12},{"parent":"FUNC","rule":"Aggregate_List","sum":344071},{"parent":"FUNC","rule":"Aggregate_List_Distinct","sum":5125},{"parent":"FUNC","rule":"Aggregate_List_distinct","sum":1241},{"parent":"FUNC","rule":"Aggregate_by","sum":79},{"parent":"FUNC","rule":"Aggregate_list","sum":225628},{"parent":"FUNC","rule":"Aggregate_list_DISTINCT","sum":36},{"parent":"FUNC","rule":"Aggregate_list_Distinct","sum":7},{"parent":"FUNC","rule":"Aggregate_list_distinct","sum":200172},{"parent":"FUNC","rule":"AggregationFactory","sum":1500874},{"parent":"FUNC","rule":"Apply","sum":1},{"parent":"FUNC","rule":"AsAtom","sum":688274},{"parent":"FUNC","rule":"AsDict","sum":2916201},{"parent":"FUNC","rule":"AsDictStrict","sum":4546},{"parent":"FUNC","rule":"AsEnum","sum":49363},{"parent":"FUNC","rule":"AsLIST","sum":921},{"parent":"FUNC","rule":"AsLIst","sum":2852},{"parent":"FUNC","rule":"AsLisT","sum":1493},{"parent":"FUNC","rule":"AsList","sum":17228428},{"parent":"FUNC","rule":"AsListStrict","sum":85930},{"parent":"FUNC","rule":"AsListstrict","sum":1},{"parent":"FUNC","rule":"AsSTruct","sum":371},{"parent":"FUNC","rule":"AsSet","sum":1583958},{"parent":"FUNC","rule":"AsSetStrict","sum":11208},{"parent":"FUNC","rule":"AsStruct","sum":23386032},{"parent":"FUNC","rule":"AsTAgged","sum
":293},{"parent":"FUNC","rule":"AsTUPLE","sum":442},{"parent":"FUNC","rule":"AsTUple","sum":270},{"parent":"FUNC","rule":"AsTable","sum":6},{"parent":"FUNC","rule":"AsTaggeD","sum":4},{"parent":"FUNC","rule":"AsTagged","sum":183083},{"parent":"FUNC","rule":"AsTuPle","sum":1},{"parent":"FUNC","rule":"AsTuple","sum":30038540},{"parent":"FUNC","rule":"AsVariant","sum":356380},{"parent":"FUNC","rule":"As_List","sum":6},{"parent":"FUNC","rule":"As_Struct","sum":6},{"parent":"FUNC","rule":"As_list","sum":20},{"parent":"FUNC","rule":"As_tuple","sum":791},{"parent":"FUNC","rule":"Asdict","sum":176},{"parent":"FUNC","rule":"AslIST","sum":1},{"parent":"FUNC","rule":"AslIst","sum":14},{"parent":"FUNC","rule":"Aslist","sum":23055},{"parent":"FUNC","rule":"Asset","sum":49},{"parent":"FUNC","rule":"Asstruct","sum":374},{"parent":"FUNC","rule":"AssumeStrict","sum":2155},{"parent":"FUNC","rule":"Astagged","sum":166},{"parent":"FUNC","rule":"Astuple","sum":603},{"parent":"FUNC","rule":"AtomCode","sum":516000},{"parent":"FUNC","rule":"Avg","sum":138598},{"parent":"FUNC","rule":"Avg_IF","sum":162},{"parent":"FUNC","rule":"Avg_If","sum":106},{"parent":"FUNC","rule":"Avg_if","sum":251},{"parent":"FUNC","rule":"BIT_AND","sum":4},{"parent":"FUNC","rule":"BIT_OR","sum":38095},{"parent":"FUNC","rule":"BIT_XOR","sum":422858},{"parent":"FUNC","rule":"BOOL_AND","sum":266030},{"parent":"FUNC","rule":"BOOL_OR","sum":881252},{"parent":"FUNC","rule":"BOOL_XOR","sum":46},{"parent":"FUNC","rule":"BOOL_and","sum":2},{"parent":"FUNC","rule":"BOOl_OR","sum":366},{"parent":"FUNC","rule":"BOTTOM","sum":11733},{"parent":"FUNC","rule":"BOTTOM_BY","sum":46084},{"parent":"FUNC","rule":"BOTTOM_by","sum":2},{"parent":"FUNC","rule":"Bool","sum":1345},{"parent":"FUNC","rule":"Bool_And","sum":118},{"parent":"FUNC","rule":"Bool_Or","sum":568},{"parent":"FUNC","rule":"Bool_and","sum":85},{"parent":"FUNC","rule":"Bool_or","sum":823},{"parent":"FUNC","rule":"Bottom","sum":283},{"parent":"FUNC","rule":"Bottom_BY","sum
":10},{"parent":"FUNC","rule":"Bottom_By","sum":87},{"parent":"FUNC","rule":"Bottom_by","sum":1583},{"parent":"FUNC","rule":"ByteAt","sum":12113},{"parent":"FUNC","rule":"Bytes","sum":3},{"parent":"FUNC","rule":"CHAR_LENGTH","sum":357},{"parent":"FUNC","rule":"COALECSE","sum":1},{"parent":"FUNC","rule":"COALESCE","sum":51205369},{"parent":"FUNC","rule":"COALESCe","sum":10},{"parent":"FUNC","rule":"COALESce","sum":8},{"parent":"FUNC","rule":"COALEsCE","sum":1},{"parent":"FUNC","rule":"COALEsce","sum":4},{"parent":"FUNC","rule":"COALeSCE","sum":1},{"parent":"FUNC","rule":"COALesce","sum":14},{"parent":"FUNC","rule":"COAlESCE","sum":2736},{"parent":"FUNC","rule":"COAlesce","sum":12},{"parent":"FUNC","rule":"CONCAT","sum":19},{"parent":"FUNC","rule":"COOUNT","sum":2},{"parent":"FUNC","rule":"CORR","sum":431},{"parent":"FUNC","rule":"CORRELATION","sum":19898},{"parent":"FUNC","rule":"COS","sum":4},{"parent":"FUNC","rule":"COUNT","sum":52424641},{"parent":"FUNC","rule":"COUNTD","sum":4},{"parent":"FUNC","rule":"COUNTDISTINCTESTIMATE","sum":609},{"parent":"FUNC","rule":"COUNTDistinctEstimate","sum":7},{"parent":"FUNC","rule":"COUNTIF","sum":16861},{"parent":"FUNC","rule":"COUNT_","sum":1},{"parent":"FUNC","rule":"COUNT_IF","sum":24213017},{"parent":"FUNC","rule":"COUNT_IF_","sum":24},{"parent":"FUNC","rule":"COUNT_If","sum":13225},{"parent":"FUNC","rule":"COUNT_iF","sum":43},{"parent":"FUNC","rule":"COUNT_if","sum":19804},{"parent":"FUNC","rule":"COUNt","sum":1256},{"parent":"FUNC","rule":"COUNt_IF","sum":24},{"parent":"FUNC","rule":"COUNt_If","sum":3},{"parent":"FUNC","rule":"COUNt_if","sum":11},{"parent":"FUNC","rule":"COUnT","sum":9},{"parent":"FUNC","rule":"COUnT_IF","sum":7},{"parent":"FUNC","rule":"COUnt","sum":39},{"parent":"FUNC","rule":"COVAR","sum":2814},{"parent":"FUNC","rule":"COVARIANCE","sum":1498},{"parent":"FUNC","rule":"COVARIANCE_POPULATION","sum":64},{"parent":"FUNC","rule":"COVARIANCE_SAMPLE","sum":4},{"parent":"FUNC","rule":"COVAR_POP","sum":4},{"paren
t":"FUNC","rule":"COalesce","sum":22},{"parent":"FUNC","rule":"COuNT","sum":64},{"parent":"FUNC","rule":"COuNT_If","sum":1},{"parent":"FUNC","rule":"COunt","sum":188},{"parent":"FUNC","rule":"COunt_IF","sum":57},{"parent":"FUNC","rule":"COunt_If","sum":2},{"parent":"FUNC","rule":"COunt_iF","sum":1},{"parent":"FUNC","rule":"COunt_if","sum":3},{"parent":"FUNC","rule":"CUME_DIST","sum":109},{"parent":"FUNC","rule":"CURRENTUTCDATE","sum":5416},{"parent":"FUNC","rule":"CURRENTUTCDATETIME","sum":6623},{"parent":"FUNC","rule":"CURRENTUTCDate","sum":36},{"parent":"FUNC","rule":"CURRENT_UTC_DATE","sum":2},{"parent":"FUNC","rule":"CallableArgument","sum":10045},{"parent":"FUNC","rule":"CallableArgumentType","sum":19066},{"parent":"FUNC","rule":"CallableResultType","sum":116},{"parent":"FUNC","rule":"CallableType","sum":174356},{"parent":"FUNC","rule":"CallableTypeHandle","sum":5515},{"parent":"FUNC","rule":"Ceil","sum":3},{"parent":"FUNC","rule":"ChooseMembers","sum":2497546},{"parent":"FUNC","rule":"Choosemembers","sum":21082},{"parent":"FUNC","rule":"ChosenMembers","sum":8},{"parent":"FUNC","rule":"ClearBit","sum":31148},{"parent":"FUNC","rule":"CoALESCE","sum":1},{"parent":"FUNC","rule":"CoUNT","sum":30},{"parent":"FUNC","rule":"CoUNT_IF","sum":17},{"parent":"FUNC","rule":"CoUNt","sum":2},{"parent":"FUNC","rule":"CoUnt","sum":2},{"parent":"FUNC","rule":"Coalesce","sum":722176},{"parent":"FUNC","rule":"Collect","sum":1},{"parent":"FUNC","rule":"CollectList","sum":1},{"parent":"FUNC","rule":"CombineMembers","sum":501363},{"parent":"FUNC","rule":"Concat","sum":1},{"parent":"FUNC","rule":"Correlation","sum":178},{"parent":"FUNC","rule":"CouNT","sum":2},{"parent":"FUNC","rule":"CounT","sum":17},{"parent":"FUNC","rule":"Count","sum":395725},{"parent":"FUNC","rule":"CountDistinctEstimate","sum":158460},{"parent":"FUNC","rule":"CountIF","sum":81},{"parent":"FUNC","rule":"CountIf","sum":526},{"parent":"FUNC","rule":"Count_IF","sum":15252},{"parent":"FUNC","rule":"Count_If","sum":42
727},{"parent":"FUNC","rule":"Count_if","sum":324404},{"parent":"FUNC","rule":"Countif","sum":12},{"parent":"FUNC","rule":"CurrentAuthenticatedUser","sum":89327},{"parent":"FUNC","rule":"CurrentDatetime","sum":1},{"parent":"FUNC","rule":"CurrentOperationId","sum":90375},{"parent":"FUNC","rule":"CurrentOperationSharedId","sum":5558},{"parent":"FUNC","rule":"CurrentTZDate","sum":733},{"parent":"FUNC","rule":"CurrentTZDateTime","sum":35},{"parent":"FUNC","rule":"CurrentTZDatetime","sum":514},{"parent":"FUNC","rule":"CurrentTZTimestamp","sum":388},{"parent":"FUNC","rule":"CurrentTZdatetime","sum":67},{"parent":"FUNC","rule":"CurrentTzDate","sum":1608844},{"parent":"FUNC","rule":"CurrentTzDateTime","sum":1470331},{"parent":"FUNC","rule":"CurrentTzDatetime","sum":1978212},{"parent":"FUNC","rule":"CurrentTzTimeStamp","sum":4372},{"parent":"FUNC","rule":"CurrentTzTimestamp","sum":1888512},{"parent":"FUNC","rule":"CurrentUTCDATE","sum":22},{"parent":"FUNC","rule":"CurrentUTCDAte","sum":39},{"parent":"FUNC","rule":"CurrentUTCDate","sum":921323},{"parent":"FUNC","rule":"CurrentUTCDateTime","sum":182215},{"parent":"FUNC","rule":"CurrentUTCDatetime","sum":130451},{"parent":"FUNC","rule":"CurrentUTCTimeStamp","sum":105},{"parent":"FUNC","rule":"CurrentUTCTimestamp","sum":455213},{"parent":"FUNC","rule":"CurrentUTCdate","sum":71188},{"parent":"FUNC","rule":"CurrentUTCdatetime","sum":12},{"parent":"FUNC","rule":"CurrentUTcDate","sum":63},{"parent":"FUNC","rule":"CurrentUtCDate","sum":5},{"parent":"FUNC","rule":"CurrentUtCDatetime","sum":3},{"parent":"FUNC","rule":"CurrentUtcDATE","sum":2},{"parent":"FUNC","rule":"CurrentUtcDAte","sum":3},{"parent":"FUNC","rule":"CurrentUtcDatE","sum":1},{"parent":"FUNC","rule":"CurrentUtcDate","sum":19532934},{"parent":"FUNC","rule":"CurrentUtcDateTIME","sum":1},{"parent":"FUNC","rule":"CurrentUtcDateTime","sum":3179621},{"parent":"FUNC","rule":"CurrentUtcDatetime","sum":13760831},{"parent":"FUNC","rule":"CurrentUtcDttm","sum":3},{"parent":"FUNC","
rule":"CurrentUtcTimeStamp","sum":60865},{"parent":"FUNC","rule":"CurrentUtcTimestamp","sum":15105793},{"parent":"FUNC","rule":"CurrentUtcdate","sum":11803},{"parent":"FUNC","rule":"CurrenttzDate","sum":310},{"parent":"FUNC","rule":"CurrenttzDatetime","sum":1},{"parent":"FUNC","rule":"CurrentutcDate","sum":20599},{"parent":"FUNC","rule":"CurrentutcDateTime","sum":492},{"parent":"FUNC","rule":"CurrentutcTimestamp","sum":4},{"parent":"FUNC","rule":"Currentutcdate","sum":1465},{"parent":"FUNC","rule":"Currentutcdatetime","sum":22622},{"parent":"FUNC","rule":"D","sum":1},{"parent":"FUNC","rule":"DATE","sum":127693},{"parent":"FUNC","rule":"DATEADD","sum":3},{"parent":"FUNC","rule":"DATEDIFF","sum":9},{"parent":"FUNC","rule":"DATETIME","sum":832},{"parent":"FUNC","rule":"DATE_PART","sum":16},{"parent":"FUNC","rule":"DATE_TRUNC","sum":29},{"parent":"FUNC","rule":"DATe","sum":20},{"parent":"FUNC","rule":"DAte","sum":169},{"parent":"FUNC","rule":"DAtetime","sum":61},{"parent":"FUNC","rule":"DENSE_RANK","sum":272243},{"parent":"FUNC","rule":"DICTKEYS","sum":30},{"parent":"FUNC","rule":"DICTLENGTH","sum":4},{"parent":"FUNC","rule":"DICTLength","sum":2},{"parent":"FUNC","rule":"DICTPAYLOADS","sum":2},{"parent":"FUNC","rule":"DICT_CONTAINS","sum":5},{"parent":"FUNC","rule":"DIctHasItems","sum":1},{"parent":"FUNC","rule":"DIctItems","sum":3},{"parent":"FUNC","rule":"DIctKeys","sum":1},{"parent":"FUNC","rule":"DIctLength","sum":1},{"parent":"FUNC","rule":"DIctLookup","sum":16},{"parent":"FUNC","rule":"DOUBLE","sum":63},{"parent":"FUNC","rule":"DatE","sum":3},{"parent":"FUNC","rule":"DataType","sum":68043},{"parent":"FUNC","rule":"DataTypeComponents","sum":36704},{"parent":"FUNC","rule":"DataTypeHandle","sum":2},{"parent":"FUNC","rule":"Datatype","sum":2},{"parent":"FUNC","rule":"Date","sum":1767677},{"parent":"FUNC","rule":"Date32","sum":74},{"parent":"FUNC","rule":"DateTime","sum":433506},{"parent":"FUNC","rule":"DateTime64","sum":45},{"parent":"FUNC","rule":"Date_Diff","sum":1}
,{"parent":"FUNC","rule":"DatetimE","sum":8},{"parent":"FUNC","rule":"Datetime","sum":245619},{"parent":"FUNC","rule":"Datetime64","sum":6},{"parent":"FUNC","rule":"Decimal","sum":30941},{"parent":"FUNC","rule":"DenseRank","sum":26},{"parent":"FUNC","rule":"Dense_RANK","sum":1},{"parent":"FUNC","rule":"Dense_Rank","sum":49},{"parent":"FUNC","rule":"Dense_rank","sum":6},{"parent":"FUNC","rule":"DicTKeys","sum":1},{"parent":"FUNC","rule":"DictAggregate","sum":141677},{"parent":"FUNC","rule":"DictCOntains","sum":351},{"parent":"FUNC","rule":"DictContains","sum":2023048},{"parent":"FUNC","rule":"DictCreate","sum":68793},{"parent":"FUNC","rule":"DictHasItems","sum":619694},{"parent":"FUNC","rule":"DictHasitems","sum":262},{"parent":"FUNC","rule":"DictItems","sum":2543449},{"parent":"FUNC","rule":"DictKEys","sum":1},{"parent":"FUNC","rule":"DictKeYS","sum":3},{"parent":"FUNC","rule":"DictKeyType","sum":940},{"parent":"FUNC","rule":"DictKeys","sum":2714488},{"parent":"FUNC","rule":"DictLOokup","sum":7},{"parent":"FUNC","rule":"DictLength","sum":1010189},{"parent":"FUNC","rule":"DictLookUP","sum":3},{"parent":"FUNC","rule":"DictLookUp","sum":54807},{"parent":"FUNC","rule":"DictLookup","sum":5990848},{"parent":"FUNC","rule":"DictPayLoads","sum":10617},{"parent":"FUNC","rule":"DictPayloadType","sum":208},{"parent":"FUNC","rule":"DictPayloads","sum":925682},{"parent":"FUNC","rule":"DictType","sum":626},{"parent":"FUNC","rule":"DictTypeComponents","sum":3},{"parent":"FUNC","rule":"DictTypeHandle","sum":1},{"parent":"FUNC","rule":"DictValues","sum":5},{"parent":"FUNC","rule":"Dict_Keys","sum":12},{"parent":"FUNC","rule":"Dictcontains","sum":7},{"parent":"FUNC","rule":"Dictitems","sum":19821},{"parent":"FUNC","rule":"Dictkeys","sum":1107},{"parent":"FUNC","rule":"Dictlength","sum":6},{"parent":"FUNC","rule":"Dictlookup","sum":75177},{"parent":"FUNC","rule":"Double","sum":34701},{"parent":"FUNC","rule":"DyNumber","sum":4},{"parent":"FUNC","rule":"EACH","sum":4},{"parent":"FUNC","r
ule":"ENDSWITH","sum":13303},{"parent":"FUNC","rule":"ENDsWith","sum":85},{"parent":"FUNC","rule":"ENSURE","sum":830120},{"parent":"FUNC","rule":"EOMONTH","sum":1},{"parent":"FUNC","rule":"EmptyDict","sum":420},{"parent":"FUNC","rule":"EmptyDictTypeHandle","sum":3},{"parent":"FUNC","rule":"EmptyList","sum":24582},{"parent":"FUNC","rule":"Emptydict","sum":6},{"parent":"FUNC","rule":"EndsWIth","sum":870},{"parent":"FUNC","rule":"EndsWith","sum":1538493},{"parent":"FUNC","rule":"Endswith","sum":19798},{"parent":"FUNC","rule":"Ensure","sum":1428331},{"parent":"FUNC","rule":"EnsureConvertibleTo","sum":1956},{"parent":"FUNC","rule":"EnsureType","sum":228291},{"parent":"FUNC","rule":"EvaluateAtom","sum":514},{"parent":"FUNC","rule":"EvaluateCode","sum":469846},{"parent":"FUNC","rule":"EvaluateExpr","sum":897702},{"parent":"FUNC","rule":"EvaluateType","sum":120824},{"parent":"FUNC","rule":"ExpandStruct","sum":257039},{"parent":"FUNC","rule":"ExtractUkropCtx","sum":1},{"parent":"FUNC","rule":"FIND","sum":5423952},{"parent":"FUNC","rule":"FIRST","sum":3},{"parent":"FUNC","rule":"FIRST_VALUE","sum":8847477},{"parent":"FUNC","rule":"FIRST_value","sum":785},{"parent":"FUNC","rule":"FIRsT_VALUE","sum":1},{"parent":"FUNC","rule":"FIleContent","sum":1},{"parent":"FUNC","rule":"FLATTEN","sum":9},{"parent":"FUNC","rule":"FLOAT","sum":7},{"parent":"FUNC","rule":"FLOOR","sum":1},{"parent":"FUNC","rule":"FORMATTYPE","sum":1},{"parent":"FUNC","rule":"FROMBYTES","sum":5},{"parent":"FUNC","rule":"FROmbytes","sum":4},{"parent":"FUNC","rule":"FileCOntent","sum":29},{"parent":"FUNC","rule":"FileContent","sum":2330268},{"parent":"FUNC","rule":"FilePath","sum":1447956},{"parent":"FUNC","rule":"Filecontent","sum":11721},{"parent":"FUNC","rule":"Filepath","sum":10},{"parent":"FUNC","rule":"FinD","sum":16},{"parent":"FUNC","rule":"Find","sum":322367},{"parent":"FUNC","rule":"FirsT_VALUE","sum":3},{"parent":"FUNC","rule":"FirstValue","sum":34},{"parent":"FUNC","rule":"First_VALUE","sum":12},{"paren
t":"FUNC","rule":"First_Value","sum":28},{"parent":"FUNC","rule":"First_value","sum":180},{"parent":"FUNC","rule":"FlattenMembers","sum":108910},{"parent":"FUNC","rule":"Float","sum":12294},{"parent":"FUNC","rule":"FoldMap","sum":13},{"parent":"FUNC","rule":"Folder","sum":13},{"parent":"FUNC","rule":"FolderPath","sum":10124},{"parent":"FUNC","rule":"ForceRemoveMember","sum":399636},{"parent":"FUNC","rule":"ForceRemoveMembers","sum":1072339},{"parent":"FUNC","rule":"ForceRenameMembers","sum":63857},{"parent":"FUNC","rule":"ForceSpreadMembers","sum":113357},{"parent":"FUNC","rule":"ForceSpreadmembers","sum":4},{"parent":"FUNC","rule":"Format","sum":1},{"parent":"FUNC","rule":"FormatCode","sum":688},{"parent":"FUNC","rule":"FormatType","sum":490404},{"parent":"FUNC","rule":"FormatTypeDiff","sum":781},{"parent":"FUNC","rule":"FormatTypeDiffPretty","sum":102},{"parent":"FUNC","rule":"Formattype","sum":101},{"parent":"FUNC","rule":"FromBytes","sum":98581},{"parent":"FUNC","rule":"FromPg","sum":4802},{"parent":"FUNC","rule":"FromYsonSimpleType","sum":1},{"parent":"FUNC","rule":"From_bytes","sum":1},{"parent":"FUNC","rule":"FromatType","sum":1},{"parent":"FUNC","rule":"Frombytes","sum":12},{"parent":"FUNC","rule":"FuncCode","sum":1219779},{"parent":"FUNC","rule":"GETDATE","sum":1},{"parent":"FUNC","rule":"GREATEST","sum":388297},{"parent":"FUNC","rule":"GROUPING","sum":70388},{"parent":"FUNC","rule":"GROUPINg","sum":4},{"parent":"FUNC","rule":"GROUP_CONCAT","sum":1},{"parent":"FUNC","rule":"GatherMembers","sum":1477114},{"parent":"FUNC","rule":"Gather_Members","sum":38},{"parent":"FUNC","rule":"Gathermembers","sum":8},{"parent":"FUNC","rule":"GetLength","sum":2},{"parent":"FUNC","rule":"GetWeekOfYear","sum":1},{"parent":"FUNC","rule":"Greatest","sum":6010},{"parent":"FUNC","rule":"Grouping","sum":242},{"parent":"FUNC","rule":"HISTOGRAM","sum":268900},{"parent":"FUNC","rule":"HISTOGRAMCDF","sum":468},{"parent":"FUNC","rule":"HISTOGRAMCdf","sum":47},{"parent":"FUNC","rule":"H
ISTOGRAM_CDF","sum":3},{"parent":"FUNC","rule":"HISTOGRAMcdf","sum":2},{"parent":"FUNC","rule":"HISTOGrAM","sum":56},{"parent":"FUNC","rule":"HISTOgram","sum":1},{"parent":"FUNC","rule":"HISToGRAM","sum":1},{"parent":"FUNC","rule":"HISTogram","sum":1},{"parent":"FUNC","rule":"HIStOGRAM","sum":1},{"parent":"FUNC","rule":"HIstogram","sum":34},{"parent":"FUNC","rule":"HLL","sum":12525},{"parent":"FUNC","rule":"HOP_END","sum":16},{"parent":"FUNC","rule":"HOP_START","sum":4},{"parent":"FUNC","rule":"Histogram","sum":35170},{"parent":"FUNC","rule":"HistogramCDF","sum":368},{"parent":"FUNC","rule":"HistogramCdf","sum":34},{"parent":"FUNC","rule":"Histogram_CDF","sum":58},{"parent":"FUNC","rule":"Histogramcdf","sum":1},{"parent":"FUNC","rule":"Hll","sum":1976},{"parent":"FUNC","rule":"HyperLogLog","sum":4221},{"parent":"FUNC","rule":"IF","sum":74831249},{"parent":"FUNC","rule":"IFNULL","sum":9},{"parent":"FUNC","rule":"IF_STRICT","sum":1},{"parent":"FUNC","rule":"IN","sum":2},{"parent":"FUNC","rule":"INT","sum":16},{"parent":"FUNC","rule":"INT32","sum":1},{"parent":"FUNC","rule":"INTERVAL","sum":886616},{"parent":"FUNC","rule":"INterval","sum":767},{"parent":"FUNC","rule":"If","sum":1068086},{"parent":"FUNC","rule":"IfNull","sum":6},{"parent":"FUNC","rule":"InstanceOf","sum":244693},{"parent":"FUNC","rule":"Int","sum":1},{"parent":"FUNC","rule":"Int16","sum":37},{"parent":"FUNC","rule":"Int32","sum":32725},{"parent":"FUNC","rule":"Int64","sum":3232},{"parent":"FUNC","rule":"Int8","sum":302},{"parent":"FUNC","rule":"InterVal","sum":4},{"parent":"FUNC","rule":"Interval","sum":16006942},{"parent":"FUNC","rule":"Interval64","sum":2},{"parent":"FUNC","rule":"IntervalFromDays","sum":9},{"parent":"FUNC","rule":"IsInt64","sum":1},{"parent":"FUNC","rule":"JSON","sum":78},{"parent":"FUNC","rule":"JUST","sum":127189},{"parent":"FUNC","rule":"Join","sum":1},{"parent":"FUNC","rule":"JoinTableRow","sum":1288166},{"parent":"FUNC","rule":"JoinTablerow","sum":721},{"parent":"FUNC","rule":"J
ointableRow","sum":1},{"parent":"FUNC","rule":"Json","sum":51068},{"parent":"FUNC","rule":"JsonDocument","sum":9},{"parent":"FUNC","rule":"Jsut","sum":1},{"parent":"FUNC","rule":"Just","sum":6491447},{"parent":"FUNC","rule":"LAG","sum":3999334},{"parent":"FUNC","rule":"LAST","sum":1},{"parent":"FUNC","rule":"LAST_VALUE","sum":1318288},{"parent":"FUNC","rule":"LAST_value","sum":2},{"parent":"FUNC","rule":"LEAD","sum":1634916},{"parent":"FUNC","rule":"LEAST","sum":426055},{"parent":"FUNC","rule":"LEFT","sum":3},{"parent":"FUNC","rule":"LEFT_SHIFT","sum":1},{"parent":"FUNC","rule":"LEN","sum":1686381},{"parent":"FUNC","rule":"LENGTH","sum":3834935},{"parent":"FUNC","rule":"LENgth","sum":23},{"parent":"FUNC","rule":"LEngth","sum":12},{"parent":"FUNC","rule":"LIKELY","sum":141169},{"parent":"FUNC","rule":"LINEARHISTOGRAM","sum":374},{"parent":"FUNC","rule":"LINEARHISTOGRAMCDF","sum":1},{"parent":"FUNC","rule":"LINEARHistogram","sum":3},{"parent":"FUNC","rule":"LINEAR_HISTOGRAM","sum":13},{"parent":"FUNC","rule":"LISTALL","sum":2698},{"parent":"FUNC","rule":"LISTANY","sum":130},{"parent":"FUNC","rule":"LISTAVG","sum":17},{"parent":"FUNC","rule":"LISTAny","sum":10},{"parent":"FUNC","rule":"LISTCOLLECT","sum":1},{"parent":"FUNC","rule":"LISTCONCAT","sum":1524},{"parent":"FUNC","rule":"LISTEXTEND","sum":13},{"parent":"FUNC","rule":"LISTFILTER","sum":1627},{"parent":"FUNC","rule":"LISTFLATTEN","sum":4219},{"parent":"FUNC","rule":"LISTFROMRANGE","sum":1488},{"parent":"FUNC","rule":"LISTHAS","sum":36584},{"parent":"FUNC","rule":"LISTHASITEMS","sum":2089},{"parent":"FUNC","rule":"LISTHEAD","sum":18498},{"parent":"FUNC","rule":"LISTHas","sum":9},{"parent":"FUNC","rule":"LISTHead","sum":2},{"parent":"FUNC","rule":"LISTLAST","sum":1510},{"parent":"FUNC","rule":"LISTLENGTH","sum":48055},{"parent":"FUNC","rule":"LISTLENGth","sum":4},{"parent":"FUNC","rule":"LISTLenGTH","sum":7},{"parent":"FUNC","rule":"LISTLength","sum":2085},{"parent":"FUNC","rule":"LISTMAP","sum":14251},{"parent":"
FUNC","rule":"LISTMAX","sum":5639},{"parent":"FUNC","rule":"LISTMIN","sum":187},{"parent":"FUNC","rule":"LISTMap","sum":365},{"parent":"FUNC","rule":"LISTNOTNULL","sum":8480},{"parent":"FUNC","rule":"LISTREVERSE","sum":2},{"parent":"FUNC","rule":"LISTSKIP","sum":15},{"parent":"FUNC","rule":"LISTSORT","sum":7572},{"parent":"FUNC","rule":"LISTSORTASC","sum":1673},{"parent":"FUNC","rule":"LISTSORTDESC","sum":104},{"parent":"FUNC","rule":"LISTSUM","sum":175},{"parent":"FUNC","rule":"LISTSort","sum":19},{"parent":"FUNC","rule":"LISTSum","sum":6},{"parent":"FUNC","rule":"LISTTAKE","sum":265},{"parent":"FUNC","rule":"LISTUNIQ","sum":596},{"parent":"FUNC","rule":"LISTUniq","sum":26},{"parent":"FUNC","rule":"LISTZIP","sum":261},{"parent":"FUNC","rule":"LISTZIPALL","sum":290},{"parent":"FUNC","rule":"LIST_AGGREGATE","sum":1},{"parent":"FUNC","rule":"LIST_ALL","sum":1},{"parent":"FUNC","rule":"LIST_ANY","sum":7},{"parent":"FUNC","rule":"LIST_CONCAT","sum":18},{"parent":"FUNC","rule":"LIST_EXTEND","sum":1},{"parent":"FUNC","rule":"LIST_EXTRACT","sum":73},{"parent":"FUNC","rule":"LIST_FLATTEN","sum":3},{"parent":"FUNC","rule":"LIST_FOLD","sum":30},{"parent":"FUNC","rule":"LIST_FROM_RANGE","sum":346},{"parent":"FUNC","rule":"LIST_HAS","sum":1993},{"parent":"FUNC","rule":"LIST_HEAD","sum":100},{"parent":"FUNC","rule":"LIST_LAST","sum":54},{"parent":"FUNC","rule":"LIST_LENGTH","sum":2457},{"parent":"FUNC","rule":"LIST_Length","sum":1},{"parent":"FUNC","rule":"LIST_MAP","sum":9},{"parent":"FUNC","rule":"LIST_MAX","sum":285},{"parent":"FUNC","rule":"LIST_SORT","sum":647},{"parent":"FUNC","rule":"LIST_SUM","sum":2},{"parent":"FUNC","rule":"LIST_TAKE","sum":2},{"parent":"FUNC","rule":"LIST_UNIQ","sum":334},{"parent":"FUNC","rule":"LIST_length","sum":1},{"parent":"FUNC","rule":"LISTfilter","sum":7},{"parent":"FUNC","rule":"LISTfromRange","sum":1410},{"parent":"FUNC","rule":"LISTfromrange","sum":3},{"parent":"FUNC","rule":"LISThas","sum":2},{"parent":"FUNC","rule":"LISTnotNull","sum":2},
{"parent":"FUNC","rule":"LIStExtend","sum":3},{"parent":"FUNC","rule":"LIStHas","sum":2},{"parent":"FUNC","rule":"LIStLENGth","sum":1},{"parent":"FUNC","rule":"LIStLength","sum":1},{"parent":"FUNC","rule":"LIStmap","sum":1},{"parent":"FUNC","rule":"LIstAny","sum":3},{"parent":"FUNC","rule":"LIstConcat","sum":304},{"parent":"FUNC","rule":"LIstExtend","sum":3},{"parent":"FUNC","rule":"LIstFilter","sum":71},{"parent":"FUNC","rule":"LIstFromRange","sum":8},{"parent":"FUNC","rule":"LIstFromrange","sum":1},{"parent":"FUNC","rule":"LIstHas","sum":16},{"parent":"FUNC","rule":"LIstHasItems","sum":6},{"parent":"FUNC","rule":"LIstHead","sum":3},{"parent":"FUNC","rule":"LIstLength","sum":374},{"parent":"FUNC","rule":"LIstMap","sum":1315},{"parent":"FUNC","rule":"LIstMax","sum":7},{"parent":"FUNC","rule":"LIstSkip","sum":2},{"parent":"FUNC","rule":"LIstSort","sum":1},{"parent":"FUNC","rule":"LIstSum","sum":1},{"parent":"FUNC","rule":"LIstfilter","sum":2},{"parent":"FUNC","rule":"LIstlength","sum":13},{"parent":"FUNC","rule":"LIstmap","sum":12},{"parent":"FUNC","rule":"LOG","sum":1},{"parent":"FUNC","rule":"LOG10","sum":6},{"parent":"FUNC","rule":"LOGHISTOGRAM","sum":1},{"parent":"FUNC","rule":"LOWER","sum":5},{"parent":"FUNC","rule":"Lag","sum":12443},{"parent":"FUNC","rule":"LambdaArgumentsCount","sum":32},{"parent":"FUNC","rule":"LambdaCode","sum":326513},{"parent":"FUNC","rule":"LastValue","sum":41},{"parent":"FUNC","rule":"Last_VALUE","sum":14},{"parent":"FUNC","rule":"Last_Value","sum":29},{"parent":"FUNC","rule":"Last_value","sum":84},{"parent":"FUNC","rule":"Lead","sum":11223},{"parent":"FUNC","rule":"Least","sum":2190},{"parent":"FUNC","rule":"Len","sum":92600},{"parent":"FUNC","rule":"LenGTH","sum":12},{"parent":"FUNC","rule":"Length","sum":1242695},{"parent":"FUNC","rule":"LiSTMAP","sum":2},{"parent":"FUNC","rule":"Likely","sum":20141},{"parent":"FUNC","rule":"LinearHISTOGRAM","sum":86},{"parent":"FUNC","rule":"LinearHistogram","sum":45772},{"parent":"FUNC","rule":"Lin
earHistogramCDF","sum":481},{"parent":"FUNC","rule":"LinearHistogramcdf","sum":10},{"parent":"FUNC","rule":"Linear_Histogram","sum":7},{"parent":"FUNC","rule":"Linearhistogram","sum":4},{"parent":"FUNC","rule":"Lis","sum":1},{"parent":"FUNC","rule":"LisMap","sum":2},{"parent":"FUNC","rule":"LisTHas","sum":8},{"parent":"FUNC","rule":"LisTLength","sum":2},{"parent":"FUNC","rule":"ListALL","sum":1571},{"parent":"FUNC","rule":"ListALl","sum":8},{"parent":"FUNC","rule":"ListANY","sum":1726},{"parent":"FUNC","rule":"ListAVG","sum":246},{"parent":"FUNC","rule":"ListAgg","sum":2},{"parent":"FUNC","rule":"ListAggregate","sum":424587},{"parent":"FUNC","rule":"ListAll","sum":263261},{"parent":"FUNC","rule":"ListAny","sum":1878420},{"parent":"FUNC","rule":"ListAppend","sum":1},{"parent":"FUNC","rule":"ListAvg","sum":534208},{"parent":"FUNC","rule":"ListCOncat","sum":2},{"parent":"FUNC","rule":"ListCode","sum":207058},{"parent":"FUNC","rule":"ListCollect","sum":902802},{"parent":"FUNC","rule":"ListConCat","sum":77},{"parent":"FUNC","rule":"ListConcat","sum":5449410},{"parent":"FUNC","rule":"ListCreate","sum":1958900},{"parent":"FUNC","rule":"ListDistinct","sum":1},{"parent":"FUNC","rule":"ListEnumerate","sum":1392489},{"parent":"FUNC","rule":"ListExtend","sum":4359935},{"parent":"FUNC","rule":"ListExtendStrict","sum":87958},{"parent":"FUNC","rule":"ListExtract","sum":3299019},{"parent":"FUNC","rule":"ListFILTER","sum":495},{"parent":"FUNC","rule":"ListFIlter","sum":339},{"parent":"FUNC","rule":"ListFLatMap","sum":270},{"parent":"FUNC","rule":"ListFLatten","sum":596},{"parent":"FUNC","rule":"ListFROMRange","sum":5024},{"parent":"FUNC","rule":"ListFilteR","sum":4},{"parent":"FUNC","rule":"ListFilter","sum":17748233},{"parent":"FUNC","rule":"ListFirst","sum":3},{"parent":"FUNC","rule":"ListFlatMap","sum":910460},{"parent":"FUNC","rule":"ListFlatmap","sum":28521},{"parent":"FUNC","rule":"ListFlatten","sum":2811746},{"parent":"FUNC","rule":"ListFold","sum":229604},{"parent":"FUNC","r
ule":"ListFold1","sum":35297},{"parent":"FUNC","rule":"ListFold1Map","sum":3440},{"parent":"FUNC","rule":"ListFoldMap","sum":38611},{"parent":"FUNC","rule":"ListFromPython","sum":2},{"parent":"FUNC","rule":"ListFromRANGE","sum":23},{"parent":"FUNC","rule":"ListFromRAnge","sum":45},{"parent":"FUNC","rule":"ListFromRange","sum":3070356},{"parent":"FUNC","rule":"ListFromTuple","sum":58796},{"parent":"FUNC","rule":"ListFromTyple","sum":2},{"parent":"FUNC","rule":"ListFromrange","sum":12},{"parent":"FUNC","rule":"ListHAS","sum":412},{"parent":"FUNC","rule":"ListHAs","sum":572},{"parent":"FUNC","rule":"ListHAsItems","sum":26},{"parent":"FUNC","rule":"ListHEAD","sum":2},{"parent":"FUNC","rule":"ListHEad","sum":66},{"parent":"FUNC","rule":"ListHaS","sum":1},{"parent":"FUNC","rule":"ListHas","sum":20684140},{"parent":"FUNC","rule":"ListHasITems","sum":9},{"parent":"FUNC","rule":"ListHasItemhs","sum":1},{"parent":"FUNC","rule":"ListHasItems","sum":5096505},{"parent":"FUNC","rule":"ListHasitems","sum":331},{"parent":"FUNC","rule":"ListHeaD","sum":102},{"parent":"FUNC","rule":"ListHead","sum":10553525},{"parent":"FUNC","rule":"ListINdexOf","sum":2},{"parent":"FUNC","rule":"ListIndex","sum":1},{"parent":"FUNC","rule":"ListIndexOF","sum":10},{"parent":"FUNC","rule":"ListIndexOf","sum":1218088},{"parent":"FUNC","rule":"ListIndexof","sum":19},{"parent":"FUNC","rule":"ListItemType","sum":206671},{"parent":"FUNC","rule":"ListJoin","sum":3},{"parent":"FUNC","rule":"ListJsonDocument","sum":1},{"parent":"FUNC","rule":"ListLAst","sum":10},{"parent":"FUNC","rule":"ListLENGTH","sum":2290},{"parent":"FUNC","rule":"ListLEngth","sum":19},{"parent":"FUNC","rule":"ListLasT","sum":12},{"parent":"FUNC","rule":"ListLast","sum":5980021},{"parent":"FUNC","rule":"ListLeNgth","sum":1},{"parent":"FUNC","rule":"ListLegth","sum":1},{"parent":"FUNC","rule":"ListLenght","sum":1},{"parent":"FUNC","rule":"ListLengtH","sum":2},{"parent":"FUNC","rule":"ListLength","sum":18360734},{"parent":"FUNC","rule":"ListM
AP","sum":227},{"parent":"FUNC","rule":"ListMAX","sum":9790},{"parent":"FUNC","rule":"ListMAp","sum":4717},{"parent":"FUNC","rule":"ListMAx","sum":1},{"parent":"FUNC","rule":"ListMIN","sum":52},{"parent":"FUNC","rule":"ListMIn","sum":1},{"parent":"FUNC","rule":"ListMaP","sum":1218},{"parent":"FUNC","rule":"ListMap","sum":35320671},{"parent":"FUNC","rule":"ListMax","sum":1413335},{"parent":"FUNC","rule":"ListMin","sum":811946},{"parent":"FUNC","rule":"ListNOTNull","sum":712},{"parent":"FUNC","rule":"ListNoTNull","sum":17},{"parent":"FUNC","rule":"ListNoTnull","sum":881},{"parent":"FUNC","rule":"ListNonNull","sum":1},{"parent":"FUNC","rule":"ListNotNULL","sum":30190},{"parent":"FUNC","rule":"ListNotNUll","sum":7920},{"parent":"FUNC","rule":"ListNotNuLL","sum":5},{"parent":"FUNC","rule":"ListNotNul","sum":1},{"parent":"FUNC","rule":"ListNotNulL","sum":3},{"parent":"FUNC","rule":"ListNotNull","sum":9108047},{"parent":"FUNC","rule":"ListNotnull","sum":22},{"parent":"FUNC","rule":"ListREplicate","sum":2},{"parent":"FUNC","rule":"ListRange","sum":4},{"parent":"FUNC","rule":"ListRepeat","sum":1},{"parent":"FUNC","rule":"ListReplicate","sum":946286},{"parent":"FUNC","rule":"ListReverse","sum":1334209},{"parent":"FUNC","rule":"ListSORT","sum":1},{"parent":"FUNC","rule":"ListSORtAsc","sum":26},{"parent":"FUNC","rule":"ListSOrt","sum":3},{"parent":"FUNC","rule":"ListSUM","sum":866},{"parent":"FUNC","rule":"ListSUm","sum":12},{"parent":"FUNC","rule":"ListSample","sum":6},{"parent":"FUNC","rule":"ListSampleN","sum":645},{"parent":"FUNC","rule":"ListShuffle","sum":481},{"parent":"FUNC","rule":"ListShuffleN","sum":1},{"parent":"FUNC","rule":"ListSkip","sum":617241},{"parent":"FUNC","rule":"ListSkipWhile","sum":139816},{"parent":"FUNC","rule":"ListSkipWhileInclusive","sum":8636},{"parent":"FUNC","rule":"ListSort","sum":4987535},{"parent":"FUNC","rule":"ListSortASC","sum":17601},{"parent":"FUNC","rule":"ListSortAsc","sum":481868},{"parent":"FUNC","rule":"ListSortDESC","sum":53},{"par
ent":"FUNC","rule":"ListSortDEsc","sum":2},{"parent":"FUNC","rule":"ListSortDesc","sum":1133917},{"parent":"FUNC","rule":"ListSortasc","sum":9},{"parent":"FUNC","rule":"ListSortdesc","sum":669},{"parent":"FUNC","rule":"ListSum","sum":1741577},{"parent":"FUNC","rule":"ListTail","sum":1},{"parent":"FUNC","rule":"ListTake","sum":2140859},{"parent":"FUNC","rule":"ListTakeWhile","sum":89508},{"parent":"FUNC","rule":"ListTakeWhileInclusive","sum":2460},{"parent":"FUNC","rule":"ListToTuple","sum":6188},{"parent":"FUNC","rule":"ListTop","sum":2203},{"parent":"FUNC","rule":"ListTopAsc","sum":132},{"parent":"FUNC","rule":"ListTopDESC","sum":1},{"parent":"FUNC","rule":"ListTopDesc","sum":9595},{"parent":"FUNC","rule":"ListTopSort","sum":1009},{"parent":"FUNC","rule":"ListTopSortAsc","sum":40},{"parent":"FUNC","rule":"ListTopSortDesc","sum":60040},{"parent":"FUNC","rule":"ListTopdesc","sum":1},{"parent":"FUNC","rule":"ListType","sum":31551},{"parent":"FUNC","rule":"ListTypeHandle","sum":1},{"parent":"FUNC","rule":"ListUNiq","sum":24},{"parent":"FUNC","rule":"ListUnionALL","sum":127},{"parent":"FUNC","rule":"ListUnionAll","sum":56559},{"parent":"FUNC","rule":"ListUniq","sum":3966479},{"parent":"FUNC","rule":"ListUniqStable","sum":74015},{"parent":"FUNC","rule":"ListZIP","sum":8},{"parent":"FUNC","rule":"ListZIp","sum":1},{"parent":"FUNC","rule":"ListZip","sum":2521995},{"parent":"FUNC","rule":"ListZipALL","sum":291025},{"parent":"FUNC","rule":"ListZipAll","sum":275263},{"parent":"FUNC","rule":"List_FromRange","sum":11},{"parent":"FUNC","rule":"List_Has","sum":23},{"parent":"FUNC","rule":"List_Length","sum":15},{"parent":"FUNC","rule":"List_Sort","sum":10},{"parent":"FUNC","rule":"List_Uniq","sum":1},{"parent":"FUNC","rule":"List_concat","sum":3},{"parent":"FUNC","rule":"List_length","sum":2},{"parent":"FUNC","rule":"List_sort","sum":80},{"parent":"FUNC","rule":"Listall","sum":4433},{"parent":"FUNC","rule":"Listany","sum":38},{"parent":"FUNC","rule":"Listcollect","sum":5146},{"pa
rent":"FUNC","rule":"Listconcat","sum":14947},{"parent":"FUNC","rule":"Listenumerate","sum":3},{"parent":"FUNC","rule":"Listextend","sum":5},{"parent":"FUNC","rule":"Listfilter","sum":270192},{"parent":"FUNC","rule":"Listflatmap","sum":297},{"parent":"FUNC","rule":"Listflatten","sum":35},{"parent":"FUNC","rule":"ListfromRange","sum":32},{"parent":"FUNC","rule":"Listfromrange","sum":98},{"parent":"FUNC","rule":"Listfromtuple","sum":1},{"parent":"FUNC","rule":"Listhas","sum":71109},{"parent":"FUNC","rule":"ListhasItems","sum":274},{"parent":"FUNC","rule":"Listhasitems","sum":467},{"parent":"FUNC","rule":"Listhead","sum":18611},{"parent":"FUNC","rule":"ListindexOf","sum":1943},{"parent":"FUNC","rule":"Listindexof","sum":4},{"parent":"FUNC","rule":"ListlENGTH","sum":1},{"parent":"FUNC","rule":"ListlEngth","sum":2},{"parent":"FUNC","rule":"Listlast","sum":3720},{"parent":"FUNC","rule":"Listlength","sum":10868},{"parent":"FUNC","rule":"Listmap","sum":11186},{"parent":"FUNC","rule":"Listmax","sum":488},{"parent":"FUNC","rule":"Listmin","sum":1607},{"parent":"FUNC","rule":"ListnotNull","sum":38},{"parent":"FUNC","rule":"Listnotnull","sum":11},{"parent":"FUNC","rule":"Listreverse","sum":17},{"parent":"FUNC","rule":"Listskip","sum":148},{"parent":"FUNC","rule":"ListskipWhile","sum":409},{"parent":"FUNC","rule":"Listsort","sum":989},{"parent":"FUNC","rule":"Listsortasc","sum":1},{"parent":"FUNC","rule":"Listsortdesc","sum":1},{"parent":"FUNC","rule":"Listsum","sum":27},{"parent":"FUNC","rule":"Listtake","sum":20255},{"parent":"FUNC","rule":"ListtakeWhile","sum":2},{"parent":"FUNC","rule":"Listuniq","sum":323},{"parent":"FUNC","rule":"Listzip","sum":751},{"parent":"FUNC","rule":"Listzipall","sum":1},{"parent":"FUNC","rule":"LogHISTOGRAM","sum":7},{"parent":"FUNC","rule":"LogHistogram","sum":287},{"parent":"FUNC","rule":"LogHistogramCDF","sum":15},{"parent":"FUNC","rule":"LogarithmicHISTOGRAM","sum":1},{"parent":"FUNC","rule":"LogarithmicHistogram","sum":622},{"parent":"FUNC","r
ule":"Logarithmichistogram","sum":20},{"parent":"FUNC","rule":"Lookup","sum":1},{"parent":"FUNC","rule":"MAX","sum":34405108},{"parent":"FUNC","rule":"MAXBY","sum":123},{"parent":"FUNC","rule":"MAXOF","sum":3},{"parent":"FUNC","rule":"MAX_BY","sum":22818637},{"parent":"FUNC","rule":"MAX_By","sum":1},{"parent":"FUNC","rule":"MAX_IF","sum":1},{"parent":"FUNC","rule":"MAX_OF","sum":2201416},{"parent":"FUNC","rule":"MAX_Of","sum":2},{"parent":"FUNC","rule":"MAX_bY","sum":7},{"parent":"FUNC","rule":"MAX_by","sum":10454},{"parent":"FUNC","rule":"MAX_of","sum":649},{"parent":"FUNC","rule":"MAx","sum":294},{"parent":"FUNC","rule":"MEDIAN","sum":905336},{"parent":"FUNC","rule":"MEDiAN","sum":4},{"parent":"FUNC","rule":"MEdian","sum":14},{"parent":"FUNC","rule":"MIN","sum":14617885},{"parent":"FUNC","rule":"MINBY","sum":1894},{"parent":"FUNC","rule":"MIN_BY","sum":5253791},{"parent":"FUNC","rule":"MIN_IF","sum":8},{"parent":"FUNC","rule":"MIN_OF","sum":2254327},{"parent":"FUNC","rule":"MIN_by","sum":1237},{"parent":"FUNC","rule":"MIN_of","sum":1},{"parent":"FUNC","rule":"MIn","sum":201},{"parent":"FUNC","rule":"MIn_OF","sum":3},{"parent":"FUNC","rule":"MODE","sum":902020},{"parent":"FUNC","rule":"MODe","sum":1},{"parent":"FUNC","rule":"MONTH","sum":1},{"parent":"FUNC","rule":"MULTI_AGGREGATE_BY","sum":686176},{"parent":"FUNC","rule":"MaX","sum":7},{"parent":"FUNC","rule":"MakeDate","sum":1},{"parent":"FUNC","rule":"Map","sum":4},{"parent":"FUNC","rule":"Max","sum":376925},{"parent":"FUNC","rule":"MaxBy","sum":5442},{"parent":"FUNC","rule":"MaxOf","sum":207},{"parent":"FUNC","rule":"Max_BY","sum":3170},{"parent":"FUNC","rule":"Max_By","sum":68730},{"parent":"FUNC","rule":"Max_OF","sum":669},{"parent":"FUNC","rule":"Max_Of","sum":648},{"parent":"FUNC","rule":"Max_by","sum":141102},{"parent":"FUNC","rule":"Max_of","sum":3235},{"parent":"FUNC","rule":"Median","sum":19448},{"parent":"FUNC","rule":"MiN","sum":35},{"parent":"FUNC","rule":"Min","sum":192664},{"parent":"FUNC","rule":"
MinBy","sum":1173},{"parent":"FUNC","rule":"MinOf","sum":892},{"parent":"FUNC","rule":"Min_BY","sum":440},{"parent":"FUNC","rule":"Min_By","sum":10675},{"parent":"FUNC","rule":"Min_OF","sum":1097},{"parent":"FUNC","rule":"Min_Of","sum":794},{"parent":"FUNC","rule":"Min_by","sum":9084},{"parent":"FUNC","rule":"Min_of","sum":1340},{"parent":"FUNC","rule":"MoDE","sum":1},{"parent":"FUNC","rule":"Mode","sum":14375},{"parent":"FUNC","rule":"Mul","sum":1},{"parent":"FUNC","rule":"MultiAggregateBy","sum":629},{"parent":"FUNC","rule":"Multi_Aggregate_BY","sum":4},{"parent":"FUNC","rule":"Multi_Aggregate_By","sum":1156},{"parent":"FUNC","rule":"Multi_Aggregate_by","sum":4},{"parent":"FUNC","rule":"Multi_aggregate_by","sum":436},{"parent":"FUNC","rule":"NANVL","sum":718226},{"parent":"FUNC","rule":"NOTHING","sum":3020},{"parent":"FUNC","rule":"NOW","sum":2},{"parent":"FUNC","rule":"NOW64","sum":2},{"parent":"FUNC","rule":"NOthing","sum":1},{"parent":"FUNC","rule":"NTH_VALUE","sum":2934},{"parent":"FUNC","rule":"NTILE","sum":1642},{"parent":"FUNC","rule":"NULLIF","sum":1},{"parent":"FUNC","rule":"NVL","sum":17598846},{"parent":"FUNC","rule":"NVl","sum":817},{"parent":"FUNC","rule":"NaNvl","sum":26},{"parent":"FUNC","rule":"NanVL","sum":1},{"parent":"FUNC","rule":"NanVl","sum":3},{"parent":"FUNC","rule":"Nanvl","sum":10531},{"parent":"FUNC","rule":"NothiNG","sum":2},{"parent":"FUNC","rule":"Nothing","sum":806416},{"parent":"FUNC","rule":"NullTypeHandle","sum":2529},{"parent":"FUNC","rule":"Nvl","sum":27846},{"parent":"FUNC","rule":"OR","sum":6},{"parent":"FUNC","rule":"OVER","sum":1},{"parent":"FUNC","rule":"OptionalItemType","sum":37184},{"parent":"FUNC","rule":"OptionalType","sum":136958},{"parent":"FUNC","rule":"OptionalTypeHandle","sum":44055},{"parent":"FUNC","rule":"PERCENTILE","sum":29614610},{"parent":"FUNC","rule":"PERCENTIlE","sum":4},{"parent":"FUNC","rule":"PERCENT_RANK","sum":5854},{"parent":"FUNC","rule":"PERCENtILE","sum":1897},{"parent":"FUNC","rule":"PERCEnTILE
","sum":1},{"parent":"FUNC","rule":"PICKLE","sum":77},{"parent":"FUNC","rule":"PIckle","sum":7},{"parent":"FUNC","rule":"POPULATION_STDDEV","sum":372},{"parent":"FUNC","rule":"POPULATION_VARIANCE","sum":19},{"parent":"FUNC","rule":"POWER","sum":4},{"parent":"FUNC","rule":"ParseDuration","sum":2},{"parent":"FUNC","rule":"ParseFILE","sum":22},{"parent":"FUNC","rule":"ParseFIle","sum":8},{"parent":"FUNC","rule":"ParseFile","sum":955948},{"parent":"FUNC","rule":"ParseType","sum":459502},{"parent":"FUNC","rule":"ParseTypeHandle","sum":1160868},{"parent":"FUNC","rule":"Parsefile","sum":109},{"parent":"FUNC","rule":"Path","sum":4},{"parent":"FUNC","rule":"PeRCENTILE","sum":365},{"parent":"FUNC","rule":"Percentile","sum":73722},{"parent":"FUNC","rule":"PgArray","sum":17},{"parent":"FUNC","rule":"PgBool","sum":145},{"parent":"FUNC","rule":"PgCall","sum":538},{"parent":"FUNC","rule":"PgCast","sum":5475},{"parent":"FUNC","rule":"PgCircle","sum":5},{"parent":"FUNC","rule":"PgConst","sum":99},{"parent":"FUNC","rule":"PgDate","sum":115},{"parent":"FUNC","rule":"PgGeometry","sum":6},{"parent":"FUNC","rule":"PgInt4","sum":1},{"parent":"FUNC","rule":"PgInterval","sum":33},{"parent":"FUNC","rule":"PgOp","sum":1347},{"parent":"FUNC","rule":"PgPoint","sum":75},{"parent":"FUNC","rule":"PgPolygon","sum":516},{"parent":"FUNC","rule":"PgRangeCall","sum":7},{"parent":"FUNC","rule":"PgText","sum":24},{"parent":"FUNC","rule":"PgTimestamp","sum":42},{"parent":"FUNC","rule":"PgVarBit","sum":2},{"parent":"FUNC","rule":"Pickle","sum":118777},{"parent":"FUNC","rule":"QuoteCode","sum":735840},{"parent":"FUNC","rule":"RADIANS","sum":12},{"parent":"FUNC","rule":"RAND","sum":2},{"parent":"FUNC","rule":"RANDOM","sum":484333},{"parent":"FUNC","rule":"RANDOMNUMBER","sum":3991},{"parent":"FUNC","rule":"RANDOMNumber","sum":11},{"parent":"FUNC","rule":"RANDOMUUID","sum":5},{"parent":"FUNC","rule":"RANDOM_NUMBER","sum":83},{"parent":"FUNC","rule":"RANGE","sum":150},{"parent":"FUNC","rule":"RANK","sum":931701
},{"parent":"FUNC","rule":"RAndom","sum":7},{"parent":"FUNC","rule":"REGEXP","sum":1},{"parent":"FUNC","rule":"REMOVEMEMBERS","sum":41},{"parent":"FUNC","rule":"REMOVE_mEMBER","sum":6},{"parent":"FUNC","rule":"REPLACE","sum":6},{"parent":"FUNC","rule":"RFIND","sum":3509212},{"parent":"FUNC","rule":"RFind","sum":258233},{"parent":"FUNC","rule":"RIGHT","sum":2},{"parent":"FUNC","rule":"ROUND","sum":10},{"parent":"FUNC","rule":"ROWNUMBER","sum":33},{"parent":"FUNC","rule":"ROW_NUMBER","sum":9342600},{"parent":"FUNC","rule":"ROW_NUMber","sum":11},{"parent":"FUNC","rule":"ROW_Number","sum":8},{"parent":"FUNC","rule":"ROW_nUMBER","sum":1},{"parent":"FUNC","rule":"ROW_nuMBER","sum":4},{"parent":"FUNC","rule":"ROW_number","sum":13605},{"parent":"FUNC","rule":"Rand","sum":1},{"parent":"FUNC","rule":"Random","sum":721944},{"parent":"FUNC","rule":"RandomNUmber","sum":183},{"parent":"FUNC","rule":"RandomNumber","sum":269061},{"parent":"FUNC","rule":"RandomUUID","sum":8345},{"parent":"FUNC","rule":"RandomUUid","sum":33585},{"parent":"FUNC","rule":"RandomUuid","sum":215027},{"parent":"FUNC","rule":"Randomnumber","sum":2},{"parent":"FUNC","rule":"Range","sum":2},{"parent":"FUNC","rule":"Rank","sum":3074},{"parent":"FUNC","rule":"RemoveMEmbers","sum":16},{"parent":"FUNC","rule":"RemoveMember","sum":429995},{"parent":"FUNC","rule":"RemoveMembers","sum":721426},{"parent":"FUNC","rule":"RemoveTimeZone","sum":13},{"parent":"FUNC","rule":"RemoveTimezone","sum":432859},{"parent":"FUNC","rule":"Removemember","sum":692},{"parent":"FUNC","rule":"Removemembers","sum":4},{"parent":"FUNC","rule":"RenameMembers","sum":486595},{"parent":"FUNC","rule":"ReplaceMember","sum":9397},{"parent":"FUNC","rule":"ReprCode","sum":194797},{"parent":"FUNC","rule":"ResourceType","sum":9},{"parent":"FUNC","rule":"Rfind","sum":873},{"parent":"FUNC","rule":"RootAttributes","sum":20},{"parent":"FUNC","rule":"RowNum","sum":1},{"parent":"FUNC","rule":"RowNumber","sum":21310},{"parent":"FUNC","rule":"Row_NUMBER","sum
":29},{"parent":"FUNC","rule":"Row_NUmber","sum":12},{"parent":"FUNC","rule":"Row_Number","sum":60795},{"parent":"FUNC","rule":"Row_number","sum":4003},{"parent":"FUNC","rule":"SESSIONWINDOW","sum":131},{"parent":"FUNC","rule":"SETINTERSECTION","sum":20},{"parent":"FUNC","rule":"SIGN","sum":1},{"parent":"FUNC","rule":"SIN","sum":4},{"parent":"FUNC","rule":"SOME","sum":32912475},{"parent":"FUNC","rule":"SOMe","sum":17},{"parent":"FUNC","rule":"SON_VALUE","sum":1},{"parent":"FUNC","rule":"SOmE","sum":3},{"parent":"FUNC","rule":"SOme","sum":9},{"parent":"FUNC","rule":"SQL","sum":6},{"parent":"FUNC","rule":"SQRT","sum":2},{"parent":"FUNC","rule":"STARTSWITH","sum":25639},{"parent":"FUNC","rule":"STARTS_WITH","sum":42},{"parent":"FUNC","rule":"STARTsWITH","sum":17},{"parent":"FUNC","rule":"STATICMAP","sum":46},{"parent":"FUNC","rule":"STD","sum":6},{"parent":"FUNC","rule":"STDDEV","sum":401637},{"parent":"FUNC","rule":"STDDEVPOP","sum":2917},{"parent":"FUNC","rule":"STDDEVSAMP","sum":1094},{"parent":"FUNC","rule":"STDDEV_POP","sum":14},{"parent":"FUNC","rule":"STDDEV_POPULATION","sum":18435},{"parent":"FUNC","rule":"STDDEV_SAMP","sum":3551},{"parent":"FUNC","rule":"STDDEV_SAMPLE","sum":9894},{"parent":"FUNC","rule":"STDDev","sum":2},{"parent":"FUNC","rule":"STDdev","sum":4},{"parent":"FUNC","rule":"STRING_SPLIT","sum":1},{"parent":"FUNC","rule":"ST_AsText","sum":3},{"parent":"FUNC","rule":"ST_ClosestPoint","sum":3},{"parent":"FUNC","rule":"ST_Distance","sum":5},{"parent":"FUNC","rule":"ST_GeomFromGeoHash","sum":1},{"parent":"FUNC","rule":"ST_Point","sum":10},{"parent":"FUNC","rule":"ST_PolygonFromText","sum":5},{"parent":"FUNC","rule":"ST_SetSRID","sum":6},{"parent":"FUNC","rule":"STartsWith","sum":2},{"parent":"FUNC","rule":"STdDEV","sum":9},{"parent":"FUNC","rule":"SUBQUERYExtendFor","sum":6},{"parent":"FUNC","rule":"SUBSTIRNG","sum":4},{"parent":"FUNC","rule":"SUBSTRING","sum":29234003},{"parent":"FUNC","rule":"SUBSTRINg","sum":2},{"parent":"FUNC","rule":"SUBSTRInG","
sum":4},{"parent":"FUNC","rule":"SUBSTRiNG","sum":2},{"parent":"FUNC","rule":"SUBSTRinG","sum":2},{"parent":"FUNC","rule":"SUBSTRing","sum":224},{"parent":"FUNC","rule":"SUBSTrING","sum":21},{"parent":"FUNC","rule":"SUBSTring","sum":14},{"parent":"FUNC","rule":"SUBStRING","sum":2},{"parent":"FUNC","rule":"SUBString","sum":20},{"parent":"FUNC","rule":"SUBstring","sum":3},{"parent":"FUNC","rule":"SUM","sum":42261625},{"parent":"FUNC","rule":"SUMIF","sum":3317},{"parent":"FUNC","rule":"SUM_","sum":320},{"parent":"FUNC","rule":"SUM_IF","sum":8449934},{"parent":"FUNC","rule":"SUM_If","sum":110},{"parent":"FUNC","rule":"SUM_iF","sum":19},{"parent":"FUNC","rule":"SUM_if","sum":264857},{"parent":"FUNC","rule":"SUN","sum":1},{"parent":"FUNC","rule":"SUbstring","sum":5},{"parent":"FUNC","rule":"SUm","sum":78},{"parent":"FUNC","rule":"SUm_IF","sum":30},{"parent":"FUNC","rule":"SWITCH","sum":2},{"parent":"FUNC","rule":"SecureParam","sum":891313},{"parent":"FUNC","rule":"SessionStart","sum":24374},{"parent":"FUNC","rule":"SessionState","sum":708},{"parent":"FUNC","rule":"SessionWindow","sum":101921},{"parent":"FUNC","rule":"SetBit","sum":45764},{"parent":"FUNC","rule":"SetContains","sum":1},{"parent":"FUNC","rule":"SetCreate","sum":45653},{"parent":"FUNC","rule":"SetDIfference","sum":11},{"parent":"FUNC","rule":"SetDifference","sum":483448},{"parent":"FUNC","rule":"SetIncludes","sum":248781},{"parent":"FUNC","rule":"SetInterSection","sum":21},{"parent":"FUNC","rule":"SetIntersection","sum":1262511},{"parent":"FUNC","rule":"SetIsDisJOINt","sum":8271},{"parent":"FUNC","rule":"SetIsDisJoint","sum":1665},{"parent":"FUNC","rule":"SetIsDisjoint","sum":1224131},{"parent":"FUNC","rule":"SetSymmetricDifference","sum":8950},{"parent":"FUNC","rule":"SetUNION","sum":1},{"parent":"FUNC","rule":"SetUnion","sum":581567},{"parent":"FUNC","rule":"Setintersection","sum":4333},{"parent":"FUNC","rule":"SizeOf","sum":1},{"parent":"FUNC","rule":"Some","sum":2506744},{"parent":"FUNC","rule":"SplitToLi
st","sum":6},{"parent":"FUNC","rule":"SpreadMembers","sum":346135},{"parent":"FUNC","rule":"StablePicke","sum":1},{"parent":"FUNC","rule":"StablePickle","sum":210832},{"parent":"FUNC","rule":"StartSwith","sum":105},{"parent":"FUNC","rule":"StartsWIth","sum":696},{"parent":"FUNC","rule":"StartsWith","sum":5729777},{"parent":"FUNC","rule":"Startswith","sum":18841},{"parent":"FUNC","rule":"StaticFold","sum":297},{"parent":"FUNC","rule":"StaticMap","sum":685188},{"parent":"FUNC","rule":"StaticZip","sum":13310},{"parent":"FUNC","rule":"Staticmap","sum":2},{"parent":"FUNC","rule":"StdDev","sum":33},{"parent":"FUNC","rule":"Stddev","sum":43},{"parent":"FUNC","rule":"StreamItemType","sum":5736},{"parent":"FUNC","rule":"StreamType","sum":76883},{"parent":"FUNC","rule":"StreamTypeHandle","sum":17},{"parent":"FUNC","rule":"String","sum":34646},{"parent":"FUNC","rule":"StringLength","sum":5},{"parent":"FUNC","rule":"StringSplitToList","sum":4},{"parent":"FUNC","rule":"StructDifference","sum":229},{"parent":"FUNC","rule":"StructIntersection","sum":779},{"parent":"FUNC","rule":"StructMemberType","sum":18722},{"parent":"FUNC","rule":"StructMembers","sum":964266},{"parent":"FUNC","rule":"StructSymmetricDifference","sum":172},{"parent":"FUNC","rule":"StructType","sum":18113},{"parent":"FUNC","rule":"StructTypeComponents","sum":99048},{"parent":"FUNC","rule":"StructTypeHandle","sum":87242},{"parent":"FUNC","rule":"StructUnion","sum":95400},{"parent":"FUNC","rule":"SuBSTRING","sum":1},{"parent":"FUNC","rule":"SuM","sum":9},{"parent":"FUNC","rule":"SuM_IF","sum":4},{"parent":"FUNC","rule":"SubQueryExtendFor","sum":3},{"parent":"FUNC","rule":"SubSTRING","sum":3},{"parent":"FUNC","rule":"SubString","sum":27042},{"parent":"FUNC","rule":"SubqueryAssumeOrderBy","sum":809},{"parent":"FUNC","rule":"SubqueryExtend","sum":36478},{"parent":"FUNC","rule":"SubqueryExtendFor","sum":769666},{"parent":"FUNC","rule":"SubqueryMerge","sum":17},{"parent":"FUNC","rule":"SubqueryMergeFor","sum":36094},{"pa
rent":"FUNC","rule":"SubqueryOrderBy","sum":181209},{"parent":"FUNC","rule":"SubqueryUnionALLFor","sum":9},{"parent":"FUNC","rule":"SubqueryUnionALlFor","sum":17},{"parent":"FUNC","rule":"SubqueryUnionAll","sum":26106},{"parent":"FUNC","rule":"SubqueryUnionAllFor","sum":137115},{"parent":"FUNC","rule":"SubqueryUnionAllfor","sum":380},{"parent":"FUNC","rule":"SubqueryUnionMerge","sum":30587},{"parent":"FUNC","rule":"SubqueryUnionMergeFor","sum":73721},{"parent":"FUNC","rule":"SubsTRING","sum":3},{"parent":"FUNC","rule":"Substring","sum":2938868},{"parent":"FUNC","rule":"Sum","sum":452555},{"parent":"FUNC","rule":"SumIf","sum":1597},{"parent":"FUNC","rule":"Sum_IF","sum":85},{"parent":"FUNC","rule":"Sum_If","sum":14878},{"parent":"FUNC","rule":"Sum_if","sum":52115},{"parent":"FUNC","rule":"TABLENAME","sum":11760},{"parent":"FUNC","rule":"TABLEPATH","sum":316},{"parent":"FUNC","rule":"TABLERECORDINDEX","sum":45},{"parent":"FUNC","rule":"TABLEROW","sum":1277},{"parent":"FUNC","rule":"TABLE_NAME","sum":124},{"parent":"FUNC","rule":"TABLE_PATH","sum":53},{"parent":"FUNC","rule":"TABLE_ROW","sum":124},{"parent":"FUNC","rule":"TABLEname","sum":4},{"parent":"FUNC","rule":"TAbleName","sum":65},{"parent":"FUNC","rule":"TAblePath","sum":2},{"parent":"FUNC","rule":"TAbleRow","sum":25},{"parent":"FUNC","rule":"TAblename","sum":1},{"parent":"FUNC","rule":"TESTBIT","sum":3},{"parent":"FUNC","rule":"TEstBit","sum":1},{"parent":"FUNC","rule":"TIMESTAMP","sum":426},{"parent":"FUNC","rule":"TIMESTAMPDIFF","sum":2},{"parent":"FUNC","rule":"TIMESTAMP_SECONDS","sum":1},{"parent":"FUNC","rule":"TOBytes","sum":1},{"parent":"FUNC","rule":"TODICT","sum":21},{"parent":"FUNC","rule":"TODIct","sum":1},{"parent":"FUNC","rule":"TOP","sum":499603},{"parent":"FUNC","rule":"TOPBY","sum":1},{"parent":"FUNC","rule":"TOPFREQ","sum":513458},{"parent":"FUNC","rule":"TOPFreq","sum":3},{"parent":"FUNC","rule":"TOP_BY","sum":1756151},{"parent":"FUNC","rule":"TOP_FREQ","sum":46},{"parent":"FUNC","rule":"TOP_b
y","sum":25},{"parent":"FUNC","rule":"TOSET","sum":41},{"parent":"FUNC","rule":"TOSet","sum":762},{"parent":"FUNC","rule":"TO_NUMBER","sum":14},{"parent":"FUNC","rule":"TO_TIMESTAMP","sum":1},{"parent":"FUNC","rule":"TObytes","sum":3},{"parent":"FUNC","rule":"TRY_MEMBER","sum":37},{"parent":"FUNC","rule":"TYPEOF","sum":1},{"parent":"FUNC","rule":"TYPEof","sum":28},{"parent":"FUNC","rule":"TZDateTime","sum":129},{"parent":"FUNC","rule":"TZDatetime","sum":3},{"parent":"FUNC","rule":"TZTimestamp","sum":64},{"parent":"FUNC","rule":"TabLeName","sum":1},{"parent":"FUNC","rule":"TableNAME","sum":3},{"parent":"FUNC","rule":"TableNAme","sum":1509},{"parent":"FUNC","rule":"TableNamE","sum":2},{"parent":"FUNC","rule":"TableName","sum":14993581},{"parent":"FUNC","rule":"TablePATH","sum":1},{"parent":"FUNC","rule":"TablePAth","sum":5},{"parent":"FUNC","rule":"TablePath","sum":1494366},{"parent":"FUNC","rule":"TableROW","sum":4},{"parent":"FUNC","rule":"TableROw","sum":19},{"parent":"FUNC","rule":"TableRecordINdex","sum":4},{"parent":"FUNC","rule":"TableRecordIndex","sum":2323768},{"parent":"FUNC","rule":"TableRedordIndex","sum":1},{"parent":"FUNC","rule":"TableRow","sum":26859346},{"parent":"FUNC","rule":"TableRowIndex","sum":1},{"parent":"FUNC","rule":"TableRows","sum":1668276},{"parent":"FUNC","rule":"Table_Name","sum":55},{"parent":"FUNC","rule":"Table_Row","sum":5},{"parent":"FUNC","rule":"Table_name","sum":75},{"parent":"FUNC","rule":"Table_path","sum":3},{"parent":"FUNC","rule":"Tablename","sum":103643},{"parent":"FUNC","rule":"Tablepath","sum":217},{"parent":"FUNC","rule":"TablerRow","sum":1},{"parent":"FUNC","rule":"TablerecordIndex","sum":1},{"parent":"FUNC","rule":"Tablerecordindex","sum":3},{"parent":"FUNC","rule":"Tablerow","sum":26448},{"parent":"FUNC","rule":"TestBit","sum":251665},{"parent":"FUNC","rule":"Text","sum":20},{"parent":"FUNC","rule":"TimeStamp","sum":4393},{"parent":"FUNC","rule":"Timestamp","sum":1354341},{"parent":"FUNC","rule":"Timestamp64","sum":59
},{"parent":"FUNC","rule":"ToBytes","sum":6604327},{"parent":"FUNC","rule":"ToDIct","sum":269},{"parent":"FUNC","rule":"ToDict","sum":6382929},{"parent":"FUNC","rule":"ToList","sum":3},{"parent":"FUNC","rule":"ToLower","sum":4},{"parent":"FUNC","rule":"ToMilliseconds","sum":2},{"parent":"FUNC","rule":"ToMultiDict","sum":339431},{"parent":"FUNC","rule":"ToPg","sum":3053},{"parent":"FUNC","rule":"ToSET","sum":80},{"parent":"FUNC","rule":"ToSet","sum":5458552},{"parent":"FUNC","rule":"ToSortedDict","sum":1116},{"parent":"FUNC","rule":"ToStartOfMonth","sum":4},{"parent":"FUNC","rule":"To_bytes","sum":3},{"parent":"FUNC","rule":"Todict","sum":232},{"parent":"FUNC","rule":"Top","sum":766},{"parent":"FUNC","rule":"TopBy","sum":79},{"parent":"FUNC","rule":"TopFreq","sum":13374},{"parent":"FUNC","rule":"Top_BY","sum":22},{"parent":"FUNC","rule":"Top_By","sum":125},{"parent":"FUNC","rule":"Top_by","sum":119},{"parent":"FUNC","rule":"Topfreq","sum":28},{"parent":"FUNC","rule":"Toset","sum":2347},{"parent":"FUNC","rule":"TryMember","sum":16627965},{"parent":"FUNC","rule":"Trymember","sum":36871},{"parent":"FUNC","rule":"TupleElementType","sum":7546},{"parent":"FUNC","rule":"TupleType","sum":2404},{"parent":"FUNC","rule":"TupleTypeComponents","sum":60},{"parent":"FUNC","rule":"TupleTypeHandle","sum":5725},{"parent":"FUNC","rule":"TypeHandle","sum":163107},{"parent":"FUNC","rule":"TypeKind","sum":54320},{"parent":"FUNC","rule":"TypeOF","sum":37},{"parent":"FUNC","rule":"TypeOf","sum":842640},{"parent":"FUNC","rule":"Typeof","sum":78},{"parent":"FUNC","rule":"TzDate","sum":676},{"parent":"FUNC","rule":"TzDate32","sum":7},{"parent":"FUNC","rule":"TzDateTime","sum":24915},{"parent":"FUNC","rule":"TzDateTime64","sum":7},{"parent":"FUNC","rule":"TzDatetime","sum":46254},{"parent":"FUNC","rule":"TzTimeStamp","sum":4},{"parent":"FUNC","rule":"TzTimestamp","sum":342200},{"parent":"FUNC","rule":"TzTimestamp64","sum":7},{"parent":"FUNC","rule":"UBSTRING","sum":1},{"parent":"FUNC","rule":"U
DAF","sum":141439},{"parent":"FUNC","rule":"UDF","sum":1686},{"parent":"FUNC","rule":"UINT32","sum":4},{"parent":"FUNC","rule":"UINT64","sum":6},{"parent":"FUNC","rule":"UInt32","sum":108286},{"parent":"FUNC","rule":"UInt64","sum":747},{"parent":"FUNC","rule":"UInt8","sum":740},{"parent":"FUNC","rule":"UNIQ","sum":1},{"parent":"FUNC","rule":"UNPICKLE","sum":28},{"parent":"FUNC","rule":"UNTAG","sum":18},{"parent":"FUNC","rule":"UNWRAP","sum":13846988},{"parent":"FUNC","rule":"UNWRAp","sum":44},{"parent":"FUNC","rule":"UNWRaP","sum":1},{"parent":"FUNC","rule":"UNWRap","sum":9838},{"parent":"FUNC","rule":"UNWrAP","sum":4},{"parent":"FUNC","rule":"UNWraP","sum":5},{"parent":"FUNC","rule":"UNWrap","sum":2},{"parent":"FUNC","rule":"UNwRAP","sum":6},{"parent":"FUNC","rule":"UNwrap","sum":1274},{"parent":"FUNC","rule":"USING","sum":6},{"parent":"FUNC","rule":"UTF8","sum":342555},{"parent":"FUNC","rule":"UUID","sum":1},{"parent":"FUNC","rule":"Udf","sum":295914},{"parent":"FUNC","rule":"Uint16","sum":83},{"parent":"FUNC","rule":"Uint32","sum":331609},{"parent":"FUNC","rule":"Uint64","sum":12037},{"parent":"FUNC","rule":"Uint8","sum":2898},{"parent":"FUNC","rule":"UnTag","sum":55},{"parent":"FUNC","rule":"UnWRAP","sum":10},{"parent":"FUNC","rule":"UnWrAp","sum":1},{"parent":"FUNC","rule":"UnWrap","sum":1079656},{"parent":"FUNC","rule":"UnionAll","sum":3},{"parent":"FUNC","rule":"Unpickle","sum":180},{"parent":"FUNC","rule":"Untag","sum":14594},{"parent":"FUNC","rule":"Unwarp","sum":1},{"parent":"FUNC","rule":"Unwrap","sum":35630413},{"parent":"FUNC","rule":"UtcCurrentDatetime","sum":8},{"parent":"FUNC","rule":"Utf8","sum":41450},{"parent":"FUNC","rule":"Uuid","sum":459},{"parent":"FUNC","rule":"VALUES","sum":15},{"parent":"FUNC","rule":"VARIANCE","sum":30976},{"parent":"FUNC","rule":"VARIANCE_POPULATION","sum":2917},{"parent":"FUNC","rule":"VARIANCE_SAMPLE","sum":1299},{"parent":"FUNC","rule":"VARP","sum":146},{"parent":"FUNC","rule":"VARPOP","sum":15},{"parent":"FUNC","rule"
:"VAR_POP","sum":4},{"parent":"FUNC","rule":"VAR_SAMP","sum":44},{"parent":"FUNC","rule":"VERSION","sum":2},{"parent":"FUNC","rule":"Variance","sum":1146},{"parent":"FUNC","rule":"Variance_SAMPLE","sum":30},{"parent":"FUNC","rule":"Variance_Sample","sum":23},{"parent":"FUNC","rule":"Variance_sample","sum":24},{"parent":"FUNC","rule":"VariantType","sum":3235},{"parent":"FUNC","rule":"VariantTypeHandle","sum":8},{"parent":"FUNC","rule":"VariantUnderlyingType","sum":6437},{"parent":"FUNC","rule":"Version","sum":3},{"parent":"FUNC","rule":"Visit","sum":18},{"parent":"FUNC","rule":"Void","sum":93478},{"parent":"FUNC","rule":"WEAKFIELD","sum":833},{"parent":"FUNC","rule":"WEAK_FIELD","sum":384},{"parent":"FUNC","rule":"WEakField","sum":2},{"parent":"FUNC","rule":"Way","sum":8613},{"parent":"FUNC","rule":"WeakFIeld","sum":43},{"parent":"FUNC","rule":"WeakField","sum":21081419},{"parent":"FUNC","rule":"WeakFiled","sum":2},{"parent":"FUNC","rule":"Weakfield","sum":1841},{"parent":"FUNC","rule":"WorldCode","sum":365},{"parent":"FUNC","rule":"YPathDouble","sum":4},{"parent":"FUNC","rule":"YPathExtract","sum":4},{"parent":"FUNC","rule":"YPathInt64","sum":6},{"parent":"FUNC","rule":"YPathString","sum":4},{"parent":"FUNC","rule":"YSON","sum":762},{"parent":"FUNC","rule":"YSONExtractString","sum":6},{"parent":"FUNC","rule":"Yson","sum":601523},{"parent":"FUNC","rule":"aGGREGATE_LIST","sum":13},{"parent":"FUNC","rule":"aGGREGATE_LIST_DISTINCT","sum":4},{"parent":"FUNC","rule":"aGG_LIST","sum":2},{"parent":"FUNC","rule":"aGG_LIST_DISTINCT","sum":10},{"parent":"FUNC","rule":"aSSTRUCT","sum":1},{"parent":"FUNC","rule":"aSTuple","sum":3},{"parent":"FUNC","rule":"aVG","sum":2},{"parent":"FUNC","rule":"abs","sum":1545323},{"parent":"FUNC","rule":"addMember","sum":2503},{"parent":"FUNC","rule":"addTimezone","sum":784},{"parent":"FUNC","rule":"age","sum":2},{"parent":"FUNC","rule":"aggList","sum":268},{"parent":"FUNC","rule":"agg_LIST","sum":76},{"parent":"FUNC","rule":"agg_LIST_DISTINCT",
"sum":1},{"parent":"FUNC","rule":"agg_List","sum":546},{"parent":"FUNC","rule":"agg_List_distinct","sum":24},{"parent":"FUNC","rule":"agg_list","sum":1278790},{"parent":"FUNC","rule":"agg_list_DISTINCT","sum":2},{"parent":"FUNC","rule":"agg_list_Distinct","sum":1},{"parent":"FUNC","rule":"agg_list_distinct","sum":433891},{"parent":"FUNC","rule":"agg_set","sum":1},{"parent":"FUNC","rule":"agglist","sum":122},{"parent":"FUNC","rule":"agglistdistinct","sum":25},{"parent":"FUNC","rule":"aggr_list","sum":2752},{"parent":"FUNC","rule":"aggr_list_distinct","sum":35},{"parent":"FUNC","rule":"aggr_set","sum":2},{"parent":"FUNC","rule":"aggregATE_LIST","sum":16},{"parent":"FUNC","rule":"aggregateList","sum":373},{"parent":"FUNC","rule":"aggregateListDistinct","sum":33},{"parent":"FUNC","rule":"aggregate_List","sum":993},{"parent":"FUNC","rule":"aggregate_List_Distinct","sum":8},{"parent":"FUNC","rule":"aggregate_List_distinct","sum":10},{"parent":"FUNC","rule":"aggregate_by","sum":370741},{"parent":"FUNC","rule":"aggregate_list","sum":2509693},{"parent":"FUNC","rule":"aggregate_list_","sum":1},{"parent":"FUNC","rule":"aggregate_list_DISTINCT","sum":1760},{"parent":"FUNC","rule":"aggregate_list_Distinct","sum":27},{"parent":"FUNC","rule":"aggregate_list_distinct","sum":932178},{"parent":"FUNC","rule":"aggregatelist","sum":534},{"parent":"FUNC","rule":"aggregatetransforminput","sum":1},{"parent":"FUNC","rule":"aggregationFactory","sum":85},{"parent":"FUNC","rule":"aggregation_factory","sum":9375},{"parent":"FUNC","rule":"aggregationfactory","sum":71},{"parent":"FUNC","rule":"agregate_list_distinct","sum":1},{"parent":"FUNC","rule":"and","sum":7},{"parent":"FUNC","rule":"anyLast","sum":8},{"parent":"FUNC","rule":"argMax","sum":14},{"parent":"FUNC","rule":"arrayElement","sum":1},{"parent":"FUNC","rule":"arrayJoin","sum":10},{"parent":"FUNC","rule":"arrayMax","sum":4},{"parent":"FUNC","rule":"array_agg","sum":5},{"parent":"FUNC","rule":"array_to_string","sum":2},{"parent":"FUNC","
rule":"asDIct","sum":1},{"parent":"FUNC","rule":"asDict","sum":3719},{"parent":"FUNC","rule":"asLIST","sum":50},{"parent":"FUNC","rule":"asList","sum":173093},{"parent":"FUNC","rule":"asSet","sum":1949},{"parent":"FUNC","rule":"asStruct","sum":106781},{"parent":"FUNC","rule":"asTUPLE","sum":1},{"parent":"FUNC","rule":"asTagged","sum":2015},{"parent":"FUNC","rule":"asTuple","sum":83815},{"parent":"FUNC","rule":"asVariant","sum":10},{"parent":"FUNC","rule":"as_dict","sum":4},{"parent":"FUNC","rule":"as_list","sum":77},{"parent":"FUNC","rule":"as_struct","sum":122},{"parent":"FUNC","rule":"as_table","sum":30},{"parent":"FUNC","rule":"as_tagged","sum":1},{"parent":"FUNC","rule":"as_tuple","sum":360},{"parent":"FUNC","rule":"asdict","sum":3903},{"parent":"FUNC","rule":"asenum","sum":13},{"parent":"FUNC","rule":"aslist","sum":165329},{"parent":"FUNC","rule":"assessments_integralListReverse","sum":2},{"parent":"FUNC","rule":"asset","sum":1208},{"parent":"FUNC","rule":"asstruct","sum":12857},{"parent":"FUNC","rule":"assumeNotNull","sum":3},{"parent":"FUNC","rule":"astagged","sum":1374},{"parent":"FUNC","rule":"astuple","sum":31282},{"parent":"FUNC","rule":"asvariant","sum":3},{"parent":"FUNC","rule":"atan2","sum":4},{"parent":"FUNC","rule":"avG","sum":56},{"parent":"FUNC","rule":"avg","sum":5965083},{"parent":"FUNC","rule":"avgIf","sum":18733},{"parent":"FUNC","rule":"avg_","sum":1},{"parent":"FUNC","rule":"avg_IF","sum":155},{"parent":"FUNC","rule":"avg_If","sum":129},{"parent":"FUNC","rule":"avg_if","sum":495871},{"parent":"FUNC","rule":"ax","sum":13},{"parent":"FUNC","rule":"bit_or","sum":26835},{"parent":"FUNC","rule":"bool","sum":286},{"parent":"FUNC","rule":"bool_and","sum":65763},{"parent":"FUNC","rule":"bool_or","sum":239951},{"parent":"FUNC","rule":"bool_xor","sum":2},{"parent":"FUNC","rule":"bottom","sum":11156},{"parent":"FUNC","rule":"bottom_by","sum":325871},{"parent":"FUNC","rule":"business_id","sum":1},{"parent":"FUNC","rule":"bytes","sum":2},{"parent":"FUNC"
,"rule":"cOALESCE","sum":5},{"parent":"FUNC","rule":"cOUNT","sum":62},{"parent":"FUNC","rule":"cOUNT_IF","sum":39},{"parent":"FUNC","rule":"cOunt","sum":2},{"parent":"FUNC","rule":"ceil","sum":1},{"parent":"FUNC","rule":"char_LENGTH","sum":2},{"parent":"FUNC","rule":"char_length","sum":18},{"parent":"FUNC","rule":"check_google_id","sum":1},{"parent":"FUNC","rule":"choosemembers","sum":5},{"parent":"FUNC","rule":"client_id","sum":1},{"parent":"FUNC","rule":"cnt","sum":2},{"parent":"FUNC","rule":"coALESCE","sum":14},{"parent":"FUNC","rule":"coUNT","sum":3},{"parent":"FUNC","rule":"coUNt","sum":3},{"parent":"FUNC","rule":"coalESCE","sum":40},{"parent":"FUNC","rule":"coalescE","sum":1},{"parent":"FUNC","rule":"coalesce","sum":24843533},{"parent":"FUNC","rule":"coalescue","sum":2},{"parent":"FUNC","rule":"coalsece","sum":1},{"parent":"FUNC","rule":"combinemembers","sum":5},{"parent":"FUNC","rule":"concat","sum":25},{"parent":"FUNC","rule":"conunt","sum":1},{"parent":"FUNC","rule":"convert_to_360","sum":2},{"parent":"FUNC","rule":"corr","sum":701},{"parent":"FUNC","rule":"correlation","sum":3830},{"parent":"FUNC","rule":"cos","sum":8},{"parent":"FUNC","rule":"couNT","sum":1},{"parent":"FUNC","rule":"couNT_IF","sum":7},{"parent":"FUNC","rule":"counT","sum":24},{"parent":"FUNC","rule":"count","sum":38597076},{"parent":"FUNC","rule":"countDistinct","sum":1},{"parent":"FUNC","rule":"countDistinctEstimate","sum":284728},{"parent":"FUNC","rule":"countIF","sum":8421},{"parent":"FUNC","rule":"countIf","sum":1395469},{"parent":"FUNC","rule":"count_","sum":587},{"parent":"FUNC","rule":"count_IF","sum":33881},{"parent":"FUNC","rule":"count_If","sum":26345},{"parent":"FUNC","rule":"count_distinct_estimate","sum":14},{"parent":"FUNC","rule":"count_if","sum":18886551},{"parent":"FUNC","rule":"countdistinctEstimate","sum":1906},{"parent":"FUNC","rule":"countdistinctestimate","sum":2796},{"parent":"FUNC","rule":"countif","sum":22798},{"parent":"FUNC","rule":"covar","sum":239},{"parent":"
FUNC","rule":"covariance","sum":278},{"parent":"FUNC","rule":"covariance_sample","sum":165},{"parent":"FUNC","rule":"cpunt","sum":1},{"parent":"FUNC","rule":"cume_dist","sum":69},{"parent":"FUNC","rule":"currentTzDate","sum":4108},{"parent":"FUNC","rule":"currentTzTimestamp","sum":53},{"parent":"FUNC","rule":"currentUTCDATETIME","sum":19},{"parent":"FUNC","rule":"currentUTCDate","sum":646},{"parent":"FUNC","rule":"currentUTCDateTime","sum":291},{"parent":"FUNC","rule":"currentUTCdate","sum":358},{"parent":"FUNC","rule":"currentUTCdatetime","sum":20},{"parent":"FUNC","rule":"currentUTcdate","sum":1305},{"parent":"FUNC","rule":"currentUtcDate","sum":6074},{"parent":"FUNC","rule":"currentUtcDateTime","sum":13854},{"parent":"FUNC","rule":"currentUtcDatetime","sum":2309},{"parent":"FUNC","rule":"currentUtcTimestamp","sum":197},{"parent":"FUNC","rule":"current_utc_timestamp","sum":2},{"parent":"FUNC","rule":"currenttzdate","sum":419},{"parent":"FUNC","rule":"currenttzdatetime","sum":258},{"parent":"FUNC","rule":"currenttztimestamp","sum":47},{"parent":"FUNC","rule":"currentutcDateTime","sum":3},{"parent":"FUNC","rule":"currentutcdate","sum":117924},{"parent":"FUNC","rule":"currentutcdatetime","sum":57736},{"parent":"FUNC","rule":"currentutctimestamp","sum":48035},{"parent":"FUNC","rule":"d","sum":1},{"parent":"FUNC","rule":"dATE","sum":4},{"parent":"FUNC","rule":"date","sum":413808},{"parent":"FUNC","rule":"date32","sum":11},{"parent":"FUNC","rule":"dateDiff","sum":1},{"parent":"FUNC","rule":"dateNow","sum":2},{"parent":"FUNC","rule":"dateTIME","sum":2},{"parent":"FUNC","rule":"dateTime","sum":10},{"parent":"FUNC","rule":"date_add","sum":8},{"parent":"FUNC","rule":"date_format","sum":2},{"parent":"FUNC","rule":"date_from_ts","sum":1},{"parent":"FUNC","rule":"date_sub","sum":2},{"parent":"FUNC","rule":"dateadd","sum":1},{"parent":"FUNC","rule":"datetime","sum":12284},{"parent":"FUNC","rule":"datetime64","sum":19},{"parent":"FUNC","rule":"decimal","sum":333},{"parent":"FUNC
","rule":"dense_RANK","sum":42},{"parent":"FUNC","rule":"dense_rank","sum":45856},{"parent":"FUNC","rule":"dictAggregate","sum":14},{"parent":"FUNC","rule":"dictContains","sum":7},{"parent":"FUNC","rule":"dictGetString","sum":1},{"parent":"FUNC","rule":"dictItems","sum":8154},{"parent":"FUNC","rule":"dictKeys","sum":1175},{"parent":"FUNC","rule":"dictLength","sum":104},{"parent":"FUNC","rule":"dictLookUp","sum":9405},{"parent":"FUNC","rule":"dictLookup","sum":671},{"parent":"FUNC","rule":"dictPayloads","sum":90},{"parent":"FUNC","rule":"dict_keys","sum":8709},{"parent":"FUNC","rule":"dictcontains","sum":349},{"parent":"FUNC","rule":"dictcreate","sum":2},{"parent":"FUNC","rule":"dicthasitems","sum":24},{"parent":"FUNC","rule":"dictitems","sum":2383},{"parent":"FUNC","rule":"dictkeys","sum":888},{"parent":"FUNC","rule":"dictlength","sum":6542},{"parent":"FUNC","rule":"dictlookup","sum":1561},{"parent":"FUNC","rule":"dictpayloads","sum":113},{"parent":"FUNC","rule":"disctinct","sum":2},{"parent":"FUNC","rule":"dol_show1","sum":1},{"parent":"FUNC","rule":"double","sum":134},{"parent":"FUNC","rule":"dynumber","sum":12},{"parent":"FUNC","rule":"each","sum":1},{"parent":"FUNC","rule":"empty","sum":3},{"parent":"FUNC","rule":"endsWith","sum":2155},{"parent":"FUNC","rule":"endswith","sum":30845},{"parent":"FUNC","rule":"ensure","sum":404947},{"parent":"FUNC","rule":"ensuretype","sum":710},{"parent":"FUNC","rule":"evaluateCode","sum":59},{"parent":"FUNC","rule":"evaluateExpr","sum":280},{"parent":"FUNC","rule":"expandstruct","sum":20},{"parent":"FUNC","rule":"f","sum":1},{"parent":"FUNC","rule":"filecontent","sum":603},{"parent":"FUNC","rule":"filepath","sum":990},{"parent":"FUNC","rule":"filter","sum":2},{"parent":"FUNC","rule":"find","sum":4031435},{"parent":"FUNC","rule":"first_VALUE","sum":19},{"parent":"FUNC","rule":"first_route_timestamp","sum":1},{"parent":"FUNC","rule":"first_value","sum":1150137},{"parent":"FUNC","rule":"flatten","sum":1},{"parent":"FUNC","rule":"flo
at","sum":23057},{"parent":"FUNC","rule":"floor","sum":1},{"parent":"FUNC","rule":"foo","sum":1},{"parent":"FUNC","rule":"forceremovemember","sum":373},{"parent":"FUNC","rule":"format","sum":1},{"parent":"FUNC","rule":"formatType","sum":8},{"parent":"FUNC","rule":"formattype","sum":143},{"parent":"FUNC","rule":"fromBytes","sum":12},{"parent":"FUNC","rule":"fromPg","sum":115},{"parent":"FUNC","rule":"fromUnixTimestamp64Micro","sum":1},{"parent":"FUNC","rule":"from_bytes","sum":2},{"parent":"FUNC","rule":"frombytes","sum":51},{"parent":"FUNC","rule":"frompg","sum":122},{"parent":"FUNC","rule":"gatherMembers","sum":67},{"parent":"FUNC","rule":"gathermembers","sum":7},{"parent":"FUNC","rule":"get_auto_label","sum":1},{"parent":"FUNC","rule":"get_html","sum":1},{"parent":"FUNC","rule":"get_is_in_collection_feature","sum":1},{"parent":"FUNC","rule":"get_metrika_bro","sum":1},{"parent":"FUNC","rule":"get_pay_processing","sum":2},{"parent":"FUNC","rule":"get_post_profiles","sum":1},{"parent":"FUNC","rule":"get_rewrite_prompt","sum":1},{"parent":"FUNC","rule":"get_support_line","sum":1},{"parent":"FUNC","rule":"get_test_id","sum":7},{"parent":"FUNC","rule":"getdate","sum":2},{"parent":"FUNC","rule":"greatest","sum":318861},{"parent":"FUNC","rule":"groupArray","sum":4},{"parent":"FUNC","rule":"groupUniqArray","sum":2},{"parent":"FUNC","rule":"grouping","sum":17523},{"parent":"FUNC","rule":"hISTOGRAM","sum":2},{"parent":"FUNC","rule":"has","sum":16},{"parent":"FUNC","rule":"histOGRAM","sum":4},{"parent":"FUNC","rule":"histograM","sum":356},{"parent":"FUNC","rule":"histogram","sum":52833},{"parent":"FUNC","rule":"histogramcdf","sum":84},{"parent":"FUNC","rule":"hll","sum":82171},{"parent":"FUNC","rule":"iF","sum":2678},{"parent":"FUNC","rule":"iNtErVaL","sum":1},{"parent":"FUNC","rule":"if","sum":42963453},{"parent":"FUNC","rule":"ifNull","sum":6},{"parent":"FUNC","rule":"in","sum":10},{"parent":"FUNC","rule":"indexOf","sum":12},{"parent":"FUNC","rule":"instanceof","sum":55},{"
parent":"FUNC","rule":"instr","sum":1},{"parent":"FUNC","rule":"int","sum":31625},{"parent":"FUNC","rule":"int32","sum":2},{"parent":"FUNC","rule":"int64","sum":19},{"parent":"FUNC","rule":"int8","sum":2},{"parent":"FUNC","rule":"intervaL","sum":9012},{"parent":"FUNC","rule":"interval","sum":1785040},{"parent":"FUNC","rule":"interval64","sum":7},{"parent":"FUNC","rule":"isNull","sum":7},{"parent":"FUNC","rule":"is_allowed_in_kz","sum":2},{"parent":"FUNC","rule":"is_valid_intent","sum":4},{"parent":"FUNC","rule":"is_valid_organic","sum":4},{"parent":"FUNC","rule":"istLast","sum":1},{"parent":"FUNC","rule":"isum","sum":6},{"parent":"FUNC","rule":"joinTableRow","sum":5},{"parent":"FUNC","rule":"jointablerow","sum":215},{"parent":"FUNC","rule":"json","sum":5551},{"parent":"FUNC","rule":"json_extract","sum":3},{"parent":"FUNC","rule":"json_object_agg","sum":1},{"parent":"FUNC","rule":"jsondocument","sum":6},{"parent":"FUNC","rule":"just","sum":239226},{"parent":"FUNC","rule":"lAG","sum":2309},{"parent":"FUNC","rule":"lEAD","sum":6792},{"parent":"FUNC","rule":"lEN","sum":30},{"parent":"FUNC","rule":"lINEARHISTOGRAM","sum":115},{"parent":"FUNC","rule":"lISTlENGTH","sum":2},{"parent":"FUNC","rule":"lISTlength","sum":1},{"parent":"FUNC","rule":"lag","sum":1010915},{"parent":"FUNC","rule":"last_VALUE","sum":14},{"parent":"FUNC","rule":"last_value","sum":1134965},{"parent":"FUNC","rule":"lead","sum":922115},{"parent":"FUNC","rule":"least","sum":367295},{"parent":"FUNC","rule":"len","sum":712028},{"parent":"FUNC","rule":"lenGTH","sum":1},{"parent":"FUNC","rule":"lenght","sum":1},{"parent":"FUNC","rule":"lengtH","sum":2},{"parent":"FUNC","rule":"length","sum":2513539},{"parent":"FUNC","rule":"like","sum":4},{"parent":"FUNC","rule":"likely","sum":16421},{"parent":"FUNC","rule":"linearHISTOGRAM","sum":10},{"parent":"FUNC","rule":"linearHistogram","sum":9},{"parent":"FUNC","rule":"linearhistogram","sum":167},{"parent":"FUNC","rule":"linearhistogramcdf","sum":20},{"parent":"FUNC","r
ule":"listALL","sum":3},{"parent":"FUNC","rule":"listAVG","sum":831},{"parent":"FUNC","rule":"listAggregateUnique","sum":1},{"parent":"FUNC","rule":"listAll","sum":86},{"parent":"FUNC","rule":"listAny","sum":116},{"parent":"FUNC","rule":"listAvg","sum":96},{"parent":"FUNC","rule":"listCollect","sum":378},{"parent":"FUNC","rule":"listConcat","sum":11135},{"parent":"FUNC","rule":"listEnumerate","sum":150},{"parent":"FUNC","rule":"listExtend","sum":6421},{"parent":"FUNC","rule":"listExtract","sum":24},{"parent":"FUNC","rule":"listFilter","sum":38539},{"parent":"FUNC","rule":"listFlatten","sum":292},{"parent":"FUNC","rule":"listFold","sum":294},{"parent":"FUNC","rule":"listFromRange","sum":791},{"parent":"FUNC","rule":"listHAs","sum":52},{"parent":"FUNC","rule":"listHas","sum":40660},{"parent":"FUNC","rule":"listHasItems","sum":3198},{"parent":"FUNC","rule":"listHead","sum":2174},{"parent":"FUNC","rule":"listIndexOf","sum":15},{"parent":"FUNC","rule":"listLENGTH","sum":231},{"parent":"FUNC","rule":"listLast","sum":1009},{"parent":"FUNC","rule":"listLength","sum":29071},{"parent":"FUNC","rule":"listMAX","sum":1},{"parent":"FUNC","rule":"listMIN","sum":1},{"parent":"FUNC","rule":"listMap","sum":52384},{"parent":"FUNC","rule":"listMax","sum":804},{"parent":"FUNC","rule":"listMin","sum":172},{"parent":"FUNC","rule":"listNotNull","sum":274},{"parent":"FUNC","rule":"listReverse","sum":3733},{"parent":"FUNC","rule":"listSkip","sum":21},{"parent":"FUNC","rule":"listSort","sum":21769},{"parent":"FUNC","rule":"listSortAsc","sum":2},{"parent":"FUNC","rule":"listSortDesc","sum":193},{"parent":"FUNC","rule":"listSum","sum":1467},{"parent":"FUNC","rule":"listTake","sum":784},{"parent":"FUNC","rule":"listTopSort","sum":18},{"parent":"FUNC","rule":"listUniq","sum":10565},{"parent":"FUNC","rule":"listUniqStable","sum":1},{"parent":"FUNC","rule":"listZip","sum":1226},{"parent":"FUNC","rule":"listZipAll","sum":1580},{"parent":"FUNC","rule":"list_Length","sum":1},{"parent":"FUNC","rule":"l
ist_MAX","sum":1},{"parent":"FUNC","rule":"list_agg","sum":2},{"parent":"FUNC","rule":"list_avg","sum":25},{"parent":"FUNC","rule":"list_concat","sum":254},{"parent":"FUNC","rule":"list_filter","sum":2},{"parent":"FUNC","rule":"list_flatten","sum":15},{"parent":"FUNC","rule":"list_has","sum":6090},{"parent":"FUNC","rule":"list_has_items","sum":2},{"parent":"FUNC","rule":"list_head","sum":16},{"parent":"FUNC","rule":"list_length","sum":866},{"parent":"FUNC","rule":"list_map","sum":3},{"parent":"FUNC","rule":"list_max","sum":3},{"parent":"FUNC","rule":"list_min","sum":3},{"parent":"FUNC","rule":"list_not_null","sum":1},{"parent":"FUNC","rule":"list_sort","sum":144},{"parent":"FUNC","rule":"list_uniq","sum":1},{"parent":"FUNC","rule":"list_zip","sum":36},{"parent":"FUNC","rule":"listaggregate","sum":87},{"parent":"FUNC","rule":"listall","sum":13166},{"parent":"FUNC","rule":"listany","sum":15097},{"parent":"FUNC","rule":"listavg","sum":11305},{"parent":"FUNC","rule":"listcollect","sum":483},{"parent":"FUNC","rule":"listconcat","sum":19554},{"parent":"FUNC","rule":"listcreate","sum":10},{"parent":"FUNC","rule":"listenumerate","sum":2065},{"parent":"FUNC","rule":"listextend","sum":3758},{"parent":"FUNC","rule":"listextendstrict","sum":61},{"parent":"FUNC","rule":"listextract","sum":1904},{"parent":"FUNC","rule":"listfilter","sum":103472},{"parent":"FUNC","rule":"listflatmap","sum":7081},{"parent":"FUNC","rule":"listflatten","sum":21498},{"parent":"FUNC","rule":"listfold","sum":14},{"parent":"FUNC","rule":"listfold1map","sum":60},{"parent":"FUNC","rule":"listfromRange","sum":15},{"parent":"FUNC","rule":"listfromrange","sum":13893},{"parent":"FUNC","rule":"listfromtuple","sum":58},{"parent":"FUNC","rule":"listhas","sum":171277},{"parent":"FUNC","rule":"listhasItems","sum":34},{"parent":"FUNC","rule":"listhasitems","sum":10455},{"parent":"FUNC","rule":"listhead","sum":18559},{"parent":"FUNC","rule":"listindexof","sum":1209},{"parent":"FUNC","rule":"listlast","sum":3107},{"pa
rent":"FUNC","rule":"listlength","sum":342025},{"parent":"FUNC","rule":"listmap","sum":524953},{"parent":"FUNC","rule":"listmax","sum":3903},{"parent":"FUNC","rule":"listmin","sum":2675},{"parent":"FUNC","rule":"listnotNull","sum":1},{"parent":"FUNC","rule":"listnotnull","sum":12807},{"parent":"FUNC","rule":"listreplicate","sum":49},{"parent":"FUNC","rule":"listreverse","sum":2311},{"parent":"FUNC","rule":"listskip","sum":1193},{"parent":"FUNC","rule":"listsort","sum":56164},{"parent":"FUNC","rule":"listsortDesc","sum":1},{"parent":"FUNC","rule":"listsortasc","sum":377},{"parent":"FUNC","rule":"listsortdesc","sum":3978},{"parent":"FUNC","rule":"listsum","sum":3366},{"parent":"FUNC","rule":"listtake","sum":15952},{"parent":"FUNC","rule":"listtop","sum":68},{"parent":"FUNC","rule":"listunionall","sum":8},{"parent":"FUNC","rule":"listuniq","sum":24918},{"parent":"FUNC","rule":"listuniqstable","sum":94},{"parent":"FUNC","rule":"listzip","sum":16490},{"parent":"FUNC","rule":"listzipALL","sum":2},{"parent":"FUNC","rule":"listzipAll","sum":11},{"parent":"FUNC","rule":"listzipall","sum":100},{"parent":"FUNC","rule":"log","sum":2},{"parent":"FUNC","rule":"logarithmicHistogram","sum":1},{"parent":"FUNC","rule":"logarithmichistogram","sum":6},{"parent":"FUNC","rule":"loghistogram","sum":1},{"parent":"FUNC","rule":"lower","sum":3},{"parent":"FUNC","rule":"mAX","sum":15},{"parent":"FUNC","rule":"mAX_BY","sum":128},{"parent":"FUNC","rule":"mIN","sum":2},{"parent":"FUNC","rule":"mIN_by","sum":10},{"parent":"FUNC","rule":"maX","sum":5},{"parent":"FUNC","rule":"maX_BY","sum":1},{"parent":"FUNC","rule":"map","sum":2},{"parent":"FUNC","rule":"max","sum":25061450},{"parent":"FUNC","rule":"maxBy","sum":1},{"parent":"FUNC","rule":"maxOf","sum":3433},{"parent":"FUNC","rule":"max_","sum":1},{"parent":"FUNC","rule":"max_BY","sum":2350},{"parent":"FUNC","rule":"max_By","sum":4552},{"parent":"FUNC","rule":"max_OF","sum":8},{"parent":"FUNC","rule":"max_Of","sum":30},{"parent":"FUNC","rule":"ma
x_by","sum":28896698},{"parent":"FUNC","rule":"max_if","sum":3},{"parent":"FUNC","rule":"max_of","sum":1287338},{"parent":"FUNC","rule":"maxby","sum":5161},{"parent":"FUNC","rule":"maxof","sum":849},{"parent":"FUNC","rule":"md5int","sum":1},{"parent":"FUNC","rule":"median","sum":377952},{"parent":"FUNC","rule":"metric_exp","sum":1},{"parent":"FUNC","rule":"min","sum":10208917},{"parent":"FUNC","rule":"minOf","sum":12},{"parent":"FUNC","rule":"min_BY","sum":8639},{"parent":"FUNC","rule":"min_By","sum":6},{"parent":"FUNC","rule":"min_OF","sum":9},{"parent":"FUNC","rule":"min_Of","sum":337},{"parent":"FUNC","rule":"min_by","sum":2325677},{"parent":"FUNC","rule":"min_if","sum":10},{"parent":"FUNC","rule":"min_of","sum":577834},{"parent":"FUNC","rule":"minby","sum":1},{"parent":"FUNC","rule":"minof","sum":94},{"parent":"FUNC","rule":"mode","sum":117549},{"parent":"FUNC","rule":"multiIf","sum":2},{"parent":"FUNC","rule":"multi_aggregate_by","sum":115834},{"parent":"FUNC","rule":"nanvl","sum":129203},{"parent":"FUNC","rule":"notEmpty","sum":2},{"parent":"FUNC","rule":"nothing","sum":12757},{"parent":"FUNC","rule":"now","sum":16},{"parent":"FUNC","rule":"nth_value","sum":17},{"parent":"FUNC","rule":"ntile","sum":337},{"parent":"FUNC","rule":"nvL","sum":61},{"parent":"FUNC","rule":"nvl","sum":14001743},{"parent":"FUNC","rule":"on","sum":1},{"parent":"FUNC","rule":"optionaltype","sum":161},{"parent":"FUNC","rule":"or","sum":3},{"parent":"FUNC","rule":"order_nr","sum":1},{"parent":"FUNC","rule":"p25","sum":1},{"parent":"FUNC","rule":"p75","sum":1},{"parent":"FUNC","rule":"pERCENTILE","sum":11},{"parent":"FUNC","rule":"parseFile","sum":138},{"parent":"FUNC","rule":"parseForErrors","sum":32},{"parent":"FUNC","rule":"parsefile","sum":11204},{"parent":"FUNC","rule":"percent_rank","sum":2159},{"parent":"FUNC","rule":"percentile","sum":3344572},{"parent":"FUNC","rule":"pgInt2","sum":2},{"parent":"FUNC","rule":"pgarray","sum":2},{"parent":"FUNC","rule":"pgbpchar","sum":2},{"parent":"
FUNC","rule":"pgbytea","sum":6},{"parent":"FUNC","rule":"pgcast","sum":39},{"parent":"FUNC","rule":"pgdate","sum":94},{"parent":"FUNC","rule":"pgfloat4","sum":5},{"parent":"FUNC","rule":"pgfloat8","sum":3},{"parent":"FUNC","rule":"pgint2","sum":9},{"parent":"FUNC","rule":"pginterval","sum":176},{"parent":"FUNC","rule":"pgjson","sum":10},{"parent":"FUNC","rule":"pgname","sum":4},{"parent":"FUNC","rule":"pgnumeric","sum":4},{"parent":"FUNC","rule":"pgoidvector","sum":1},{"parent":"FUNC","rule":"pgtext","sum":9},{"parent":"FUNC","rule":"pgtimestamp","sum":7},{"parent":"FUNC","rule":"pgtimestamptz","sum":4},{"parent":"FUNC","rule":"pickle","sum":480},{"parent":"FUNC","rule":"pow","sum":8},{"parent":"FUNC","rule":"power","sum":3},{"parent":"FUNC","rule":"quantile","sum":1},{"parent":"FUNC","rule":"quantileExact","sum":1},{"parent":"FUNC","rule":"rFIND","sum":3},{"parent":"FUNC","rule":"rand","sum":3},{"parent":"FUNC","rule":"random","sum":258306},{"parent":"FUNC","rule":"randomNumber","sum":8},{"parent":"FUNC","rule":"randomUuid","sum":145},{"parent":"FUNC","rule":"random_number","sum":11},{"parent":"FUNC","rule":"randomnumber","sum":479},{"parent":"FUNC","rule":"randomuuid","sum":37},{"parent":"FUNC","rule":"range","sum":97},{"parent":"FUNC","rule":"rank","sum":222787},{"parent":"FUNC","rule":"regex_full_match","sum":1},{"parent":"FUNC","rule":"regex_replace_first","sum":1},{"parent":"FUNC","rule":"regionIn","sum":2},{"parent":"FUNC","rule":"removeMember","sum":160},{"parent":"FUNC","rule":"removemember","sum":18},{"parent":"FUNC","rule":"removemembers","sum":77},{"parent":"FUNC","rule":"removetimezone","sum":5},{"parent":"FUNC","rule":"renamemembers","sum":4},{"parent":"FUNC","rule":"replace","sum":1},{"parent":"FUNC","rule":"replaceRegexpAll","sum":2},{"parent":"FUNC","rule":"rfind","sum":127805},{"parent":"FUNC","rule":"round","sum":27},{"parent":"FUNC","rule":"row_NUMBER","sum":5},{"parent":"FUNC","rule":"row_Number","sum":3},{"parent":"FUNC","rule":"row_number","su
m":2158139},{"parent":"FUNC","rule":"rownumber","sum":370},{"parent":"FUNC","rule":"sUBSTRING","sum":3},{"parent":"FUNC","rule":"sUM","sum":440},{"parent":"FUNC","rule":"sUM_IF","sum":18},{"parent":"FUNC","rule":"sUm","sum":8},{"parent":"FUNC","rule":"sessionWindow","sum":4},{"parent":"FUNC","rule":"session_start","sum":3},{"parent":"FUNC","rule":"sessionwindow","sum":166},{"parent":"FUNC","rule":"setDifference","sum":59},{"parent":"FUNC","rule":"setIntersection","sum":13},{"parent":"FUNC","rule":"setIsDisjoint","sum":1},{"parent":"FUNC","rule":"setUnion","sum":172},{"parent":"FUNC","rule":"setbit","sum":20},{"parent":"FUNC","rule":"setdifference","sum":162},{"parent":"FUNC","rule":"setincludes","sum":20},{"parent":"FUNC","rule":"setintersection","sum":118},{"parent":"FUNC","rule":"setisdisjoint","sum":1114},{"parent":"FUNC","rule":"setsymmetricdifference","sum":204},{"parent":"FUNC","rule":"setunion","sum":1780},{"parent":"FUNC","rule":"sign","sum":1},{"parent":"FUNC","rule":"sin","sum":16},{"parent":"FUNC","rule":"sipHash64","sum":2},{"parent":"FUNC","rule":"size","sum":1},{"parent":"FUNC","rule":"somE","sum":2},{"parent":"FUNC","rule":"some","sum":20260898},{"parent":"FUNC","rule":"somr","sum":1},{"parent":"FUNC","rule":"splitByChar","sum":1},{"parent":"FUNC","rule":"splitByString","sum":6},{"parent":"FUNC","rule":"spreadmembers","sum":3},{"parent":"FUNC","rule":"sqrt","sum":11},{"parent":"FUNC","rule":"ssubstring","sum":2},{"parent":"FUNC","rule":"stablepickle","sum":30},{"parent":"FUNC","rule":"startsWith","sum":201462},{"parent":"FUNC","rule":"starts_with","sum":11},{"parent":"FUNC","rule":"startswith","sum":21778},{"parent":"FUNC","rule":"staticmap","sum":23},{"parent":"FUNC","rule":"staticzip","sum":1},{"parent":"FUNC","rule":"std_dev","sum":4},{"parent":"FUNC","rule":"stddev","sum":612665},{"parent":"FUNC","rule":"stddevPop","sum":11},{"parent":"FUNC","rule":"stddev_pop","sum":108},{"parent":"FUNC","rule":"stddev_population","sum":88},{"parent":"FUNC","rule
":"stddev_samp","sum":27},{"parent":"FUNC","rule":"stddev_sample","sum":895},{"parent":"FUNC","rule":"stddevpop","sum":19},{"parent":"FUNC","rule":"stddevsamp","sum":5},{"parent":"FUNC","rule":"str","sum":51},{"parent":"FUNC","rule":"strfdate","sum":164},{"parent":"FUNC","rule":"string","sum":77},{"parent":"FUNC","rule":"string_agg","sum":1},{"parent":"FUNC","rule":"string_split","sum":1},{"parent":"FUNC","rule":"string_to_array","sum":3},{"parent":"FUNC","rule":"string_to_features","sum":1},{"parent":"FUNC","rule":"structMembers","sum":7},{"parent":"FUNC","rule":"structUnion","sum":41},{"parent":"FUNC","rule":"structdifference","sum":2},{"parent":"FUNC","rule":"structunion","sum":7},{"parent":"FUNC","rule":"suM","sum":176},{"parent":"FUNC","rule":"suM_if","sum":16},{"parent":"FUNC","rule":"subDate","sum":1},{"parent":"FUNC","rule":"subSTRING","sum":1},{"parent":"FUNC","rule":"subString","sum":573},{"parent":"FUNC","rule":"subqueryMergeFor","sum":21},{"parent":"FUNC","rule":"subqueryUnionMergeFor","sum":4450},{"parent":"FUNC","rule":"subquerymergefor","sum":1863},{"parent":"FUNC","rule":"subsTRING","sum":2},{"parent":"FUNC","rule":"subsrting","sum":3},{"parent":"FUNC","rule":"substing","sum":2},{"parent":"FUNC","rule":"substr","sum":172},{"parent":"FUNC","rule":"substring","sum":16470645},{"parent":"FUNC","rule":"substringUTF8","sum":1},{"parent":"FUNC","rule":"substring_index","sum":1},{"parent":"FUNC","rule":"sum","sum":45713563},{"parent":"FUNC","rule":"sumIF","sum":84},{"parent":"FUNC","rule":"sumIf","sum":96775},{"parent":"FUNC","rule":"sum_","sum":356},{"parent":"FUNC","rule":"sum_IF","sum":19423},{"parent":"FUNC","rule":"sum_If","sum":12653},{"parent":"FUNC","rule":"sum_if","sum":4247363},{"parent":"FUNC","rule":"sum_range2","sum":47},{"parent":"FUNC","rule":"sum_recursive_range","sum":1},{"parent":"FUNC","rule":"suma","sum":1},{"parent":"FUNC","rule":"sumif","sum":7586},{"parent":"FUNC","rule":"summ","sum":10},{"parent":"FUNC","rule":"sunstring","sum":2},{"p
arent":"FUNC","rule":"susbstring","sum":1},{"parent":"FUNC","rule":"tableName","sum":43016},{"parent":"FUNC","rule":"tablePath","sum":1433},{"parent":"FUNC","rule":"tableRecordIndex","sum":19},{"parent":"FUNC","rule":"tableRow","sum":22420},{"parent":"FUNC","rule":"table_name","sum":2928},{"parent":"FUNC","rule":"table_path","sum":46},{"parent":"FUNC","rule":"table_row","sum":354},{"parent":"FUNC","rule":"tablename","sum":165184},{"parent":"FUNC","rule":"tablepath","sum":33063},{"parent":"FUNC","rule":"tablerecordindex","sum":111},{"parent":"FUNC","rule":"tablerow","sum":42403},{"parent":"FUNC","rule":"tablerows","sum":5},{"parent":"FUNC","rule":"testBit","sum":34},{"parent":"FUNC","rule":"testbit","sum":38269},{"parent":"FUNC","rule":"testid","sum":2},{"parent":"FUNC","rule":"timestamp","sum":9963},{"parent":"FUNC","rule":"timestamp64","sum":7},{"parent":"FUNC","rule":"timezone","sum":2},{"parent":"FUNC","rule":"toBytes","sum":24336},{"parent":"FUNC","rule":"toDate","sum":57},{"parent":"FUNC","rule":"toDate32","sum":10},{"parent":"FUNC","rule":"toDateTime","sum":21},{"parent":"FUNC","rule":"toDateTimeOrNull","sum":3},{"parent":"FUNC","rule":"toDayOfWeek","sum":2},{"parent":"FUNC","rule":"toDict","sum":95252},{"parent":"FUNC","rule":"toFloat32","sum":4},{"parent":"FUNC","rule":"toInt128","sum":3},{"parent":"FUNC","rule":"toIntervalMonth","sum":2},{"parent":"FUNC","rule":"toLastDayOfMonth","sum":1},{"parent":"FUNC","rule":"toMonth","sum":1},{"parent":"FUNC","rule":"toMultiDict","sum":264},{"parent":"FUNC","rule":"toQuarter","sum":1},{"parent":"FUNC","rule":"toSet","sum":417292},{"parent":"FUNC","rule":"toStartOfMonth","sum":15},{"parent":"FUNC","rule":"toStartOfQuarter","sum":1},{"parent":"FUNC","rule":"toStartOfWeek","sum":9},{"parent":"FUNC","rule":"toString","sum":46},{"parent":"FUNC","rule":"toUInt64","sum":5},{"parent":"FUNC","rule":"toUnixTimestamp","sum":2},{"parent":"FUNC","rule":"toUnixTimestamp64Micro","sum":2},{"parent":"FUNC","rule":"toYear","sum":9},{"pa
rent":"FUNC","rule":"to_bytes","sum":36},{"parent":"FUNC","rule":"to_char","sum":2},{"parent":"FUNC","rule":"to_date","sum":4},{"parent":"FUNC","rule":"to_dict","sum":65},{"parent":"FUNC","rule":"tobytes","sum":154},{"parent":"FUNC","rule":"today","sum":1},{"parent":"FUNC","rule":"todict","sum":96532},{"parent":"FUNC","rule":"tomultidict","sum":1132},{"parent":"FUNC","rule":"top","sum":52785},{"parent":"FUNC","rule":"topFreq","sum":90},{"parent":"FUNC","rule":"top_BY","sum":2},{"parent":"FUNC","rule":"top_by","sum":116981},{"parent":"FUNC","rule":"top_freq","sum":113},{"parent":"FUNC","rule":"topfreq","sum":23256},{"parent":"FUNC","rule":"topg","sum":2},{"parent":"FUNC","rule":"toset","sum":37979},{"parent":"FUNC","rule":"trunc","sum":13},{"parent":"FUNC","rule":"truncate","sum":2},{"parent":"FUNC","rule":"tryMember","sum":527},{"parent":"FUNC","rule":"trymember","sum":9922},{"parent":"FUNC","rule":"tupleElement","sum":2},{"parent":"FUNC","rule":"typeOf","sum":38},{"parent":"FUNC","rule":"typeof","sum":417},{"parent":"FUNC","rule":"tzdate","sum":8},{"parent":"FUNC","rule":"tzdate32","sum":7},{"parent":"FUNC","rule":"tzdatetime","sum":44},{"parent":"FUNC","rule":"tzdatetime64","sum":7},{"parent":"FUNC","rule":"tztimestamp","sum":35},{"parent":"FUNC","rule":"tztimestamp64","sum":7},{"parent":"FUNC","rule":"uNWRAP","sum":88},{"parent":"FUNC","rule":"udaf","sum":18138},{"parent":"FUNC","rule":"uint32","sum":13393},{"parent":"FUNC","rule":"uint64","sum":30},{"parent":"FUNC","rule":"uint8","sum":2},{"parent":"FUNC","rule":"unWRap","sum":3},{"parent":"FUNC","rule":"unWrap","sum":5},{"parent":"FUNC","rule":"uniq","sum":9},{"parent":"FUNC","rule":"uniqExact","sum":10},{"parent":"FUNC","rule":"unique","sum":1},{"parent":"FUNC","rule":"unique_pairs","sum":1},{"parent":"FUNC","rule":"unnest","sum":3},{"parent":"FUNC","rule":"untag","sum":1569},{"parent":"FUNC","rule":"unwrap","sum":26989251},{"parent":"FUNC","rule":"unwraped","sum":1},{"parent":"FUNC","rule":"upper","sum":2},{"
parent":"FUNC","rule":"using","sum":12},{"parent":"FUNC","rule":"utc_action_created_dttm","sum":1},{"parent":"FUNC","rule":"utf8","sum":2041},{"parent":"FUNC","rule":"uuid","sum":25},{"parent":"FUNC","rule":"values","sum":6},{"parent":"FUNC","rule":"varPop","sum":75},{"parent":"FUNC","rule":"varSamp","sum":85},{"parent":"FUNC","rule":"var_samp","sum":43},{"parent":"FUNC","rule":"variance","sum":66920},{"parent":"FUNC","rule":"variance_population","sum":4},{"parent":"FUNC","rule":"variance_sample","sum":394},{"parent":"FUNC","rule":"varpop","sum":19},{"parent":"FUNC","rule":"version","sum":16},{"parent":"FUNC","rule":"visitParamExtractFloat","sum":4},{"parent":"FUNC","rule":"vl","sum":2},{"parent":"FUNC","rule":"way","sum":38547},{"parent":"FUNC","rule":"weakField","sum":1077},{"parent":"FUNC","rule":"weakfield","sum":967371},{"parent":"FUNC","rule":"windowFunnel","sum":1},{"parent":"FUNC","rule":"worked_rules","sum":3},{"parent":"FUNC","rule":"wrap","sum":1},{"parent":"FUNC","rule":"yesterday","sum":4},{"parent":"FUNC","rule":"yson","sum":42},{"parent":"FUNC","rule":"ytListTables","sum":1},{"parent":"MODULE","rule":"Compress","sum":84392},{"parent":"MODULE","rule":"DATETIME","sum":1138},{"parent":"MODULE","rule":"DATEtime","sum":2},{"parent":"MODULE","rule":"DAteTime","sum":3313},{"parent":"MODULE","rule":"DAtetime","sum":7},{"parent":"MODULE","rule":"DaTETIME","sum":342},{"parent":"MODULE","rule":"DaTeTime","sum":84},{"parent":"MODULE","rule":"DateTIME","sum":725},{"parent":"MODULE","rule":"DateTIme","sum":4270},{"parent":"MODULE","rule":"DateTime","sum":274712777},{"parent":"MODULE","rule":"DatetIme","sum":367},{"parent":"MODULE","rule":"Datetime","sum":7049867},{"parent":"MODULE","rule":"Decompress","sum":24116},{"parent":"MODULE","rule":"Digest","sum":7633296},{"parent":"MODULE","rule":"HyperScan","sum":2366},{"parent":"MODULE","rule":"Hyperscan","sum":388855},{"parent":"MODULE","rule":"Ip","sum":1388948},{"parent":"MODULE","rule":"JSON","sum":27043},{"parent":"
MODULE","rule":"JSon","sum":2},{"parent":"MODULE","rule":"Json","sum":1037994},{"parent":"MODULE","rule":"MATH","sum":4},{"parent":"MODULE","rule":"Math","sum":44475654},{"parent":"MODULE","rule":"PIRE","sum":29},{"parent":"MODULE","rule":"Pire","sum":1912006},{"parent":"MODULE","rule":"Protobuf","sum":266369},{"parent":"MODULE","rule":"RE2","sum":5040},{"parent":"MODULE","rule":"Re2","sum":12119791},{"parent":"MODULE","rule":"STRING","sum":6},{"parent":"MODULE","rule":"String","sum":94884988},{"parent":"MODULE","rule":"TryDecompress","sum":4663},{"parent":"MODULE","rule":"URL","sum":2},{"parent":"MODULE","rule":"Unicode","sum":4876299},{"parent":"MODULE","rule":"Url","sum":23432845},{"parent":"MODULE","rule":"YSON","sum":25},{"parent":"MODULE","rule":"YSon","sum":10},{"parent":"MODULE","rule":"Yson","sum":393818003},{"parent":"MODULE","rule":"dateTime","sum":458},{"parent":"MODULE","rule":"datetime","sum":14231},{"parent":"MODULE","rule":"digest","sum":1},{"parent":"MODULE","rule":"hyperscan","sum":1},{"parent":"MODULE","rule":"json","sum":8},{"parent":"MODULE","rule":"math","sum":9},{"parent":"MODULE","rule":"pire","sum":36},{"parent":"MODULE","rule":"re2","sum":3094},{"parent":"MODULE","rule":"string","sum":21},{"parent":"MODULE","rule":"url","sum":1},{"parent":"MODULE","rule":"ySoN","sum":1},{"parent":"MODULE","rule":"yson","sum":54},{"parent":"MODULE_FUNC","rule":"Compress::BZip2","sum":2},{"parent":"MODULE_FUNC","rule":"Compress::BlockCodec","sum":6},{"parent":"MODULE_FUNC","rule":"Compress::Brotli","sum":160},{"parent":"MODULE_FUNC","rule":"Compress::Gzip","sum":83425},{"parent":"MODULE_FUNC","rule":"Compress::Lz4","sum":605},{"parent":"MODULE_FUNC","rule":"Compress::Lzma","sum":4},{"parent":"MODULE_FUNC","rule":"Compress::Snappy","sum":7},{"parent":"MODULE_FUNC","rule":"Compress::Zlib","sum":32},{"parent":"MODULE_FUNC","rule":"Compress::Zstd","sum":151},{"parent":"MODULE_FUNC","rule":"DATETIME::Format","sum":10},{"parent":"MODULE_FUNC","rule":"DATETIME::From
Milliseconds","sum":50},{"parent":"MODULE_FUNC","rule":"DATETIME::FromSeconds","sum":1},{"parent":"MODULE_FUNC","rule":"DATETIME::GetYear","sum":18},{"parent":"MODULE_FUNC","rule":"DATETIME::MakeDate","sum":744},{"parent":"MODULE_FUNC","rule":"DATETIME::MakeDatetime","sum":292},{"parent":"MODULE_FUNC","rule":"DATETIME::Parse","sum":9},{"parent":"MODULE_FUNC","rule":"DATETIME::StartOfWeek","sum":14},{"parent":"MODULE_FUNC","rule":"DATEtime::GetMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DATEtime::GetYear","sum":1},{"parent":"MODULE_FUNC","rule":"DAteTime::FromSeconds","sum":1},{"parent":"MODULE_FUNC","rule":"DAteTime::GetDayOfMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DAteTime::MakeDate","sum":562},{"parent":"MODULE_FUNC","rule":"DAteTime::Parse","sum":150},{"parent":"MODULE_FUNC","rule":"DAteTime::StartOfMonth","sum":2590},{"parent":"MODULE_FUNC","rule":"DAteTime::StartOfWeek","sum":1},{"parent":"MODULE_FUNC","rule":"DAteTime::ToDays","sum":8},{"parent":"MODULE_FUNC","rule":"DAtetime::FromSeconds","sum":1},{"parent":"MODULE_FUNC","rule":"DAtetime::MakeDatetime","sum":2},{"parent":"MODULE_FUNC","rule":"DAtetime::ToStartOfWeek","sum":4},{"parent":"MODULE_FUNC","rule":"DaTETIME::StartOfWeek","sum":342},{"parent":"MODULE_FUNC","rule":"DaTeTime::GetMonth","sum":4},{"parent":"MODULE_FUNC","rule":"DaTeTime::GetYear","sum":4},{"parent":"MODULE_FUNC","rule":"DaTeTime::IntervalFromDays","sum":1},{"parent":"MODULE_FUNC","rule":"DaTeTime::MakeDate","sum":32},{"parent":"MODULE_FUNC","rule":"DaTeTime::ShiftMonths","sum":42},{"parent":"MODULE_FUNC","rule":"DaTeTime::StartOfMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTIME::IntervalFromDays","sum":725},{"parent":"MODULE_FUNC","rule":"DateTIme::EndOfMonth","sum":3},{"parent":"MODULE_FUNC","rule":"DateTIme::Format","sum":11},{"parent":"MODULE_FUNC","rule":"DateTIme::FromMicroseconds","sum":11},{"parent":"MODULE_FUNC","rule":"DateTIme::FromSeconds","sum":54},{"parent":"MODULE_FUNC","rule":"DateTIme::GetDayOfWeek","sum"
:44},{"parent":"MODULE_FUNC","rule":"DateTIme::GetHour","sum":2},{"parent":"MODULE_FUNC","rule":"DateTIme::GetMinute","sum":38},{"parent":"MODULE_FUNC","rule":"DateTIme::GetYear","sum":47},{"parent":"MODULE_FUNC","rule":"DateTIme::IntervalFromDays","sum":46},{"parent":"MODULE_FUNC","rule":"DateTIme::MakeDate","sum":3553},{"parent":"MODULE_FUNC","rule":"DateTIme::MakeDatetime","sum":284},{"parent":"MODULE_FUNC","rule":"DateTIme::MakeTimestamp","sum":7},{"parent":"MODULE_FUNC","rule":"DateTIme::MakeTzTimestamp","sum":4},{"parent":"MODULE_FUNC","rule":"DateTIme::Parse","sum":7},{"parent":"MODULE_FUNC","rule":"DateTIme::ParseIso8601","sum":2},{"parent":"MODULE_FUNC","rule":"DateTIme::ShiftMonths","sum":6},{"parent":"MODULE_FUNC","rule":"DateTIme::StartOfMonth","sum":135},{"parent":"MODULE_FUNC","rule":"DateTIme::StartOfWeek","sum":1},{"parent":"MODULE_FUNC","rule":"DateTIme::ToDays","sum":12},{"parent":"MODULE_FUNC","rule":"DateTIme::ToHours","sum":1},{"parent":"MODULE_FUNC","rule":"DateTIme::ToMinutes","sum":1},{"parent":"MODULE_FUNC","rule":"DateTIme::ToSeconds","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::AddTimezone","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Convert","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::CurrentDate","sum":5},{"parent":"MODULE_FUNC","rule":"DateTime::CurrentDateTimeUTC","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::CurrentUtcDate","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Date","sum":4},{"parent":"MODULE_FUNC","rule":"DateTime::DateTime","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::DatetimeStartOfMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::DayOfWeek","sum":7},{"parent":"MODULE_FUNC","rule":"DateTime::Days","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::DiffMinutes","sum":8},{"parent":"MODULE_FUNC","rule":"DateTime::DiffMonths","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Difference","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::EndOf","sum":1},{"parent":"MODULE_FUNC","rule"
:"DateTime::EndOfDay","sum":251},{"parent":"MODULE_FUNC","rule":"DateTime::EndOfMonth","sum":37082},{"parent":"MODULE_FUNC","rule":"DateTime::EndOfQuarter","sum":306},{"parent":"MODULE_FUNC","rule":"DateTime::EndOfWeek","sum":508},{"parent":"MODULE_FUNC","rule":"DateTime::EndOfYear","sum":136},{"parent":"MODULE_FUNC","rule":"DateTime::EndtOfMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::ExtractHour","sum":4},{"parent":"MODULE_FUNC","rule":"DateTime::FROMMilliseconds","sum":14},{"parent":"MODULE_FUNC","rule":"DateTime::FROMSeconds","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::Format","sum":39707127},{"parent":"MODULE_FUNC","rule":"DateTime::FormatTime","sum":6},{"parent":"MODULE_FUNC","rule":"DateTime::FromDays","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::FromMicroSeconds","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::FromMicroseconds","sum":4616490},{"parent":"MODULE_FUNC","rule":"DateTime::FromMicroseconds64","sum":118},{"parent":"MODULE_FUNC","rule":"DateTime::FromMilliSeconds","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::FromMilliseconds","sum":9165394},{"parent":"MODULE_FUNC","rule":"DateTime::FromMilliseconds64","sum":765},{"parent":"MODULE_FUNC","rule":"DateTime::FromSecond","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::FromSeconds","sum":15247163},{"parent":"MODULE_FUNC","rule":"DateTime::FromSeconds64","sum":857},{"parent":"MODULE_FUNC","rule":"DateTime::FromString","sum":368},{"parent":"MODULE_FUNC","rule":"DateTime::FromTimeZone","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::Fromat","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::GetDay","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::GetDayOfMonth","sum":585699},{"parent":"MODULE_FUNC","rule":"DateTime::GetDayOfWeek","sum":631982},{"parent":"MODULE_FUNC","rule":"DateTime::GetDayOfWeekName","sum":125803},{"parent":"MODULE_FUNC","rule":"DateTime::GetDayOfYear","sum":43988},{"parent":"MODULE_FUNC","rule":"DateTime::GetHour","sum":1448753},{"parent":"MODULE_F
UNC","rule":"DateTime::GetLastDayOfMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::GetMicrosecondOfSecond","sum":1288},{"parent":"MODULE_FUNC","rule":"DateTime::GetMillisecondOfSecond","sum":57},{"parent":"MODULE_FUNC","rule":"DateTime::GetMinute","sum":290921},{"parent":"MODULE_FUNC","rule":"DateTime::GetMonth","sum":700509},{"parent":"MODULE_FUNC","rule":"DateTime::GetMonthName","sum":38082},{"parent":"MODULE_FUNC","rule":"DateTime::GetMonthOfYear","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::GetSecond","sum":88516},{"parent":"MODULE_FUNC","rule":"DateTime::GetTimezoneId","sum":103},{"parent":"MODULE_FUNC","rule":"DateTime::GetTimezoneName","sum":523},{"parent":"MODULE_FUNC","rule":"DateTime::GetWeek","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::GetWeekOfYear","sum":499728},{"parent":"MODULE_FUNC","rule":"DateTime::GetWeekOfYearIso8601","sum":33942},{"parent":"MODULE_FUNC","rule":"DateTime::GetYEAR","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::GetYear","sum":773010},{"parent":"MODULE_FUNC","rule":"DateTime::Interval","sum":21},{"parent":"MODULE_FUNC","rule":"DateTime::Interval64FromDays","sum":1750},{"parent":"MODULE_FUNC","rule":"DateTime::Interval64FromHours","sum":3701},{"parent":"MODULE_FUNC","rule":"DateTime::Interval64FromMicroseconds","sum":9},{"parent":"MODULE_FUNC","rule":"DateTime::Interval64FromMilliseconds","sum":8},{"parent":"MODULE_FUNC","rule":"DateTime::Interval64FromMinutes","sum":107},{"parent":"MODULE_FUNC","rule":"DateTime::Interval64FromSeconds","sum":122},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFROMDays","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFrom","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromDays","sum":15010793},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromHDays","sum":28},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromHours","sum":8923033},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromMicroseconds","sum":76251},{"parent":"MODULE_FUNC","rule"
:"DateTime::IntervalFromMilliseconds","sum":512537},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromMinute","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromMinutes","sum":4240370},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromMonth","sum":9},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromMonths","sum":7},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromSeconds","sum":945211},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromYears","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalfromDays","sum":23},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalfromHours","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::LastDayOfMonth","sum":6},{"parent":"MODULE_FUNC","rule":"DateTime::MakeData","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::MakeDate","sum":23028266},{"parent":"MODULE_FUNC","rule":"DateTime::MakeDate32","sum":68},{"parent":"MODULE_FUNC","rule":"DateTime::MakeDateTime","sum":226},{"parent":"MODULE_FUNC","rule":"DateTime::MakeDatetime","sum":36736676},{"parent":"MODULE_FUNC","rule":"DateTime::MakeDatetime64","sum":110},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTimestamp","sum":7825820},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTimestamp64","sum":290},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTzDate","sum":261460},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTzDateTime","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTzDatetime","sum":3135906},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTzDatetime64","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTzTimestamp","sum":151709},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTzTimestamp64","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::Makedate","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::MilliSeconds","sum":156},{"parent":"MODULE_FUNC","rule":"DateTime::NOW","sum":4},{"parent":"MODULE_FUNC","rule":"DateTime::Now","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::Parce","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::P
arse","sum":23530104},{"parent":"MODULE_FUNC","rule":"DateTime::Parse64","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Parse8601","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::ParseDateTime","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::ParseDateTimeBestEffort","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::ParseFromString","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::ParseHttp","sum":26823},{"parent":"MODULE_FUNC","rule":"DateTime::ParseIso","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::ParseIso8601","sum":18450539},{"parent":"MODULE_FUNC","rule":"DateTime::ParseRfc822","sum":1853},{"parent":"MODULE_FUNC","rule":"DateTime::ParseX509","sum":237},{"parent":"MODULE_FUNC","rule":"DateTime::STartOfWeek","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Shift","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftDay","sum":51},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftDays","sum":202},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftMinutes","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftMonth","sum":11},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftMonths","sum":3278312},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftQuarters","sum":382929},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftWeek","sum":7},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftWeeks","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftYears","sum":667243},{"parent":"MODULE_FUNC","rule":"DateTime::Split","sum":706293},{"parent":"MODULE_FUNC","rule":"DateTime::StartOf","sum":2326728},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfDay","sum":2630469},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfHour","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfMohth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfMonth","sum":4135195},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfQuarter","sum":547470},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfWeek","sum":2073964},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfYear","sum":9345
64},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfmonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfweek","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::StartofQuarter","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::TimeOfDay","sum":67013},{"parent":"MODULE_FUNC","rule":"DateTime::TimestampFromMicroSeconds","sum":364},{"parent":"MODULE_FUNC","rule":"DateTime::TimestampFromMilliSeconds","sum":2599},{"parent":"MODULE_FUNC","rule":"DateTime::TimestampFromMinutes","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::TimestampFromSeconds","sum":391},{"parent":"MODULE_FUNC","rule":"DateTime::TimestampFromString","sum":425},{"parent":"MODULE_FUNC","rule":"DateTime::TimestampStartOfMonth","sum":818},{"parent":"MODULE_FUNC","rule":"DateTime::TimestampStartOfWeek","sum":394},{"parent":"MODULE_FUNC","rule":"DateTime::To","sum":5},{"parent":"MODULE_FUNC","rule":"DateTime::ToDate","sum":1460},{"parent":"MODULE_FUNC","rule":"DateTime::ToDateTime","sum":6},{"parent":"MODULE_FUNC","rule":"DateTime::ToDays","sum":3455240},{"parent":"MODULE_FUNC","rule":"DateTime::ToHours","sum":1318595},{"parent":"MODULE_FUNC","rule":"DateTime::ToIsoFormat","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::ToMicroseconds","sum":2192443},{"parent":"MODULE_FUNC","rule":"DateTime::ToMilliseconds","sum":5240333},{"parent":"MODULE_FUNC","rule":"DateTime::ToMinutes","sum":850881},{"parent":"MODULE_FUNC","rule":"DateTime::ToMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::ToMonths","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::ToSeconds","sum":23356009},{"parent":"MODULE_FUNC","rule":"DateTime::ToSeconds64","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Today","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Trunc","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Update","sum":3638500},{"parent":"MODULE_FUNC","rule":"DateTime::format","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::fromSeconds","sum":15},{"parent":"MODULE_FUNC","rule":"DateTime::parse","sum
":1},{"parent":"MODULE_FUNC","rule":"DateTime::parseiso8601","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::toDate","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::toSeconds","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::toStartOfMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::todate","sum":1},{"parent":"MODULE_FUNC","rule":"DatetIme::IntervalFromDays","sum":367},{"parent":"MODULE_FUNC","rule":"Datetime::CurrentDate","sum":2},{"parent":"MODULE_FUNC","rule":"Datetime::CurrentUtcDatetime","sum":1},{"parent":"MODULE_FUNC","rule":"Datetime::DaysInMonth","sum":1},{"parent":"MODULE_FUNC","rule":"Datetime::EndOfMonth","sum":157},{"parent":"MODULE_FUNC","rule":"Datetime::EndOfWeek","sum":43},{"parent":"MODULE_FUNC","rule":"Datetime::Format","sum":366004},{"parent":"MODULE_FUNC","rule":"Datetime::FromMicroseconds","sum":36568},{"parent":"MODULE_FUNC","rule":"Datetime::FromMilliseconds","sum":768052},{"parent":"MODULE_FUNC","rule":"Datetime::FromSeconds","sum":566372},{"parent":"MODULE_FUNC","rule":"Datetime::FromSeconds64","sum":24},{"parent":"MODULE_FUNC","rule":"Datetime::GetDay","sum":1},{"parent":"MODULE_FUNC","rule":"Datetime::GetDayOfMonth","sum":757},{"parent":"MODULE_FUNC","rule":"Datetime::GetDayOfWeek","sum":12730},{"parent":"MODULE_FUNC","rule":"Datetime::GetDayOfWeekName","sum":850},{"parent":"MODULE_FUNC","rule":"Datetime::GetDayOfYear","sum":59},{"parent":"MODULE_FUNC","rule":"Datetime::GetHour","sum":14277},{"parent":"MODULE_FUNC","rule":"Datetime::GetMinute","sum":13316},{"parent":"MODULE_FUNC","rule":"Datetime::GetMonth","sum":1025},{"parent":"MODULE_FUNC","rule":"Datetime::GetMonthName","sum":330},{"parent":"MODULE_FUNC","rule":"Datetime::GetWeekOfYear","sum":2649},{"parent":"MODULE_FUNC","rule":"Datetime::GetWeekOfYearIso8601","sum":3},{"parent":"MODULE_FUNC","rule":"Datetime::GetYear","sum":2683},{"parent":"MODULE_FUNC","rule":"Datetime::Interval","sum":9},{"parent":"MODULE_FUNC","rule":"Datetime::IntervalFromDays","sum":369388},{"pa
rent":"MODULE_FUNC","rule":"Datetime::IntervalFromHours","sum":263462},{"parent":"MODULE_FUNC","rule":"Datetime::IntervalFromMicroseconds","sum":194},{"parent":"MODULE_FUNC","rule":"Datetime::IntervalFromMilliseconds","sum":91},{"parent":"MODULE_FUNC","rule":"Datetime::IntervalFromMinutes","sum":49017},{"parent":"MODULE_FUNC","rule":"Datetime::IntervalFromSeconds","sum":9503},{"parent":"MODULE_FUNC","rule":"Datetime::MakeDate","sum":671239},{"parent":"MODULE_FUNC","rule":"Datetime::MakeDate32","sum":1},{"parent":"MODULE_FUNC","rule":"Datetime::MakeDateTime","sum":2},{"parent":"MODULE_FUNC","rule":"Datetime::MakeDatetime","sum":916311},{"parent":"MODULE_FUNC","rule":"Datetime::MakeDatetime64","sum":1},{"parent":"MODULE_FUNC","rule":"Datetime::MakeTimestamp","sum":423835},{"parent":"MODULE_FUNC","rule":"Datetime::MakeTzDate","sum":2262},{"parent":"MODULE_FUNC","rule":"Datetime::MakeTzDatetime","sum":23148},{"parent":"MODULE_FUNC","rule":"Datetime::MakeTzTimestamp","sum":33029},{"parent":"MODULE_FUNC","rule":"Datetime::Makedate","sum":6},{"parent":"MODULE_FUNC","rule":"Datetime::Parse","sum":137796},{"parent":"MODULE_FUNC","rule":"Datetime::ParseIso8601","sum":255648},{"parent":"MODULE_FUNC","rule":"Datetime::ShiftMonths","sum":25955},{"parent":"MODULE_FUNC","rule":"Datetime::ShiftQuarters","sum":538},{"parent":"MODULE_FUNC","rule":"Datetime::ShiftYears","sum":286},{"parent":"MODULE_FUNC","rule":"Datetime::Split","sum":209},{"parent":"MODULE_FUNC","rule":"Datetime::StartOf","sum":64222},{"parent":"MODULE_FUNC","rule":"Datetime::StartOfDay","sum":20717},{"parent":"MODULE_FUNC","rule":"Datetime::StartOfMonth","sum":74361},{"parent":"MODULE_FUNC","rule":"Datetime::StartOfQuarter","sum":4336},{"parent":"MODULE_FUNC","rule":"Datetime::StartOfWeek","sum":23122},{"parent":"MODULE_FUNC","rule":"Datetime::StartOfYear","sum":11695},{"parent":"MODULE_FUNC","rule":"Datetime::TimeOfDay","sum":127},{"parent":"MODULE_FUNC","rule":"Datetime::ToDatetime","sum":12},{"parent":"MODULE_FUN
C","rule":"Datetime::ToDays","sum":139662},{"parent":"MODULE_FUNC","rule":"Datetime::ToHours","sum":22923},{"parent":"MODULE_FUNC","rule":"Datetime::ToMicroseconds","sum":587},{"parent":"MODULE_FUNC","rule":"Datetime::ToMilliseconds","sum":122898},{"parent":"MODULE_FUNC","rule":"Datetime::ToMinutes","sum":44700},{"parent":"MODULE_FUNC","rule":"Datetime::ToSeconds","sum":1523247},{"parent":"MODULE_FUNC","rule":"Datetime::Update","sum":29424},{"parent":"MODULE_FUNC","rule":"Decompress::BZip2","sum":513},{"parent":"MODULE_FUNC","rule":"Decompress::Brotli","sum":7933},{"parent":"MODULE_FUNC","rule":"Decompress::Gzip","sum":3929},{"parent":"MODULE_FUNC","rule":"Decompress::Lz4","sum":890},{"parent":"MODULE_FUNC","rule":"Decompress::Lzma","sum":1},{"parent":"MODULE_FUNC","rule":"Decompress::Snappy","sum":470},{"parent":"MODULE_FUNC","rule":"Decompress::Xz","sum":1},{"parent":"MODULE_FUNC","rule":"Decompress::Zlib","sum":10349},{"parent":"MODULE_FUNC","rule":"Decompress::Zstd","sum":30},{"parent":"MODULE_FUNC","rule":"Digest::Argon2","sum":48904},{"parent":"MODULE_FUNC","rule":"Digest::Blake2B","sum":4570},{"parent":"MODULE_FUNC","rule":"Digest::CityHash","sum":1214048},{"parent":"MODULE_FUNC","rule":"Digest::CityHash128","sum":17592},{"parent":"MODULE_FUNC","rule":"Digest::Crc32c","sum":77608},{"parent":"MODULE_FUNC","rule":"Digest::Crc64","sum":139930},{"parent":"MODULE_FUNC","rule":"Digest::FarmHashFingerprint","sum":67997},{"parent":"MODULE_FUNC","rule":"Digest::FarmHashFingerprint128","sum":26},{"parent":"MODULE_FUNC","rule":"Digest::FarmHashFingerprint2","sum":161650},{"parent":"MODULE_FUNC","rule":"Digest::FarmHashFingerprint32","sum":208},{"parent":"MODULE_FUNC","rule":"Digest::FarmHashFingerprint64","sum":1043933},{"parent":"MODULE_FUNC","rule":"Digest::Fnv32","sum":1733},{"parent":"MODULE_FUNC","rule":"Digest::Fnv64","sum":261975},{"parent":"MODULE_FUNC","rule":"Digest::IntHash64","sum":18637},{"parent":"MODULE_FUNC","rule":"Digest::MD5Hex","sum":1},{"parent":"MO
DULE_FUNC","rule":"Digest::Md5","sum":2},{"parent":"MODULE_FUNC","rule":"Digest::Md5HalfMix","sum":425111},{"parent":"MODULE_FUNC","rule":"Digest::Md5Hex","sum":564006},{"parent":"MODULE_FUNC","rule":"Digest::Md5Raw","sum":15887},{"parent":"MODULE_FUNC","rule":"Digest::MurMurHash","sum":2387613},{"parent":"MODULE_FUNC","rule":"Digest::MurMurHash2A","sum":2482},{"parent":"MODULE_FUNC","rule":"Digest::MurMurHash2A32","sum":854},{"parent":"MODULE_FUNC","rule":"Digest::MurMurHash32","sum":258742},{"parent":"MODULE_FUNC","rule":"Digest::NimericHash","sum":3},{"parent":"MODULE_FUNC","rule":"Digest::NumericHash","sum":268509},{"parent":"MODULE_FUNC","rule":"Digest::Sha1","sum":42089},{"parent":"MODULE_FUNC","rule":"Digest::Sha256","sum":395607},{"parent":"MODULE_FUNC","rule":"Digest::SipHash","sum":130836},{"parent":"MODULE_FUNC","rule":"Digest::SuperFastHash","sum":33943},{"parent":"MODULE_FUNC","rule":"Digest::XXH3","sum":48781},{"parent":"MODULE_FUNC","rule":"Digest::XXH3_128","sum":19},{"parent":"MODULE_FUNC","rule":"HyperScan::BacktrackingGrep","sum":1},{"parent":"MODULE_FUNC","rule":"HyperScan::Grep","sum":2359},{"parent":"MODULE_FUNC","rule":"HyperScan::Match","sum":6},{"parent":"MODULE_FUNC","rule":"Hyperscan::BacktrackingGrep","sum":45875},{"parent":"MODULE_FUNC","rule":"Hyperscan::BacktrackingMatch","sum":127},{"parent":"MODULE_FUNC","rule":"Hyperscan::Capture","sum":5834},{"parent":"MODULE_FUNC","rule":"Hyperscan::Grep","sum":145920},{"parent":"MODULE_FUNC","rule":"Hyperscan::Match","sum":53623},{"parent":"MODULE_FUNC","rule":"Hyperscan::MultiGrep","sum":62},{"parent":"MODULE_FUNC","rule":"Hyperscan::MultiMatch","sum":40908},{"parent":"MODULE_FUNC","rule":"Hyperscan::Replace","sum":96506},{"parent":"MODULE_FUNC","rule":"Ip::ConvertToIPv6","sum":44300},{"parent":"MODULE_FUNC","rule":"Ip::FromString","sum":389162},{"parent":"MODULE_FUNC","rule":"Ip::GetSubnet","sum":135675},{"parent":"MODULE_FUNC","rule":"Ip::GetSubnetByMask","sum":4},{"parent":"MODULE_FUNC","rule
":"Ip::IsEmbeddedIPv4","sum":6914},{"parent":"MODULE_FUNC","rule":"Ip::IsIPv4","sum":136411},{"parent":"MODULE_FUNC","rule":"Ip::IsIPv6","sum":118870},{"parent":"MODULE_FUNC","rule":"Ip::SubnetFromString","sum":548},{"parent":"MODULE_FUNC","rule":"Ip::SubnetMatch","sum":501},{"parent":"MODULE_FUNC","rule":"Ip::ToFixedIPv6String","sum":3829},{"parent":"MODULE_FUNC","rule":"Ip::ToString","sum":552734},{"parent":"MODULE_FUNC","rule":"JSON::ConvertToDouble","sum":19612},{"parent":"MODULE_FUNC","rule":"JSON::ConvertToInt64","sum":3261},{"parent":"MODULE_FUNC","rule":"JSON::ConvertToList","sum":15},{"parent":"MODULE_FUNC","rule":"JSON::ConvertToString","sum":133},{"parent":"MODULE_FUNC","rule":"JSON::ConvertToStringList","sum":4},{"parent":"MODULE_FUNC","rule":"JSON::From","sum":5},{"parent":"MODULE_FUNC","rule":"JSON::LookupBool","sum":5},{"parent":"MODULE_FUNC","rule":"JSON::LookupDouble","sum":10},{"parent":"MODULE_FUNC","rule":"JSON::LookupInt64","sum":8},{"parent":"MODULE_FUNC","rule":"JSON::LookupString","sum":245},{"parent":"MODULE_FUNC","rule":"JSON::PARSE","sum":2},{"parent":"MODULE_FUNC","rule":"JSON::Parse","sum":3743},{"parent":"MODULE_FUNC","rule":"JSon::From","sum":2},{"parent":"MODULE_FUNC","rule":"Json::Attributes","sum":2},{"parent":"MODULE_FUNC","rule":"Json::Contains","sum":1},{"parent":"MODULE_FUNC","rule":"Json::ConvertToBool","sum":6},{"parent":"MODULE_FUNC","rule":"Json::ConvertToDict","sum":114},{"parent":"MODULE_FUNC","rule":"Json::ConvertToDouble","sum":1},{"parent":"MODULE_FUNC","rule":"Json::ConvertToInt64","sum":1},{"parent":"MODULE_FUNC","rule":"Json::ConvertToList","sum":116},{"parent":"MODULE_FUNC","rule":"Json::ConvertToString","sum":168},{"parent":"MODULE_FUNC","rule":"Json::ConvertToStringDict","sum":5},{"parent":"MODULE_FUNC","rule":"Json::ConvertToStringList","sum":21},{"parent":"MODULE_FUNC","rule":"Json::From","sum":3765},{"parent":"MODULE_FUNC","rule":"Json::FromString","sum":16},{"parent":"MODULE_FUNC","rule":"Json::GetField","sum"
:21},{"parent":"MODULE_FUNC","rule":"Json::GetHash","sum":1},{"parent":"MODULE_FUNC","rule":"Json::GetLength","sum":19},{"parent":"MODULE_FUNC","rule":"Json::Lookup","sum":1},{"parent":"MODULE_FUNC","rule":"Json::LookupInt64","sum":847912},{"parent":"MODULE_FUNC","rule":"Json::LookupString","sum":53},{"parent":"MODULE_FUNC","rule":"Json::Options","sum":852},{"parent":"MODULE_FUNC","rule":"Json::Parse","sum":181050},{"parent":"MODULE_FUNC","rule":"Json::ParseJson","sum":205},{"parent":"MODULE_FUNC","rule":"Json::Serialize","sum":1556},{"parent":"MODULE_FUNC","rule":"Json::SerializeJson","sum":933},{"parent":"MODULE_FUNC","rule":"Json::SerializePretty","sum":1137},{"parent":"MODULE_FUNC","rule":"Json::SerializeText","sum":3},{"parent":"MODULE_FUNC","rule":"Json::YPath","sum":20},{"parent":"MODULE_FUNC","rule":"Json::YPathDict","sum":14},{"parent":"MODULE_FUNC","rule":"Json::YPathString","sum":1},{"parent":"MODULE_FUNC","rule":"MATH::ROUND","sum":4},{"parent":"MODULE_FUNC","rule":"Math::Aabs","sum":1},{"parent":"MODULE_FUNC","rule":"Math::Abs","sum":477715},{"parent":"MODULE_FUNC","rule":"Math::Acos","sum":12415},{"parent":"MODULE_FUNC","rule":"Math::Asin","sum":10403},{"parent":"MODULE_FUNC","rule":"Math::Asinh","sum":4},{"parent":"MODULE_FUNC","rule":"Math::Atan","sum":7800},{"parent":"MODULE_FUNC","rule":"Math::Atan2","sum":12085},{"parent":"MODULE_FUNC","rule":"Math::Cbrt","sum":480},{"parent":"MODULE_FUNC","rule":"Math::Ceil","sum":1248484},{"parent":"MODULE_FUNC","rule":"Math::Cos","sum":87693},{"parent":"MODULE_FUNC","rule":"Math::Cosh","sum":5},{"parent":"MODULE_FUNC","rule":"Math::Crbt","sum":1},{"parent":"MODULE_FUNC","rule":"Math::E","sum":9683},{"parent":"MODULE_FUNC","rule":"Math::EXP","sum":1},{"parent":"MODULE_FUNC","rule":"Math::Eps","sum":181},{"parent":"MODULE_FUNC","rule":"Math::Erf","sum":2543},{"parent":"MODULE_FUNC","rule":"Math::ErfInv","sum":42},{"parent":"MODULE_FUNC","rule":"Math::ErfcInv","sum":6},{"parent":"MODULE_FUNC","rule":"Math::Exp","s
um":558803},{"parent":"MODULE_FUNC","rule":"Math::Exp2","sum":2138},{"parent":"MODULE_FUNC","rule":"Math::Fabs","sum":145504},{"parent":"MODULE_FUNC","rule":"Math::Floor","sum":326311},{"parent":"MODULE_FUNC","rule":"Math::Flor","sum":1},{"parent":"MODULE_FUNC","rule":"Math::Fmod","sum":9},{"parent":"MODULE_FUNC","rule":"Math::FuzzyEquals","sum":18761},{"parent":"MODULE_FUNC","rule":"Math::Hypot","sum":18561},{"parent":"MODULE_FUNC","rule":"Math::IsFinite","sum":185199},{"parent":"MODULE_FUNC","rule":"Math::IsInf","sum":74643},{"parent":"MODULE_FUNC","rule":"Math::IsNaN","sum":251807},{"parent":"MODULE_FUNC","rule":"Math::Ldexp","sum":46},{"parent":"MODULE_FUNC","rule":"Math::Lgamma","sum":4},{"parent":"MODULE_FUNC","rule":"Math::Log","sum":824060},{"parent":"MODULE_FUNC","rule":"Math::Log10","sum":150347},{"parent":"MODULE_FUNC","rule":"Math::Log2","sum":196490},{"parent":"MODULE_FUNC","rule":"Math::Max","sum":3},{"parent":"MODULE_FUNC","rule":"Math::Min","sum":1},{"parent":"MODULE_FUNC","rule":"Math::Mod","sum":65399},{"parent":"MODULE_FUNC","rule":"Math::NearbyInt","sum":410555},{"parent":"MODULE_FUNC","rule":"Math::Pi","sum":71587},{"parent":"MODULE_FUNC","rule":"Math::Pow","sum":1326346},{"parent":"MODULE_FUNC","rule":"Math::Power","sum":6},{"parent":"MODULE_FUNC","rule":"Math::ROUND","sum":5},{"parent":"MODULE_FUNC","rule":"Math::Rem","sum":2325},{"parent":"MODULE_FUNC","rule":"Math::Remainder","sum":171},{"parent":"MODULE_FUNC","rule":"Math::Rint","sum":20445},{"parent":"MODULE_FUNC","rule":"Math::Round","sum":36407421},{"parent":"MODULE_FUNC","rule":"Math::RoundDownward","sum":135216},{"parent":"MODULE_FUNC","rule":"Math::RoundToNearest","sum":74644},{"parent":"MODULE_FUNC","rule":"Math::RoundTowardZero","sum":907},{"parent":"MODULE_FUNC","rule":"Math::RoundUpward","sum":199858},{"parent":"MODULE_FUNC","rule":"Math::Sigmoid","sum":279068},{"parent":"MODULE_FUNC","rule":"Math::Sin","sum":75206},{"parent":"MODULE_FUNC","rule":"Math::Sinh","sum":6030},{"parent"
:"MODULE_FUNC","rule":"Math::Sqrt","sum":612224},{"parent":"MODULE_FUNC","rule":"Math::Tan","sum":4808},{"parent":"MODULE_FUNC","rule":"Math::Tanh","sum":4815},{"parent":"MODULE_FUNC","rule":"Math::Tgamma","sum":60},{"parent":"MODULE_FUNC","rule":"Math::Trunc","sum":156289},{"parent":"MODULE_FUNC","rule":"Math::abs","sum":2},{"parent":"MODULE_FUNC","rule":"Math::ceil","sum":8},{"parent":"MODULE_FUNC","rule":"Math::cos","sum":2},{"parent":"MODULE_FUNC","rule":"Math::exp","sum":6},{"parent":"MODULE_FUNC","rule":"Math::floor","sum":3},{"parent":"MODULE_FUNC","rule":"Math::round","sum":13},{"parent":"MODULE_FUNC","rule":"Math::sin","sum":2},{"parent":"MODULE_FUNC","rule":"Math::sqrt","sum":3},{"parent":"MODULE_FUNC","rule":"PIRE::Capture","sum":7},{"parent":"MODULE_FUNC","rule":"PIRE::Grep","sum":22},{"parent":"MODULE_FUNC","rule":"Pire::Capture","sum":533463},{"parent":"MODULE_FUNC","rule":"Pire::Grep","sum":144104},{"parent":"MODULE_FUNC","rule":"Pire::Match","sum":232792},{"parent":"MODULE_FUNC","rule":"Pire::MultiGrep","sum":607},{"parent":"MODULE_FUNC","rule":"Pire::MultiMatch","sum":152},{"parent":"MODULE_FUNC","rule":"Pire::Replace","sum":1000888},{"parent":"MODULE_FUNC","rule":"Protobuf::Parse","sum":20839},{"parent":"MODULE_FUNC","rule":"Protobuf::Serialize","sum":103024},{"parent":"MODULE_FUNC","rule":"Protobuf::TryParse","sum":142506},{"parent":"MODULE_FUNC","rule":"RE2::Capture","sum":4000},{"parent":"MODULE_FUNC","rule":"RE2::Count","sum":269},{"parent":"MODULE_FUNC","rule":"RE2::FindAndConsume","sum":31},{"parent":"MODULE_FUNC","rule":"RE2::Grep","sum":70},{"parent":"MODULE_FUNC","rule":"RE2::Match","sum":546},{"parent":"MODULE_FUNC","rule":"RE2::Replace","sum":124},{"parent":"MODULE_FUNC","rule":"Re2::Capture","sum":4380331},{"parent":"MODULE_FUNC","rule":"Re2::Catch","sum":1},{"parent":"MODULE_FUNC","rule":"Re2::Compile","sum":4},{"parent":"MODULE_FUNC","rule":"Re2::Count","sum":172802},{"parent":"MODULE_FUNC","rule":"Re2::FindAll","sum":2},{"parent":"MO
DULE_FUNC","rule":"Re2::FindAllSubmatch","sum":2},{"parent":"MODULE_FUNC","rule":"Re2::FindAndConsume","sum":390406},{"parent":"MODULE_FUNC","rule":"Re2::Grep","sum":646127},{"parent":"MODULE_FUNC","rule":"Re2::Match","sum":1694173},{"parent":"MODULE_FUNC","rule":"Re2::Options","sum":251696},{"parent":"MODULE_FUNC","rule":"Re2::Replace","sum":4584232},{"parent":"MODULE_FUNC","rule":"Re2::ReplaceAll","sum":15},{"parent":"MODULE_FUNC","rule":"STRING::AsciiToLower","sum":1},{"parent":"MODULE_FUNC","rule":"STRING::Contains","sum":2},{"parent":"MODULE_FUNC","rule":"STRING::RemoveAll","sum":1},{"parent":"MODULE_FUNC","rule":"STRING::SplitToList","sum":2},{"parent":"MODULE_FUNC","rule":"String::ASciiToLower","sum":1},{"parent":"MODULE_FUNC","rule":"String::AsciiToLower","sum":4111448},{"parent":"MODULE_FUNC","rule":"String::AsciiToTitle","sum":91290},{"parent":"MODULE_FUNC","rule":"String::AsciiToUpper","sum":550631},{"parent":"MODULE_FUNC","rule":"String::Base32Decode","sum":285},{"parent":"MODULE_FUNC","rule":"String::Base32Encode","sum":191},{"parent":"MODULE_FUNC","rule":"String::Base32StrictDecode","sum":53},{"parent":"MODULE_FUNC","rule":"String::Base64Decode","sum":393554},{"parent":"MODULE_FUNC","rule":"String::Base64Encode","sum":111803},{"parent":"MODULE_FUNC","rule":"String::Base64EncodeUrl","sum":5545},{"parent":"MODULE_FUNC","rule":"String::Base64StrictDecode","sum":96933},{"parent":"MODULE_FUNC","rule":"String::Bin","sum":523},{"parent":"MODULE_FUNC","rule":"String::BinText","sum":121},{"parent":"MODULE_FUNC","rule":"String::CgiEscape","sum":66088},{"parent":"MODULE_FUNC","rule":"String::CgiUnescape","sum":24021},{"parent":"MODULE_FUNC","rule":"String::ColapseText","sum":4},{"parent":"MODULE_FUNC","rule":"String::Collapse","sum":192279},{"parent":"MODULE_FUNC","rule":"String::CollapseText","sum":197906},{"parent":"MODULE_FUNC","rule":"String::Contains","sum":6172023},{"parent":"MODULE_FUNC","rule":"String::DecodeHtml","sum":3003},{"parent":"MODULE_FUNC","rule
":"String::EncodeHtml","sum":423},{"parent":"MODULE_FUNC","rule":"String::EndsWith","sum":539754},{"parent":"MODULE_FUNC","rule":"String::EndsWithIgnoreCase","sum":36145},{"parent":"MODULE_FUNC","rule":"String::EscapeC","sum":56401},{"parent":"MODULE_FUNC","rule":"String::Find","sum":652932},{"parent":"MODULE_FUNC","rule":"String::From","sum":2},{"parent":"MODULE_FUNC","rule":"String::FromByteList","sum":1055651},{"parent":"MODULE_FUNC","rule":"String::HasPrefix","sum":20389},{"parent":"MODULE_FUNC","rule":"String::HasPrefixIgnoreCase","sum":48},{"parent":"MODULE_FUNC","rule":"String::HasSuffix","sum":4980},{"parent":"MODULE_FUNC","rule":"String::HasSuffixIgnoreCase","sum":29164},{"parent":"MODULE_FUNC","rule":"String::Hex","sum":371063},{"parent":"MODULE_FUNC","rule":"String::HexDecode","sum":164295},{"parent":"MODULE_FUNC","rule":"String::HexEncode","sum":192212},{"parent":"MODULE_FUNC","rule":"String::HexText","sum":80085},{"parent":"MODULE_FUNC","rule":"String::HumanReadableBytes","sum":243},{"parent":"MODULE_FUNC","rule":"String::HumanReadableDuration","sum":1104910},{"parent":"MODULE_FUNC","rule":"String::HumanReadableQuantity","sum":333},{"parent":"MODULE_FUNC","rule":"String::IsAscii","sum":13898},{"parent":"MODULE_FUNC","rule":"String::IsAsciiAlnum","sum":981},{"parent":"MODULE_FUNC","rule":"String::IsAsciiAlpha","sum":380},{"parent":"MODULE_FUNC","rule":"String::IsAsciiDigit","sum":8629},{"parent":"MODULE_FUNC","rule":"String::IsAsciiHex","sum":19541},{"parent":"MODULE_FUNC","rule":"String::IsAsciiLower","sum":22},{"parent":"MODULE_FUNC","rule":"String::IsAsciiSpace","sum":13},{"parent":"MODULE_FUNC","rule":"String::IsAsciiUpper","sum":350},{"parent":"MODULE_FUNC","rule":"String::Join","sum":2},{"parent":"MODULE_FUNC","rule":"String::JoinFROMList","sum":33364},{"parent":"MODULE_FUNC","rule":"String::JoinFromList","sum":13950209},{"parent":"MODULE_FUNC","rule":"String::LeftPad","sum":64974},{"parent":"MODULE_FUNC","rule":"String::Length","sum":2},{"parent":
"MODULE_FUNC","rule":"String::LevenshteinDistance","sum":4},{"parent":"MODULE_FUNC","rule":"String::LevensteinDistance","sum":11541},{"parent":"MODULE_FUNC","rule":"String::Prec","sum":1854},{"parent":"MODULE_FUNC","rule":"String::RaplaceAll","sum":3},{"parent":"MODULE_FUNC","rule":"String::RemoveAll","sum":1246160},{"parent":"MODULE_FUNC","rule":"String::RemoveFirst","sum":637168},{"parent":"MODULE_FUNC","rule":"String::RemoveLast","sum":551351},{"parent":"MODULE_FUNC","rule":"String::Replace","sum":11},{"parent":"MODULE_FUNC","rule":"String::ReplaceALL","sum":1},{"parent":"MODULE_FUNC","rule":"String::ReplaceAll","sum":15572806},{"parent":"MODULE_FUNC","rule":"String::ReplaceFirst","sum":1349608},{"parent":"MODULE_FUNC","rule":"String::ReplaceFirstStartsWith","sum":1},{"parent":"MODULE_FUNC","rule":"String::ReplaceLast","sum":170281},{"parent":"MODULE_FUNC","rule":"String::ReplaceRegex","sum":1},{"parent":"MODULE_FUNC","rule":"String::Reverse","sum":117285},{"parent":"MODULE_FUNC","rule":"String::ReverseFind","sum":39020},{"parent":"MODULE_FUNC","rule":"String::RightPad","sum":364701},{"parent":"MODULE_FUNC","rule":"String::SBin","sum":7},{"parent":"MODULE_FUNC","rule":"String::SHex","sum":6295},{"parent":"MODULE_FUNC","rule":"String::Split","sum":10},{"parent":"MODULE_FUNC","rule":"String::SplitToList","sum":31568162},{"parent":"MODULE_FUNC","rule":"String::SplitToSet","sum":12},{"parent":"MODULE_FUNC","rule":"String::StartWith","sum":1},{"parent":"MODULE_FUNC","rule":"String::StartsWith","sum":3461444},{"parent":"MODULE_FUNC","rule":"String::StartsWithIgnoreCase","sum":57315},{"parent":"MODULE_FUNC","rule":"String::Strip","sum":3166173},{"parent":"MODULE_FUNC","rule":"String::Substring","sum":300136},{"parent":"MODULE_FUNC","rule":"String::ToByteList","sum":140137},{"parent":"MODULE_FUNC","rule":"String::ToLower","sum":5220539},{"parent":"MODULE_FUNC","rule":"String::ToLowerCase","sum":1},{"parent":"MODULE_FUNC","rule":"String::ToTitle","sum":34209},{"parent":"M
ODULE_FUNC","rule":"String::ToUpper","sum":140356},{"parent":"MODULE_FUNC","rule":"String::Trim","sum":3},{"parent":"MODULE_FUNC","rule":"String::UnescapeC","sum":309291},{"parent":"MODULE_FUNC","rule":"String::contains","sum":1},{"parent":"MODULE_FUNC","rule":"String::splittolist","sum":1},{"parent":"MODULE_FUNC","rule":"String::tolower","sum":2},{"parent":"MODULE_FUNC","rule":"TryDecompress::BZip2","sum":5},{"parent":"MODULE_FUNC","rule":"TryDecompress::BlockCodec","sum":1},{"parent":"MODULE_FUNC","rule":"TryDecompress::Brotli","sum":12},{"parent":"MODULE_FUNC","rule":"TryDecompress::Gzip","sum":967},{"parent":"MODULE_FUNC","rule":"TryDecompress::Lz4","sum":92},{"parent":"MODULE_FUNC","rule":"TryDecompress::Lzma","sum":5},{"parent":"MODULE_FUNC","rule":"TryDecompress::Snappy","sum":11},{"parent":"MODULE_FUNC","rule":"TryDecompress::Xz","sum":5},{"parent":"MODULE_FUNC","rule":"TryDecompress::Zlib","sum":3546},{"parent":"MODULE_FUNC","rule":"TryDecompress::Zstd","sum":19},{"parent":"MODULE_FUNC","rule":"URL::GetHost","sum":2},{"parent":"MODULE_FUNC","rule":"Unicode::Find","sum":150548},{"parent":"MODULE_FUNC","rule":"Unicode::Fold","sum":29451},{"parent":"MODULE_FUNC","rule":"Unicode::FromCodePointList","sum":120601},{"parent":"MODULE_FUNC","rule":"Unicode::GetLength","sum":593619},{"parent":"MODULE_FUNC","rule":"Unicode::GetLengthn","sum":1},{"parent":"MODULE_FUNC","rule":"Unicode::IsAlnum","sum":556},{"parent":"MODULE_FUNC","rule":"Unicode::IsAlpha","sum":465},{"parent":"MODULE_FUNC","rule":"Unicode::IsAscii","sum":660},{"parent":"MODULE_FUNC","rule":"Unicode::IsDigit","sum":9012},{"parent":"MODULE_FUNC","rule":"Unicode::IsHex","sum":4},{"parent":"MODULE_FUNC","rule":"Unicode::IsLower","sum":96},{"parent":"MODULE_FUNC","rule":"Unicode::IsSpace","sum":18},{"parent":"MODULE_FUNC","rule":"Unicode::IsUnicodeSet","sum":423},{"parent":"MODULE_FUNC","rule":"Unicode::IsUpper","sum":1807},{"parent":"MODULE_FUNC","rule":"Unicode::IsUtf","sum":668623},{"parent":"MODULE_FUNC"
,"rule":"Unicode::JoinFromList","sum":201306},{"parent":"MODULE_FUNC","rule":"Unicode::LevensteinDistance","sum":38466},{"parent":"MODULE_FUNC","rule":"Unicode::Normalize","sum":109260},{"parent":"MODULE_FUNC","rule":"Unicode::NormalizeNFC","sum":550},{"parent":"MODULE_FUNC","rule":"Unicode::NormalizeNFD","sum":37},{"parent":"MODULE_FUNC","rule":"Unicode::NormalizeNFKC","sum":6876},{"parent":"MODULE_FUNC","rule":"Unicode::NormalizeNFKD","sum":1714},{"parent":"MODULE_FUNC","rule":"Unicode::RFind","sum":73084},{"parent":"MODULE_FUNC","rule":"Unicode::RemoveAll","sum":139648},{"parent":"MODULE_FUNC","rule":"Unicode::RemoveFirst","sum":7903},{"parent":"MODULE_FUNC","rule":"Unicode::RemoveLast","sum":7941},{"parent":"MODULE_FUNC","rule":"Unicode::ReplaceAll","sum":262575},{"parent":"MODULE_FUNC","rule":"Unicode::ReplaceFirst","sum":1817},{"parent":"MODULE_FUNC","rule":"Unicode::ReplaceLast","sum":318},{"parent":"MODULE_FUNC","rule":"Unicode::Reverse","sum":48417},{"parent":"MODULE_FUNC","rule":"Unicode::SUBSTRING","sum":2},{"parent":"MODULE_FUNC","rule":"Unicode::SplitToList","sum":176816},{"parent":"MODULE_FUNC","rule":"Unicode::Strip","sum":63682},{"parent":"MODULE_FUNC","rule":"Unicode::Substring","sum":597192},{"parent":"MODULE_FUNC","rule":"Unicode::ToCodePointList","sum":123182},{"parent":"MODULE_FUNC","rule":"Unicode::ToLower","sum":1184377},{"parent":"MODULE_FUNC","rule":"Unicode::ToTitle","sum":36928},{"parent":"MODULE_FUNC","rule":"Unicode::ToUint64","sum":342},{"parent":"MODULE_FUNC","rule":"Unicode::ToUpper","sum":111543},{"parent":"MODULE_FUNC","rule":"Unicode::Translit","sum":104468},{"parent":"MODULE_FUNC","rule":"Unicode::TryToUint64","sum":1971},{"parent":"MODULE_FUNC","rule":"Url::AsciiToLower","sum":1},{"parent":"MODULE_FUNC","rule":"Url::BuildQueryString","sum":25601},{"parent":"MODULE_FUNC","rule":"Url::CanBePunycodeHostName","sum":6563},{"parent":"MODULE_FUNC","rule":"Url::CutQueryStringAndFragment","sum":231682},{"parent":"MODULE_FUNC","rule":"Url:
:CutScheme","sum":1121349},{"parent":"MODULE_FUNC","rule":"Url::CutWWW","sum":849205},{"parent":"MODULE_FUNC","rule":"Url::CutWWW2","sum":742495},{"parent":"MODULE_FUNC","rule":"Url::Decode","sum":1696641},{"parent":"MODULE_FUNC","rule":"Url::Encode","sum":367989},{"parent":"MODULE_FUNC","rule":"Url::ForceHostNameToPunycode","sum":200127},{"parent":"MODULE_FUNC","rule":"Url::ForcePunycodeToHostName","sum":132412},{"parent":"MODULE_FUNC","rule":"Url::GetCGIParam","sum":1840630},{"parent":"MODULE_FUNC","rule":"Url::GetCgiParam","sum":7},{"parent":"MODULE_FUNC","rule":"Url::GetDomain","sum":1121266},{"parent":"MODULE_FUNC","rule":"Url::GetDomainLevel","sum":70135},{"parent":"MODULE_FUNC","rule":"Url::GetFragment","sum":501},{"parent":"MODULE_FUNC","rule":"Url::GetHost","sum":4031182},{"parent":"MODULE_FUNC","rule":"Url::GetHostPort","sum":195237},{"parent":"MODULE_FUNC","rule":"Url::GetOwner","sum":1404269},{"parent":"MODULE_FUNC","rule":"Url::GetPath","sum":1684170},{"parent":"MODULE_FUNC","rule":"Url::GetPort","sum":812419},{"parent":"MODULE_FUNC","rule":"Url::GetScheme","sum":2032067},{"parent":"MODULE_FUNC","rule":"Url::GetSchemeHost","sum":145759},{"parent":"MODULE_FUNC","rule":"Url::GetSchemeHostPort","sum":693603},{"parent":"MODULE_FUNC","rule":"Url::GetSignificantDomain","sum":468216},{"parent":"MODULE_FUNC","rule":"Url::GetTLD","sum":35496},{"parent":"MODULE_FUNC","rule":"Url::GetTail","sum":567027},{"parent":"MODULE_FUNC","rule":"Url::Getowner","sum":1},{"parent":"MODULE_FUNC","rule":"Url::HostNameToPunycode","sum":580246},{"parent":"MODULE_FUNC","rule":"Url::IsAllowedByRobotsTxt","sum":7},{"parent":"MODULE_FUNC","rule":"Url::IsKnownTLD","sum":20608},{"parent":"MODULE_FUNC","rule":"Url::IsWellKnownTLD","sum":4987},{"parent":"MODULE_FUNC","rule":"Url::Normalize","sum":1041437},{"parent":"MODULE_FUNC","rule":"Url::NormalizeWithDefaultHttpScheme","sum":616892},{"parent":"MODULE_FUNC","rule":"Url::Parse","sum":299131},{"parent":"MODULE_FUNC","rule":"Url::Punycode
ToHostName","sum":201367},{"parent":"MODULE_FUNC","rule":"Url::QueryStringToDict","sum":153593},{"parent":"MODULE_FUNC","rule":"Url::QueryStringToList","sum":38519},{"parent":"MODULE_FUNC","rule":"Url::ReplaceAll","sum":8},{"parent":"MODULE_FUNC","rule":"YSON::Co","sum":1},{"parent":"MODULE_FUNC","rule":"YSON::ConvertToDoubleList","sum":2},{"parent":"MODULE_FUNC","rule":"YSON::ConvertToInt64","sum":2},{"parent":"MODULE_FUNC","rule":"YSON::ConvertToList","sum":1},{"parent":"MODULE_FUNC","rule":"YSON::ConvertToString","sum":5},{"parent":"MODULE_FUNC","rule":"YSON::ConvertToStringList","sum":8},{"parent":"MODULE_FUNC","rule":"YSON::LookupDict","sum":1},{"parent":"MODULE_FUNC","rule":"YSON::LookupInt64","sum":1},{"parent":"MODULE_FUNC","rule":"YSON::LookupString","sum":1},{"parent":"MODULE_FUNC","rule":"YSON::ToString","sum":1},{"parent":"MODULE_FUNC","rule":"YSON::convertToString","sum":1},{"parent":"MODULE_FUNC","rule":"YSON::from","sum":1},{"parent":"MODULE_FUNC","rule":"YSon::ConvertToList","sum":3},{"parent":"MODULE_FUNC","rule":"YSon::ConvertToString","sum":1},{"parent":"MODULE_FUNC","rule":"YSon::LookupString","sum":1},{"parent":"MODULE_FUNC","rule":"YSon::Parse","sum":5},{"parent":"MODULE_FUNC","rule":"Yson::AsList","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::AsTuple","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::Attributes","sum":1895},{"parent":"MODULE_FUNC","rule":"Yson::COntains","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::CastToStringList","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::Contains","sum":2970778},{"parent":"MODULE_FUNC","rule":"Yson::Conver","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::ConverTToInt64","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConverToDouble","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::ConverToInt64","sum":10},{"parent":"MODULE_FUNC","rule":"Yson::ConverToList","sum":9},{"parent":"MODULE_FUNC","rule":"Yson::ConverToString","sum":21},{"parent":"MODULE_FUNC","rule":"Yson::ConvertFromString","sum":1},{"parent":"MO
DULE_FUNC","rule":"Yson::ConvertTo","sum":8853250},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToAttributes","sum":16},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToBool","sum":8786371},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToBoolDict","sum":105587},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToBoolList","sum":7423},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToBoolgDict","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToBytes","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDate","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDateTime","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDict","sum":8491755},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDictList","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDictOfDouble","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDictString","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDouble","sum":9500865},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDoubleDict","sum":278052},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDoubleList","sum":1173216},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToFloat","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToFloat64","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToINT64List","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToIn64","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToInt","sum":15},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToInt32","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToInt32List","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToInt64","sum":20389333},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToInt64Dict","sum":187168},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToInt64List","sum":2391486},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToIntList","sum":5},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToInteget","sum":157},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToJson","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToList","sum":18849836
},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToListDouble","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToListString","sum":16},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToListg","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToSTring","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToSTringList","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToSetring","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToString","sum":96410632},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToStringDict","sum":1445511},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToStringInt64","sum":4},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToStringList","sum":24313201},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToStrint","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToStruct","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToText","sum":13},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUINT64List","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUInt64","sum":8},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUInt64List","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUint32","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUint64","sum":8459681},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUint64Dict","sum":36715},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUint64List","sum":2205194},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUnit64List","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToevent_value","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToint64","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToint64List","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertTolIST","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertTolist","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertTostring","sum":4},{"parent":"MODULE_FUNC","rule":"Yson::ConverttoList","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConverttoString","sum":11},{"parent":"MODULE_FUNC","rule":"Yson::ConvvertToString","su
m":1},{"parent":"MODULE_FUNC","rule":"Yson::Dict","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::Equals","sum":794322},{"parent":"MODULE_FUNC","rule":"Yson::Extract","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FROM","sum":6},{"parent":"MODULE_FUNC","rule":"Yson::Find","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::From","sum":24870602},{"parent":"MODULE_FUNC","rule":"Yson::From64List","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromAGG_LIST","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromASDFDSKLDJF","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromAboba","sum":4},{"parent":"MODULE_FUNC","rule":"Yson::FromBoolDict","sum":13},{"parent":"MODULE_FUNC","rule":"Yson::FromBytes","sum":16},{"parent":"MODULE_FUNC","rule":"Yson::FromDict","sum":3079},{"parent":"MODULE_FUNC","rule":"Yson::FromDouble","sum":2822},{"parent":"MODULE_FUNC","rule":"Yson::FromDouble64Dict","sum":3692},{"parent":"MODULE_FUNC","rule":"Yson::FromDoubleDict","sum":42356},{"parent":"MODULE_FUNC","rule":"Yson::FromDoubleList","sum":1017},{"parent":"MODULE_FUNC","rule":"Yson::FromInt64Dict","sum":2156},{"parent":"MODULE_FUNC","rule":"Yson::FromInt64List","sum":7488},{"parent":"MODULE_FUNC","rule":"Yson::FromJson","sum":2090},{"parent":"MODULE_FUNC","rule":"Yson::FromKek","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::FromList","sum":4973},{"parent":"MODULE_FUNC","rule":"Yson::FromListTake","sum":9},{"parent":"MODULE_FUNC","rule":"Yson::FromMap","sum":358},{"parent":"MODULE_FUNC","rule":"Yson::FromSHEEEEEEEEEEEEE","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::FromSHIIIIIIIIIII","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromSeconds","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::FromSring","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::FromSting","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromString","sum":56446},{"parent":"MODULE_FUNC","rule":"Yson::FromStringDict","sum":68687},{"parent":"MODULE_FUNC","rule":"Yson::FromStringList","sum":60027},{"parent":"MODULE_FUNC","rule":"Yson::Fro
mStruct","sum":642551},{"parent":"MODULE_FUNC","rule":"Yson::FromUi64List","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromUin64List","sum":380},{"parent":"MODULE_FUNC","rule":"Yson::FromUint32Dict","sum":10250},{"parent":"MODULE_FUNC","rule":"Yson::FromUint64","sum":9},{"parent":"MODULE_FUNC","rule":"Yson::FromUint64Dict","sum":16143},{"parent":"MODULE_FUNC","rule":"Yson::FromUint64List","sum":25149},{"parent":"MODULE_FUNC","rule":"Yson::FromY2020MachoDachaTbIhaHouse","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::Fromt","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::Get","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::GetHash","sum":859162},{"parent":"MODULE_FUNC","rule":"Yson::GetLength","sum":2791907},{"parent":"MODULE_FUNC","rule":"Yson::IsBool","sum":33054},{"parent":"MODULE_FUNC","rule":"Yson::IsDict","sum":262608},{"parent":"MODULE_FUNC","rule":"Yson::IsDouble","sum":98144},{"parent":"MODULE_FUNC","rule":"Yson::IsEntity","sum":2685091},{"parent":"MODULE_FUNC","rule":"Yson::IsInt64","sum":288907},{"parent":"MODULE_FUNC","rule":"Yson::IsList","sum":286841},{"parent":"MODULE_FUNC","rule":"Yson::IsString","sum":1013476},{"parent":"MODULE_FUNC","rule":"Yson::IsUint64","sum":155716},{"parent":"MODULE_FUNC","rule":"Yson::ListMap","sum":12},{"parent":"MODULE_FUNC","rule":"Yson::Lo","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::Loo","sum":10},{"parent":"MODULE_FUNC","rule":"Yson::LookUp","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::LookUpDict","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::LookUpString","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::Lookup","sum":6552294},{"parent":"MODULE_FUNC","rule":"Yson::LookupBool","sum":2149412},{"parent":"MODULE_FUNC","rule":"Yson::LookupDict","sum":402758},{"parent":"MODULE_FUNC","rule":"Yson::LookupDouble","sum":3070211},{"parent":"MODULE_FUNC","rule":"Yson::LookupInt","sum":15},{"parent":"MODULE_FUNC","rule":"Yson::LookupInt32","sum":91},{"parent":"MODULE_FUNC","rule":"Yson::LookupInt64","sum":5509537},{"parent":"MODU
LE_FUNC","rule":"Yson::LookupInteger","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::LookupList","sum":1629712},{"parent":"MODULE_FUNC","rule":"Yson::LookupSTRING","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::LookupString","sum":41167625},{"parent":"MODULE_FUNC","rule":"Yson::LookupStringList","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::LookupStruct","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::LookupTimestamp","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::LookupUInt64","sum":6},{"parent":"MODULE_FUNC","rule":"Yson::LookupUint64","sum":3799019},{"parent":"MODULE_FUNC","rule":"Yson::LookupsTRING","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::LoopUpString","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::Option","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::Options","sum":9252128},{"parent":"MODULE_FUNC","rule":"Yson::Parse","sum":8639995},{"parent":"MODULE_FUNC","rule":"Yson::ParseJSON","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ParseJson","sum":12631463},{"parent":"MODULE_FUNC","rule":"Yson::ParseJsonDecodeUtf8","sum":135897},{"parent":"MODULE_FUNC","rule":"Yson::Parsejson","sum":9},{"parent":"MODULE_FUNC","rule":"Yson::Path","sum":23},{"parent":"MODULE_FUNC","rule":"Yson::Serialize","sum":3856335},{"parent":"MODULE_FUNC","rule":"Yson::SerializeJson","sum":9796705},{"parent":"MODULE_FUNC","rule":"Yson::SerializeJsonEncodeUtf8","sum":201029},{"parent":"MODULE_FUNC","rule":"Yson::SerializePretty","sum":1186662},{"parent":"MODULE_FUNC","rule":"Yson::SerializeText","sum":609005},{"parent":"MODULE_FUNC","rule":"Yson::WithAttributes","sum":667},{"parent":"MODULE_FUNC","rule":"Yson::YPath","sum":12592591},{"parent":"MODULE_FUNC","rule":"Yson::YPathBool","sum":1170476},{"parent":"MODULE_FUNC","rule":"Yson::YPathBoolean","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::YPathDict","sum":25039},{"parent":"MODULE_FUNC","rule":"Yson::YPathDouble","sum":1376465},{"parent":"MODULE_FUNC","rule":"Yson::YPathInt16","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::YPathInt64",
"sum":2682245},{"parent":"MODULE_FUNC","rule":"Yson::YPathList","sum":1337807},{"parent":"MODULE_FUNC","rule":"Yson::YPathListString","sum":7},{"parent":"MODULE_FUNC","rule":"Yson::YPathString","sum":13384229},{"parent":"MODULE_FUNC","rule":"Yson::YPathUint64","sum":710582},{"parent":"MODULE_FUNC","rule":"Yson::YaPathString","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::Ypath","sum":21},{"parent":"MODULE_FUNC","rule":"Yson::Yson2","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::convertToInt64","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::convertToString","sum":6},{"parent":"MODULE_FUNC","rule":"Yson::from","sum":14},{"parent":"MODULE_FUNC","rule":"Yson::fromJson","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::lookupString","sum":1},{"parent":"MODULE_FUNC","rule":"dateTime::Format","sum":3},{"parent":"MODULE_FUNC","rule":"dateTime::GetMonth","sum":6},{"parent":"MODULE_FUNC","rule":"dateTime::IntervalFromDays","sum":354},{"parent":"MODULE_FUNC","rule":"dateTime::IntervalFromSeconds","sum":1},{"parent":"MODULE_FUNC","rule":"dateTime::MakeDate","sum":85},{"parent":"MODULE_FUNC","rule":"dateTime::MakeDatetime","sum":7},{"parent":"MODULE_FUNC","rule":"dateTime::StartOfMonth","sum":2},{"parent":"MODULE_FUNC","rule":"datetime::Format","sum":7},{"parent":"MODULE_FUNC","rule":"datetime::FromMilliseconds","sum":2},{"parent":"MODULE_FUNC","rule":"datetime::FromSeconds","sum":3796},{"parent":"MODULE_FUNC","rule":"datetime::GetDayOfWeek","sum":4},{"parent":"MODULE_FUNC","rule":"datetime::GetDayOfWeekName","sum":3},{"parent":"MODULE_FUNC","rule":"datetime::GetHour","sum":8},{"parent":"MODULE_FUNC","rule":"datetime::GetMonth","sum":2},{"parent":"MODULE_FUNC","rule":"datetime::GetYear","sum":5},{"parent":"MODULE_FUNC","rule":"datetime::IntervalFromDays","sum":1346},{"parent":"MODULE_FUNC","rule":"datetime::IntervalFromHours","sum":37},{"parent":"MODULE_FUNC","rule":"datetime::IntervalFromMinutes","sum":6},{"parent":"MODULE_FUNC","rule":"datetime::MakeDate","sum":5460},{"parent":"M
ODULE_FUNC","rule":"datetime::MakeDatetime","sum":655},{"parent":"MODULE_FUNC","rule":"datetime::MakeTimestamp","sum":1},{"parent":"MODULE_FUNC","rule":"datetime::Parse","sum":398},{"parent":"MODULE_FUNC","rule":"datetime::ParseIso8601","sum":3},{"parent":"MODULE_FUNC","rule":"datetime::ShiftMonths","sum":313},{"parent":"MODULE_FUNC","rule":"datetime::StartOf","sum":1},{"parent":"MODULE_FUNC","rule":"datetime::StartOfDay","sum":2},{"parent":"MODULE_FUNC","rule":"datetime::StartOfMonth","sum":552},{"parent":"MODULE_FUNC","rule":"datetime::StartOfWeek","sum":522},{"parent":"MODULE_FUNC","rule":"datetime::ToDays","sum":376},{"parent":"MODULE_FUNC","rule":"datetime::ToSeconds","sum":729},{"parent":"MODULE_FUNC","rule":"datetime::fromseconds","sum":3},{"parent":"MODULE_FUNC","rule":"digest::city_hash","sum":1},{"parent":"MODULE_FUNC","rule":"hyperscan::Match","sum":1},{"parent":"MODULE_FUNC","rule":"json::ConvertToString","sum":7},{"parent":"MODULE_FUNC","rule":"json::From","sum":1},{"parent":"MODULE_FUNC","rule":"math::floor","sum":1},{"parent":"MODULE_FUNC","rule":"math::log","sum":2},{"parent":"MODULE_FUNC","rule":"math::round","sum":6},{"parent":"MODULE_FUNC","rule":"pire::Capture","sum":7},{"parent":"MODULE_FUNC","rule":"pire::Match","sum":29},{"parent":"MODULE_FUNC","rule":"re2::Capture","sum":2622},{"parent":"MODULE_FUNC","rule":"re2::Grep","sum":1},{"parent":"MODULE_FUNC","rule":"re2::Match","sum":25},{"parent":"MODULE_FUNC","rule":"re2::Replace","sum":438},{"parent":"MODULE_FUNC","rule":"re2::capture","sum":8},{"parent":"MODULE_FUNC","rule":"string::JoinFromList","sum":1},{"parent":"MODULE_FUNC","rule":"string::StartsWith","sum":1},{"parent":"MODULE_FUNC","rule":"string::contains","sum":1},{"parent":"MODULE_FUNC","rule":"string::removeall","sum":4},{"parent":"MODULE_FUNC","rule":"string::splittolist","sum":12},{"parent":"MODULE_FUNC","rule":"string::strip","sum":2},{"parent":"MODULE_FUNC","rule":"url::gethost","sum":1},{"parent":"MODULE_FUNC","rule":"ySoN::CoNve
RtTo","sum":1},{"parent":"MODULE_FUNC","rule":"yson::ConvertToDict","sum":1},{"parent":"MODULE_FUNC","rule":"yson::ConvertToDouble","sum":1},{"parent":"MODULE_FUNC","rule":"yson::ConvertToInt64","sum":1},{"parent":"MODULE_FUNC","rule":"yson::ConvertToStringList","sum":2},{"parent":"MODULE_FUNC","rule":"yson::From","sum":1},{"parent":"MODULE_FUNC","rule":"yson::convertto","sum":2},{"parent":"MODULE_FUNC","rule":"yson::converttodoubledict","sum":8},{"parent":"MODULE_FUNC","rule":"yson::converttolist","sum":1},{"parent":"MODULE_FUNC","rule":"yson::converttostring","sum":3},{"parent":"MODULE_FUNC","rule":"yson::converttostringdict","sum":8},{"parent":"MODULE_FUNC","rule":"yson::from","sum":2},{"parent":"MODULE_FUNC","rule":"yson::options","sum":8},{"parent":"MODULE_FUNC","rule":"yson::parsejson","sum":16},{"parent":"TRule_action_or_subquery_args","rule":"TRule_action_or_subquery_args.Block2","sum":4874480},{"parent":"TRule_action_or_subquery_args","rule":"TRule_action_or_subquery_args.Rule_opt_bind_parameter1","sum":13863320},{"parent":"TRule_action_or_subquery_args.TBlock2","rule":"TRule_action_or_subquery_args.TBlock2.Rule_opt_bind_parameter2","sum":7419395},{"parent":"TRule_action_or_subquery_args.TBlock2","rule":"TRule_action_or_subquery_args.TBlock2.Token1","sum":7419395},{"parent":"TRule_add_subexpr","rule":"TRule_add_subexpr.Block2","sum":136130356},{"parent":"TRule_add_subexpr","rule":"TRule_add_subexpr.Rule_mul_subexpr1","sum":15578887004},{"parent":"TRule_add_subexpr.TBlock2","rule":"TRule_add_subexpr.TBlock2.Rule_mul_subexpr2","sum":167530812},{"parent":"TRule_add_subexpr.TBlock2","rule":"TRule_add_subexpr.TBlock2.Token1","sum":167530812},{"parent":"TRule_an_id","rule":"TRule_an_id.Alt_an_id1","sum":1991339715},{"parent":"TRule_an_id.TAlt1","rule":"TRule_an_id.TAlt1.Rule_id1","sum":1991339715},{"parent":"TRule_an_id_as_compat","rule":"TRule_an_id_as_compat.Alt_an_id_as_compat1","sum":2917673},{"parent":"TRule_an_id_as_compat.TAlt1","rule":"TRule_an_id_as_comp
at.TAlt1.Rule_id_as_compat1","sum":2917673},{"parent":"TRule_an_id_expr","rule":"TRule_an_id_expr.Alt_an_id_expr1","sum":96096575},{"parent":"TRule_an_id_expr.TAlt1","rule":"TRule_an_id_expr.TAlt1.Rule_id_expr1","sum":96096575},{"parent":"TRule_an_id_hint","rule":"TRule_an_id_hint.Alt_an_id_hint1","sum":153546171},{"parent":"TRule_an_id_hint.TAlt1","rule":"TRule_an_id_hint.TAlt1.Rule_id_hint1","sum":153546171},{"parent":"TRule_an_id_or_type","rule":"TRule_an_id_or_type.Alt_an_id_or_type1","sum":9580237140},{"parent":"TRule_an_id_or_type","rule":"TRule_an_id_or_type.Alt_an_id_or_type2","sum":7},{"parent":"TRule_an_id_or_type.TAlt1","rule":"TRule_an_id_or_type.TAlt1.Rule_id_or_type1","sum":9580237140},{"parent":"TRule_an_id_or_type.TAlt2","rule":"TRule_an_id_or_type.TAlt2.Token1","sum":7},{"parent":"TRule_an_id_pure","rule":"TRule_an_id_pure.Alt_an_id_pure1","sum":674730413},{"parent":"TRule_an_id_pure.TAlt1","rule":"TRule_an_id_pure.TAlt1.Rule_identifier1","sum":674730413},{"parent":"TRule_an_id_schema","rule":"TRule_an_id_schema.Alt_an_id_schema1","sum":1286},{"parent":"TRule_an_id_schema.TAlt1","rule":"TRule_an_id_schema.TAlt1.Rule_id_schema1","sum":1286},{"parent":"TRule_an_id_table","rule":"TRule_an_id_table.Alt_an_id_table1","sum":330498585},{"parent":"TRule_an_id_table","rule":"TRule_an_id_table.Alt_an_id_table2","sum":1},{"parent":"TRule_an_id_table.TAlt1","rule":"TRule_an_id_table.TAlt1.Rule_id_table1","sum":330498585},{"parent":"TRule_an_id_table.TAlt2","rule":"TRule_an_id_table.TAlt2.Token1","sum":1},{"parent":"TRule_an_id_window","rule":"TRule_an_id_window.Alt_an_id_window1","sum":46472719},{"parent":"TRule_an_id_window.TAlt1","rule":"TRule_an_id_window.TAlt1.Rule_id_window1","sum":46472719},{"parent":"TRule_an_id_without","rule":"TRule_an_id_without.Alt_an_id_without1","sum":23122170},{"parent":"TRule_an_id_without.TAlt1","rule":"TRule_an_id_without.TAlt1.Rule_id_without1","sum":23122170},{"parent":"TRule_and_subexpr","rule":"TRule_and_subexpr.Block2","su
m":29488},{"parent":"TRule_and_subexpr","rule":"TRule_and_subexpr.Rule_xor_subexpr1","sum":14246157279},{"parent":"TRule_and_subexpr.TBlock2","rule":"TRule_and_subexpr.TBlock2.Rule_xor_subexpr2","sum":29552},{"parent":"TRule_and_subexpr.TBlock2","rule":"TRule_and_subexpr.TBlock2.Token1","sum":29552},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr1","sum":5167329915},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr10","sum":35705142},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr11","sum":8458526},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr12","sum":14420408},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr2","sum":1761410676},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr3","sum":444713693},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr4","sum":397451793},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr5","sum":76996},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr6","sum":61323841},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr7","sum":924342021},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr8","sum":161016},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr9","sum":501677},{"parent":"TRule_atom_expr.TAlt1","rule":"TRule_atom_expr.TAlt1.Rule_literal_value1","sum":5167329915},{"parent":"TRule_atom_expr.TAlt10","rule":"TRule_atom_expr.TAlt10.Rule_list_literal1","sum":35705142},{"parent":"TRule_atom_expr.TAlt11","rule":"TRule_atom_expr.TAlt11.Rule_dict_literal1","sum":8458526},{"parent":"TRule_atom_expr.TAlt12","rule":"TRule_atom_expr.TAlt12.Rule_struct_literal1","sum":14420408},{"parent":"TRule_atom_expr.TAlt2","rule":"TRule_atom_expr.TAlt2.Rule_bind_parameter1","sum":1761410676},{"parent":"TRule_atom_expr.TAlt3","rule":"TRule_atom_expr.TAlt3.Rule_lambda1","sum":444713693},{"parent":"TRule_atom_expr.TAlt4","rule":"TRule_atom_expr.TAlt4.Rule_cast_expr1"
,"sum":397451793},{"parent":"TRule_atom_expr.TAlt5","rule":"TRule_atom_expr.TAlt5.Rule_exists_expr1","sum":76996},{"parent":"TRule_atom_expr.TAlt6","rule":"TRule_atom_expr.TAlt6.Rule_case_expr1","sum":61323841},{"parent":"TRule_atom_expr.TAlt7","rule":"TRule_atom_expr.TAlt7.Block3","sum":924342021},{"parent":"TRule_atom_expr.TAlt7","rule":"TRule_atom_expr.TAlt7.Rule_an_id_or_type1","sum":924342021},{"parent":"TRule_atom_expr.TAlt7","rule":"TRule_atom_expr.TAlt7.Token2","sum":924342021},{"parent":"TRule_atom_expr.TAlt7.TBlock3","rule":"TRule_atom_expr.TAlt7.TBlock3.Alt1","sum":924340776},{"parent":"TRule_atom_expr.TAlt7.TBlock3","rule":"TRule_atom_expr.TAlt7.TBlock3.Alt2","sum":1245},{"parent":"TRule_atom_expr.TAlt7.TBlock3.TAlt1","rule":"TRule_atom_expr.TAlt7.TBlock3.TAlt1.Rule_id_or_type1","sum":924340776},{"parent":"TRule_atom_expr.TAlt7.TBlock3.TAlt2","rule":"TRule_atom_expr.TAlt7.TBlock3.TAlt2.Token1","sum":1245},{"parent":"TRule_atom_expr.TAlt8","rule":"TRule_atom_expr.TAlt8.Rule_value_constructor1","sum":161016},{"parent":"TRule_atom_expr.TAlt9","rule":"TRule_atom_expr.TAlt9.Rule_bitcast_expr1","sum":501677},{"parent":"TRule_bind_parameter","rule":"TRule_bind_parameter.Block2","sum":3682021854},{"parent":"TRule_bind_parameter","rule":"TRule_bind_parameter.Token1","sum":3682021854},{"parent":"TRule_bind_parameter.TBlock2","rule":"TRule_bind_parameter.TBlock2.Alt1","sum":3682009956},{"parent":"TRule_bind_parameter.TBlock2","rule":"TRule_bind_parameter.TBlock2.Alt2","sum":9444},{"parent":"TRule_bind_parameter.TBlock2","rule":"TRule_bind_parameter.TBlock2.Alt3","sum":2454},{"parent":"TRule_bind_parameter.TBlock2.TAlt1","rule":"TRule_bind_parameter.TBlock2.TAlt1.Rule_an_id_or_type1","sum":3682009956},{"parent":"TRule_bind_parameter.TBlock2.TAlt2","rule":"TRule_bind_parameter.TBlock2.TAlt2.Token1","sum":9444},{"parent":"TRule_bind_parameter.TBlock2.TAlt3","rule":"TRule_bind_parameter.TBlock2.TAlt3.Token1","sum":2454},{"parent":"TRule_bind_parameter_list","rule":"TRu
le_bind_parameter_list.Block2","sum":235943},{"parent":"TRule_bind_parameter_list","rule":"TRule_bind_parameter_list.Rule_bind_parameter1","sum":1002566900},{"parent":"TRule_bind_parameter_list.TBlock2","rule":"TRule_bind_parameter_list.TBlock2.Rule_bind_parameter2","sum":322786},{"parent":"TRule_bind_parameter_list.TBlock2","rule":"TRule_bind_parameter_list.TBlock2.Token1","sum":322786},{"parent":"TRule_bit_subexpr","rule":"TRule_bit_subexpr.Block2","sum":137841123},{"parent":"TRule_bit_subexpr","rule":"TRule_bit_subexpr.Rule_add_subexpr1","sum":15421715199},{"parent":"TRule_bit_subexpr.TBlock2","rule":"TRule_bit_subexpr.TBlock2.Rule_add_subexpr2","sum":157171805},{"parent":"TRule_bit_subexpr.TBlock2","rule":"TRule_bit_subexpr.TBlock2.Token1","sum":157171805},{"parent":"TRule_bitcast_expr","rule":"TRule_bitcast_expr.Rule_expr3","sum":501677},{"parent":"TRule_bitcast_expr","rule":"TRule_bitcast_expr.Rule_type_name_simple5","sum":501677},{"parent":"TRule_bitcast_expr","rule":"TRule_bitcast_expr.Token1","sum":501677},{"parent":"TRule_bitcast_expr","rule":"TRule_bitcast_expr.Token2","sum":501677},{"parent":"TRule_bitcast_expr","rule":"TRule_bitcast_expr.Token4","sum":501677},{"parent":"TRule_bitcast_expr","rule":"TRule_bitcast_expr.Token6","sum":501677},{"parent":"TRule_bool_value","rule":"TRule_bool_value.Token1","sum":103662442},{"parent":"TRule_call_action","rule":"TRule_call_action.Block1","sum":9387011},{"parent":"TRule_call_action","rule":"TRule_call_action.Block3","sum":7125589},{"parent":"TRule_call_action","rule":"TRule_call_action.Token2","sum":9387011},{"parent":"TRule_call_action","rule":"TRule_call_action.Token4","sum":9387011},{"parent":"TRule_call_action.TBlock1","rule":"TRule_call_action.TBlock1.Alt1","sum":9261232},{"parent":"TRule_call_action.TBlock1","rule":"TRule_call_action.TBlock1.Alt2","sum":125779},{"parent":"TRule_call_action.TBlock1.TAlt1","rule":"TRule_call_action.TBlock1.TAlt1.Rule_bind_parameter1","sum":9261232},{"parent":"TRule_call_action
.TBlock1.TAlt2","rule":"TRule_call_action.TBlock1.TAlt2.Token1","sum":125779},{"parent":"TRule_call_action.TBlock3","rule":"TRule_call_action.TBlock3.Rule_expr_list1","sum":7125589},{"parent":"TRule_callable_arg","rule":"TRule_callable_arg.Block2","sum":1},{"parent":"TRule_callable_arg","rule":"TRule_callable_arg.Rule_variant_arg1","sum":17897089},{"parent":"TRule_callable_arg.TBlock2","rule":"TRule_callable_arg.TBlock2.Token1","sum":1},{"parent":"TRule_callable_arg.TBlock2","rule":"TRule_callable_arg.TBlock2.Token2","sum":1},{"parent":"TRule_callable_arg.TBlock2","rule":"TRule_callable_arg.TBlock2.Token3","sum":1},{"parent":"TRule_callable_arg_list","rule":"TRule_callable_arg_list.Block2","sum":4335701},{"parent":"TRule_callable_arg_list","rule":"TRule_callable_arg_list.Rule_callable_arg1","sum":9731361},{"parent":"TRule_callable_arg_list.TBlock2","rule":"TRule_callable_arg_list.TBlock2.Rule_callable_arg2","sum":8165728},{"parent":"TRule_callable_arg_list.TBlock2","rule":"TRule_callable_arg_list.TBlock2.Token1","sum":8165728},{"parent":"TRule_case_expr","rule":"TRule_case_expr.Block2","sum":4728673},{"parent":"TRule_case_expr","rule":"TRule_case_expr.Block3","sum":61323845},{"parent":"TRule_case_expr","rule":"TRule_case_expr.Block4","sum":61323845},{"parent":"TRule_case_expr","rule":"TRule_case_expr.Token1","sum":61323845},{"parent":"TRule_case_expr","rule":"TRule_case_expr.Token5","sum":61323845},{"parent":"TRule_case_expr.TBlock2","rule":"TRule_case_expr.TBlock2.Rule_expr1","sum":4728673},{"parent":"TRule_case_expr.TBlock3","rule":"TRule_case_expr.TBlock3.Rule_when_expr1","sum":157500144},{"parent":"TRule_case_expr.TBlock4","rule":"TRule_case_expr.TBlock4.Rule_expr2","sum":61323845},{"parent":"TRule_case_expr.TBlock4","rule":"TRule_case_expr.TBlock4.Token1","sum":61323845},{"parent":"TRule_cast_expr","rule":"TRule_cast_expr.Rule_expr3","sum":397496235},{"parent":"TRule_cast_expr","rule":"TRule_cast_expr.Rule_type_name_or_bind5","sum":397496235},{"parent":"TRule_c
ast_expr","rule":"TRule_cast_expr.Token1","sum":397496235},{"parent":"TRule_cast_expr","rule":"TRule_cast_expr.Token2","sum":397496235},{"parent":"TRule_cast_expr","rule":"TRule_cast_expr.Token4","sum":397496235},{"parent":"TRule_cast_expr","rule":"TRule_cast_expr.Token6","sum":397496235},{"parent":"TRule_cluster_expr","rule":"TRule_cluster_expr.Block1","sum":6754440},{"parent":"TRule_cluster_expr","rule":"TRule_cluster_expr.Block2","sum":373099947},{"parent":"TRule_cluster_expr.TBlock1","rule":"TRule_cluster_expr.TBlock1.Rule_an_id1","sum":6754440},{"parent":"TRule_cluster_expr.TBlock1","rule":"TRule_cluster_expr.TBlock1.Token2","sum":6754440},{"parent":"TRule_cluster_expr.TBlock2","rule":"TRule_cluster_expr.TBlock2.Alt1","sum":373099947},{"parent":"TRule_cluster_expr.TBlock2.TAlt1","rule":"TRule_cluster_expr.TBlock2.TAlt1.Rule_pure_column_or_named1","sum":373099947},{"parent":"TRule_column_list","rule":"TRule_column_list.Block2","sum":277335},{"parent":"TRule_column_list","rule":"TRule_column_list.Block3","sum":762},{"parent":"TRule_column_list","rule":"TRule_column_list.Rule_column_name1","sum":838466},{"parent":"TRule_column_list.TBlock2","rule":"TRule_column_list.TBlock2.Rule_column_name2","sum":1118235},{"parent":"TRule_column_list.TBlock2","rule":"TRule_column_list.TBlock2.Token1","sum":1118235},{"parent":"TRule_column_list.TBlock3","rule":"TRule_column_list.TBlock3.Token1","sum":762},{"parent":"TRule_column_name","rule":"TRule_column_name.Rule_an_id2","sum":23207765},{"parent":"TRule_column_name","rule":"TRule_column_name.Rule_opt_id_prefix1","sum":23207765},{"parent":"TRule_column_order_by_specification","rule":"TRule_column_order_by_specification.Rule_an_id1","sum":117},{"parent":"TRule_column_schema","rule":"TRule_column_schema.Rule_an_id_schema1","sum":1286},{"parent":"TRule_column_schema","rule":"TRule_column_schema.Rule_opt_column_constraints4","sum":1286},{"parent":"TRule_column_schema","rule":"TRule_column_schema.Rule_type_name_or_bind2","sum":1286},
{"parent":"TRule_commit_stmt","rule":"TRule_commit_stmt.Token1","sum":12985670},{"parent":"TRule_con_subexpr","rule":"TRule_con_subexpr.Alt_con_subexpr1","sum":15887314010},{"parent":"TRule_con_subexpr","rule":"TRule_con_subexpr.Alt_con_subexpr2","sum":87288612},{"parent":"TRule_con_subexpr.TAlt1","rule":"TRule_con_subexpr.TAlt1.Rule_unary_subexpr1","sum":15887314010},{"parent":"TRule_con_subexpr.TAlt2","rule":"TRule_con_subexpr.TAlt2.Rule_unary_op1","sum":87288612},{"parent":"TRule_con_subexpr.TAlt2","rule":"TRule_con_subexpr.TAlt2.Rule_unary_subexpr2","sum":87288612},{"parent":"TRule_cond_expr","rule":"TRule_cond_expr.Alt_cond_expr1","sum":52031343},{"parent":"TRule_cond_expr","rule":"TRule_cond_expr.Alt_cond_expr2","sum":126033122},{"parent":"TRule_cond_expr","rule":"TRule_cond_expr.Alt_cond_expr3","sum":172004062},{"parent":"TRule_cond_expr","rule":"TRule_cond_expr.Alt_cond_expr4","sum":24780845},{"parent":"TRule_cond_expr","rule":"TRule_cond_expr.Alt_cond_expr5","sum":716833017},{"parent":"TRule_cond_expr.TAlt1","rule":"TRule_cond_expr.TAlt1.Block1","sum":9227033},{"parent":"TRule_cond_expr.TAlt1","rule":"TRule_cond_expr.TAlt1.Block4","sum":93875},{"parent":"TRule_cond_expr.TAlt1","rule":"TRule_cond_expr.TAlt1.Rule_eq_subexpr3","sum":52031343},{"parent":"TRule_cond_expr.TAlt1","rule":"TRule_cond_expr.TAlt1.Rule_match_op2","sum":52031343},{"parent":"TRule_cond_expr.TAlt1.TBlock1","rule":"TRule_cond_expr.TAlt1.TBlock1.Token1","sum":9227033},{"parent":"TRule_cond_expr.TAlt1.TBlock4","rule":"TRule_cond_expr.TAlt1.TBlock4.Rule_eq_subexpr2","sum":93875},{"parent":"TRule_cond_expr.TAlt1.TBlock4","rule":"TRule_cond_expr.TAlt1.TBlock4.Token1","sum":93875},{"parent":"TRule_cond_expr.TAlt2","rule":"TRule_cond_expr.TAlt2.Block1","sum":26082912},{"parent":"TRule_cond_expr.TAlt2","rule":"TRule_cond_expr.TAlt2.Block3","sum":3149029},{"parent":"TRule_cond_expr.TAlt2","rule":"TRule_cond_expr.TAlt2.Rule_in_expr4","sum":126033122},{"parent":"TRule_cond_expr.TAlt2","rule":"TRule_c
ond_expr.TAlt2.Token2","sum":126033122},{"parent":"TRule_cond_expr.TAlt2.TBlock1","rule":"TRule_cond_expr.TAlt2.TBlock1.Token1","sum":26082912},{"parent":"TRule_cond_expr.TAlt2.TBlock3","rule":"TRule_cond_expr.TAlt2.TBlock3.Token1","sum":3149029},{"parent":"TRule_cond_expr.TAlt3","rule":"TRule_cond_expr.TAlt3.Block1","sum":172004062},{"parent":"TRule_cond_expr.TAlt3.TBlock1","rule":"TRule_cond_expr.TAlt3.TBlock1.Alt1","sum":162},{"parent":"TRule_cond_expr.TAlt3.TBlock1","rule":"TRule_cond_expr.TAlt3.TBlock1.Alt2","sum":413},{"parent":"TRule_cond_expr.TAlt3.TBlock1","rule":"TRule_cond_expr.TAlt3.TBlock1.Alt3","sum":60447743},{"parent":"TRule_cond_expr.TAlt3.TBlock1","rule":"TRule_cond_expr.TAlt3.TBlock1.Alt4","sum":111555744},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt1","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt1.Token1","sum":162},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt2","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt2.Token1","sum":413},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt3","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt3.Token1","sum":60447743},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt3","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt3.Token2","sum":60447743},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt4","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt4.Block1","sum":111514343},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt4","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt4.Token2","sum":111555744},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt4","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt4.Token3","sum":111555744},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt4.TBlock1","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt4.TBlock1.Token1","sum":111514343},{"parent":"TRule_cond_expr.TAlt4","rule":"TRule_cond_expr.TAlt4.Block1","sum":399471},{"parent":"TRule_cond_expr.TAlt4","rule":"TRule_cond_expr.TAlt4.Block3","sum":25},{"parent":"TRule_cond_expr.TAlt4","rule":"TRule_cond_expr.TAlt4.Rule_eq_subexpr4","sum":24780845},{"parent":"TRule_cond_expr.TAlt4","rule":"TRule_cond_expr.TAl
t4.Rule_eq_subexpr6","sum":24780845},{"parent":"TRule_cond_expr.TAlt4","rule":"TRule_cond_expr.TAlt4.Token2","sum":24780845},{"parent":"TRule_cond_expr.TAlt4","rule":"TRule_cond_expr.TAlt4.Token5","sum":24780845},{"parent":"TRule_cond_expr.TAlt4.TBlock1","rule":"TRule_cond_expr.TAlt4.TBlock1.Token1","sum":399471},{"parent":"TRule_cond_expr.TAlt4.TBlock3","rule":"TRule_cond_expr.TAlt4.TBlock3.Token1","sum":25},{"parent":"TRule_cond_expr.TAlt5","rule":"TRule_cond_expr.TAlt5.Block1","sum":716833017},{"parent":"TRule_cond_expr.TAlt5.TBlock1","rule":"TRule_cond_expr.TAlt5.TBlock1.Block1","sum":716835030},{"parent":"TRule_cond_expr.TAlt5.TBlock1","rule":"TRule_cond_expr.TAlt5.TBlock1.Rule_eq_subexpr2","sum":716835030},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.Alt1","sum":490458705},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.Alt2","sum":171386461},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.Alt3","sum":49527982},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.Alt4","sum":5069975},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.Alt5","sum":391907},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt1","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt1.Token1","sum":490458705},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt2","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt2.Token1","sum":171386461},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt3","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt3.Token1","sum":49527982},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt4","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt4.Token1","sum":5069975},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt5","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt5.Rule_distinct_from_op1","sum":391907},{"parent":"TRu
le_create_table_entry","rule":"TRule_create_table_entry.Alt_create_table_entry1","sum":1286},{"parent":"TRule_create_table_entry","rule":"TRule_create_table_entry.Alt_create_table_entry2","sum":253},{"parent":"TRule_create_table_entry.TAlt1","rule":"TRule_create_table_entry.TAlt1.Rule_column_schema1","sum":1286},{"parent":"TRule_create_table_entry.TAlt2","rule":"TRule_create_table_entry.TAlt2.Rule_table_constraint1","sum":253},{"parent":"TRule_create_table_stmt","rule":"TRule_create_table_stmt.Block3","sum":137},{"parent":"TRule_create_table_stmt","rule":"TRule_create_table_stmt.Block8","sum":137},{"parent":"TRule_create_table_stmt","rule":"TRule_create_table_stmt.Rule_create_table_entry7","sum":137},{"parent":"TRule_create_table_stmt","rule":"TRule_create_table_stmt.Rule_simple_table_ref5","sum":137},{"parent":"TRule_create_table_stmt","rule":"TRule_create_table_stmt.Token1","sum":137},{"parent":"TRule_create_table_stmt","rule":"TRule_create_table_stmt.Token10","sum":137},{"parent":"TRule_create_table_stmt","rule":"TRule_create_table_stmt.Token6","sum":137},{"parent":"TRule_create_table_stmt.TBlock3","rule":"TRule_create_table_stmt.TBlock3.Alt1","sum":137},{"parent":"TRule_create_table_stmt.TBlock3.TAlt1","rule":"TRule_create_table_stmt.TBlock3.TAlt1.Token1","sum":137},{"parent":"TRule_create_table_stmt.TBlock8","rule":"TRule_create_table_stmt.TBlock8.Rule_create_table_entry2","sum":1402},{"parent":"TRule_create_table_stmt.TBlock8","rule":"TRule_create_table_stmt.TBlock8.Token1","sum":1402},{"parent":"TRule_cube_list","rule":"TRule_cube_list.Rule_ordinary_grouping_set_list3","sum":277764},{"parent":"TRule_cube_list","rule":"TRule_cube_list.Token1","sum":277764},{"parent":"TRule_cube_list","rule":"TRule_cube_list.Token2","sum":277764},{"parent":"TRule_cube_list","rule":"TRule_cube_list.Token4","sum":277764},{"parent":"TRule_declare_stmt","rule":"TRule_declare_stmt.Rule_bind_parameter2","sum":154773177},{"parent":"TRule_declare_stmt","rule":"TRule_declare_stmt.Rule_t
ype_name4","sum":154773177},{"parent":"TRule_declare_stmt","rule":"TRule_declare_stmt.Token1","sum":154773177},{"parent":"TRule_declare_stmt","rule":"TRule_declare_stmt.Token3","sum":154773177},{"parent":"TRule_define_action_or_subquery_body","rule":"TRule_define_action_or_subquery_body.Block1","sum":45289},{"parent":"TRule_define_action_or_subquery_body","rule":"TRule_define_action_or_subquery_body.Block2","sum":35937107},{"parent":"TRule_define_action_or_subquery_body.TBlock1","rule":"TRule_define_action_or_subquery_body.TBlock1.Token1","sum":45289},{"parent":"TRule_define_action_or_subquery_body.TBlock2","rule":"TRule_define_action_or_subquery_body.TBlock2.Block2","sum":15998025},{"parent":"TRule_define_action_or_subquery_body.TBlock2","rule":"TRule_define_action_or_subquery_body.TBlock2.Block3","sum":23395636},{"parent":"TRule_define_action_or_subquery_body.TBlock2","rule":"TRule_define_action_or_subquery_body.TBlock2.Rule_sql_stmt_core1","sum":35937107},{"parent":"TRule_define_action_or_subquery_body.TBlock2.TBlock2","rule":"TRule_define_action_or_subquery_body.TBlock2.TBlock2.Block1","sum":45662997},{"parent":"TRule_define_action_or_subquery_body.TBlock2.TBlock2","rule":"TRule_define_action_or_subquery_body.TBlock2.TBlock2.Rule_sql_stmt_core2","sum":45662997},{"parent":"TRule_define_action_or_subquery_body.TBlock2.TBlock2.TBlock1","rule":"TRule_define_action_or_subquery_body.TBlock2.TBlock2.TBlock1.Token1","sum":45676021},{"parent":"TRule_define_action_or_subquery_body.TBlock2.TBlock3","rule":"TRule_define_action_or_subquery_body.TBlock2.TBlock3.Token1","sum":23420787},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Block5","sum":13863320},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Rule_bind_parameter3","sum":22146478},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Rule_define_action_or_subquery_body8","sum":22146478},
{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Token1","sum":22146478},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Token10","sum":22146478},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Token2","sum":22146478},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Token4","sum":22146478},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Token6","sum":22146478},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Token7","sum":22146478},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Token9","sum":22146478},{"parent":"TRule_define_action_or_subquery_stmt.TBlock5","rule":"TRule_define_action_or_subquery_stmt.TBlock5.Rule_action_or_subquery_args1","sum":13863320},{"parent":"TRule_dict_literal","rule":"TRule_dict_literal.Block2","sum":8186701},{"parent":"TRule_dict_literal","rule":"TRule_dict_literal.Block3","sum":2240048},{"parent":"TRule_dict_literal","rule":"TRule_dict_literal.Token1","sum":8913652},{"parent":"TRule_dict_literal","rule":"TRule_dict_literal.Token4","sum":8913652},{"parent":"TRule_dict_literal.TBlock2","rule":"TRule_dict_literal.TBlock2.Rule_expr_dict_list1","sum":8186701},{"parent":"TRule_dict_literal.TBlock3","rule":"TRule_dict_literal.TBlock3.Token1","sum":2240048},{"parent":"TRule_distinct_from_op","rule":"TRule_distinct_from_op.Block2","sum":119057},{"parent":"TRule_distinct_from_op","rule":"TRule_distinct_from_op.Token1","sum":391907},{"parent":"TRule_distinct_from_op","rule":"TRule_distinct_from_op.Token3","sum":391907},{"parent":"TRule_distinct_from_op","rule":"TRule_distinct_from_op.Token4","sum":391907},{"parent":"TRule_distinct_from_op.TBlock2","rule":"TRule_distinct_from_op.TBlock2.Token1","sum":119057},{"parent":"TRule_do_stmt","r
ule":"TRule_do_stmt.Block2","sum":23184321},{"parent":"TRule_do_stmt","rule":"TRule_do_stmt.Token1","sum":23184321},{"parent":"TRule_do_stmt.TBlock2","rule":"TRule_do_stmt.TBlock2.Alt1","sum":9387011},{"parent":"TRule_do_stmt.TBlock2","rule":"TRule_do_stmt.TBlock2.Alt2","sum":13797310},{"parent":"TRule_do_stmt.TBlock2.TAlt1","rule":"TRule_do_stmt.TBlock2.TAlt1.Rule_call_action1","sum":9387011},{"parent":"TRule_do_stmt.TBlock2.TAlt2","rule":"TRule_do_stmt.TBlock2.TAlt2.Rule_inline_action1","sum":13797310},{"parent":"TRule_double_question","rule":"TRule_double_question.Token1","sum":108571603},{"parent":"TRule_double_question","rule":"TRule_double_question.Token2","sum":108571603},{"parent":"TRule_drop_table_stmt","rule":"TRule_drop_table_stmt.Block2","sum":2560940},{"parent":"TRule_drop_table_stmt","rule":"TRule_drop_table_stmt.Block3","sum":3},{"parent":"TRule_drop_table_stmt","rule":"TRule_drop_table_stmt.Rule_simple_table_ref4","sum":2560940},{"parent":"TRule_drop_table_stmt","rule":"TRule_drop_table_stmt.Token1","sum":2560940},{"parent":"TRule_drop_table_stmt.TBlock2","rule":"TRule_drop_table_stmt.TBlock2.Alt1","sum":2560940},{"parent":"TRule_drop_table_stmt.TBlock2.TAlt1","rule":"TRule_drop_table_stmt.TBlock2.TAlt1.Token1","sum":2560940},{"parent":"TRule_drop_table_stmt.TBlock3","rule":"TRule_drop_table_stmt.TBlock3.Token1","sum":3},{"parent":"TRule_drop_table_stmt.TBlock3","rule":"TRule_drop_table_stmt.TBlock3.Token2","sum":3},{"parent":"TRule_eq_subexpr","rule":"TRule_eq_subexpr.Block2","sum":239228334},{"parent":"TRule_eq_subexpr","rule":"TRule_eq_subexpr.Rule_neq_subexpr1","sum":15064708769},{"parent":"TRule_eq_subexpr.TBlock2","rule":"TRule_eq_subexpr.TBlock2.Rule_neq_subexpr2","sum":239228389},{"parent":"TRule_eq_subexpr.TBlock2","rule":"TRule_eq_subexpr.TBlock2.Token1","sum":239228389},{"parent":"TRule_exists_expr","rule":"TRule_exists_expr.Block3","sum":76996},{"parent":"TRule_exists_expr","rule":"TRule_exists_expr.Token1","sum":76996},{"parent":"TRule_e
xists_expr","rule":"TRule_exists_expr.Token2","sum":76996},{"parent":"TRule_exists_expr","rule":"TRule_exists_expr.Token4","sum":76996},{"parent":"TRule_exists_expr.TBlock3","rule":"TRule_exists_expr.TBlock3.Alt1","sum":76996},{"parent":"TRule_exists_expr.TBlock3.TAlt1","rule":"TRule_exists_expr.TBlock3.TAlt1.Rule_select_stmt1","sum":76996},{"parent":"TRule_expr","rule":"TRule_expr.Alt_expr1","sum":13661547647},{"parent":"TRule_expr","rule":"TRule_expr.Alt_expr2","sum":17316702},{"parent":"TRule_expr.TAlt1","rule":"TRule_expr.TAlt1.Block2","sum":68173343},{"parent":"TRule_expr.TAlt1","rule":"TRule_expr.TAlt1.Rule_or_subexpr1","sum":13661547647},{"parent":"TRule_expr.TAlt1.TBlock2","rule":"TRule_expr.TAlt1.TBlock2.Rule_or_subexpr2","sum":114910990},{"parent":"TRule_expr.TAlt1.TBlock2","rule":"TRule_expr.TAlt1.TBlock2.Token1","sum":114910990},{"parent":"TRule_expr.TAlt2","rule":"TRule_expr.TAlt2.Rule_type_name_composite1","sum":17316702},{"parent":"TRule_expr_dict_list","rule":"TRule_expr_dict_list.Block2","sum":6987187},{"parent":"TRule_expr_dict_list","rule":"TRule_expr_dict_list.Block3","sum":6609858},{"parent":"TRule_expr_dict_list","rule":"TRule_expr_dict_list.Rule_expr1","sum":8186701},{"parent":"TRule_expr_dict_list.TBlock2","rule":"TRule_expr_dict_list.TBlock2.Rule_expr2","sum":6987187},{"parent":"TRule_expr_dict_list.TBlock2","rule":"TRule_expr_dict_list.TBlock2.Token1","sum":6987187},{"parent":"TRule_expr_dict_list.TBlock3","rule":"TRule_expr_dict_list.TBlock3.Block3","sum":35002670},{"parent":"TRule_expr_dict_list.TBlock3","rule":"TRule_expr_dict_list.TBlock3.Rule_expr2","sum":41922446},{"parent":"TRule_expr_dict_list.TBlock3","rule":"TRule_expr_dict_list.TBlock3.Token1","sum":41922446},{"parent":"TRule_expr_dict_list.TBlock3.TBlock3","rule":"TRule_expr_dict_list.TBlock3.TBlock3.Rule_expr2","sum":35002670},{"parent":"TRule_expr_dict_list.TBlock3.TBlock3","rule":"TRule_expr_dict_list.TBlock3.TBlock3.Token1","sum":35002670},{"parent":"TRule_expr_list","rule":
"TRule_expr_list.Block2","sum":41640838},{"parent":"TRule_expr_list","rule":"TRule_expr_list.Rule_expr1","sum":67094752},{"parent":"TRule_expr_list.TBlock2","rule":"TRule_expr_list.TBlock2.Rule_expr2","sum":205214370},{"parent":"TRule_expr_list.TBlock2","rule":"TRule_expr_list.TBlock2.Token1","sum":205214370},{"parent":"TRule_expr_struct_list","rule":"TRule_expr_struct_list.Block4","sum":12377549},{"parent":"TRule_expr_struct_list","rule":"TRule_expr_struct_list.Rule_expr1","sum":14291717},{"parent":"TRule_expr_struct_list","rule":"TRule_expr_struct_list.Rule_expr3","sum":14291717},{"parent":"TRule_expr_struct_list","rule":"TRule_expr_struct_list.Token2","sum":14291717},{"parent":"TRule_expr_struct_list.TBlock4","rule":"TRule_expr_struct_list.TBlock4.Rule_expr2","sum":30895823},{"parent":"TRule_expr_struct_list.TBlock4","rule":"TRule_expr_struct_list.TBlock4.Rule_expr4","sum":30895823},{"parent":"TRule_expr_struct_list.TBlock4","rule":"TRule_expr_struct_list.TBlock4.Token1","sum":30895823},{"parent":"TRule_expr_struct_list.TBlock4","rule":"TRule_expr_struct_list.TBlock4.Token3","sum":30895823},{"parent":"TRule_ext_order_by_clause","rule":"TRule_ext_order_by_clause.Block1","sum":2547590},{"parent":"TRule_ext_order_by_clause","rule":"TRule_ext_order_by_clause.Rule_order_by_clause2","sum":88178773},{"parent":"TRule_ext_order_by_clause.TBlock1","rule":"TRule_ext_order_by_clause.TBlock1.Token1","sum":2547590},{"parent":"TRule_flatten_by_arg","rule":"TRule_flatten_by_arg.Alt_flatten_by_arg1","sum":21251064},{"parent":"TRule_flatten_by_arg","rule":"TRule_flatten_by_arg.Alt_flatten_by_arg2","sum":4298431},{"parent":"TRule_flatten_by_arg.TAlt1","rule":"TRule_flatten_by_arg.TAlt1.Rule_named_column1","sum":21251064},{"parent":"TRule_flatten_by_arg.TAlt2","rule":"TRule_flatten_by_arg.TAlt2.Block3","sum":50342},{"parent":"TRule_flatten_by_arg.TAlt2","rule":"TRule_flatten_by_arg.TAlt2.Rule_named_expr_list2","sum":4298431},{"parent":"TRule_flatten_by_arg.TAlt2","rule":"TRule_flatt
en_by_arg.TAlt2.Token1","sum":4298431},{"parent":"TRule_flatten_by_arg.TAlt2","rule":"TRule_flatten_by_arg.TAlt2.Token4","sum":4298431},{"parent":"TRule_flatten_by_arg.TAlt2.TBlock3","rule":"TRule_flatten_by_arg.TAlt2.TBlock3.Token1","sum":50342},{"parent":"TRule_flatten_source","rule":"TRule_flatten_source.Block2","sum":33439229},{"parent":"TRule_flatten_source","rule":"TRule_flatten_source.Rule_named_single_source1","sum":1049358312},{"parent":"TRule_flatten_source.TBlock2","rule":"TRule_flatten_source.TBlock2.Block2","sum":33439229},{"parent":"TRule_flatten_source.TBlock2","rule":"TRule_flatten_source.TBlock2.Token1","sum":33439229},{"parent":"TRule_flatten_source.TBlock2.TBlock2","rule":"TRule_flatten_source.TBlock2.TBlock2.Alt1","sum":25549495},{"parent":"TRule_flatten_source.TBlock2.TBlock2","rule":"TRule_flatten_source.TBlock2.TBlock2.Alt2","sum":7889734},{"parent":"TRule_flatten_source.TBlock2.TBlock2.TAlt1","rule":"TRule_flatten_source.TBlock2.TBlock2.TAlt1.Block1","sum":19506949},{"parent":"TRule_flatten_source.TBlock2.TBlock2.TAlt1","rule":"TRule_flatten_source.TBlock2.TBlock2.TAlt1.Rule_flatten_by_arg3","sum":25549495},{"parent":"TRule_flatten_source.TBlock2.TBlock2.TAlt1","rule":"TRule_flatten_source.TBlock2.TBlock2.TAlt1.Token2","sum":25549495},{"parent":"TRule_flatten_source.TBlock2.TBlock2.TAlt1.TBlock1","rule":"TRule_flatten_source.TBlock2.TBlock2.TAlt1.TBlock1.Token1","sum":19506949},{"parent":"TRule_flatten_source.TBlock2.TBlock2.TAlt2","rule":"TRule_flatten_source.TBlock2.TBlock2.TAlt2.Token1","sum":7889734},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Block1","sum":6943161},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Block2","sum":7},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Block8","sum":64542},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Rule_bind_parameter4","sum":6943272},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Rule_do_stmt7","sum":6943272},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Rule_expr6","sum":
6943272},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Token3","sum":6943272},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Token5","sum":6943272},{"parent":"TRule_for_stmt.TBlock1","rule":"TRule_for_stmt.TBlock1.Token1","sum":6943161},{"parent":"TRule_for_stmt.TBlock2","rule":"TRule_for_stmt.TBlock2.Token1","sum":7},{"parent":"TRule_for_stmt.TBlock8","rule":"TRule_for_stmt.TBlock8.Rule_do_stmt2","sum":64542},{"parent":"TRule_for_stmt.TBlock8","rule":"TRule_for_stmt.TBlock8.Token1","sum":64542},{"parent":"TRule_group_by_clause","rule":"TRule_group_by_clause.Block2","sum":1598829},{"parent":"TRule_group_by_clause","rule":"TRule_group_by_clause.Block6","sum":6},{"parent":"TRule_group_by_clause","rule":"TRule_group_by_clause.Rule_grouping_element_list5","sum":137629459},{"parent":"TRule_group_by_clause","rule":"TRule_group_by_clause.Rule_opt_set_quantifier4","sum":137629459},{"parent":"TRule_group_by_clause","rule":"TRule_group_by_clause.Token1","sum":137629459},{"parent":"TRule_group_by_clause","rule":"TRule_group_by_clause.Token3","sum":137629459},{"parent":"TRule_group_by_clause.TBlock2","rule":"TRule_group_by_clause.TBlock2.Token1","sum":1598829},{"parent":"TRule_group_by_clause.TBlock6","rule":"TRule_group_by_clause.TBlock6.Rule_an_id2","sum":6},{"parent":"TRule_group_by_clause.TBlock6","rule":"TRule_group_by_clause.TBlock6.Token1","sum":6},{"parent":"TRule_grouping_element","rule":"TRule_grouping_element.Alt_grouping_element1","sum":299922312},{"parent":"TRule_grouping_element","rule":"TRule_grouping_element.Alt_grouping_element2","sum":61937},{"parent":"TRule_grouping_element","rule":"TRule_grouping_element.Alt_grouping_element3","sum":277764},{"parent":"TRule_grouping_element","rule":"TRule_grouping_element.Alt_grouping_element4","sum":92760},{"parent":"TRule_grouping_element","rule":"TRule_grouping_element.Alt_grouping_element5","sum":23},{"parent":"TRule_grouping_element.TAlt1","rule":"TRule_grouping_element.TAlt1.Rule_ordinary_grouping_set1","sum":29
9922312},{"parent":"TRule_grouping_element.TAlt2","rule":"TRule_grouping_element.TAlt2.Rule_rollup_list1","sum":61937},{"parent":"TRule_grouping_element.TAlt3","rule":"TRule_grouping_element.TAlt3.Rule_cube_list1","sum":277764},{"parent":"TRule_grouping_element.TAlt4","rule":"TRule_grouping_element.TAlt4.Rule_grouping_sets_specification1","sum":92760},{"parent":"TRule_grouping_element.TAlt5","rule":"TRule_grouping_element.TAlt5.Rule_hopping_window_specification1","sum":23},{"parent":"TRule_grouping_element_list","rule":"TRule_grouping_element_list.Block2","sum":62592674},{"parent":"TRule_grouping_element_list","rule":"TRule_grouping_element_list.Rule_grouping_element1","sum":137722219},{"parent":"TRule_grouping_element_list.TBlock2","rule":"TRule_grouping_element_list.TBlock2.Rule_grouping_element2","sum":162632577},{"parent":"TRule_grouping_element_list.TBlock2","rule":"TRule_grouping_element_list.TBlock2.Token1","sum":162632577},{"parent":"TRule_grouping_sets_specification","rule":"TRule_grouping_sets_specification.Rule_grouping_element_list4","sum":92760},{"parent":"TRule_grouping_sets_specification","rule":"TRule_grouping_sets_specification.Token1","sum":92760},{"parent":"TRule_grouping_sets_specification","rule":"TRule_grouping_sets_specification.Token2","sum":92760},{"parent":"TRule_grouping_sets_specification","rule":"TRule_grouping_sets_specification.Token3","sum":92760},{"parent":"TRule_grouping_sets_specification","rule":"TRule_grouping_sets_specification.Token5","sum":92760},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Rule_expr3","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Rule_expr5","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Rule_expr7","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Rule_expr9","sum":23},{"parent":"TRule_hopping_window_specifica
tion","rule":"TRule_hopping_window_specification.Token1","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Token10","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Token2","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Token4","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Token6","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Token8","sum":23},{"parent":"TRule_id","rule":"TRule_id.Alt_id1","sum":12063488533},{"parent":"TRule_id","rule":"TRule_id.Alt_id2","sum":528985260},{"parent":"TRule_id.TAlt1","rule":"TRule_id.TAlt1.Rule_identifier1","sum":12063488533},{"parent":"TRule_id.TAlt2","rule":"TRule_id.TAlt2.Rule_keyword1","sum":528985260},{"parent":"TRule_id_as_compat","rule":"TRule_id_as_compat.Alt_id_as_compat1","sum":2915239},{"parent":"TRule_id_as_compat","rule":"TRule_id_as_compat.Alt_id_as_compat2","sum":2434},{"parent":"TRule_id_as_compat.TAlt1","rule":"TRule_id_as_compat.TAlt1.Rule_identifier1","sum":2915239},{"parent":"TRule_id_as_compat.TAlt2","rule":"TRule_id_as_compat.TAlt2.Rule_keyword_as_compat1","sum":2434},{"parent":"TRule_id_expr","rule":"TRule_id_expr.Alt_id_expr1","sum":6894549449},{"parent":"TRule_id_expr","rule":"TRule_id_expr.Alt_id_expr2","sum":318138520},{"parent":"TRule_id_expr","rule":"TRule_id_expr.Alt_id_expr3","sum":447776},{"parent":"TRule_id_expr","rule":"TRule_id_expr.Alt_id_expr5","sum":35140433},{"parent":"TRule_id_expr","rule":"TRule_id_expr.Alt_id_expr6","sum":181847},{"parent":"TRule_id_expr.TAlt1","rule":"TRule_id_expr.TAlt1.Rule_identifier1","sum":6894549449},{"parent":"TRule_id_expr.TAlt2","rule":"TRule_id_expr.TAlt2.Rule_keyword_compat1","sum":318138520},{"parent":"TRule_id_expr.TAlt3","rule":"TRule_id_expr.TAlt3.Rule_keyword_alter_uncompat1","sum":447776},{"parent":"TRu
le_id_expr.TAlt5","rule":"TRule_id_expr.TAlt5.Rule_keyword_window_uncompat1","sum":35140433},{"parent":"TRule_id_expr.TAlt6","rule":"TRule_id_expr.TAlt6.Rule_keyword_hint_uncompat1","sum":181847},{"parent":"TRule_id_expr_in","rule":"TRule_id_expr_in.Alt_id_expr_in1","sum":5622636},{"parent":"TRule_id_expr_in","rule":"TRule_id_expr_in.Alt_id_expr_in2","sum":72371},{"parent":"TRule_id_expr_in","rule":"TRule_id_expr_in.Alt_id_expr_in4","sum":206},{"parent":"TRule_id_expr_in","rule":"TRule_id_expr_in.Alt_id_expr_in5","sum":53},{"parent":"TRule_id_expr_in.TAlt1","rule":"TRule_id_expr_in.TAlt1.Rule_identifier1","sum":5622636},{"parent":"TRule_id_expr_in.TAlt2","rule":"TRule_id_expr_in.TAlt2.Rule_keyword_compat1","sum":72371},{"parent":"TRule_id_expr_in.TAlt4","rule":"TRule_id_expr_in.TAlt4.Rule_keyword_window_uncompat1","sum":206},{"parent":"TRule_id_expr_in.TAlt5","rule":"TRule_id_expr_in.TAlt5.Rule_keyword_hint_uncompat1","sum":53},{"parent":"TRule_id_hint","rule":"TRule_id_hint.Alt_id_hint1","sum":153502519},{"parent":"TRule_id_hint","rule":"TRule_id_hint.Alt_id_hint2","sum":21826},{"parent":"TRule_id_hint","rule":"TRule_id_hint.Alt_id_hint3","sum":21826},{"parent":"TRule_id_hint.TAlt1","rule":"TRule_id_hint.TAlt1.Rule_identifier1","sum":153502519},{"parent":"TRule_id_hint.TAlt2","rule":"TRule_id_hint.TAlt2.Rule_keyword_compat1","sum":21826},{"parent":"TRule_id_hint.TAlt3","rule":"TRule_id_hint.TAlt3.Rule_keyword_expr_uncompat1","sum":21826},{"parent":"TRule_id_or_at","rule":"TRule_id_or_at.Block1","sum":4715132},{"parent":"TRule_id_or_at","rule":"TRule_id_or_at.Rule_an_id_or_type2","sum":133548148},{"parent":"TRule_id_or_at.TBlock1","rule":"TRule_id_or_at.TBlock1.Token1","sum":4715132},{"parent":"TRule_id_or_type","rule":"TRule_id_or_type.Alt_id_or_type1","sum":10495928453},{"parent":"TRule_id_or_type","rule":"TRule_id_or_type.Alt_id_or_type2","sum":11600652},{"parent":"TRule_id_or_type.TAlt1","rule":"TRule_id_or_type.TAlt1.Rule_id1","sum":10495928453},{"parent":"TRul
e_id_or_type.TAlt2","rule":"TRule_id_or_type.TAlt2.Rule_type_id1","sum":11600652},{"parent":"TRule_id_schema","rule":"TRule_id_schema.Alt_id_schema1","sum":1274},{"parent":"TRule_id_schema","rule":"TRule_id_schema.Alt_id_schema2","sum":12},{"parent":"TRule_id_schema.TAlt1","rule":"TRule_id_schema.TAlt1.Rule_identifier1","sum":1274},{"parent":"TRule_id_schema.TAlt2","rule":"TRule_id_schema.TAlt2.Rule_keyword_compat1","sum":12},{"parent":"TRule_id_table","rule":"TRule_id_table.Alt_id_table1","sum":329845374},{"parent":"TRule_id_table","rule":"TRule_id_table.Alt_id_table2","sum":638516},{"parent":"TRule_id_table","rule":"TRule_id_table.Alt_id_table3","sum":371},{"parent":"TRule_id_table","rule":"TRule_id_table.Alt_id_table4","sum":12499},{"parent":"TRule_id_table","rule":"TRule_id_table.Alt_id_table5","sum":4},{"parent":"TRule_id_table","rule":"TRule_id_table.Alt_id_table6","sum":1811},{"parent":"TRule_id_table","rule":"TRule_id_table.Alt_id_table7","sum":10},{"parent":"TRule_id_table.TAlt1","rule":"TRule_id_table.TAlt1.Rule_identifier1","sum":329845374},{"parent":"TRule_id_table.TAlt2","rule":"TRule_id_table.TAlt2.Rule_keyword_compat1","sum":638516},{"parent":"TRule_id_table.TAlt3","rule":"TRule_id_table.TAlt3.Rule_keyword_expr_uncompat1","sum":371},{"parent":"TRule_id_table.TAlt4","rule":"TRule_id_table.TAlt4.Rule_keyword_select_uncompat1","sum":12499},{"parent":"TRule_id_table.TAlt5","rule":"TRule_id_table.TAlt5.Rule_keyword_in_uncompat1","sum":4},{"parent":"TRule_id_table.TAlt6","rule":"TRule_id_table.TAlt6.Rule_keyword_window_uncompat1","sum":1811},{"parent":"TRule_id_table.TAlt7","rule":"TRule_id_table.TAlt7.Rule_keyword_hint_uncompat1","sum":10},{"parent":"TRule_id_table_or_type","rule":"TRule_id_table_or_type.Alt_id_table_or_type1","sum":330498586},{"parent":"TRule_id_table_or_type","rule":"TRule_id_table_or_type.Alt_id_table_or_type2","sum":4384},{"parent":"TRule_id_table_or_type.TAlt1","rule":"TRule_id_table_or_type.TAlt1.Rule_an_id_table1","sum":330498586},{
"parent":"TRule_id_table_or_type.TAlt2","rule":"TRule_id_table_or_type.TAlt2.Rule_type_id1","sum":4384},{"parent":"TRule_id_window","rule":"TRule_id_window.Alt_id_window1","sum":46200178},{"parent":"TRule_id_window","rule":"TRule_id_window.Alt_id_window2","sum":16509},{"parent":"TRule_id_window","rule":"TRule_id_window.Alt_id_window3","sum":248205},{"parent":"TRule_id_window","rule":"TRule_id_window.Alt_id_window5","sum":7827},{"parent":"TRule_id_window.TAlt1","rule":"TRule_id_window.TAlt1.Rule_identifier1","sum":46200178},{"parent":"TRule_id_window.TAlt2","rule":"TRule_id_window.TAlt2.Rule_keyword_compat1","sum":16509},{"parent":"TRule_id_window.TAlt3","rule":"TRule_id_window.TAlt3.Rule_keyword_expr_uncompat1","sum":248205},{"parent":"TRule_id_window.TAlt5","rule":"TRule_id_window.TAlt5.Rule_keyword_select_uncompat1","sum":7827},{"parent":"TRule_id_without","rule":"TRule_id_without.Alt_id_without1","sum":22845469},{"parent":"TRule_id_without","rule":"TRule_id_without.Alt_id_without2","sum":274994},{"parent":"TRule_id_without","rule":"TRule_id_without.Alt_id_without6","sum":1194},{"parent":"TRule_id_without","rule":"TRule_id_without.Alt_id_without7","sum":513},{"parent":"TRule_id_without.TAlt1","rule":"TRule_id_without.TAlt1.Rule_identifier1","sum":22845469},{"parent":"TRule_id_without.TAlt2","rule":"TRule_id_without.TAlt2.Rule_keyword_compat1","sum":274994},{"parent":"TRule_id_without.TAlt6","rule":"TRule_id_without.TAlt6.Rule_keyword_window_uncompat1","sum":1194},{"parent":"TRule_id_without.TAlt7","rule":"TRule_id_without.TAlt7.Rule_keyword_hint_uncompat1","sum":513},{"parent":"TRule_identifier","rule":"TRule_identifier.Token1","sum":20193701086},{"parent":"TRule_if_stmt","rule":"TRule_if_stmt.Block1","sum":8529510},{"parent":"TRule_if_stmt","rule":"TRule_if_stmt.Block5","sum":2956710},{"parent":"TRule_if_stmt","rule":"TRule_if_stmt.Rule_do_stmt4","sum":8529552},{"parent":"TRule_if_stmt","rule":"TRule_if_stmt.Rule_expr3","sum":8529552},{"parent":"TRule_if_stmt","r
ule":"TRule_if_stmt.Token2","sum":8529552},{"parent":"TRule_if_stmt.TBlock1","rule":"TRule_if_stmt.TBlock1.Token1","sum":8529510},{"parent":"TRule_if_stmt.TBlock5","rule":"TRule_if_stmt.TBlock5.Rule_do_stmt2","sum":2956710},{"parent":"TRule_if_stmt.TBlock5","rule":"TRule_if_stmt.TBlock5.Token1","sum":2956710},{"parent":"TRule_import_stmt","rule":"TRule_import_stmt.Rule_module_path2","sum":16448365},{"parent":"TRule_import_stmt","rule":"TRule_import_stmt.Rule_named_bind_parameter_list4","sum":16448365},{"parent":"TRule_import_stmt","rule":"TRule_import_stmt.Token1","sum":16448365},{"parent":"TRule_import_stmt","rule":"TRule_import_stmt.Token3","sum":16448365},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr1","sum":2254},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr10","sum":5088024},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr11","sum":455126},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr2","sum":36289544},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr3","sum":65822822},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr4","sum":44442},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr5","sum":4},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr6","sum":2951189},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr7","sum":9684399},{"parent":"TRule_in_atom_expr.TAlt1","rule":"TRule_in_atom_expr.TAlt1.Rule_literal_value1","sum":2254},{"parent":"TRule_in_atom_expr.TAlt10","rule":"TRule_in_atom_expr.TAlt10.Rule_list_literal1","sum":5088024},{"parent":"TRule_in_atom_expr.TAlt11","rule":"TRule_in_atom_expr.TAlt11.Rule_dict_literal1","sum":455126},{"parent":"TRule_in_atom_expr.TAlt2","rule":"TRule_in_atom_expr.TAlt2.Rule_bind_parameter1","sum":36289544},{"parent":"TRule_in_atom_expr.TAlt3","rule":"TRule_in_atom_expr.TAlt3.Rule_lambda1","sum"
:65822822},{"parent":"TRule_in_atom_expr.TAlt4","rule":"TRule_in_atom_expr.TAlt4.Rule_cast_expr1","sum":44442},{"parent":"TRule_in_atom_expr.TAlt5","rule":"TRule_in_atom_expr.TAlt5.Rule_case_expr1","sum":4},{"parent":"TRule_in_atom_expr.TAlt6","rule":"TRule_in_atom_expr.TAlt6.Block3","sum":2951189},{"parent":"TRule_in_atom_expr.TAlt6","rule":"TRule_in_atom_expr.TAlt6.Rule_an_id_or_type1","sum":2951189},{"parent":"TRule_in_atom_expr.TAlt6","rule":"TRule_in_atom_expr.TAlt6.Token2","sum":2951189},{"parent":"TRule_in_atom_expr.TAlt6.TBlock3","rule":"TRule_in_atom_expr.TAlt6.TBlock3.Alt1","sum":2951189},{"parent":"TRule_in_atom_expr.TAlt6.TBlock3.TAlt1","rule":"TRule_in_atom_expr.TAlt6.TBlock3.TAlt1.Rule_id_or_type1","sum":2951189},{"parent":"TRule_in_atom_expr.TAlt7","rule":"TRule_in_atom_expr.TAlt7.Rule_select_stmt2","sum":9684399},{"parent":"TRule_in_atom_expr.TAlt7","rule":"TRule_in_atom_expr.TAlt7.Token1","sum":9684399},{"parent":"TRule_in_atom_expr.TAlt7","rule":"TRule_in_atom_expr.TAlt7.Token3","sum":9684399},{"parent":"TRule_in_expr","rule":"TRule_in_expr.Rule_in_unary_subexpr1","sum":126033122},{"parent":"TRule_in_unary_casual_subexpr","rule":"TRule_in_unary_casual_subexpr.Block1","sum":126033070},{"parent":"TRule_in_unary_casual_subexpr","rule":"TRule_in_unary_casual_subexpr.Rule_unary_subexpr_suffix2","sum":126033070},{"parent":"TRule_in_unary_casual_subexpr.TBlock1","rule":"TRule_in_unary_casual_subexpr.TBlock1.Alt1","sum":5695266},{"parent":"TRule_in_unary_casual_subexpr.TBlock1","rule":"TRule_in_unary_casual_subexpr.TBlock1.Alt2","sum":120337804},{"parent":"TRule_in_unary_casual_subexpr.TBlock1.TAlt1","rule":"TRule_in_unary_casual_subexpr.TBlock1.TAlt1.Rule_id_expr_in1","sum":5695266},{"parent":"TRule_in_unary_casual_subexpr.TBlock1.TAlt2","rule":"TRule_in_unary_casual_subexpr.TBlock1.TAlt2.Rule_in_atom_expr1","sum":120337804},{"parent":"TRule_in_unary_subexpr","rule":"TRule_in_unary_subexpr.Alt_in_unary_subexpr1","sum":126033070},{"parent":"TRule_in_unary_
subexpr","rule":"TRule_in_unary_subexpr.Alt_in_unary_subexpr2","sum":52},{"parent":"TRule_in_unary_subexpr.TAlt1","rule":"TRule_in_unary_subexpr.TAlt1.Rule_in_unary_casual_subexpr1","sum":126033070},{"parent":"TRule_in_unary_subexpr.TAlt2","rule":"TRule_in_unary_subexpr.TAlt2.Rule_json_api_expr1","sum":52},{"parent":"TRule_inline_action","rule":"TRule_inline_action.Rule_define_action_or_subquery_body2","sum":13797310},{"parent":"TRule_inline_action","rule":"TRule_inline_action.Token1","sum":13797310},{"parent":"TRule_inline_action","rule":"TRule_inline_action.Token3","sum":13797310},{"parent":"TRule_inline_action","rule":"TRule_inline_action.Token4","sum":13797310},{"parent":"TRule_integer","rule":"TRule_integer.Token1","sum":2818111025},{"parent":"TRule_integer_or_bind","rule":"TRule_integer_or_bind.Alt_integer_or_bind1","sum":56294441},{"parent":"TRule_integer_or_bind","rule":"TRule_integer_or_bind.Alt_integer_or_bind2","sum":48839},{"parent":"TRule_integer_or_bind.TAlt1","rule":"TRule_integer_or_bind.TAlt1.Rule_integer1","sum":56294441},{"parent":"TRule_integer_or_bind.TAlt2","rule":"TRule_integer_or_bind.TAlt2.Rule_bind_parameter1","sum":48839},{"parent":"TRule_into_simple_table_ref","rule":"TRule_into_simple_table_ref.Block2","sum":25051},{"parent":"TRule_into_simple_table_ref","rule":"TRule_into_simple_table_ref.Rule_simple_table_ref1","sum":208016439},{"parent":"TRule_into_simple_table_ref.TBlock2","rule":"TRule_into_simple_table_ref.TBlock2.Rule_pure_column_list3","sum":25051},{"parent":"TRule_into_simple_table_ref.TBlock2","rule":"TRule_into_simple_table_ref.TBlock2.Token1","sum":25051},{"parent":"TRule_into_simple_table_ref.TBlock2","rule":"TRule_into_simple_table_ref.TBlock2.Token2","sum":25051},{"parent":"TRule_into_table_stmt","rule":"TRule_into_table_stmt.Block1","sum":208016439},{"parent":"TRule_into_table_stmt","rule":"TRule_into_table_stmt.Rule_into_simple_table_ref3","sum":208016439},{"parent":"TRule_into_table_stmt","rule":"TRule_into_table_stmt.R
ule_into_values_source4","sum":208016439},{"parent":"TRule_into_table_stmt","rule":"TRule_into_table_stmt.Token2","sum":208016439},{"parent":"TRule_into_table_stmt.TBlock1","rule":"TRule_into_table_stmt.TBlock1.Alt1","sum":207904214},{"parent":"TRule_into_table_stmt.TBlock1","rule":"TRule_into_table_stmt.TBlock1.Alt5","sum":112224},{"parent":"TRule_into_table_stmt.TBlock1","rule":"TRule_into_table_stmt.TBlock1.Alt6","sum":1},{"parent":"TRule_into_table_stmt.TBlock1.TAlt1","rule":"TRule_into_table_stmt.TBlock1.TAlt1.Token1","sum":207904214},{"parent":"TRule_into_table_stmt.TBlock1.TAlt5","rule":"TRule_into_table_stmt.TBlock1.TAlt5.Token1","sum":112224},{"parent":"TRule_into_table_stmt.TBlock1.TAlt6","rule":"TRule_into_table_stmt.TBlock1.TAlt6.Token1","sum":1},{"parent":"TRule_into_values_source","rule":"TRule_into_values_source.Alt_into_values_source1","sum":208016439},{"parent":"TRule_into_values_source.TAlt1","rule":"TRule_into_values_source.TAlt1.Block1","sum":4453941},{"parent":"TRule_into_values_source.TAlt1","rule":"TRule_into_values_source.TAlt1.Rule_values_source2","sum":208016439},{"parent":"TRule_into_values_source.TAlt1.TBlock1","rule":"TRule_into_values_source.TAlt1.TBlock1.Rule_pure_column_list1","sum":4453941},{"parent":"TRule_invoke_expr","rule":"TRule_invoke_expr.Block2","sum":2751646800},{"parent":"TRule_invoke_expr","rule":"TRule_invoke_expr.Rule_invoke_expr_tail4","sum":2865255245},{"parent":"TRule_invoke_expr","rule":"TRule_invoke_expr.Token1","sum":2865255245},{"parent":"TRule_invoke_expr","rule":"TRule_invoke_expr.Token3","sum":2865255245},{"parent":"TRule_invoke_expr.TBlock2","rule":"TRule_invoke_expr.TBlock2.Alt1","sum":2692693481},{"parent":"TRule_invoke_expr.TBlock2","rule":"TRule_invoke_expr.TBlock2.Alt2","sum":58953319},{"parent":"TRule_invoke_expr.TBlock2.TAlt1","rule":"TRule_invoke_expr.TBlock2.TAlt1.Block3","sum":9565798},{"parent":"TRule_invoke_expr.TBlock2.TAlt1","rule":"TRule_invoke_expr.TBlock2.TAlt1.Rule_named_expr_list2","sum":269
2693481},{"parent":"TRule_invoke_expr.TBlock2.TAlt1","rule":"TRule_invoke_expr.TBlock2.TAlt1.Rule_opt_set_quantifier1","sum":2692693481},{"parent":"TRule_invoke_expr.TBlock2.TAlt1.TBlock3","rule":"TRule_invoke_expr.TBlock2.TAlt1.TBlock3.Token1","sum":9565798},{"parent":"TRule_invoke_expr.TBlock2.TAlt2","rule":"TRule_invoke_expr.TBlock2.TAlt2.Token1","sum":58953319},{"parent":"TRule_invoke_expr_tail","rule":"TRule_invoke_expr_tail.Block1","sum":7082217},{"parent":"TRule_invoke_expr_tail","rule":"TRule_invoke_expr_tail.Block2","sum":45101859},{"parent":"TRule_invoke_expr_tail.TBlock1","rule":"TRule_invoke_expr_tail.TBlock1.Alt1","sum":7082217},{"parent":"TRule_invoke_expr_tail.TBlock1.TAlt1","rule":"TRule_invoke_expr_tail.TBlock1.TAlt1.Rule_null_treatment1","sum":7082217},{"parent":"TRule_invoke_expr_tail.TBlock2","rule":"TRule_invoke_expr_tail.TBlock2.Rule_window_name_or_specification2","sum":45101859},{"parent":"TRule_invoke_expr_tail.TBlock2","rule":"TRule_invoke_expr_tail.TBlock2.Token1","sum":45101859},{"parent":"TRule_join_constraint","rule":"TRule_join_constraint.Alt_join_constraint1","sum":172608937},{"parent":"TRule_join_constraint","rule":"TRule_join_constraint.Alt_join_constraint2","sum":32377040},{"parent":"TRule_join_constraint.TAlt1","rule":"TRule_join_constraint.TAlt1.Rule_expr2","sum":172608937},{"parent":"TRule_join_constraint.TAlt1","rule":"TRule_join_constraint.TAlt1.Token1","sum":172608937},{"parent":"TRule_join_constraint.TAlt2","rule":"TRule_join_constraint.TAlt2.Rule_pure_column_or_named_list2","sum":32377040},{"parent":"TRule_join_constraint.TAlt2","rule":"TRule_join_constraint.TAlt2.Token1","sum":32377040},{"parent":"TRule_join_op","rule":"TRule_join_op.Alt_join_op1","sum":27521},{"parent":"TRule_join_op","rule":"TRule_join_op.Alt_join_op2","sum":208029004},{"parent":"TRule_join_op.TAlt1","rule":"TRule_join_op.TAlt1.Token1","sum":27521},{"parent":"TRule_join_op.TAlt2","rule":"TRule_join_op.TAlt2.Block2","sum":208029004},{"parent":"TRule_join_o
p.TAlt2","rule":"TRule_join_op.TAlt2.Token3","sum":208029004},{"parent":"TRule_join_op.TAlt2.TBlock2","rule":"TRule_join_op.TAlt2.TBlock2.Alt1","sum":174370818},{"parent":"TRule_join_op.TAlt2.TBlock2","rule":"TRule_join_op.TAlt2.TBlock2.Alt2","sum":30615159},{"parent":"TRule_join_op.TAlt2.TBlock2","rule":"TRule_join_op.TAlt2.TBlock2.Alt3","sum":3043027},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.Block1","sum":126366173},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.Block2","sum":293102},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.Alt1","sum":118497224},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.Alt2","sum":1699355},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.Alt3","sum":285361},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.Alt4","sum":5884233},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt1.Block2","sum":19629114},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt1.Token1","sum":118497224},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt1.TBlock2","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt1.TBlock2.Token1","sum":19629114},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt2","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt2.Block2","sum":518795},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt2","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt2.Token1","sum":1699355},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt2.TBlock2","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt2.TBlock2.Token1","sum":518795},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1
.TAlt3","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt3.Token1","sum":285361},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt4","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt4.Token1","sum":5884233},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock2","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock2.Token1","sum":293102},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt2","rule":"TRule_join_op.TAlt2.TBlock2.TAlt2.Token1","sum":30615159},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt3","rule":"TRule_join_op.TAlt2.TBlock2.TAlt3.Token1","sum":3043027},{"parent":"TRule_join_source","rule":"TRule_join_source.Block1","sum":598677},{"parent":"TRule_join_source","rule":"TRule_join_source.Block3","sum":145029172},{"parent":"TRule_join_source","rule":"TRule_join_source.Rule_flatten_source2","sum":841301787},{"parent":"TRule_join_source.TBlock1","rule":"TRule_join_source.TBlock1.Token1","sum":598677},{"parent":"TRule_join_source.TBlock3","rule":"TRule_join_source.TBlock3.Block2","sum":13957383},{"parent":"TRule_join_source.TBlock3","rule":"TRule_join_source.TBlock3.Block4","sum":204985977},{"parent":"TRule_join_source.TBlock3","rule":"TRule_join_source.TBlock3.Rule_flatten_source3","sum":208056525},{"parent":"TRule_join_source.TBlock3","rule":"TRule_join_source.TBlock3.Rule_join_op1","sum":208056525},{"parent":"TRule_join_source.TBlock3.TBlock2","rule":"TRule_join_source.TBlock3.TBlock2.Token1","sum":13957383},{"parent":"TRule_join_source.TBlock3.TBlock4","rule":"TRule_join_source.TBlock3.TBlock4.Rule_join_constraint1","sum":204985977},{"parent":"TRule_json_api_expr","rule":"TRule_json_api_expr.Alt_json_api_expr1","sum":5853150},{"parent":"TRule_json_api_expr","rule":"TRule_json_api_expr.Alt_json_api_expr2","sum":192353},{"parent":"TRule_json_api_expr","rule":"TRule_json_api_expr.Alt_json_api_expr3","sum":318814},{"parent":"TRule_json_api_expr.TAlt1","rule":"TRule_json_api_expr.TAlt1.Rule_json_value1","sum":5853150},{"parent":"TRule_json_api_expr.TAlt2","rul
e":"TRule_json_api_expr.TAlt2.Rule_json_exists1","sum":192353},{"parent":"TRule_json_api_expr.TAlt3","rule":"TRule_json_api_expr.TAlt3.Rule_json_query1","sum":318814},{"parent":"TRule_json_case_handler","rule":"TRule_json_case_handler.Alt_json_case_handler1","sum":8070},{"parent":"TRule_json_case_handler","rule":"TRule_json_case_handler.Alt_json_case_handler2","sum":14342},{"parent":"TRule_json_case_handler","rule":"TRule_json_case_handler.Alt_json_case_handler3","sum":201249},{"parent":"TRule_json_case_handler.TAlt1","rule":"TRule_json_case_handler.TAlt1.Token1","sum":8070},{"parent":"TRule_json_case_handler.TAlt2","rule":"TRule_json_case_handler.TAlt2.Token1","sum":14342},{"parent":"TRule_json_case_handler.TAlt3","rule":"TRule_json_case_handler.TAlt3.Rule_expr2","sum":201249},{"parent":"TRule_json_case_handler.TAlt3","rule":"TRule_json_case_handler.TAlt3.Token1","sum":201249},{"parent":"TRule_json_common_args","rule":"TRule_json_common_args.Block4","sum":20977},{"parent":"TRule_json_common_args","rule":"TRule_json_common_args.Rule_expr1","sum":6364317},{"parent":"TRule_json_common_args","rule":"TRule_json_common_args.Rule_jsonpath_spec3","sum":6364317},{"parent":"TRule_json_common_args","rule":"TRule_json_common_args.Token2","sum":6364317},{"parent":"TRule_json_common_args.TBlock4","rule":"TRule_json_common_args.TBlock4.Rule_json_variables2","sum":20977},{"parent":"TRule_json_common_args.TBlock4","rule":"TRule_json_common_args.TBlock4.Token1","sum":20977},{"parent":"TRule_json_exists","rule":"TRule_json_exists.Block4","sum":47},{"parent":"TRule_json_exists","rule":"TRule_json_exists.Rule_json_common_args3","sum":192353},{"parent":"TRule_json_exists","rule":"TRule_json_exists.Token1","sum":192353},{"parent":"TRule_json_exists","rule":"TRule_json_exists.Token2","sum":192353},{"parent":"TRule_json_exists","rule":"TRule_json_exists.Token5","sum":192353},{"parent":"TRule_json_exists.TBlock4","rule":"TRule_json_exists.TBlock4.Rule_json_exists_handler1","sum":47},{"paren
t":"TRule_json_exists_handler","rule":"TRule_json_exists_handler.Token1","sum":47},{"parent":"TRule_json_exists_handler","rule":"TRule_json_exists_handler.Token2","sum":47},{"parent":"TRule_json_exists_handler","rule":"TRule_json_exists_handler.Token3","sum":47},{"parent":"TRule_json_query","rule":"TRule_json_query.Block4","sum":190849},{"parent":"TRule_json_query","rule":"TRule_json_query.Block5","sum":86},{"parent":"TRule_json_query","rule":"TRule_json_query.Block6","sum":4096},{"parent":"TRule_json_query","rule":"TRule_json_query.Rule_json_common_args3","sum":318814},{"parent":"TRule_json_query","rule":"TRule_json_query.Token1","sum":318814},{"parent":"TRule_json_query","rule":"TRule_json_query.Token2","sum":318814},{"parent":"TRule_json_query","rule":"TRule_json_query.Token7","sum":318814},{"parent":"TRule_json_query.TBlock4","rule":"TRule_json_query.TBlock4.Rule_json_query_wrapper1","sum":190849},{"parent":"TRule_json_query.TBlock4","rule":"TRule_json_query.TBlock4.Token2","sum":190849},{"parent":"TRule_json_query.TBlock5","rule":"TRule_json_query.TBlock5.Rule_json_query_handler1","sum":86},{"parent":"TRule_json_query.TBlock5","rule":"TRule_json_query.TBlock5.Token2","sum":86},{"parent":"TRule_json_query.TBlock5","rule":"TRule_json_query.TBlock5.Token3","sum":86},{"parent":"TRule_json_query.TBlock6","rule":"TRule_json_query.TBlock6.Rule_json_query_handler1","sum":4096},{"parent":"TRule_json_query.TBlock6","rule":"TRule_json_query.TBlock6.Token2","sum":4096},{"parent":"TRule_json_query.TBlock6","rule":"TRule_json_query.TBlock6.Token3","sum":4096},{"parent":"TRule_json_query_handler","rule":"TRule_json_query_handler.Alt_json_query_handler1","sum":1641},{"parent":"TRule_json_query_handler","rule":"TRule_json_query_handler.Alt_json_query_handler2","sum":26},{"parent":"TRule_json_query_handler","rule":"TRule_json_query_handler.Alt_json_query_handler3","sum":2296},{"parent":"TRule_json_query_handler","rule":"TRule_json_query_handler.Alt_json_query_handler4","sum":219
},{"parent":"TRule_json_query_handler.TAlt1","rule":"TRule_json_query_handler.TAlt1.Token1","sum":1641},{"parent":"TRule_json_query_handler.TAlt2","rule":"TRule_json_query_handler.TAlt2.Token1","sum":26},{"parent":"TRule_json_query_handler.TAlt3","rule":"TRule_json_query_handler.TAlt3.Token1","sum":2296},{"parent":"TRule_json_query_handler.TAlt3","rule":"TRule_json_query_handler.TAlt3.Token2","sum":2296},{"parent":"TRule_json_query_handler.TAlt4","rule":"TRule_json_query_handler.TAlt4.Token1","sum":219},{"parent":"TRule_json_query_handler.TAlt4","rule":"TRule_json_query_handler.TAlt4.Token2","sum":219},{"parent":"TRule_json_query_wrapper","rule":"TRule_json_query_wrapper.Alt_json_query_wrapper1","sum":46},{"parent":"TRule_json_query_wrapper","rule":"TRule_json_query_wrapper.Alt_json_query_wrapper2","sum":190803},{"parent":"TRule_json_query_wrapper.TAlt1","rule":"TRule_json_query_wrapper.TAlt1.Block2","sum":10},{"parent":"TRule_json_query_wrapper.TAlt1","rule":"TRule_json_query_wrapper.TAlt1.Token1","sum":46},{"parent":"TRule_json_query_wrapper.TAlt1.TBlock2","rule":"TRule_json_query_wrapper.TAlt1.TBlock2.Token1","sum":10},{"parent":"TRule_json_query_wrapper.TAlt2","rule":"TRule_json_query_wrapper.TAlt2.Block2","sum":166762},{"parent":"TRule_json_query_wrapper.TAlt2","rule":"TRule_json_query_wrapper.TAlt2.Block3","sum":31743},{"parent":"TRule_json_query_wrapper.TAlt2","rule":"TRule_json_query_wrapper.TAlt2.Token1","sum":190803},{"parent":"TRule_json_query_wrapper.TAlt2.TBlock2","rule":"TRule_json_query_wrapper.TAlt2.TBlock2.Token1","sum":166762},{"parent":"TRule_json_query_wrapper.TAlt2.TBlock3","rule":"TRule_json_query_wrapper.TAlt2.TBlock3.Token1","sum":31743},{"parent":"TRule_json_value","rule":"TRule_json_value.Block4","sum":1332940},{"parent":"TRule_json_value","rule":"TRule_json_value.Block5","sum":203540},{"parent":"TRule_json_value","rule":"TRule_json_value.Rule_json_common_args3","sum":5853150},{"parent":"TRule_json_value","rule":"TRule_json_value.Token1","s
um":5853150},{"parent":"TRule_json_value","rule":"TRule_json_value.Token2","sum":5853150},{"parent":"TRule_json_value","rule":"TRule_json_value.Token6","sum":5853150},{"parent":"TRule_json_value.TBlock4","rule":"TRule_json_value.TBlock4.Rule_type_name_simple2","sum":1332940},{"parent":"TRule_json_value.TBlock4","rule":"TRule_json_value.TBlock4.Token1","sum":1332940},{"parent":"TRule_json_value.TBlock5","rule":"TRule_json_value.TBlock5.Rule_json_case_handler1","sum":223661},{"parent":"TRule_json_value.TBlock5","rule":"TRule_json_value.TBlock5.Token2","sum":223661},{"parent":"TRule_json_value.TBlock5","rule":"TRule_json_value.TBlock5.Token3","sum":223661},{"parent":"TRule_json_variable","rule":"TRule_json_variable.Rule_expr1","sum":20984},{"parent":"TRule_json_variable","rule":"TRule_json_variable.Rule_json_variable_name3","sum":20984},{"parent":"TRule_json_variable","rule":"TRule_json_variable.Token2","sum":20984},{"parent":"TRule_json_variable_name","rule":"TRule_json_variable_name.Alt_json_variable_name1","sum":18797},{"parent":"TRule_json_variable_name","rule":"TRule_json_variable_name.Alt_json_variable_name2","sum":2187},{"parent":"TRule_json_variable_name.TAlt1","rule":"TRule_json_variable_name.TAlt1.Rule_id_expr1","sum":18797},{"parent":"TRule_json_variable_name.TAlt2","rule":"TRule_json_variable_name.TAlt2.Token1","sum":2187},{"parent":"TRule_json_variables","rule":"TRule_json_variables.Block2","sum":7},{"parent":"TRule_json_variables","rule":"TRule_json_variables.Rule_json_variable1","sum":20977},{"parent":"TRule_json_variables.TBlock2","rule":"TRule_json_variables.TBlock2.Rule_json_variable2","sum":7},{"parent":"TRule_json_variables.TBlock2","rule":"TRule_json_variables.TBlock2.Token1","sum":7},{"parent":"TRule_jsonpath_spec","rule":"TRule_jsonpath_spec.Token1","sum":6364317},{"parent":"TRule_key_expr","rule":"TRule_key_expr.Rule_expr2","sum":172134618},{"parent":"TRule_key_expr","rule":"TRule_key_expr.Token1","sum":172134618},{"parent":"TRule_key_expr","rul
e":"TRule_key_expr.Token3","sum":172134618},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword1","sum":478913202},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword2","sum":33222498},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword3","sum":486056},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword4","sum":9186563},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword5","sum":772458},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword6","sum":23762},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword7","sum":3881437},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword8","sum":2499284},{"parent":"TRule_keyword.TAlt1","rule":"TRule_keyword.TAlt1.Rule_keyword_compat1","sum":478913202},{"parent":"TRule_keyword.TAlt2","rule":"TRule_keyword.TAlt2.Rule_keyword_expr_uncompat1","sum":33222498},{"parent":"TRule_keyword.TAlt3","rule":"TRule_keyword.TAlt3.Rule_keyword_table_uncompat1","sum":486056},{"parent":"TRule_keyword.TAlt4","rule":"TRule_keyword.TAlt4.Rule_keyword_select_uncompat1","sum":9186563},{"parent":"TRule_keyword.TAlt5","rule":"TRule_keyword.TAlt5.Rule_keyword_alter_uncompat1","sum":772458},{"parent":"TRule_keyword.TAlt6","rule":"TRule_keyword.TAlt6.Rule_keyword_in_uncompat1","sum":23762},{"parent":"TRule_keyword.TAlt7","rule":"TRule_keyword.TAlt7.Rule_keyword_window_uncompat1","sum":3881437},{"parent":"TRule_keyword.TAlt8","rule":"TRule_keyword.TAlt8.Rule_keyword_hint_uncompat1","sum":2499284},{"parent":"TRule_keyword_alter_uncompat","rule":"TRule_keyword_alter_uncompat.Token1","sum":1220234},{"parent":"TRule_keyword_as_compat","rule":"TRule_keyword_as_compat.Token1","sum":2434},{"parent":"TRule_keyword_compat","rule":"TRule_keyword_compat.Token1","sum":798075950},{"parent":"TRule_keyword_expr_uncompat","rule":"TRule_keyword_expr_uncompat.Token1","sum":33492900},{"parent":"TRule_keyword_hint_uncompat","rule":"TRule_keyword_hint_uncompat.Token1","sum":2681707},{"parent":"TRule_keyword_in_uncompat",
"rule":"TRule_keyword_in_uncompat.Token1","sum":23766},{"parent":"TRule_keyword_select_uncompat","rule":"TRule_keyword_select_uncompat.Token1","sum":9206889},{"parent":"TRule_keyword_table_uncompat","rule":"TRule_keyword_table_uncompat.Token1","sum":486056},{"parent":"TRule_keyword_window_uncompat","rule":"TRule_keyword_window_uncompat.Token1","sum":39025081},{"parent":"TRule_lambda","rule":"TRule_lambda.Block2","sum":207932792},{"parent":"TRule_lambda","rule":"TRule_lambda.Rule_smart_parenthesis1","sum":510536515},{"parent":"TRule_lambda.TBlock2","rule":"TRule_lambda.TBlock2.Block2","sum":207932792},{"parent":"TRule_lambda.TBlock2","rule":"TRule_lambda.TBlock2.Token1","sum":207932792},{"parent":"TRule_lambda.TBlock2.TBlock2","rule":"TRule_lambda.TBlock2.TBlock2.Alt1","sum":42426124},{"parent":"TRule_lambda.TBlock2.TBlock2","rule":"TRule_lambda.TBlock2.TBlock2.Alt2","sum":165506668},{"parent":"TRule_lambda.TBlock2.TBlock2.TAlt1","rule":"TRule_lambda.TBlock2.TBlock2.TAlt1.Rule_expr2","sum":42426124},{"parent":"TRule_lambda.TBlock2.TBlock2.TAlt1","rule":"TRule_lambda.TBlock2.TBlock2.TAlt1.Token1","sum":42426124},{"parent":"TRule_lambda.TBlock2.TBlock2.TAlt1","rule":"TRule_lambda.TBlock2.TBlock2.TAlt1.Token3","sum":42426124},{"parent":"TRule_lambda.TBlock2.TBlock2.TAlt2","rule":"TRule_lambda.TBlock2.TBlock2.TAlt2.Rule_lambda_body2","sum":165506668},{"parent":"TRule_lambda.TBlock2.TBlock2.TAlt2","rule":"TRule_lambda.TBlock2.TBlock2.TAlt2.Token1","sum":165506668},{"parent":"TRule_lambda.TBlock2.TBlock2.TAlt2","rule":"TRule_lambda.TBlock2.TBlock2.TAlt2.Token3","sum":165506668},{"parent":"TRule_lambda_body","rule":"TRule_lambda_body.Block1","sum":160},{"parent":"TRule_lambda_body","rule":"TRule_lambda_body.Block2","sum":34592028},{"parent":"TRule_lambda_body","rule":"TRule_lambda_body.Block5","sum":87262227},{"parent":"TRule_lambda_body","rule":"TRule_lambda_body.Rule_expr4","sum":165506668},{"parent":"TRule_lambda_body","rule":"TRule_lambda_body.Token3","sum":165506668},{
"parent":"TRule_lambda_body.TBlock1","rule":"TRule_lambda_body.TBlock1.Token1","sum":160},{"parent":"TRule_lambda_body.TBlock2","rule":"TRule_lambda_body.TBlock2.Block2","sum":63344785},{"parent":"TRule_lambda_body.TBlock2","rule":"TRule_lambda_body.TBlock2.Rule_lambda_stmt1","sum":63344785},{"parent":"TRule_lambda_body.TBlock2.TBlock2","rule":"TRule_lambda_body.TBlock2.TBlock2.Token1","sum":63345921},{"parent":"TRule_lambda_body.TBlock5","rule":"TRule_lambda_body.TBlock5.Token1","sum":87271135},{"parent":"TRule_lambda_stmt","rule":"TRule_lambda_stmt.Alt_lambda_stmt1","sum":63342259},{"parent":"TRule_lambda_stmt","rule":"TRule_lambda_stmt.Alt_lambda_stmt2","sum":2526},{"parent":"TRule_lambda_stmt.TAlt1","rule":"TRule_lambda_stmt.TAlt1.Rule_named_nodes_stmt1","sum":63342259},{"parent":"TRule_lambda_stmt.TAlt2","rule":"TRule_lambda_stmt.TAlt2.Rule_import_stmt1","sum":2526},{"parent":"TRule_list_literal","rule":"TRule_list_literal.Block2","sum":35129262},{"parent":"TRule_list_literal","rule":"TRule_list_literal.Block3","sum":2261896},{"parent":"TRule_list_literal","rule":"TRule_list_literal.Token1","sum":40793166},{"parent":"TRule_list_literal","rule":"TRule_list_literal.Token4","sum":40793166},{"parent":"TRule_list_literal.TBlock2","rule":"TRule_list_literal.TBlock2.Rule_expr_list1","sum":35129262},{"parent":"TRule_list_literal.TBlock3","rule":"TRule_list_literal.TBlock3.Token1","sum":2261896},{"parent":"TRule_literal_value","rule":"TRule_literal_value.Alt_literal_value1","sum":2761816583},{"parent":"TRule_literal_value","rule":"TRule_literal_value.Alt_literal_value10","sum":2},{"parent":"TRule_literal_value","rule":"TRule_literal_value.Alt_literal_value2","sum":108407914},{"parent":"TRule_literal_value","rule":"TRule_literal_value.Alt_literal_value3","sum":2114359138},{"parent":"TRule_literal_value","rule":"TRule_literal_value.Alt_literal_value5","sum":79086090},{"parent":"TRule_literal_value","rule":"TRule_literal_value.Alt_literal_value9","sum":103662442},{"parent"
:"TRule_literal_value.TAlt1","rule":"TRule_literal_value.TAlt1.Rule_integer1","sum":2761816583},{"parent":"TRule_literal_value.TAlt10","rule":"TRule_literal_value.TAlt10.Token1","sum":2},{"parent":"TRule_literal_value.TAlt2","rule":"TRule_literal_value.TAlt2.Rule_real1","sum":108407914},{"parent":"TRule_literal_value.TAlt3","rule":"TRule_literal_value.TAlt3.Token1","sum":2114359138},{"parent":"TRule_literal_value.TAlt5","rule":"TRule_literal_value.TAlt5.Token1","sum":79086090},{"parent":"TRule_literal_value.TAlt9","rule":"TRule_literal_value.TAlt9.Rule_bool_value1","sum":103662442},{"parent":"TRule_match_op","rule":"TRule_match_op.Token1","sum":52031343},{"parent":"TRule_module_path","rule":"TRule_module_path.Block3","sum":356512},{"parent":"TRule_module_path","rule":"TRule_module_path.Rule_an_id2","sum":16448365},{"parent":"TRule_module_path.TBlock3","rule":"TRule_module_path.TBlock3.Rule_an_id2","sum":1062672},{"parent":"TRule_module_path.TBlock3","rule":"TRule_module_path.TBlock3.Token1","sum":1062672},{"parent":"TRule_mul_subexpr","rule":"TRule_mul_subexpr.Block2","sum":114586727},{"parent":"TRule_mul_subexpr","rule":"TRule_mul_subexpr.Rule_con_subexpr1","sum":15746417816},{"parent":"TRule_mul_subexpr.TBlock2","rule":"TRule_mul_subexpr.TBlock2.Rule_con_subexpr2","sum":228184806},{"parent":"TRule_mul_subexpr.TBlock2","rule":"TRule_mul_subexpr.TBlock2.Token1","sum":228184806},{"parent":"TRule_named_bind_parameter","rule":"TRule_named_bind_parameter.Block2","sum":124116},{"parent":"TRule_named_bind_parameter","rule":"TRule_named_bind_parameter.Rule_bind_parameter1","sum":37659580},{"parent":"TRule_named_bind_parameter.TBlock2","rule":"TRule_named_bind_parameter.TBlock2.Rule_bind_parameter2","sum":124116},{"parent":"TRule_named_bind_parameter.TBlock2","rule":"TRule_named_bind_parameter.TBlock2.Token1","sum":124116},{"parent":"TRule_named_bind_parameter_list","rule":"TRule_named_bind_parameter_list.Block2","sum":6985500},{"parent":"TRule_named_bind_parameter_list","r
ule":"TRule_named_bind_parameter_list.Rule_named_bind_parameter1","sum":16448365},{"parent":"TRule_named_bind_parameter_list.TBlock2","rule":"TRule_named_bind_parameter_list.TBlock2.Rule_named_bind_parameter2","sum":21211215},{"parent":"TRule_named_bind_parameter_list.TBlock2","rule":"TRule_named_bind_parameter_list.TBlock2.Token1","sum":21211215},{"parent":"TRule_named_column","rule":"TRule_named_column.Block2","sum":7134215},{"parent":"TRule_named_column","rule":"TRule_named_column.Rule_column_name1","sum":21251064},{"parent":"TRule_named_column.TBlock2","rule":"TRule_named_column.TBlock2.Rule_an_id2","sum":7134215},{"parent":"TRule_named_column.TBlock2","rule":"TRule_named_column.TBlock2.Token1","sum":7134215},{"parent":"TRule_named_expr","rule":"TRule_named_expr.Block2","sum":155643888},{"parent":"TRule_named_expr","rule":"TRule_named_expr.Rule_expr1","sum":7192172357},{"parent":"TRule_named_expr.TBlock2","rule":"TRule_named_expr.TBlock2.Rule_an_id_or_type2","sum":155643888},{"parent":"TRule_named_expr.TBlock2","rule":"TRule_named_expr.TBlock2.Token1","sum":155643888},{"parent":"TRule_named_expr_list","rule":"TRule_named_expr_list.Block2","sum":1177219617},{"parent":"TRule_named_expr_list","rule":"TRule_named_expr_list.Rule_named_expr1","sum":3225305268},{"parent":"TRule_named_expr_list.TBlock2","rule":"TRule_named_expr_list.TBlock2.Rule_named_expr2","sum":3473260447},{"parent":"TRule_named_expr_list.TBlock2","rule":"TRule_named_expr_list.TBlock2.Token1","sum":3473260447},{"parent":"TRule_named_nodes_stmt","rule":"TRule_named_nodes_stmt.Block3","sum":1002566900},{"parent":"TRule_named_nodes_stmt","rule":"TRule_named_nodes_stmt.Rule_bind_parameter_list1","sum":1002566900},{"parent":"TRule_named_nodes_stmt","rule":"TRule_named_nodes_stmt.Token2","sum":1002566900},{"parent":"TRule_named_nodes_stmt.TBlock3","rule":"TRule_named_nodes_stmt.TBlock3.Alt1","sum":673827946},{"parent":"TRule_named_nodes_stmt.TBlock3","rule":"TRule_named_nodes_stmt.TBlock3.Alt2","sum":32873
8954},{"parent":"TRule_named_nodes_stmt.TBlock3.TAlt1","rule":"TRule_named_nodes_stmt.TBlock3.TAlt1.Rule_expr1","sum":673827946},{"parent":"TRule_named_nodes_stmt.TBlock3.TAlt2","rule":"TRule_named_nodes_stmt.TBlock3.TAlt2.Rule_subselect_stmt1","sum":328738954},{"parent":"TRule_named_single_source","rule":"TRule_named_single_source.Block2","sum":1},{"parent":"TRule_named_single_source","rule":"TRule_named_single_source.Block3","sum":441207217},{"parent":"TRule_named_single_source","rule":"TRule_named_single_source.Block4","sum":402903},{"parent":"TRule_named_single_source","rule":"TRule_named_single_source.Rule_single_source1","sum":1054906953},{"parent":"TRule_named_single_source.TBlock2","rule":"TRule_named_single_source.TBlock2.Rule_row_pattern_recognition_clause1","sum":1},{"parent":"TRule_named_single_source.TBlock3","rule":"TRule_named_single_source.TBlock3.Block1","sum":441207217},{"parent":"TRule_named_single_source.TBlock3","rule":"TRule_named_single_source.TBlock3.Block2","sum":159399},{"parent":"TRule_named_single_source.TBlock3.TBlock1","rule":"TRule_named_single_source.TBlock3.TBlock1.Alt1","sum":439152846},{"parent":"TRule_named_single_source.TBlock3.TBlock1","rule":"TRule_named_single_source.TBlock3.TBlock1.Alt2","sum":2054371},{"parent":"TRule_named_single_source.TBlock3.TBlock1.TAlt1","rule":"TRule_named_single_source.TBlock3.TBlock1.TAlt1.Rule_an_id2","sum":439152846},{"parent":"TRule_named_single_source.TBlock3.TBlock1.TAlt1","rule":"TRule_named_single_source.TBlock3.TBlock1.TAlt1.Token1","sum":439152846},{"parent":"TRule_named_single_source.TBlock3.TBlock1.TAlt2","rule":"TRule_named_single_source.TBlock3.TBlock1.TAlt2.Rule_an_id_as_compat1","sum":2054371},{"parent":"TRule_named_single_source.TBlock3.TBlock2","rule":"TRule_named_single_source.TBlock3.TBlock2.Rule_pure_column_list1","sum":159399},{"parent":"TRule_named_single_source.TBlock4","rule":"TRule_named_single_source.TBlock4.Alt1","sum":75042},{"parent":"TRule_named_single_source.TBlock4","
rule":"TRule_named_single_source.TBlock4.Alt2","sum":327861},{"parent":"TRule_named_single_source.TBlock4.TAlt1","rule":"TRule_named_single_source.TBlock4.TAlt1.Rule_sample_clause1","sum":75042},{"parent":"TRule_named_single_source.TBlock4.TAlt2","rule":"TRule_named_single_source.TBlock4.TAlt2.Rule_tablesample_clause1","sum":327861},{"parent":"TRule_neq_subexpr","rule":"TRule_neq_subexpr.Block2","sum":9136767},{"parent":"TRule_neq_subexpr","rule":"TRule_neq_subexpr.Block3","sum":112324076},{"parent":"TRule_neq_subexpr","rule":"TRule_neq_subexpr.Rule_bit_subexpr1","sum":15412508761},{"parent":"TRule_neq_subexpr.TBlock2","rule":"TRule_neq_subexpr.TBlock2.Block1","sum":9206438},{"parent":"TRule_neq_subexpr.TBlock2","rule":"TRule_neq_subexpr.TBlock2.Rule_bit_subexpr2","sum":9206438},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.Alt1","sum":8104033},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.Alt2","sum":52838},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.Alt3","sum":464},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.Alt4","sum":23},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.Alt5","sum":753087},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.Alt6","sum":135478},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.Alt7","sum":160515},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt1.Token1","sum":8104033},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt2","rule":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt2.Rule_shift_right1","sum":52838},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt3","rule":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt3.Token1","sum":464},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt4","rule":"TRule_neq_subexpr.TBlock2.T
Block1.TAlt4.Rule_rot_right1","sum":23},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt5","rule":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt5.Token1","sum":753087},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt6","rule":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt6.Token1","sum":135478},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt7","rule":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt7.Token1","sum":160515},{"parent":"TRule_neq_subexpr.TBlock3","rule":"TRule_neq_subexpr.TBlock3.Alt1","sum":108571603},{"parent":"TRule_neq_subexpr.TBlock3","rule":"TRule_neq_subexpr.TBlock3.Alt2","sum":3752473},{"parent":"TRule_neq_subexpr.TBlock3.TAlt1","rule":"TRule_neq_subexpr.TBlock3.TAlt1.Rule_double_question1","sum":108571603},{"parent":"TRule_neq_subexpr.TBlock3.TAlt1","rule":"TRule_neq_subexpr.TBlock3.TAlt1.Rule_neq_subexpr2","sum":108571603},{"parent":"TRule_neq_subexpr.TBlock3.TAlt2","rule":"TRule_neq_subexpr.TBlock3.TAlt2.Block1","sum":3752473},{"parent":"TRule_neq_subexpr.TBlock3.TAlt2.TBlock1","rule":"TRule_neq_subexpr.TBlock3.TAlt2.TBlock1.Token1","sum":3764021},{"parent":"TRule_new_window_name","rule":"TRule_new_window_name.Rule_window_name1","sum":14110566},{"parent":"TRule_null_treatment","rule":"TRule_null_treatment.Alt_null_treatment1","sum":45},{"parent":"TRule_null_treatment","rule":"TRule_null_treatment.Alt_null_treatment2","sum":7082172},{"parent":"TRule_null_treatment.TAlt1","rule":"TRule_null_treatment.TAlt1.Token1","sum":45},{"parent":"TRule_null_treatment.TAlt1","rule":"TRule_null_treatment.TAlt1.Token2","sum":45},{"parent":"TRule_null_treatment.TAlt2","rule":"TRule_null_treatment.TAlt2.Token1","sum":7082172},{"parent":"TRule_null_treatment.TAlt2","rule":"TRule_null_treatment.TAlt2.Token2","sum":7082172},{"parent":"TRule_object_ref","rule":"TRule_object_ref.Block1","sum":5285194},{"parent":"TRule_object_ref","rule":"TRule_object_ref.Rule_id_or_at2","sum":133548148},{"parent":"TRule_object_ref.TBlock1","rule":"TRule_object_ref.TBlock1.Rule_cluster_expr1","sum":5285
194},{"parent":"TRule_object_ref.TBlock1","rule":"TRule_object_ref.TBlock1.Token2","sum":5285194},{"parent":"TRule_opt_bind_parameter","rule":"TRule_opt_bind_parameter.Block2","sum":27466},{"parent":"TRule_opt_bind_parameter","rule":"TRule_opt_bind_parameter.Rule_bind_parameter1","sum":21282715},{"parent":"TRule_opt_bind_parameter.TBlock2","rule":"TRule_opt_bind_parameter.TBlock2.Token1","sum":27466},{"parent":"TRule_opt_id_prefix","rule":"TRule_opt_id_prefix.Block1","sum":82491107},{"parent":"TRule_opt_id_prefix.TBlock1","rule":"TRule_opt_id_prefix.TBlock1.Rule_an_id1","sum":82491107},{"parent":"TRule_opt_id_prefix.TBlock1","rule":"TRule_opt_id_prefix.TBlock1.Token2","sum":82491107},{"parent":"TRule_opt_id_prefix_or_type","rule":"TRule_opt_id_prefix_or_type.Block1","sum":771372817},{"parent":"TRule_opt_id_prefix_or_type.TBlock1","rule":"TRule_opt_id_prefix_or_type.TBlock1.Rule_an_id_or_type1","sum":771372817},{"parent":"TRule_opt_id_prefix_or_type.TBlock1","rule":"TRule_opt_id_prefix_or_type.TBlock1.Token2","sum":771372817},{"parent":"TRule_opt_set_quantifier","rule":"TRule_opt_set_quantifier.Block1","sum":70044287},{"parent":"TRule_opt_set_quantifier.TBlock1","rule":"TRule_opt_set_quantifier.TBlock1.Token1","sum":70044287},{"parent":"TRule_or_subexpr","rule":"TRule_or_subexpr.Block2","sum":276276983},{"parent":"TRule_or_subexpr","rule":"TRule_or_subexpr.Rule_and_subexpr1","sum":13776458637},{"parent":"TRule_or_subexpr.TBlock2","rule":"TRule_or_subexpr.TBlock2.Rule_and_subexpr2","sum":469698642},{"parent":"TRule_or_subexpr.TBlock2","rule":"TRule_or_subexpr.TBlock2.Token1","sum":469698642},{"parent":"TRule_order_by_clause","rule":"TRule_order_by_clause.Rule_sort_specification_list3","sum":107098966},{"parent":"TRule_order_by_clause","rule":"TRule_order_by_clause.Token1","sum":107098966},{"parent":"TRule_order_by_clause","rule":"TRule_order_by_clause.Token2","sum":107098966},{"parent":"TRule_ordinary_grouping_set","rule":"TRule_ordinary_grouping_set.Rule_named_expr1"
,"sum":300694524},{"parent":"TRule_ordinary_grouping_set_list","rule":"TRule_ordinary_grouping_set_list.Block2","sum":199805},{"parent":"TRule_ordinary_grouping_set_list","rule":"TRule_ordinary_grouping_set_list.Rule_ordinary_grouping_set1","sum":339701},{"parent":"TRule_ordinary_grouping_set_list.TBlock2","rule":"TRule_ordinary_grouping_set_list.TBlock2.Rule_ordinary_grouping_set2","sum":432511},{"parent":"TRule_ordinary_grouping_set_list.TBlock2","rule":"TRule_ordinary_grouping_set_list.TBlock2.Token1","sum":432511},{"parent":"TRule_pragma_stmt","rule":"TRule_pragma_stmt.Block4","sum":785793323},{"parent":"TRule_pragma_stmt","rule":"TRule_pragma_stmt.Rule_an_id3","sum":889708160},{"parent":"TRule_pragma_stmt","rule":"TRule_pragma_stmt.Rule_opt_id_prefix_or_type2","sum":889708160},{"parent":"TRule_pragma_stmt","rule":"TRule_pragma_stmt.Token1","sum":889708160},{"parent":"TRule_pragma_stmt.TBlock4","rule":"TRule_pragma_stmt.TBlock4.Alt1","sum":738017904},{"parent":"TRule_pragma_stmt.TBlock4","rule":"TRule_pragma_stmt.TBlock4.Alt2","sum":47775419},{"parent":"TRule_pragma_stmt.TBlock4.TAlt1","rule":"TRule_pragma_stmt.TBlock4.TAlt1.Rule_pragma_value2","sum":738017904},{"parent":"TRule_pragma_stmt.TBlock4.TAlt1","rule":"TRule_pragma_stmt.TBlock4.TAlt1.Token1","sum":738017904},{"parent":"TRule_pragma_stmt.TBlock4.TAlt2","rule":"TRule_pragma_stmt.TBlock4.TAlt2.Block3","sum":17591907},{"parent":"TRule_pragma_stmt.TBlock4.TAlt2","rule":"TRule_pragma_stmt.TBlock4.TAlt2.Rule_pragma_value2","sum":47775419},{"parent":"TRule_pragma_stmt.TBlock4.TAlt2","rule":"TRule_pragma_stmt.TBlock4.TAlt2.Token1","sum":47775419},{"parent":"TRule_pragma_stmt.TBlock4.TAlt2","rule":"TRule_pragma_stmt.TBlock4.TAlt2.Token4","sum":47775419},{"parent":"TRule_pragma_stmt.TBlock4.TAlt2.TBlock3","rule":"TRule_pragma_stmt.TBlock4.TAlt2.TBlock3.Rule_pragma_value2","sum":18396599},{"parent":"TRule_pragma_stmt.TBlock4.TAlt2.TBlock3","rule":"TRule_pragma_stmt.TBlock4.TAlt2.TBlock3.Token1","sum":18396599},{"p
arent":"TRule_pragma_value","rule":"TRule_pragma_value.Alt_pragma_value2","sum":255302},{"parent":"TRule_pragma_value","rule":"TRule_pragma_value.Alt_pragma_value3","sum":791603349},{"parent":"TRule_pragma_value","rule":"TRule_pragma_value.Alt_pragma_value5","sum":12331271},{"parent":"TRule_pragma_value.TAlt2","rule":"TRule_pragma_value.TAlt2.Rule_id1","sum":255302},{"parent":"TRule_pragma_value.TAlt3","rule":"TRule_pragma_value.TAlt3.Token1","sum":791603349},{"parent":"TRule_pragma_value.TAlt5","rule":"TRule_pragma_value.TAlt5.Rule_bind_parameter1","sum":12331271},{"parent":"TRule_process_core","rule":"TRule_process_core.Block4","sum":1132},{"parent":"TRule_process_core","rule":"TRule_process_core.Block5","sum":1901404},{"parent":"TRule_process_core","rule":"TRule_process_core.Rule_named_single_source3","sum":4511331},{"parent":"TRule_process_core","rule":"TRule_process_core.Token1","sum":4511331},{"parent":"TRule_process_core.TBlock4","rule":"TRule_process_core.TBlock4.Rule_named_single_source2","sum":1133},{"parent":"TRule_process_core.TBlock4","rule":"TRule_process_core.TBlock4.Token1","sum":1133},{"parent":"TRule_process_core.TBlock5","rule":"TRule_process_core.TBlock5.Block3","sum":46},{"parent":"TRule_process_core.TBlock5","rule":"TRule_process_core.TBlock5.Block5","sum":4479},{"parent":"TRule_process_core.TBlock5","rule":"TRule_process_core.TBlock5.Block7","sum":9},{"parent":"TRule_process_core.TBlock5","rule":"TRule_process_core.TBlock5.Rule_using_call_expr2","sum":1901404},{"parent":"TRule_process_core.TBlock5","rule":"TRule_process_core.TBlock5.Token1","sum":1901404},{"parent":"TRule_process_core.TBlock5.TBlock3","rule":"TRule_process_core.TBlock5.TBlock3.Rule_an_id2","sum":46},{"parent":"TRule_process_core.TBlock5.TBlock3","rule":"TRule_process_core.TBlock5.TBlock3.Token1","sum":46},{"parent":"TRule_process_core.TBlock5.TBlock5","rule":"TRule_process_core.TBlock5.TBlock5.Rule_expr2","sum":4479},{"parent":"TRule_process_core.TBlock5.TBlock5","rule":"TRule
_process_core.TBlock5.TBlock5.Token1","sum":4479},{"parent":"TRule_process_core.TBlock5.TBlock7","rule":"TRule_process_core.TBlock5.TBlock7.Rule_order_by_clause2","sum":9},{"parent":"TRule_process_core.TBlock5.TBlock7","rule":"TRule_process_core.TBlock5.TBlock7.Token1","sum":9},{"parent":"TRule_pure_column_list","rule":"TRule_pure_column_list.Block3","sum":3902316},{"parent":"TRule_pure_column_list","rule":"TRule_pure_column_list.Rule_an_id2","sum":4638391},{"parent":"TRule_pure_column_list","rule":"TRule_pure_column_list.Token1","sum":4638391},{"parent":"TRule_pure_column_list","rule":"TRule_pure_column_list.Token4","sum":4638391},{"parent":"TRule_pure_column_list.TBlock3","rule":"TRule_pure_column_list.TBlock3.Rule_an_id2","sum":34537553},{"parent":"TRule_pure_column_list.TBlock3","rule":"TRule_pure_column_list.TBlock3.Token1","sum":34537553},{"parent":"TRule_pure_column_or_named","rule":"TRule_pure_column_or_named.Alt_pure_column_or_named1","sum":6570850},{"parent":"TRule_pure_column_or_named","rule":"TRule_pure_column_or_named.Alt_pure_column_or_named2","sum":423525984},{"parent":"TRule_pure_column_or_named.TAlt1","rule":"TRule_pure_column_or_named.TAlt1.Rule_bind_parameter1","sum":6570850},{"parent":"TRule_pure_column_or_named.TAlt2","rule":"TRule_pure_column_or_named.TAlt2.Rule_an_id1","sum":423525984},{"parent":"TRule_pure_column_or_named_list","rule":"TRule_pure_column_or_named_list.Block3","sum":11707260},{"parent":"TRule_pure_column_or_named_list","rule":"TRule_pure_column_or_named_list.Rule_pure_column_or_named2","sum":32377040},{"parent":"TRule_pure_column_or_named_list","rule":"TRule_pure_column_or_named_list.Token1","sum":32377040},{"parent":"TRule_pure_column_or_named_list","rule":"TRule_pure_column_or_named_list.Token4","sum":32377040},{"parent":"TRule_pure_column_or_named_list.TBlock3","rule":"TRule_pure_column_or_named_list.TBlock3.Rule_pure_column_or_named2","sum":21738897},{"parent":"TRule_pure_column_or_named_list.TBlock3","rule":"TRule_pure_col
umn_or_named_list.TBlock3.Token1","sum":21738897},{"parent":"TRule_real","rule":"TRule_real.Token1","sum":108407914},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Block11","sum":9015},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Block13","sum":25920},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Block3","sum":113733},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Block4","sum":295368},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Block8","sum":305499},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Rule_column_list6","sum":838466},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Rule_named_single_source2","sum":838466},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Rule_using_call_expr9","sum":838466},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Token1","sum":838466},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Token5","sum":838466},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Token7","sum":838466},{"parent":"TRule_reduce_core.TBlock11","rule":"TRule_reduce_core.TBlock11.Rule_expr2","sum":9015},{"parent":"TRule_reduce_core.TBlock11","rule":"TRule_reduce_core.TBlock11.Token1","sum":9015},{"parent":"TRule_reduce_core.TBlock13","rule":"TRule_reduce_core.TBlock13.Rule_order_by_clause2","sum":25920},{"parent":"TRule_reduce_core.TBlock13","rule":"TRule_reduce_core.TBlock13.Token1","sum":25920},{"parent":"TRule_reduce_core.TBlock3","rule":"TRule_reduce_core.TBlock3.Rule_named_single_source2","sum":197711},{"parent":"TRule_reduce_core.TBlock3","rule":"TRule_reduce_core.TBlock3.Token1","sum":197711},{"parent":"TRule_reduce_core.TBlock4","rule":"TRule_reduce_core.TBlock4.Rule_sort_specification_list2","sum":295368},{"parent":"TRule_reduce_core.TBlock4","rule":"TRule_reduce_core.TBlock4.Token1","sum":295368},{"parent":"TRule_reduce_core.TBlock8","rule":"TRule_reduce_core.TBlock8.Token1","sum":305499},{"parent":"TRule_repeatable_clause","rule":"TRule_repeatable_clause.Rule
_expr3","sum":9174},{"parent":"TRule_repeatable_clause","rule":"TRule_repeatable_clause.Token1","sum":9174},{"parent":"TRule_repeatable_clause","rule":"TRule_repeatable_clause.Token2","sum":9174},{"parent":"TRule_repeatable_clause","rule":"TRule_repeatable_clause.Token4","sum":9174},{"parent":"TRule_result_column","rule":"TRule_result_column.Alt_result_column1","sum":249019740},{"parent":"TRule_result_column","rule":"TRule_result_column.Alt_result_column2","sum":3434636821},{"parent":"TRule_result_column.TAlt1","rule":"TRule_result_column.TAlt1.Rule_opt_id_prefix1","sum":249019740},{"parent":"TRule_result_column.TAlt1","rule":"TRule_result_column.TAlt1.Token2","sum":249019740},{"parent":"TRule_result_column.TAlt2","rule":"TRule_result_column.TAlt2.Block2","sum":1980143194},{"parent":"TRule_result_column.TAlt2","rule":"TRule_result_column.TAlt2.Rule_expr1","sum":3434636821},{"parent":"TRule_result_column.TAlt2.TBlock2","rule":"TRule_result_column.TAlt2.TBlock2.Alt1","sum":1979279892},{"parent":"TRule_result_column.TAlt2.TBlock2","rule":"TRule_result_column.TAlt2.TBlock2.Alt2","sum":863302},{"parent":"TRule_result_column.TAlt2.TBlock2.TAlt1","rule":"TRule_result_column.TAlt2.TBlock2.TAlt1.Rule_an_id_or_type2","sum":1979279892},{"parent":"TRule_result_column.TAlt2.TBlock2.TAlt1","rule":"TRule_result_column.TAlt2.TBlock2.TAlt1.Token1","sum":1979279892},{"parent":"TRule_result_column.TAlt2.TBlock2.TAlt2","rule":"TRule_result_column.TAlt2.TBlock2.TAlt2.Rule_an_id_as_compat1","sum":863302},{"parent":"TRule_rollup_list","rule":"TRule_rollup_list.Rule_ordinary_grouping_set_list3","sum":61937},{"parent":"TRule_rollup_list","rule":"TRule_rollup_list.Token1","sum":61937},{"parent":"TRule_rollup_list","rule":"TRule_rollup_list.Token2","sum":61937},{"parent":"TRule_rollup_list","rule":"TRule_rollup_list.Token4","sum":61937},{"parent":"TRule_rot_right","rule":"TRule_rot_right.Token1","sum":23},{"parent":"TRule_rot_right","rule":"TRule_rot_right.Token2","sum":23},{"parent":"TRule_r
ot_right","rule":"TRule_rot_right.Token3","sum":23},{"parent":"TRule_row_pattern","rule":"TRule_row_pattern.Rule_row_pattern_term1","sum":2},{"parent":"TRule_row_pattern_common_syntax","rule":"TRule_row_pattern_common_syntax.Rule_row_pattern5","sum":1},{"parent":"TRule_row_pattern_common_syntax","rule":"TRule_row_pattern_common_syntax.Rule_row_pattern_definition_list9","sum":1},{"parent":"TRule_row_pattern_common_syntax","rule":"TRule_row_pattern_common_syntax.Token3","sum":1},{"parent":"TRule_row_pattern_common_syntax","rule":"TRule_row_pattern_common_syntax.Token4","sum":1},{"parent":"TRule_row_pattern_common_syntax","rule":"TRule_row_pattern_common_syntax.Token6","sum":1},{"parent":"TRule_row_pattern_common_syntax","rule":"TRule_row_pattern_common_syntax.Token8","sum":1},{"parent":"TRule_row_pattern_definition","rule":"TRule_row_pattern_definition.Rule_row_pattern_definition_search_condition3","sum":1},{"parent":"TRule_row_pattern_definition","rule":"TRule_row_pattern_definition.Rule_row_pattern_definition_variable_name1","sum":1},{"parent":"TRule_row_pattern_definition","rule":"TRule_row_pattern_definition.Token2","sum":1},{"parent":"TRule_row_pattern_definition_list","rule":"TRule_row_pattern_definition_list.Rule_row_pattern_definition1","sum":1},{"parent":"TRule_row_pattern_definition_search_condition","rule":"TRule_row_pattern_definition_search_condition.Rule_search_condition1","sum":1},{"parent":"TRule_row_pattern_definition_variable_name","rule":"TRule_row_pattern_definition_variable_name.Rule_row_pattern_variable_name1","sum":1},{"parent":"TRule_row_pattern_factor","rule":"TRule_row_pattern_factor.Block2","sum":1},{"parent":"TRule_row_pattern_factor","rule":"TRule_row_pattern_factor.Rule_row_pattern_primary1","sum":2},{"parent":"TRule_row_pattern_factor.TBlock2","rule":"TRule_row_pattern_factor.TBlock2.Rule_row_pattern_quantifier1","sum":1},{"parent":"TRule_row_pattern_measure_definition","rule":"TRule_row_pattern_measure_definition.Rule_an_id3","sum":3},{
"parent":"TRule_row_pattern_measure_definition","rule":"TRule_row_pattern_measure_definition.Rule_expr1","sum":3},{"parent":"TRule_row_pattern_measure_definition","rule":"TRule_row_pattern_measure_definition.Token2","sum":3},{"parent":"TRule_row_pattern_measure_list","rule":"TRule_row_pattern_measure_list.Block2","sum":1},{"parent":"TRule_row_pattern_measure_list","rule":"TRule_row_pattern_measure_list.Rule_row_pattern_measure_definition1","sum":1},{"parent":"TRule_row_pattern_measure_list.TBlock2","rule":"TRule_row_pattern_measure_list.TBlock2.Rule_row_pattern_measure_definition2","sum":2},{"parent":"TRule_row_pattern_measure_list.TBlock2","rule":"TRule_row_pattern_measure_list.TBlock2.Token1","sum":2},{"parent":"TRule_row_pattern_measures","rule":"TRule_row_pattern_measures.Rule_row_pattern_measure_list2","sum":1},{"parent":"TRule_row_pattern_measures","rule":"TRule_row_pattern_measures.Token1","sum":1},{"parent":"TRule_row_pattern_primary","rule":"TRule_row_pattern_primary.Alt_row_pattern_primary1","sum":1},{"parent":"TRule_row_pattern_primary","rule":"TRule_row_pattern_primary.Alt_row_pattern_primary4","sum":1},{"parent":"TRule_row_pattern_primary.TAlt1","rule":"TRule_row_pattern_primary.TAlt1.Rule_row_pattern_primary_variable_name1","sum":1},{"parent":"TRule_row_pattern_primary.TAlt4","rule":"TRule_row_pattern_primary.TAlt4.Block2","sum":1},{"parent":"TRule_row_pattern_primary.TAlt4","rule":"TRule_row_pattern_primary.TAlt4.Token1","sum":1},{"parent":"TRule_row_pattern_primary.TAlt4","rule":"TRule_row_pattern_primary.TAlt4.Token3","sum":1},{"parent":"TRule_row_pattern_primary.TAlt4.TBlock2","rule":"TRule_row_pattern_primary.TAlt4.TBlock2.Rule_row_pattern1","sum":1},{"parent":"TRule_row_pattern_primary_variable_name","rule":"TRule_row_pattern_primary_variable_name.Rule_row_pattern_variable_name1","sum":1},{"parent":"TRule_row_pattern_quantifier","rule":"TRule_row_pattern_quantifier.Alt_row_pattern_quantifier5","sum":1},{"parent":"TRule_row_pattern_quantifier.TAlt
5","rule":"TRule_row_pattern_quantifier.TAlt5.Rule_integer2","sum":1},{"parent":"TRule_row_pattern_quantifier.TAlt5","rule":"TRule_row_pattern_quantifier.TAlt5.Token1","sum":1},{"parent":"TRule_row_pattern_quantifier.TAlt5","rule":"TRule_row_pattern_quantifier.TAlt5.Token3","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Block3","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Block4","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Block5","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Block6","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Rule_row_pattern_common_syntax7","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Token1","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Token2","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Token8","sum":1},{"parent":"TRule_row_pattern_recognition_clause.TBlock3","rule":"TRule_row_pattern_recognition_clause.TBlock3.Rule_window_partition_clause1","sum":1},{"parent":"TRule_row_pattern_recognition_clause.TBlock4","rule":"TRule_row_pattern_recognition_clause.TBlock4.Rule_order_by_clause1","sum":1},{"parent":"TRule_row_pattern_recognition_clause.TBlock5","rule":"TRule_row_pattern_recognition_clause.TBlock5.Rule_row_pattern_measures1","sum":1},{"parent":"TRule_row_pattern_recognition_clause.TBlock6","rule":"TRule_row_pattern_recognition_clause.TBlock6.Rule_row_pattern_rows_per_match1","sum":1},{"parent":"TRule_row_pattern_rows_per_match","rule":"TRule_row_pattern_rows_per_match.Alt_row_pattern_rows_per_match1","sum":1},{"parent":"TRule_row_pattern_rows_per_match.TAlt1","rule":"TRule_row_pattern_rows_p
er_match.TAlt1.Token1","sum":1},{"parent":"TRule_row_pattern_rows_per_match.TAlt1","rule":"TRule_row_pattern_rows_per_match.TAlt1.Token2","sum":1},{"parent":"TRule_row_pattern_rows_per_match.TAlt1","rule":"TRule_row_pattern_rows_per_match.TAlt1.Token3","sum":1},{"parent":"TRule_row_pattern_rows_per_match.TAlt1","rule":"TRule_row_pattern_rows_per_match.TAlt1.Token4","sum":1},{"parent":"TRule_row_pattern_term","rule":"TRule_row_pattern_term.Block1","sum":2},{"parent":"TRule_row_pattern_term.TBlock1","rule":"TRule_row_pattern_term.TBlock1.Rule_row_pattern_factor1","sum":2},{"parent":"TRule_row_pattern_variable_name","rule":"TRule_row_pattern_variable_name.Rule_identifier1","sum":2},{"parent":"TRule_sample_clause","rule":"TRule_sample_clause.Rule_expr2","sum":75042},{"parent":"TRule_sample_clause","rule":"TRule_sample_clause.Token1","sum":75042},{"parent":"TRule_sampling_mode","rule":"TRule_sampling_mode.Token1","sum":327861},{"parent":"TRule_search_condition","rule":"TRule_search_condition.Rule_expr1","sum":1},{"parent":"TRule_select_core","rule":"TRule_select_core.Block1","sum":10686557},{"parent":"TRule_select_core","rule":"TRule_select_core.Block10","sum":340046928},{"parent":"TRule_select_core","rule":"TRule_select_core.Block11","sum":137629459},{"parent":"TRule_select_core","rule":"TRule_select_core.Block12","sum":12099314},{"parent":"TRule_select_core","rule":"TRule_select_core.Block13","sum":13537900},{"parent":"TRule_select_core","rule":"TRule_select_core.Block14","sum":88178773},{"parent":"TRule_select_core","rule":"TRule_select_core.Block3","sum":117},{"parent":"TRule_select_core","rule":"TRule_select_core.Block6","sum":528240331},{"parent":"TRule_select_core","rule":"TRule_select_core.Block7","sum":93118064},{"parent":"TRule_select_core","rule":"TRule_select_core.Block8","sum":24720066},{"parent":"TRule_select_core","rule":"TRule_select_core.Block9","sum":830615230},{"parent":"TRule_select_core","rule":"TRule_select_core.Rule_opt_set_quantifier4","sum":90076
6423},{"parent":"TRule_select_core","rule":"TRule_select_core.Rule_result_column5","sum":900766423},{"parent":"TRule_select_core","rule":"TRule_select_core.Token2","sum":900766423},{"parent":"TRule_select_core.TBlock1","rule":"TRule_select_core.TBlock1.Rule_join_source2","sum":10686557},{"parent":"TRule_select_core.TBlock1","rule":"TRule_select_core.TBlock1.Token1","sum":10686557},{"parent":"TRule_select_core.TBlock10","rule":"TRule_select_core.TBlock10.Rule_expr2","sum":340046928},{"parent":"TRule_select_core.TBlock10","rule":"TRule_select_core.TBlock10.Token1","sum":340046928},{"parent":"TRule_select_core.TBlock11","rule":"TRule_select_core.TBlock11.Rule_group_by_clause1","sum":137629459},{"parent":"TRule_select_core.TBlock12","rule":"TRule_select_core.TBlock12.Rule_expr2","sum":12099314},{"parent":"TRule_select_core.TBlock12","rule":"TRule_select_core.TBlock12.Token1","sum":12099314},{"parent":"TRule_select_core.TBlock13","rule":"TRule_select_core.TBlock13.Rule_window_clause1","sum":13537900},{"parent":"TRule_select_core.TBlock14","rule":"TRule_select_core.TBlock14.Rule_ext_order_by_clause1","sum":88178773},{"parent":"TRule_select_core.TBlock3","rule":"TRule_select_core.TBlock3.Token1","sum":117},{"parent":"TRule_select_core.TBlock6","rule":"TRule_select_core.TBlock6.Rule_result_column2","sum":2782890138},{"parent":"TRule_select_core.TBlock6","rule":"TRule_select_core.TBlock6.Token1","sum":2782890138},{"parent":"TRule_select_core.TBlock7","rule":"TRule_select_core.TBlock7.Token1","sum":93118064},{"parent":"TRule_select_core.TBlock8","rule":"TRule_select_core.TBlock8.Block2","sum":3},{"parent":"TRule_select_core.TBlock8","rule":"TRule_select_core.TBlock8.Rule_without_column_list3","sum":24720066},{"parent":"TRule_select_core.TBlock8","rule":"TRule_select_core.TBlock8.Token1","sum":24720066},{"parent":"TRule_select_core.TBlock8.TBlock2","rule":"TRule_select_core.TBlock8.TBlock2.Token1","sum":3},{"parent":"TRule_select_core.TBlock8.TBlock2","rule":"TRule_select_core
.TBlock8.TBlock2.Token2","sum":3},{"parent":"TRule_select_core.TBlock9","rule":"TRule_select_core.TBlock9.Rule_join_source2","sum":830615230},{"parent":"TRule_select_core.TBlock9","rule":"TRule_select_core.TBlock9.Token1","sum":830615230},{"parent":"TRule_select_kind","rule":"TRule_select_kind.Block1","sum":915353},{"parent":"TRule_select_kind","rule":"TRule_select_kind.Block2","sum":906116220},{"parent":"TRule_select_kind","rule":"TRule_select_kind.Block3","sum":2880950},{"parent":"TRule_select_kind.TBlock1","rule":"TRule_select_kind.TBlock1.Token1","sum":915353},{"parent":"TRule_select_kind.TBlock2","rule":"TRule_select_kind.TBlock2.Alt1","sum":4511331},{"parent":"TRule_select_kind.TBlock2","rule":"TRule_select_kind.TBlock2.Alt2","sum":838466},{"parent":"TRule_select_kind.TBlock2","rule":"TRule_select_kind.TBlock2.Alt3","sum":900766423},{"parent":"TRule_select_kind.TBlock2.TAlt1","rule":"TRule_select_kind.TBlock2.TAlt1.Rule_process_core1","sum":4511331},{"parent":"TRule_select_kind.TBlock2.TAlt2","rule":"TRule_select_kind.TBlock2.TAlt2.Rule_reduce_core1","sum":838466},{"parent":"TRule_select_kind.TBlock2.TAlt3","rule":"TRule_select_kind.TBlock2.TAlt3.Rule_select_core1","sum":900766423},{"parent":"TRule_select_kind.TBlock3","rule":"TRule_select_kind.TBlock3.Rule_pure_column_or_named3","sum":2880950},{"parent":"TRule_select_kind.TBlock3","rule":"TRule_select_kind.TBlock3.Token1","sum":2880950},{"parent":"TRule_select_kind.TBlock3","rule":"TRule_select_kind.TBlock3.Token2","sum":2880950},{"parent":"TRule_select_kind_parenthesis","rule":"TRule_select_kind_parenthesis.Alt_select_kind_parenthesis1","sum":775950850},{"parent":"TRule_select_kind_parenthesis","rule":"TRule_select_kind_parenthesis.Alt_select_kind_parenthesis2","sum":7927813},{"parent":"TRule_select_kind_parenthesis.TAlt1","rule":"TRule_select_kind_parenthesis.TAlt1.Rule_select_kind_partial1","sum":775950850},{"parent":"TRule_select_kind_parenthesis.TAlt2","rule":"TRule_select_kind_parenthesis.TAlt2.Rule_sel
ect_kind_partial2","sum":7927813},{"parent":"TRule_select_kind_parenthesis.TAlt2","rule":"TRule_select_kind_parenthesis.TAlt2.Token1","sum":7927813},{"parent":"TRule_select_kind_parenthesis.TAlt2","rule":"TRule_select_kind_parenthesis.TAlt2.Token3","sum":7927813},{"parent":"TRule_select_kind_partial","rule":"TRule_select_kind_partial.Block2","sum":30562305},{"parent":"TRule_select_kind_partial","rule":"TRule_select_kind_partial.Rule_select_kind1","sum":906116220},{"parent":"TRule_select_kind_partial.TBlock2","rule":"TRule_select_kind_partial.TBlock2.Block3","sum":4496625},{"parent":"TRule_select_kind_partial.TBlock2","rule":"TRule_select_kind_partial.TBlock2.Rule_expr2","sum":30562305},{"parent":"TRule_select_kind_partial.TBlock2","rule":"TRule_select_kind_partial.TBlock2.Token1","sum":30562305},{"parent":"TRule_select_kind_partial.TBlock2.TBlock3","rule":"TRule_select_kind_partial.TBlock2.TBlock3.Rule_expr2","sum":4496625},{"parent":"TRule_select_kind_partial.TBlock2.TBlock3","rule":"TRule_select_kind_partial.TBlock2.TBlock3.Token1","sum":4496625},{"parent":"TRule_select_op","rule":"TRule_select_op.Alt_select_op1","sum":63830507},{"parent":"TRule_select_op.TAlt1","rule":"TRule_select_op.TAlt1.Block2","sum":62947922},{"parent":"TRule_select_op.TAlt1","rule":"TRule_select_op.TAlt1.Token1","sum":63830507},{"parent":"TRule_select_op.TAlt1.TBlock2","rule":"TRule_select_op.TAlt1.TBlock2.Token1","sum":62947922},{"parent":"TRule_select_stmt","rule":"TRule_select_stmt.Block2","sum":33216727},{"parent":"TRule_select_stmt","rule":"TRule_select_stmt.Rule_select_kind_parenthesis1","sum":720048156},{"parent":"TRule_select_stmt.TBlock2","rule":"TRule_select_stmt.TBlock2.Rule_select_kind_parenthesis2","sum":56908673},{"parent":"TRule_select_stmt.TBlock2","rule":"TRule_select_stmt.TBlock2.Rule_select_op1","sum":56908673},{"parent":"TRule_select_unparenthesized_stmt","rule":"TRule_select_unparenthesized_stmt.Block2","sum":4274079},{"parent":"TRule_select_unparenthesized_stmt","rule"
:"TRule_select_unparenthesized_stmt.Rule_select_kind_partial1","sum":122237557},{"parent":"TRule_select_unparenthesized_stmt.TBlock2","rule":"TRule_select_unparenthesized_stmt.TBlock2.Rule_select_kind_parenthesis2","sum":6921834},{"parent":"TRule_select_unparenthesized_stmt.TBlock2","rule":"TRule_select_unparenthesized_stmt.TBlock2.Rule_select_op1","sum":6921834},{"parent":"TRule_shift_right","rule":"TRule_shift_right.Token1","sum":52838},{"parent":"TRule_shift_right","rule":"TRule_shift_right.Token2","sum":52838},{"parent":"TRule_simple_table_ref","rule":"TRule_simple_table_ref.Block2","sum":142445588},{"parent":"TRule_simple_table_ref","rule":"TRule_simple_table_ref.Rule_simple_table_ref_core1","sum":210577516},{"parent":"TRule_simple_table_ref.TBlock2","rule":"TRule_simple_table_ref.TBlock2.Rule_table_hints1","sum":142445588},{"parent":"TRule_simple_table_ref_core","rule":"TRule_simple_table_ref_core.Alt_simple_table_ref_core1","sum":133548148},{"parent":"TRule_simple_table_ref_core","rule":"TRule_simple_table_ref_core.Alt_simple_table_ref_core2","sum":77029368},{"parent":"TRule_simple_table_ref_core.TAlt1","rule":"TRule_simple_table_ref_core.TAlt1.Rule_object_ref1","sum":133548148},{"parent":"TRule_simple_table_ref_core.TAlt2","rule":"TRule_simple_table_ref_core.TAlt2.Block1","sum":71573},{"parent":"TRule_simple_table_ref_core.TAlt2","rule":"TRule_simple_table_ref_core.TAlt2.Rule_bind_parameter2","sum":77029368},{"parent":"TRule_simple_table_ref_core.TAlt2.TBlock1","rule":"TRule_simple_table_ref_core.TAlt2.TBlock1.Token1","sum":71573},{"parent":"TRule_single_source","rule":"TRule_single_source.Alt_single_source1","sum":950258780},{"parent":"TRule_single_source","rule":"TRule_single_source.Alt_single_source2","sum":104480181},{"parent":"TRule_single_source","rule":"TRule_single_source.Alt_single_source3","sum":167992},{"parent":"TRule_single_source.TAlt1","rule":"TRule_single_source.TAlt1.Rule_table_ref1","sum":950258780},{"parent":"TRule_single_source.TAlt2","ru
le":"TRule_single_source.TAlt2.Rule_select_stmt2","sum":104480181},{"parent":"TRule_single_source.TAlt2","rule":"TRule_single_source.TAlt2.Token1","sum":104480181},{"parent":"TRule_single_source.TAlt2","rule":"TRule_single_source.TAlt2.Token3","sum":104480181},{"parent":"TRule_single_source.TAlt3","rule":"TRule_single_source.TAlt3.Rule_values_stmt2","sum":167992},{"parent":"TRule_single_source.TAlt3","rule":"TRule_single_source.TAlt3.Token1","sum":167992},{"parent":"TRule_single_source.TAlt3","rule":"TRule_single_source.TAlt3.Token3","sum":167992},{"parent":"TRule_smart_parenthesis","rule":"TRule_smart_parenthesis.Block2","sum":506638603},{"parent":"TRule_smart_parenthesis","rule":"TRule_smart_parenthesis.Block3","sum":2604905},{"parent":"TRule_smart_parenthesis","rule":"TRule_smart_parenthesis.Token1","sum":510536515},{"parent":"TRule_smart_parenthesis","rule":"TRule_smart_parenthesis.Token4","sum":510536515},{"parent":"TRule_smart_parenthesis.TBlock2","rule":"TRule_smart_parenthesis.TBlock2.Rule_named_expr_list1","sum":506638603},{"parent":"TRule_smart_parenthesis.TBlock3","rule":"TRule_smart_parenthesis.TBlock3.Token1","sum":2604905},{"parent":"TRule_sort_specification","rule":"TRule_sort_specification.Block2","sum":32972666},{"parent":"TRule_sort_specification","rule":"TRule_sort_specification.Rule_expr1","sum":180557811},{"parent":"TRule_sort_specification.TBlock2","rule":"TRule_sort_specification.TBlock2.Token1","sum":32972666},{"parent":"TRule_sort_specification_list","rule":"TRule_sort_specification_list.Block2","sum":39667162},{"parent":"TRule_sort_specification_list","rule":"TRule_sort_specification_list.Rule_sort_specification1","sum":107394334},{"parent":"TRule_sort_specification_list.TBlock2","rule":"TRule_sort_specification_list.TBlock2.Rule_sort_specification2","sum":73163477},{"parent":"TRule_sort_specification_list.TBlock2","rule":"TRule_sort_specification_list.TBlock2.Token1","sum":73163477},{"parent":"TRule_sql_query","rule":"TRule_sql_query.Alt_s
ql_query1","sum":318387605},{"parent":"TRule_sql_query.TAlt1","rule":"TRule_sql_query.TAlt1.Rule_sql_stmt_list1","sum":318387605},{"parent":"TRule_sql_stmt","rule":"TRule_sql_stmt.Block1","sum":51},{"parent":"TRule_sql_stmt","rule":"TRule_sql_stmt.Rule_sql_stmt_core2","sum":2704776586},{"parent":"TRule_sql_stmt.TBlock1","rule":"TRule_sql_stmt.TBlock1.Token1","sum":51},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core1","sum":889708160},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core12","sum":154773177},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core13","sum":16445839},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core17","sum":4690245},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core18","sum":22146478},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core19","sum":8529552},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core2","sum":192372863},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core20","sum":6943272},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core21","sum":329},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core3","sum":939224641},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core4","sum":137},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core5","sum":2560940},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core6","sum":327978948},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core7","sum":208016439},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core8","sum":12985670},{"parent":"TRule_sql_stmt_core.TAlt1","rule":"TRule_sql_stmt_core.TAlt1.Rule_pragma_stmt1","sum":889708160},{"parent":"TRule_sql_stmt_core.TAlt12","rule":"TRule_sql_stmt_core.TAlt12.Rule_dec
lare_stmt1","sum":154773177},{"parent":"TRule_sql_stmt_core.TAlt13","rule":"TRule_sql_stmt_core.TAlt13.Rule_import_stmt1","sum":16445839},{"parent":"TRule_sql_stmt_core.TAlt17","rule":"TRule_sql_stmt_core.TAlt17.Rule_do_stmt1","sum":4690245},{"parent":"TRule_sql_stmt_core.TAlt18","rule":"TRule_sql_stmt_core.TAlt18.Rule_define_action_or_subquery_stmt1","sum":22146478},{"parent":"TRule_sql_stmt_core.TAlt19","rule":"TRule_sql_stmt_core.TAlt19.Rule_if_stmt1","sum":8529552},{"parent":"TRule_sql_stmt_core.TAlt2","rule":"TRule_sql_stmt_core.TAlt2.Rule_select_stmt1","sum":192372863},{"parent":"TRule_sql_stmt_core.TAlt20","rule":"TRule_sql_stmt_core.TAlt20.Rule_for_stmt1","sum":6943272},{"parent":"TRule_sql_stmt_core.TAlt21","rule":"TRule_sql_stmt_core.TAlt21.Rule_values_stmt1","sum":329},{"parent":"TRule_sql_stmt_core.TAlt3","rule":"TRule_sql_stmt_core.TAlt3.Rule_named_nodes_stmt1","sum":939224641},{"parent":"TRule_sql_stmt_core.TAlt4","rule":"TRule_sql_stmt_core.TAlt4.Rule_create_table_stmt1","sum":137},{"parent":"TRule_sql_stmt_core.TAlt5","rule":"TRule_sql_stmt_core.TAlt5.Rule_drop_table_stmt1","sum":2560940},{"parent":"TRule_sql_stmt_core.TAlt6","rule":"TRule_sql_stmt_core.TAlt6.Rule_use_stmt1","sum":327978948},{"parent":"TRule_sql_stmt_core.TAlt7","rule":"TRule_sql_stmt_core.TAlt7.Rule_into_table_stmt1","sum":208016439},{"parent":"TRule_sql_stmt_core.TAlt8","rule":"TRule_sql_stmt_core.TAlt8.Rule_commit_stmt1","sum":12985670},{"parent":"TRule_sql_stmt_list","rule":"TRule_sql_stmt_list.Block1","sum":442},{"parent":"TRule_sql_stmt_list","rule":"TRule_sql_stmt_list.Block3","sum":312962614},{"parent":"TRule_sql_stmt_list","rule":"TRule_sql_stmt_list.Block4","sum":194275107},{"parent":"TRule_sql_stmt_list","rule":"TRule_sql_stmt_list.Rule_sql_stmt2","sum":318387605},{"parent":"TRule_sql_stmt_list","rule":"TRule_sql_stmt_list.Token5","sum":318387605},{"parent":"TRule_sql_stmt_list.TBlock1","rule":"TRule_sql_stmt_list.TBlock1.Token1","sum":465},{"parent":"TRule_sql_stmt_list.T
Block3","rule":"TRule_sql_stmt_list.TBlock3.Block1","sum":2386388981},{"parent":"TRule_sql_stmt_list.TBlock3","rule":"TRule_sql_stmt_list.TBlock3.Rule_sql_stmt2","sum":2386388981},{"parent":"TRule_sql_stmt_list.TBlock3.TBlock1","rule":"TRule_sql_stmt_list.TBlock3.TBlock1.Token1","sum":2395451248},{"parent":"TRule_sql_stmt_list.TBlock4","rule":"TRule_sql_stmt_list.TBlock4.Token1","sum":196953420},{"parent":"TRule_struct_arg","rule":"TRule_struct_arg.Rule_type_name_or_bind3","sum":114249217},{"parent":"TRule_struct_arg","rule":"TRule_struct_arg.Rule_type_name_tag1","sum":114249217},{"parent":"TRule_struct_arg","rule":"TRule_struct_arg.Token2","sum":114249217},{"parent":"TRule_struct_arg_positional","rule":"TRule_struct_arg_positional.Alt_struct_arg_positional1","sum":175},{"parent":"TRule_struct_arg_positional.TAlt1","rule":"TRule_struct_arg_positional.TAlt1.Block3","sum":146},{"parent":"TRule_struct_arg_positional.TAlt1","rule":"TRule_struct_arg_positional.TAlt1.Rule_type_name_or_bind2","sum":175},{"parent":"TRule_struct_arg_positional.TAlt1","rule":"TRule_struct_arg_positional.TAlt1.Rule_type_name_tag1","sum":175},{"parent":"TRule_struct_arg_positional.TAlt1.TBlock3","rule":"TRule_struct_arg_positional.TAlt1.TBlock3.Token2","sum":146},{"parent":"TRule_struct_literal","rule":"TRule_struct_literal.Block2","sum":14291717},{"parent":"TRule_struct_literal","rule":"TRule_struct_literal.Block3","sum":2496630},{"parent":"TRule_struct_literal","rule":"TRule_struct_literal.Token1","sum":14420408},{"parent":"TRule_struct_literal","rule":"TRule_struct_literal.Token4","sum":14420408},{"parent":"TRule_struct_literal.TBlock2","rule":"TRule_struct_literal.TBlock2.Rule_expr_struct_list1","sum":14291717},{"parent":"TRule_struct_literal.TBlock3","rule":"TRule_struct_literal.TBlock3.Token1","sum":2496630},{"parent":"TRule_subselect_stmt","rule":"TRule_subselect_stmt.Block1","sum":328738954},{"parent":"TRule_subselect_stmt.TBlock1","rule":"TRule_subselect_stmt.TBlock1.Alt1","sum":206501
397},{"parent":"TRule_subselect_stmt.TBlock1","rule":"TRule_subselect_stmt.TBlock1.Alt2","sum":122237557},{"parent":"TRule_subselect_stmt.TBlock1.TAlt1","rule":"TRule_subselect_stmt.TBlock1.TAlt1.Rule_select_stmt2","sum":206501397},{"parent":"TRule_subselect_stmt.TBlock1.TAlt1","rule":"TRule_subselect_stmt.TBlock1.TAlt1.Token1","sum":206501397},{"parent":"TRule_subselect_stmt.TBlock1.TAlt1","rule":"TRule_subselect_stmt.TBlock1.TAlt1.Token3","sum":206501397},{"parent":"TRule_subselect_stmt.TBlock1.TAlt2","rule":"TRule_subselect_stmt.TBlock1.TAlt2.Rule_select_unparenthesized_stmt1","sum":122237557},{"parent":"TRule_table_arg","rule":"TRule_table_arg.Block1","sum":115539},{"parent":"TRule_table_arg","rule":"TRule_table_arg.Block3","sum":260484},{"parent":"TRule_table_arg","rule":"TRule_table_arg.Rule_named_expr2","sum":192912118},{"parent":"TRule_table_arg.TBlock1","rule":"TRule_table_arg.TBlock1.Token1","sum":115539},{"parent":"TRule_table_arg.TBlock3","rule":"TRule_table_arg.TBlock3.Rule_view_name2","sum":260484},{"parent":"TRule_table_arg.TBlock3","rule":"TRule_table_arg.TBlock3.Token1","sum":260484},{"parent":"TRule_table_constraint","rule":"TRule_table_constraint.Alt_table_constraint2","sum":136},{"parent":"TRule_table_constraint","rule":"TRule_table_constraint.Alt_table_constraint3","sum":117},{"parent":"TRule_table_constraint.TAlt2","rule":"TRule_table_constraint.TAlt2.Block5","sum":115},{"parent":"TRule_table_constraint.TAlt2","rule":"TRule_table_constraint.TAlt2.Rule_an_id4","sum":136},{"parent":"TRule_table_constraint.TAlt2","rule":"TRule_table_constraint.TAlt2.Token1","sum":136},{"parent":"TRule_table_constraint.TAlt2","rule":"TRule_table_constraint.TAlt2.Token2","sum":136},{"parent":"TRule_table_constraint.TAlt2","rule":"TRule_table_constraint.TAlt2.Token3","sum":136},{"parent":"TRule_table_constraint.TAlt2","rule":"TRule_table_constraint.TAlt2.Token6","sum":136},{"parent":"TRule_table_constraint.TAlt2.TBlock5","rule":"TRule_table_constraint.TAlt2.TBlock5.R
ule_an_id2","sum":289},{"parent":"TRule_table_constraint.TAlt2.TBlock5","rule":"TRule_table_constraint.TAlt2.TBlock5.Token1","sum":289},{"parent":"TRule_table_constraint.TAlt3","rule":"TRule_table_constraint.TAlt3.Rule_column_order_by_specification4","sum":117},{"parent":"TRule_table_constraint.TAlt3","rule":"TRule_table_constraint.TAlt3.Token1","sum":117},{"parent":"TRule_table_constraint.TAlt3","rule":"TRule_table_constraint.TAlt3.Token2","sum":117},{"parent":"TRule_table_constraint.TAlt3","rule":"TRule_table_constraint.TAlt3.Token3","sum":117},{"parent":"TRule_table_constraint.TAlt3","rule":"TRule_table_constraint.TAlt3.Token6","sum":117},{"parent":"TRule_table_hint","rule":"TRule_table_hint.Alt_table_hint1","sum":153546171},{"parent":"TRule_table_hint","rule":"TRule_table_hint.Alt_table_hint2","sum":18976651},{"parent":"TRule_table_hint","rule":"TRule_table_hint.Alt_table_hint3","sum":12},{"parent":"TRule_table_hint.TAlt1","rule":"TRule_table_hint.TAlt1.Block2","sum":10469920},{"parent":"TRule_table_hint.TAlt1","rule":"TRule_table_hint.TAlt1.Rule_an_id_hint1","sum":153546171},{"parent":"TRule_table_hint.TAlt1.TBlock2","rule":"TRule_table_hint.TAlt1.TBlock2.Block2","sum":10469920},{"parent":"TRule_table_hint.TAlt1.TBlock2","rule":"TRule_table_hint.TAlt1.TBlock2.Token1","sum":10469920},{"parent":"TRule_table_hint.TAlt1.TBlock2.TBlock2","rule":"TRule_table_hint.TAlt1.TBlock2.TBlock2.Alt1","sum":10469920},{"parent":"TRule_table_hint.TAlt1.TBlock2.TBlock2.TAlt1","rule":"TRule_table_hint.TAlt1.TBlock2.TBlock2.TAlt1.Rule_type_name_tag1","sum":10469920},{"parent":"TRule_table_hint.TAlt2","rule":"TRule_table_hint.TAlt2.Block2","sum":1518},{"parent":"TRule_table_hint.TAlt2","rule":"TRule_table_hint.TAlt2.Rule_type_name_or_bind3","sum":18976651},{"parent":"TRule_table_hint.TAlt2","rule":"TRule_table_hint.TAlt2.Token1","sum":18976651},{"parent":"TRule_table_hint.TAlt2.TBlock2","rule":"TRule_table_hint.TAlt2.TBlock2.Token1","sum":1518},{"parent":"TRule_table_hint.TAlt3","rul
e":"TRule_table_hint.TAlt3.Block4","sum":12},{"parent":"TRule_table_hint.TAlt3","rule":"TRule_table_hint.TAlt3.Token1","sum":12},{"parent":"TRule_table_hint.TAlt3","rule":"TRule_table_hint.TAlt3.Token3","sum":12},{"parent":"TRule_table_hint.TAlt3","rule":"TRule_table_hint.TAlt3.Token6","sum":12},{"parent":"TRule_table_hint.TAlt3.TBlock4","rule":"TRule_table_hint.TAlt3.TBlock4.Block2","sum":7},{"parent":"TRule_table_hint.TAlt3.TBlock4","rule":"TRule_table_hint.TAlt3.TBlock4.Rule_struct_arg_positional1","sum":12},{"parent":"TRule_table_hint.TAlt3.TBlock4.TBlock2","rule":"TRule_table_hint.TAlt3.TBlock4.TBlock2.Rule_struct_arg_positional2","sum":163},{"parent":"TRule_table_hint.TAlt3.TBlock4.TBlock2","rule":"TRule_table_hint.TAlt3.TBlock4.TBlock2.Token1","sum":163},{"parent":"TRule_table_hints","rule":"TRule_table_hints.Block2","sum":162142057},{"parent":"TRule_table_hints","rule":"TRule_table_hints.Token1","sum":162142057},{"parent":"TRule_table_hints.TBlock2","rule":"TRule_table_hints.TBlock2.Alt1","sum":151892471},{"parent":"TRule_table_hints.TBlock2","rule":"TRule_table_hints.TBlock2.Alt2","sum":10249586},{"parent":"TRule_table_hints.TBlock2.TAlt1","rule":"TRule_table_hints.TBlock2.TAlt1.Rule_table_hint1","sum":151892471},{"parent":"TRule_table_hints.TBlock2.TAlt2","rule":"TRule_table_hints.TBlock2.TAlt2.Block3","sum":9986448},{"parent":"TRule_table_hints.TBlock2.TAlt2","rule":"TRule_table_hints.TBlock2.TAlt2.Rule_table_hint2","sum":10249586},{"parent":"TRule_table_hints.TBlock2.TAlt2","rule":"TRule_table_hints.TBlock2.TAlt2.Token1","sum":10249586},{"parent":"TRule_table_hints.TBlock2.TAlt2","rule":"TRule_table_hints.TBlock2.TAlt2.Token4","sum":10249586},{"parent":"TRule_table_hints.TBlock2.TAlt2.TBlock3","rule":"TRule_table_hints.TBlock2.TAlt2.TBlock3.Rule_table_hint2","sum":10380777},{"parent":"TRule_table_hints.TBlock2.TAlt2.TBlock3","rule":"TRule_table_hints.TBlock2.TAlt2.TBlock3.Token1","sum":10380777},{"parent":"TRule_table_key","rule":"TRule_table_key.Block2"
,"sum":862367},{"parent":"TRule_table_key","rule":"TRule_table_key.Rule_id_table_or_type1","sum":330502970},{"parent":"TRule_table_key.TBlock2","rule":"TRule_table_key.TBlock2.Rule_view_name2","sum":862367},{"parent":"TRule_table_key.TBlock2","rule":"TRule_table_key.TBlock2.Token1","sum":862367},{"parent":"TRule_table_ref","rule":"TRule_table_ref.Block1","sum":39835805},{"parent":"TRule_table_ref","rule":"TRule_table_ref.Block2","sum":5991149},{"parent":"TRule_table_ref","rule":"TRule_table_ref.Block3","sum":950258780},{"parent":"TRule_table_ref","rule":"TRule_table_ref.Block4","sum":19696469},{"parent":"TRule_table_ref.TBlock1","rule":"TRule_table_ref.TBlock1.Rule_cluster_expr1","sum":39835805},{"parent":"TRule_table_ref.TBlock1","rule":"TRule_table_ref.TBlock1.Token2","sum":39835805},{"parent":"TRule_table_ref.TBlock2","rule":"TRule_table_ref.TBlock2.Token1","sum":5991149},{"parent":"TRule_table_ref.TBlock3","rule":"TRule_table_ref.TBlock3.Alt1","sum":330502970},{"parent":"TRule_table_ref.TBlock3","rule":"TRule_table_ref.TBlock3.Alt2","sum":96096575},{"parent":"TRule_table_ref.TBlock3","rule":"TRule_table_ref.TBlock3.Alt3","sum":523659235},{"parent":"TRule_table_ref.TBlock3.TAlt1","rule":"TRule_table_ref.TBlock3.TAlt1.Rule_table_key1","sum":330502970},{"parent":"TRule_table_ref.TBlock3.TAlt2","rule":"TRule_table_ref.TBlock3.TAlt2.Block3","sum":96096550},{"parent":"TRule_table_ref.TBlock3.TAlt2","rule":"TRule_table_ref.TBlock3.TAlt2.Rule_an_id_expr1","sum":96096575},{"parent":"TRule_table_ref.TBlock3.TAlt2","rule":"TRule_table_ref.TBlock3.TAlt2.Token2","sum":96096575},{"parent":"TRule_table_ref.TBlock3.TAlt2","rule":"TRule_table_ref.TBlock3.TAlt2.Token4","sum":96096575},{"parent":"TRule_table_ref.TBlock3.TAlt2.TBlock3","rule":"TRule_table_ref.TBlock3.TAlt2.TBlock3.Block2","sum":49748295},{"parent":"TRule_table_ref.TBlock3.TAlt2.TBlock3","rule":"TRule_table_ref.TBlock3.TAlt2.TBlock3.Block3","sum":354064},{"parent":"TRule_table_ref.TBlock3.TAlt2.TBlock3","rule":"TRul
e_table_ref.TBlock3.TAlt2.TBlock3.Rule_table_arg1","sum":96096550},{"parent":"TRule_table_ref.TBlock3.TAlt2.TBlock3.TBlock2","rule":"TRule_table_ref.TBlock3.TAlt2.TBlock3.TBlock2.Rule_table_arg2","sum":96815568},{"parent":"TRule_table_ref.TBlock3.TAlt2.TBlock3.TBlock2","rule":"TRule_table_ref.TBlock3.TAlt2.TBlock3.TBlock2.Token1","sum":96815568},{"parent":"TRule_table_ref.TBlock3.TAlt2.TBlock3.TBlock3","rule":"TRule_table_ref.TBlock3.TAlt2.TBlock3.TBlock3.Token1","sum":354064},{"parent":"TRule_table_ref.TBlock3.TAlt3","rule":"TRule_table_ref.TBlock3.TAlt3.Block2","sum":22139268},{"parent":"TRule_table_ref.TBlock3.TAlt3","rule":"TRule_table_ref.TBlock3.TAlt3.Block3","sum":175129},{"parent":"TRule_table_ref.TBlock3.TAlt3","rule":"TRule_table_ref.TBlock3.TAlt3.Rule_bind_parameter1","sum":523659235},{"parent":"TRule_table_ref.TBlock3.TAlt3.TBlock2","rule":"TRule_table_ref.TBlock3.TAlt3.TBlock2.Block2","sum":15792195},{"parent":"TRule_table_ref.TBlock3.TAlt3.TBlock2","rule":"TRule_table_ref.TBlock3.TAlt3.TBlock2.Token1","sum":22139268},{"parent":"TRule_table_ref.TBlock3.TAlt3.TBlock2","rule":"TRule_table_ref.TBlock3.TAlt3.TBlock2.Token3","sum":22139268},{"parent":"TRule_table_ref.TBlock3.TAlt3.TBlock2.TBlock2","rule":"TRule_table_ref.TBlock3.TAlt3.TBlock2.TBlock2.Rule_expr_list1","sum":15792195},{"parent":"TRule_table_ref.TBlock3.TAlt3.TBlock3","rule":"TRule_table_ref.TBlock3.TAlt3.TBlock3.Rule_view_name2","sum":175129},{"parent":"TRule_table_ref.TBlock3.TAlt3.TBlock3","rule":"TRule_table_ref.TBlock3.TAlt3.TBlock3.Token1","sum":175129},{"parent":"TRule_table_ref.TBlock4","rule":"TRule_table_ref.TBlock4.Rule_table_hints1","sum":19696469},{"parent":"TRule_tablesample_clause","rule":"TRule_tablesample_clause.Block6","sum":9174},{"parent":"TRule_tablesample_clause","rule":"TRule_tablesample_clause.Rule_expr4","sum":327861},{"parent":"TRule_tablesample_clause","rule":"TRule_tablesample_clause.Rule_sampling_mode2","sum":327861},{"parent":"TRule_tablesample_clause","rule":"TRul
e_tablesample_clause.Token1","sum":327861},{"parent":"TRule_tablesample_clause","rule":"TRule_tablesample_clause.Token3","sum":327861},{"parent":"TRule_tablesample_clause","rule":"TRule_tablesample_clause.Token5","sum":327861},{"parent":"TRule_tablesample_clause.TBlock6","rule":"TRule_tablesample_clause.TBlock6.Rule_repeatable_clause1","sum":9174},{"parent":"TRule_type_id","rule":"TRule_type_id.Token1","sum":11605036},{"parent":"TRule_type_name","rule":"TRule_type_name.Alt_type_name1","sum":89143008},{"parent":"TRule_type_name","rule":"TRule_type_name.Alt_type_name2","sum":701067436},{"parent":"TRule_type_name.TAlt1","rule":"TRule_type_name.TAlt1.Rule_type_name_composite1","sum":89143008},{"parent":"TRule_type_name.TAlt2","rule":"TRule_type_name.TAlt2.Block1","sum":701067436},{"parent":"TRule_type_name.TAlt2","rule":"TRule_type_name.TAlt2.Block2","sum":73831696},{"parent":"TRule_type_name.TAlt2.TBlock1","rule":"TRule_type_name.TAlt2.TBlock1.Alt1","sum":28171640},{"parent":"TRule_type_name.TAlt2.TBlock1","rule":"TRule_type_name.TAlt2.TBlock1.Alt2","sum":672895796},{"parent":"TRule_type_name.TAlt2.TBlock1.TAlt1","rule":"TRule_type_name.TAlt2.TBlock1.TAlt1.Rule_type_name_decimal1","sum":28171640},{"parent":"TRule_type_name.TAlt2.TBlock1.TAlt2","rule":"TRule_type_name.TAlt2.TBlock1.TAlt2.Rule_type_name_simple1","sum":672895796},{"parent":"TRule_type_name.TAlt2.TBlock2","rule":"TRule_type_name.TAlt2.TBlock2.Token1","sum":73832709},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Block4","sum":9720740},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Block5","sum":215778},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Block6","sum":10621},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Rule_type_name_or_bind9","sum":10047579},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Token1","sum":10047579},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable
.Token10","sum":10047579},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Token2","sum":10047579},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Token3","sum":10047579},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Token7","sum":10047579},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Token8","sum":10047579},{"parent":"TRule_type_name_callable.TBlock4","rule":"TRule_type_name_callable.TBlock4.Rule_callable_arg_list1","sum":9720740},{"parent":"TRule_type_name_callable.TBlock5","rule":"TRule_type_name_callable.TBlock5.Token1","sum":215778},{"parent":"TRule_type_name_callable.TBlock6","rule":"TRule_type_name_callable.TBlock6.Rule_callable_arg_list2","sum":10621},{"parent":"TRule_type_name_callable.TBlock6","rule":"TRule_type_name_callable.TBlock6.Token1","sum":10621},{"parent":"TRule_type_name_callable.TBlock6","rule":"TRule_type_name_callable.TBlock6.Token3","sum":10621},{"parent":"TRule_type_name_composite","rule":"TRule_type_name_composite.Block1","sum":106459710},{"parent":"TRule_type_name_composite","rule":"TRule_type_name_composite.Block2","sum":4232015},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt1","sum":33241727},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt10","sum":19606},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt11","sum":461582},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt12","sum":8101},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt13","sum":10047579},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt2","sum":2141941},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt3","sum":26667991},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_
composite.TBlock1.Alt4","sum":134576},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt5","sum":25611693},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt6","sum":1511517},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt8","sum":6600605},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt9","sum":12792},{"parent":"TRule_type_name_composite.TBlock1.TAlt1","rule":"TRule_type_name_composite.TBlock1.TAlt1.Rule_type_name_optional1","sum":33241727},{"parent":"TRule_type_name_composite.TBlock1.TAlt10","rule":"TRule_type_name_composite.TBlock1.TAlt10.Rule_type_name_enum1","sum":19606},{"parent":"TRule_type_name_composite.TBlock1.TAlt11","rule":"TRule_type_name_composite.TBlock1.TAlt11.Rule_type_name_resource1","sum":461582},{"parent":"TRule_type_name_composite.TBlock1.TAlt12","rule":"TRule_type_name_composite.TBlock1.TAlt12.Rule_type_name_tagged1","sum":8101},{"parent":"TRule_type_name_composite.TBlock1.TAlt13","rule":"TRule_type_name_composite.TBlock1.TAlt13.Rule_type_name_callable1","sum":10047579},{"parent":"TRule_type_name_composite.TBlock1.TAlt2","rule":"TRule_type_name_composite.TBlock1.TAlt2.Rule_type_name_tuple1","sum":2141941},{"parent":"TRule_type_name_composite.TBlock1.TAlt3","rule":"TRule_type_name_composite.TBlock1.TAlt3.Rule_type_name_struct1","sum":26667991},{"parent":"TRule_type_name_composite.TBlock1.TAlt4","rule":"TRule_type_name_composite.TBlock1.TAlt4.Rule_type_name_variant1","sum":134576},{"parent":"TRule_type_name_composite.TBlock1.TAlt5","rule":"TRule_type_name_composite.TBlock1.TAlt5.Rule_type_name_list1","sum":25611693},{"parent":"TRule_type_name_composite.TBlock1.TAlt6","rule":"TRule_type_name_composite.TBlock1.TAlt6.Rule_type_name_stream1","sum":1511517},{"parent":"TRule_type_name_composite.TBlock1.TAlt8","rule":"TRule_type_name_composite.TBlock1.TAlt8.Rule_type_name_dict1","sum":66
00605},{"parent":"TRule_type_name_composite.TBlock1.TAlt9","rule":"TRule_type_name_composite.TBlock1.TAlt9.Rule_type_name_set1","sum":12792},{"parent":"TRule_type_name_composite.TBlock2","rule":"TRule_type_name_composite.TBlock2.Token1","sum":4232018},{"parent":"TRule_type_name_decimal","rule":"TRule_type_name_decimal.Rule_integer_or_bind3","sum":28171640},{"parent":"TRule_type_name_decimal","rule":"TRule_type_name_decimal.Rule_integer_or_bind5","sum":28171640},{"parent":"TRule_type_name_decimal","rule":"TRule_type_name_decimal.Token1","sum":28171640},{"parent":"TRule_type_name_decimal","rule":"TRule_type_name_decimal.Token2","sum":28171640},{"parent":"TRule_type_name_decimal","rule":"TRule_type_name_decimal.Token4","sum":28171640},{"parent":"TRule_type_name_decimal","rule":"TRule_type_name_decimal.Token6","sum":28171640},{"parent":"TRule_type_name_dict","rule":"TRule_type_name_dict.Rule_type_name_or_bind3","sum":6600605},{"parent":"TRule_type_name_dict","rule":"TRule_type_name_dict.Rule_type_name_or_bind5","sum":6600605},{"parent":"TRule_type_name_dict","rule":"TRule_type_name_dict.Token1","sum":6600605},{"parent":"TRule_type_name_dict","rule":"TRule_type_name_dict.Token2","sum":6600605},{"parent":"TRule_type_name_dict","rule":"TRule_type_name_dict.Token4","sum":6600605},{"parent":"TRule_type_name_dict","rule":"TRule_type_name_dict.Token6","sum":6600605},{"parent":"TRule_type_name_enum","rule":"TRule_type_name_enum.Block4","sum":19361},{"parent":"TRule_type_name_enum","rule":"TRule_type_name_enum.Block5","sum":379},{"parent":"TRule_type_name_enum","rule":"TRule_type_name_enum.Rule_type_name_tag3","sum":19606},{"parent":"TRule_type_name_enum","rule":"TRule_type_name_enum.Token1","sum":19606},{"parent":"TRule_type_name_enum","rule":"TRule_type_name_enum.Token2","sum":19606},{"parent":"TRule_type_name_enum","rule":"TRule_type_name_enum.Token6","sum":19606},{"parent":"TRule_type_name_enum.TBlock4","rule":"TRule_type_name_enum.TBlock4.Rule_type_name_tag2","sum":42245},{
"parent":"TRule_type_name_enum.TBlock4","rule":"TRule_type_name_enum.TBlock4.Token1","sum":42245},{"parent":"TRule_type_name_enum.TBlock5","rule":"TRule_type_name_enum.TBlock5.Token1","sum":379},{"parent":"TRule_type_name_list","rule":"TRule_type_name_list.Rule_type_name_or_bind3","sum":25611693},{"parent":"TRule_type_name_list","rule":"TRule_type_name_list.Token1","sum":25611693},{"parent":"TRule_type_name_list","rule":"TRule_type_name_list.Token2","sum":25611693},{"parent":"TRule_type_name_list","rule":"TRule_type_name_list.Token4","sum":25611693},{"parent":"TRule_type_name_optional","rule":"TRule_type_name_optional.Rule_type_name_or_bind3","sum":33241727},{"parent":"TRule_type_name_optional","rule":"TRule_type_name_optional.Token1","sum":33241727},{"parent":"TRule_type_name_optional","rule":"TRule_type_name_optional.Token2","sum":33241727},{"parent":"TRule_type_name_optional","rule":"TRule_type_name_optional.Token4","sum":33241727},{"parent":"TRule_type_name_or_bind","rule":"TRule_type_name_or_bind.Alt_type_name_or_bind1","sum":635437267},{"parent":"TRule_type_name_or_bind","rule":"TRule_type_name_or_bind.Alt_type_name_or_bind2","sum":2641538},{"parent":"TRule_type_name_or_bind.TAlt1","rule":"TRule_type_name_or_bind.TAlt1.Rule_type_name1","sum":635437267},{"parent":"TRule_type_name_or_bind.TAlt2","rule":"TRule_type_name_or_bind.TAlt2.Rule_bind_parameter1","sum":2641538},{"parent":"TRule_type_name_resource","rule":"TRule_type_name_resource.Rule_type_name_tag3","sum":461582},{"parent":"TRule_type_name_resource","rule":"TRule_type_name_resource.Token1","sum":461582},{"parent":"TRule_type_name_resource","rule":"TRule_type_name_resource.Token2","sum":461582},{"parent":"TRule_type_name_resource","rule":"TRule_type_name_resource.Token4","sum":461582},{"parent":"TRule_type_name_set","rule":"TRule_type_name_set.Rule_type_name_or_bind3","sum":12792},{"parent":"TRule_type_name_set","rule":"TRule_type_name_set.Token1","sum":12792},{"parent":"TRule_type_name_set","rule":"TRul
e_type_name_set.Token2","sum":12792},{"parent":"TRule_type_name_set","rule":"TRule_type_name_set.Token4","sum":12792},{"parent":"TRule_type_name_simple","rule":"TRule_type_name_simple.Rule_an_id_pure1","sum":674730413},{"parent":"TRule_type_name_stream","rule":"TRule_type_name_stream.Rule_type_name_or_bind3","sum":1511517},{"parent":"TRule_type_name_stream","rule":"TRule_type_name_stream.Token1","sum":1511517},{"parent":"TRule_type_name_stream","rule":"TRule_type_name_stream.Token2","sum":1511517},{"parent":"TRule_type_name_stream","rule":"TRule_type_name_stream.Token4","sum":1511517},{"parent":"TRule_type_name_struct","rule":"TRule_type_name_struct.Block2","sum":26667991},{"parent":"TRule_type_name_struct","rule":"TRule_type_name_struct.Token1","sum":26667991},{"parent":"TRule_type_name_struct.TBlock2","rule":"TRule_type_name_struct.TBlock2.Alt1","sum":26665125},{"parent":"TRule_type_name_struct.TBlock2","rule":"TRule_type_name_struct.TBlock2.Alt2","sum":2866},{"parent":"TRule_type_name_struct.TBlock2.TAlt1","rule":"TRule_type_name_struct.TBlock2.TAlt1.Block2","sum":26665114},{"parent":"TRule_type_name_struct.TBlock2.TAlt1","rule":"TRule_type_name_struct.TBlock2.TAlt1.Token1","sum":26665125},{"parent":"TRule_type_name_struct.TBlock2.TAlt1","rule":"TRule_type_name_struct.TBlock2.TAlt1.Token3","sum":26665125},{"parent":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2","rule":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.Block2","sum":20354699},{"parent":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2","rule":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.Block3","sum":2043506},{"parent":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2","rule":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.Rule_struct_arg1","sum":26665114},{"parent":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.TBlock2","rule":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.TBlock2.Rule_struct_arg2","sum":87584103},{"parent":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.TBlock2","rule":"TRule_type_name_struct.TBl
ock2.TAlt1.TBlock2.TBlock2.Token1","sum":87584103},{"parent":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.TBlock3","rule":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.TBlock3.Token1","sum":2043506},{"parent":"TRule_type_name_struct.TBlock2.TAlt2","rule":"TRule_type_name_struct.TBlock2.TAlt2.Token1","sum":2866},{"parent":"TRule_type_name_tag","rule":"TRule_type_name_tag.Alt_type_name_tag1","sum":104950323},{"parent":"TRule_type_name_tag","rule":"TRule_type_name_tag.Alt_type_name_tag2","sum":18081381},{"parent":"TRule_type_name_tag","rule":"TRule_type_name_tag.Alt_type_name_tag3","sum":2578002},{"parent":"TRule_type_name_tag.TAlt1","rule":"TRule_type_name_tag.TAlt1.Rule_id1","sum":104950323},{"parent":"TRule_type_name_tag.TAlt2","rule":"TRule_type_name_tag.TAlt2.Token1","sum":18081381},{"parent":"TRule_type_name_tag.TAlt3","rule":"TRule_type_name_tag.TAlt3.Rule_bind_parameter1","sum":2578002},{"parent":"TRule_type_name_tagged","rule":"TRule_type_name_tagged.Rule_type_name_or_bind3","sum":8101},{"parent":"TRule_type_name_tagged","rule":"TRule_type_name_tagged.Rule_type_name_tag5","sum":8101},{"parent":"TRule_type_name_tagged","rule":"TRule_type_name_tagged.Token1","sum":8101},{"parent":"TRule_type_name_tagged","rule":"TRule_type_name_tagged.Token2","sum":8101},{"parent":"TRule_type_name_tagged","rule":"TRule_type_name_tagged.Token4","sum":8101},{"parent":"TRule_type_name_tagged","rule":"TRule_type_name_tagged.Token6","sum":8101},{"parent":"TRule_type_name_tuple","rule":"TRule_type_name_tuple.Block2","sum":2141941},{"parent":"TRule_type_name_tuple","rule":"TRule_type_name_tuple.Token1","sum":2141941},{"parent":"TRule_type_name_tuple.TBlock2","rule":"TRule_type_name_tuple.TBlock2.Alt1","sum":2141931},{"parent":"TRule_type_name_tuple.TBlock2","rule":"TRule_type_name_tuple.TBlock2.Alt2","sum":10},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1","rule":"TRule_type_name_tuple.TBlock2.TAlt1.Block2","sum":2141931},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1","rule":"TRule_
type_name_tuple.TBlock2.TAlt1.Token1","sum":2141931},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1","rule":"TRule_type_name_tuple.TBlock2.TAlt1.Token3","sum":2141931},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2","rule":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.Block2","sum":2140421},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2","rule":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.Block3","sum":63802},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2","rule":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.Rule_type_name_or_bind1","sum":2141931},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.TBlock2","rule":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.TBlock2.Rule_type_name_or_bind2","sum":3322309},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.TBlock2","rule":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.TBlock2.Token1","sum":3322309},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.TBlock3","rule":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.TBlock3.Token1","sum":63802},{"parent":"TRule_type_name_tuple.TBlock2.TAlt2","rule":"TRule_type_name_tuple.TBlock2.TAlt2.Token1","sum":10},{"parent":"TRule_type_name_variant","rule":"TRule_type_name_variant.Block4","sum":119447},{"parent":"TRule_type_name_variant","rule":"TRule_type_name_variant.Block5","sum":98},{"parent":"TRule_type_name_variant","rule":"TRule_type_name_variant.Rule_variant_arg3","sum":134576},{"parent":"TRule_type_name_variant","rule":"TRule_type_name_variant.Token1","sum":134576},{"parent":"TRule_type_name_variant","rule":"TRule_type_name_variant.Token2","sum":134576},{"parent":"TRule_type_name_variant","rule":"TRule_type_name_variant.Token6","sum":134576},{"parent":"TRule_type_name_variant.TBlock4","rule":"TRule_type_name_variant.TBlock4.Rule_variant_arg2","sum":224717},{"parent":"TRule_type_name_variant.TBlock4","rule":"TRule_type_name_variant.TBlock4.Token1","sum":224717},{"parent":"TRule_type_name_variant.TBlock5","rule":"TRule_type_name_variant.TBlock5.Toke
n1","sum":98},{"parent":"TRule_unary_casual_subexpr","rule":"TRule_unary_casual_subexpr.Block1","sum":15968238357},{"parent":"TRule_unary_casual_subexpr","rule":"TRule_unary_casual_subexpr.Rule_unary_subexpr_suffix2","sum":15968238357},{"parent":"TRule_unary_casual_subexpr.TBlock1","rule":"TRule_unary_casual_subexpr.TBlock1.Alt1","sum":7152342653},{"parent":"TRule_unary_casual_subexpr.TBlock1","rule":"TRule_unary_casual_subexpr.TBlock1.Alt2","sum":8815895704},{"parent":"TRule_unary_casual_subexpr.TBlock1.TAlt1","rule":"TRule_unary_casual_subexpr.TBlock1.TAlt1.Rule_id_expr1","sum":7152342653},{"parent":"TRule_unary_casual_subexpr.TBlock1.TAlt2","rule":"TRule_unary_casual_subexpr.TBlock1.TAlt2.Rule_atom_expr1","sum":8815895704},{"parent":"TRule_unary_op","rule":"TRule_unary_op.Token1","sum":87288612},{"parent":"TRule_unary_subexpr","rule":"TRule_unary_subexpr.Alt_unary_subexpr1","sum":15968238357},{"parent":"TRule_unary_subexpr","rule":"TRule_unary_subexpr.Alt_unary_subexpr2","sum":6364265},{"parent":"TRule_unary_subexpr.TAlt1","rule":"TRule_unary_subexpr.TAlt1.Rule_unary_casual_subexpr1","sum":15968238357},{"parent":"TRule_unary_subexpr.TAlt2","rule":"TRule_unary_subexpr.TAlt2.Rule_json_api_expr1","sum":6364265},{"parent":"TRule_unary_subexpr_suffix","rule":"TRule_unary_subexpr_suffix.Block1","sum":4753125763},{"parent":"TRule_unary_subexpr_suffix.TBlock1","rule":"TRule_unary_subexpr_suffix.TBlock1.Block1","sum":5006032286},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.Alt1","sum":172134618},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.Alt2","sum":2862515375},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.Alt3","sum":1971382293},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt1","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt1.Rule_key_expr1","sum":172134618},{"parent":"TR
ule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt2","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt2.Rule_invoke_expr1","sum":2862515375},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.Block2","sum":1971382293},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.Token1","sum":1971382293},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.Alt1","sum":3151772},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.Alt2","sum":40160019},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.Alt3","sum":1928070502},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.TAlt1","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.TAlt1.Rule_bind_parameter1","sum":3151772},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.TAlt2","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.TAlt2.Token1","sum":40160019},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.TAlt3","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.TAlt3.Rule_an_id_or_type1","sum":1928070502},{"parent":"TRule_use_stmt","rule":"TRule_use_stmt.Rule_cluster_expr2","sum":327978948},{"parent":"TRule_use_stmt","rule":"TRule_use_stmt.Token1","sum":327978948},{"parent":"TRule_using_call_expr","rule":"TRule_using_call_expr.Block1","sum":2739870},{"parent":"TRule_using_call_expr","rule":"TRule_using_call_expr.Rule_invoke_expr2","sum":2739870},{"parent":"TRule_using_call_expr.TBlock1","rule":"TRule_using_call_expr.TBlock1.Alt1","sum":1509367},{"parent":"TRule_using_call_expr.TBlock1","rule":"TRule_using_call_expr.TBlock1.Alt3","sum":1230503},{"parent":"TRu
le_using_call_expr.TBlock1.TAlt1","rule":"TRule_using_call_expr.TBlock1.TAlt1.Rule_an_id_or_type1","sum":1509367},{"parent":"TRule_using_call_expr.TBlock1.TAlt1","rule":"TRule_using_call_expr.TBlock1.TAlt1.Rule_an_id_or_type3","sum":1509367},{"parent":"TRule_using_call_expr.TBlock1.TAlt1","rule":"TRule_using_call_expr.TBlock1.TAlt1.Token2","sum":1509367},{"parent":"TRule_using_call_expr.TBlock1.TAlt3","rule":"TRule_using_call_expr.TBlock1.TAlt3.Rule_bind_parameter1","sum":1230503},{"parent":"TRule_value_constructor","rule":"TRule_value_constructor.Alt_value_constructor1","sum":32195},{"parent":"TRule_value_constructor","rule":"TRule_value_constructor.Alt_value_constructor2","sum":49134},{"parent":"TRule_value_constructor","rule":"TRule_value_constructor.Alt_value_constructor3","sum":79687},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Rule_expr3","sum":32195},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Rule_expr5","sum":32195},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Rule_expr7","sum":32195},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Token1","sum":32195},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Token2","sum":32195},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Token4","sum":32195},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Token6","sum":32195},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Token8","sum":32195},{"parent":"TRule_value_constructor.TAlt2","rule":"TRule_value_constructor.TAlt2.Rule_expr3","sum":49134},{"parent":"TRule_value_constructor.TAlt2","rule":"TRule_value_constructor.TAlt2.Rule_expr5","sum":49134},{"parent":"TRule_value_constructor.TAlt2","rule":"TRule_value_constructor.TAlt2.Token1","sum":49134},{"parent":"TRule_value_constructor.TAlt2","rule":"TRule_value_constr
uctor.TAlt2.Token2","sum":49134},{"parent":"TRule_value_constructor.TAlt2","rule":"TRule_value_constructor.TAlt2.Token4","sum":49134},{"parent":"TRule_value_constructor.TAlt2","rule":"TRule_value_constructor.TAlt2.Token6","sum":49134},{"parent":"TRule_value_constructor.TAlt3","rule":"TRule_value_constructor.TAlt3.Rule_expr3","sum":79687},{"parent":"TRule_value_constructor.TAlt3","rule":"TRule_value_constructor.TAlt3.Rule_expr5","sum":79687},{"parent":"TRule_value_constructor.TAlt3","rule":"TRule_value_constructor.TAlt3.Token1","sum":79687},{"parent":"TRule_value_constructor.TAlt3","rule":"TRule_value_constructor.TAlt3.Token2","sum":79687},{"parent":"TRule_value_constructor.TAlt3","rule":"TRule_value_constructor.TAlt3.Token4","sum":79687},{"parent":"TRule_value_constructor.TAlt3","rule":"TRule_value_constructor.TAlt3.Token6","sum":79687},{"parent":"TRule_values_source","rule":"TRule_values_source.Alt_values_source1","sum":1084119},{"parent":"TRule_values_source","rule":"TRule_values_source.Alt_values_source2","sum":206932320},{"parent":"TRule_values_source.TAlt1","rule":"TRule_values_source.TAlt1.Rule_values_stmt1","sum":1084119},{"parent":"TRule_values_source.TAlt2","rule":"TRule_values_source.TAlt2.Rule_select_stmt1","sum":206932320},{"parent":"TRule_values_source_row","rule":"TRule_values_source_row.Rule_expr_list2","sum":9047706},{"parent":"TRule_values_source_row","rule":"TRule_values_source_row.Token1","sum":9047706},{"parent":"TRule_values_source_row","rule":"TRule_values_source_row.Token3","sum":9047706},{"parent":"TRule_values_source_row_list","rule":"TRule_values_source_row_list.Block2","sum":489068},{"parent":"TRule_values_source_row_list","rule":"TRule_values_source_row_list.Rule_values_source_row1","sum":1252440},{"parent":"TRule_values_source_row_list.TBlock2","rule":"TRule_values_source_row_list.TBlock2.Rule_values_source_row2","sum":7795266},{"parent":"TRule_values_source_row_list.TBlock2","rule":"TRule_values_source_row_list.TBlock2.Token1","sum":779
5266},{"parent":"TRule_values_stmt","rule":"TRule_values_stmt.Rule_values_source_row_list2","sum":1252440},{"parent":"TRule_values_stmt","rule":"TRule_values_stmt.Token1","sum":1252440},{"parent":"TRule_variant_arg","rule":"TRule_variant_arg.Block1","sum":358860},{"parent":"TRule_variant_arg","rule":"TRule_variant_arg.Rule_type_name_or_bind2","sum":18256382},{"parent":"TRule_variant_arg.TBlock1","rule":"TRule_variant_arg.TBlock1.Rule_type_name_tag1","sum":358860},{"parent":"TRule_variant_arg.TBlock1","rule":"TRule_variant_arg.TBlock1.Token2","sum":358860},{"parent":"TRule_view_name","rule":"TRule_view_name.Alt_view_name1","sum":1297980},{"parent":"TRule_view_name.TAlt1","rule":"TRule_view_name.TAlt1.Rule_an_id1","sum":1297980},{"parent":"TRule_when_expr","rule":"TRule_when_expr.Rule_expr2","sum":157500144},{"parent":"TRule_when_expr","rule":"TRule_when_expr.Rule_expr4","sum":157500144},{"parent":"TRule_when_expr","rule":"TRule_when_expr.Token1","sum":157500144},{"parent":"TRule_when_expr","rule":"TRule_when_expr.Token3","sum":157500144},{"parent":"TRule_window_clause","rule":"TRule_window_clause.Rule_window_definition_list2","sum":13537900},{"parent":"TRule_window_clause","rule":"TRule_window_clause.Token1","sum":13537900},{"parent":"TRule_window_definition","rule":"TRule_window_definition.Rule_new_window_name1","sum":14110566},{"parent":"TRule_window_definition","rule":"TRule_window_definition.Rule_window_specification3","sum":14110566},{"parent":"TRule_window_definition","rule":"TRule_window_definition.Token2","sum":14110566},{"parent":"TRule_window_definition_list","rule":"TRule_window_definition_list.Block2","sum":432400},{"parent":"TRule_window_definition_list","rule":"TRule_window_definition_list.Rule_window_definition1","sum":13537900},{"parent":"TRule_window_definition_list.TBlock2","rule":"TRule_window_definition_list.TBlock2.Rule_window_definition2","sum":572666},{"parent":"TRule_window_definition_list.TBlock2","rule":"TRule_window_definition_list.TBlock2.
Token1","sum":572666},{"parent":"TRule_window_frame_between","rule":"TRule_window_frame_between.Rule_window_frame_bound2","sum":2309562},{"parent":"TRule_window_frame_between","rule":"TRule_window_frame_between.Rule_window_frame_bound4","sum":2309562},{"parent":"TRule_window_frame_between","rule":"TRule_window_frame_between.Token1","sum":2309562},{"parent":"TRule_window_frame_between","rule":"TRule_window_frame_between.Token3","sum":2309562},{"parent":"TRule_window_frame_bound","rule":"TRule_window_frame_bound.Alt_window_frame_bound1","sum":1317591},{"parent":"TRule_window_frame_bound","rule":"TRule_window_frame_bound.Alt_window_frame_bound2","sum":3359164},{"parent":"TRule_window_frame_bound.TAlt1","rule":"TRule_window_frame_bound.TAlt1.Token1","sum":1317591},{"parent":"TRule_window_frame_bound.TAlt1","rule":"TRule_window_frame_bound.TAlt1.Token2","sum":1317591},{"parent":"TRule_window_frame_bound.TAlt2","rule":"TRule_window_frame_bound.TAlt2.Block1","sum":3359164},{"parent":"TRule_window_frame_bound.TAlt2","rule":"TRule_window_frame_bound.TAlt2.Token2","sum":3359164},{"parent":"TRule_window_frame_bound.TAlt2.TBlock1","rule":"TRule_window_frame_bound.TAlt2.TBlock1.Alt1","sum":1114703},{"parent":"TRule_window_frame_bound.TAlt2.TBlock1","rule":"TRule_window_frame_bound.TAlt2.TBlock1.Alt2","sum":2244461},{"parent":"TRule_window_frame_bound.TAlt2.TBlock1.TAlt1","rule":"TRule_window_frame_bound.TAlt2.TBlock1.TAlt1.Rule_expr1","sum":1114703},{"parent":"TRule_window_frame_bound.TAlt2.TBlock1.TAlt2","rule":"TRule_window_frame_bound.TAlt2.TBlock1.TAlt2.Token1","sum":2244461},{"parent":"TRule_window_frame_clause","rule":"TRule_window_frame_clause.Rule_window_frame_extent2","sum":2367193},{"parent":"TRule_window_frame_clause","rule":"TRule_window_frame_clause.Rule_window_frame_units1","sum":2367193},{"parent":"TRule_window_frame_extent","rule":"TRule_window_frame_extent.Alt_window_frame_extent1","sum":57631},{"parent":"TRule_window_frame_extent","rule":"TRule_window_frame_ext
ent.Alt_window_frame_extent2","sum":2309562},{"parent":"TRule_window_frame_extent.TAlt1","rule":"TRule_window_frame_extent.TAlt1.Rule_window_frame_bound1","sum":57631},{"parent":"TRule_window_frame_extent.TAlt2","rule":"TRule_window_frame_extent.TAlt2.Rule_window_frame_between1","sum":2309562},{"parent":"TRule_window_frame_units","rule":"TRule_window_frame_units.Token1","sum":2367193},{"parent":"TRule_window_name","rule":"TRule_window_name.Rule_an_id_window1","sum":46472719},{"parent":"TRule_window_name_or_specification","rule":"TRule_window_name_or_specification.Alt_window_name_or_specification1","sum":32362153},{"parent":"TRule_window_name_or_specification","rule":"TRule_window_name_or_specification.Alt_window_name_or_specification2","sum":12739706},{"parent":"TRule_window_name_or_specification.TAlt1","rule":"TRule_window_name_or_specification.TAlt1.Rule_window_name1","sum":32362153},{"parent":"TRule_window_name_or_specification.TAlt2","rule":"TRule_window_name_or_specification.TAlt2.Rule_window_specification1","sum":12739706},{"parent":"TRule_window_order_clause","rule":"TRule_window_order_clause.Rule_order_by_clause1","sum":18894263},{"parent":"TRule_window_partition_clause","rule":"TRule_window_partition_clause.Block2","sum":51324},{"parent":"TRule_window_partition_clause","rule":"TRule_window_partition_clause.Rule_named_expr_list4","sum":21674753},{"parent":"TRule_window_partition_clause","rule":"TRule_window_partition_clause.Token1","sum":21674753},{"parent":"TRule_window_partition_clause","rule":"TRule_window_partition_clause.Token3","sum":21674753},{"parent":"TRule_window_partition_clause.TBlock2","rule":"TRule_window_partition_clause.TBlock2.Token1","sum":51324},{"parent":"TRule_window_specification","rule":"TRule_window_specification.Rule_window_specification_details2","sum":26850272},{"parent":"TRule_window_specification","rule":"TRule_window_specification.Token1","sum":26850272},{"parent":"TRule_window_specification","rule":"TRule_window_specification.T
oken3","sum":26850272},{"parent":"TRule_window_specification_details","rule":"TRule_window_specification_details.Block2","sum":21674752},{"parent":"TRule_window_specification_details","rule":"TRule_window_specification_details.Block3","sum":18894263},{"parent":"TRule_window_specification_details","rule":"TRule_window_specification_details.Block4","sum":2367193},{"parent":"TRule_window_specification_details.TBlock2","rule":"TRule_window_specification_details.TBlock2.Rule_window_partition_clause1","sum":21674752},{"parent":"TRule_window_specification_details.TBlock3","rule":"TRule_window_specification_details.TBlock3.Rule_window_order_clause1","sum":18894263},{"parent":"TRule_window_specification_details.TBlock4","rule":"TRule_window_specification_details.TBlock4.Rule_window_frame_clause1","sum":2367193},{"parent":"TRule_without_column_list","rule":"TRule_without_column_list.Block2","sum":9142942},{"parent":"TRule_without_column_list","rule":"TRule_without_column_list.Block3","sum":1296663},{"parent":"TRule_without_column_list","rule":"TRule_without_column_list.Rule_without_column_name1","sum":24720066},{"parent":"TRule_without_column_list.TBlock2","rule":"TRule_without_column_list.TBlock2.Rule_without_column_name2","sum":29091924},{"parent":"TRule_without_column_list.TBlock2","rule":"TRule_without_column_list.TBlock2.Token1","sum":29091924},{"parent":"TRule_without_column_list.TBlock3","rule":"TRule_without_column_list.TBlock3.Token1","sum":1296663},{"parent":"TRule_without_column_name","rule":"TRule_without_column_name.Alt_without_column_name1","sum":30689820},{"parent":"TRule_without_column_name","rule":"TRule_without_column_name.Alt_without_column_name2","sum":23122170},{"parent":"TRule_without_column_name.TAlt1","rule":"TRule_without_column_name.TAlt1.Rule_an_id1","sum":30689820},{"parent":"TRule_without_column_name.TAlt1","rule":"TRule_without_column_name.TAlt1.Rule_an_id3","sum":30689820},{"parent":"TRule_without_column_name.TAlt1","rule":"TRule_without_column_
name.TAlt1.Token2","sum":30689820},{"parent":"TRule_without_column_name.TAlt2","rule":"TRule_without_column_name.TAlt2.Rule_an_id_without1","sum":23122170},{"parent":"TRule_xor_subexpr","rule":"TRule_xor_subexpr.Block2","sum":1091682389},{"parent":"TRule_xor_subexpr","rule":"TRule_xor_subexpr.Rule_eq_subexpr1","sum":14246186831},{"parent":"TRule_xor_subexpr.TBlock2","rule":"TRule_xor_subexpr.TBlock2.Rule_cond_expr1","sum":1091682389},{"parent":"TSQLv1ParserAST","rule":"TSQLv1ParserAST.Rule_sql_query","sum":318387605},{"parent":"TYPE","rule":"BIGINT","sum":7101},{"parent":"TYPE","rule":"BOOL","sum":108389},{"parent":"TYPE","rule":"BYTEs","sum":1},{"parent":"TYPE","rule":"BigInt","sum":210},{"parent":"TYPE","rule":"Bool","sum":9272747},{"parent":"TYPE","rule":"Bytes","sum":414522},{"parent":"TYPE","rule":"DATE","sum":1242372},{"parent":"TYPE","rule":"DATETIME","sum":66264},{"parent":"TYPE","rule":"DATETime","sum":2},{"parent":"TYPE","rule":"DATEtIME","sum":1},{"parent":"TYPE","rule":"DATEtime","sum":34},{"parent":"TYPE","rule":"DAte","sum":9541},{"parent":"TYPE","rule":"DAteTime","sum":54},{"parent":"TYPE","rule":"DAtetime","sum":685},{"parent":"TYPE","rule":"DOUBLE","sum":1338435},{"parent":"TYPE","rule":"DOUBLe","sum":1},{"parent":"TYPE","rule":"DOUBle","sum":1},{"parent":"TYPE","rule":"DOUble","sum":3},{"parent":"TYPE","rule":"DOuble","sum":2118},{"parent":"TYPE","rule":"DaTeTime","sum":1},{"parent":"TYPE","rule":"Date","sum":20552673},{"parent":"TYPE","rule":"Date32","sum":57},{"parent":"TYPE","rule":"DateTIME","sum":19},{"parent":"TYPE","rule":"DateTIme","sum":113},{"parent":"TYPE","rule":"DateTime","sum":4961130},{"parent":"TYPE","rule":"DateTime64","sum":8},{"parent":"TYPE","rule":"DatetIME","sum":35},{"parent":"TYPE","rule":"Datetime","sum":3245135},{"parent":"TYPE","rule":"Datetime64","sum":26},{"parent":"TYPE","rule":"DoubLe","sum":13},{"parent":"TYPE","rule":"Double","sum":30004369},{"parent":"TYPE","rule":"EmptyDict","sum":28},{"parent":"TYPE","rule":"Empt
yList","sum":27},{"parent":"TYPE","rule":"FLOAT","sum":196112},{"parent":"TYPE","rule":"FLoat","sum":3502},{"parent":"TYPE","rule":"FlOAT","sum":15},{"parent":"TYPE","rule":"FloaT","sum":317},{"parent":"TYPE","rule":"Float","sum":7045265},{"parent":"TYPE","rule":"Generic","sum":2},{"parent":"TYPE","rule":"INT","sum":440180},{"parent":"TYPE","rule":"INT16","sum":9699},{"parent":"TYPE","rule":"INT32","sum":139102},{"parent":"TYPE","rule":"INT64","sum":4131992},{"parent":"TYPE","rule":"INT8","sum":1770},{"parent":"TYPE","rule":"INTEGER","sum":73607},{"parent":"TYPE","rule":"INTERVAL","sum":11073},{"parent":"TYPE","rule":"INt16","sum":1},{"parent":"TYPE","rule":"INt32","sum":1154},{"parent":"TYPE","rule":"INt64","sum":38786},{"parent":"TYPE","rule":"InT32","sum":1},{"parent":"TYPE","rule":"Int","sum":522114},{"parent":"TYPE","rule":"Int16","sum":897927},{"parent":"TYPE","rule":"Int32","sum":13668505},{"parent":"TYPE","rule":"Int64","sum":47331088},{"parent":"TYPE","rule":"Int8","sum":969156},{"parent":"TYPE","rule":"Integer","sum":468888},{"parent":"TYPE","rule":"Interval","sum":203511},{"parent":"TYPE","rule":"Interval64","sum":34},{"parent":"TYPE","rule":"JSON","sum":1720792},{"parent":"TYPE","rule":"JSONDocument","sum":243},{"parent":"TYPE","rule":"JSOn","sum":1},{"parent":"TYPE","rule":"JSon","sum":11},{"parent":"TYPE","rule":"Json","sum":5441608},{"parent":"TYPE","rule":"JsonDocument","sum":17227},{"parent":"TYPE","rule":"Jsondocument","sum":25},{"parent":"TYPE","rule":"PgBool","sum":20},{"parent":"TYPE","rule":"PgBox","sum":2},{"parent":"TYPE","rule":"PgByteA","sum":16},{"parent":"TYPE","rule":"PgCString","sum":19},{"parent":"TYPE","rule":"PgDate","sum":85},{"parent":"TYPE","rule":"PgFloat4","sum":31},{"parent":"TYPE","rule":"PgFloat8","sum":31},{"parent":"TYPE","rule":"PgInt","sum":1},{"parent":"TYPE","rule":"PgInt2","sum":97},{"parent":"TYPE","rule":"PgInt4","sum":37},{"parent":"TYPE","rule":"PgInt8","sum":32},{"parent":"TYPE","rule":"PgInterval","sum":238},{"pa
rent":"TYPE","rule":"PgMoney","sum":2},{"parent":"TYPE","rule":"PgName","sum":2},{"parent":"TYPE","rule":"PgNumeric","sum":10},{"parent":"TYPE","rule":"PgPoint","sum":1264},{"parent":"TYPE","rule":"PgPolygon","sum":622},{"parent":"TYPE","rule":"PgText","sum":414},{"parent":"TYPE","rule":"PgTimestamp","sum":1805},{"parent":"TYPE","rule":"PgVarChar","sum":1},{"parent":"TYPE","rule":"PgVarchar","sum":231},{"parent":"TYPE","rule":"STRING","sum":2682014},{"parent":"TYPE","rule":"STRINg","sum":3},{"parent":"TYPE","rule":"STRing","sum":3},{"parent":"TYPE","rule":"STring","sum":1294},{"parent":"TYPE","rule":"StrINg","sum":45},{"parent":"TYPE","rule":"StrinG","sum":27},{"parent":"TYPE","rule":"String","sum":333866429},{"parent":"TYPE","rule":"TEXT","sum":18984},{"parent":"TYPE","rule":"TIMESTAMP","sum":312271},{"parent":"TYPE","rule":"TINYINT","sum":1},{"parent":"TYPE","rule":"TZDate","sum":30},{"parent":"TYPE","rule":"TZDateTime","sum":1},{"parent":"TYPE","rule":"TZDatetime","sum":25},{"parent":"TYPE","rule":"TZtimestamp","sum":1},{"parent":"TYPE","rule":"Text","sum":185895},{"parent":"TYPE","rule":"TimeStamp","sum":296676},{"parent":"TYPE","rule":"Timestamp","sum":5958368},{"parent":"TYPE","rule":"Timestamp64","sum":458},{"parent":"TYPE","rule":"TzDATE","sum":5},{"parent":"TYPE","rule":"TzDate","sum":47109},{"parent":"TYPE","rule":"TzDateTime","sum":43848},{"parent":"TYPE","rule":"TzDatetime","sum":675925},{"parent":"TYPE","rule":"TzTimeStamp","sum":6},{"parent":"TYPE","rule":"TzTimestamp","sum":7872},{"parent":"TYPE","rule":"Tzdate","sum":5},{"parent":"TYPE","rule":"Tzdatetime","sum":4},{"parent":"TYPE","rule":"UINT16","sum":1206},{"parent":"TYPE","rule":"UINT32","sum":515088},{"parent":"TYPE","rule":"UINT64","sum":597046},{"parent":"TYPE","rule":"UINT8","sum":170},{"parent":"TYPE","rule":"UINt32","sum":44},{"parent":"TYPE","rule":"UINt64","sum":412},{"parent":"TYPE","rule":"UINt8","sum":24},{"parent":"TYPE","rule":"UInt16","sum":128862},{"parent":"TYPE","rule":"UInt32","
sum":7273735},{"parent":"TYPE","rule":"UInt64","sum":9432595},{"parent":"TYPE","rule":"UInt8","sum":153493},{"parent":"TYPE","rule":"UNIT","sum":757},{"parent":"TYPE","rule":"UTF8","sum":265559},{"parent":"TYPE","rule":"UTf8","sum":132},{"parent":"TYPE","rule":"UUID","sum":1237799},{"parent":"TYPE","rule":"UiNt32","sum":4},{"parent":"TYPE","rule":"Uint16","sum":333684},{"parent":"TYPE","rule":"Uint32","sum":18570372},{"parent":"TYPE","rule":"Uint64","sum":32555431},{"parent":"TYPE","rule":"Uint8","sum":2327761},{"parent":"TYPE","rule":"Unit","sum":1182},{"parent":"TYPE","rule":"Utf8","sum":14236247},{"parent":"TYPE","rule":"Uuid","sum":43021},{"parent":"TYPE","rule":"VARCHAR","sum":515452},{"parent":"TYPE","rule":"Varchar","sum":2},{"parent":"TYPE","rule":"Void","sum":11464},{"parent":"TYPE","rule":"XML","sum":2},{"parent":"TYPE","rule":"YSON","sum":228232},{"parent":"TYPE","rule":"YSon","sum":468},{"parent":"TYPE","rule":"Yson","sum":17583554},{"parent":"TYPE","rule":"_PgMoney","sum":3},{"parent":"TYPE","rule":"bigint","sum":8678},{"parent":"TYPE","rule":"bool","sum":935018},{"parent":"TYPE","rule":"bytes","sum":35935},{"parent":"TYPE","rule":"dATE","sum":1},{"parent":"TYPE","rule":"daTE","sum":5},{"parent":"TYPE","rule":"date","sum":32278586},{"parent":"TYPE","rule":"date32","sum":40},{"parent":"TYPE","rule":"dateTIME","sum":8},{"parent":"TYPE","rule":"dateTime","sum":41625},{"parent":"TYPE","rule":"datetime","sum":7152182},{"parent":"TYPE","rule":"datetime64","sum":61},{"parent":"TYPE","rule":"double","sum":6913434},{"parent":"TYPE","rule":"emptyList","sum":1},{"parent":"TYPE","rule":"float","sum":4734731},{"parent":"TYPE","rule":"generic","sum":2},{"parent":"TYPE","rule":"iNT","sum":1},{"parent":"TYPE","rule":"iNT64","sum":27},{"parent":"TYPE","rule":"inT64","sum":260},{"parent":"TYPE","rule":"int","sum":1112457},{"parent":"TYPE","rule":"int16","sum":80702},{"parent":"TYPE","rule":"int32","sum":3188473},{"parent":"TYPE","rule":"int64","sum":8433961},{"parent":"T
YPE","rule":"int8","sum":42328},{"parent":"TYPE","rule":"integer","sum":135058},{"parent":"TYPE","rule":"interval","sum":389459},{"parent":"TYPE","rule":"json","sum":1065443},{"parent":"TYPE","rule":"pgDate","sum":22},{"parent":"TYPE","rule":"pg_name","sum":2},{"parent":"TYPE","rule":"pgbigint","sum":1},{"parent":"TYPE","rule":"pgbool","sum":18},{"parent":"TYPE","rule":"pgdate","sum":14},{"parent":"TYPE","rule":"pgfloat8","sum":2},{"parent":"TYPE","rule":"pgint","sum":366},{"parent":"TYPE","rule":"pgint2","sum":6},{"parent":"TYPE","rule":"pgint4","sum":40},{"parent":"TYPE","rule":"pgint8","sum":1},{"parent":"TYPE","rule":"pginteger","sum":1},{"parent":"TYPE","rule":"pginterval","sum":374},{"parent":"TYPE","rule":"pgnumeric","sum":5},{"parent":"TYPE","rule":"pgoid","sum":1},{"parent":"TYPE","rule":"pgtext","sum":76},{"parent":"TYPE","rule":"pgtimestamp","sum":20},{"parent":"TYPE","rule":"sTRING","sum":16},{"parent":"TYPE","rule":"smallint","sum":1},{"parent":"TYPE","rule":"strINg","sum":14},{"parent":"TYPE","rule":"striNg","sum":1},{"parent":"TYPE","rule":"strinG","sum":73},{"parent":"TYPE","rule":"string","sum":45428906},{"parent":"TYPE","rule":"text","sum":4537855},{"parent":"TYPE","rule":"timeStamp","sum":99},{"parent":"TYPE","rule":"timestamp","sum":21885666},{"parent":"TYPE","rule":"timestamp64","sum":12},{"parent":"TYPE","rule":"tinyint","sum":2},{"parent":"TYPE","rule":"tzDate","sum":1},{"parent":"TYPE","rule":"tzDateTime","sum":3},{"parent":"TYPE","rule":"tzDatetime","sum":29},{"parent":"TYPE","rule":"tzTimestamp","sum":709},{"parent":"TYPE","rule":"tzdate","sum":19},{"parent":"TYPE","rule":"tzdatetime","sum":67},{"parent":"TYPE","rule":"tzdatetime64","sum":2},{"parent":"TYPE","rule":"tztimestamp","sum":32},{"parent":"TYPE","rule":"tztimestamp64","sum":3},{"parent":"TYPE","rule":"uINT32","sum":6},{"parent":"TYPE","rule":"uInt32","sum":12588},{"parent":"TYPE","rule":"uInt64","sum":2521},{"parent":"TYPE","rule":"uInt8","sum":52},{"parent":"TYPE","rule":"uint16"
,"sum":26721},{"parent":"TYPE","rule":"uint32","sum":5285960},{"parent":"TYPE","rule":"uint64","sum":8563801},{"parent":"TYPE","rule":"uint8","sum":158497},{"parent":"TYPE","rule":"unit","sum":693132},{"parent":"TYPE","rule":"utf8","sum":4490416},{"parent":"TYPE","rule":"uuid","sum":2964762},{"parent":"TYPE","rule":"varchar","sum":59579},{"parent":"TYPE","rule":"void","sum":1},{"parent":"TYPE","rule":"xml","sum":31888},{"parent":"TYPE","rule":"yaml","sum":144},{"parent":"TYPE","rule":"yson","sum":1127334}]
+[{"parent":"FUNC","rule":"ABC","sum":1},{"parent":"FUNC","rule":"ABS","sum":3127148},{"parent":"FUNC","rule":"ADAPTIVE_WARD_HISTOGRAM","sum":1},{"parent":"FUNC","rule":"ADDTIMEZONE","sum":395},{"parent":"FUNC","rule":"AGGLIST","sum":1221},{"parent":"FUNC","rule":"AGGList","sum":1},{"parent":"FUNC","rule":"AGGREATE_LIST","sum":2},{"parent":"FUNC","rule":"AGGREGATELIST","sum":72},{"parent":"FUNC","rule":"AGGREGATE_BY","sum":1576927},{"parent":"FUNC","rule":"AGGREGATE_LIST","sum":19711752},{"parent":"FUNC","rule":"AGGREGATE_LIST_","sum":2},{"parent":"FUNC","rule":"AGGREGATE_LIST_DISTINCT","sum":12772929},{"parent":"FUNC","rule":"AGGREGATE_LIST_DISTINCt","sum":2},{"parent":"FUNC","rule":"AGGREGATE_LIST_DISTNCT","sum":1},{"parent":"FUNC","rule":"AGGREGATE_LIST_DiSTINCT","sum":4},{"parent":"FUNC","rule":"AGGREGATE_LIST_Distinct","sum":783},{"parent":"FUNC","rule":"AGGREGATE_LIST_distINCT","sum":4},{"parent":"FUNC","rule":"AGGREGATE_LIST_distinct","sum":18908},{"parent":"FUNC","rule":"AGGREGATE_LISt","sum":6},{"parent":"FUNC","rule":"AGGREGATE_LiST","sum":106},{"parent":"FUNC","rule":"AGGREGATE_List","sum":75},{"parent":"FUNC","rule":"AGGREGATE_lIST","sum":59},{"parent":"FUNC","rule":"AGGREGATE_lIST_DISTINCT","sum":6},{"parent":"FUNC","rule":"AGGREGATE_liST_DISTINCT","sum":9},{"parent":"FUNC","rule":"AGGREGATE_list","sum":316},{"parent":"FUNC","rule":"AGGREGATE_list_distinct","sum":1},{"parent":"FUNC","rule":"AGGREGATIONFACTORY","sum":57},{"parent":"FUNC","rule":"AGGREGATION_FACTORY","sum":66663},{"parent":"FUNC","rule":"AGGREGate_List","sum":2},{"parent":"FUNC","rule":"AGGR_LIST","sum":2975},{"parent":"FUNC","rule":"AGGR_LIST_DISTINCT","sum":44471},{"parent":"FUNC","rule":"AGGReGATE_LIST","sum":3},{"parent":"FUNC","rule":"AGGReGate_list","sum":1},{"parent":"FUNC","rule":"AGG_LIST","sum":5125300},{"parent":"FUNC","rule":"AGG_LIST_","sum":5},{"parent":"FUNC","rule":"AGG_LIST_DISTINCT","sum":2870859},{"parent":"FUNC","rule":"AGG_LIST_DISTINCt","sum":9},{"parent":"FUNC","rul
e":"AGG_LIST_DIStiNCT","sum":10},{"parent":"FUNC","rule":"AGG_LIST_DiSTINCT","sum":4},{"parent":"FUNC","rule":"AGG_LIST_Distinct","sum":47},{"parent":"FUNC","rule":"AGG_LIST_distinct","sum":647},{"parent":"FUNC","rule":"AGG_LISt","sum":2},{"parent":"FUNC","rule":"AGG_LIst","sum":11},{"parent":"FUNC","rule":"AGG_LiST","sum":4},{"parent":"FUNC","rule":"AGG_LiST_DIStiNCT","sum":2},{"parent":"FUNC","rule":"AGG_List","sum":6674},{"parent":"FUNC","rule":"AGG_List_DISTINCT","sum":33},{"parent":"FUNC","rule":"AGG_List_Distinct","sum":928},{"parent":"FUNC","rule":"AGG_List_distinct","sum":103},{"parent":"FUNC","rule":"AGG_lIST_DISTINCT","sum":5},{"parent":"FUNC","rule":"AGG_list","sum":17554},{"parent":"FUNC","rule":"AGG_list_DISTINCT","sum":3065},{"parent":"FUNC","rule":"AGG_list_distinct","sum":784},{"parent":"FUNC","rule":"AGGrEGATE_LIST","sum":2},{"parent":"FUNC","rule":"AGGreGATE_LIST_DISTINCT","sum":3},{"parent":"FUNC","rule":"AGGregateList","sum":19},{"parent":"FUNC","rule":"AGGregate_LIST","sum":3},{"parent":"FUNC","rule":"AGGregate_LIST_DISTINCT","sum":1},{"parent":"FUNC","rule":"AGGregate_List_Distinct","sum":4},{"parent":"FUNC","rule":"AGGregate_list","sum":6},{"parent":"FUNC","rule":"AGGregate_list_distinct","sum":65},{"parent":"FUNC","rule":"AND","sum":46},{"parent":"FUNC","rule":"ARRAY_AGG","sum":2},{"parent":"FUNC","rule":"ASDICT","sum":193},{"parent":"FUNC","rule":"ASDict","sum":627},{"parent":"FUNC","rule":"ASEnum","sum":84},{"parent":"FUNC","rule":"ASIN","sum":2},{"parent":"FUNC","rule":"ASLIST","sum":27702},{"parent":"FUNC","rule":"ASLIst","sum":4},{"parent":"FUNC","rule":"ASList","sum":23526},{"parent":"FUNC","rule":"ASSET","sum":17},{"parent":"FUNC","rule":"ASSTRUCT","sum":2700},{"parent":"FUNC","rule":"ASSet","sum":5},{"parent":"FUNC","rule":"ASStruct","sum":2343},{"parent":"FUNC","rule":"ASTAGGED","sum":15},{"parent":"FUNC","rule":"ASTAgged","sum":3},{"parent":"FUNC","rule":"ASTUPLE","sum":1586},{"parent":"FUNC","rule":"ASTagged","sum":63},{"parent":"F
UNC","rule":"ASTuple","sum":5806},{"parent":"FUNC","rule":"AS_DICT","sum":6},{"parent":"FUNC","rule":"AS_LIST","sum":119},{"parent":"FUNC","rule":"AS_STRUCT","sum":13360},{"parent":"FUNC","rule":"AS_TABLE","sum":33},{"parent":"FUNC","rule":"AS_TUPLE","sum":844},{"parent":"FUNC","rule":"ASdict","sum":4},{"parent":"FUNC","rule":"ASlist","sum":1022},{"parent":"FUNC","rule":"ASstruct","sum":41},{"parent":"FUNC","rule":"AStagged","sum":3},{"parent":"FUNC","rule":"AStuple","sum":408},{"parent":"FUNC","rule":"AVG","sum":9080971},{"parent":"FUNC","rule":"AVGIF","sum":15},{"parent":"FUNC","rule":"AVG_IF","sum":732569},{"parent":"FUNC","rule":"AVG_If","sum":8},{"parent":"FUNC","rule":"AVG_if","sum":14378},{"parent":"FUNC","rule":"AVg","sum":4},{"parent":"FUNC","rule":"Abs","sum":323219},{"parent":"FUNC","rule":"AdaptiveDistanceHistogramCDF","sum":1},{"parent":"FUNC","rule":"AdaptiveWardHistogram","sum":1},{"parent":"FUNC","rule":"AdaptiveWardHistogramCDF","sum":3},{"parent":"FUNC","rule":"AdaptiveWeightHistogram","sum":57440},{"parent":"FUNC","rule":"AddMember","sum":880740},{"parent":"FUNC","rule":"AddTimeZone","sum":514894},{"parent":"FUNC","rule":"AddTimezone","sum":12053335},{"parent":"FUNC","rule":"Addtimezone","sum":60},{"parent":"FUNC","rule":"AggLIst","sum":1},{"parent":"FUNC","rule":"AggList","sum":19938},{"parent":"FUNC","rule":"AggListDistinct","sum":72},{"parent":"FUNC","rule":"Agg_LIST","sum":60},{"parent":"FUNC","rule":"Agg_LIST_DISTINCT","sum":20},{"parent":"FUNC","rule":"Agg_LIST_Distinct","sum":1},{"parent":"FUNC","rule":"Agg_LIst","sum":2},{"parent":"FUNC","rule":"Agg_LiSt","sum":3},{"parent":"FUNC","rule":"Agg_List","sum":129321},{"parent":"FUNC","rule":"Agg_List_","sum":8},{"parent":"FUNC","rule":"Agg_List_DISTINCT","sum":730},{"parent":"FUNC","rule":"Agg_List_Distinct","sum":2506},{"parent":"FUNC","rule":"Agg_List_distinct","sum":4},{"parent":"FUNC","rule":"Agg_list","sum":35594},{"parent":"FUNC","rule":"Agg_list_distinct","sum":8130},{"parent":"FUNC","ru
le":"Aggergate_List","sum":1},{"parent":"FUNC","rule":"Agglist","sum":12},{"parent":"FUNC","rule":"AggrList","sum":13},{"parent":"FUNC","rule":"Aggr_List","sum":1},{"parent":"FUNC","rule":"AggreGate_List","sum":6},{"parent":"FUNC","rule":"AggregateBy","sum":42230},{"parent":"FUNC","rule":"AggregateFlatten","sum":52396},{"parent":"FUNC","rule":"AggregateList","sum":40547},{"parent":"FUNC","rule":"AggregateListDistinct","sum":7694},{"parent":"FUNC","rule":"AggregateTransformInput","sum":25589},{"parent":"FUNC","rule":"AggregateTransformOutput","sum":44958},{"parent":"FUNC","rule":"Aggregate_BY","sum":242},{"parent":"FUNC","rule":"Aggregate_By","sum":137121},{"parent":"FUNC","rule":"Aggregate_LIST","sum":853},{"parent":"FUNC","rule":"Aggregate_LIST_DISTINCT","sum":2},{"parent":"FUNC","rule":"Aggregate_LIst","sum":12},{"parent":"FUNC","rule":"Aggregate_List","sum":345188},{"parent":"FUNC","rule":"Aggregate_List_Distinct","sum":5207},{"parent":"FUNC","rule":"Aggregate_List_distinct","sum":1306},{"parent":"FUNC","rule":"Aggregate_by","sum":79},{"parent":"FUNC","rule":"Aggregate_list","sum":228359},{"parent":"FUNC","rule":"Aggregate_list_DISTINCT","sum":36},{"parent":"FUNC","rule":"Aggregate_list_Distinct","sum":11},{"parent":"FUNC","rule":"Aggregate_list_distinct","sum":201434},{"parent":"FUNC","rule":"AggregationFactory","sum":1510101},{"parent":"FUNC","rule":"Apply","sum":1},{"parent":"FUNC","rule":"AsAtom","sum":688112},{"parent":"FUNC","rule":"AsDict","sum":2925286},{"parent":"FUNC","rule":"AsDictStrict","sum":4459},{"parent":"FUNC","rule":"AsEnum","sum":50660},{"parent":"FUNC","rule":"AsLIST","sum":917},{"parent":"FUNC","rule":"AsLIst","sum":2770},{"parent":"FUNC","rule":"AsLisT","sum":1501},{"parent":"FUNC","rule":"AsList","sum":17305413},{"parent":"FUNC","rule":"AsListStrict","sum":86208},{"parent":"FUNC","rule":"AsListstrict","sum":1},{"parent":"FUNC","rule":"AsSTruct","sum":374},{"parent":"FUNC","rule":"AsSet","sum":1592807},{"parent":"FUNC","rule":"AsSetStrict",
"sum":11347},{"parent":"FUNC","rule":"AsStruct","sum":23491726},{"parent":"FUNC","rule":"AsTAgged","sum":281},{"parent":"FUNC","rule":"AsTUPLE","sum":448},{"parent":"FUNC","rule":"AsTUple","sum":270},{"parent":"FUNC","rule":"AsTable","sum":6},{"parent":"FUNC","rule":"AsTaggeD","sum":4},{"parent":"FUNC","rule":"AsTagged","sum":183444},{"parent":"FUNC","rule":"AsTuPle","sum":1},{"parent":"FUNC","rule":"AsTuple","sum":30224732},{"parent":"FUNC","rule":"AsTupleunwrap","sum":4},{"parent":"FUNC","rule":"AsVariant","sum":357080},{"parent":"FUNC","rule":"As_List","sum":6},{"parent":"FUNC","rule":"As_Struct","sum":6},{"parent":"FUNC","rule":"As_list","sum":20},{"parent":"FUNC","rule":"As_tuple","sum":804},{"parent":"FUNC","rule":"Asdict","sum":181},{"parent":"FUNC","rule":"AslIst","sum":14},{"parent":"FUNC","rule":"Aslist","sum":23551},{"parent":"FUNC","rule":"Asset","sum":49},{"parent":"FUNC","rule":"Asstruct","sum":411},{"parent":"FUNC","rule":"AssumeStrict","sum":2524},{"parent":"FUNC","rule":"Astagged","sum":163},{"parent":"FUNC","rule":"Astuple","sum":728},{"parent":"FUNC","rule":"AtomCode","sum":522128},{"parent":"FUNC","rule":"Avg","sum":139147},{"parent":"FUNC","rule":"Avg_IF","sum":162},{"parent":"FUNC","rule":"Avg_If","sum":121},{"parent":"FUNC","rule":"Avg_if","sum":222},{"parent":"FUNC","rule":"BIT_AND","sum":4},{"parent":"FUNC","rule":"BIT_OR","sum":38656},{"parent":"FUNC","rule":"BIT_XOR","sum":432891},{"parent":"FUNC","rule":"BOOL_AND","sum":265915},{"parent":"FUNC","rule":"BOOL_OR","sum":878803},{"parent":"FUNC","rule":"BOOL_XOR","sum":46},{"parent":"FUNC","rule":"BOOL_and","sum":2},{"parent":"FUNC","rule":"BOOl_OR","sum":378},{"parent":"FUNC","rule":"BOTTOM","sum":11629},{"parent":"FUNC","rule":"BOTTOM_BY","sum":47254},{"parent":"FUNC","rule":"BOTTOM_by","sum":2},{"parent":"FUNC","rule":"Bool","sum":1385},{"parent":"FUNC","rule":"Bool_And","sum":118},{"parent":"FUNC","rule":"Bool_Or","sum":580},{"parent":"FUNC","rule":"Bool_and","sum":85},{"parent":"FUNC","r
ule":"Bool_or","sum":820},{"parent":"FUNC","rule":"Bottom","sum":278},{"parent":"FUNC","rule":"Bottom_BY","sum":10},{"parent":"FUNC","rule":"Bottom_By","sum":102},{"parent":"FUNC","rule":"Bottom_by","sum":1639},{"parent":"FUNC","rule":"ByteAt","sum":13545},{"parent":"FUNC","rule":"Bytes","sum":5},{"parent":"FUNC","rule":"CHAR_LENGTH","sum":373},{"parent":"FUNC","rule":"COALECSE","sum":1},{"parent":"FUNC","rule":"COALESCE","sum":51653941},{"parent":"FUNC","rule":"COALESCe","sum":10},{"parent":"FUNC","rule":"COALESce","sum":8},{"parent":"FUNC","rule":"COALEsCE","sum":1},{"parent":"FUNC","rule":"COALEsce","sum":4},{"parent":"FUNC","rule":"COALeSCE","sum":1},{"parent":"FUNC","rule":"COALesce","sum":14},{"parent":"FUNC","rule":"COAlESCE","sum":2736},{"parent":"FUNC","rule":"COAlesce","sum":12},{"parent":"FUNC","rule":"CONCAT","sum":19},{"parent":"FUNC","rule":"COOUNT","sum":2},{"parent":"FUNC","rule":"CORR","sum":430},{"parent":"FUNC","rule":"CORRELATION","sum":20868},{"parent":"FUNC","rule":"COS","sum":4},{"parent":"FUNC","rule":"COUNT","sum":52725662},{"parent":"FUNC","rule":"COUNTD","sum":4},{"parent":"FUNC","rule":"COUNTDISTINCTESTIMATE","sum":609},{"parent":"FUNC","rule":"COUNTDistinctEstimate","sum":7},{"parent":"FUNC","rule":"COUNTIF","sum":15797},{"parent":"FUNC","rule":"COUNT_","sum":1},{"parent":"FUNC","rule":"COUNT_IF","sum":24106602},{"parent":"FUNC","rule":"COUNT_IF_","sum":24},{"parent":"FUNC","rule":"COUNT_If","sum":12983},{"parent":"FUNC","rule":"COUNT_iF","sum":44},{"parent":"FUNC","rule":"COUNT_if","sum":19536},{"parent":"FUNC","rule":"COUNt","sum":1260},{"parent":"FUNC","rule":"COUNt_IF","sum":24},{"parent":"FUNC","rule":"COUNt_If","sum":3},{"parent":"FUNC","rule":"COUNt_if","sum":11},{"parent":"FUNC","rule":"COUnT","sum":10},{"parent":"FUNC","rule":"COUnT_IF","sum":7},{"parent":"FUNC","rule":"COUnt","sum":40},{"parent":"FUNC","rule":"COVAR","sum":2848},{"parent":"FUNC","rule":"COVARIANCE","sum":1520},{"parent":"FUNC","rule":"COVARIANCE_POPULATION","su
m":64},{"parent":"FUNC","rule":"COVARIANCE_SAMPLE","sum":4},{"parent":"FUNC","rule":"COVAR_POP","sum":4},{"parent":"FUNC","rule":"COalesce","sum":22},{"parent":"FUNC","rule":"COuNT","sum":64},{"parent":"FUNC","rule":"COuNT_If","sum":1},{"parent":"FUNC","rule":"COunt","sum":189},{"parent":"FUNC","rule":"COunt_IF","sum":57},{"parent":"FUNC","rule":"COunt_If","sum":2},{"parent":"FUNC","rule":"COunt_iF","sum":1},{"parent":"FUNC","rule":"COunt_if","sum":3},{"parent":"FUNC","rule":"CUME_DIST","sum":109},{"parent":"FUNC","rule":"CURRENTUTCDATE","sum":5408},{"parent":"FUNC","rule":"CURRENTUTCDATETIME","sum":6637},{"parent":"FUNC","rule":"CURRENTUTCDate","sum":82},{"parent":"FUNC","rule":"CURRENT_UTC_DATE","sum":8},{"parent":"FUNC","rule":"CallableArgument","sum":10160},{"parent":"FUNC","rule":"CallableArgumentType","sum":19070},{"parent":"FUNC","rule":"CallableResultType","sum":115},{"parent":"FUNC","rule":"CallableType","sum":179101},{"parent":"FUNC","rule":"CallableTypeHandle","sum":5700},{"parent":"FUNC","rule":"Ceil","sum":3},{"parent":"FUNC","rule":"ChooseMembers","sum":2492144},{"parent":"FUNC","rule":"Choosemembers","sum":21067},{"parent":"FUNC","rule":"ChosenMembers","sum":8},{"parent":"FUNC","rule":"ClearBit","sum":30223},{"parent":"FUNC","rule":"CoALESCE","sum":1},{"parent":"FUNC","rule":"CoUNT","sum":35},{"parent":"FUNC","rule":"CoUNT_IF","sum":17},{"parent":"FUNC","rule":"CoUNt","sum":2},{"parent":"FUNC","rule":"CoUnt","sum":2},{"parent":"FUNC","rule":"Coalesce","sum":725375},{"parent":"FUNC","rule":"Collect","sum":1},{"parent":"FUNC","rule":"CollectList","sum":1},{"parent":"FUNC","rule":"CombineMembers","sum":505747},{"parent":"FUNC","rule":"Concat","sum":1},{"parent":"FUNC","rule":"Correlation","sum":180},{"parent":"FUNC","rule":"CouNT","sum":2},{"parent":"FUNC","rule":"CounT","sum":17},{"parent":"FUNC","rule":"Count","sum":395770},{"parent":"FUNC","rule":"CountDistinctEstimate","sum":157543},{"parent":"FUNC","rule":"CountIF","sum":81},{"parent":"FUNC","rule":
"CountIf","sum":511},{"parent":"FUNC","rule":"Count_IF","sum":15751},{"parent":"FUNC","rule":"Count_If","sum":42577},{"parent":"FUNC","rule":"Count_if","sum":323527},{"parent":"FUNC","rule":"Countif","sum":12},{"parent":"FUNC","rule":"CurrentAuthenticatedUser","sum":90046},{"parent":"FUNC","rule":"CurrentDatetime","sum":1},{"parent":"FUNC","rule":"CurrentOperationId","sum":89890},{"parent":"FUNC","rule":"CurrentOperationSharedId","sum":5666},{"parent":"FUNC","rule":"CurrentTZDate","sum":615},{"parent":"FUNC","rule":"CurrentTZDateTime","sum":35},{"parent":"FUNC","rule":"CurrentTZDatetime","sum":526},{"parent":"FUNC","rule":"CurrentTZTimestamp","sum":263},{"parent":"FUNC","rule":"CurrentTZdatetime","sum":68},{"parent":"FUNC","rule":"CurrentTzDate","sum":1610399},{"parent":"FUNC","rule":"CurrentTzDateTime","sum":1466723},{"parent":"FUNC","rule":"CurrentTzDatetime","sum":1990148},{"parent":"FUNC","rule":"CurrentTzTimeStamp","sum":4342},{"parent":"FUNC","rule":"CurrentTzTimestamp","sum":1900232},{"parent":"FUNC","rule":"CurrentUTCDATE","sum":19},{"parent":"FUNC","rule":"CurrentUTCDAte","sum":38},{"parent":"FUNC","rule":"CurrentUTCDate","sum":919233},{"parent":"FUNC","rule":"CurrentUTCDateTime","sum":182498},{"parent":"FUNC","rule":"CurrentUTCDatetime","sum":131115},{"parent":"FUNC","rule":"CurrentUTCTimeStamp","sum":99},{"parent":"FUNC","rule":"CurrentUTCTimestamp","sum":452742},{"parent":"FUNC","rule":"CurrentUTCdate","sum":71306},{"parent":"FUNC","rule":"CurrentUTcDate","sum":63},{"parent":"FUNC","rule":"CurrentUtCDate","sum":5},{"parent":"FUNC","rule":"CurrentUtCDatetime","sum":3},{"parent":"FUNC","rule":"CurrentUtcDATE","sum":2},{"parent":"FUNC","rule":"CurrentUtcDAte","sum":3},{"parent":"FUNC","rule":"CurrentUtcDatE","sum":1},{"parent":"FUNC","rule":"CurrentUtcDate","sum":19601950},{"parent":"FUNC","rule":"CurrentUtcDateTIME","sum":1},{"parent":"FUNC","rule":"CurrentUtcDateTime","sum":3195699},{"parent":"FUNC","rule":"CurrentUtcDatetime","sum":13792949},{"parent":"F
UNC","rule":"CurrentUtcDttm","sum":3},{"parent":"FUNC","rule":"CurrentUtcTimeStamp","sum":60287},{"parent":"FUNC","rule":"CurrentUtcTimestamp","sum":15261212},{"parent":"FUNC","rule":"CurrentUtcdate","sum":11664},{"parent":"FUNC","rule":"CurrenttzDate","sum":337},{"parent":"FUNC","rule":"CurrenttzDatetime","sum":1},{"parent":"FUNC","rule":"CurrentutcDate","sum":20941},{"parent":"FUNC","rule":"CurrentutcDateTime","sum":565},{"parent":"FUNC","rule":"CurrentutcTimestamp","sum":4},{"parent":"FUNC","rule":"Currentutcdate","sum":1501},{"parent":"FUNC","rule":"Currentutcdatetime","sum":22651},{"parent":"FUNC","rule":"D","sum":1},{"parent":"FUNC","rule":"DATE","sum":128427},{"parent":"FUNC","rule":"DATEADD","sum":3},{"parent":"FUNC","rule":"DATEDIFF","sum":9},{"parent":"FUNC","rule":"DATETIME","sum":832},{"parent":"FUNC","rule":"DATE_PART","sum":16},{"parent":"FUNC","rule":"DATE_TRUNC","sum":29},{"parent":"FUNC","rule":"DATe","sum":20},{"parent":"FUNC","rule":"DAte","sum":174},{"parent":"FUNC","rule":"DAtetime","sum":61},{"parent":"FUNC","rule":"DENSE_RANK","sum":273683},{"parent":"FUNC","rule":"DICTKEYS","sum":27},{"parent":"FUNC","rule":"DICTLENGTH","sum":4},{"parent":"FUNC","rule":"DICTLength","sum":2},{"parent":"FUNC","rule":"DICTPAYLOADS","sum":2},{"parent":"FUNC","rule":"DICT_CONTAINS","sum":5},{"parent":"FUNC","rule":"DIctHasItems","sum":1},{"parent":"FUNC","rule":"DIctItems","sum":1},{"parent":"FUNC","rule":"DIctKeys","sum":1},{"parent":"FUNC","rule":"DIctLength","sum":1},{"parent":"FUNC","rule":"DIctLookup","sum":16},{"parent":"FUNC","rule":"DOUBLE","sum":63},{"parent":"FUNC","rule":"DatE","sum":3},{"parent":"FUNC","rule":"DataType","sum":68953},{"parent":"FUNC","rule":"DataTypeComponents","sum":37116},{"parent":"FUNC","rule":"DataTypeHandle","sum":2},{"parent":"FUNC","rule":"Datatype","sum":2},{"parent":"FUNC","rule":"Date","sum":1771472},{"parent":"FUNC","rule":"Date32","sum":74},{"parent":"FUNC","rule":"DateTime","sum":434966},{"parent":"FUNC","rule":"DateTime64
","sum":45},{"parent":"FUNC","rule":"Date_Diff","sum":1},{"parent":"FUNC","rule":"DatetimE","sum":8},{"parent":"FUNC","rule":"Datetime","sum":269629},{"parent":"FUNC","rule":"Datetime64","sum":6},{"parent":"FUNC","rule":"Decimal","sum":31267},{"parent":"FUNC","rule":"DenseRank","sum":26},{"parent":"FUNC","rule":"Dense_RANK","sum":1},{"parent":"FUNC","rule":"Dense_Rank","sum":54},{"parent":"FUNC","rule":"Dense_rank","sum":6},{"parent":"FUNC","rule":"DicTKeys","sum":1},{"parent":"FUNC","rule":"DictAggregate","sum":140521},{"parent":"FUNC","rule":"DictCOntains","sum":350},{"parent":"FUNC","rule":"DictContains","sum":2022763},{"parent":"FUNC","rule":"DictCreate","sum":69265},{"parent":"FUNC","rule":"DictHasItems","sum":614720},{"parent":"FUNC","rule":"DictHasitems","sum":261},{"parent":"FUNC","rule":"DictItems","sum":2551895},{"parent":"FUNC","rule":"DictKEYS","sum":12},{"parent":"FUNC","rule":"DictKEys","sum":1},{"parent":"FUNC","rule":"DictKeYS","sum":3},{"parent":"FUNC","rule":"DictKeyType","sum":940},{"parent":"FUNC","rule":"DictKeys","sum":2714504},{"parent":"FUNC","rule":"DictLOokup","sum":7},{"parent":"FUNC","rule":"DictLength","sum":1015710},{"parent":"FUNC","rule":"DictLookUP","sum":3},{"parent":"FUNC","rule":"DictLookUp","sum":59096},{"parent":"FUNC","rule":"DictLookup","sum":5977839},{"parent":"FUNC","rule":"DictPayLoads","sum":10411},{"parent":"FUNC","rule":"DictPayloadType","sum":209},{"parent":"FUNC","rule":"DictPayloads","sum":926973},{"parent":"FUNC","rule":"DictType","sum":551},{"parent":"FUNC","rule":"DictTypeComponents","sum":3},{"parent":"FUNC","rule":"DictTypeHandle","sum":1},{"parent":"FUNC","rule":"DictValues","sum":5},{"parent":"FUNC","rule":"Dict_Keys","sum":12},{"parent":"FUNC","rule":"Dictcontains","sum":7},{"parent":"FUNC","rule":"Dictitems","sum":19815},{"parent":"FUNC","rule":"Dictkeys","sum":1128},{"parent":"FUNC","rule":"Dictlength","sum":6},{"parent":"FUNC","rule":"Dictlookup","sum":75954},{"parent":"FUNC","rule":"Double","sum":34124},{"
parent":"FUNC","rule":"DyNumber","sum":4},{"parent":"FUNC","rule":"EACH","sum":4},{"parent":"FUNC","rule":"ENDSWITH","sum":13654},{"parent":"FUNC","rule":"ENDsWith","sum":85},{"parent":"FUNC","rule":"ENSURE","sum":832081},{"parent":"FUNC","rule":"EOMONTH","sum":1},{"parent":"FUNC","rule":"EmptyDict","sum":430},{"parent":"FUNC","rule":"EmptyDictTypeHandle","sum":3},{"parent":"FUNC","rule":"EmptyList","sum":23968},{"parent":"FUNC","rule":"Emptydict","sum":6},{"parent":"FUNC","rule":"EndsWIth","sum":870},{"parent":"FUNC","rule":"EndsWith","sum":1563464},{"parent":"FUNC","rule":"Endswith","sum":19802},{"parent":"FUNC","rule":"Ensure","sum":1434516},{"parent":"FUNC","rule":"EnsureConvertibleTo","sum":2013},{"parent":"FUNC","rule":"EnsureType","sum":229229},{"parent":"FUNC","rule":"EvaluateAtom","sum":464},{"parent":"FUNC","rule":"EvaluateCode","sum":467542},{"parent":"FUNC","rule":"EvaluateExpr","sum":899431},{"parent":"FUNC","rule":"EvaluateType","sum":121899},{"parent":"FUNC","rule":"ExpandStruct","sum":264149},{"parent":"FUNC","rule":"ExtractUkropCtx","sum":1},{"parent":"FUNC","rule":"FIND","sum":5553194},{"parent":"FUNC","rule":"FIRST","sum":3},{"parent":"FUNC","rule":"FIRST_VALUE","sum":8897911},{"parent":"FUNC","rule":"FIRST_value","sum":811},{"parent":"FUNC","rule":"FIRsT_VALUE","sum":1},{"parent":"FUNC","rule":"FIleContent","sum":1},{"parent":"FUNC","rule":"FLATTEN","sum":9},{"parent":"FUNC","rule":"FLOAT","sum":7},{"parent":"FUNC","rule":"FLOOR","sum":1},{"parent":"FUNC","rule":"FORMATTYPE","sum":1},{"parent":"FUNC","rule":"FROMBYTES","sum":5},{"parent":"FUNC","rule":"FROmbytes","sum":4},{"parent":"FUNC","rule":"FileCOntent","sum":33},{"parent":"FUNC","rule":"FileContent","sum":2362479},{"parent":"FUNC","rule":"FilePath","sum":1452419},{"parent":"FUNC","rule":"Filecontent","sum":11771},{"parent":"FUNC","rule":"Filepath","sum":10},{"parent":"FUNC","rule":"FinD","sum":16},{"parent":"FUNC","rule":"Find","sum":321147},{"parent":"FUNC","rule":"FirsT_VALUE","sum":3},{
"parent":"FUNC","rule":"FirstValue","sum":25},{"parent":"FUNC","rule":"First_VALUE","sum":12},{"parent":"FUNC","rule":"First_Value","sum":28},{"parent":"FUNC","rule":"First_value","sum":180},{"parent":"FUNC","rule":"FlattenMembers","sum":109223},{"parent":"FUNC","rule":"Float","sum":12362},{"parent":"FUNC","rule":"FoldMap","sum":13},{"parent":"FUNC","rule":"Folder","sum":13},{"parent":"FUNC","rule":"FolderPath","sum":9948},{"parent":"FUNC","rule":"ForceRemoveMember","sum":401413},{"parent":"FUNC","rule":"ForceRemoveMembers","sum":1080839},{"parent":"FUNC","rule":"ForceRenameMembers","sum":64041},{"parent":"FUNC","rule":"ForceSpreadMembers","sum":115090},{"parent":"FUNC","rule":"ForceSpreadmembers","sum":4},{"parent":"FUNC","rule":"Format","sum":1},{"parent":"FUNC","rule":"FormatCode","sum":676},{"parent":"FUNC","rule":"FormatType","sum":470195},{"parent":"FUNC","rule":"FormatTypeDiff","sum":809},{"parent":"FUNC","rule":"FormatTypeDiffPretty","sum":103},{"parent":"FUNC","rule":"Formattype","sum":101},{"parent":"FUNC","rule":"FromBytes","sum":99141},{"parent":"FUNC","rule":"FromPg","sum":4860},{"parent":"FUNC","rule":"FromYsonSimpleType","sum":1},{"parent":"FUNC","rule":"From_bytes","sum":1},{"parent":"FUNC","rule":"FromatType","sum":1},{"parent":"FUNC","rule":"Frombytes","sum":12},{"parent":"FUNC","rule":"FuncCode","sum":1237484},{"parent":"FUNC","rule":"GETDATE","sum":1},{"parent":"FUNC","rule":"GREATEST","sum":389548},{"parent":"FUNC","rule":"GROUPING","sum":71200},{"parent":"FUNC","rule":"GROUPINg","sum":4},{"parent":"FUNC","rule":"GROUP_CONCAT","sum":1},{"parent":"FUNC","rule":"GatherMembers","sum":1479159},{"parent":"FUNC","rule":"Gather_Members","sum":38},{"parent":"FUNC","rule":"Gathermembers","sum":8},{"parent":"FUNC","rule":"GetLength","sum":2},{"parent":"FUNC","rule":"GetWeekOfYear","sum":1},{"parent":"FUNC","rule":"Greatest","sum":6014},{"parent":"FUNC","rule":"Grouping","sum":243},{"parent":"FUNC","rule":"HISTOGRAM","sum":272979},{"parent":"FUNC","rule":"
HISTOGRAMCDF","sum":425},{"parent":"FUNC","rule":"HISTOGRAMCdf","sum":47},{"parent":"FUNC","rule":"HISTOGRAM_CDF","sum":3},{"parent":"FUNC","rule":"HISTOGRAMcdf","sum":2},{"parent":"FUNC","rule":"HISTOGrAM","sum":49},{"parent":"FUNC","rule":"HISTOgram","sum":1},{"parent":"FUNC","rule":"HISToGRAM","sum":1},{"parent":"FUNC","rule":"HISTogram","sum":1},{"parent":"FUNC","rule":"HIStOGRAM","sum":1},{"parent":"FUNC","rule":"HIstogram","sum":34},{"parent":"FUNC","rule":"HLL","sum":12424},{"parent":"FUNC","rule":"HOP_END","sum":16},{"parent":"FUNC","rule":"HOP_START","sum":4},{"parent":"FUNC","rule":"Histogram","sum":35012},{"parent":"FUNC","rule":"HistogramCDF","sum":411},{"parent":"FUNC","rule":"HistogramCdf","sum":34},{"parent":"FUNC","rule":"Histogram_CDF","sum":57},{"parent":"FUNC","rule":"Histogramcdf","sum":1},{"parent":"FUNC","rule":"Hll","sum":1915},{"parent":"FUNC","rule":"HyperLogLog","sum":4057},{"parent":"FUNC","rule":"IF","sum":75252951},{"parent":"FUNC","rule":"IFNULL","sum":9},{"parent":"FUNC","rule":"IF_STRICT","sum":1},{"parent":"FUNC","rule":"IN","sum":2},{"parent":"FUNC","rule":"INT","sum":16},{"parent":"FUNC","rule":"INT32","sum":1},{"parent":"FUNC","rule":"INTERVAL","sum":889107},{"parent":"FUNC","rule":"INterval","sum":763},{"parent":"FUNC","rule":"If","sum":1084143},{"parent":"FUNC","rule":"IfNull","sum":6},{"parent":"FUNC","rule":"InstanceOf","sum":247258},{"parent":"FUNC","rule":"Int","sum":1},{"parent":"FUNC","rule":"Int16","sum":37},{"parent":"FUNC","rule":"Int32","sum":32669},{"parent":"FUNC","rule":"Int64","sum":3613},{"parent":"FUNC","rule":"Int8","sum":302},{"parent":"FUNC","rule":"InterVal","sum":4},{"parent":"FUNC","rule":"Interval","sum":16047614},{"parent":"FUNC","rule":"Interval64","sum":2},{"parent":"FUNC","rule":"IntervalFromDays","sum":8},{"parent":"FUNC","rule":"IsInt64","sum":1},{"parent":"FUNC","rule":"JSON","sum":68},{"parent":"FUNC","rule":"JUST","sum":128163},{"parent":"FUNC","rule":"Join","sum":1},{"parent":"FUNC","rule":"JoinT
ableRow","sum":1309793},{"parent":"FUNC","rule":"JoinTablerow","sum":721},{"parent":"FUNC","rule":"JointableRow","sum":1},{"parent":"FUNC","rule":"Json","sum":52981},{"parent":"FUNC","rule":"JsonDocument","sum":9},{"parent":"FUNC","rule":"Just","sum":6553330},{"parent":"FUNC","rule":"LAG","sum":4037229},{"parent":"FUNC","rule":"LAST","sum":1},{"parent":"FUNC","rule":"LAST_VALUE","sum":1335902},{"parent":"FUNC","rule":"LAST_value","sum":2},{"parent":"FUNC","rule":"LEAD","sum":1649498},{"parent":"FUNC","rule":"LEAST","sum":426967},{"parent":"FUNC","rule":"LEFT","sum":3},{"parent":"FUNC","rule":"LEFT_SHIFT","sum":1},{"parent":"FUNC","rule":"LEN","sum":1686905},{"parent":"FUNC","rule":"LENGTH","sum":3844541},{"parent":"FUNC","rule":"LENgth","sum":23},{"parent":"FUNC","rule":"LEngth","sum":12},{"parent":"FUNC","rule":"LIKELY","sum":140955},{"parent":"FUNC","rule":"LINEARHISTOGRAM","sum":374},{"parent":"FUNC","rule":"LINEARHISTOGRAMCDF","sum":1},{"parent":"FUNC","rule":"LINEARHistogram","sum":3},{"parent":"FUNC","rule":"LINEAR_HISTOGRAM","sum":13},{"parent":"FUNC","rule":"LISTALL","sum":2863},{"parent":"FUNC","rule":"LISTANY","sum":102},{"parent":"FUNC","rule":"LISTAVG","sum":25},{"parent":"FUNC","rule":"LISTAny","sum":10},{"parent":"FUNC","rule":"LISTCOLLECT","sum":1},{"parent":"FUNC","rule":"LISTCONCAT","sum":1518},{"parent":"FUNC","rule":"LISTENUMERATE","sum":2},{"parent":"FUNC","rule":"LISTEXTEND","sum":13},{"parent":"FUNC","rule":"LISTFILTER","sum":1629},{"parent":"FUNC","rule":"LISTFLATTEN","sum":4259},{"parent":"FUNC","rule":"LISTFROMRANGE","sum":1498},{"parent":"FUNC","rule":"LISTHAS","sum":31530},{"parent":"FUNC","rule":"LISTHASITEMS","sum":2093},{"parent":"FUNC","rule":"LISTHASItems","sum":1},{"parent":"FUNC","rule":"LISTHEAD","sum":18296},{"parent":"FUNC","rule":"LISTHas","sum":9},{"parent":"FUNC","rule":"LISTHead","sum":2},{"parent":"FUNC","rule":"LISTLAST","sum":1552},{"parent":"FUNC","rule":"LISTLENGTH","sum":48076},{"parent":"FUNC","rule":"LISTLENGth","sum"
:4},{"parent":"FUNC","rule":"LISTLenGTH","sum":7},{"parent":"FUNC","rule":"LISTLength","sum":2122},{"parent":"FUNC","rule":"LISTMAP","sum":14234},{"parent":"FUNC","rule":"LISTMAX","sum":6939},{"parent":"FUNC","rule":"LISTMIN","sum":187},{"parent":"FUNC","rule":"LISTMap","sum":365},{"parent":"FUNC","rule":"LISTNOTNULL","sum":8538},{"parent":"FUNC","rule":"LISTREVERSE","sum":2},{"parent":"FUNC","rule":"LISTSKIP","sum":15},{"parent":"FUNC","rule":"LISTSORT","sum":7613},{"parent":"FUNC","rule":"LISTSORTASC","sum":1666},{"parent":"FUNC","rule":"LISTSORTDESC","sum":109},{"parent":"FUNC","rule":"LISTSUM","sum":175},{"parent":"FUNC","rule":"LISTSort","sum":19},{"parent":"FUNC","rule":"LISTSum","sum":6},{"parent":"FUNC","rule":"LISTTAKE","sum":261},{"parent":"FUNC","rule":"LISTUNIQ","sum":596},{"parent":"FUNC","rule":"LISTUniq","sum":26},{"parent":"FUNC","rule":"LISTZIP","sum":261},{"parent":"FUNC","rule":"LISTZIPALL","sum":290},{"parent":"FUNC","rule":"LIST_AGGREGATE","sum":1},{"parent":"FUNC","rule":"LIST_ALL","sum":1},{"parent":"FUNC","rule":"LIST_ANY","sum":7},{"parent":"FUNC","rule":"LIST_CONCAT","sum":18},{"parent":"FUNC","rule":"LIST_EXTEND","sum":1},{"parent":"FUNC","rule":"LIST_EXTRACT","sum":73},{"parent":"FUNC","rule":"LIST_FOLD","sum":30},{"parent":"FUNC","rule":"LIST_FROM_RANGE","sum":351},{"parent":"FUNC","rule":"LIST_HAS","sum":2035},{"parent":"FUNC","rule":"LIST_HEAD","sum":100},{"parent":"FUNC","rule":"LIST_LAST","sum":54},{"parent":"FUNC","rule":"LIST_LENGTH","sum":2452},{"parent":"FUNC","rule":"LIST_Length","sum":1},{"parent":"FUNC","rule":"LIST_MAP","sum":9},{"parent":"FUNC","rule":"LIST_MAX","sum":285},{"parent":"FUNC","rule":"LIST_SORT","sum":626},{"parent":"FUNC","rule":"LIST_SUM","sum":2},{"parent":"FUNC","rule":"LIST_TAKE","sum":2},{"parent":"FUNC","rule":"LIST_UNIQ","sum":343},{"parent":"FUNC","rule":"LIST_length","sum":1},{"parent":"FUNC","rule":"LISTfilter","sum":7},{"parent":"FUNC","rule":"LISTfromRange","sum":1417},{"parent":"FUNC","rule":"LISTf
romrange","sum":3},{"parent":"FUNC","rule":"LISThas","sum":2},{"parent":"FUNC","rule":"LISTnotNull","sum":2},{"parent":"FUNC","rule":"LIStExtend","sum":3},{"parent":"FUNC","rule":"LIStHas","sum":2},{"parent":"FUNC","rule":"LIStLENGth","sum":1},{"parent":"FUNC","rule":"LIStLength","sum":1},{"parent":"FUNC","rule":"LIStmap","sum":1},{"parent":"FUNC","rule":"LIstConcat","sum":303},{"parent":"FUNC","rule":"LIstExtend","sum":3},{"parent":"FUNC","rule":"LIstFilter","sum":71},{"parent":"FUNC","rule":"LIstFromRange","sum":8},{"parent":"FUNC","rule":"LIstFromrange","sum":1},{"parent":"FUNC","rule":"LIstHas","sum":14},{"parent":"FUNC","rule":"LIstHasItems","sum":13},{"parent":"FUNC","rule":"LIstHead","sum":3},{"parent":"FUNC","rule":"LIstLength","sum":350},{"parent":"FUNC","rule":"LIstMap","sum":1343},{"parent":"FUNC","rule":"LIstMax","sum":7},{"parent":"FUNC","rule":"LIstSkip","sum":2},{"parent":"FUNC","rule":"LIstSort","sum":1},{"parent":"FUNC","rule":"LIstSum","sum":1},{"parent":"FUNC","rule":"LIstfilter","sum":2},{"parent":"FUNC","rule":"LIstlength","sum":13},{"parent":"FUNC","rule":"LIstmap","sum":12},{"parent":"FUNC","rule":"LOG","sum":1},{"parent":"FUNC","rule":"LOG10","sum":6},{"parent":"FUNC","rule":"LOGHISTOGRAM","sum":1},{"parent":"FUNC","rule":"LOWER","sum":5},{"parent":"FUNC","rule":"Lag","sum":12315},{"parent":"FUNC","rule":"LambdaArgumentsCount","sum":32},{"parent":"FUNC","rule":"LambdaCode","sum":330239},{"parent":"FUNC","rule":"LastValue","sum":32},{"parent":"FUNC","rule":"Last_VALUE","sum":14},{"parent":"FUNC","rule":"Last_Value","sum":29},{"parent":"FUNC","rule":"Last_value","sum":101},{"parent":"FUNC","rule":"Lead","sum":11179},{"parent":"FUNC","rule":"Least","sum":2216},{"parent":"FUNC","rule":"Len","sum":93003},{"parent":"FUNC","rule":"LenGTH","sum":12},{"parent":"FUNC","rule":"Length","sum":1248306},{"parent":"FUNC","rule":"LiSTMAP","sum":2},{"parent":"FUNC","rule":"Likely","sum":20762},{"parent":"FUNC","rule":"LinearHISTOGRAM","sum":86},{"parent":"FUNC
","rule":"LinearHistogram","sum":46166},{"parent":"FUNC","rule":"LinearHistogramCDF","sum":463},{"parent":"FUNC","rule":"LinearHistogramcdf","sum":10},{"parent":"FUNC","rule":"Linear_Histogram","sum":7},{"parent":"FUNC","rule":"Linearhistogram","sum":4},{"parent":"FUNC","rule":"Lis","sum":1},{"parent":"FUNC","rule":"LisMap","sum":2},{"parent":"FUNC","rule":"LisTHas","sum":8},{"parent":"FUNC","rule":"LisTLength","sum":2},{"parent":"FUNC","rule":"ListALL","sum":1552},{"parent":"FUNC","rule":"ListALl","sum":8},{"parent":"FUNC","rule":"ListANY","sum":1722},{"parent":"FUNC","rule":"ListAVG","sum":240},{"parent":"FUNC","rule":"ListAgg","sum":2},{"parent":"FUNC","rule":"ListAggregate","sum":424985},{"parent":"FUNC","rule":"ListAll","sum":265153},{"parent":"FUNC","rule":"ListAny","sum":1880286},{"parent":"FUNC","rule":"ListAppend","sum":1},{"parent":"FUNC","rule":"ListAvg","sum":542328},{"parent":"FUNC","rule":"ListCOncat","sum":2},{"parent":"FUNC","rule":"ListCode","sum":209429},{"parent":"FUNC","rule":"ListCollect","sum":903405},{"parent":"FUNC","rule":"ListConCat","sum":99},{"parent":"FUNC","rule":"ListConcat","sum":5473538},{"parent":"FUNC","rule":"ListCreate","sum":1959344},{"parent":"FUNC","rule":"ListDistinct","sum":1},{"parent":"FUNC","rule":"ListEnumerate","sum":1399312},{"parent":"FUNC","rule":"ListExtEnd","sum":1},{"parent":"FUNC","rule":"ListExtend","sum":4375262},{"parent":"FUNC","rule":"ListExtendStrict","sum":91484},{"parent":"FUNC","rule":"ListExtract","sum":3343121},{"parent":"FUNC","rule":"ListFILTER","sum":540},{"parent":"FUNC","rule":"ListFIlter","sum":345},{"parent":"FUNC","rule":"ListFLatMap","sum":263},{"parent":"FUNC","rule":"ListFLatten","sum":578},{"parent":"FUNC","rule":"ListFROMRange","sum":5061},{"parent":"FUNC","rule":"ListFilteR","sum":4},{"parent":"FUNC","rule":"ListFilter","sum":17808256},{"parent":"FUNC","rule":"ListFirst","sum":3},{"parent":"FUNC","rule":"ListFlatMap","sum":908504},{"parent":"FUNC","rule":"ListFlatmap","sum":29149},{"paren
t":"FUNC","rule":"ListFlatten","sum":2822248},{"parent":"FUNC","rule":"ListFold","sum":232637},{"parent":"FUNC","rule":"ListFold1","sum":35605},{"parent":"FUNC","rule":"ListFold1Map","sum":3427},{"parent":"FUNC","rule":"ListFoldMap","sum":41412},{"parent":"FUNC","rule":"ListFromPython","sum":2},{"parent":"FUNC","rule":"ListFromRANGE","sum":23},{"parent":"FUNC","rule":"ListFromRAnge","sum":45},{"parent":"FUNC","rule":"ListFromRange","sum":3090733},{"parent":"FUNC","rule":"ListFromTuple","sum":59287},{"parent":"FUNC","rule":"ListFromTyple","sum":2},{"parent":"FUNC","rule":"ListFromrange","sum":12},{"parent":"FUNC","rule":"ListHAS","sum":384},{"parent":"FUNC","rule":"ListHAs","sum":556},{"parent":"FUNC","rule":"ListHAsItems","sum":26},{"parent":"FUNC","rule":"ListHEAD","sum":2},{"parent":"FUNC","rule":"ListHEad","sum":69},{"parent":"FUNC","rule":"ListHaS","sum":1},{"parent":"FUNC","rule":"ListHas","sum":20692887},{"parent":"FUNC","rule":"ListHasITems","sum":9},{"parent":"FUNC","rule":"ListHasItemhs","sum":1},{"parent":"FUNC","rule":"ListHasItems","sum":5125383},{"parent":"FUNC","rule":"ListHasitems","sum":342},{"parent":"FUNC","rule":"ListHeaD","sum":95},{"parent":"FUNC","rule":"ListHead","sum":10716553},{"parent":"FUNC","rule":"ListINdexOf","sum":2},{"parent":"FUNC","rule":"ListIndex","sum":1},{"parent":"FUNC","rule":"ListIndexOF","sum":10},{"parent":"FUNC","rule":"ListIndexOf","sum":1227811},{"parent":"FUNC","rule":"ListIndexof","sum":19},{"parent":"FUNC","rule":"ListItemType","sum":208215},{"parent":"FUNC","rule":"ListJoin","sum":3},{"parent":"FUNC","rule":"ListJsonDocument","sum":1},{"parent":"FUNC","rule":"ListLAst","sum":10},{"parent":"FUNC","rule":"ListLENGTH","sum":2222},{"parent":"FUNC","rule":"ListLEngth","sum":19},{"parent":"FUNC","rule":"ListLasT","sum":12},{"parent":"FUNC","rule":"ListLast","sum":5986734},{"parent":"FUNC","rule":"ListLeNgth","sum":1},{"parent":"FUNC","rule":"ListLegth","sum":1},{"parent":"FUNC","rule":"ListLenght","sum":1},{"parent":"FUNC"
,"rule":"ListLengtH","sum":2},{"parent":"FUNC","rule":"ListLength","sum":18497952},{"parent":"FUNC","rule":"ListMAP","sum":227},{"parent":"FUNC","rule":"ListMAX","sum":10229},{"parent":"FUNC","rule":"ListMAp","sum":4807},{"parent":"FUNC","rule":"ListMAx","sum":1},{"parent":"FUNC","rule":"ListMIN","sum":52},{"parent":"FUNC","rule":"ListMIn","sum":1},{"parent":"FUNC","rule":"ListMaP","sum":1164},{"parent":"FUNC","rule":"ListMap","sum":35454479},{"parent":"FUNC","rule":"ListMax","sum":1416009},{"parent":"FUNC","rule":"ListMin","sum":814448},{"parent":"FUNC","rule":"ListNOTNull","sum":714},{"parent":"FUNC","rule":"ListNoTNull","sum":17},{"parent":"FUNC","rule":"ListNoTnull","sum":953},{"parent":"FUNC","rule":"ListNonNull","sum":1},{"parent":"FUNC","rule":"ListNotNULL","sum":30544},{"parent":"FUNC","rule":"ListNotNUll","sum":7772},{"parent":"FUNC","rule":"ListNotNuLL","sum":5},{"parent":"FUNC","rule":"ListNotNul","sum":1},{"parent":"FUNC","rule":"ListNotNulL","sum":3},{"parent":"FUNC","rule":"ListNotNull","sum":9254299},{"parent":"FUNC","rule":"ListNotnull","sum":22},{"parent":"FUNC","rule":"ListREplicate","sum":2},{"parent":"FUNC","rule":"ListRange","sum":4},{"parent":"FUNC","rule":"ListRepeat","sum":1},{"parent":"FUNC","rule":"ListReplicate","sum":951308},{"parent":"FUNC","rule":"ListReverse","sum":1333079},{"parent":"FUNC","rule":"ListSORT","sum":1},{"parent":"FUNC","rule":"ListSORtAsc","sum":26},{"parent":"FUNC","rule":"ListSOrt","sum":3},{"parent":"FUNC","rule":"ListSUM","sum":854},{"parent":"FUNC","rule":"ListSUm","sum":12},{"parent":"FUNC","rule":"ListSample","sum":13},{"parent":"FUNC","rule":"ListSampleN","sum":706},{"parent":"FUNC","rule":"ListShuffle","sum":525},{"parent":"FUNC","rule":"ListShuffleN","sum":1},{"parent":"FUNC","rule":"ListSkip","sum":617760},{"parent":"FUNC","rule":"ListSkipWhile","sum":139511},{"parent":"FUNC","rule":"ListSkipWhileInclusive","sum":8743},{"parent":"FUNC","rule":"ListSort","sum":5009469},{"parent":"FUNC","rule":"ListSortASC","sum
":18047},{"parent":"FUNC","rule":"ListSortAsc","sum":485962},{"parent":"FUNC","rule":"ListSortDESC","sum":56},{"parent":"FUNC","rule":"ListSortDEsc","sum":2},{"parent":"FUNC","rule":"ListSortDesc","sum":1136848},{"parent":"FUNC","rule":"ListSortasc","sum":9},{"parent":"FUNC","rule":"ListSortdesc","sum":638},{"parent":"FUNC","rule":"ListSum","sum":1750208},{"parent":"FUNC","rule":"ListTail","sum":1},{"parent":"FUNC","rule":"ListTake","sum":2153837},{"parent":"FUNC","rule":"ListTakeWhile","sum":89554},{"parent":"FUNC","rule":"ListTakeWhileInclusive","sum":2465},{"parent":"FUNC","rule":"ListToTuple","sum":6392},{"parent":"FUNC","rule":"ListTop","sum":2337},{"parent":"FUNC","rule":"ListTopAsc","sum":132},{"parent":"FUNC","rule":"ListTopDESC","sum":1},{"parent":"FUNC","rule":"ListTopDesc","sum":10812},{"parent":"FUNC","rule":"ListTopSort","sum":1051},{"parent":"FUNC","rule":"ListTopSortAsc","sum":40},{"parent":"FUNC","rule":"ListTopSortDesc","sum":60191},{"parent":"FUNC","rule":"ListTopdesc","sum":1},{"parent":"FUNC","rule":"ListType","sum":32196},{"parent":"FUNC","rule":"ListTypeHandle","sum":1},{"parent":"FUNC","rule":"ListUNiq","sum":30},{"parent":"FUNC","rule":"ListUnionALL","sum":128},{"parent":"FUNC","rule":"ListUnionAll","sum":56471},{"parent":"FUNC","rule":"ListUniq","sum":3978493},{"parent":"FUNC","rule":"ListUniqStable","sum":75921},{"parent":"FUNC","rule":"ListZIP","sum":8},{"parent":"FUNC","rule":"ListZIp","sum":1},{"parent":"FUNC","rule":"ListZip","sum":2532464},{"parent":"FUNC","rule":"ListZipALL","sum":290767},{"parent":"FUNC","rule":"ListZipAll","sum":275100},{"parent":"FUNC","rule":"List_FromRange","sum":11},{"parent":"FUNC","rule":"List_Has","sum":23},{"parent":"FUNC","rule":"List_Length","sum":15},{"parent":"FUNC","rule":"List_Sort","sum":10},{"parent":"FUNC","rule":"List_Uniq","sum":1},{"parent":"FUNC","rule":"List_concat","sum":3},{"parent":"FUNC","rule":"List_length","sum":2},{"parent":"FUNC","rule":"List_sort","sum":80},{"parent":"FUNC","rule":"Lis
tall","sum":4448},{"parent":"FUNC","rule":"Listany","sum":38},{"parent":"FUNC","rule":"Listcollect","sum":5250},{"parent":"FUNC","rule":"Listconcat","sum":14858},{"parent":"FUNC","rule":"Listenumerate","sum":3},{"parent":"FUNC","rule":"Listextend","sum":5},{"parent":"FUNC","rule":"Listfilter","sum":263891},{"parent":"FUNC","rule":"Listflatmap","sum":297},{"parent":"FUNC","rule":"Listflatten","sum":33},{"parent":"FUNC","rule":"ListfromRange","sum":32},{"parent":"FUNC","rule":"Listfromrange","sum":110},{"parent":"FUNC","rule":"Listfromtuple","sum":1},{"parent":"FUNC","rule":"Listhas","sum":69815},{"parent":"FUNC","rule":"ListhasItems","sum":267},{"parent":"FUNC","rule":"Listhasitems","sum":488},{"parent":"FUNC","rule":"Listhead","sum":18578},{"parent":"FUNC","rule":"ListindexOf","sum":2077},{"parent":"FUNC","rule":"Listindexof","sum":4},{"parent":"FUNC","rule":"ListlENGTH","sum":1},{"parent":"FUNC","rule":"ListlEngth","sum":2},{"parent":"FUNC","rule":"Listlast","sum":4104},{"parent":"FUNC","rule":"Listlength","sum":10851},{"parent":"FUNC","rule":"Listmap","sum":11269},{"parent":"FUNC","rule":"Listmax","sum":495},{"parent":"FUNC","rule":"Listmin","sum":1627},{"parent":"FUNC","rule":"ListnotNull","sum":38},{"parent":"FUNC","rule":"Listnotnull","sum":8},{"parent":"FUNC","rule":"Listreverse","sum":17},{"parent":"FUNC","rule":"Listskip","sum":148},{"parent":"FUNC","rule":"ListskipWhile","sum":409},{"parent":"FUNC","rule":"Listsort","sum":1045},{"parent":"FUNC","rule":"Listsortasc","sum":1},{"parent":"FUNC","rule":"Listsortdesc","sum":1},{"parent":"FUNC","rule":"Listsum","sum":27},{"parent":"FUNC","rule":"Listtake","sum":20212},{"parent":"FUNC","rule":"ListtakeWhile","sum":2},{"parent":"FUNC","rule":"Listuniq","sum":326},{"parent":"FUNC","rule":"Listzip","sum":766},{"parent":"FUNC","rule":"Listzipall","sum":1},{"parent":"FUNC","rule":"LogHISTOGRAM","sum":7},{"parent":"FUNC","rule":"LogHistogram","sum":285},{"parent":"FUNC","rule":"LogHistogramCDF","sum":15},{"parent":"FUNC"
,"rule":"LogarithmicHISTOGRAM","sum":1},{"parent":"FUNC","rule":"LogarithmicHistogram","sum":611},{"parent":"FUNC","rule":"Logarithmichistogram","sum":20},{"parent":"FUNC","rule":"Lookup","sum":1},{"parent":"FUNC","rule":"MAX","sum":34515863},{"parent":"FUNC","rule":"MAXBY","sum":123},{"parent":"FUNC","rule":"MAXOF","sum":3},{"parent":"FUNC","rule":"MAX_BY","sum":22826836},{"parent":"FUNC","rule":"MAX_By","sum":19},{"parent":"FUNC","rule":"MAX_IF","sum":1},{"parent":"FUNC","rule":"MAX_OF","sum":2198760},{"parent":"FUNC","rule":"MAX_Of","sum":2},{"parent":"FUNC","rule":"MAX_bY","sum":7},{"parent":"FUNC","rule":"MAX_by","sum":10595},{"parent":"FUNC","rule":"MAX_of","sum":596},{"parent":"FUNC","rule":"MAx","sum":286},{"parent":"FUNC","rule":"MEDIAN","sum":912799},{"parent":"FUNC","rule":"MEDiAN","sum":4},{"parent":"FUNC","rule":"MEdian","sum":14},{"parent":"FUNC","rule":"MIN","sum":14648475},{"parent":"FUNC","rule":"MINBY","sum":1906},{"parent":"FUNC","rule":"MIN_BY","sum":5284759},{"parent":"FUNC","rule":"MIN_IF","sum":8},{"parent":"FUNC","rule":"MIN_OF","sum":2264108},{"parent":"FUNC","rule":"MIN_by","sum":1285},{"parent":"FUNC","rule":"MIN_of","sum":2},{"parent":"FUNC","rule":"MIn","sum":199},{"parent":"FUNC","rule":"MIn_OF","sum":3},{"parent":"FUNC","rule":"MODE","sum":900632},{"parent":"FUNC","rule":"MODe","sum":1},{"parent":"FUNC","rule":"MONTH","sum":1},{"parent":"FUNC","rule":"MULTI_AGGREGATE_BY","sum":695092},{"parent":"FUNC","rule":"MaX","sum":7},{"parent":"FUNC","rule":"MakeDate","sum":1},{"parent":"FUNC","rule":"Map","sum":4},{"parent":"FUNC","rule":"Max","sum":379920},{"parent":"FUNC","rule":"MaxBy","sum":5401},{"parent":"FUNC","rule":"MaxOf","sum":207},{"parent":"FUNC","rule":"Max_BY","sum":3216},{"parent":"FUNC","rule":"Max_By","sum":70597},{"parent":"FUNC","rule":"Max_OF","sum":679},{"parent":"FUNC","rule":"Max_Of","sum":648},{"parent":"FUNC","rule":"Max_by","sum":141194},{"parent":"FUNC","rule":"Max_of","sum":3630},{"parent":"FUNC","rule":"Median","sum
":19438},{"parent":"FUNC","rule":"MiN","sum":32},{"parent":"FUNC","rule":"Min","sum":193235},{"parent":"FUNC","rule":"MinBy","sum":1195},{"parent":"FUNC","rule":"MinOf","sum":892},{"parent":"FUNC","rule":"Min_BY","sum":433},{"parent":"FUNC","rule":"Min_By","sum":10707},{"parent":"FUNC","rule":"Min_OF","sum":1095},{"parent":"FUNC","rule":"Min_Of","sum":827},{"parent":"FUNC","rule":"Min_by","sum":9098},{"parent":"FUNC","rule":"Min_of","sum":1312},{"parent":"FUNC","rule":"MoDE","sum":1},{"parent":"FUNC","rule":"Mode","sum":14678},{"parent":"FUNC","rule":"Mul","sum":1},{"parent":"FUNC","rule":"MultiAggregateBy","sum":643},{"parent":"FUNC","rule":"Multi_Aggregate_BY","sum":4},{"parent":"FUNC","rule":"Multi_Aggregate_By","sum":1157},{"parent":"FUNC","rule":"Multi_Aggregate_by","sum":4},{"parent":"FUNC","rule":"Multi_aggregate_by","sum":411},{"parent":"FUNC","rule":"NANVL","sum":720019},{"parent":"FUNC","rule":"NOTHING","sum":3213},{"parent":"FUNC","rule":"NOW","sum":2},{"parent":"FUNC","rule":"NOW64","sum":2},{"parent":"FUNC","rule":"NOthing","sum":1},{"parent":"FUNC","rule":"NTH_VALUE","sum":2998},{"parent":"FUNC","rule":"NTILE","sum":1812},{"parent":"FUNC","rule":"NULLIF","sum":3},{"parent":"FUNC","rule":"NVL","sum":17663064},{"parent":"FUNC","rule":"NVl","sum":814},{"parent":"FUNC","rule":"NaNvl","sum":26},{"parent":"FUNC","rule":"NanVL","sum":1},{"parent":"FUNC","rule":"NanVl","sum":3},{"parent":"FUNC","rule":"Nanvl","sum":10762},{"parent":"FUNC","rule":"NothiNG","sum":2},{"parent":"FUNC","rule":"Nothing","sum":811549},{"parent":"FUNC","rule":"NullTypeHandle","sum":2344},{"parent":"FUNC","rule":"Nvl","sum":28388},{"parent":"FUNC","rule":"OR","sum":6},{"parent":"FUNC","rule":"OVER","sum":1},{"parent":"FUNC","rule":"OptionalItemType","sum":37171},{"parent":"FUNC","rule":"OptionalType","sum":137315},{"parent":"FUNC","rule":"OptionalTypeHandle","sum":44188},{"parent":"FUNC","rule":"PERCENTILE","sum":29880047},{"parent":"FUNC","rule":"PERCENTIlE","sum":4},{"parent":"FUNC",
"rule":"PERCENT_RANK","sum":6088},{"parent":"FUNC","rule":"PERCENtILE","sum":1952},{"parent":"FUNC","rule":"PERCEnTILE","sum":1},{"parent":"FUNC","rule":"PICKLE","sum":77},{"parent":"FUNC","rule":"PIckle","sum":7},{"parent":"FUNC","rule":"POPULATION_STDDEV","sum":365},{"parent":"FUNC","rule":"POPULATION_VARIANCE","sum":19},{"parent":"FUNC","rule":"POWER","sum":4},{"parent":"FUNC","rule":"ParseDuration","sum":2},{"parent":"FUNC","rule":"ParseFILE","sum":22},{"parent":"FUNC","rule":"ParseFIle","sum":8},{"parent":"FUNC","rule":"ParseFile","sum":952351},{"parent":"FUNC","rule":"ParseType","sum":463703},{"parent":"FUNC","rule":"ParseTypeHandle","sum":1189481},{"parent":"FUNC","rule":"Parsefile","sum":111},{"parent":"FUNC","rule":"Path","sum":4},{"parent":"FUNC","rule":"PeRCENTILE","sum":365},{"parent":"FUNC","rule":"Percentile","sum":72997},{"parent":"FUNC","rule":"PgArray","sum":17},{"parent":"FUNC","rule":"PgBool","sum":152},{"parent":"FUNC","rule":"PgCall","sum":552},{"parent":"FUNC","rule":"PgCast","sum":5580},{"parent":"FUNC","rule":"PgCircle","sum":5},{"parent":"FUNC","rule":"PgConst","sum":96},{"parent":"FUNC","rule":"PgDate","sum":110},{"parent":"FUNC","rule":"PgGeometry","sum":6},{"parent":"FUNC","rule":"PgInt4","sum":4},{"parent":"FUNC","rule":"PgInt8","sum":1},{"parent":"FUNC","rule":"PgInterval","sum":33},{"parent":"FUNC","rule":"PgOp","sum":1369},{"parent":"FUNC","rule":"PgPoint","sum":76},{"parent":"FUNC","rule":"PgPolygon","sum":517},{"parent":"FUNC","rule":"PgRangeCall","sum":7},{"parent":"FUNC","rule":"PgText","sum":24},{"parent":"FUNC","rule":"PgTimestamp","sum":42},{"parent":"FUNC","rule":"PgVarBit","sum":2},{"parent":"FUNC","rule":"Pickle","sum":118410},{"parent":"FUNC","rule":"QuoteCode","sum":723777},{"parent":"FUNC","rule":"RADIANS","sum":12},{"parent":"FUNC","rule":"RAND","sum":2},{"parent":"FUNC","rule":"RANDOM","sum":486258},{"parent":"FUNC","rule":"RANDOMNUMBER","sum":4002},{"parent":"FUNC","rule":"RANDOMNumber","sum":11},{"parent":"FUNC","rule
":"RANDOMUUID","sum":5},{"parent":"FUNC","rule":"RANDOM_NUMBER","sum":83},{"parent":"FUNC","rule":"RANGE","sum":150},{"parent":"FUNC","rule":"RANK","sum":943535},{"parent":"FUNC","rule":"RAndom","sum":7},{"parent":"FUNC","rule":"REGEXP","sum":1},{"parent":"FUNC","rule":"REMOVEMEMBERS","sum":41},{"parent":"FUNC","rule":"REMOVE_mEMBER","sum":6},{"parent":"FUNC","rule":"REPLACE","sum":3},{"parent":"FUNC","rule":"RFIND","sum":3580748},{"parent":"FUNC","rule":"RFind","sum":252143},{"parent":"FUNC","rule":"RIGHT","sum":2},{"parent":"FUNC","rule":"ROUND","sum":10},{"parent":"FUNC","rule":"ROWNUMBER","sum":33},{"parent":"FUNC","rule":"ROW_NUMBER","sum":9398855},{"parent":"FUNC","rule":"ROW_NUMber","sum":11},{"parent":"FUNC","rule":"ROW_Number","sum":8},{"parent":"FUNC","rule":"ROW_nUMBER","sum":1},{"parent":"FUNC","rule":"ROW_nuMBER","sum":4},{"parent":"FUNC","rule":"ROW_number","sum":13485},{"parent":"FUNC","rule":"Rand","sum":1},{"parent":"FUNC","rule":"Random","sum":731275},{"parent":"FUNC","rule":"RandomNUmber","sum":172},{"parent":"FUNC","rule":"RandomNumber","sum":271843},{"parent":"FUNC","rule":"RandomUUID","sum":8087},{"parent":"FUNC","rule":"RandomUUid","sum":33004},{"parent":"FUNC","rule":"RandomUuid","sum":214066},{"parent":"FUNC","rule":"Randomnumber","sum":2},{"parent":"FUNC","rule":"Range","sum":2},{"parent":"FUNC","rule":"Rank","sum":3091},{"parent":"FUNC","rule":"RemoveMEmbers","sum":16},{"parent":"FUNC","rule":"RemoveMember","sum":431434},{"parent":"FUNC","rule":"RemoveMembers","sum":727140},{"parent":"FUNC","rule":"RemoveTimeZone","sum":13},{"parent":"FUNC","rule":"RemoveTimezone","sum":433575},{"parent":"FUNC","rule":"Removemember","sum":686},{"parent":"FUNC","rule":"Removemembers","sum":4},{"parent":"FUNC","rule":"RenameMembers","sum":504186},{"parent":"FUNC","rule":"ReplaceMember","sum":9374},{"parent":"FUNC","rule":"ReprCode","sum":195638},{"parent":"FUNC","rule":"ResourceType","sum":9},{"parent":"FUNC","rule":"Rfind","sum":870},{"parent":"FUNC","rule"
:"RootAttributes","sum":20},{"parent":"FUNC","rule":"RowNum","sum":1},{"parent":"FUNC","rule":"RowNumber","sum":21077},{"parent":"FUNC","rule":"Row_NUMBER","sum":33},{"parent":"FUNC","rule":"Row_NUmber","sum":16},{"parent":"FUNC","rule":"Row_Number","sum":60208},{"parent":"FUNC","rule":"Row_number","sum":4109},{"parent":"FUNC","rule":"SESSIONWINDOW","sum":131},{"parent":"FUNC","rule":"SETINTERSECTION","sum":20},{"parent":"FUNC","rule":"SIGN","sum":1},{"parent":"FUNC","rule":"SIN","sum":4},{"parent":"FUNC","rule":"SOME","sum":32962069},{"parent":"FUNC","rule":"SOMe","sum":17},{"parent":"FUNC","rule":"SON_VALUE","sum":1},{"parent":"FUNC","rule":"SOmE","sum":3},{"parent":"FUNC","rule":"SOme","sum":9},{"parent":"FUNC","rule":"SQL","sum":6},{"parent":"FUNC","rule":"SQRT","sum":2},{"parent":"FUNC","rule":"STARTSWITH","sum":25435},{"parent":"FUNC","rule":"STARTS_WITH","sum":46},{"parent":"FUNC","rule":"STARTsWITH","sum":17},{"parent":"FUNC","rule":"STATICMAP","sum":46},{"parent":"FUNC","rule":"STD","sum":6},{"parent":"FUNC","rule":"STDDEV","sum":400454},{"parent":"FUNC","rule":"STDDEVPOP","sum":2821},{"parent":"FUNC","rule":"STDDEVSAMP","sum":1096},{"parent":"FUNC","rule":"STDDEV_POP","sum":14},{"parent":"FUNC","rule":"STDDEV_POPULATION","sum":18575},{"parent":"FUNC","rule":"STDDEV_SAMP","sum":3383},{"parent":"FUNC","rule":"STDDEV_SAMPLE","sum":9929},{"parent":"FUNC","rule":"STDDev","sum":2},{"parent":"FUNC","rule":"STDdev","sum":4},{"parent":"FUNC","rule":"STRING_SPLIT","sum":1},{"parent":"FUNC","rule":"ST_AsText","sum":3},{"parent":"FUNC","rule":"ST_ClosestPoint","sum":3},{"parent":"FUNC","rule":"ST_Distance","sum":5},{"parent":"FUNC","rule":"ST_GeomFromGeoHash","sum":1},{"parent":"FUNC","rule":"ST_Point","sum":10},{"parent":"FUNC","rule":"ST_PolygonFromText","sum":5},{"parent":"FUNC","rule":"ST_SetSRID","sum":6},{"parent":"FUNC","rule":"STartsWith","sum":2},{"parent":"FUNC","rule":"STdDEV","sum":9},{"parent":"FUNC","rule":"SUBQUERYExtendFor","sum":6},{"parent":"FUNC","r
ule":"SUBSTIRNG","sum":4},{"parent":"FUNC","rule":"SUBSTRING","sum":29486312},{"parent":"FUNC","rule":"SUBSTRINg","sum":2},{"parent":"FUNC","rule":"SUBSTRInG","sum":4},{"parent":"FUNC","rule":"SUBSTRiNG","sum":2},{"parent":"FUNC","rule":"SUBSTRinG","sum":2},{"parent":"FUNC","rule":"SUBSTRing","sum":222},{"parent":"FUNC","rule":"SUBSTrING","sum":21},{"parent":"FUNC","rule":"SUBSTring","sum":14},{"parent":"FUNC","rule":"SUBStRING","sum":2},{"parent":"FUNC","rule":"SUBString","sum":20},{"parent":"FUNC","rule":"SUBstring","sum":3},{"parent":"FUNC","rule":"SUM","sum":42518480},{"parent":"FUNC","rule":"SUMIF","sum":3307},{"parent":"FUNC","rule":"SUM_","sum":317},{"parent":"FUNC","rule":"SUM_IF","sum":8450348},{"parent":"FUNC","rule":"SUM_If","sum":117},{"parent":"FUNC","rule":"SUM_iF","sum":19},{"parent":"FUNC","rule":"SUM_if","sum":266967},{"parent":"FUNC","rule":"SUN","sum":1},{"parent":"FUNC","rule":"SUbstring","sum":5},{"parent":"FUNC","rule":"SUm","sum":80},{"parent":"FUNC","rule":"SUm_IF","sum":30},{"parent":"FUNC","rule":"SWITCH","sum":2},{"parent":"FUNC","rule":"SecureParam","sum":899360},{"parent":"FUNC","rule":"SessionStart","sum":24214},{"parent":"FUNC","rule":"SessionState","sum":715},{"parent":"FUNC","rule":"SessionWindow","sum":102011},{"parent":"FUNC","rule":"SetBit","sum":44653},{"parent":"FUNC","rule":"SetContains","sum":1},{"parent":"FUNC","rule":"SetCreate","sum":45695},{"parent":"FUNC","rule":"SetDIfference","sum":11},{"parent":"FUNC","rule":"SetDifference","sum":485655},{"parent":"FUNC","rule":"SetINtersection","sum":1},{"parent":"FUNC","rule":"SetIncludes","sum":246893},{"parent":"FUNC","rule":"SetInterSection","sum":21},{"parent":"FUNC","rule":"SetIntersection","sum":1266285},{"parent":"FUNC","rule":"SetIsDisJOINt","sum":8105},{"parent":"FUNC","rule":"SetIsDisJoint","sum":1682},{"parent":"FUNC","rule":"SetIsDisjoint","sum":1239534},{"parent":"FUNC","rule":"SetSymmetricDifference","sum":8881},{"parent":"FUNC","rule":"SetUNION","sum":1},{"parent":"FUN
C","rule":"SetUnion","sum":587911},{"parent":"FUNC","rule":"Setintersection","sum":4469},{"parent":"FUNC","rule":"SizeOf","sum":1},{"parent":"FUNC","rule":"Some","sum":2527597},{"parent":"FUNC","rule":"SplitToList","sum":6},{"parent":"FUNC","rule":"SpreadMembers","sum":344856},{"parent":"FUNC","rule":"StablePicke","sum":1},{"parent":"FUNC","rule":"StablePickle","sum":212261},{"parent":"FUNC","rule":"StartSwith","sum":105},{"parent":"FUNC","rule":"StartsWIth","sum":703},{"parent":"FUNC","rule":"StartsWith","sum":5761432},{"parent":"FUNC","rule":"Startswith","sum":19556},{"parent":"FUNC","rule":"StaticFold","sum":306},{"parent":"FUNC","rule":"StaticMap","sum":683879},{"parent":"FUNC","rule":"StaticZip","sum":14379},{"parent":"FUNC","rule":"Staticmap","sum":2},{"parent":"FUNC","rule":"StdDev","sum":33},{"parent":"FUNC","rule":"Stddev","sum":43},{"parent":"FUNC","rule":"StreamItemType","sum":5717},{"parent":"FUNC","rule":"StreamType","sum":78367},{"parent":"FUNC","rule":"StreamTypeHandle","sum":17},{"parent":"FUNC","rule":"String","sum":34439},{"parent":"FUNC","rule":"StringLength","sum":5},{"parent":"FUNC","rule":"StringSplitToList","sum":4},{"parent":"FUNC","rule":"StructDifference","sum":235},{"parent":"FUNC","rule":"StructIntersection","sum":811},{"parent":"FUNC","rule":"StructMemberType","sum":19757},{"parent":"FUNC","rule":"StructMembers","sum":969975},{"parent":"FUNC","rule":"StructSymmetricDifference","sum":172},{"parent":"FUNC","rule":"StructType","sum":17999},{"parent":"FUNC","rule":"StructTypeComponents","sum":99610},{"parent":"FUNC","rule":"StructTypeHandle","sum":87847},{"parent":"FUNC","rule":"StructUnion","sum":100902},{"parent":"FUNC","rule":"SuBSTRING","sum":1},{"parent":"FUNC","rule":"SuM","sum":9},{"parent":"FUNC","rule":"SuM_IF","sum":4},{"parent":"FUNC","rule":"SubQueryExtendFor","sum":3},{"parent":"FUNC","rule":"SubSTRING","sum":3},{"parent":"FUNC","rule":"SubString","sum":27399},{"parent":"FUNC","rule":"SubqueryAssumeOrderBy","sum":842},{"parent":
"FUNC","rule":"SubqueryExtend","sum":37066},{"parent":"FUNC","rule":"SubqueryExtendFor","sum":769785},{"parent":"FUNC","rule":"SubqueryMerge","sum":19},{"parent":"FUNC","rule":"SubqueryMergeFor","sum":36395},{"parent":"FUNC","rule":"SubqueryOrderBy","sum":183831},{"parent":"FUNC","rule":"SubqueryUnionALLFor","sum":9},{"parent":"FUNC","rule":"SubqueryUnionALlFor","sum":76},{"parent":"FUNC","rule":"SubqueryUnionAll","sum":26080},{"parent":"FUNC","rule":"SubqueryUnionAllFor","sum":140800},{"parent":"FUNC","rule":"SubqueryUnionAllfor","sum":380},{"parent":"FUNC","rule":"SubqueryUnionMerge","sum":30574},{"parent":"FUNC","rule":"SubqueryUnionMergeFor","sum":73776},{"parent":"FUNC","rule":"SubsTRING","sum":3},{"parent":"FUNC","rule":"Substring","sum":2922245},{"parent":"FUNC","rule":"Sum","sum":453802},{"parent":"FUNC","rule":"SumIf","sum":1597},{"parent":"FUNC","rule":"Sum_IF","sum":85},{"parent":"FUNC","rule":"Sum_If","sum":14030},{"parent":"FUNC","rule":"Sum_if","sum":52960},{"parent":"FUNC","rule":"TABLENAME","sum":12019},{"parent":"FUNC","rule":"TABLEPATH","sum":282},{"parent":"FUNC","rule":"TABLERECORDINDEX","sum":45},{"parent":"FUNC","rule":"TABLEROW","sum":1253},{"parent":"FUNC","rule":"TABLE_NAME","sum":138},{"parent":"FUNC","rule":"TABLE_PATH","sum":53},{"parent":"FUNC","rule":"TABLE_ROW","sum":124},{"parent":"FUNC","rule":"TABLEname","sum":4},{"parent":"FUNC","rule":"TAbleName","sum":68},{"parent":"FUNC","rule":"TAblePath","sum":2},{"parent":"FUNC","rule":"TAbleRow","sum":25},{"parent":"FUNC","rule":"TAblename","sum":1},{"parent":"FUNC","rule":"TESTBIT","sum":3},{"parent":"FUNC","rule":"TEstBit","sum":1},{"parent":"FUNC","rule":"TIMESTAMP","sum":420},{"parent":"FUNC","rule":"TIMESTAMPDIFF","sum":2},{"parent":"FUNC","rule":"TIMESTAMP_SECONDS","sum":1},{"parent":"FUNC","rule":"TOBytes","sum":1},{"parent":"FUNC","rule":"TODICT","sum":21},{"parent":"FUNC","rule":"TODIct","sum":1},{"parent":"FUNC","rule":"TOP","sum":501195},{"parent":"FUNC","rule":"TOPBY","sum":1},{"
parent":"FUNC","rule":"TOPFREQ","sum":510950},{"parent":"FUNC","rule":"TOPFreq","sum":3},{"parent":"FUNC","rule":"TOP_BY","sum":1770572},{"parent":"FUNC","rule":"TOP_FREQ","sum":46},{"parent":"FUNC","rule":"TOP_by","sum":26},{"parent":"FUNC","rule":"TOSET","sum":41},{"parent":"FUNC","rule":"TOSet","sum":774},{"parent":"FUNC","rule":"TO_NUMBER","sum":14},{"parent":"FUNC","rule":"TO_TIMESTAMP","sum":1},{"parent":"FUNC","rule":"TObytes","sum":3},{"parent":"FUNC","rule":"TRY_MEMBER","sum":37},{"parent":"FUNC","rule":"TYPEOF","sum":1},{"parent":"FUNC","rule":"TYPEof","sum":28},{"parent":"FUNC","rule":"TZDateTime","sum":135},{"parent":"FUNC","rule":"TZDatetime","sum":3},{"parent":"FUNC","rule":"TZTimestamp","sum":64},{"parent":"FUNC","rule":"TabLeName","sum":1},{"parent":"FUNC","rule":"TableNAME","sum":3},{"parent":"FUNC","rule":"TableNAme","sum":1633},{"parent":"FUNC","rule":"TableNamE","sum":2},{"parent":"FUNC","rule":"TableName","sum":15042117},{"parent":"FUNC","rule":"TablePATH","sum":1},{"parent":"FUNC","rule":"TablePAth","sum":5},{"parent":"FUNC","rule":"TablePath","sum":1501216},{"parent":"FUNC","rule":"TableROW","sum":4},{"parent":"FUNC","rule":"TableROw","sum":19},{"parent":"FUNC","rule":"TableRecordINdex","sum":4},{"parent":"FUNC","rule":"TableRecordIndex","sum":2316230},{"parent":"FUNC","rule":"TableRedordIndex","sum":1},{"parent":"FUNC","rule":"TableRow","sum":27008055},{"parent":"FUNC","rule":"TableRowIndex","sum":1},{"parent":"FUNC","rule":"TableRows","sum":1687797},{"parent":"FUNC","rule":"Table_Name","sum":55},{"parent":"FUNC","rule":"Table_Row","sum":5},{"parent":"FUNC","rule":"Table_name","sum":73},{"parent":"FUNC","rule":"Table_path","sum":3},{"parent":"FUNC","rule":"Tablename","sum":104251},{"parent":"FUNC","rule":"Tablepath","sum":205},{"parent":"FUNC","rule":"TablerRow","sum":1},{"parent":"FUNC","rule":"TablerecordIndex","sum":1},{"parent":"FUNC","rule":"Tablerecordindex","sum":3},{"parent":"FUNC","rule":"Tablerow","sum":26428},{"parent":"FUNC","rule
":"TestBit","sum":253322},{"parent":"FUNC","rule":"Text","sum":20},{"parent":"FUNC","rule":"TimeStamp","sum":4383},{"parent":"FUNC","rule":"Timestamp","sum":1352330},{"parent":"FUNC","rule":"Timestamp64","sum":59},{"parent":"FUNC","rule":"ToBytes","sum":6649582},{"parent":"FUNC","rule":"ToDIct","sum":258},{"parent":"FUNC","rule":"ToDict","sum":6404050},{"parent":"FUNC","rule":"ToList","sum":3},{"parent":"FUNC","rule":"ToLower","sum":4},{"parent":"FUNC","rule":"ToMilliseconds","sum":2},{"parent":"FUNC","rule":"ToMultiDict","sum":339195},{"parent":"FUNC","rule":"ToPg","sum":3151},{"parent":"FUNC","rule":"ToSET","sum":80},{"parent":"FUNC","rule":"ToSet","sum":5473624},{"parent":"FUNC","rule":"ToSortedDict","sum":1116},{"parent":"FUNC","rule":"ToStartOfMonth","sum":4},{"parent":"FUNC","rule":"To_bytes","sum":3},{"parent":"FUNC","rule":"Todict","sum":232},{"parent":"FUNC","rule":"Top","sum":810},{"parent":"FUNC","rule":"TopBy","sum":77},{"parent":"FUNC","rule":"TopFreq","sum":13726},{"parent":"FUNC","rule":"Top_BY","sum":22},{"parent":"FUNC","rule":"Top_By","sum":110},{"parent":"FUNC","rule":"Top_by","sum":122},{"parent":"FUNC","rule":"Topfreq","sum":28},{"parent":"FUNC","rule":"Toset","sum":2338},{"parent":"FUNC","rule":"TryMember","sum":16740604},{"parent":"FUNC","rule":"Trymember","sum":36901},{"parent":"FUNC","rule":"TupleElementType","sum":7628},{"parent":"FUNC","rule":"TupleType","sum":2404},{"parent":"FUNC","rule":"TupleTypeComponents","sum":58},{"parent":"FUNC","rule":"TupleTypeHandle","sum":5589},{"parent":"FUNC","rule":"TypeHandle","sum":161732},{"parent":"FUNC","rule":"TypeKind","sum":54811},{"parent":"FUNC","rule":"TypeOF","sum":37},{"parent":"FUNC","rule":"TypeOf","sum":848023},{"parent":"FUNC","rule":"Typeof","sum":78},{"parent":"FUNC","rule":"TzDate","sum":671},{"parent":"FUNC","rule":"TzDate32","sum":7},{"parent":"FUNC","rule":"TzDateTime","sum":24914},{"parent":"FUNC","rule":"TzDateTime64","sum":7},{"parent":"FUNC","rule":"TzDatetime","sum":46364},{"pare
nt":"FUNC","rule":"TzTimeStamp","sum":4},{"parent":"FUNC","rule":"TzTimestamp","sum":343086},{"parent":"FUNC","rule":"TzTimestamp64","sum":7},{"parent":"FUNC","rule":"UBSTRING","sum":1},{"parent":"FUNC","rule":"UDAF","sum":141471},{"parent":"FUNC","rule":"UDF","sum":1822},{"parent":"FUNC","rule":"UINT32","sum":4},{"parent":"FUNC","rule":"UINT64","sum":6},{"parent":"FUNC","rule":"UInt32","sum":110038},{"parent":"FUNC","rule":"UInt64","sum":829},{"parent":"FUNC","rule":"UInt8","sum":421},{"parent":"FUNC","rule":"UNIQ","sum":1},{"parent":"FUNC","rule":"UNPICKLE","sum":28},{"parent":"FUNC","rule":"UNTAG","sum":18},{"parent":"FUNC","rule":"UNWRAP","sum":14008231},{"parent":"FUNC","rule":"UNWRAp","sum":1},{"parent":"FUNC","rule":"UNWRaP","sum":1},{"parent":"FUNC","rule":"UNWRap","sum":10490},{"parent":"FUNC","rule":"UNWrAP","sum":11},{"parent":"FUNC","rule":"UNWraP","sum":8},{"parent":"FUNC","rule":"UNWrap","sum":4},{"parent":"FUNC","rule":"UNwRAP","sum":6},{"parent":"FUNC","rule":"UNwrap","sum":1355},{"parent":"FUNC","rule":"USING","sum":6},{"parent":"FUNC","rule":"UTF8","sum":337764},{"parent":"FUNC","rule":"UUID","sum":1},{"parent":"FUNC","rule":"Udf","sum":299447},{"parent":"FUNC","rule":"Uint16","sum":83},{"parent":"FUNC","rule":"Uint32","sum":331912},{"parent":"FUNC","rule":"Uint64","sum":11976},{"parent":"FUNC","rule":"Uint8","sum":2921},{"parent":"FUNC","rule":"UnTag","sum":55},{"parent":"FUNC","rule":"UnWRAP","sum":10},{"parent":"FUNC","rule":"UnWrAp","sum":1},{"parent":"FUNC","rule":"UnWrap","sum":1072429},{"parent":"FUNC","rule":"UnionAll","sum":3},{"parent":"FUNC","rule":"Unpickle","sum":180},{"parent":"FUNC","rule":"Untag","sum":15484},{"parent":"FUNC","rule":"Unwarp","sum":1},{"parent":"FUNC","rule":"Unwrap","sum":35759752},{"parent":"FUNC","rule":"UtcCurrentDatetime","sum":8},{"parent":"FUNC","rule":"Utf8","sum":41056},{"parent":"FUNC","rule":"Uuid","sum":453},{"parent":"FUNC","rule":"VALUES","sum":14},{"parent":"FUNC","rule":"VARIANCE","sum":31051},{"paren
t":"FUNC","rule":"VARIANCE_POPULATION","sum":3016},{"parent":"FUNC","rule":"VARIANCE_SAMPLE","sum":1240},{"parent":"FUNC","rule":"VARP","sum":146},{"parent":"FUNC","rule":"VARPOP","sum":15},{"parent":"FUNC","rule":"VAR_POP","sum":4},{"parent":"FUNC","rule":"VAR_SAMP","sum":44},{"parent":"FUNC","rule":"VERSION","sum":2},{"parent":"FUNC","rule":"Variance","sum":1146},{"parent":"FUNC","rule":"Variance_SAMPLE","sum":30},{"parent":"FUNC","rule":"Variance_Sample","sum":23},{"parent":"FUNC","rule":"Variance_sample","sum":24},{"parent":"FUNC","rule":"VariantType","sum":3175},{"parent":"FUNC","rule":"VariantTypeHandle","sum":8},{"parent":"FUNC","rule":"VariantUnderlyingType","sum":6519},{"parent":"FUNC","rule":"Version","sum":3},{"parent":"FUNC","rule":"Visit","sum":28},{"parent":"FUNC","rule":"Void","sum":92606},{"parent":"FUNC","rule":"WEAKFIELD","sum":834},{"parent":"FUNC","rule":"WEAK_FIELD","sum":374},{"parent":"FUNC","rule":"WEakField","sum":2},{"parent":"FUNC","rule":"Way","sum":8991},{"parent":"FUNC","rule":"WeakFIeld","sum":43},{"parent":"FUNC","rule":"WeakField","sum":21170482},{"parent":"FUNC","rule":"WeakFiled","sum":2},{"parent":"FUNC","rule":"Weakfield","sum":1912},{"parent":"FUNC","rule":"WorldCode","sum":372},{"parent":"FUNC","rule":"YPathDouble","sum":4},{"parent":"FUNC","rule":"YPathExtract","sum":4},{"parent":"FUNC","rule":"YPathInt64","sum":6},{"parent":"FUNC","rule":"YPathString","sum":4},{"parent":"FUNC","rule":"YSON","sum":756},{"parent":"FUNC","rule":"YSONExtractString","sum":6},{"parent":"FUNC","rule":"Yson","sum":608385},{"parent":"FUNC","rule":"aGGREGATE_LIST","sum":13},{"parent":"FUNC","rule":"aGGREGATE_LIST_DISTINCT","sum":4},{"parent":"FUNC","rule":"aGG_LIST","sum":2},{"parent":"FUNC","rule":"aGG_LIST_DISTINCT","sum":10},{"parent":"FUNC","rule":"aSSTRUCT","sum":1},{"parent":"FUNC","rule":"aSTuple","sum":3},{"parent":"FUNC","rule":"aVG","sum":2},{"parent":"FUNC","rule":"abs","sum":1550616},{"parent":"FUNC","rule":"addMember","sum":2582},{"parent"
:"FUNC","rule":"addTimezone","sum":782},{"parent":"FUNC","rule":"age","sum":2},{"parent":"FUNC","rule":"aggList","sum":265},{"parent":"FUNC","rule":"agg_LIST","sum":76},{"parent":"FUNC","rule":"agg_LIST_DISTINCT","sum":1},{"parent":"FUNC","rule":"agg_List","sum":546},{"parent":"FUNC","rule":"agg_List_distinct","sum":24},{"parent":"FUNC","rule":"agg_list","sum":1285452},{"parent":"FUNC","rule":"agg_list_DISTINCT","sum":2},{"parent":"FUNC","rule":"agg_list_Distinct","sum":1},{"parent":"FUNC","rule":"agg_list_distinct","sum":434951},{"parent":"FUNC","rule":"agg_set","sum":1},{"parent":"FUNC","rule":"agglist","sum":122},{"parent":"FUNC","rule":"agglistdistinct","sum":27},{"parent":"FUNC","rule":"aggr_list","sum":2670},{"parent":"FUNC","rule":"aggr_list_distinct","sum":18},{"parent":"FUNC","rule":"aggr_set","sum":2},{"parent":"FUNC","rule":"aggregATE_LIST","sum":16},{"parent":"FUNC","rule":"aggregateList","sum":369},{"parent":"FUNC","rule":"aggregateListDistinct","sum":37},{"parent":"FUNC","rule":"aggregate_List","sum":986},{"parent":"FUNC","rule":"aggregate_List_Distinct","sum":8},{"parent":"FUNC","rule":"aggregate_List_distinct","sum":10},{"parent":"FUNC","rule":"aggregate_by","sum":369867},{"parent":"FUNC","rule":"aggregate_list","sum":2507268},{"parent":"FUNC","rule":"aggregate_list_","sum":1},{"parent":"FUNC","rule":"aggregate_list_DISTINCT","sum":1790},{"parent":"FUNC","rule":"aggregate_list_Distinct","sum":27},{"parent":"FUNC","rule":"aggregate_list_distinct","sum":940324},{"parent":"FUNC","rule":"aggregatelist","sum":556},{"parent":"FUNC","rule":"aggregatetransforminput","sum":1},{"parent":"FUNC","rule":"aggregationFactory","sum":134},{"parent":"FUNC","rule":"aggregation_factory","sum":9007},{"parent":"FUNC","rule":"aggregationfactory","sum":77},{"parent":"FUNC","rule":"agregate_list_distinct","sum":1},{"parent":"FUNC","rule":"and","sum":5},{"parent":"FUNC","rule":"anyLast","sum":8},{"parent":"FUNC","rule":"argMax","sum":14},{"parent":"FUNC","rule":"arrayElement"
,"sum":1},{"parent":"FUNC","rule":"arrayJoin","sum":10},{"parent":"FUNC","rule":"arrayMax","sum":4},{"parent":"FUNC","rule":"array_agg","sum":5},{"parent":"FUNC","rule":"array_to_string","sum":2},{"parent":"FUNC","rule":"asDIct","sum":1},{"parent":"FUNC","rule":"asDict","sum":3801},{"parent":"FUNC","rule":"asLIST","sum":50},{"parent":"FUNC","rule":"asList","sum":175410},{"parent":"FUNC","rule":"asSet","sum":1978},{"parent":"FUNC","rule":"asStruct","sum":116578},{"parent":"FUNC","rule":"asTUPLE","sum":1},{"parent":"FUNC","rule":"asTagged","sum":2117},{"parent":"FUNC","rule":"asTuple","sum":83395},{"parent":"FUNC","rule":"asVariant","sum":10},{"parent":"FUNC","rule":"as_dict","sum":4},{"parent":"FUNC","rule":"as_list","sum":77},{"parent":"FUNC","rule":"as_struct","sum":122},{"parent":"FUNC","rule":"as_table","sum":30},{"parent":"FUNC","rule":"as_tagged","sum":1},{"parent":"FUNC","rule":"as_tuple","sum":367},{"parent":"FUNC","rule":"asdict","sum":3856},{"parent":"FUNC","rule":"asenum","sum":13},{"parent":"FUNC","rule":"aslist","sum":164797},{"parent":"FUNC","rule":"assessments_integralListReverse","sum":2},{"parent":"FUNC","rule":"asset","sum":1154},{"parent":"FUNC","rule":"asstruct","sum":12980},{"parent":"FUNC","rule":"assumeNotNull","sum":3},{"parent":"FUNC","rule":"astagged","sum":1383},{"parent":"FUNC","rule":"astuple","sum":30946},{"parent":"FUNC","rule":"asvariant","sum":3},{"parent":"FUNC","rule":"atan2","sum":4},{"parent":"FUNC","rule":"avG","sum":52},{"parent":"FUNC","rule":"avg","sum":6007241},{"parent":"FUNC","rule":"avgIf","sum":19330},{"parent":"FUNC","rule":"avg_","sum":1},{"parent":"FUNC","rule":"avg_IF","sum":155},{"parent":"FUNC","rule":"avg_If","sum":129},{"parent":"FUNC","rule":"avg_if","sum":497377},{"parent":"FUNC","rule":"ax","sum":13},{"parent":"FUNC","rule":"bit_or","sum":26592},{"parent":"FUNC","rule":"bool","sum":279},{"parent":"FUNC","rule":"bool_and","sum":67086},{"parent":"FUNC","rule":"bool_or","sum":237042},{"parent":"FUNC","rule":"bool_
xor","sum":2},{"parent":"FUNC","rule":"bottom","sum":10584},{"parent":"FUNC","rule":"bottom_by","sum":332186},{"parent":"FUNC","rule":"business_id","sum":1},{"parent":"FUNC","rule":"bytes","sum":2},{"parent":"FUNC","rule":"cOALESCE","sum":36},{"parent":"FUNC","rule":"cOUNT","sum":64},{"parent":"FUNC","rule":"cOUNT_IF","sum":40},{"parent":"FUNC","rule":"cOunt","sum":2},{"parent":"FUNC","rule":"ceil","sum":1},{"parent":"FUNC","rule":"char_LENGTH","sum":2},{"parent":"FUNC","rule":"char_length","sum":18},{"parent":"FUNC","rule":"check_google_id","sum":1},{"parent":"FUNC","rule":"choosemembers","sum":5},{"parent":"FUNC","rule":"client_id","sum":1},{"parent":"FUNC","rule":"cnt","sum":2},{"parent":"FUNC","rule":"coALESCE","sum":14},{"parent":"FUNC","rule":"coUNT","sum":3},{"parent":"FUNC","rule":"coUNt","sum":3},{"parent":"FUNC","rule":"coalESCE","sum":40},{"parent":"FUNC","rule":"coalescE","sum":1},{"parent":"FUNC","rule":"coalesce","sum":24980542},{"parent":"FUNC","rule":"coalescue","sum":2},{"parent":"FUNC","rule":"coalsece","sum":1},{"parent":"FUNC","rule":"combinemembers","sum":5},{"parent":"FUNC","rule":"concat","sum":26},{"parent":"FUNC","rule":"conunt","sum":1},{"parent":"FUNC","rule":"convert_to_360","sum":2},{"parent":"FUNC","rule":"corr","sum":705},{"parent":"FUNC","rule":"correlation","sum":3875},{"parent":"FUNC","rule":"cos","sum":8},{"parent":"FUNC","rule":"couNT","sum":1},{"parent":"FUNC","rule":"couNT_IF","sum":7},{"parent":"FUNC","rule":"counT","sum":24},{"parent":"FUNC","rule":"count","sum":38634912},{"parent":"FUNC","rule":"countDistinct","sum":1},{"parent":"FUNC","rule":"countDistinctEstimate","sum":272527},{"parent":"FUNC","rule":"countIF","sum":8373},{"parent":"FUNC","rule":"countIf","sum":1390204},{"parent":"FUNC","rule":"count_","sum":555},{"parent":"FUNC","rule":"count_IF","sum":34194},{"parent":"FUNC","rule":"count_If","sum":26486},{"parent":"FUNC","rule":"count_distinct_estimate","sum":33},{"parent":"FUNC","rule":"count_if","sum":18926527},{"pare
nt":"FUNC","rule":"countdistinctEstimate","sum":1793},{"parent":"FUNC","rule":"countdistinctestimate","sum":2835},{"parent":"FUNC","rule":"countif","sum":21244},{"parent":"FUNC","rule":"covar","sum":243},{"parent":"FUNC","rule":"covariance","sum":285},{"parent":"FUNC","rule":"covariance_sample","sum":165},{"parent":"FUNC","rule":"cpunt","sum":1},{"parent":"FUNC","rule":"cume_dist","sum":69},{"parent":"FUNC","rule":"currentTzDate","sum":3995},{"parent":"FUNC","rule":"currentTzTimestamp","sum":53},{"parent":"FUNC","rule":"currentUTCDATETIME","sum":19},{"parent":"FUNC","rule":"currentUTCDate","sum":657},{"parent":"FUNC","rule":"currentUTCDateTime","sum":305},{"parent":"FUNC","rule":"currentUTCdate","sum":364},{"parent":"FUNC","rule":"currentUTCdatetime","sum":20},{"parent":"FUNC","rule":"currentUTcdate","sum":1334},{"parent":"FUNC","rule":"currentUtcDate","sum":6288},{"parent":"FUNC","rule":"currentUtcDateTime","sum":14407},{"parent":"FUNC","rule":"currentUtcDatetime","sum":2376},{"parent":"FUNC","rule":"currentUtcTimestamp","sum":197},{"parent":"FUNC","rule":"current_utc_timestamp","sum":2},{"parent":"FUNC","rule":"currenttzdate","sum":418},{"parent":"FUNC","rule":"currenttzdatetime","sum":262},{"parent":"FUNC","rule":"currenttztimestamp","sum":47},{"parent":"FUNC","rule":"currentutcDateTime","sum":3},{"parent":"FUNC","rule":"currentutcdate","sum":118575},{"parent":"FUNC","rule":"currentutcdatetime","sum":58411},{"parent":"FUNC","rule":"currentutctimestamp","sum":48324},{"parent":"FUNC","rule":"d","sum":1},{"parent":"FUNC","rule":"dATE","sum":4},{"parent":"FUNC","rule":"date","sum":415478},{"parent":"FUNC","rule":"date32","sum":11},{"parent":"FUNC","rule":"dateDiff","sum":1},{"parent":"FUNC","rule":"dateNow","sum":2},{"parent":"FUNC","rule":"dateTIME","sum":2},{"parent":"FUNC","rule":"dateTime","sum":10},{"parent":"FUNC","rule":"date_add","sum":8},{"parent":"FUNC","rule":"date_format","sum":2},{"parent":"FUNC","rule":"date_from_ts","sum":1},{"parent":"FUNC","rule":"da
te_sub","sum":2},{"parent":"FUNC","rule":"dateadd","sum":1},{"parent":"FUNC","rule":"datetime","sum":11813},{"parent":"FUNC","rule":"datetime64","sum":19},{"parent":"FUNC","rule":"decimal","sum":339},{"parent":"FUNC","rule":"dense_RANK","sum":42},{"parent":"FUNC","rule":"dense_rank","sum":46285},{"parent":"FUNC","rule":"dictAggregate","sum":56},{"parent":"FUNC","rule":"dictContains","sum":7},{"parent":"FUNC","rule":"dictGetString","sum":1},{"parent":"FUNC","rule":"dictItems","sum":8443},{"parent":"FUNC","rule":"dictKeys","sum":1199},{"parent":"FUNC","rule":"dictLength","sum":104},{"parent":"FUNC","rule":"dictLookUp","sum":9345},{"parent":"FUNC","rule":"dictLookup","sum":675},{"parent":"FUNC","rule":"dictPayloads","sum":90},{"parent":"FUNC","rule":"dict_keys","sum":8696},{"parent":"FUNC","rule":"dictcontains","sum":344},{"parent":"FUNC","rule":"dictcreate","sum":2},{"parent":"FUNC","rule":"dicthasitems","sum":24},{"parent":"FUNC","rule":"dictitems","sum":2496},{"parent":"FUNC","rule":"dictkeys","sum":896},{"parent":"FUNC","rule":"dictlength","sum":6531},{"parent":"FUNC","rule":"dictlookup","sum":1585},{"parent":"FUNC","rule":"dictpayloads","sum":113},{"parent":"FUNC","rule":"disctinct","sum":2},{"parent":"FUNC","rule":"dol_show1","sum":1},{"parent":"FUNC","rule":"double","sum":134},{"parent":"FUNC","rule":"dynumber","sum":12},{"parent":"FUNC","rule":"each","sum":1},{"parent":"FUNC","rule":"empty","sum":3},{"parent":"FUNC","rule":"emptylist","sum":1},{"parent":"FUNC","rule":"endsWith","sum":2161},{"parent":"FUNC","rule":"endswith","sum":31546},{"parent":"FUNC","rule":"ensure","sum":408577},{"parent":"FUNC","rule":"ensuretype","sum":708},{"parent":"FUNC","rule":"evaluateCode","sum":52},{"parent":"FUNC","rule":"evaluateExpr","sum":294},{"parent":"FUNC","rule":"expandstruct","sum":20},{"parent":"FUNC","rule":"f","sum":1},{"parent":"FUNC","rule":"file_content","sum":10},{"parent":"FUNC","rule":"file_path","sum":1},{"parent":"FUNC","rule":"filecontent","sum":603},{"parent"
:"FUNC","rule":"filepath","sum":988},{"parent":"FUNC","rule":"filter","sum":2},{"parent":"FUNC","rule":"find","sum":4065628},{"parent":"FUNC","rule":"first_VALUE","sum":19},{"parent":"FUNC","rule":"first_route_timestamp","sum":1},{"parent":"FUNC","rule":"first_value","sum":1147813},{"parent":"FUNC","rule":"flatten","sum":1},{"parent":"FUNC","rule":"float","sum":23035},{"parent":"FUNC","rule":"floor","sum":1},{"parent":"FUNC","rule":"forceremovemember","sum":373},{"parent":"FUNC","rule":"format","sum":1},{"parent":"FUNC","rule":"formatType","sum":8},{"parent":"FUNC","rule":"formattype","sum":146},{"parent":"FUNC","rule":"fromBytes","sum":13},{"parent":"FUNC","rule":"fromPg","sum":130},{"parent":"FUNC","rule":"fromUnixTimestamp64Micro","sum":1},{"parent":"FUNC","rule":"from_bytes","sum":2},{"parent":"FUNC","rule":"frombytes","sum":47},{"parent":"FUNC","rule":"frompg","sum":129},{"parent":"FUNC","rule":"gatherMembers","sum":67},{"parent":"FUNC","rule":"gathermembers","sum":7},{"parent":"FUNC","rule":"get_auto_label","sum":1},{"parent":"FUNC","rule":"get_html","sum":1},{"parent":"FUNC","rule":"get_is_in_collection_feature","sum":1},{"parent":"FUNC","rule":"get_metrika_bro","sum":1},{"parent":"FUNC","rule":"get_pay_processing","sum":2},{"parent":"FUNC","rule":"get_post_profiles","sum":1},{"parent":"FUNC","rule":"get_rewrite_prompt","sum":1},{"parent":"FUNC","rule":"get_support_line","sum":1},{"parent":"FUNC","rule":"get_test_id","sum":7},{"parent":"FUNC","rule":"getdate","sum":2},{"parent":"FUNC","rule":"greatest","sum":318150},{"parent":"FUNC","rule":"groupArray","sum":4},{"parent":"FUNC","rule":"groupUniqArray","sum":2},{"parent":"FUNC","rule":"grouping","sum":17591},{"parent":"FUNC","rule":"hISTOGRAM","sum":2},{"parent":"FUNC","rule":"has","sum":16},{"parent":"FUNC","rule":"histOGRAM","sum":4},{"parent":"FUNC","rule":"histograM","sum":359},{"parent":"FUNC","rule":"histogram","sum":52782},{"parent":"FUNC","rule":"histogramcdf","sum":84},{"parent":"FUNC","rule":"hll","s
um":82167},{"parent":"FUNC","rule":"iF","sum":2688},{"parent":"FUNC","rule":"iNtErVaL","sum":1},{"parent":"FUNC","rule":"if","sum":43171932},{"parent":"FUNC","rule":"ifNull","sum":6},{"parent":"FUNC","rule":"in","sum":13},{"parent":"FUNC","rule":"indexOf","sum":6},{"parent":"FUNC","rule":"instanceof","sum":55},{"parent":"FUNC","rule":"instr","sum":1},{"parent":"FUNC","rule":"int","sum":32302},{"parent":"FUNC","rule":"int32","sum":2},{"parent":"FUNC","rule":"int64","sum":19},{"parent":"FUNC","rule":"int8","sum":2},{"parent":"FUNC","rule":"intervaL","sum":9360},{"parent":"FUNC","rule":"interval","sum":1798205},{"parent":"FUNC","rule":"interval64","sum":7},{"parent":"FUNC","rule":"isNull","sum":7},{"parent":"FUNC","rule":"is_allowed_in_kz","sum":2},{"parent":"FUNC","rule":"is_valid_intent","sum":4},{"parent":"FUNC","rule":"is_valid_organic","sum":4},{"parent":"FUNC","rule":"istLast","sum":1},{"parent":"FUNC","rule":"isum","sum":6},{"parent":"FUNC","rule":"joinTableRow","sum":5},{"parent":"FUNC","rule":"jointablerow","sum":215},{"parent":"FUNC","rule":"json","sum":5614},{"parent":"FUNC","rule":"json_extract","sum":3},{"parent":"FUNC","rule":"json_object_agg","sum":1},{"parent":"FUNC","rule":"jsondocument","sum":6},{"parent":"FUNC","rule":"just","sum":244471},{"parent":"FUNC","rule":"lAG","sum":2250},{"parent":"FUNC","rule":"lEAD","sum":6796},{"parent":"FUNC","rule":"lEN","sum":30},{"parent":"FUNC","rule":"lINEARHISTOGRAM","sum":115},{"parent":"FUNC","rule":"lISTlENGTH","sum":2},{"parent":"FUNC","rule":"lISTlength","sum":1},{"parent":"FUNC","rule":"lag","sum":1012017},{"parent":"FUNC","rule":"last_VALUE","sum":14},{"parent":"FUNC","rule":"last_value","sum":1088627},{"parent":"FUNC","rule":"lead","sum":928421},{"parent":"FUNC","rule":"least","sum":374625},{"parent":"FUNC","rule":"len","sum":712039},{"parent":"FUNC","rule":"lenGTH","sum":1},{"parent":"FUNC","rule":"lenght","sum":1},{"parent":"FUNC","rule":"lengtH","sum":2},{"parent":"FUNC","rule":"length","sum":2548583},{"
parent":"FUNC","rule":"like","sum":4},{"parent":"FUNC","rule":"likely","sum":16461},{"parent":"FUNC","rule":"linearHISTOGRAM","sum":10},{"parent":"FUNC","rule":"linearHistogram","sum":11},{"parent":"FUNC","rule":"linearhistogram","sum":171},{"parent":"FUNC","rule":"linearhistogramcdf","sum":20},{"parent":"FUNC","rule":"listALL","sum":3},{"parent":"FUNC","rule":"listAVG","sum":887},{"parent":"FUNC","rule":"listAggregateUnique","sum":1},{"parent":"FUNC","rule":"listAll","sum":86},{"parent":"FUNC","rule":"listAny","sum":121},{"parent":"FUNC","rule":"listAvg","sum":102},{"parent":"FUNC","rule":"listCollect","sum":378},{"parent":"FUNC","rule":"listConcat","sum":11083},{"parent":"FUNC","rule":"listEnumerate","sum":132},{"parent":"FUNC","rule":"listExtend","sum":6680},{"parent":"FUNC","rule":"listExtract","sum":24},{"parent":"FUNC","rule":"listFilter","sum":39682},{"parent":"FUNC","rule":"listFlatten","sum":289},{"parent":"FUNC","rule":"listFold","sum":300},{"parent":"FUNC","rule":"listFromRange","sum":770},{"parent":"FUNC","rule":"listHAs","sum":52},{"parent":"FUNC","rule":"listHas","sum":41485},{"parent":"FUNC","rule":"listHasItems","sum":3240},{"parent":"FUNC","rule":"listHead","sum":2187},{"parent":"FUNC","rule":"listIndexOf","sum":39},{"parent":"FUNC","rule":"listLENGTH","sum":225},{"parent":"FUNC","rule":"listLENgth","sum":2},{"parent":"FUNC","rule":"listLast","sum":1011},{"parent":"FUNC","rule":"listLength","sum":29751},{"parent":"FUNC","rule":"listMAX","sum":1},{"parent":"FUNC","rule":"listMIN","sum":1},{"parent":"FUNC","rule":"listMap","sum":53473},{"parent":"FUNC","rule":"listMax","sum":813},{"parent":"FUNC","rule":"listMin","sum":170},{"parent":"FUNC","rule":"listNotNull","sum":279},{"parent":"FUNC","rule":"listReverse","sum":4320},{"parent":"FUNC","rule":"listSkip","sum":18},{"parent":"FUNC","rule":"listSort","sum":23329},{"parent":"FUNC","rule":"listSortAsc","sum":2},{"parent":"FUNC","rule":"listSortDesc","sum":199},{"parent":"FUNC","rule":"listSum","sum":1472
},{"parent":"FUNC","rule":"listTake","sum":734},{"parent":"FUNC","rule":"listTopSort","sum":18},{"parent":"FUNC","rule":"listUniq","sum":10540},{"parent":"FUNC","rule":"listUniqStable","sum":1},{"parent":"FUNC","rule":"listZip","sum":1339},{"parent":"FUNC","rule":"listZipAll","sum":1424},{"parent":"FUNC","rule":"list_Length","sum":1},{"parent":"FUNC","rule":"list_MAX","sum":1},{"parent":"FUNC","rule":"list_agg","sum":2},{"parent":"FUNC","rule":"list_avg","sum":25},{"parent":"FUNC","rule":"list_concat","sum":262},{"parent":"FUNC","rule":"list_filter","sum":2},{"parent":"FUNC","rule":"list_flatten","sum":15},{"parent":"FUNC","rule":"list_has","sum":6239},{"parent":"FUNC","rule":"list_has_items","sum":2},{"parent":"FUNC","rule":"list_head","sum":16},{"parent":"FUNC","rule":"list_length","sum":865},{"parent":"FUNC","rule":"list_map","sum":3},{"parent":"FUNC","rule":"list_max","sum":3},{"parent":"FUNC","rule":"list_min","sum":3},{"parent":"FUNC","rule":"list_not_null","sum":1},{"parent":"FUNC","rule":"list_sort","sum":112},{"parent":"FUNC","rule":"list_uniq","sum":1},{"parent":"FUNC","rule":"list_zip","sum":30},{"parent":"FUNC","rule":"listaggregate","sum":95},{"parent":"FUNC","rule":"listall","sum":13228},{"parent":"FUNC","rule":"listany","sum":15147},{"parent":"FUNC","rule":"listavg","sum":11752},{"parent":"FUNC","rule":"listcollect","sum":505},{"parent":"FUNC","rule":"listconcat","sum":19388},{"parent":"FUNC","rule":"listcreate","sum":10},{"parent":"FUNC","rule":"listenumerate","sum":2076},{"parent":"FUNC","rule":"listextend","sum":3845},{"parent":"FUNC","rule":"listextendstrict","sum":61},{"parent":"FUNC","rule":"listextract","sum":1831},{"parent":"FUNC","rule":"listfilter","sum":102248},{"parent":"FUNC","rule":"listflatmap","sum":7221},{"parent":"FUNC","rule":"listflatten","sum":22581},{"parent":"FUNC","rule":"listfold","sum":16},{"parent":"FUNC","rule":"listfold1map","sum":60},{"parent":"FUNC","rule":"listfromRange","sum":15},{"parent":"FUNC","rule":"listfromrange"
,"sum":14073},{"parent":"FUNC","rule":"listfromtuple","sum":58},{"parent":"FUNC","rule":"listhas","sum":172035},{"parent":"FUNC","rule":"listhasItems","sum":34},{"parent":"FUNC","rule":"listhasitems","sum":10543},{"parent":"FUNC","rule":"listhead","sum":18546},{"parent":"FUNC","rule":"listindexof","sum":1206},{"parent":"FUNC","rule":"listlast","sum":3097},{"parent":"FUNC","rule":"listlength","sum":343216},{"parent":"FUNC","rule":"listmap","sum":517934},{"parent":"FUNC","rule":"listmax","sum":3950},{"parent":"FUNC","rule":"listmin","sum":2669},{"parent":"FUNC","rule":"listnotNull","sum":1},{"parent":"FUNC","rule":"listnotnull","sum":12906},{"parent":"FUNC","rule":"listreplicate","sum":49},{"parent":"FUNC","rule":"listreverse","sum":2241},{"parent":"FUNC","rule":"listskip","sum":1176},{"parent":"FUNC","rule":"listsort","sum":56414},{"parent":"FUNC","rule":"listsortDesc","sum":1},{"parent":"FUNC","rule":"listsortasc","sum":383},{"parent":"FUNC","rule":"listsortdesc","sum":3962},{"parent":"FUNC","rule":"listsum","sum":3416},{"parent":"FUNC","rule":"listtake","sum":16090},{"parent":"FUNC","rule":"listtop","sum":68},{"parent":"FUNC","rule":"listunionall","sum":8},{"parent":"FUNC","rule":"listuniq","sum":25574},{"parent":"FUNC","rule":"listuniqstable","sum":94},{"parent":"FUNC","rule":"listzip","sum":16664},{"parent":"FUNC","rule":"listzipALL","sum":2},{"parent":"FUNC","rule":"listzipAll","sum":11},{"parent":"FUNC","rule":"listzipall","sum":99},{"parent":"FUNC","rule":"log","sum":2},{"parent":"FUNC","rule":"logarithmicHistogram","sum":1},{"parent":"FUNC","rule":"logarithmichistogram","sum":6},{"parent":"FUNC","rule":"loghistogram","sum":1},{"parent":"FUNC","rule":"lower","sum":3},{"parent":"FUNC","rule":"mAX","sum":17},{"parent":"FUNC","rule":"mAX_BY","sum":131},{"parent":"FUNC","rule":"mIN","sum":2},{"parent":"FUNC","rule":"mIN_by","sum":10},{"parent":"FUNC","rule":"maX","sum":6},{"parent":"FUNC","rule":"maX_BY","sum":1},{"parent":"FUNC","rule":"map","sum":2},{"parent":"F
UNC","rule":"max","sum":25147259},{"parent":"FUNC","rule":"maxBy","sum":1},{"parent":"FUNC","rule":"maxOf","sum":3529},{"parent":"FUNC","rule":"max_","sum":1},{"parent":"FUNC","rule":"max_BY","sum":2357},{"parent":"FUNC","rule":"max_By","sum":4552},{"parent":"FUNC","rule":"max_OF","sum":8},{"parent":"FUNC","rule":"max_Of","sum":29},{"parent":"FUNC","rule":"max_by","sum":28967975},{"parent":"FUNC","rule":"max_if","sum":3},{"parent":"FUNC","rule":"max_of","sum":1282982},{"parent":"FUNC","rule":"maxby","sum":4208},{"parent":"FUNC","rule":"maxof","sum":852},{"parent":"FUNC","rule":"md5int","sum":1},{"parent":"FUNC","rule":"median","sum":381741},{"parent":"FUNC","rule":"metric_exp","sum":1},{"parent":"FUNC","rule":"min","sum":10259043},{"parent":"FUNC","rule":"minOf","sum":2},{"parent":"FUNC","rule":"min_BY","sum":8617},{"parent":"FUNC","rule":"min_By","sum":6},{"parent":"FUNC","rule":"min_OF","sum":9},{"parent":"FUNC","rule":"min_Of","sum":343},{"parent":"FUNC","rule":"min_by","sum":2330282},{"parent":"FUNC","rule":"min_if","sum":10},{"parent":"FUNC","rule":"min_of","sum":577330},{"parent":"FUNC","rule":"minby","sum":1},{"parent":"FUNC","rule":"minof","sum":98},{"parent":"FUNC","rule":"mode","sum":117583},{"parent":"FUNC","rule":"multiIf","sum":2},{"parent":"FUNC","rule":"multi_aggregate_by","sum":117499},{"parent":"FUNC","rule":"nanvl","sum":129162},{"parent":"FUNC","rule":"notEmpty","sum":2},{"parent":"FUNC","rule":"nothing","sum":12869},{"parent":"FUNC","rule":"now","sum":16},{"parent":"FUNC","rule":"nth_value","sum":17},{"parent":"FUNC","rule":"ntile","sum":339},{"parent":"FUNC","rule":"nvL","sum":62},{"parent":"FUNC","rule":"nvl","sum":14029600},{"parent":"FUNC","rule":"on","sum":1},{"parent":"FUNC","rule":"optionaltype","sum":161},{"parent":"FUNC","rule":"or","sum":3},{"parent":"FUNC","rule":"order_nr","sum":1},{"parent":"FUNC","rule":"p25","sum":1},{"parent":"FUNC","rule":"p75","sum":1},{"parent":"FUNC","rule":"pERCENTILE","sum":11},{"parent":"FUNC","rule":"parse
File","sum":150},{"parent":"FUNC","rule":"parseForErrors","sum":32},{"parent":"FUNC","rule":"parse_dt_formatted","sum":1},{"parent":"FUNC","rule":"parsefile","sum":11196},{"parent":"FUNC","rule":"percent_rank","sum":2192},{"parent":"FUNC","rule":"percentile","sum":3352530},{"parent":"FUNC","rule":"pgInt2","sum":2},{"parent":"FUNC","rule":"pgarray","sum":2},{"parent":"FUNC","rule":"pgbpchar","sum":2},{"parent":"FUNC","rule":"pgbytea","sum":6},{"parent":"FUNC","rule":"pgcast","sum":39},{"parent":"FUNC","rule":"pgchar","sum":2},{"parent":"FUNC","rule":"pgdate","sum":94},{"parent":"FUNC","rule":"pgfloat4","sum":5},{"parent":"FUNC","rule":"pgfloat8","sum":3},{"parent":"FUNC","rule":"pgint2","sum":9},{"parent":"FUNC","rule":"pginterval","sum":176},{"parent":"FUNC","rule":"pgjson","sum":10},{"parent":"FUNC","rule":"pgname","sum":4},{"parent":"FUNC","rule":"pgnumeric","sum":4},{"parent":"FUNC","rule":"pgoidvector","sum":1},{"parent":"FUNC","rule":"pgtext","sum":9},{"parent":"FUNC","rule":"pgtimestamp","sum":7},{"parent":"FUNC","rule":"pgtimestamptz","sum":4},{"parent":"FUNC","rule":"pickle","sum":501},{"parent":"FUNC","rule":"pow","sum":8},{"parent":"FUNC","rule":"power","sum":3},{"parent":"FUNC","rule":"quantile","sum":1},{"parent":"FUNC","rule":"quantileExact","sum":1},{"parent":"FUNC","rule":"rFIND","sum":2},{"parent":"FUNC","rule":"rand","sum":5},{"parent":"FUNC","rule":"random","sum":260140},{"parent":"FUNC","rule":"randomNumber","sum":10},{"parent":"FUNC","rule":"randomUuid","sum":152},{"parent":"FUNC","rule":"random_number","sum":11},{"parent":"FUNC","rule":"randomnumber","sum":477},{"parent":"FUNC","rule":"randomuuid","sum":37},{"parent":"FUNC","rule":"range","sum":97},{"parent":"FUNC","rule":"rank","sum":222308},{"parent":"FUNC","rule":"regex_full_match","sum":1},{"parent":"FUNC","rule":"regex_replace_first","sum":1},{"parent":"FUNC","rule":"regionIn","sum":2},{"parent":"FUNC","rule":"removeMember","sum":160},{"parent":"FUNC","rule":"removemember","sum":18},{"paren
t":"FUNC","rule":"removemembers","sum":77},{"parent":"FUNC","rule":"removetimezone","sum":5},{"parent":"FUNC","rule":"renamemembers","sum":4},{"parent":"FUNC","rule":"replace","sum":1},{"parent":"FUNC","rule":"replaceRegexpAll","sum":2},{"parent":"FUNC","rule":"rfind","sum":127982},{"parent":"FUNC","rule":"round","sum":27},{"parent":"FUNC","rule":"row_NUMBER","sum":5},{"parent":"FUNC","rule":"row_Number","sum":3},{"parent":"FUNC","rule":"row_number","sum":2178991},{"parent":"FUNC","rule":"rownumber","sum":388},{"parent":"FUNC","rule":"sUBSTRING","sum":3},{"parent":"FUNC","rule":"sUM","sum":444},{"parent":"FUNC","rule":"sUM_IF","sum":20},{"parent":"FUNC","rule":"sUm","sum":8},{"parent":"FUNC","rule":"sessionWindow","sum":4},{"parent":"FUNC","rule":"session_start","sum":3},{"parent":"FUNC","rule":"sessionwindow","sum":171},{"parent":"FUNC","rule":"setDifference","sum":59},{"parent":"FUNC","rule":"setIntersection","sum":13},{"parent":"FUNC","rule":"setIsDisjoint","sum":1},{"parent":"FUNC","rule":"setUnion","sum":172},{"parent":"FUNC","rule":"setbit","sum":20},{"parent":"FUNC","rule":"setdifference","sum":171},{"parent":"FUNC","rule":"setincludes","sum":20},{"parent":"FUNC","rule":"setintersection","sum":120},{"parent":"FUNC","rule":"setisdisjoint","sum":1060},{"parent":"FUNC","rule":"setsymmetricdifference","sum":204},{"parent":"FUNC","rule":"setunion","sum":1787},{"parent":"FUNC","rule":"sign","sum":1},{"parent":"FUNC","rule":"sin","sum":16},{"parent":"FUNC","rule":"sipHash64","sum":2},{"parent":"FUNC","rule":"size","sum":1},{"parent":"FUNC","rule":"somE","sum":2},{"parent":"FUNC","rule":"some","sum":20342393},{"parent":"FUNC","rule":"somr","sum":1},{"parent":"FUNC","rule":"splitByChar","sum":1},{"parent":"FUNC","rule":"splitByString","sum":6},{"parent":"FUNC","rule":"spreadmembers","sum":3},{"parent":"FUNC","rule":"sqrt","sum":11},{"parent":"FUNC","rule":"ssubstring","sum":2},{"parent":"FUNC","rule":"stablepickle","sum":30},{"parent":"FUNC","rule":"startsWith","sum":
200139},{"parent":"FUNC","rule":"starts_with","sum":14},{"parent":"FUNC","rule":"startswith","sum":20678},{"parent":"FUNC","rule":"staticmap","sum":22},{"parent":"FUNC","rule":"staticzip","sum":1},{"parent":"FUNC","rule":"status$$name","sum":3},{"parent":"FUNC","rule":"std_dev","sum":4},{"parent":"FUNC","rule":"stddev","sum":619643},{"parent":"FUNC","rule":"stddevPop","sum":13},{"parent":"FUNC","rule":"stddev_pop","sum":108},{"parent":"FUNC","rule":"stddev_population","sum":88},{"parent":"FUNC","rule":"stddev_samp","sum":27},{"parent":"FUNC","rule":"stddev_sample","sum":886},{"parent":"FUNC","rule":"stddevpop","sum":19},{"parent":"FUNC","rule":"stddevsamp","sum":5},{"parent":"FUNC","rule":"str","sum":24},{"parent":"FUNC","rule":"strfdate","sum":164},{"parent":"FUNC","rule":"string","sum":77},{"parent":"FUNC","rule":"string_agg","sum":1},{"parent":"FUNC","rule":"string_split","sum":1},{"parent":"FUNC","rule":"string_to_array","sum":3},{"parent":"FUNC","rule":"string_to_features","sum":1},{"parent":"FUNC","rule":"structMembers","sum":7},{"parent":"FUNC","rule":"structUnion","sum":41},{"parent":"FUNC","rule":"structdifference","sum":2},{"parent":"FUNC","rule":"structunion","sum":7},{"parent":"FUNC","rule":"suM","sum":184},{"parent":"FUNC","rule":"suM_if","sum":16},{"parent":"FUNC","rule":"subDate","sum":1},{"parent":"FUNC","rule":"subSTRING","sum":1},{"parent":"FUNC","rule":"subString","sum":580},{"parent":"FUNC","rule":"subqueryMergeFor","sum":21},{"parent":"FUNC","rule":"subqueryUnionMergeFor","sum":4410},{"parent":"FUNC","rule":"subquerymergefor","sum":1936},{"parent":"FUNC","rule":"subsTRING","sum":2},{"parent":"FUNC","rule":"subsrting","sum":3},{"parent":"FUNC","rule":"substing","sum":2},{"parent":"FUNC","rule":"substr","sum":173},{"parent":"FUNC","rule":"substring","sum":16531291},{"parent":"FUNC","rule":"substringUTF8","sum":1},{"parent":"FUNC","rule":"substring_index","sum":1},{"parent":"FUNC","rule":"sum","sum":45451451},{"parent":"FUNC","rule":"sumIF","sum":8
4},{"parent":"FUNC","rule":"sumIf","sum":94714},{"parent":"FUNC","rule":"sum_","sum":356},{"parent":"FUNC","rule":"sum_IF","sum":19568},{"parent":"FUNC","rule":"sum_If","sum":13003},{"parent":"FUNC","rule":"sum_if","sum":4235537},{"parent":"FUNC","rule":"sum_range2","sum":47},{"parent":"FUNC","rule":"sum_recursive_range","sum":1},{"parent":"FUNC","rule":"suma","sum":1},{"parent":"FUNC","rule":"sumif","sum":7479},{"parent":"FUNC","rule":"summ","sum":10},{"parent":"FUNC","rule":"sunstring","sum":2},{"parent":"FUNC","rule":"susbstring","sum":1},{"parent":"FUNC","rule":"tableName","sum":43245},{"parent":"FUNC","rule":"tablePath","sum":1480},{"parent":"FUNC","rule":"tableRecordIndex","sum":19},{"parent":"FUNC","rule":"tableRow","sum":22824},{"parent":"FUNC","rule":"table_name","sum":2894},{"parent":"FUNC","rule":"table_path","sum":46},{"parent":"FUNC","rule":"table_row","sum":377},{"parent":"FUNC","rule":"tablename","sum":165603},{"parent":"FUNC","rule":"tablepath","sum":32961},{"parent":"FUNC","rule":"tablerecordindex","sum":111},{"parent":"FUNC","rule":"tablerow","sum":42717},{"parent":"FUNC","rule":"tablerows","sum":5},{"parent":"FUNC","rule":"testBit","sum":34},{"parent":"FUNC","rule":"testbit","sum":38968},{"parent":"FUNC","rule":"testid","sum":2},{"parent":"FUNC","rule":"timestamp","sum":9971},{"parent":"FUNC","rule":"timestamp64","sum":7},{"parent":"FUNC","rule":"timezone","sum":2},{"parent":"FUNC","rule":"toBytes","sum":25421},{"parent":"FUNC","rule":"toDate","sum":57},{"parent":"FUNC","rule":"toDate32","sum":10},{"parent":"FUNC","rule":"toDateTime","sum":21},{"parent":"FUNC","rule":"toDateTimeOrNull","sum":3},{"parent":"FUNC","rule":"toDayOfWeek","sum":2},{"parent":"FUNC","rule":"toDict","sum":96254},{"parent":"FUNC","rule":"toFloat32","sum":4},{"parent":"FUNC","rule":"toInt128","sum":3},{"parent":"FUNC","rule":"toIntervalMonth","sum":2},{"parent":"FUNC","rule":"toLastDayOfMonth","sum":1},{"parent":"FUNC","rule":"toMonth","sum":1},{"parent":"FUNC","rule":"toMult
iDict","sum":264},{"parent":"FUNC","rule":"toQuarter","sum":1},{"parent":"FUNC","rule":"toSet","sum":435881},{"parent":"FUNC","rule":"toStartOfMonth","sum":15},{"parent":"FUNC","rule":"toStartOfQuarter","sum":1},{"parent":"FUNC","rule":"toStartOfWeek","sum":9},{"parent":"FUNC","rule":"toString","sum":46},{"parent":"FUNC","rule":"toUInt64","sum":5},{"parent":"FUNC","rule":"toUnixTimestamp","sum":2},{"parent":"FUNC","rule":"toUnixTimestamp64Micro","sum":2},{"parent":"FUNC","rule":"toYear","sum":9},{"parent":"FUNC","rule":"to_bytes","sum":36},{"parent":"FUNC","rule":"to_char","sum":2},{"parent":"FUNC","rule":"to_date","sum":4},{"parent":"FUNC","rule":"to_dict","sum":65},{"parent":"FUNC","rule":"tobytes","sum":154},{"parent":"FUNC","rule":"today","sum":1},{"parent":"FUNC","rule":"todict","sum":100072},{"parent":"FUNC","rule":"tomultidict","sum":1157},{"parent":"FUNC","rule":"top","sum":54295},{"parent":"FUNC","rule":"topFreq","sum":94},{"parent":"FUNC","rule":"top_BY","sum":1},{"parent":"FUNC","rule":"top_by","sum":115898},{"parent":"FUNC","rule":"top_freq","sum":113},{"parent":"FUNC","rule":"topfreq","sum":23272},{"parent":"FUNC","rule":"topg","sum":2},{"parent":"FUNC","rule":"toset","sum":38682},{"parent":"FUNC","rule":"trunc","sum":13},{"parent":"FUNC","rule":"truncate","sum":2},{"parent":"FUNC","rule":"tryMember","sum":527},{"parent":"FUNC","rule":"trymember","sum":10121},{"parent":"FUNC","rule":"tupleElement","sum":2},{"parent":"FUNC","rule":"typeOf","sum":38},{"parent":"FUNC","rule":"typeof","sum":407},{"parent":"FUNC","rule":"tzdate","sum":8},{"parent":"FUNC","rule":"tzdate32","sum":7},{"parent":"FUNC","rule":"tzdatetime","sum":44},{"parent":"FUNC","rule":"tzdatetime64","sum":7},{"parent":"FUNC","rule":"tztimestamp","sum":35},{"parent":"FUNC","rule":"tztimestamp64","sum":7},{"parent":"FUNC","rule":"uNWRAP","sum":88},{"parent":"FUNC","rule":"udaf","sum":17656},{"parent":"FUNC","rule":"uint32","sum":13540},{"parent":"FUNC","rule":"uint64","sum":31},{"parent":"FUNC"
,"rule":"uint8","sum":2},{"parent":"FUNC","rule":"unWRap","sum":3},{"parent":"FUNC","rule":"unWrap","sum":5},{"parent":"FUNC","rule":"uniq","sum":9},{"parent":"FUNC","rule":"uniqExact","sum":10},{"parent":"FUNC","rule":"unique","sum":1},{"parent":"FUNC","rule":"unique_pairs","sum":1},{"parent":"FUNC","rule":"unnest","sum":3},{"parent":"FUNC","rule":"untag","sum":1363},{"parent":"FUNC","rule":"unwrap","sum":27076605},{"parent":"FUNC","rule":"unwraped","sum":1},{"parent":"FUNC","rule":"upper","sum":2},{"parent":"FUNC","rule":"using","sum":12},{"parent":"FUNC","rule":"utc_action_created_dttm","sum":1},{"parent":"FUNC","rule":"utf8","sum":2039},{"parent":"FUNC","rule":"uuid","sum":25},{"parent":"FUNC","rule":"values","sum":6},{"parent":"FUNC","rule":"varPop","sum":77},{"parent":"FUNC","rule":"varSamp","sum":85},{"parent":"FUNC","rule":"var_samp","sum":43},{"parent":"FUNC","rule":"variance","sum":66705},{"parent":"FUNC","rule":"variance_population","sum":4},{"parent":"FUNC","rule":"variance_sample","sum":512},{"parent":"FUNC","rule":"varpop","sum":19},{"parent":"FUNC","rule":"version","sum":19},{"parent":"FUNC","rule":"vl","sum":2},{"parent":"FUNC","rule":"void","sum":1},{"parent":"FUNC","rule":"way","sum":41571},{"parent":"FUNC","rule":"weakField","sum":1077},{"parent":"FUNC","rule":"weakfield","sum":979276},{"parent":"FUNC","rule":"windowFunnel","sum":1},{"parent":"FUNC","rule":"worked_rules","sum":3},{"parent":"FUNC","rule":"wrap","sum":1},{"parent":"FUNC","rule":"yesterday","sum":4},{"parent":"FUNC","rule":"yson","sum":42},{"parent":"FUNC","rule":"ytListTables","sum":1},{"parent":"MODULE","rule":"Compress","sum":84348},{"parent":"MODULE","rule":"DATETIME","sum":1129},{"parent":"MODULE","rule":"DATEtime","sum":2},{"parent":"MODULE","rule":"DAteTime","sum":3242},{"parent":"MODULE","rule":"DAtetime","sum":7},{"parent":"MODULE","rule":"DIgest","sum":2},{"parent":"MODULE","rule":"DaTETIME","sum":335},{"parent":"MODULE","rule":"DaTeTime","sum":84},{"parent":"MODULE","rule"
:"DateTIME","sum":723},{"parent":"MODULE","rule":"DateTIme","sum":4334},{"parent":"MODULE","rule":"DateTime","sum":275526739},{"parent":"MODULE","rule":"DatetIme","sum":367},{"parent":"MODULE","rule":"Datetime","sum":7098291},{"parent":"MODULE","rule":"Decompress","sum":24693},{"parent":"MODULE","rule":"Digest","sum":7702122},{"parent":"MODULE","rule":"HyperScan","sum":2310},{"parent":"MODULE","rule":"Hyperscan","sum":389922},{"parent":"MODULE","rule":"Ip","sum":1351732},{"parent":"MODULE","rule":"JSON","sum":26918},{"parent":"MODULE","rule":"JSon","sum":2},{"parent":"MODULE","rule":"Json","sum":1015827},{"parent":"MODULE","rule":"MATH","sum":31},{"parent":"MODULE","rule":"Math","sum":44852436},{"parent":"MODULE","rule":"PG","sum":198},{"parent":"MODULE","rule":"PIRE","sum":29},{"parent":"MODULE","rule":"Pg","sum":4885},{"parent":"MODULE","rule":"PgAgg","sum":2},{"parent":"MODULE","rule":"PgProc","sum":2},{"parent":"MODULE","rule":"Pire","sum":1913153},{"parent":"MODULE","rule":"Protobuf","sum":269851},{"parent":"MODULE","rule":"RE2","sum":5121},{"parent":"MODULE","rule":"Re2","sum":12179227},{"parent":"MODULE","rule":"STRING","sum":6},{"parent":"MODULE","rule":"String","sum":95380120},{"parent":"MODULE","rule":"TryDecompress","sum":4743},{"parent":"MODULE","rule":"URL","sum":7},{"parent":"MODULE","rule":"Unicode","sum":4909149},{"parent":"MODULE","rule":"Url","sum":23522080},{"parent":"MODULE","rule":"YSON","sum":149},{"parent":"MODULE","rule":"YSon","sum":17},{"parent":"MODULE","rule":"Yson","sum":395219884},{"parent":"MODULE","rule":"dateTime","sum":467},{"parent":"MODULE","rule":"datetime","sum":14180},{"parent":"MODULE","rule":"digest","sum":15},{"parent":"MODULE","rule":"json","sum":8},{"parent":"MODULE","rule":"math","sum":54},{"parent":"MODULE","rule":"pg","sum":1786},{"parent":"MODULE","rule":"pire","sum":36},{"parent":"MODULE","rule":"re2","sum":3195},{"parent":"MODULE","rule":"string","sum":75},{"parent":"MODULE","rule":"url","sum":5},{"parent":"MODULE","
rule":"ySoN","sum":1},{"parent":"MODULE","rule":"yson","sum":61},{"parent":"MODULE_FUNC","rule":"Compress::BZip2","sum":2},{"parent":"MODULE_FUNC","rule":"Compress::BlockCodec","sum":6},{"parent":"MODULE_FUNC","rule":"Compress::Brotli","sum":160},{"parent":"MODULE_FUNC","rule":"Compress::Gzip","sum":83366},{"parent":"MODULE_FUNC","rule":"Compress::Lz4","sum":620},{"parent":"MODULE_FUNC","rule":"Compress::Lzma","sum":7},{"parent":"MODULE_FUNC","rule":"Compress::Snappy","sum":7},{"parent":"MODULE_FUNC","rule":"Compress::Zlib","sum":32},{"parent":"MODULE_FUNC","rule":"Compress::Zstd","sum":148},{"parent":"MODULE_FUNC","rule":"DATETIME::Format","sum":10},{"parent":"MODULE_FUNC","rule":"DATETIME::FromMilliseconds","sum":49},{"parent":"MODULE_FUNC","rule":"DATETIME::FromSeconds","sum":1},{"parent":"MODULE_FUNC","rule":"DATETIME::GetYear","sum":18},{"parent":"MODULE_FUNC","rule":"DATETIME::MakeDate","sum":742},{"parent":"MODULE_FUNC","rule":"DATETIME::MakeDatetime","sum":286},{"parent":"MODULE_FUNC","rule":"DATETIME::Parse","sum":9},{"parent":"MODULE_FUNC","rule":"DATETIME::StartOfWeek","sum":14},{"parent":"MODULE_FUNC","rule":"DATEtime::GetMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DATEtime::GetYear","sum":1},{"parent":"MODULE_FUNC","rule":"DAteTime::FromSeconds","sum":1},{"parent":"MODULE_FUNC","rule":"DAteTime::GetDayOfMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DAteTime::MakeDate","sum":581},{"parent":"MODULE_FUNC","rule":"DAteTime::Parse","sum":150},{"parent":"MODULE_FUNC","rule":"DAteTime::StartOfMonth","sum":2500},{"parent":"MODULE_FUNC","rule":"DAteTime::StartOfWeek","sum":1},{"parent":"MODULE_FUNC","rule":"DAteTime::ToDays","sum":8},{"parent":"MODULE_FUNC","rule":"DAtetime::FromSeconds","sum":1},{"parent":"MODULE_FUNC","rule":"DAtetime::MakeDatetime","sum":2},{"parent":"MODULE_FUNC","rule":"DAtetime::ToStartOfWeek","sum":4},{"parent":"MODULE_FUNC","rule":"DIgest::SipHash","sum":2},{"parent":"MODULE_FUNC","rule":"DaTETIME::StartOfWeek","sum":335},{"parent"
:"MODULE_FUNC","rule":"DaTeTime::GetMonth","sum":4},{"parent":"MODULE_FUNC","rule":"DaTeTime::GetYear","sum":4},{"parent":"MODULE_FUNC","rule":"DaTeTime::IntervalFromDays","sum":1},{"parent":"MODULE_FUNC","rule":"DaTeTime::MakeDate","sum":32},{"parent":"MODULE_FUNC","rule":"DaTeTime::ShiftMonths","sum":42},{"parent":"MODULE_FUNC","rule":"DaTeTime::StartOfMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTIME::IntervalFromDays","sum":723},{"parent":"MODULE_FUNC","rule":"DateTIme::EndOfMonth","sum":3},{"parent":"MODULE_FUNC","rule":"DateTIme::Format","sum":11},{"parent":"MODULE_FUNC","rule":"DateTIme::FromMicroseconds","sum":11},{"parent":"MODULE_FUNC","rule":"DateTIme::FromSeconds","sum":64},{"parent":"MODULE_FUNC","rule":"DateTIme::GetDayOfWeek","sum":44},{"parent":"MODULE_FUNC","rule":"DateTIme::GetHour","sum":2},{"parent":"MODULE_FUNC","rule":"DateTIme::GetMinute","sum":38},{"parent":"MODULE_FUNC","rule":"DateTIme::GetYear","sum":71},{"parent":"MODULE_FUNC","rule":"DateTIme::IntervalFromDays","sum":46},{"parent":"MODULE_FUNC","rule":"DateTIme::MakeDate","sum":3587},{"parent":"MODULE_FUNC","rule":"DateTIme::MakeDatetime","sum":284},{"parent":"MODULE_FUNC","rule":"DateTIme::MakeTimestamp","sum":7},{"parent":"MODULE_FUNC","rule":"DateTIme::MakeTzTimestamp","sum":4},{"parent":"MODULE_FUNC","rule":"DateTIme::Parse","sum":2},{"parent":"MODULE_FUNC","rule":"DateTIme::ParseIso8601","sum":2},{"parent":"MODULE_FUNC","rule":"DateTIme::ShiftMonths","sum":6},{"parent":"MODULE_FUNC","rule":"DateTIme::StartOfMonth","sum":135},{"parent":"MODULE_FUNC","rule":"DateTIme::StartOfWeek","sum":1},{"parent":"MODULE_FUNC","rule":"DateTIme::ToDays","sum":13},{"parent":"MODULE_FUNC","rule":"DateTIme::ToHours","sum":1},{"parent":"MODULE_FUNC","rule":"DateTIme::ToMinutes","sum":1},{"parent":"MODULE_FUNC","rule":"DateTIme::ToSeconds","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::AddTimezone","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Convert","sum":3},{"parent":"MODULE_FUNC","ru
le":"DateTime::CurrentDate","sum":5},{"parent":"MODULE_FUNC","rule":"DateTime::CurrentDateTimeUTC","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::CurrentUtcDate","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Date","sum":4},{"parent":"MODULE_FUNC","rule":"DateTime::DateTime","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::DatetimeStartOfMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::DayOfWeek","sum":7},{"parent":"MODULE_FUNC","rule":"DateTime::Days","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::DiffMinutes","sum":8},{"parent":"MODULE_FUNC","rule":"DateTime::DiffMonths","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Difference","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::EndOf","sum":4},{"parent":"MODULE_FUNC","rule":"DateTime::EndOfDay","sum":381},{"parent":"MODULE_FUNC","rule":"DateTime::EndOfMonth","sum":38771},{"parent":"MODULE_FUNC","rule":"DateTime::EndOfQuarter","sum":306},{"parent":"MODULE_FUNC","rule":"DateTime::EndOfWeek","sum":571},{"parent":"MODULE_FUNC","rule":"DateTime::EndOfYear","sum":144},{"parent":"MODULE_FUNC","rule":"DateTime::EndofMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::EndtOfMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::ExtractHour","sum":4},{"parent":"MODULE_FUNC","rule":"DateTime::FROMMilliseconds","sum":14},{"parent":"MODULE_FUNC","rule":"DateTime::FROMSeconds","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::Format","sum":39880424},{"parent":"MODULE_FUNC","rule":"DateTime::FormatTime","sum":6},{"parent":"MODULE_FUNC","rule":"DateTime::FromDays","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::FromMicroSeconds","sum":153},{"parent":"MODULE_FUNC","rule":"DateTime::FromMicroseconds","sum":4675092},{"parent":"MODULE_FUNC","rule":"DateTime::FromMicroseconds64","sum":122},{"parent":"MODULE_FUNC","rule":"DateTime::FromMilliSeconds","sum":7},{"parent":"MODULE_FUNC","rule":"DateTime::FromMilliseconds","sum":9211698},{"parent":"MODULE_FUNC","rule":"DateTime::FromMilliseconds64","s
um":958},{"parent":"MODULE_FUNC","rule":"DateTime::FromSecond","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::FromSeconds","sum":15196525},{"parent":"MODULE_FUNC","rule":"DateTime::FromSeconds64","sum":931},{"parent":"MODULE_FUNC","rule":"DateTime::FromString","sum":368},{"parent":"MODULE_FUNC","rule":"DateTime::FromTimeZone","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::Fromat","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::GetDay","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::GetDayOfMonth","sum":595723},{"parent":"MODULE_FUNC","rule":"DateTime::GetDayOfWeek","sum":636422},{"parent":"MODULE_FUNC","rule":"DateTime::GetDayOfWeekName","sum":125820},{"parent":"MODULE_FUNC","rule":"DateTime::GetDayOfYear","sum":44450},{"parent":"MODULE_FUNC","rule":"DateTime::GetHour","sum":1453652},{"parent":"MODULE_FUNC","rule":"DateTime::GetIntervalLength","sum":12},{"parent":"MODULE_FUNC","rule":"DateTime::GetLastDayOfMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::GetMicrosecondOfSecond","sum":1253},{"parent":"MODULE_FUNC","rule":"DateTime::GetMillisecondOfSecond","sum":41},{"parent":"MODULE_FUNC","rule":"DateTime::GetMinute","sum":288991},{"parent":"MODULE_FUNC","rule":"DateTime::GetMonth","sum":704438},{"parent":"MODULE_FUNC","rule":"DateTime::GetMonthName","sum":38740},{"parent":"MODULE_FUNC","rule":"DateTime::GetMonthOfYear","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::GetSecond","sum":86244},{"parent":"MODULE_FUNC","rule":"DateTime::GetTimezoneId","sum":104},{"parent":"MODULE_FUNC","rule":"DateTime::GetTimezoneName","sum":518},{"parent":"MODULE_FUNC","rule":"DateTime::GetWeek","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::GetWeekOfYear","sum":503668},{"parent":"MODULE_FUNC","rule":"DateTime::GetWeekOfYearIso8601","sum":34273},{"parent":"MODULE_FUNC","rule":"DateTime::GetYEAR","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::GetYear","sum":784656},{"parent":"MODULE_FUNC","rule":"DateTime::Interval","sum":33},{"parent":"MODULE_FUNC","rul
e":"DateTime::Interval64FromDays","sum":2158},{"parent":"MODULE_FUNC","rule":"DateTime::Interval64FromHours","sum":4268},{"parent":"MODULE_FUNC","rule":"DateTime::Interval64FromMicroseconds","sum":9},{"parent":"MODULE_FUNC","rule":"DateTime::Interval64FromMilliseconds","sum":8},{"parent":"MODULE_FUNC","rule":"DateTime::Interval64FromMinutes","sum":182},{"parent":"MODULE_FUNC","rule":"DateTime::Interval64FromSeconds","sum":132},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFROMDays","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFrom","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromDays","sum":15020678},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromHDays","sum":28},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromHours","sum":8954718},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromMicroseconds","sum":77537},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromMilliseconds","sum":515472},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromMinute","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromMinutes","sum":4270970},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromMonth","sum":9},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromMonths","sum":7},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromSeconds","sum":935649},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalFromYears","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalfromDays","sum":27},{"parent":"MODULE_FUNC","rule":"DateTime::IntervalfromHours","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::LastDayOfMonth","sum":6},{"parent":"MODULE_FUNC","rule":"DateTime::MakeData","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::MakeDate","sum":23153151},{"parent":"MODULE_FUNC","rule":"DateTime::MakeDate32","sum":73},{"parent":"MODULE_FUNC","rule":"DateTime::MakeDateTime","sum":246},{"parent":"MODULE_FUNC","rule":"DateTime::MakeDatetime","sum":36716857},{"parent":"MODULE_FUNC","rule":"DateTime::MakeDatetime64","sum":113},{"pa
rent":"MODULE_FUNC","rule":"DateTime::MakeTimestamp","sum":7837490},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTimestamp64","sum":295},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTzDate","sum":262396},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTzDateTime","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTzDatetime","sum":3127159},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTzDatetime64","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTzTimestamp","sum":153334},{"parent":"MODULE_FUNC","rule":"DateTime::MakeTzTimestamp64","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::Makedate","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::MilliSeconds","sum":156},{"parent":"MODULE_FUNC","rule":"DateTime::NOW","sum":4},{"parent":"MODULE_FUNC","rule":"DateTime::Now","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::Parce","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Parse","sum":23597513},{"parent":"MODULE_FUNC","rule":"DateTime::Parse64","sum":13},{"parent":"MODULE_FUNC","rule":"DateTime::Parse8601","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::ParseDateTime","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::ParseDateTimeBestEffort","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::ParseFromString","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::ParseHttp","sum":26993},{"parent":"MODULE_FUNC","rule":"DateTime::ParseIso","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::ParseIso8601","sum":18536999},{"parent":"MODULE_FUNC","rule":"DateTime::ParseRfc822","sum":1862},{"parent":"MODULE_FUNC","rule":"DateTime::ParseX509","sum":245},{"parent":"MODULE_FUNC","rule":"DateTime::STartOfWeek","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Shift","sum":4},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftDay","sum":51},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftDays","sum":195},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftMinutes","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftMonth","sum":11},{"parent":"MODULE_FUNC","rule":"DateTim
e::ShiftMonths","sum":3302965},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftQuarters","sum":388068},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftWeek","sum":7},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftWeeks","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::ShiftYears","sum":672410},{"parent":"MODULE_FUNC","rule":"DateTime::Split","sum":700021},{"parent":"MODULE_FUNC","rule":"DateTime::StartOf","sum":2338328},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfDay","sum":2651264},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfHour","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfMohth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfMonth","sum":4155079},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfQuarter","sum":554245},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfWeek","sum":2075179},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfYear","sum":942330},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfmonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::StartOfweek","sum":3},{"parent":"MODULE_FUNC","rule":"DateTime::TimeOfDay","sum":66970},{"parent":"MODULE_FUNC","rule":"DateTime::TimestampFromMicroSeconds","sum":364},{"parent":"MODULE_FUNC","rule":"DateTime::TimestampFromMilliSeconds","sum":2515},{"parent":"MODULE_FUNC","rule":"DateTime::TimestampFromMinutes","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::TimestampFromSeconds","sum":391},{"parent":"MODULE_FUNC","rule":"DateTime::TimestampFromString","sum":424},{"parent":"MODULE_FUNC","rule":"DateTime::TimestampStartOfMonth","sum":821},{"parent":"MODULE_FUNC","rule":"DateTime::TimestampStartOfWeek","sum":398},{"parent":"MODULE_FUNC","rule":"DateTime::To","sum":5},{"parent":"MODULE_FUNC","rule":"DateTime::ToDate","sum":1435},{"parent":"MODULE_FUNC","rule":"DateTime::ToDateTime","sum":6},{"parent":"MODULE_FUNC","rule":"DateTime::ToDays","sum":3467508},{"parent":"MODULE_FUNC","rule":"DateTime::ToHours","sum":1320055},{"parent":"MODULE_FUNC","rule":"DateTime::ToIsoFormat","
sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::ToMicroseconds","sum":2220231},{"parent":"MODULE_FUNC","rule":"DateTime::ToMilliseconds","sum":5300346},{"parent":"MODULE_FUNC","rule":"DateTime::ToMinutes","sum":859430},{"parent":"MODULE_FUNC","rule":"DateTime::ToMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::ToMonths","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::ToSeconds","sum":23407059},{"parent":"MODULE_FUNC","rule":"DateTime::ToSeconds64","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Today","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Toseconds","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Trunc","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::Update","sum":3596216},{"parent":"MODULE_FUNC","rule":"DateTime::format","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::fromSeconds","sum":15},{"parent":"MODULE_FUNC","rule":"DateTime::parse","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::parseiso8601","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::toDate","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::toMinutes","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::toSeconds","sum":2},{"parent":"MODULE_FUNC","rule":"DateTime::toStartOfMonth","sum":1},{"parent":"MODULE_FUNC","rule":"DateTime::todate","sum":1},{"parent":"MODULE_FUNC","rule":"DatetIme::IntervalFromDays","sum":367},{"parent":"MODULE_FUNC","rule":"Datetime::CurrentDate","sum":2},{"parent":"MODULE_FUNC","rule":"Datetime::CurrentUtcDatetime","sum":1},{"parent":"MODULE_FUNC","rule":"Datetime::DaysInMonth","sum":1},{"parent":"MODULE_FUNC","rule":"Datetime::EndOfMonth","sum":182},{"parent":"MODULE_FUNC","rule":"Datetime::EndOfWeek","sum":59},{"parent":"MODULE_FUNC","rule":"Datetime::Format","sum":366091},{"parent":"MODULE_FUNC","rule":"Datetime::FromMicroseconds","sum":36372},{"parent":"MODULE_FUNC","rule":"Datetime::FromMilliseconds","sum":786334},{"parent":"MODULE_FUNC","rule":"Datetime::FromSeconds","sum":567404},{"parent":"MODULE_FUNC","rule":"Datetime::FromSe
conds64","sum":24},{"parent":"MODULE_FUNC","rule":"Datetime::GetDay","sum":1},{"parent":"MODULE_FUNC","rule":"Datetime::GetDayOfMonth","sum":709},{"parent":"MODULE_FUNC","rule":"Datetime::GetDayOfWeek","sum":12460},{"parent":"MODULE_FUNC","rule":"Datetime::GetDayOfWeekName","sum":838},{"parent":"MODULE_FUNC","rule":"Datetime::GetDayOfYear","sum":59},{"parent":"MODULE_FUNC","rule":"Datetime::GetHour","sum":14645},{"parent":"MODULE_FUNC","rule":"Datetime::GetMinute","sum":13706},{"parent":"MODULE_FUNC","rule":"Datetime::GetMonth","sum":1036},{"parent":"MODULE_FUNC","rule":"Datetime::GetMonthName","sum":344},{"parent":"MODULE_FUNC","rule":"Datetime::GetWeekOfYear","sum":2701},{"parent":"MODULE_FUNC","rule":"Datetime::GetWeekOfYearIso8601","sum":3},{"parent":"MODULE_FUNC","rule":"Datetime::GetYear","sum":2720},{"parent":"MODULE_FUNC","rule":"Datetime::Interval","sum":9},{"parent":"MODULE_FUNC","rule":"Datetime::IntervalFROMDays","sum":6},{"parent":"MODULE_FUNC","rule":"Datetime::IntervalFromDays","sum":372229},{"parent":"MODULE_FUNC","rule":"Datetime::IntervalFromHours","sum":265903},{"parent":"MODULE_FUNC","rule":"Datetime::IntervalFromMicroseconds","sum":194},{"parent":"MODULE_FUNC","rule":"Datetime::IntervalFromMilliseconds","sum":91},{"parent":"MODULE_FUNC","rule":"Datetime::IntervalFromMinutes","sum":49879},{"parent":"MODULE_FUNC","rule":"Datetime::IntervalFromSeconds","sum":9623},{"parent":"MODULE_FUNC","rule":"Datetime::MakeDate","sum":678374},{"parent":"MODULE_FUNC","rule":"Datetime::MakeDate32","sum":1},{"parent":"MODULE_FUNC","rule":"Datetime::MakeDateTime","sum":2},{"parent":"MODULE_FUNC","rule":"Datetime::MakeDatetime","sum":920770},{"parent":"MODULE_FUNC","rule":"Datetime::MakeDatetime64","sum":1},{"parent":"MODULE_FUNC","rule":"Datetime::MakeTimestamp","sum":417045},{"parent":"MODULE_FUNC","rule":"Datetime::MakeTzDate","sum":2307},{"parent":"MODULE_FUNC","rule":"Datetime::MakeTzDatetime","sum":23894},{"parent":"MODULE_FUNC","rule":"Datetime::MakeTzTimestam
p","sum":33282},{"parent":"MODULE_FUNC","rule":"Datetime::Makedate","sum":37},{"parent":"MODULE_FUNC","rule":"Datetime::Parse","sum":138225},{"parent":"MODULE_FUNC","rule":"Datetime::ParseIso8601","sum":257650},{"parent":"MODULE_FUNC","rule":"Datetime::ShiftMonths","sum":25927},{"parent":"MODULE_FUNC","rule":"Datetime::ShiftQuarters","sum":543},{"parent":"MODULE_FUNC","rule":"Datetime::ShiftYears","sum":313},{"parent":"MODULE_FUNC","rule":"Datetime::Split","sum":207},{"parent":"MODULE_FUNC","rule":"Datetime::StarOfWeek","sum":1},{"parent":"MODULE_FUNC","rule":"Datetime::StartOf","sum":64223},{"parent":"MODULE_FUNC","rule":"Datetime::StartOfDay","sum":22765},{"parent":"MODULE_FUNC","rule":"Datetime::StartOfMonth","sum":74579},{"parent":"MODULE_FUNC","rule":"Datetime::StartOfQuarter","sum":4372},{"parent":"MODULE_FUNC","rule":"Datetime::StartOfWeek","sum":23019},{"parent":"MODULE_FUNC","rule":"Datetime::StartOfYear","sum":11647},{"parent":"MODULE_FUNC","rule":"Datetime::TimeOfDay","sum":126},{"parent":"MODULE_FUNC","rule":"Datetime::ToDatetime","sum":12},{"parent":"MODULE_FUNC","rule":"Datetime::ToDays","sum":141482},{"parent":"MODULE_FUNC","rule":"Datetime::ToHours","sum":22945},{"parent":"MODULE_FUNC","rule":"Datetime::ToMicroseconds","sum":582},{"parent":"MODULE_FUNC","rule":"Datetime::ToMilliseconds","sum":121842},{"parent":"MODULE_FUNC","rule":"Datetime::ToMinutes","sum":45209},{"parent":"MODULE_FUNC","rule":"Datetime::ToSeconds","sum":1533763},{"parent":"MODULE_FUNC","rule":"Datetime::Update","sum":29519},{"parent":"MODULE_FUNC","rule":"Datetime::startOfmonth","sum":1},{"parent":"MODULE_FUNC","rule":"Decompress::BZip2","sum":518},{"parent":"MODULE_FUNC","rule":"Decompress::Brotli","sum":7961},{"parent":"MODULE_FUNC","rule":"Decompress::Gzip","sum":3922},{"parent":"MODULE_FUNC","rule":"Decompress::Lz4","sum":902},{"parent":"MODULE_FUNC","rule":"Decompress::Snappy","sum":462},{"parent":"MODULE_FUNC","rule":"Decompress::Zlib","sum":10899},{"parent":"MODULE_FUNC","r
ule":"Decompress::Zstd","sum":29},{"parent":"MODULE_FUNC","rule":"Digest::Argon2","sum":48547},{"parent":"MODULE_FUNC","rule":"Digest::Blake2B","sum":4535},{"parent":"MODULE_FUNC","rule":"Digest::CityHash","sum":1224275},{"parent":"MODULE_FUNC","rule":"Digest::CityHash128","sum":17823},{"parent":"MODULE_FUNC","rule":"Digest::Crc32c","sum":77315},{"parent":"MODULE_FUNC","rule":"Digest::Crc64","sum":140567},{"parent":"MODULE_FUNC","rule":"Digest::FarmHashFingerprint","sum":69786},{"parent":"MODULE_FUNC","rule":"Digest::FarmHashFingerprint128","sum":26},{"parent":"MODULE_FUNC","rule":"Digest::FarmHashFingerprint2","sum":166218},{"parent":"MODULE_FUNC","rule":"Digest::FarmHashFingerprint32","sum":208},{"parent":"MODULE_FUNC","rule":"Digest::FarmHashFingerprint64","sum":1051198},{"parent":"MODULE_FUNC","rule":"Digest::Fnv32","sum":1747},{"parent":"MODULE_FUNC","rule":"Digest::Fnv64","sum":261123},{"parent":"MODULE_FUNC","rule":"Digest::IntHash64","sum":18293},{"parent":"MODULE_FUNC","rule":"Digest::MD5Hex","sum":5},{"parent":"MODULE_FUNC","rule":"Digest::Md5","sum":2},{"parent":"MODULE_FUNC","rule":"Digest::Md5HalfMix","sum":427803},{"parent":"MODULE_FUNC","rule":"Digest::Md5Hex","sum":567611},{"parent":"MODULE_FUNC","rule":"Digest::Md5Raw","sum":15724},{"parent":"MODULE_FUNC","rule":"Digest::MurMurHash","sum":2418430},{"parent":"MODULE_FUNC","rule":"Digest::MurMurHash2A","sum":2499},{"parent":"MODULE_FUNC","rule":"Digest::MurMurHash2A32","sum":840},{"parent":"MODULE_FUNC","rule":"Digest::MurMurHash32","sum":256424},{"parent":"MODULE_FUNC","rule":"Digest::MurMurhash","sum":2},{"parent":"MODULE_FUNC","rule":"Digest::NimericHash","sum":3},{"parent":"MODULE_FUNC","rule":"Digest::NumericHash","sum":274144},{"parent":"MODULE_FUNC","rule":"Digest::Sha1","sum":42460},{"parent":"MODULE_FUNC","rule":"Digest::Sha256","sum":399343},{"parent":"MODULE_FUNC","rule":"Digest::SipHash","sum":132347},{"parent":"MODULE_FUNC","rule":"Digest::SuperFastHash","sum":34802},{"parent":"MODULE_FUN
C","rule":"Digest::XXH3","sum":48000},{"parent":"MODULE_FUNC","rule":"Digest::XXH3_128","sum":19},{"parent":"MODULE_FUNC","rule":"Digest::murmurhash","sum":3},{"parent":"MODULE_FUNC","rule":"HyperScan::BacktrackingGrep","sum":1},{"parent":"MODULE_FUNC","rule":"HyperScan::Grep","sum":2303},{"parent":"MODULE_FUNC","rule":"HyperScan::Match","sum":6},{"parent":"MODULE_FUNC","rule":"Hyperscan::BacktrackingGrep","sum":48609},{"parent":"MODULE_FUNC","rule":"Hyperscan::BacktrackingMatch","sum":128},{"parent":"MODULE_FUNC","rule":"Hyperscan::Capture","sum":5639},{"parent":"MODULE_FUNC","rule":"Hyperscan::Grep","sum":145100},{"parent":"MODULE_FUNC","rule":"Hyperscan::Match","sum":52838},{"parent":"MODULE_FUNC","rule":"Hyperscan::MultiGrep","sum":64},{"parent":"MODULE_FUNC","rule":"Hyperscan::MultiMatch","sum":40637},{"parent":"MODULE_FUNC","rule":"Hyperscan::Replace","sum":96907},{"parent":"MODULE_FUNC","rule":"Ip::ConvertToIPv6","sum":44916},{"parent":"MODULE_FUNC","rule":"Ip::FromString","sum":391207},{"parent":"MODULE_FUNC","rule":"Ip::GetSubnet","sum":135473},{"parent":"MODULE_FUNC","rule":"Ip::GetSubnetByMask","sum":4},{"parent":"MODULE_FUNC","rule":"Ip::IsEmbeddedIPv4","sum":6948},{"parent":"MODULE_FUNC","rule":"Ip::IsIPv4","sum":136060},{"parent":"MODULE_FUNC","rule":"Ip::IsIPv6","sum":118500},{"parent":"MODULE_FUNC","rule":"Ip::SubnetFromString","sum":549},{"parent":"MODULE_FUNC","rule":"Ip::SubnetMatch","sum":502},{"parent":"MODULE_FUNC","rule":"Ip::ToFixedIPv6String","sum":3821},{"parent":"MODULE_FUNC","rule":"Ip::ToString","sum":513752},{"parent":"MODULE_FUNC","rule":"JSON::ConvertToDouble","sum":19612},{"parent":"MODULE_FUNC","rule":"JSON::ConvertToInt64","sum":3261},{"parent":"MODULE_FUNC","rule":"JSON::ConvertToList","sum":15},{"parent":"MODULE_FUNC","rule":"JSON::ConvertToString","sum":133},{"parent":"MODULE_FUNC","rule":"JSON::ConvertToStringList","sum":4},{"parent":"MODULE_FUNC","rule":"JSON::From","sum":5},{"parent":"MODULE_FUNC","rule":"JSON::LookupBool","s
um":5},{"parent":"MODULE_FUNC","rule":"JSON::LookupDouble","sum":10},{"parent":"MODULE_FUNC","rule":"JSON::LookupInt64","sum":8},{"parent":"MODULE_FUNC","rule":"JSON::LookupString","sum":254},{"parent":"MODULE_FUNC","rule":"JSON::PARSE","sum":2},{"parent":"MODULE_FUNC","rule":"JSON::Parse","sum":3609},{"parent":"MODULE_FUNC","rule":"JSon::From","sum":2},{"parent":"MODULE_FUNC","rule":"Json::Attributes","sum":2},{"parent":"MODULE_FUNC","rule":"Json::Contains","sum":1},{"parent":"MODULE_FUNC","rule":"Json::ConvertToBool","sum":5},{"parent":"MODULE_FUNC","rule":"Json::ConvertToDict","sum":114},{"parent":"MODULE_FUNC","rule":"Json::ConvertToDouble","sum":1},{"parent":"MODULE_FUNC","rule":"Json::ConvertToInt64","sum":1},{"parent":"MODULE_FUNC","rule":"Json::ConvertToList","sum":108},{"parent":"MODULE_FUNC","rule":"Json::ConvertToString","sum":160},{"parent":"MODULE_FUNC","rule":"Json::ConvertToStringDict","sum":5},{"parent":"MODULE_FUNC","rule":"Json::ConvertToStringList","sum":19},{"parent":"MODULE_FUNC","rule":"Json::From","sum":3874},{"parent":"MODULE_FUNC","rule":"Json::FromString","sum":16},{"parent":"MODULE_FUNC","rule":"Json::GetField","sum":21},{"parent":"MODULE_FUNC","rule":"Json::GetHash","sum":1},{"parent":"MODULE_FUNC","rule":"Json::GetLength","sum":19},{"parent":"MODULE_FUNC","rule":"Json::Lookup","sum":1},{"parent":"MODULE_FUNC","rule":"Json::LookupInt64","sum":824343},{"parent":"MODULE_FUNC","rule":"Json::LookupString","sum":53},{"parent":"MODULE_FUNC","rule":"Json::Options","sum":852},{"parent":"MODULE_FUNC","rule":"Json::Parse","sum":182381},{"parent":"MODULE_FUNC","rule":"Json::ParseJson","sum":211},{"parent":"MODULE_FUNC","rule":"Json::Serialize","sum":1558},{"parent":"MODULE_FUNC","rule":"Json::SerializeJson","sum":919},{"parent":"MODULE_FUNC","rule":"Json::SerializePretty","sum":1124},{"parent":"MODULE_FUNC","rule":"Json::SerializeText","sum":3},{"parent":"MODULE_FUNC","rule":"Json::YPath","sum":20},{"parent":"MODULE_FUNC","rule":"Json::YPathDict","s
um":14},{"parent":"MODULE_FUNC","rule":"Json::YPathString","sum":1},{"parent":"MODULE_FUNC","rule":"MATH::ABS","sum":1},{"parent":"MODULE_FUNC","rule":"MATH::Cos","sum":4},{"parent":"MODULE_FUNC","rule":"MATH::EXP","sum":1},{"parent":"MODULE_FUNC","rule":"MATH::Log","sum":4},{"parent":"MODULE_FUNC","rule":"MATH::NearbyINT","sum":1},{"parent":"MODULE_FUNC","rule":"MATH::NearbyInt","sum":1},{"parent":"MODULE_FUNC","rule":"MATH::ROUND","sum":19},{"parent":"MODULE_FUNC","rule":"Math::Aabs","sum":1},{"parent":"MODULE_FUNC","rule":"Math::Abs","sum":477598},{"parent":"MODULE_FUNC","rule":"Math::Acos","sum":13299},{"parent":"MODULE_FUNC","rule":"Math::Asin","sum":11228},{"parent":"MODULE_FUNC","rule":"Math::Asinh","sum":4},{"parent":"MODULE_FUNC","rule":"Math::Atan","sum":7909},{"parent":"MODULE_FUNC","rule":"Math::Atan2","sum":13019},{"parent":"MODULE_FUNC","rule":"Math::Cbrt","sum":492},{"parent":"MODULE_FUNC","rule":"Math::Ceil","sum":1246578},{"parent":"MODULE_FUNC","rule":"Math::Cos","sum":96598},{"parent":"MODULE_FUNC","rule":"Math::Cosh","sum":5},{"parent":"MODULE_FUNC","rule":"Math::Crbt","sum":1},{"parent":"MODULE_FUNC","rule":"Math::E","sum":9607},{"parent":"MODULE_FUNC","rule":"Math::EXP","sum":1},{"parent":"MODULE_FUNC","rule":"Math::Eps","sum":179},{"parent":"MODULE_FUNC","rule":"Math::Erf","sum":2569},{"parent":"MODULE_FUNC","rule":"Math::ErfInv","sum":42},{"parent":"MODULE_FUNC","rule":"Math::ErfcInv","sum":6},{"parent":"MODULE_FUNC","rule":"Math::Exp","sum":557620},{"parent":"MODULE_FUNC","rule":"Math::Exp2","sum":2024},{"parent":"MODULE_FUNC","rule":"Math::Fabs","sum":144459},{"parent":"MODULE_FUNC","rule":"Math::Floor","sum":327868},{"parent":"MODULE_FUNC","rule":"Math::Flor","sum":1},{"parent":"MODULE_FUNC","rule":"Math::Fmod","sum":9},{"parent":"MODULE_FUNC","rule":"Math::FuzzyEquals","sum":19429},{"parent":"MODULE_FUNC","rule":"Math::Hypot","sum":18779},{"parent":"MODULE_FUNC","rule":"Math::IsFinite","sum":186397},{"parent":"MODULE_FUNC","rule":"Math::I
sInf","sum":76878},{"parent":"MODULE_FUNC","rule":"Math::IsNaN","sum":250281},{"parent":"MODULE_FUNC","rule":"Math::IsNan","sum":6},{"parent":"MODULE_FUNC","rule":"Math::Ldexp","sum":47},{"parent":"MODULE_FUNC","rule":"Math::Lgamma","sum":4},{"parent":"MODULE_FUNC","rule":"Math::Log","sum":822843},{"parent":"MODULE_FUNC","rule":"Math::Log10","sum":153177},{"parent":"MODULE_FUNC","rule":"Math::Log2","sum":197640},{"parent":"MODULE_FUNC","rule":"Math::Max","sum":3},{"parent":"MODULE_FUNC","rule":"Math::Min","sum":1},{"parent":"MODULE_FUNC","rule":"Math::Mod","sum":66515},{"parent":"MODULE_FUNC","rule":"Math::NearbyInt","sum":426816},{"parent":"MODULE_FUNC","rule":"Math::Pi","sum":74410},{"parent":"MODULE_FUNC","rule":"Math::Pow","sum":1331521},{"parent":"MODULE_FUNC","rule":"Math::Power","sum":6},{"parent":"MODULE_FUNC","rule":"Math::ROUND","sum":5},{"parent":"MODULE_FUNC","rule":"Math::Rem","sum":2276},{"parent":"MODULE_FUNC","rule":"Math::Remainder","sum":163},{"parent":"MODULE_FUNC","rule":"Math::Rint","sum":20365},{"parent":"MODULE_FUNC","rule":"Math::Round","sum":36720906},{"parent":"MODULE_FUNC","rule":"Math::RoundDownward","sum":138123},{"parent":"MODULE_FUNC","rule":"Math::RoundToNearest","sum":76444},{"parent":"MODULE_FUNC","rule":"Math::RoundTowardZero","sum":896},{"parent":"MODULE_FUNC","rule":"Math::RoundUpward","sum":211425},{"parent":"MODULE_FUNC","rule":"Math::Sigmoid","sum":279198},{"parent":"MODULE_FUNC","rule":"Math::Sin","sum":82019},{"parent":"MODULE_FUNC","rule":"Math::Sinh","sum":6030},{"parent":"MODULE_FUNC","rule":"Math::Sqrt","sum":612531},{"parent":"MODULE_FUNC","rule":"Math::Tan","sum":4988},{"parent":"MODULE_FUNC","rule":"Math::Tanh","sum":4957},{"parent":"MODULE_FUNC","rule":"Math::Tgamma","sum":60},{"parent":"MODULE_FUNC","rule":"Math::Trunc","sum":156094},{"parent":"MODULE_FUNC","rule":"Math::abs","sum":2},{"parent":"MODULE_FUNC","rule":"Math::ceil","sum":8},{"parent":"MODULE_FUNC","rule":"Math::cos","sum":2},{"parent":"MODULE_FUNC","rul
e":"Math::exp","sum":6},{"parent":"MODULE_FUNC","rule":"Math::floor","sum":3},{"parent":"MODULE_FUNC","rule":"Math::isnan","sum":1},{"parent":"MODULE_FUNC","rule":"Math::round","sum":59},{"parent":"MODULE_FUNC","rule":"Math::sin","sum":2},{"parent":"MODULE_FUNC","rule":"Math::sqrt","sum":3},{"parent":"MODULE_FUNC","rule":"PG::ARRAY_AGG","sum":7},{"parent":"MODULE_FUNC","rule":"PG::STRING_AGG","sum":37},{"parent":"MODULE_FUNC","rule":"PG::generate_series","sum":7},{"parent":"MODULE_FUNC","rule":"PG::json_object_keys","sum":19},{"parent":"MODULE_FUNC","rule":"PG::jsonb_object_keys","sum":18},{"parent":"MODULE_FUNC","rule":"PG::string_agg","sum":101},{"parent":"MODULE_FUNC","rule":"PG::to_hex","sum":9},{"parent":"MODULE_FUNC","rule":"PIRE::Capture","sum":7},{"parent":"MODULE_FUNC","rule":"PIRE::Grep","sum":22},{"parent":"MODULE_FUNC","rule":"Pg::ARRAY_AGG","sum":11},{"parent":"MODULE_FUNC","rule":"Pg::Array_Agg","sum":1},{"parent":"MODULE_FUNC","rule":"Pg::CONCAT","sum":12},{"parent":"MODULE_FUNC","rule":"Pg::Date","sum":3},{"parent":"MODULE_FUNC","rule":"Pg::GENERATE_SERIES","sum":2},{"parent":"MODULE_FUNC","rule":"Pg::SPLIT_PART","sum":5},{"parent":"MODULE_FUNC","rule":"Pg::STRING_AGG","sum":50},{"parent":"MODULE_FUNC","rule":"Pg::ST_Area","sum":1},{"parent":"MODULE_FUNC","rule":"Pg::ST_AsEWKB","sum":57},{"parent":"MODULE_FUNC","rule":"Pg::ST_AsEWKT","sum":15},{"parent":"MODULE_FUNC","rule":"Pg::ST_AsGeoJSON","sum":7},{"parent":"MODULE_FUNC","rule":"Pg::ST_AsSVG","sum":3},{"parent":"MODULE_FUNC","rule":"Pg::ST_AsText","sum":57},{"parent":"MODULE_FUNC","rule":"Pg::ST_Boundary","sum":43},{"parent":"MODULE_FUNC","rule":"Pg::ST_Centroid","sum":14},{"parent":"MODULE_FUNC","rule":"Pg::ST_ClosestPoint","sum":37},{"parent":"MODULE_FUNC","rule":"Pg::ST_Contains","sum":3},{"parent":"MODULE_FUNC","rule":"Pg::ST_Distance","sum":13},{"parent":"MODULE_FUNC","rule":"Pg::ST_GeoHash","sum":15},{"parent":"MODULE_FUNC","rule":"Pg::ST_GeomFromEWKB","sum":86},{"parent":"MODULE_FUNC","rul
e":"Pg::ST_GeomFromGeoHash","sum":1},{"parent":"MODULE_FUNC","rule":"Pg::ST_GeomFromText","sum":65},{"parent":"MODULE_FUNC","rule":"Pg::ST_GeomFromWKB","sum":7},{"parent":"MODULE_FUNC","rule":"Pg::ST_Intersects","sum":13},{"parent":"MODULE_FUNC","rule":"Pg::ST_IsValid","sum":2},{"parent":"MODULE_FUNC","rule":"Pg::ST_MakePoint","sum":7},{"parent":"MODULE_FUNC","rule":"Pg::ST_MakeValid","sum":2},{"parent":"MODULE_FUNC","rule":"Pg::ST_Point","sum":6},{"parent":"MODULE_FUNC","rule":"Pg::ST_PointOnSurface","sum":17},{"parent":"MODULE_FUNC","rule":"Pg::ST_Scale","sum":3},{"parent":"MODULE_FUNC","rule":"Pg::ST_SetSRID","sum":6},{"parent":"MODULE_FUNC","rule":"Pg::ST_Transform","sum":183},{"parent":"MODULE_FUNC","rule":"Pg::ST_X","sum":72},{"parent":"MODULE_FUNC","rule":"Pg::ST_Y","sum":66},{"parent":"MODULE_FUNC","rule":"Pg::St_geomfromewkb","sum":1},{"parent":"MODULE_FUNC","rule":"Pg::String_Agg","sum":2},{"parent":"MODULE_FUNC","rule":"Pg::age","sum":8},{"parent":"MODULE_FUNC","rule":"Pg::array_agg","sum":6},{"parent":"MODULE_FUNC","rule":"Pg::array_length","sum":1},{"parent":"MODULE_FUNC","rule":"Pg::bit_length","sum":1},{"parent":"MODULE_FUNC","rule":"Pg::center","sum":2},{"parent":"MODULE_FUNC","rule":"Pg::concat","sum":2},{"parent":"MODULE_FUNC","rule":"Pg::date_generate_series","sum":1},{"parent":"MODULE_FUNC","rule":"Pg::date_part","sum":8},{"parent":"MODULE_FUNC","rule":"Pg::date_trunc","sum":11},{"parent":"MODULE_FUNC","rule":"Pg::extract","sum":136},{"parent":"MODULE_FUNC","rule":"Pg::generate_series","sum":1511},{"parent":"MODULE_FUNC","rule":"Pg::json_object_agg","sum":2},{"parent":"MODULE_FUNC","rule":"Pg::lower","sum":2},{"parent":"MODULE_FUNC","rule":"Pg::max","sum":364},{"parent":"MODULE_FUNC","rule":"Pg::sind","sum":1},{"parent":"MODULE_FUNC","rule":"Pg::split_part","sum":20},{"parent":"MODULE_FUNC","rule":"Pg::st_asgeojson","sum":1},{"parent":"MODULE_FUNC","rule":"Pg::st_astext","sum":6},{"parent":"MODULE_FUNC","rule":"Pg::st_collect","sum":8},{"parent":
"MODULE_FUNC","rule":"Pg::st_geomfromewkb","sum":22},{"parent":"MODULE_FUNC","rule":"Pg::st_intersects","sum":9},{"parent":"MODULE_FUNC","rule":"Pg::st_transform","sum":2},{"parent":"MODULE_FUNC","rule":"Pg::st_union","sum":8},{"parent":"MODULE_FUNC","rule":"Pg::string_Agg","sum":2},{"parent":"MODULE_FUNC","rule":"Pg::string_agg","sum":1768},{"parent":"MODULE_FUNC","rule":"Pg::to_char","sum":18},{"parent":"MODULE_FUNC","rule":"Pg::to_timestamp","sum":74},{"parent":"MODULE_FUNC","rule":"Pg::version","sum":3},{"parent":"MODULE_FUNC","rule":"PgAgg::string_agg","sum":2},{"parent":"MODULE_FUNC","rule":"PgProc::upper","sum":2},{"parent":"MODULE_FUNC","rule":"Pire::Capture","sum":529324},{"parent":"MODULE_FUNC","rule":"Pire::Grep","sum":144108},{"parent":"MODULE_FUNC","rule":"Pire::Match","sum":234334},{"parent":"MODULE_FUNC","rule":"Pire::MultiGrep","sum":675},{"parent":"MODULE_FUNC","rule":"Pire::MultiMatch","sum":153},{"parent":"MODULE_FUNC","rule":"Pire::Replace","sum":1004559},{"parent":"MODULE_FUNC","rule":"Protobuf::Parse","sum":21023},{"parent":"MODULE_FUNC","rule":"Protobuf::Serialize","sum":103687},{"parent":"MODULE_FUNC","rule":"Protobuf::TryParse","sum":145141},{"parent":"MODULE_FUNC","rule":"RE2::Capture","sum":4027},{"parent":"MODULE_FUNC","rule":"RE2::Count","sum":328},{"parent":"MODULE_FUNC","rule":"RE2::FindAndConsume","sum":31},{"parent":"MODULE_FUNC","rule":"RE2::Grep","sum":66},{"parent":"MODULE_FUNC","rule":"RE2::Match","sum":545},{"parent":"MODULE_FUNC","rule":"RE2::Replace","sum":124},{"parent":"MODULE_FUNC","rule":"Re2::Capture","sum":4402477},{"parent":"MODULE_FUNC","rule":"Re2::Catch","sum":1},{"parent":"MODULE_FUNC","rule":"Re2::Compile","sum":4},{"parent":"MODULE_FUNC","rule":"Re2::Count","sum":172471},{"parent":"MODULE_FUNC","rule":"Re2::FindAll","sum":2},{"parent":"MODULE_FUNC","rule":"Re2::FindAllSubmatch","sum":2},{"parent":"MODULE_FUNC","rule":"Re2::FindAndConsume","sum":389517},{"parent":"MODULE_FUNC","rule":"Re2::Grep","sum":651844},{"par
ent":"MODULE_FUNC","rule":"Re2::Match","sum":1685101},{"parent":"MODULE_FUNC","rule":"Re2::Options","sum":248654},{"parent":"MODULE_FUNC","rule":"Re2::Replace","sum":4629139},{"parent":"MODULE_FUNC","rule":"Re2::ReplaceAll","sum":15},{"parent":"MODULE_FUNC","rule":"STRING::AsciiToLower","sum":1},{"parent":"MODULE_FUNC","rule":"STRING::Contains","sum":2},{"parent":"MODULE_FUNC","rule":"STRING::RemoveAll","sum":1},{"parent":"MODULE_FUNC","rule":"STRING::SplitToList","sum":2},{"parent":"MODULE_FUNC","rule":"String::ASciiToLower","sum":2},{"parent":"MODULE_FUNC","rule":"String::AsciiToLower","sum":4121260},{"parent":"MODULE_FUNC","rule":"String::AsciiToTitle","sum":95297},{"parent":"MODULE_FUNC","rule":"String::AsciiToUpper","sum":549100},{"parent":"MODULE_FUNC","rule":"String::AsciiTolower","sum":4},{"parent":"MODULE_FUNC","rule":"String::Base32Decode","sum":274},{"parent":"MODULE_FUNC","rule":"String::Base32Encode","sum":194},{"parent":"MODULE_FUNC","rule":"String::Base32StrictDecode","sum":56},{"parent":"MODULE_FUNC","rule":"String::Base64Decode","sum":394448},{"parent":"MODULE_FUNC","rule":"String::Base64Encode","sum":112358},{"parent":"MODULE_FUNC","rule":"String::Base64EncodeUrl","sum":5714},{"parent":"MODULE_FUNC","rule":"String::Base64StrictDecode","sum":96599},{"parent":"MODULE_FUNC","rule":"String::Bin","sum":510},{"parent":"MODULE_FUNC","rule":"String::BinText","sum":121},{"parent":"MODULE_FUNC","rule":"String::CgiEscape","sum":71141},{"parent":"MODULE_FUNC","rule":"String::CgiUnescape","sum":24008},{"parent":"MODULE_FUNC","rule":"String::ColapseText","sum":4},{"parent":"MODULE_FUNC","rule":"String::Collapse","sum":197504},{"parent":"MODULE_FUNC","rule":"String::CollapseText","sum":200340},{"parent":"MODULE_FUNC","rule":"String::Contains","sum":6167479},{"parent":"MODULE_FUNC","rule":"String::DecodeHtml","sum":3005},{"parent":"MODULE_FUNC","rule":"String::EncodeHtml","sum":416},{"parent":"MODULE_FUNC","rule":"String::EndsWith","sum":539060},{"parent":"MODULE_
FUNC","rule":"String::EndsWithIgnoreCase","sum":36079},{"parent":"MODULE_FUNC","rule":"String::EscapeC","sum":56141},{"parent":"MODULE_FUNC","rule":"String::Find","sum":654309},{"parent":"MODULE_FUNC","rule":"String::From","sum":2},{"parent":"MODULE_FUNC","rule":"String::FromByteList","sum":1048344},{"parent":"MODULE_FUNC","rule":"String::HasPrefix","sum":21305},{"parent":"MODULE_FUNC","rule":"String::HasPrefixIgnoreCase","sum":49},{"parent":"MODULE_FUNC","rule":"String::HasSuffix","sum":4930},{"parent":"MODULE_FUNC","rule":"String::HasSuffixIgnoreCase","sum":28771},{"parent":"MODULE_FUNC","rule":"String::Hex","sum":376508},{"parent":"MODULE_FUNC","rule":"String::HexDecode","sum":165567},{"parent":"MODULE_FUNC","rule":"String::HexEncode","sum":194436},{"parent":"MODULE_FUNC","rule":"String::HexText","sum":79392},{"parent":"MODULE_FUNC","rule":"String::HumanReadableBytes","sum":244},{"parent":"MODULE_FUNC","rule":"String::HumanReadableDuration","sum":1122121},{"parent":"MODULE_FUNC","rule":"String::HumanReadableQuantity","sum":342},{"parent":"MODULE_FUNC","rule":"String::IsAscii","sum":13981},{"parent":"MODULE_FUNC","rule":"String::IsAsciiAlnum","sum":1001},{"parent":"MODULE_FUNC","rule":"String::IsAsciiAlpha","sum":383},{"parent":"MODULE_FUNC","rule":"String::IsAsciiDigit","sum":8442},{"parent":"MODULE_FUNC","rule":"String::IsAsciiHex","sum":19443},{"parent":"MODULE_FUNC","rule":"String::IsAsciiLower","sum":22},{"parent":"MODULE_FUNC","rule":"String::IsAsciiSpace","sum":13},{"parent":"MODULE_FUNC","rule":"String::IsAsciiUpper","sum":355},{"parent":"MODULE_FUNC","rule":"String::Join","sum":2},{"parent":"MODULE_FUNC","rule":"String::JoinFROMList","sum":32020},{"parent":"MODULE_FUNC","rule":"String::JoinFromList","sum":14003251},{"parent":"MODULE_FUNC","rule":"String::LeftPad","sum":65910},{"parent":"MODULE_FUNC","rule":"String::Length","sum":2},{"parent":"MODULE_FUNC","rule":"String::LevenshteinDistance","sum":4},{"parent":"MODULE_FUNC","rule":"String::LevensteinDista
nce","sum":11690},{"parent":"MODULE_FUNC","rule":"String::Prec","sum":1849},{"parent":"MODULE_FUNC","rule":"String::RaplaceAll","sum":3},{"parent":"MODULE_FUNC","rule":"String::RemoveAll","sum":1248314},{"parent":"MODULE_FUNC","rule":"String::RemoveFirst","sum":640987},{"parent":"MODULE_FUNC","rule":"String::RemoveLast","sum":556385},{"parent":"MODULE_FUNC","rule":"String::Replace","sum":11},{"parent":"MODULE_FUNC","rule":"String::ReplaceALL","sum":3},{"parent":"MODULE_FUNC","rule":"String::ReplaceAll","sum":15647494},{"parent":"MODULE_FUNC","rule":"String::ReplaceFirst","sum":1364617},{"parent":"MODULE_FUNC","rule":"String::ReplaceFirstStartsWith","sum":1},{"parent":"MODULE_FUNC","rule":"String::ReplaceLast","sum":173452},{"parent":"MODULE_FUNC","rule":"String::ReplaceRegex","sum":1},{"parent":"MODULE_FUNC","rule":"String::Reverse","sum":118761},{"parent":"MODULE_FUNC","rule":"String::ReverseFind","sum":38847},{"parent":"MODULE_FUNC","rule":"String::RightPad","sum":372890},{"parent":"MODULE_FUNC","rule":"String::SBin","sum":7},{"parent":"MODULE_FUNC","rule":"String::SHex","sum":6281},{"parent":"MODULE_FUNC","rule":"String::Split","sum":10},{"parent":"MODULE_FUNC","rule":"String::SplitToList","sum":31793998},{"parent":"MODULE_FUNC","rule":"String::SplitToSet","sum":12},{"parent":"MODULE_FUNC","rule":"String::StartWith","sum":1},{"parent":"MODULE_FUNC","rule":"String::StartsWith","sum":3456492},{"parent":"MODULE_FUNC","rule":"String::StartsWithIgnoreCase","sum":56347},{"parent":"MODULE_FUNC","rule":"String::Strip","sum":3178205},{"parent":"MODULE_FUNC","rule":"String::Substring","sum":302275},{"parent":"MODULE_FUNC","rule":"String::ToByteList","sum":140330},{"parent":"MODULE_FUNC","rule":"String::ToLower","sum":5256794},{"parent":"MODULE_FUNC","rule":"String::ToLowerCase","sum":1},{"parent":"MODULE_FUNC","rule":"String::ToTitle","sum":34547},{"parent":"MODULE_FUNC","rule":"String::ToUpper","sum":140634},{"parent":"MODULE_FUNC","rule":"String::Trim","sum":3},{"parent"
:"MODULE_FUNC","rule":"String::UnescapeC","sum":326609},{"parent":"MODULE_FUNC","rule":"String::contains","sum":3},{"parent":"MODULE_FUNC","rule":"String::splittolist","sum":1},{"parent":"MODULE_FUNC","rule":"String::tolower","sum":2},{"parent":"MODULE_FUNC","rule":"TryDecompress::BZip2","sum":4},{"parent":"MODULE_FUNC","rule":"TryDecompress::BlockCodec","sum":1},{"parent":"MODULE_FUNC","rule":"TryDecompress::Brotli","sum":11},{"parent":"MODULE_FUNC","rule":"TryDecompress::Gzip","sum":1015},{"parent":"MODULE_FUNC","rule":"TryDecompress::Lz4","sum":124},{"parent":"MODULE_FUNC","rule":"TryDecompress::Lzma","sum":4},{"parent":"MODULE_FUNC","rule":"TryDecompress::Snappy","sum":10},{"parent":"MODULE_FUNC","rule":"TryDecompress::Xz","sum":4},{"parent":"MODULE_FUNC","rule":"TryDecompress::Zlib","sum":3552},{"parent":"MODULE_FUNC","rule":"TryDecompress::Zstd","sum":18},{"parent":"MODULE_FUNC","rule":"URL::Decode","sum":5},{"parent":"MODULE_FUNC","rule":"URL::GetHost","sum":2},{"parent":"MODULE_FUNC","rule":"Unicode::FInd","sum":4},{"parent":"MODULE_FUNC","rule":"Unicode::Find","sum":153146},{"parent":"MODULE_FUNC","rule":"Unicode::Fold","sum":29117},{"parent":"MODULE_FUNC","rule":"Unicode::FromCodePointList","sum":120420},{"parent":"MODULE_FUNC","rule":"Unicode::GetLength","sum":594623},{"parent":"MODULE_FUNC","rule":"Unicode::GetLengthn","sum":1},{"parent":"MODULE_FUNC","rule":"Unicode::IsAlnum","sum":551},{"parent":"MODULE_FUNC","rule":"Unicode::IsAlpha","sum":460},{"parent":"MODULE_FUNC","rule":"Unicode::IsAscii","sum":688},{"parent":"MODULE_FUNC","rule":"Unicode::IsDigit","sum":8984},{"parent":"MODULE_FUNC","rule":"Unicode::IsHex","sum":4},{"parent":"MODULE_FUNC","rule":"Unicode::IsLower","sum":96},{"parent":"MODULE_FUNC","rule":"Unicode::IsSpace","sum":18},{"parent":"MODULE_FUNC","rule":"Unicode::IsUnicodeSet","sum":440},{"parent":"MODULE_FUNC","rule":"Unicode::IsUpper","sum":1816},{"parent":"MODULE_FUNC","rule":"Unicode::IsUtf","sum":670446},{"parent":"MODULE_FUNC","r
ule":"Unicode::JoinFromList","sum":202740},{"parent":"MODULE_FUNC","rule":"Unicode::Length","sum":2},{"parent":"MODULE_FUNC","rule":"Unicode::LevensteinDistance","sum":39597},{"parent":"MODULE_FUNC","rule":"Unicode::Normalize","sum":110601},{"parent":"MODULE_FUNC","rule":"Unicode::NormalizeNFC","sum":527},{"parent":"MODULE_FUNC","rule":"Unicode::NormalizeNFD","sum":36},{"parent":"MODULE_FUNC","rule":"Unicode::NormalizeNFKC","sum":6965},{"parent":"MODULE_FUNC","rule":"Unicode::NormalizeNFKD","sum":1729},{"parent":"MODULE_FUNC","rule":"Unicode::RFind","sum":73240},{"parent":"MODULE_FUNC","rule":"Unicode::RemoveAll","sum":139022},{"parent":"MODULE_FUNC","rule":"Unicode::RemoveFirst","sum":7747},{"parent":"MODULE_FUNC","rule":"Unicode::RemoveLast","sum":7785},{"parent":"MODULE_FUNC","rule":"Unicode::ReplaceAll","sum":269427},{"parent":"MODULE_FUNC","rule":"Unicode::ReplaceFirst","sum":1750},{"parent":"MODULE_FUNC","rule":"Unicode::ReplaceLast","sum":317},{"parent":"MODULE_FUNC","rule":"Unicode::Reverse","sum":52328},{"parent":"MODULE_FUNC","rule":"Unicode::SUBSTRING","sum":2},{"parent":"MODULE_FUNC","rule":"Unicode::SplitToList","sum":180911},{"parent":"MODULE_FUNC","rule":"Unicode::Strip","sum":64726},{"parent":"MODULE_FUNC","rule":"Unicode::Substring","sum":601464},{"parent":"MODULE_FUNC","rule":"Unicode::ToCodePointList","sum":122853},{"parent":"MODULE_FUNC","rule":"Unicode::ToLower","sum":1190672},{"parent":"MODULE_FUNC","rule":"Unicode::ToTitle","sum":36005},{"parent":"MODULE_FUNC","rule":"Unicode::ToUint64","sum":413},{"parent":"MODULE_FUNC","rule":"Unicode::ToUpper","sum":111882},{"parent":"MODULE_FUNC","rule":"Unicode::Translit","sum":103652},{"parent":"MODULE_FUNC","rule":"Unicode::TryToUint64","sum":1942},{"parent":"MODULE_FUNC","rule":"Url::AsciiToLower","sum":1},{"parent":"MODULE_FUNC","rule":"Url::BuildQueryString","sum":25869},{"parent":"MODULE_FUNC","rule":"Url::CanBePunycodeHostName","sum":6528},{"parent":"MODULE_FUNC","rule":"Url::CutQueryStringAndFragm
ent","sum":232746},{"parent":"MODULE_FUNC","rule":"Url::CutScheme","sum":1132324},{"parent":"MODULE_FUNC","rule":"Url::CutWWW","sum":859165},{"parent":"MODULE_FUNC","rule":"Url::CutWWW2","sum":741509},{"parent":"MODULE_FUNC","rule":"Url::Decode","sum":1694041},{"parent":"MODULE_FUNC","rule":"Url::Encode","sum":370603},{"parent":"MODULE_FUNC","rule":"Url::ForceHostNameToPunycode","sum":200827},{"parent":"MODULE_FUNC","rule":"Url::ForcePunycodeToHostName","sum":132424},{"parent":"MODULE_FUNC","rule":"Url::GetCGIParam","sum":1824397},{"parent":"MODULE_FUNC","rule":"Url::GetCgiParam","sum":7},{"parent":"MODULE_FUNC","rule":"Url::GetDomain","sum":1124409},{"parent":"MODULE_FUNC","rule":"Url::GetDomainLevel","sum":70675},{"parent":"MODULE_FUNC","rule":"Url::GetFragment","sum":495},{"parent":"MODULE_FUNC","rule":"Url::GetHost","sum":4049696},{"parent":"MODULE_FUNC","rule":"Url::GetHostPort","sum":196358},{"parent":"MODULE_FUNC","rule":"Url::GetOwner","sum":1403748},{"parent":"MODULE_FUNC","rule":"Url::GetPath","sum":1692312},{"parent":"MODULE_FUNC","rule":"Url::GetPort","sum":817082},{"parent":"MODULE_FUNC","rule":"Url::GetScheme","sum":2056328},{"parent":"MODULE_FUNC","rule":"Url::GetSchemeHost","sum":145839},{"parent":"MODULE_FUNC","rule":"Url::GetSchemeHostPort","sum":697464},{"parent":"MODULE_FUNC","rule":"Url::GetSignificantDomain","sum":463398},{"parent":"MODULE_FUNC","rule":"Url::GetTLD","sum":35514},{"parent":"MODULE_FUNC","rule":"Url::GetTail","sum":574437},{"parent":"MODULE_FUNC","rule":"Url::Getowner","sum":1},{"parent":"MODULE_FUNC","rule":"Url::HostNameToPunycode","sum":580783},{"parent":"MODULE_FUNC","rule":"Url::IsAllowedByRobotsTxt","sum":7},{"parent":"MODULE_FUNC","rule":"Url::IsKnownTLD","sum":20834},{"parent":"MODULE_FUNC","rule":"Url::IsWellKnownTLD","sum":4928},{"parent":"MODULE_FUNC","rule":"Url::Normalize","sum":1049217},{"parent":"MODULE_FUNC","rule":"Url::NormalizeWithDefaultHttpScheme","sum":622798},{"parent":"MODULE_FUNC","rule":"Url::Parse","sum
":299237},{"parent":"MODULE_FUNC","rule":"Url::PunycodeToHostName","sum":202698},{"parent":"MODULE_FUNC","rule":"Url::QueryStringToDict","sum":154631},{"parent":"MODULE_FUNC","rule":"Url::QueryStringToList","sum":38742},{"parent":"MODULE_FUNC","rule":"Url::ReplaceAll","sum":8},{"parent":"MODULE_FUNC","rule":"YSON::Co","sum":1},{"parent":"MODULE_FUNC","rule":"YSON::ConvertToBool","sum":12},{"parent":"MODULE_FUNC","rule":"YSON::ConvertToDict","sum":2},{"parent":"MODULE_FUNC","rule":"YSON::ConvertToDouble","sum":4},{"parent":"MODULE_FUNC","rule":"YSON::ConvertToDoubleList","sum":10},{"parent":"MODULE_FUNC","rule":"YSON::ConvertToInt64","sum":2},{"parent":"MODULE_FUNC","rule":"YSON::ConvertToList","sum":1},{"parent":"MODULE_FUNC","rule":"YSON::ConvertToString","sum":64},{"parent":"MODULE_FUNC","rule":"YSON::ConvertToStringList","sum":20},{"parent":"MODULE_FUNC","rule":"YSON::From","sum":7},{"parent":"MODULE_FUNC","rule":"YSON::IsDict","sum":3},{"parent":"MODULE_FUNC","rule":"YSON::Lookup","sum":1},{"parent":"MODULE_FUNC","rule":"YSON::LookupDict","sum":1},{"parent":"MODULE_FUNC","rule":"YSON::LookupDouble","sum":3},{"parent":"MODULE_FUNC","rule":"YSON::LookupInt64","sum":1},{"parent":"MODULE_FUNC","rule":"YSON::LookupString","sum":3},{"parent":"MODULE_FUNC","rule":"YSON::Parse","sum":5},{"parent":"MODULE_FUNC","rule":"YSON::ToString","sum":1},{"parent":"MODULE_FUNC","rule":"YSON::convertToString","sum":7},{"parent":"MODULE_FUNC","rule":"YSON::from","sum":1},{"parent":"MODULE_FUNC","rule":"YSon::ConvertToDouble","sum":6},{"parent":"MODULE_FUNC","rule":"YSon::ConvertToList","sum":3},{"parent":"MODULE_FUNC","rule":"YSon::ConvertToString","sum":2},{"parent":"MODULE_FUNC","rule":"YSon::LookupString","sum":1},{"parent":"MODULE_FUNC","rule":"YSon::Parse","sum":5},{"parent":"MODULE_FUNC","rule":"Yson::AsList","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::Attributes","sum":1892},{"parent":"MODULE_FUNC","rule":"Yson::COntains","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::COnve
rtToDouble","sum":7},{"parent":"MODULE_FUNC","rule":"Yson::CastToStringList","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::Contains","sum":2964796},{"parent":"MODULE_FUNC","rule":"Yson::Conver","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::ConverTToInt64","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConverToDouble","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::ConverToInt64","sum":10},{"parent":"MODULE_FUNC","rule":"Yson::ConverToList","sum":9},{"parent":"MODULE_FUNC","rule":"Yson::ConverToString","sum":12},{"parent":"MODULE_FUNC","rule":"Yson::ConvertFromString","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertTOList","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertTo","sum":8887873},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToAttributes","sum":16},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToBool","sum":8778198},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToBoolDict","sum":105286},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToBoolList","sum":7400},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToBoolgDict","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToBytes","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDate","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDateTime","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDict","sum":8505885},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDictList","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDictOfDouble","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDictString","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDouble","sum":9532162},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDoubleDict","sum":279814},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToDoubleList","sum":1192022},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToFloat","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToFloat64","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToINT64List","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToIn64","sum":2},{"parent"
:"MODULE_FUNC","rule":"Yson::ConvertToInt","sum":37},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToInt32","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToInt32List","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToInt64","sum":20427706},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToInt64Dict","sum":186913},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToInt64List","sum":2391814},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToIntList","sum":5},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToInteget","sum":168},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToJson","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToList","sum":18954978},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToListDouble","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToListString","sum":16},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToListg","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToSTring","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToSTringList","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToSetring","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToString","sum":96745330},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToStringDict","sum":1449218},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToStringInt64","sum":4},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToStringList","sum":24374452},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToStrint","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToStruct","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToText","sum":13},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUINT64List","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUInt64","sum":164},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUInt64List","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUint32","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUint64","sum":8491077},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUint64Dict","sum":36518},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUint64L
ist","sum":2200184},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToUnit64List","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToevent_value","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToint64","sum":8},{"parent":"MODULE_FUNC","rule":"Yson::ConvertToint64List","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertTolIST","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertTolist","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::ConvertTostring","sum":4},{"parent":"MODULE_FUNC","rule":"Yson::ConverttoList","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::ConverttoString","sum":12},{"parent":"MODULE_FUNC","rule":"Yson::ConvvertToString","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::Dict","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::Equals","sum":796957},{"parent":"MODULE_FUNC","rule":"Yson::Extract","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FROM","sum":8},{"parent":"MODULE_FUNC","rule":"Yson::Find","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::From","sum":25115604},{"parent":"MODULE_FUNC","rule":"Yson::From64List","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromAGG_LIST","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromASDFDSKLDJF","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromAboba","sum":4},{"parent":"MODULE_FUNC","rule":"Yson::FromBinary","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromBoolDict","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromBytes","sum":17},{"parent":"MODULE_FUNC","rule":"Yson::FromDict","sum":3076},{"parent":"MODULE_FUNC","rule":"Yson::FromDouble","sum":2801},{"parent":"MODULE_FUNC","rule":"Yson::FromDouble64Dict","sum":3673},{"parent":"MODULE_FUNC","rule":"Yson::FromDoubleDict","sum":42637},{"parent":"MODULE_FUNC","rule":"Yson::FromDoubleList","sum":996},{"parent":"MODULE_FUNC","rule":"Yson::FromInt32","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromInt64Dict","sum":2139},{"parent":"MODULE_FUNC","rule":"Yson::FromInt64List","sum":7479},{"parent":"MODULE_FUNC","rule":"Yson::FromJson","sum":2153},{"paren
t":"MODULE_FUNC","rule":"Yson::FromKek","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::FromList","sum":4907},{"parent":"MODULE_FUNC","rule":"Yson::FromListTake","sum":9},{"parent":"MODULE_FUNC","rule":"Yson::FromMap","sum":356},{"parent":"MODULE_FUNC","rule":"Yson::FromMinutes","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromSHEEEEEEEEEEEEE","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::FromSHIIIIIIIIIII","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromSeconds","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::FromSring","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::FromSting","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromString","sum":56274},{"parent":"MODULE_FUNC","rule":"Yson::FromStringDict","sum":69232},{"parent":"MODULE_FUNC","rule":"Yson::FromStringList","sum":59327},{"parent":"MODULE_FUNC","rule":"Yson::FromStruct","sum":651231},{"parent":"MODULE_FUNC","rule":"Yson::FromUi64List","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::FromUin64List","sum":378},{"parent":"MODULE_FUNC","rule":"Yson::FromUint32Dict","sum":10257},{"parent":"MODULE_FUNC","rule":"Yson::FromUint64","sum":9},{"parent":"MODULE_FUNC","rule":"Yson::FromUint64Dict","sum":16028},{"parent":"MODULE_FUNC","rule":"Yson::FromUint64List","sum":25356},{"parent":"MODULE_FUNC","rule":"Yson::FromY2020MachoDachaTbIhaHouse","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::Fromt","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::Get","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::GetHash","sum":863949},{"parent":"MODULE_FUNC","rule":"Yson::GetLength","sum":2810726},{"parent":"MODULE_FUNC","rule":"Yson::IsBool","sum":33313},{"parent":"MODULE_FUNC","rule":"Yson::IsDict","sum":257947},{"parent":"MODULE_FUNC","rule":"Yson::IsDouble","sum":99984},{"parent":"MODULE_FUNC","rule":"Yson::IsEntity","sum":2675802},{"parent":"MODULE_FUNC","rule":"Yson::IsInt64","sum":288518},{"parent":"MODULE_FUNC","rule":"Yson::IsList","sum":286846},{"parent":"MODULE_FUNC","rule":"Yson::IsString","sum":1024354},{"parent":"MODULE_FU
NC","rule":"Yson::IsUint64","sum":154279},{"parent":"MODULE_FUNC","rule":"Yson::ListMap","sum":12},{"parent":"MODULE_FUNC","rule":"Yson::Lo","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::Loo","sum":10},{"parent":"MODULE_FUNC","rule":"Yson::LookUp","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::LookUpDict","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::LookUpString","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::Lookup","sum":6565660},{"parent":"MODULE_FUNC","rule":"Yson::LookupBool","sum":2150705},{"parent":"MODULE_FUNC","rule":"Yson::LookupDict","sum":402618},{"parent":"MODULE_FUNC","rule":"Yson::LookupDouble","sum":3063358},{"parent":"MODULE_FUNC","rule":"Yson::LookupInt","sum":15},{"parent":"MODULE_FUNC","rule":"Yson::LookupInt32","sum":91},{"parent":"MODULE_FUNC","rule":"Yson::LookupInt64","sum":5505702},{"parent":"MODULE_FUNC","rule":"Yson::LookupInteger","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::LookupList","sum":1627353},{"parent":"MODULE_FUNC","rule":"Yson::LookupSTRING","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::LookupString","sum":41313918},{"parent":"MODULE_FUNC","rule":"Yson::LookupStringList","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::LookupStruct","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::LookupTimestamp","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::LookupUInt64","sum":6},{"parent":"MODULE_FUNC","rule":"Yson::LookupUint64","sum":3802432},{"parent":"MODULE_FUNC","rule":"Yson::LookupsTRING","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::LoopUpString","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::Options","sum":9252892},{"parent":"MODULE_FUNC","rule":"Yson::Parse","sum":8644334},{"parent":"MODULE_FUNC","rule":"Yson::ParseJSON","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::ParseJson","sum":12622535},{"parent":"MODULE_FUNC","rule":"Yson::ParseJsonDecodeUtf8","sum":134162},{"parent":"MODULE_FUNC","rule":"Yson::Parsejson","sum":9},{"parent":"MODULE_FUNC","rule":"Yson::Path","sum":23},{"parent":"MODULE_FUNC","rule":"Yson::Serialize","sum
":3870551},{"parent":"MODULE_FUNC","rule":"Yson::SerializeJSON","sum":5},{"parent":"MODULE_FUNC","rule":"Yson::SerializeJson","sum":9849752},{"parent":"MODULE_FUNC","rule":"Yson::SerializeJsonEncodeUtf8","sum":201909},{"parent":"MODULE_FUNC","rule":"Yson::SerializePretty","sum":1187442},{"parent":"MODULE_FUNC","rule":"Yson::SerializeText","sum":611791},{"parent":"MODULE_FUNC","rule":"Yson::WithAttributes","sum":660},{"parent":"MODULE_FUNC","rule":"Yson::YPath","sum":12613297},{"parent":"MODULE_FUNC","rule":"Yson::YPathBool","sum":1177444},{"parent":"MODULE_FUNC","rule":"Yson::YPathBoolean","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::YPathDict","sum":25491},{"parent":"MODULE_FUNC","rule":"Yson::YPathDouble","sum":1365910},{"parent":"MODULE_FUNC","rule":"Yson::YPathInt16","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::YPathInt64","sum":2718665},{"parent":"MODULE_FUNC","rule":"Yson::YPathList","sum":1387120},{"parent":"MODULE_FUNC","rule":"Yson::YPathListString","sum":7},{"parent":"MODULE_FUNC","rule":"Yson::YPathString","sum":13533915},{"parent":"MODULE_FUNC","rule":"Yson::YPathUint64","sum":709229},{"parent":"MODULE_FUNC","rule":"Yson::YaPathString","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::Ypath","sum":51},{"parent":"MODULE_FUNC","rule":"Yson::YpathString","sum":7},{"parent":"MODULE_FUNC","rule":"Yson::Yson2","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::convertToDict","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::convertToInt64","sum":1},{"parent":"MODULE_FUNC","rule":"Yson::convertToString","sum":7},{"parent":"MODULE_FUNC","rule":"Yson::convertToUint64","sum":2},{"parent":"MODULE_FUNC","rule":"Yson::from","sum":45},{"parent":"MODULE_FUNC","rule":"Yson::fromJson","sum":3},{"parent":"MODULE_FUNC","rule":"Yson::lookupString","sum":1},{"parent":"MODULE_FUNC","rule":"dateTime::Format","sum":3},{"parent":"MODULE_FUNC","rule":"dateTime::GetMonth","sum":6},{"parent":"MODULE_FUNC","rule":"dateTime::IntervalFromDays","sum":361},{"parent":"MODULE_FUNC","rule":"date
Time::IntervalFromSeconds","sum":1},{"parent":"MODULE_FUNC","rule":"dateTime::MakeDate","sum":87},{"parent":"MODULE_FUNC","rule":"dateTime::MakeDatetime","sum":7},{"parent":"MODULE_FUNC","rule":"dateTime::StartOfMonth","sum":2},{"parent":"MODULE_FUNC","rule":"datetime::Format","sum":5},{"parent":"MODULE_FUNC","rule":"datetime::FromMilliseconds","sum":3},{"parent":"MODULE_FUNC","rule":"datetime::FromSeconds","sum":3736},{"parent":"MODULE_FUNC","rule":"datetime::GetDayOfWeek","sum":4},{"parent":"MODULE_FUNC","rule":"datetime::GetDayOfWeekName","sum":3},{"parent":"MODULE_FUNC","rule":"datetime::GetHour","sum":8},{"parent":"MODULE_FUNC","rule":"datetime::GetMonth","sum":14},{"parent":"MODULE_FUNC","rule":"datetime::GetWeekOfYear","sum":20},{"parent":"MODULE_FUNC","rule":"datetime::GetYear","sum":29},{"parent":"MODULE_FUNC","rule":"datetime::IntervalFromDays","sum":1345},{"parent":"MODULE_FUNC","rule":"datetime::IntervalFromHours","sum":37},{"parent":"MODULE_FUNC","rule":"datetime::IntervalFromMinutes","sum":6},{"parent":"MODULE_FUNC","rule":"datetime::MakeDate","sum":5405},{"parent":"MODULE_FUNC","rule":"datetime::MakeDatetime","sum":661},{"parent":"MODULE_FUNC","rule":"datetime::MakeTimestamp","sum":1},{"parent":"MODULE_FUNC","rule":"datetime::Parse","sum":397},{"parent":"MODULE_FUNC","rule":"datetime::ParseIso8601","sum":3},{"parent":"MODULE_FUNC","rule":"datetime::ShiftMonths","sum":309},{"parent":"MODULE_FUNC","rule":"datetime::StartOf","sum":1},{"parent":"MODULE_FUNC","rule":"datetime::StartOfDay","sum":2},{"parent":"MODULE_FUNC","rule":"datetime::StartOfMonth","sum":549},{"parent":"MODULE_FUNC","rule":"datetime::StartOfWeek","sum":528},{"parent":"MODULE_FUNC","rule":"datetime::ToDays","sum":375},{"parent":"MODULE_FUNC","rule":"datetime::ToSeconds","sum":735},{"parent":"MODULE_FUNC","rule":"datetime::fromseconds","sum":4},{"parent":"MODULE_FUNC","rule":"digest::Md5HalfMix","sum":14},{"parent":"MODULE_FUNC","rule":"digest::city_hash","sum":1},{"parent":"MODULE_FUNC"
,"rule":"json::ConvertToString","sum":7},{"parent":"MODULE_FUNC","rule":"json::From","sum":1},{"parent":"MODULE_FUNC","rule":"math::Round","sum":2},{"parent":"MODULE_FUNC","rule":"math::floor","sum":1},{"parent":"MODULE_FUNC","rule":"math::log","sum":2},{"parent":"MODULE_FUNC","rule":"math::pow","sum":6},{"parent":"MODULE_FUNC","rule":"math::round","sum":43},{"parent":"MODULE_FUNC","rule":"pg::GENERATE_SERIES","sum":8},{"parent":"MODULE_FUNC","rule":"pg::SPLIT_PART","sum":2},{"parent":"MODULE_FUNC","rule":"pg::age","sum":732},{"parent":"MODULE_FUNC","rule":"pg::array_agg","sum":2},{"parent":"MODULE_FUNC","rule":"pg::date_part","sum":711},{"parent":"MODULE_FUNC","rule":"pg::extract","sum":285},{"parent":"MODULE_FUNC","rule":"pg::generate_series","sum":2},{"parent":"MODULE_FUNC","rule":"pg::st_asgeojson","sum":2},{"parent":"MODULE_FUNC","rule":"pg::st_astext","sum":4},{"parent":"MODULE_FUNC","rule":"pg::st_geomfromewkb","sum":14},{"parent":"MODULE_FUNC","rule":"pg::st_transform","sum":9},{"parent":"MODULE_FUNC","rule":"pg::string_agg","sum":15},{"parent":"MODULE_FUNC","rule":"pire::Capture","sum":7},{"parent":"MODULE_FUNC","rule":"pire::Match","sum":29},{"parent":"MODULE_FUNC","rule":"re2::Capture","sum":2733},{"parent":"MODULE_FUNC","rule":"re2::Grep","sum":1},{"parent":"MODULE_FUNC","rule":"re2::Match","sum":24},{"parent":"MODULE_FUNC","rule":"re2::Replace","sum":429},{"parent":"MODULE_FUNC","rule":"re2::capture","sum":8},{"parent":"MODULE_FUNC","rule":"string::AsciiToLower","sum":6},{"parent":"MODULE_FUNC","rule":"string::JoinFromList","sum":1},{"parent":"MODULE_FUNC","rule":"string::ReplaceFirst","sum":14},{"parent":"MODULE_FUNC","rule":"string::SplitToList","sum":5},{"parent":"MODULE_FUNC","rule":"string::StartsWith","sum":1},{"parent":"MODULE_FUNC","rule":"string::contains","sum":4},{"parent":"MODULE_FUNC","rule":"string::joinfromlist","sum":15},{"parent":"MODULE_FUNC","rule":"string::removeall","sum":4},{"parent":"MODULE_FUNC","rule":"string::replaceall","sum":
11},{"parent":"MODULE_FUNC","rule":"string::splittolist","sum":12},{"parent":"MODULE_FUNC","rule":"string::strip","sum":2},{"parent":"MODULE_FUNC","rule":"url::Encode","sum":2},{"parent":"MODULE_FUNC","rule":"url::decode","sum":2},{"parent":"MODULE_FUNC","rule":"url::gethost","sum":1},{"parent":"MODULE_FUNC","rule":"ySoN::CoNveRtTo","sum":1},{"parent":"MODULE_FUNC","rule":"yson::ConvertToDict","sum":1},{"parent":"MODULE_FUNC","rule":"yson::ConvertToDouble","sum":1},{"parent":"MODULE_FUNC","rule":"yson::ConvertToInt64","sum":1},{"parent":"MODULE_FUNC","rule":"yson::ConvertToString","sum":3},{"parent":"MODULE_FUNC","rule":"yson::ConvertToStringList","sum":2},{"parent":"MODULE_FUNC","rule":"yson::From","sum":1},{"parent":"MODULE_FUNC","rule":"yson::LookupString","sum":1},{"parent":"MODULE_FUNC","rule":"yson::PARSE","sum":2},{"parent":"MODULE_FUNC","rule":"yson::convertto","sum":2},{"parent":"MODULE_FUNC","rule":"yson::converttodoubledict","sum":8},{"parent":"MODULE_FUNC","rule":"yson::converttolist","sum":1},{"parent":"MODULE_FUNC","rule":"yson::converttostring","sum":4},{"parent":"MODULE_FUNC","rule":"yson::converttostringdict","sum":8},{"parent":"MODULE_FUNC","rule":"yson::from","sum":2},{"parent":"MODULE_FUNC","rule":"yson::options","sum":8},{"parent":"MODULE_FUNC","rule":"yson::parsejson","sum":16},{"parent":"PRAGMA","rule":"AllowDotInAlias","sum":258051},{"parent":"PRAGMA","rule":"AllowUnnamedColumns","sum":4},{"parent":"PRAGMA","rule":"AnsiCurrentRow","sum":951},{"parent":"PRAGMA","rule":"AnsiImplicitCrossJoin","sum":13019},{"parent":"PRAGMA","rule":"AnsiInFOREmptyORNullableItemsCollections","sum":52},{"parent":"PRAGMA","rule":"AnsiInForEmptyOrNULLableItemsCollectiONs","sum":158},{"parent":"PRAGMA","rule":"AnsiInForEmptyOrNULLableItemsCollections","sum":4074},{"parent":"PRAGMA","rule":"AnsiInForEmptyOrNullableItemsCollections","sum":39592457},{"parent":"PRAGMA","rule":"AnsiInForEmptyOrnullableItemsCollections","sum":260},{"parent":"PRAGMA","rule":"AnsiInForEmptyo
rNullableItemsCollections","sum":2145},{"parent":"PRAGMA","rule":"AnsiInForEmptyornullableItemsCollections","sum":1272},{"parent":"PRAGMA","rule":"AnsiInforEmptyOrNullableItemsCollections","sum":243},{"parent":"PRAGMA","rule":"AnsiOptionalAS","sum":4},{"parent":"PRAGMA","rule":"AnsiOptionalAs","sum":2145464},{"parent":"PRAGMA","rule":"AnsiOptionalas","sum":352},{"parent":"PRAGMA","rule":"AnsiOrderByLimitInUnionAll","sum":210077},{"parent":"PRAGMA","rule":"AnsiRankForNullableKeys","sum":223539},{"parent":"PRAGMA","rule":"AnsiinForEmptyOrNullableItemsCollections","sum":8480},{"parent":"PRAGMA","rule":"AnsiinForEmptyOrNullableitemsCollections","sum":16},{"parent":"PRAGMA","rule":"AutoCommit","sum":39070},{"parent":"PRAGMA","rule":"BlockEngine","sum":351069},{"parent":"PRAGMA","rule":"BogousStarInGroupByOverJoin","sum":2},{"parent":"PRAGMA","rule":"CLassicDivision","sum":9},{"parent":"PRAGMA","rule":"CheckedOps","sum":649},{"parent":"PRAGMA","rule":"ClASsicDivision","sum":627},{"parent":"PRAGMA","rule":"ClassicDIvision","sum":44},{"parent":"PRAGMA","rule":"ClassicDivisiON","sum":148},{"parent":"PRAGMA","rule":"ClassicDivision","sum":2553059},{"parent":"PRAGMA","rule":"Classicdivision","sum":6},{"parent":"PRAGMA","rule":"CoalesceJoinKeysOnQualifiedAll","sum":36},{"parent":"PRAGMA","rule":"CompactGroupBy","sum":26},{"parent":"PRAGMA","rule":"CompactNamedExprs","sum":2438},{"parent":"PRAGMA","rule":"CostBasedOptimizer","sum":14407},{"parent":"PRAGMA","rule":"DQ.ANALYZEQUERY","sum":1617},{"parent":"PRAGMA","rule":"DQEngine","sum":3},{"parent":"PRAGMA","rule":"DirectRead","sum":48},{"parent":"PRAGMA","rule":"DisableAnsiInForEmptyOrNullableItemsCollections","sum":233532},{"parent":"PRAGMA","rule":"DisableAnsiRankForNullableKeys","sum":38142},{"parent":"PRAGMA","rule":"DisableCoalesceJoinKeysOnQualifiedAll","sum":197550},{"parent":"PRAGMA","rule":"DisableCompactNamedExprs","sum":12},{"parent":"PRAGMA","rule":"DisableOrderedColumns","sum":536},{"parent":"PRAGMA","rule":"Disable
PullUpFlatMapOverJoin","sum":2},{"parent":"PRAGMA","rule":"DisableSimpleColumns","sum":397601},{"parent":"PRAGMA","rule":"DisableStrictJoinKeyTypes","sum":416},{"parent":"PRAGMA","rule":"DisableUnicodeLiterals","sum":34},{"parent":"PRAGMA","rule":"DisableUnordered","sum":869},{"parent":"PRAGMA","rule":"DistinctOverWindow","sum":7},{"parent":"PRAGMA","rule":"Dq.HashJoinMode","sum":6},{"parent":"PRAGMA","rule":"Dq.MaxTasksPerStage","sum":5},{"parent":"PRAGMA","rule":"Dq.SplitStageOnDqReplicate","sum":4},{"parent":"PRAGMA","rule":"Dq.UseBlockReader","sum":2},{"parent":"PRAGMA","rule":"DqEngine","sum":10781569},{"parent":"PRAGMA","rule":"Dqengine","sum":2},{"parent":"PRAGMA","rule":"EmitAggApply","sum":1},{"parent":"PRAGMA","rule":"EmitUnionMerge","sum":7},{"parent":"PRAGMA","rule":"EnableSystemColumns","sum":3},{"parent":"PRAGMA","rule":"FILE","sum":289321},{"parent":"PRAGMA","rule":"FeatureR010","sum":224},{"parent":"PRAGMA","rule":"File","sum":7397697},{"parent":"PRAGMA","rule":"FileOption","sum":15305},{"parent":"PRAGMA","rule":"FilterPushdownOverJoinOptionalSide","sum":6},{"parent":"PRAGMA","rule":"FlexibleTypes","sum":943},{"parent":"PRAGMA","rule":"Folder","sum":3774},{"parent":"PRAGMA","rule":"Greetings","sum":1443},{"parent":"PRAGMA","rule":"GroupByCubeLimit","sum":39613},{"parent":"PRAGMA","rule":"GroupByLimit","sum":152020},{"parent":"PRAGMA","rule":"JsonQueryReturnsJsonDocument","sum":300759},{"parent":"PRAGMA","rule":"LIBRARY","sum":246771},{"parent":"PRAGMA","rule":"LIbrary","sum":52},{"parent":"PRAGMA","rule":"Library","sum":15064301},{"parent":"PRAGMA","rule":"OrderedColumns","sum":5979946},{"parent":"PRAGMA","rule":"Orderedcolumns","sum":12},{"parent":"PRAGMA","rule":"OverrideLibrary","sum":182},{"parent":"PRAGMA","rule":"Package","sum":5},{"parent":"PRAGMA","rule":"PositionalUnionAll","sum":119861},{"parent":"PRAGMA","rule":"PqReadBy","sum":42},{"parent":"PRAGMA","rule":"REGEXUSERE2","sum":13433},{"parent":"PRAGMA","rule":"RefSelect","sum":433467},{"pa
rent":"PRAGMA","rule":"RegExUseRe2","sum":32},{"parent":"PRAGMA","rule":"RegexUseRe2","sum":312165},{"parent":"PRAGMA","rule":"ResultRowsLimit","sum":3},{"parent":"PRAGMA","rule":"SampleSelect","sum":11},{"parent":"PRAGMA","rule":"SeqMode","sum":4},{"parent":"PRAGMA","rule":"SimpleColumns","sum":6945318},{"parent":"PRAGMA","rule":"Simplecolumns","sum":3},{"parent":"PRAGMA","rule":"StrictJoinKeyTypes","sum":76407},{"parent":"PRAGMA","rule":"TablePathPrefix","sum":5599047},{"parent":"PRAGMA","rule":"UDF","sum":2171118},{"parent":"PRAGMA","rule":"Udf","sum":1936209},{"parent":"PRAGMA","rule":"UnicodeLiterals","sum":98},{"parent":"PRAGMA","rule":"UnorderedSubqueries","sum":2},{"parent":"PRAGMA","rule":"UseBlocks","sum":57},{"parent":"PRAGMA","rule":"UseTablePrefixForEach","sum":52},{"parent":"PRAGMA","rule":"WARNING","sum":26},{"parent":"PRAGMA","rule":"WarnUnnamedColumns","sum":26250},{"parent":"PRAGMA","rule":"Warning","sum":2962871},{"parent":"PRAGMA","rule":"WarningMsg","sum":12640},{"parent":"PRAGMA","rule":"YSON.AutoConvert","sum":834},{"parent":"PRAGMA","rule":"YSON.DisableStrict","sum":44713},{"parent":"PRAGMA","rule":"YSON.Strict","sum":732},{"parent":"PRAGMA","rule":"YT.Auth","sum":58},{"parent":"PRAGMA","rule":"YT.DefaultOperationWeight","sum":253828},{"parent":"PRAGMA","rule":"YT.InferSchema","sum":17857},{"parent":"PRAGMA","rule":"YT.POOL","sum":275},{"parent":"PRAGMA","rule":"YT.Pool","sum":86019},{"parent":"PRAGMA","rule":"YT.StaticPool","sum":6},{"parent":"PRAGMA","rule":"YT.TableContentDeliveryMode","sum":1666},{"parent":"PRAGMA","rule":"YT.UseNativeYTTypes","sum":2},{"parent":"PRAGMA","rule":"YT.UseNativeYTtypes","sum":3},{"parent":"PRAGMA","rule":"YT.pOOL","sum":4},{"parent":"PRAGMA","rule":"YT.pool","sum":18841},{"parent":"PRAGMA","rule":"YsON.Disablestrict","sum":131},{"parent":"PRAGMA","rule":"Yson.AutoConvert","sum":41777},{"parent":"PRAGMA","rule":"Yson.DIsableStrict","sum":15201},{"parent":"PRAGMA","rule":"Yson.DisableStrict","sum":481166},{"par
ent":"PRAGMA","rule":"Yson.Strict","sum":6872},{"parent":"PRAGMA","rule":"Yson.disablestrict","sum":29},{"parent":"PRAGMA","rule":"Yt.Auth","sum":33},{"parent":"PRAGMA","rule":"Yt.Description","sum":4},{"parent":"PRAGMA","rule":"Yt.ExternalTx","sum":24787},{"parent":"PRAGMA","rule":"Yt.HybridDqExecution","sum":9},{"parent":"PRAGMA","rule":"Yt.InferSchema","sum":2746},{"parent":"PRAGMA","rule":"Yt.MaxRowWeight","sum":955},{"parent":"PRAGMA","rule":"Yt.ParallelOperationsLimit","sum":12},{"parent":"PRAGMA","rule":"Yt.Pool","sum":255054},{"parent":"PRAGMA","rule":"Yt.PoolTrees","sum":1},{"parent":"PRAGMA","rule":"Yt.StaticPool","sum":9950},{"parent":"PRAGMA","rule":"Yt.TmpFolder","sum":4},{"parent":"PRAGMA","rule":"Yt.UseNativeYtTypes","sum":2851},{"parent":"PRAGMA","rule":"Yt.pool","sum":82540},{"parent":"PRAGMA","rule":"ansiInForEmptyOrNullableItemsCollections","sum":118},{"parent":"PRAGMA","rule":"ansiimplicitcrossjoin","sum":2},{"parent":"PRAGMA","rule":"ansiinforemptyornullableitemscollections","sum":12},{"parent":"PRAGMA","rule":"ansioptionalas","sum":98},{"parent":"PRAGMA","rule":"autocommit","sum":133608},{"parent":"PRAGMA","rule":"classicDivision","sum":18},{"parent":"PRAGMA","rule":"classic_division","sum":11},{"parent":"PRAGMA","rule":"classicdivision","sum":77},{"parent":"PRAGMA","rule":"config.flags","sum":1287644},{"parent":"PRAGMA","rule":"direct_read","sum":2},{"parent":"PRAGMA","rule":"directread","sum":2},{"parent":"PRAGMA","rule":"disableSimpleColumns","sum":8},{"parent":"PRAGMA","rule":"disablesimplecolumns","sum":6},{"parent":"PRAGMA","rule":"dq.AnalyticsHopping","sum":4},{"parent":"PRAGMA","rule":"dq.AnalyzeQuery","sum":3092333},{"parent":"PRAGMA","rule":"dq.ENABLEDQREPLICATE","sum":1},{"parent":"PRAGMA","rule":"dq.EnableComputeActor","sum":1},{"parent":"PRAGMA","rule":"dq.EnableDqReplicate","sum":920387},{"parent":"PRAGMA","rule":"dq.EnableDqreplicate","sum":3},{"parent":"PRAGMA","rule":"dq.EnableFullResultWrite","sum":28736},{"parent":"PRAGMA","r
ule":"dq.EnableInsert","sum":86029},{"parent":"PRAGMA","rule":"dq.FallbackPolicy","sum":3},{"parent":"PRAGMA","rule":"dq.HashJoinMode","sum":6061},{"parent":"PRAGMA","rule":"dq.HashShuffleMaxTasks","sum":7},{"parent":"PRAGMA","rule":"dq.HashShuffleTasksRatio","sum":7},{"parent":"PRAGMA","rule":"dq.MaxDataSizePerJob","sum":11},{"parent":"PRAGMA","rule":"dq.MaxDataSizePerQuery","sum":2},{"parent":"PRAGMA","rule":"dq.MaxRetries","sum":1124},{"parent":"PRAGMA","rule":"dq.MaxTasksPerOperation","sum":22991},{"parent":"PRAGMA","rule":"dq.MaxTasksPerStage","sum":60968},{"parent":"PRAGMA","rule":"dq.MemoryLimit","sum":7899},{"parent":"PRAGMA","rule":"dq.OptLLVM","sum":11},{"parent":"PRAGMA","rule":"dq.SplitStageOnDqReplicate","sum":11031},{"parent":"PRAGMA","rule":"dq.UseBlockReader","sum":3948},{"parent":"PRAGMA","rule":"dq.UseFastPickleTransport","sum":3},{"parent":"PRAGMA","rule":"dq.UseFinalizeByKey","sum":10},{"parent":"PRAGMA","rule":"dq.UseOOBTransport","sum":9},{"parent":"PRAGMA","rule":"dq.UseWideBlockChannels","sum":2},{"parent":"PRAGMA","rule":"dq.UseWideChannels","sum":1},{"parent":"PRAGMA","rule":"dq.WorkerFilter","sum":109},{"parent":"PRAGMA","rule":"dq.enableDqReplicate","sum":348},{"parent":"PRAGMA","rule":"dq.enabledqreplicate","sum":24},{"parent":"PRAGMA","rule":"dqEngine","sum":91599},{"parent":"PRAGMA","rule":"dqengine","sum":21267},{"parent":"PRAGMA","rule":"equijoin","sum":4},{"parent":"PRAGMA","rule":"file","sum":1808859},{"parent":"PRAGMA","rule":"folder","sum":674},{"parent":"PRAGMA","rule":"greetings","sum":1},{"parent":"PRAGMA","rule":"library","sum":4874289},{"parent":"PRAGMA","rule":"orderedColumns","sum":7},{"parent":"PRAGMA","rule":"orderedcolumns","sum":15150},{"parent":"PRAGMA","rule":"override_library","sum":32},{"parent":"PRAGMA","rule":"package","sum":228295},{"parent":"PRAGMA","rule":"refselect","sum":32461},{"parent":"PRAGMA","rule":"rtmr.Account","sum":4},{"parent":"PRAGMA","rule":"rtmr.TaskName","sum":12},{"parent":"PRAGMA","rule":"rtm
r.YfInstanceCount","sum":8},{"parent":"PRAGMA","rule":"rtmr.YfSlotCount","sum":8},{"parent":"PRAGMA","rule":"rtmr.yfPool","sum":1},{"parent":"PRAGMA","rule":"sampleselect","sum":177},{"parent":"PRAGMA","rule":"simpleColumns","sum":1442},{"parent":"PRAGMA","rule":"simplecolumns","sum":243154},{"parent":"PRAGMA","rule":"tablepathprefix","sum":2},{"parent":"PRAGMA","rule":"udf","sum":4332081},{"parent":"PRAGMA","rule":"warning","sum":66551},{"parent":"PRAGMA","rule":"yson.AutoConvert","sum":4727715},{"parent":"PRAGMA","rule":"yson.Auto_convert","sum":18},{"parent":"PRAGMA","rule":"yson.Autoconvert","sum":1161},{"parent":"PRAGMA","rule":"yson.DisableStrict","sum":20043055},{"parent":"PRAGMA","rule":"yson.Disablestrict","sum":46},{"parent":"PRAGMA","rule":"yson.Fast","sum":664},{"parent":"PRAGMA","rule":"yson.Strict","sum":3492262},{"parent":"PRAGMA","rule":"yson.autoConvert","sum":4},{"parent":"PRAGMA","rule":"yson.autoconvert","sum":3737},{"parent":"PRAGMA","rule":"yson.disableStrict","sum":2363},{"parent":"PRAGMA","rule":"yson.disable_strict","sum":3},{"parent":"PRAGMA","rule":"yson.disablestrict","sum":211677},{"parent":"PRAGMA","rule":"yson.strict","sum":708},{"parent":"PRAGMA","rule":"yt.AUth","sum":4},{"parent":"PRAGMA","rule":"yt.Annotations","sum":46403354},{"parent":"PRAGMA","rule":"yt.Auth","sum":1277833},{"parent":"PRAGMA","rule":"yt.AutoMerge","sum":4456791},{"parent":"PRAGMA","rule":"yt.Automerge","sum":2800},{"parent":"PRAGMA","rule":"yt.BatchListFolderConcurrency","sum":152},{"parent":"PRAGMA","rule":"yt.BinaryExpirationInterval","sum":6212936},{"parent":"PRAGMA","rule":"yt.BinaryTmpFolder","sum":6238906},{"parent":"PRAGMA","rule":"yt.BlockReaderSupportedDataTypes","sum":5},{"parent":"PRAGMA","rule":"yt.ColumnGroupMode","sum":443562},{"parent":"PRAGMA","rule":"yt.CombineCoreLimit","sum":76823},{"parent":"PRAGMA","rule":"yt.CommonJoinCoreLimit","sum":1845},{"parent":"PRAGMA","rule":"yt.CoreDumpPath","sum":2},{"parent":"PRAGMA","rule":"yt.DQRPCReaderInfligh
t","sum":140},{"parent":"PRAGMA","rule":"yt.DQRPCReaderTimeout","sum":8},{"parent":"PRAGMA","rule":"yt.DatASizePerJob","sum":2146},{"parent":"PRAGMA","rule":"yt.DatASizePerPartition","sum":732},{"parent":"PRAGMA","rule":"yt.DataSizePerJob","sum":5239479},{"parent":"PRAGMA","rule":"yt.DataSizePerMapJob","sum":314117},{"parent":"PRAGMA","rule":"yt.DataSizePerPartition","sum":513857},{"parent":"PRAGMA","rule":"yt.DataSizePerSortJob","sum":199622},{"parent":"PRAGMA","rule":"yt.DatasizePerJob","sum":16},{"parent":"PRAGMA","rule":"yt.DatasizePerSortJob","sum":16},{"parent":"PRAGMA","rule":"yt.DefaultCalcMemoryLimit","sum":258983},{"parent":"PRAGMA","rule":"yt.DefaultCluster","sum":71795086},{"parent":"PRAGMA","rule":"yt.DefaultLocalityTimeout","sum":4063},{"parent":"PRAGMA","rule":"yt.DefaultMapSelectivityFactor","sum":53},{"parent":"PRAGMA","rule":"yt.DefaultMaxJobFails","sum":1550814},{"parent":"PRAGMA","rule":"yt.DefaultMemORyLimit","sum":7},{"parent":"PRAGMA","rule":"yt.DefaultMemoryLimit","sum":2058884},{"parent":"PRAGMA","rule":"yt.DefaultMemoryReserveFactor","sum":25433},{"parent":"PRAGMA","rule":"yt.DefaultOperationWeight","sum":9750845},{"parent":"PRAGMA","rule":"yt.Description","sum":236380},{"parent":"PRAGMA","rule":"yt.DisableFuseOperations","sum":2},{"parent":"PRAGMA","rule":"yt.DisableJobSplitting","sum":4638},{"parent":"PRAGMA","rule":"yt.DisableOptimizers","sum":30931},{"parent":"PRAGMA","rule":"yt.EnableDynamicStoreReadInDQ","sum":1},{"parent":"PRAGMA","rule":"yt.ErasureCodecCpu","sum":148239},{"parent":"PRAGMA","rule":"yt.EvaluationTableSizeLimit","sum":815030},{"parent":"PRAGMA","rule":"yt.ExpirationDeadline","sum":3423055},{"parent":"PRAGMA","rule":"yt.ExpirationInterval","sum":8107892},{"parent":"PRAGMA","rule":"yt.ExtendedStatsMaxChunkCount","sum":4},{"parent":"PRAGMA","rule":"yt.ExternalTx","sum":80671058},{"parent":"PRAGMA","rule":"yt.ExtraTmpfsSize","sum":95},{"parent":"PRAGMA","rule":"yt.FileCacheTtl","sum":8247621},{"parent":"PRAGMA","rule":"yt.
FolderInlineDataLimit","sum":40},{"parent":"PRAGMA","rule":"yt.FolderInlineItemsLimit","sum":2},{"parent":"PRAGMA","rule":"yt.ForceInferSchema","sum":801298},{"parent":"PRAGMA","rule":"yt.ForceJobSizeAdjuster","sum":10237},{"parent":"PRAGMA","rule":"yt.GeobaseDownloadUrl","sum":62921},{"parent":"PRAGMA","rule":"yt.HybridDqDataSizeLimitForOrdered","sum":312251},{"parent":"PRAGMA","rule":"yt.HybridDqDataSizeLimitForUnordered","sum":315268},{"parent":"PRAGMA","rule":"yt.HybridDqExecution","sum":3699504},{"parent":"PRAGMA","rule":"yt.HybridDqExecutionFallback","sum":108},{"parent":"PRAGMA","rule":"yt.INFERSCHEMA","sum":1},{"parent":"PRAGMA","rule":"yt.INferSchema","sum":6},{"parent":"PRAGMA","rule":"yt.IgnoreTypeV3","sum":4595},{"parent":"PRAGMA","rule":"yt.IgnoreWeakSchema","sum":231556},{"parent":"PRAGMA","rule":"yt.IgnoreYamrDsv","sum":22544},{"parent":"PRAGMA","rule":"yt.InferSchemA","sum":11},{"parent":"PRAGMA","rule":"yt.InferSchema","sum":32292775},{"parent":"PRAGMA","rule":"yt.InferSchemaTableCountThreshold","sum":1},{"parent":"PRAGMA","rule":"yt.Inferschema","sum":85412},{"parent":"PRAGMA","rule":"yt.IntermediateAccount","sum":132967},{"parent":"PRAGMA","rule":"yt.IntermediateDataMedium","sum":294637},{"parent":"PRAGMA","rule":"yt.IntermediateReplicationFactor","sum":2782},{"parent":"PRAGMA","rule":"yt.JavascriptCpu","sum":42},{"parent":"PRAGMA","rule":"yt.JobBlockInput","sum":27},{"parent":"PRAGMA","rule":"yt.JobBlockOutput","sum":12},{"parent":"PRAGMA","rule":"yt.JobEnv","sum":14134},{"parent":"PRAGMA","rule":"yt.JoinAllowColumnRenames","sum":446},{"parent":"PRAGMA","rule":"yt.JoinCollectColumnarStatistics","sum":20355},{"parent":"PRAGMA","rule":"yt.JoinColumnarStatisticsFetcherMode","sum":592},{"parent":"PRAGMA","rule":"yt.JoinEnableStarJoin","sum":41721},{"parent":"PRAGMA","rule":"yt.JoinMergeForce","sum":53571},{"parent":"PRAGMA","rule":"yt.JoinMergeReduceJobMaxSize","sum":2753},{"parent":"PRAGMA","rule":"yt.JoinMergeTablesLimit","sum":3660},{"parent":"PRA
GMA","rule":"yt.JoinMergeUnsortedFactor","sum":1673},{"parent":"PRAGMA","rule":"yt.JoinMergeUseSmallAsPrimary","sum":5933},{"parent":"PRAGMA","rule":"yt.JoinUseColumnarStatistics","sum":1013},{"parent":"PRAGMA","rule":"yt.JoinWaitAllInputs","sum":16},{"parent":"PRAGMA","rule":"yt.KeepTempTables","sum":974},{"parent":"PRAGMA","rule":"yt.LayerPaths","sum":159087},{"parent":"PRAGMA","rule":"yt.LookupJoinLimit","sum":230889},{"parent":"PRAGMA","rule":"yt.LookupJoinMaxRows","sum":181590},{"parent":"PRAGMA","rule":"yt.MAXRowWeight","sum":56},{"parent":"PRAGMA","rule":"yt.MAxJobCount","sum":64},{"parent":"PRAGMA","rule":"yt.MAxRowWeight","sum":1},{"parent":"PRAGMA","rule":"yt.MapJOINLimit","sum":721},{"parent":"PRAGMA","rule":"yt.MapJoinLimit","sum":691161},{"parent":"PRAGMA","rule":"yt.MapJoinShardCount","sum":320751},{"parent":"PRAGMA","rule":"yt.MapJoinShardMinRows","sum":4},{"parent":"PRAGMA","rule":"yt.MapJoinUseFlow","sum":1},{"parent":"PRAGMA","rule":"yt.MapLocalityTimeout","sum":4},{"parent":"PRAGMA","rule":"yt.MaxChunksForDqRead","sum":120},{"parent":"PRAGMA","rule":"yt.MaxExtraJobMemoryToFuseOperations","sum":19952},{"parent":"PRAGMA","rule":"yt.MaxInputTables","sum":29639},{"parent":"PRAGMA","rule":"yt.MaxInputTablesForSortedMerge","sum":5917},{"parent":"PRAGMA","rule":"yt.MaxJobCount","sum":12535931},{"parent":"PRAGMA","rule":"yt.MaxJobcount","sum":1383},{"parent":"PRAGMA","rule":"yt.MaxKeyRangeCount","sum":9},{"parent":"PRAGMA","rule":"yt.MaxKeyWeight","sum":206061},{"parent":"PRAGMA","rule":"yt.MaxOutputTables","sum":54},{"parent":"PRAGMA","rule":"yt.MaxReplicationFactorToFuseOperations","sum":15},{"parent":"PRAGMA","rule":"yt.MaxRowWeight","sum":6096066},{"parent":"PRAGMA","rule":"yt.MaxSpeculativeJobCountPerTask","sum":2243},{"parent":"PRAGMA","rule":"yt.MinLocalityInputDataWeight","sum":8},{"parent":"PRAGMA","rule":"yt.MinPublishedAvgChunkSize","sum":3163080},{"parent":"PRAGMA","rule":"yt.MinTempAvgChunkSize","sum":178194},{"parent":"PRAGMA","rule":"yt.Nat
iveYtTypeCompatibility","sum":15},{"parent":"PRAGMA","rule":"yt.NetworkProject","sum":608728},{"parent":"PRAGMA","rule":"yt.NightlyCompress","sum":146858},{"parent":"PRAGMA","rule":"yt.OWners","sum":21},{"parent":"PRAGMA","rule":"yt.OperationReaders","sum":12782804},{"parent":"PRAGMA","rule":"yt.OperationSpec","sum":8554262},{"parent":"PRAGMA","rule":"yt.OptimizeFor","sum":5765068},{"parent":"PRAGMA","rule":"yt.Owners","sum":65852261},{"parent":"PRAGMA","rule":"yt.POOL","sum":68},{"parent":"PRAGMA","rule":"yt.POol","sum":12},{"parent":"PRAGMA","rule":"yt.ParallelOperationsLimit","sum":1384490},{"parent":"PRAGMA","rule":"yt.PartitionByConstantKeysViaMap","sum":13},{"parent":"PRAGMA","rule":"yt.PooL","sum":1},{"parent":"PRAGMA","rule":"yt.Pool","sum":54684434},{"parent":"PRAGMA","rule":"yt.PoolTrees","sum":6598981},{"parent":"PRAGMA","rule":"yt.PrimaryMedium","sum":381954},{"parent":"PRAGMA","rule":"yt.PruneKeyFilterLambda","sum":560},{"parent":"PRAGMA","rule":"yt.PublishedAutoMerge","sum":3402932},{"parent":"PRAGMA","rule":"yt.PublishedCompressionCodec","sum":7389711},{"parent":"PRAGMA","rule":"yt.PublishedErasureCodec","sum":5377449},{"parent":"PRAGMA","rule":"yt.PublishedMedia","sum":4220},{"parent":"PRAGMA","rule":"yt.PublishedPrimaryMedium","sum":782354},{"parent":"PRAGMA","rule":"yt.PublishedReplicationFactor","sum":23458},{"parent":"PRAGMA","rule":"yt.PythonCpu","sum":81442},{"parent":"PRAGMA","rule":"yt.QueryCacheIgnoreTableRevision","sum":65674},{"parent":"PRAGMA","rule":"yt.QueryCacheMode","sum":24255862},{"parent":"PRAGMA","rule":"yt.QueryCacheSalt","sum":5},{"parent":"PRAGMA","rule":"yt.QueryCacheTtl","sum":8944821},{"parent":"PRAGMA","rule":"yt.QueryCacheUseForCalc","sum":8},{"parent":"PRAGMA","rule":"yt.QuerycacheMode","sum":2},{"parent":"PRAGMA","rule":"yt.ReleaseTempData","sum":2701171},{"parent":"PRAGMA","rule":"yt.STaticPool","sum":3},{"parent":"PRAGMA","rule":"yt.SamplingIoBlockSize","sum":3871},{"parent":"PRAGMA","rule":"yt.SchedulingTag","sum":37}
,{"parent":"PRAGMA","rule":"yt.SchedulingTagFilter","sum":2971},{"parent":"PRAGMA","rule":"yt.ScriptCpu","sum":4799},{"parent":"PRAGMA","rule":"yt.StartedBy","sum":264057},{"parent":"PRAGMA","rule":"yt.StaticPOol","sum":1},{"parent":"PRAGMA","rule":"yt.StaticPool","sum":112934029},{"parent":"PRAGMA","rule":"yt.Static_pool","sum":1},{"parent":"PRAGMA","rule":"yt.SuspendIfAccountLimitExceeded","sum":42805},{"parent":"PRAGMA","rule":"yt.TableContentDeliveryMode","sum":591},{"parent":"PRAGMA","rule":"yt.TableContentLocalExecution","sum":18},{"parent":"PRAGMA","rule":"yt.TableContentMaxChunksForNativeDelivery","sum":1},{"parent":"PRAGMA","rule":"yt.TableContentMaxInputTables","sum":6745},{"parent":"PRAGMA","rule":"yt.TableContentMinAvgChunkSize","sum":2641},{"parent":"PRAGMA","rule":"yt.TableContentTmpFolder","sum":28},{"parent":"PRAGMA","rule":"yt.TableContentUseSkiff","sum":11},{"parent":"PRAGMA","rule":"yt.TablesTmpFolder","sum":3427851},{"parent":"PRAGMA","rule":"yt.TempTablesTtl","sum":5363561},{"parent":"PRAGMA","rule":"yt.TemporaryAutoMerge","sum":8842097},{"parent":"PRAGMA","rule":"yt.TemporaryCompressionCodec","sum":5825991},{"parent":"PRAGMA","rule":"yt.TemporaryErasureCodec","sum":4747451},{"parent":"PRAGMA","rule":"yt.TemporaryPrimaryMedium","sum":494255},{"parent":"PRAGMA","rule":"yt.TemporaryReplicationFactor","sum":94},{"parent":"PRAGMA","rule":"yt.TentativePoolTrees","sum":1850255},{"parent":"PRAGMA","rule":"yt.TentativeTreeEligibilityMaxJobDurationRatio","sum":17563},{"parent":"PRAGMA","rule":"yt.TentativeTreeEligibilityMinJobDuration","sum":2176},{"parent":"PRAGMA","rule":"yt.TentativeTreeEligibilitySampleJobCount","sum":16218},{"parent":"PRAGMA","rule":"yt.TmpFolder","sum":31243741},{"parent":"PRAGMA","rule":"yt.TopSortMaxLimit","sum":59},{"parent":"PRAGMA","rule":"yt.TopSortSizePerJob","sum":1},{"parent":"PRAGMA","rule":"yt.USENATIVEYTTYPES","sum":25},{"parent":"PRAGMA","rule":"yt.USeNativeYtTypes","sum":12},{"parent":"PRAGMA","rule":"yt.UseColumnarSt
atistics","sum":166172},{"parent":"PRAGMA","rule":"yt.UseDefaultTentativePoolTrees","sum":252423},{"parent":"PRAGMA","rule":"yt.UseFlow","sum":3},{"parent":"PRAGMA","rule":"yt.UseIntermediateStreams","sum":6},{"parent":"PRAGMA","rule":"yt.UseNAtiveYtTypes","sum":1},{"parent":"PRAGMA","rule":"yt.UseNativeDescSort","sum":2005},{"parent":"PRAGMA","rule":"yt.UseNativeYtTYpes","sum":1485},{"parent":"PRAGMA","rule":"yt.UseNativeYtTypes","sum":24243051},{"parent":"PRAGMA","rule":"yt.UseNativeYttypes","sum":760},{"parent":"PRAGMA","rule":"yt.UseNativeytTypes","sum":514},{"parent":"PRAGMA","rule":"yt.UseNewPredicateExtraction","sum":182128},{"parent":"PRAGMA","rule":"yt.UseRPCReaderInDQ","sum":79},{"parent":"PRAGMA","rule":"yt.UseRPCReaderInDq","sum":12130},{"parent":"PRAGMA","rule":"yt.UseSkiff","sum":4049},{"parent":"PRAGMA","rule":"yt.UseSystemColumns","sum":2137},{"parent":"PRAGMA","rule":"yt.UseTmpfs","sum":34350},{"parent":"PRAGMA","rule":"yt.UseTypeV2","sum":4421},{"parent":"PRAGMA","rule":"yt.UseYqlRowSpecCompactForm","sum":3756},{"parent":"PRAGMA","rule":"yt.UserSlots","sum":2521151},{"parent":"PRAGMA","rule":"yt.ViewIsolation","sum":966},{"parent":"PRAGMA","rule":"yt.WideFlowLimit","sum":157941},{"parent":"PRAGMA","rule":"yt.auth","sum":47422},{"parent":"PRAGMA","rule":"yt.datasizeperjob","sum":2025},{"parent":"PRAGMA","rule":"yt.defaultoperationweight","sum":47},{"parent":"PRAGMA","rule":"yt.forceinferschema","sum":50},{"parent":"PRAGMA","rule":"yt.inferSchema","sum":54517},{"parent":"PRAGMA","rule":"yt.infer_schema","sum":10},{"parent":"PRAGMA","rule":"yt.inferschema","sum":2993},{"parent":"PRAGMA","rule":"yt.mapjoinlimit","sum":97691},{"parent":"PRAGMA","rule":"yt.maxRowWeight","sum":1},{"parent":"PRAGMA","rule":"yt.max_row_weight","sum":19},{"parent":"PRAGMA","rule":"yt.maxjobcount","sum":5},{"parent":"PRAGMA","rule":"yt.maxrowweight","sum":41883},{"parent":"PRAGMA","rule":"yt.minPublishedAvgChunksize","sum":11},{"parent":"PRAGMA","rule":"yt.network_project","s
um":14438},{"parent":"PRAGMA","rule":"yt.pool","sum":6851101},{"parent":"PRAGMA","rule":"yt.pool_trees","sum":1},{"parent":"PRAGMA","rule":"yt.pooltrees","sum":53},{"parent":"PRAGMA","rule":"yt.publishedcompressioncodec","sum":402},{"parent":"PRAGMA","rule":"yt.staticPool","sum":1262549},{"parent":"PRAGMA","rule":"yt.static_pool","sum":20},{"parent":"PRAGMA","rule":"yt.staticpool","sum":10663},{"parent":"PRAGMA","rule":"yt.tmpFolder","sum":76618},{"parent":"PRAGMA","rule":"yt.tmpfolder","sum":65},{"parent":"PRAGMA","rule":"yt.useNativeYtTYpes","sum":1},{"parent":"PRAGMA","rule":"yt.useNativeYtTypes","sum":909},{"parent":"PRAGMA","rule":"yt.usenativeyttypes","sum":167},{"parent":"TRule_action_or_subquery_args","rule":"TRule_action_or_subquery_args.Block2","sum":4937230},{"parent":"TRule_action_or_subquery_args","rule":"TRule_action_or_subquery_args.Rule_opt_bind_parameter1","sum":13950455},{"parent":"TRule_action_or_subquery_args.TBlock2","rule":"TRule_action_or_subquery_args.TBlock2.Rule_opt_bind_parameter2","sum":7499701},{"parent":"TRule_action_or_subquery_args.TBlock2","rule":"TRule_action_or_subquery_args.TBlock2.Token1","sum":7499701},{"parent":"TRule_add_subexpr","rule":"TRule_add_subexpr.Block2","sum":136399194},{"parent":"TRule_add_subexpr","rule":"TRule_add_subexpr.Rule_mul_subexpr1","sum":15618407399},{"parent":"TRule_add_subexpr.TBlock2","rule":"TRule_add_subexpr.TBlock2.Rule_mul_subexpr2","sum":167963005},{"parent":"TRule_add_subexpr.TBlock2","rule":"TRule_add_subexpr.TBlock2.Token1","sum":167963005},{"parent":"TRule_an_id","rule":"TRule_an_id.Alt_an_id1","sum":2002378369},{"parent":"TRule_an_id.TAlt1","rule":"TRule_an_id.TAlt1.Rule_id1","sum":2002378369},{"parent":"TRule_an_id_as_compat","rule":"TRule_an_id_as_compat.Alt_an_id_as_compat1","sum":2986536},{"parent":"TRule_an_id_as_compat.TAlt1","rule":"TRule_an_id_as_compat.TAlt1.Rule_id_as_compat1","sum":2986536},{"parent":"TRule_an_id_expr","rule":"TRule_an_id_expr.Alt_an_id_expr1","sum":96266390},{"par
ent":"TRule_an_id_expr.TAlt1","rule":"TRule_an_id_expr.TAlt1.Rule_id_expr1","sum":96266390},{"parent":"TRule_an_id_hint","rule":"TRule_an_id_hint.Alt_an_id_hint1","sum":154334901},{"parent":"TRule_an_id_hint.TAlt1","rule":"TRule_an_id_hint.TAlt1.Rule_id_hint1","sum":154334901},{"parent":"TRule_an_id_or_type","rule":"TRule_an_id_or_type.Alt_an_id_or_type1","sum":9623698435},{"parent":"TRule_an_id_or_type","rule":"TRule_an_id_or_type.Alt_an_id_or_type2","sum":7},{"parent":"TRule_an_id_or_type.TAlt1","rule":"TRule_an_id_or_type.TAlt1.Rule_id_or_type1","sum":9623698435},{"parent":"TRule_an_id_or_type.TAlt2","rule":"TRule_an_id_or_type.TAlt2.Token1","sum":7},{"parent":"TRule_an_id_pure","rule":"TRule_an_id_pure.Alt_an_id_pure1","sum":677385895},{"parent":"TRule_an_id_pure.TAlt1","rule":"TRule_an_id_pure.TAlt1.Rule_identifier1","sum":677385895},{"parent":"TRule_an_id_schema","rule":"TRule_an_id_schema.Alt_an_id_schema1","sum":1181},{"parent":"TRule_an_id_schema.TAlt1","rule":"TRule_an_id_schema.TAlt1.Rule_id_schema1","sum":1181},{"parent":"TRule_an_id_table","rule":"TRule_an_id_table.Alt_an_id_table1","sum":330797323},{"parent":"TRule_an_id_table","rule":"TRule_an_id_table.Alt_an_id_table2","sum":1},{"parent":"TRule_an_id_table.TAlt1","rule":"TRule_an_id_table.TAlt1.Rule_id_table1","sum":330797323},{"parent":"TRule_an_id_table.TAlt2","rule":"TRule_an_id_table.TAlt2.Token1","sum":1},{"parent":"TRule_an_id_window","rule":"TRule_an_id_window.Alt_an_id_window1","sum":46640092},{"parent":"TRule_an_id_window.TAlt1","rule":"TRule_an_id_window.TAlt1.Rule_id_window1","sum":46640092},{"parent":"TRule_an_id_without","rule":"TRule_an_id_without.Alt_an_id_without1","sum":23278546},{"parent":"TRule_an_id_without.TAlt1","rule":"TRule_an_id_without.TAlt1.Rule_id_without1","sum":23278546},{"parent":"TRule_and_subexpr","rule":"TRule_and_subexpr.Block2","sum":29466},{"parent":"TRule_and_subexpr","rule":"TRule_and_subexpr.Rule_xor_subexpr1","sum":14281530840},{"parent":"TRule_and_subexpr.TBl
ock2","rule":"TRule_and_subexpr.TBlock2.Rule_xor_subexpr2","sum":29530},{"parent":"TRule_and_subexpr.TBlock2","rule":"TRule_and_subexpr.TBlock2.Token1","sum":29530},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr1","sum":5166388612},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr10","sum":35961481},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr11","sum":8505553},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr12","sum":14597994},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr2","sum":1770883384},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr3","sum":446646858},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr4","sum":398317432},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr5","sum":77082},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr6","sum":61695919},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr7","sum":927990981},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr8","sum":163639},{"parent":"TRule_atom_expr","rule":"TRule_atom_expr.Alt_atom_expr9","sum":510126},{"parent":"TRule_atom_expr.TAlt1","rule":"TRule_atom_expr.TAlt1.Rule_literal_value1","sum":5166388612},{"parent":"TRule_atom_expr.TAlt10","rule":"TRule_atom_expr.TAlt10.Rule_list_literal1","sum":35961481},{"parent":"TRule_atom_expr.TAlt11","rule":"TRule_atom_expr.TAlt11.Rule_dict_literal1","sum":8505553},{"parent":"TRule_atom_expr.TAlt12","rule":"TRule_atom_expr.TAlt12.Rule_struct_literal1","sum":14597994},{"parent":"TRule_atom_expr.TAlt2","rule":"TRule_atom_expr.TAlt2.Rule_bind_parameter1","sum":1770883384},{"parent":"TRule_atom_expr.TAlt3","rule":"TRule_atom_expr.TAlt3.Rule_lambda1","sum":446646858},{"parent":"TRule_atom_expr.TAlt4","rule":"TRule_atom_expr.TAlt4.Rule_cast_expr1","sum":398317432},{"parent":"TRule_atom_expr.TAlt5","rule":"TRule_atom_expr.TAlt5.Rule_exists_expr1","sum":77082},{"parent":"TRule_atom_
expr.TAlt6","rule":"TRule_atom_expr.TAlt6.Rule_case_expr1","sum":61695919},{"parent":"TRule_atom_expr.TAlt7","rule":"TRule_atom_expr.TAlt7.Block3","sum":927990981},{"parent":"TRule_atom_expr.TAlt7","rule":"TRule_atom_expr.TAlt7.Rule_an_id_or_type1","sum":927990981},{"parent":"TRule_atom_expr.TAlt7","rule":"TRule_atom_expr.TAlt7.Token2","sum":927990981},{"parent":"TRule_atom_expr.TAlt7.TBlock3","rule":"TRule_atom_expr.TAlt7.TBlock3.Alt1","sum":927989742},{"parent":"TRule_atom_expr.TAlt7.TBlock3","rule":"TRule_atom_expr.TAlt7.TBlock3.Alt2","sum":1239},{"parent":"TRule_atom_expr.TAlt7.TBlock3.TAlt1","rule":"TRule_atom_expr.TAlt7.TBlock3.TAlt1.Rule_id_or_type1","sum":927989742},{"parent":"TRule_atom_expr.TAlt7.TBlock3.TAlt2","rule":"TRule_atom_expr.TAlt7.TBlock3.TAlt2.Token1","sum":1239},{"parent":"TRule_atom_expr.TAlt8","rule":"TRule_atom_expr.TAlt8.Rule_value_constructor1","sum":163639},{"parent":"TRule_atom_expr.TAlt9","rule":"TRule_atom_expr.TAlt9.Rule_bitcast_expr1","sum":510126},{"parent":"TRule_bind_parameter","rule":"TRule_bind_parameter.Block2","sum":3701723791},{"parent":"TRule_bind_parameter","rule":"TRule_bind_parameter.Token1","sum":3701723791},{"parent":"TRule_bind_parameter.TBlock2","rule":"TRule_bind_parameter.TBlock2.Alt1","sum":3701711851},{"parent":"TRule_bind_parameter.TBlock2","rule":"TRule_bind_parameter.TBlock2.Alt2","sum":9460},{"parent":"TRule_bind_parameter.TBlock2","rule":"TRule_bind_parameter.TBlock2.Alt3","sum":2480},{"parent":"TRule_bind_parameter.TBlock2.TAlt1","rule":"TRule_bind_parameter.TBlock2.TAlt1.Rule_an_id_or_type1","sum":3701711851},{"parent":"TRule_bind_parameter.TBlock2.TAlt2","rule":"TRule_bind_parameter.TBlock2.TAlt2.Token1","sum":9460},{"parent":"TRule_bind_parameter.TBlock2.TAlt3","rule":"TRule_bind_parameter.TBlock2.TAlt3.Token1","sum":2480},{"parent":"TRule_bind_parameter_list","rule":"TRule_bind_parameter_list.Block2","sum":244789},{"parent":"TRule_bind_parameter_list","rule":"TRule_bind_parameter_list.Rule_bind_parameter
1","sum":1007079493},{"parent":"TRule_bind_parameter_list.TBlock2","rule":"TRule_bind_parameter_list.TBlock2.Rule_bind_parameter2","sum":336756},{"parent":"TRule_bind_parameter_list.TBlock2","rule":"TRule_bind_parameter_list.TBlock2.Token1","sum":336756},{"parent":"TRule_bit_subexpr","rule":"TRule_bit_subexpr.Block2","sum":138551599},{"parent":"TRule_bit_subexpr","rule":"TRule_bit_subexpr.Rule_add_subexpr1","sum":15460391762},{"parent":"TRule_bit_subexpr.TBlock2","rule":"TRule_bit_subexpr.TBlock2.Rule_add_subexpr2","sum":158015637},{"parent":"TRule_bit_subexpr.TBlock2","rule":"TRule_bit_subexpr.TBlock2.Token1","sum":158015637},{"parent":"TRule_bitcast_expr","rule":"TRule_bitcast_expr.Rule_expr3","sum":510126},{"parent":"TRule_bitcast_expr","rule":"TRule_bitcast_expr.Rule_type_name_simple5","sum":510126},{"parent":"TRule_bitcast_expr","rule":"TRule_bitcast_expr.Token1","sum":510126},{"parent":"TRule_bitcast_expr","rule":"TRule_bitcast_expr.Token2","sum":510126},{"parent":"TRule_bitcast_expr","rule":"TRule_bitcast_expr.Token4","sum":510126},{"parent":"TRule_bitcast_expr","rule":"TRule_bitcast_expr.Token6","sum":510126},{"parent":"TRule_bool_value","rule":"TRule_bool_value.Token1","sum":104231904},{"parent":"TRule_call_action","rule":"TRule_call_action.Block1","sum":9439855},{"parent":"TRule_call_action","rule":"TRule_call_action.Block3","sum":7169537},{"parent":"TRule_call_action","rule":"TRule_call_action.Token2","sum":9439855},{"parent":"TRule_call_action","rule":"TRule_call_action.Token4","sum":9439855},{"parent":"TRule_call_action.TBlock1","rule":"TRule_call_action.TBlock1.Alt1","sum":9313937},{"parent":"TRule_call_action.TBlock1","rule":"TRule_call_action.TBlock1.Alt2","sum":125918},{"parent":"TRule_call_action.TBlock1.TAlt1","rule":"TRule_call_action.TBlock1.TAlt1.Rule_bind_parameter1","sum":9313937},{"parent":"TRule_call_action.TBlock1.TAlt2","rule":"TRule_call_action.TBlock1.TAlt2.Token1","sum":125918},{"parent":"TRule_call_action.TBlock3","rule":"TRule_call_a
ction.TBlock3.Rule_expr_list1","sum":7169537},{"parent":"TRule_callable_arg","rule":"TRule_callable_arg.Block2","sum":1},{"parent":"TRule_callable_arg","rule":"TRule_callable_arg.Rule_variant_arg1","sum":17979058},{"parent":"TRule_callable_arg.TBlock2","rule":"TRule_callable_arg.TBlock2.Token1","sum":1},{"parent":"TRule_callable_arg.TBlock2","rule":"TRule_callable_arg.TBlock2.Token2","sum":1},{"parent":"TRule_callable_arg.TBlock2","rule":"TRule_callable_arg.TBlock2.Token3","sum":1},{"parent":"TRule_callable_arg_list","rule":"TRule_callable_arg_list.Block2","sum":4366203},{"parent":"TRule_callable_arg_list","rule":"TRule_callable_arg_list.Rule_callable_arg1","sum":9770032},{"parent":"TRule_callable_arg_list.TBlock2","rule":"TRule_callable_arg_list.TBlock2.Rule_callable_arg2","sum":8209026},{"parent":"TRule_callable_arg_list.TBlock2","rule":"TRule_callable_arg_list.TBlock2.Token1","sum":8209026},{"parent":"TRule_case_expr","rule":"TRule_case_expr.Block2","sum":4703469},{"parent":"TRule_case_expr","rule":"TRule_case_expr.Block3","sum":61695923},{"parent":"TRule_case_expr","rule":"TRule_case_expr.Block4","sum":61695923},{"parent":"TRule_case_expr","rule":"TRule_case_expr.Token1","sum":61695923},{"parent":"TRule_case_expr","rule":"TRule_case_expr.Token5","sum":61695923},{"parent":"TRule_case_expr.TBlock2","rule":"TRule_case_expr.TBlock2.Rule_expr1","sum":4703469},{"parent":"TRule_case_expr.TBlock3","rule":"TRule_case_expr.TBlock3.Rule_when_expr1","sum":158241873},{"parent":"TRule_case_expr.TBlock4","rule":"TRule_case_expr.TBlock4.Rule_expr2","sum":61695923},{"parent":"TRule_case_expr.TBlock4","rule":"TRule_case_expr.TBlock4.Token1","sum":61695923},{"parent":"TRule_cast_expr","rule":"TRule_cast_expr.Rule_expr3","sum":398361820},{"parent":"TRule_cast_expr","rule":"TRule_cast_expr.Rule_type_name_or_bind5","sum":398361820},{"parent":"TRule_cast_expr","rule":"TRule_cast_expr.Token1","sum":398361820},{"parent":"TRule_cast_expr","rule":"TRule_cast_expr.Token2","sum":398361820},
{"parent":"TRule_cast_expr","rule":"TRule_cast_expr.Token4","sum":398361820},{"parent":"TRule_cast_expr","rule":"TRule_cast_expr.Token6","sum":398361820},{"parent":"TRule_cluster_expr","rule":"TRule_cluster_expr.Block1","sum":6776988},{"parent":"TRule_cluster_expr","rule":"TRule_cluster_expr.Block2","sum":374662314},{"parent":"TRule_cluster_expr.TBlock1","rule":"TRule_cluster_expr.TBlock1.Rule_an_id1","sum":6776988},{"parent":"TRule_cluster_expr.TBlock1","rule":"TRule_cluster_expr.TBlock1.Token2","sum":6776988},{"parent":"TRule_cluster_expr.TBlock2","rule":"TRule_cluster_expr.TBlock2.Alt1","sum":374662314},{"parent":"TRule_cluster_expr.TBlock2.TAlt1","rule":"TRule_cluster_expr.TBlock2.TAlt1.Rule_pure_column_or_named1","sum":374662314},{"parent":"TRule_column_list","rule":"TRule_column_list.Block2","sum":282294},{"parent":"TRule_column_list","rule":"TRule_column_list.Block3","sum":805},{"parent":"TRule_column_list","rule":"TRule_column_list.Rule_column_name1","sum":848767},{"parent":"TRule_column_list.TBlock2","rule":"TRule_column_list.TBlock2.Rule_column_name2","sum":1124311},{"parent":"TRule_column_list.TBlock2","rule":"TRule_column_list.TBlock2.Token1","sum":1124311},{"parent":"TRule_column_list.TBlock3","rule":"TRule_column_list.TBlock3.Token1","sum":805},{"parent":"TRule_column_name","rule":"TRule_column_name.Rule_an_id2","sum":23324516},{"parent":"TRule_column_name","rule":"TRule_column_name.Rule_opt_id_prefix1","sum":23324516},{"parent":"TRule_column_order_by_specification","rule":"TRule_column_order_by_specification.Rule_an_id1","sum":107},{"parent":"TRule_column_schema","rule":"TRule_column_schema.Rule_an_id_schema1","sum":1181},{"parent":"TRule_column_schema","rule":"TRule_column_schema.Rule_opt_column_constraints4","sum":1181},{"parent":"TRule_column_schema","rule":"TRule_column_schema.Rule_type_name_or_bind2","sum":1181},{"parent":"TRule_commit_stmt","rule":"TRule_commit_stmt.Token1","sum":13017921},{"parent":"TRule_con_subexpr","rule":"TRule_con_subexpr.
Alt_con_subexpr1","sum":15927923723},{"parent":"TRule_con_subexpr","rule":"TRule_con_subexpr.Alt_con_subexpr2","sum":87792338},{"parent":"TRule_con_subexpr.TAlt1","rule":"TRule_con_subexpr.TAlt1.Rule_unary_subexpr1","sum":15927923723},{"parent":"TRule_con_subexpr.TAlt2","rule":"TRule_con_subexpr.TAlt2.Rule_unary_op1","sum":87792338},{"parent":"TRule_con_subexpr.TAlt2","rule":"TRule_con_subexpr.TAlt2.Rule_unary_subexpr2","sum":87792338},{"parent":"TRule_cond_expr","rule":"TRule_cond_expr.Alt_cond_expr1","sum":52200219},{"parent":"TRule_cond_expr","rule":"TRule_cond_expr.Alt_cond_expr2","sum":126116119},{"parent":"TRule_cond_expr","rule":"TRule_cond_expr.Alt_cond_expr3","sum":172659778},{"parent":"TRule_cond_expr","rule":"TRule_cond_expr.Alt_cond_expr4","sum":24970630},{"parent":"TRule_cond_expr","rule":"TRule_cond_expr.Alt_cond_expr5","sum":719590569},{"parent":"TRule_cond_expr.TAlt1","rule":"TRule_cond_expr.TAlt1.Block1","sum":9282308},{"parent":"TRule_cond_expr.TAlt1","rule":"TRule_cond_expr.TAlt1.Block4","sum":93089},{"parent":"TRule_cond_expr.TAlt1","rule":"TRule_cond_expr.TAlt1.Rule_eq_subexpr3","sum":52200219},{"parent":"TRule_cond_expr.TAlt1","rule":"TRule_cond_expr.TAlt1.Rule_match_op2","sum":52200219},{"parent":"TRule_cond_expr.TAlt1.TBlock1","rule":"TRule_cond_expr.TAlt1.TBlock1.Token1","sum":9282308},{"parent":"TRule_cond_expr.TAlt1.TBlock4","rule":"TRule_cond_expr.TAlt1.TBlock4.Rule_eq_subexpr2","sum":93089},{"parent":"TRule_cond_expr.TAlt1.TBlock4","rule":"TRule_cond_expr.TAlt1.TBlock4.Token1","sum":93089},{"parent":"TRule_cond_expr.TAlt2","rule":"TRule_cond_expr.TAlt2.Block1","sum":26171687},{"parent":"TRule_cond_expr.TAlt2","rule":"TRule_cond_expr.TAlt2.Block3","sum":3144732},{"parent":"TRule_cond_expr.TAlt2","rule":"TRule_cond_expr.TAlt2.Rule_in_expr4","sum":126116119},{"parent":"TRule_cond_expr.TAlt2","rule":"TRule_cond_expr.TAlt2.Token2","sum":126116119},{"parent":"TRule_cond_expr.TAlt2.TBlock1","rule":"TRule_cond_expr.TAlt2.TBlock1.Token1","sum":26
171687},{"parent":"TRule_cond_expr.TAlt2.TBlock3","rule":"TRule_cond_expr.TAlt2.TBlock3.Token1","sum":3144732},{"parent":"TRule_cond_expr.TAlt3","rule":"TRule_cond_expr.TAlt3.Block1","sum":172659778},{"parent":"TRule_cond_expr.TAlt3.TBlock1","rule":"TRule_cond_expr.TAlt3.TBlock1.Alt1","sum":163},{"parent":"TRule_cond_expr.TAlt3.TBlock1","rule":"TRule_cond_expr.TAlt3.TBlock1.Alt2","sum":412},{"parent":"TRule_cond_expr.TAlt3.TBlock1","rule":"TRule_cond_expr.TAlt3.TBlock1.Alt3","sum":60677332},{"parent":"TRule_cond_expr.TAlt3.TBlock1","rule":"TRule_cond_expr.TAlt3.TBlock1.Alt4","sum":111981871},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt1","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt1.Token1","sum":163},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt2","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt2.Token1","sum":412},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt3","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt3.Token1","sum":60677332},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt3","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt3.Token2","sum":60677332},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt4","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt4.Block1","sum":111940435},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt4","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt4.Token2","sum":111981871},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt4","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt4.Token3","sum":111981871},{"parent":"TRule_cond_expr.TAlt3.TBlock1.TAlt4.TBlock1","rule":"TRule_cond_expr.TAlt3.TBlock1.TAlt4.TBlock1.Token1","sum":111940435},{"parent":"TRule_cond_expr.TAlt4","rule":"TRule_cond_expr.TAlt4.Block1","sum":402989},{"parent":"TRule_cond_expr.TAlt4","rule":"TRule_cond_expr.TAlt4.Block3","sum":25},{"parent":"TRule_cond_expr.TAlt4","rule":"TRule_cond_expr.TAlt4.Rule_eq_subexpr4","sum":24970630},{"parent":"TRule_cond_expr.TAlt4","rule":"TRule_cond_expr.TAlt4.Rule_eq_subexpr6","sum":24970630},{"parent":"TRule_cond_expr.TAlt4","rule":"TRule_cond_expr.TAlt4.Token2","sum":24970630},{"parent":"
TRule_cond_expr.TAlt4","rule":"TRule_cond_expr.TAlt4.Token5","sum":24970630},{"parent":"TRule_cond_expr.TAlt4.TBlock1","rule":"TRule_cond_expr.TAlt4.TBlock1.Token1","sum":402989},{"parent":"TRule_cond_expr.TAlt4.TBlock3","rule":"TRule_cond_expr.TAlt4.TBlock3.Token1","sum":25},{"parent":"TRule_cond_expr.TAlt5","rule":"TRule_cond_expr.TAlt5.Block1","sum":719590569},{"parent":"TRule_cond_expr.TAlt5.TBlock1","rule":"TRule_cond_expr.TAlt5.TBlock1.Block1","sum":719592554},{"parent":"TRule_cond_expr.TAlt5.TBlock1","rule":"TRule_cond_expr.TAlt5.TBlock1.Rule_eq_subexpr2","sum":719592554},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.Alt1","sum":492260107},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.Alt2","sum":172254454},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.Alt3","sum":49582527},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.Alt4","sum":5097730},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.Alt5","sum":397736},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt1","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt1.Token1","sum":492260107},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt2","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt2.Token1","sum":172254454},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt3","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt3.Token1","sum":49582527},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt4","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt4.Token1","sum":5097730},{"parent":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt5","rule":"TRule_cond_expr.TAlt5.TBlock1.TBlock1.TAlt5.Rule_distinct_from_op1","sum":397736},{"parent":"TRule_create_table_entry","rule":"TRule_create_table_entry.Alt_create_table_entry1","sum":1181},{"parent":"TRule_create_table_entry","rule"
:"TRule_create_table_entry.Alt_create_table_entry2","sum":233},{"parent":"TRule_create_table_entry.TAlt1","rule":"TRule_create_table_entry.TAlt1.Rule_column_schema1","sum":1181},{"parent":"TRule_create_table_entry.TAlt2","rule":"TRule_create_table_entry.TAlt2.Rule_table_constraint1","sum":233},{"parent":"TRule_create_table_stmt","rule":"TRule_create_table_stmt.Block3","sum":127},{"parent":"TRule_create_table_stmt","rule":"TRule_create_table_stmt.Block8","sum":127},{"parent":"TRule_create_table_stmt","rule":"TRule_create_table_stmt.Rule_create_table_entry7","sum":127},{"parent":"TRule_create_table_stmt","rule":"TRule_create_table_stmt.Rule_simple_table_ref5","sum":127},{"parent":"TRule_create_table_stmt","rule":"TRule_create_table_stmt.Token1","sum":127},{"parent":"TRule_create_table_stmt","rule":"TRule_create_table_stmt.Token10","sum":127},{"parent":"TRule_create_table_stmt","rule":"TRule_create_table_stmt.Token6","sum":127},{"parent":"TRule_create_table_stmt.TBlock3","rule":"TRule_create_table_stmt.TBlock3.Alt1","sum":127},{"parent":"TRule_create_table_stmt.TBlock3.TAlt1","rule":"TRule_create_table_stmt.TBlock3.TAlt1.Token1","sum":127},{"parent":"TRule_create_table_stmt.TBlock8","rule":"TRule_create_table_stmt.TBlock8.Rule_create_table_entry2","sum":1287},{"parent":"TRule_create_table_stmt.TBlock8","rule":"TRule_create_table_stmt.TBlock8.Token1","sum":1287},{"parent":"TRule_cube_list","rule":"TRule_cube_list.Rule_ordinary_grouping_set_list3","sum":276671},{"parent":"TRule_cube_list","rule":"TRule_cube_list.Token1","sum":276671},{"parent":"TRule_cube_list","rule":"TRule_cube_list.Token2","sum":276671},{"parent":"TRule_cube_list","rule":"TRule_cube_list.Token4","sum":276671},{"parent":"TRule_declare_stmt","rule":"TRule_declare_stmt.Rule_bind_parameter2","sum":156358531},{"parent":"TRule_declare_stmt","rule":"TRule_declare_stmt.Rule_type_name4","sum":156358531},{"parent":"TRule_declare_stmt","rule":"TRule_declare_stmt.Token1","sum":156358531},{"parent":"TRule_declare_
stmt","rule":"TRule_declare_stmt.Token3","sum":156358531},{"parent":"TRule_define_action_or_subquery_body","rule":"TRule_define_action_or_subquery_body.Block1","sum":45559},{"parent":"TRule_define_action_or_subquery_body","rule":"TRule_define_action_or_subquery_body.Block2","sum":36124603},{"parent":"TRule_define_action_or_subquery_body.TBlock1","rule":"TRule_define_action_or_subquery_body.TBlock1.Token1","sum":45559},{"parent":"TRule_define_action_or_subquery_body.TBlock2","rule":"TRule_define_action_or_subquery_body.TBlock2.Block2","sum":16082328},{"parent":"TRule_define_action_or_subquery_body.TBlock2","rule":"TRule_define_action_or_subquery_body.TBlock2.Block3","sum":23526370},{"parent":"TRule_define_action_or_subquery_body.TBlock2","rule":"TRule_define_action_or_subquery_body.TBlock2.Rule_sql_stmt_core1","sum":36124603},{"parent":"TRule_define_action_or_subquery_body.TBlock2.TBlock2","rule":"TRule_define_action_or_subquery_body.TBlock2.TBlock2.Block1","sum":45905507},{"parent":"TRule_define_action_or_subquery_body.TBlock2.TBlock2","rule":"TRule_define_action_or_subquery_body.TBlock2.TBlock2.Rule_sql_stmt_core2","sum":45905507},{"parent":"TRule_define_action_or_subquery_body.TBlock2.TBlock2.TBlock1","rule":"TRule_define_action_or_subquery_body.TBlock2.TBlock2.TBlock1.Token1","sum":45918577},{"parent":"TRule_define_action_or_subquery_body.TBlock2.TBlock3","rule":"TRule_define_action_or_subquery_body.TBlock2.TBlock3.Token1","sum":23552077},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Block5","sum":13950455},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Rule_bind_parameter3","sum":22267674},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Rule_define_action_or_subquery_body8","sum":22267674},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Token1","sum":22267674},{"parent":"TRule_d
efine_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Token10","sum":22267674},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Token2","sum":22267674},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Token4","sum":22267674},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Token6","sum":22267674},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Token7","sum":22267674},{"parent":"TRule_define_action_or_subquery_stmt","rule":"TRule_define_action_or_subquery_stmt.Token9","sum":22267674},{"parent":"TRule_define_action_or_subquery_stmt.TBlock5","rule":"TRule_define_action_or_subquery_stmt.TBlock5.Rule_action_or_subquery_args1","sum":13950455},{"parent":"TRule_dict_literal","rule":"TRule_dict_literal.Block2","sum":8218509},{"parent":"TRule_dict_literal","rule":"TRule_dict_literal.Block3","sum":2249006},{"parent":"TRule_dict_literal","rule":"TRule_dict_literal.Token1","sum":8960374},{"parent":"TRule_dict_literal","rule":"TRule_dict_literal.Token4","sum":8960374},{"parent":"TRule_dict_literal.TBlock2","rule":"TRule_dict_literal.TBlock2.Rule_expr_dict_list1","sum":8218509},{"parent":"TRule_dict_literal.TBlock3","rule":"TRule_dict_literal.TBlock3.Token1","sum":2249006},{"parent":"TRule_distinct_from_op","rule":"TRule_distinct_from_op.Block2","sum":118890},{"parent":"TRule_distinct_from_op","rule":"TRule_distinct_from_op.Token1","sum":397736},{"parent":"TRule_distinct_from_op","rule":"TRule_distinct_from_op.Token3","sum":397736},{"parent":"TRule_distinct_from_op","rule":"TRule_distinct_from_op.Token4","sum":397736},{"parent":"TRule_distinct_from_op.TBlock2","rule":"TRule_distinct_from_op.TBlock2.Token1","sum":118890},{"parent":"TRule_do_stmt","rule":"TRule_do_stmt.Block2","sum":23303339},{"parent":"TRule_do_stmt","rule":"TRule_do_stmt.Token1","sum":23303339},{"parent":"TRule_do_
stmt.TBlock2","rule":"TRule_do_stmt.TBlock2.Alt1","sum":9439855},{"parent":"TRule_do_stmt.TBlock2","rule":"TRule_do_stmt.TBlock2.Alt2","sum":13863484},{"parent":"TRule_do_stmt.TBlock2.TAlt1","rule":"TRule_do_stmt.TBlock2.TAlt1.Rule_call_action1","sum":9439855},{"parent":"TRule_do_stmt.TBlock2.TAlt2","rule":"TRule_do_stmt.TBlock2.TAlt2.Rule_inline_action1","sum":13863484},{"parent":"TRule_double_question","rule":"TRule_double_question.Token1","sum":108321380},{"parent":"TRule_double_question","rule":"TRule_double_question.Token2","sum":108321380},{"parent":"TRule_drop_table_stmt","rule":"TRule_drop_table_stmt.Block2","sum":2566518},{"parent":"TRule_drop_table_stmt","rule":"TRule_drop_table_stmt.Block3","sum":3},{"parent":"TRule_drop_table_stmt","rule":"TRule_drop_table_stmt.Rule_simple_table_ref4","sum":2566518},{"parent":"TRule_drop_table_stmt","rule":"TRule_drop_table_stmt.Token1","sum":2566518},{"parent":"TRule_drop_table_stmt.TBlock2","rule":"TRule_drop_table_stmt.TBlock2.Alt1","sum":2566518},{"parent":"TRule_drop_table_stmt.TBlock2.TAlt1","rule":"TRule_drop_table_stmt.TBlock2.TAlt1.Token1","sum":2566518},{"parent":"TRule_drop_table_stmt.TBlock3","rule":"TRule_drop_table_stmt.TBlock3.Token1","sum":3},{"parent":"TRule_drop_table_stmt.TBlock3","rule":"TRule_drop_table_stmt.TBlock3.Token2","sum":3},{"parent":"TRule_eq_subexpr","rule":"TRule_eq_subexpr.Block2","sum":239470161},{"parent":"TRule_eq_subexpr","rule":"TRule_eq_subexpr.Rule_neq_subexpr1","sum":15103387492},{"parent":"TRule_eq_subexpr.TBlock2","rule":"TRule_eq_subexpr.TBlock2.Rule_neq_subexpr2","sum":239470216},{"parent":"TRule_eq_subexpr.TBlock2","rule":"TRule_eq_subexpr.TBlock2.Token1","sum":239470216},{"parent":"TRule_exists_expr","rule":"TRule_exists_expr.Block3","sum":77082},{"parent":"TRule_exists_expr","rule":"TRule_exists_expr.Token1","sum":77082},{"parent":"TRule_exists_expr","rule":"TRule_exists_expr.Token2","sum":77082},{"parent":"TRule_exists_expr","rule":"TRule_exists_expr.Token4","sum":77082},
{"parent":"TRule_exists_expr.TBlock3","rule":"TRule_exists_expr.TBlock3.Alt1","sum":77082},{"parent":"TRule_exists_expr.TBlock3.TAlt1","rule":"TRule_exists_expr.TBlock3.TAlt1.Rule_select_stmt1","sum":77082},{"parent":"TRule_expr","rule":"TRule_expr.Alt_expr1","sum":13695230947},{"parent":"TRule_expr","rule":"TRule_expr.Alt_expr2","sum":17375948},{"parent":"TRule_expr.TAlt1","rule":"TRule_expr.TAlt1.Block2","sum":68340563},{"parent":"TRule_expr.TAlt1","rule":"TRule_expr.TAlt1.Rule_or_subexpr1","sum":13695230947},{"parent":"TRule_expr.TAlt1.TBlock2","rule":"TRule_expr.TAlt1.TBlock2.Rule_or_subexpr2","sum":115179334},{"parent":"TRule_expr.TAlt1.TBlock2","rule":"TRule_expr.TAlt1.TBlock2.Token1","sum":115179334},{"parent":"TRule_expr.TAlt2","rule":"TRule_expr.TAlt2.Rule_type_name_composite1","sum":17375948},{"parent":"TRule_expr_dict_list","rule":"TRule_expr_dict_list.Block2","sum":7021313},{"parent":"TRule_expr_dict_list","rule":"TRule_expr_dict_list.Block3","sum":6633289},{"parent":"TRule_expr_dict_list","rule":"TRule_expr_dict_list.Rule_expr1","sum":8218509},{"parent":"TRule_expr_dict_list.TBlock2","rule":"TRule_expr_dict_list.TBlock2.Rule_expr2","sum":7021313},{"parent":"TRule_expr_dict_list.TBlock2","rule":"TRule_expr_dict_list.TBlock2.Token1","sum":7021313},{"parent":"TRule_expr_dict_list.TBlock3","rule":"TRule_expr_dict_list.TBlock3.Block3","sum":35212745},{"parent":"TRule_expr_dict_list.TBlock3","rule":"TRule_expr_dict_list.TBlock3.Rule_expr2","sum":42151326},{"parent":"TRule_expr_dict_list.TBlock3","rule":"TRule_expr_dict_list.TBlock3.Token1","sum":42151326},{"parent":"TRule_expr_dict_list.TBlock3.TBlock3","rule":"TRule_expr_dict_list.TBlock3.TBlock3.Rule_expr2","sum":35212745},{"parent":"TRule_expr_dict_list.TBlock3.TBlock3","rule":"TRule_expr_dict_list.TBlock3.TBlock3.Token1","sum":35212745},{"parent":"TRule_expr_list","rule":"TRule_expr_list.Block2","sum":41893722},{"parent":"TRule_expr_list","rule":"TRule_expr_list.Rule_expr1","sum":67465016},{"parent":"TRul
e_expr_list.TBlock2","rule":"TRule_expr_list.TBlock2.Rule_expr2","sum":206368221},{"parent":"TRule_expr_list.TBlock2","rule":"TRule_expr_list.TBlock2.Token1","sum":206368221},{"parent":"TRule_expr_struct_list","rule":"TRule_expr_struct_list.Block4","sum":12532295},{"parent":"TRule_expr_struct_list","rule":"TRule_expr_struct_list.Rule_expr1","sum":14467055},{"parent":"TRule_expr_struct_list","rule":"TRule_expr_struct_list.Rule_expr3","sum":14467055},{"parent":"TRule_expr_struct_list","rule":"TRule_expr_struct_list.Token2","sum":14467055},{"parent":"TRule_expr_struct_list.TBlock4","rule":"TRule_expr_struct_list.TBlock4.Rule_expr2","sum":31267575},{"parent":"TRule_expr_struct_list.TBlock4","rule":"TRule_expr_struct_list.TBlock4.Rule_expr4","sum":31267575},{"parent":"TRule_expr_struct_list.TBlock4","rule":"TRule_expr_struct_list.TBlock4.Token1","sum":31267575},{"parent":"TRule_expr_struct_list.TBlock4","rule":"TRule_expr_struct_list.TBlock4.Token3","sum":31267575},{"parent":"TRule_ext_order_by_clause","rule":"TRule_ext_order_by_clause.Block1","sum":2591132},{"parent":"TRule_ext_order_by_clause","rule":"TRule_ext_order_by_clause.Rule_order_by_clause2","sum":88637099},{"parent":"TRule_ext_order_by_clause.TBlock1","rule":"TRule_ext_order_by_clause.TBlock1.Token1","sum":2591132},{"parent":"TRule_flatten_by_arg","rule":"TRule_flatten_by_arg.Alt_flatten_by_arg1","sum":21351438},{"parent":"TRule_flatten_by_arg","rule":"TRule_flatten_by_arg.Alt_flatten_by_arg2","sum":4323714},{"parent":"TRule_flatten_by_arg.TAlt1","rule":"TRule_flatten_by_arg.TAlt1.Rule_named_column1","sum":21351438},{"parent":"TRule_flatten_by_arg.TAlt2","rule":"TRule_flatten_by_arg.TAlt2.Block3","sum":52003},{"parent":"TRule_flatten_by_arg.TAlt2","rule":"TRule_flatten_by_arg.TAlt2.Rule_named_expr_list2","sum":4323714},{"parent":"TRule_flatten_by_arg.TAlt2","rule":"TRule_flatten_by_arg.TAlt2.Token1","sum":4323714},{"parent":"TRule_flatten_by_arg.TAlt2","rule":"TRule_flatten_by_arg.TAlt2.Token4","sum":4323714},
{"parent":"TRule_flatten_by_arg.TAlt2.TBlock3","rule":"TRule_flatten_by_arg.TAlt2.TBlock3.Token1","sum":52003},{"parent":"TRule_flatten_source","rule":"TRule_flatten_source.Block2","sum":33608927},{"parent":"TRule_flatten_source","rule":"TRule_flatten_source.Rule_named_single_source1","sum":1052565235},{"parent":"TRule_flatten_source.TBlock2","rule":"TRule_flatten_source.TBlock2.Block2","sum":33608927},{"parent":"TRule_flatten_source.TBlock2","rule":"TRule_flatten_source.TBlock2.Token1","sum":33608927},{"parent":"TRule_flatten_source.TBlock2.TBlock2","rule":"TRule_flatten_source.TBlock2.TBlock2.Alt1","sum":25675152},{"parent":"TRule_flatten_source.TBlock2.TBlock2","rule":"TRule_flatten_source.TBlock2.TBlock2.Alt2","sum":7933775},{"parent":"TRule_flatten_source.TBlock2.TBlock2.TAlt1","rule":"TRule_flatten_source.TBlock2.TBlock2.TAlt1.Block1","sum":19618250},{"parent":"TRule_flatten_source.TBlock2.TBlock2.TAlt1","rule":"TRule_flatten_source.TBlock2.TBlock2.TAlt1.Rule_flatten_by_arg3","sum":25675152},{"parent":"TRule_flatten_source.TBlock2.TBlock2.TAlt1","rule":"TRule_flatten_source.TBlock2.TBlock2.TAlt1.Token2","sum":25675152},{"parent":"TRule_flatten_source.TBlock2.TBlock2.TAlt1.TBlock1","rule":"TRule_flatten_source.TBlock2.TBlock2.TAlt1.TBlock1.Token1","sum":19618250},{"parent":"TRule_flatten_source.TBlock2.TBlock2.TAlt2","rule":"TRule_flatten_source.TBlock2.TBlock2.TAlt2.Token1","sum":7933775},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Block1","sum":7000777},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Block2","sum":8},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Block8","sum":64710},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Rule_bind_parameter4","sum":7000888},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Rule_do_stmt7","sum":7000888},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Rule_expr6","sum":7000888},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Token3","sum":7000888},{"parent":"TRule_for_stmt","rule":"TRule_for_stmt.Toke
n5","sum":7000888},{"parent":"TRule_for_stmt.TBlock1","rule":"TRule_for_stmt.TBlock1.Token1","sum":7000777},{"parent":"TRule_for_stmt.TBlock2","rule":"TRule_for_stmt.TBlock2.Token1","sum":8},{"parent":"TRule_for_stmt.TBlock8","rule":"TRule_for_stmt.TBlock8.Rule_do_stmt2","sum":64710},{"parent":"TRule_for_stmt.TBlock8","rule":"TRule_for_stmt.TBlock8.Token1","sum":64710},{"parent":"TRule_group_by_clause","rule":"TRule_group_by_clause.Block2","sum":1604634},{"parent":"TRule_group_by_clause","rule":"TRule_group_by_clause.Block6","sum":6},{"parent":"TRule_group_by_clause","rule":"TRule_group_by_clause.Rule_grouping_element_list5","sum":137888913},{"parent":"TRule_group_by_clause","rule":"TRule_group_by_clause.Rule_opt_set_quantifier4","sum":137888913},{"parent":"TRule_group_by_clause","rule":"TRule_group_by_clause.Token1","sum":137888913},{"parent":"TRule_group_by_clause","rule":"TRule_group_by_clause.Token3","sum":137888913},{"parent":"TRule_group_by_clause.TBlock2","rule":"TRule_group_by_clause.TBlock2.Token1","sum":1604634},{"parent":"TRule_group_by_clause.TBlock6","rule":"TRule_group_by_clause.TBlock6.Rule_an_id2","sum":6},{"parent":"TRule_group_by_clause.TBlock6","rule":"TRule_group_by_clause.TBlock6.Token1","sum":6},{"parent":"TRule_grouping_element","rule":"TRule_grouping_element.Alt_grouping_element1","sum":300533615},{"parent":"TRule_grouping_element","rule":"TRule_grouping_element.Alt_grouping_element2","sum":62439},{"parent":"TRule_grouping_element","rule":"TRule_grouping_element.Alt_grouping_element3","sum":276671},{"parent":"TRule_grouping_element","rule":"TRule_grouping_element.Alt_grouping_element4","sum":92442},{"parent":"TRule_grouping_element","rule":"TRule_grouping_element.Alt_grouping_element5","sum":23},{"parent":"TRule_grouping_element.TAlt1","rule":"TRule_grouping_element.TAlt1.Rule_ordinary_grouping_set1","sum":300533615},{"parent":"TRule_grouping_element.TAlt2","rule":"TRule_grouping_element.TAlt2.Rule_rollup_list1","sum":62439},{"parent":"TRule_
grouping_element.TAlt3","rule":"TRule_grouping_element.TAlt3.Rule_cube_list1","sum":276671},{"parent":"TRule_grouping_element.TAlt4","rule":"TRule_grouping_element.TAlt4.Rule_grouping_sets_specification1","sum":92442},{"parent":"TRule_grouping_element.TAlt5","rule":"TRule_grouping_element.TAlt5.Rule_hopping_window_specification1","sum":23},{"parent":"TRule_grouping_element_list","rule":"TRule_grouping_element_list.Block2","sum":62735458},{"parent":"TRule_grouping_element_list","rule":"TRule_grouping_element_list.Rule_grouping_element1","sum":137981355},{"parent":"TRule_grouping_element_list.TBlock2","rule":"TRule_grouping_element_list.TBlock2.Rule_grouping_element2","sum":162983835},{"parent":"TRule_grouping_element_list.TBlock2","rule":"TRule_grouping_element_list.TBlock2.Token1","sum":162983835},{"parent":"TRule_grouping_sets_specification","rule":"TRule_grouping_sets_specification.Rule_grouping_element_list4","sum":92442},{"parent":"TRule_grouping_sets_specification","rule":"TRule_grouping_sets_specification.Token1","sum":92442},{"parent":"TRule_grouping_sets_specification","rule":"TRule_grouping_sets_specification.Token2","sum":92442},{"parent":"TRule_grouping_sets_specification","rule":"TRule_grouping_sets_specification.Token3","sum":92442},{"parent":"TRule_grouping_sets_specification","rule":"TRule_grouping_sets_specification.Token5","sum":92442},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Rule_expr3","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Rule_expr5","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Rule_expr7","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Rule_expr9","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Token1","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_
window_specification.Token10","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Token2","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Token4","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Token6","sum":23},{"parent":"TRule_hopping_window_specification","rule":"TRule_hopping_window_specification.Token8","sum":23},{"parent":"TRule_id","rule":"TRule_id.Alt_id1","sum":12118792042},{"parent":"TRule_id","rule":"TRule_id.Alt_id2","sum":531580247},{"parent":"TRule_id.TAlt1","rule":"TRule_id.TAlt1.Rule_identifier1","sum":12118792042},{"parent":"TRule_id.TAlt2","rule":"TRule_id.TAlt2.Rule_keyword1","sum":531580247},{"parent":"TRule_id_as_compat","rule":"TRule_id_as_compat.Alt_id_as_compat1","sum":2983926},{"parent":"TRule_id_as_compat","rule":"TRule_id_as_compat.Alt_id_as_compat2","sum":2610},{"parent":"TRule_id_as_compat.TAlt1","rule":"TRule_id_as_compat.TAlt1.Rule_identifier1","sum":2983926},{"parent":"TRule_id_as_compat.TAlt2","rule":"TRule_id_as_compat.TAlt2.Rule_keyword_as_compat1","sum":2610},{"parent":"TRule_id_expr","rule":"TRule_id_expr.Alt_id_expr1","sum":6918553572},{"parent":"TRule_id_expr","rule":"TRule_id_expr.Alt_id_expr2","sum":319560587},{"parent":"TRule_id_expr","rule":"TRule_id_expr.Alt_id_expr3","sum":430787},{"parent":"TRule_id_expr","rule":"TRule_id_expr.Alt_id_expr5","sum":35187248},{"parent":"TRule_id_expr","rule":"TRule_id_expr.Alt_id_expr6","sum":181489},{"parent":"TRule_id_expr.TAlt1","rule":"TRule_id_expr.TAlt1.Rule_identifier1","sum":6918553572},{"parent":"TRule_id_expr.TAlt2","rule":"TRule_id_expr.TAlt2.Rule_keyword_compat1","sum":319560587},{"parent":"TRule_id_expr.TAlt3","rule":"TRule_id_expr.TAlt3.Rule_keyword_alter_uncompat1","sum":430787},{"parent":"TRule_id_expr.TAlt5","rule":"TRule_id_expr.TAlt5.Rule_keyword_window_uncompat1","sum":35187248},{"parent":"TRule_id_expr.TAlt6","rule":"TRu
le_id_expr.TAlt6.Rule_keyword_hint_uncompat1","sum":181489},{"parent":"TRule_id_expr_in","rule":"TRule_id_expr_in.Alt_id_expr_in1","sum":5634058},{"parent":"TRule_id_expr_in","rule":"TRule_id_expr_in.Alt_id_expr_in2","sum":74062},{"parent":"TRule_id_expr_in","rule":"TRule_id_expr_in.Alt_id_expr_in4","sum":213},{"parent":"TRule_id_expr_in","rule":"TRule_id_expr_in.Alt_id_expr_in5","sum":53},{"parent":"TRule_id_expr_in.TAlt1","rule":"TRule_id_expr_in.TAlt1.Rule_identifier1","sum":5634058},{"parent":"TRule_id_expr_in.TAlt2","rule":"TRule_id_expr_in.TAlt2.Rule_keyword_compat1","sum":74062},{"parent":"TRule_id_expr_in.TAlt4","rule":"TRule_id_expr_in.TAlt4.Rule_keyword_window_uncompat1","sum":213},{"parent":"TRule_id_expr_in.TAlt5","rule":"TRule_id_expr_in.TAlt5.Rule_keyword_hint_uncompat1","sum":53},{"parent":"TRule_id_hint","rule":"TRule_id_hint.Alt_id_hint1","sum":154289157},{"parent":"TRule_id_hint","rule":"TRule_id_hint.Alt_id_hint2","sum":22872},{"parent":"TRule_id_hint","rule":"TRule_id_hint.Alt_id_hint3","sum":22872},{"parent":"TRule_id_hint.TAlt1","rule":"TRule_id_hint.TAlt1.Rule_identifier1","sum":154289157},{"parent":"TRule_id_hint.TAlt2","rule":"TRule_id_hint.TAlt2.Rule_keyword_compat1","sum":22872},{"parent":"TRule_id_hint.TAlt3","rule":"TRule_id_hint.TAlt3.Rule_keyword_expr_uncompat1","sum":22872},{"parent":"TRule_id_or_at","rule":"TRule_id_or_at.Block1","sum":4760096},{"parent":"TRule_id_or_at","rule":"TRule_id_or_at.Rule_an_id_or_type2","sum":133734499},{"parent":"TRule_id_or_at.TBlock1","rule":"TRule_id_or_at.TBlock1.Token1","sum":4760096},{"parent":"TRule_id_or_type","rule":"TRule_id_or_type.Alt_id_or_type1","sum":10542990928},{"parent":"TRule_id_or_type","rule":"TRule_id_or_type.Alt_id_or_type2","sum":11662845},{"parent":"TRule_id_or_type.TAlt1","rule":"TRule_id_or_type.TAlt1.Rule_id1","sum":10542990928},{"parent":"TRule_id_or_type.TAlt2","rule":"TRule_id_or_type.TAlt2.Rule_type_id1","sum":11662845},{"parent":"TRule_id_schema","rule":"TRule_id_schema.Al
t_id_schema1","sum":1169},{"parent":"TRule_id_schema","rule":"TRule_id_schema.Alt_id_schema2","sum":12},{"parent":"TRule_id_schema.TAlt1","rule":"TRule_id_schema.TAlt1.Rule_identifier1","sum":1169},{"parent":"TRule_id_schema.TAlt2","rule":"TRule_id_schema.TAlt2.Rule_keyword_compat1","sum":12},{"parent":"TRule_id_table","rule":"TRule_id_table.Alt_id_table1","sum":330135915},{"parent":"TRule_id_table","rule":"TRule_id_table.Alt_id_table2","sum":646698},{"parent":"TRule_id_table","rule":"TRule_id_table.Alt_id_table3","sum":370},{"parent":"TRule_id_table","rule":"TRule_id_table.Alt_id_table4","sum":12497},{"parent":"TRule_id_table","rule":"TRule_id_table.Alt_id_table5","sum":4},{"parent":"TRule_id_table","rule":"TRule_id_table.Alt_id_table6","sum":1829},{"parent":"TRule_id_table","rule":"TRule_id_table.Alt_id_table7","sum":10},{"parent":"TRule_id_table.TAlt1","rule":"TRule_id_table.TAlt1.Rule_identifier1","sum":330135915},{"parent":"TRule_id_table.TAlt2","rule":"TRule_id_table.TAlt2.Rule_keyword_compat1","sum":646698},{"parent":"TRule_id_table.TAlt3","rule":"TRule_id_table.TAlt3.Rule_keyword_expr_uncompat1","sum":370},{"parent":"TRule_id_table.TAlt4","rule":"TRule_id_table.TAlt4.Rule_keyword_select_uncompat1","sum":12497},{"parent":"TRule_id_table.TAlt5","rule":"TRule_id_table.TAlt5.Rule_keyword_in_uncompat1","sum":4},{"parent":"TRule_id_table.TAlt6","rule":"TRule_id_table.TAlt6.Rule_keyword_window_uncompat1","sum":1829},{"parent":"TRule_id_table.TAlt7","rule":"TRule_id_table.TAlt7.Rule_keyword_hint_uncompat1","sum":10},{"parent":"TRule_id_table_or_type","rule":"TRule_id_table_or_type.Alt_id_table_or_type1","sum":330797324},{"parent":"TRule_id_table_or_type","rule":"TRule_id_table_or_type.Alt_id_table_or_type2","sum":4271},{"parent":"TRule_id_table_or_type.TAlt1","rule":"TRule_id_table_or_type.TAlt1.Rule_an_id_table1","sum":330797324},{"parent":"TRule_id_table_or_type.TAlt2","rule":"TRule_id_table_or_type.TAlt2.Rule_type_id1","sum":4271},{"parent":"TRule_id_window","rul
e":"TRule_id_window.Alt_id_window1","sum":46369040},{"parent":"TRule_id_window","rule":"TRule_id_window.Alt_id_window2","sum":15650},{"parent":"TRule_id_window","rule":"TRule_id_window.Alt_id_window3","sum":247560},{"parent":"TRule_id_window","rule":"TRule_id_window.Alt_id_window5","sum":7842},{"parent":"TRule_id_window.TAlt1","rule":"TRule_id_window.TAlt1.Rule_identifier1","sum":46369040},{"parent":"TRule_id_window.TAlt2","rule":"TRule_id_window.TAlt2.Rule_keyword_compat1","sum":15650},{"parent":"TRule_id_window.TAlt3","rule":"TRule_id_window.TAlt3.Rule_keyword_expr_uncompat1","sum":247560},{"parent":"TRule_id_window.TAlt5","rule":"TRule_id_window.TAlt5.Rule_keyword_select_uncompat1","sum":7842},{"parent":"TRule_id_without","rule":"TRule_id_without.Alt_id_without1","sum":23000667},{"parent":"TRule_id_without","rule":"TRule_id_without.Alt_id_without2","sum":276200},{"parent":"TRule_id_without","rule":"TRule_id_without.Alt_id_without6","sum":1157},{"parent":"TRule_id_without","rule":"TRule_id_without.Alt_id_without7","sum":522},{"parent":"TRule_id_without.TAlt1","rule":"TRule_id_without.TAlt1.Rule_identifier1","sum":23000667},{"parent":"TRule_id_without.TAlt2","rule":"TRule_id_without.TAlt2.Rule_keyword_compat1","sum":276200},{"parent":"TRule_id_without.TAlt6","rule":"TRule_id_without.TAlt6.Rule_keyword_window_uncompat1","sum":1157},{"parent":"TRule_id_without.TAlt7","rule":"TRule_id_without.TAlt7.Rule_keyword_hint_uncompat1","sum":522},{"parent":"TRule_identifier","rule":"TRule_identifier.Token1","sum":20277145443},{"parent":"TRule_if_stmt","rule":"TRule_if_stmt.Block1","sum":8558735},{"parent":"TRule_if_stmt","rule":"TRule_if_stmt.Block5","sum":2972234},{"parent":"TRule_if_stmt","rule":"TRule_if_stmt.Rule_do_stmt4","sum":8558779},{"parent":"TRule_if_stmt","rule":"TRule_if_stmt.Rule_expr3","sum":8558779},{"parent":"TRule_if_stmt","rule":"TRule_if_stmt.Token2","sum":8558779},{"parent":"TRule_if_stmt.TBlock1","rule":"TRule_if_stmt.TBlock1.Token1","sum":8558735},{"pare
nt":"TRule_if_stmt.TBlock5","rule":"TRule_if_stmt.TBlock5.Rule_do_stmt2","sum":2972234},{"parent":"TRule_if_stmt.TBlock5","rule":"TRule_if_stmt.TBlock5.Token1","sum":2972234},{"parent":"TRule_import_stmt","rule":"TRule_import_stmt.Rule_module_path2","sum":16573208},{"parent":"TRule_import_stmt","rule":"TRule_import_stmt.Rule_named_bind_parameter_list4","sum":16573208},{"parent":"TRule_import_stmt","rule":"TRule_import_stmt.Token1","sum":16573208},{"parent":"TRule_import_stmt","rule":"TRule_import_stmt.Token3","sum":16573208},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr1","sum":2239},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr10","sum":5114871},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr11","sum":454821},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr2","sum":36393837},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr3","sum":65693148},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr4","sum":44388},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr5","sum":4},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr6","sum":2965596},{"parent":"TRule_in_atom_expr","rule":"TRule_in_atom_expr.Alt_in_atom_expr7","sum":9738777},{"parent":"TRule_in_atom_expr.TAlt1","rule":"TRule_in_atom_expr.TAlt1.Rule_literal_value1","sum":2239},{"parent":"TRule_in_atom_expr.TAlt10","rule":"TRule_in_atom_expr.TAlt10.Rule_list_literal1","sum":5114871},{"parent":"TRule_in_atom_expr.TAlt11","rule":"TRule_in_atom_expr.TAlt11.Rule_dict_literal1","sum":454821},{"parent":"TRule_in_atom_expr.TAlt2","rule":"TRule_in_atom_expr.TAlt2.Rule_bind_parameter1","sum":36393837},{"parent":"TRule_in_atom_expr.TAlt3","rule":"TRule_in_atom_expr.TAlt3.Rule_lambda1","sum":65693148},{"parent":"TRule_in_atom_expr.TAlt4","rule":"TRule_in_atom_expr.TAlt4.Rule_cast_expr1","sum":44388},{"parent":"TRule_in_atom_
expr.TAlt5","rule":"TRule_in_atom_expr.TAlt5.Rule_case_expr1","sum":4},{"parent":"TRule_in_atom_expr.TAlt6","rule":"TRule_in_atom_expr.TAlt6.Block3","sum":2965596},{"parent":"TRule_in_atom_expr.TAlt6","rule":"TRule_in_atom_expr.TAlt6.Rule_an_id_or_type1","sum":2965596},{"parent":"TRule_in_atom_expr.TAlt6","rule":"TRule_in_atom_expr.TAlt6.Token2","sum":2965596},{"parent":"TRule_in_atom_expr.TAlt6.TBlock3","rule":"TRule_in_atom_expr.TAlt6.TBlock3.Alt1","sum":2965596},{"parent":"TRule_in_atom_expr.TAlt6.TBlock3.TAlt1","rule":"TRule_in_atom_expr.TAlt6.TBlock3.TAlt1.Rule_id_or_type1","sum":2965596},{"parent":"TRule_in_atom_expr.TAlt7","rule":"TRule_in_atom_expr.TAlt7.Rule_select_stmt2","sum":9738777},{"parent":"TRule_in_atom_expr.TAlt7","rule":"TRule_in_atom_expr.TAlt7.Token1","sum":9738777},{"parent":"TRule_in_atom_expr.TAlt7","rule":"TRule_in_atom_expr.TAlt7.Token3","sum":9738777},{"parent":"TRule_in_expr","rule":"TRule_in_expr.Rule_in_unary_subexpr1","sum":126116119},{"parent":"TRule_in_unary_casual_subexpr","rule":"TRule_in_unary_casual_subexpr.Block1","sum":126116067},{"parent":"TRule_in_unary_casual_subexpr","rule":"TRule_in_unary_casual_subexpr.Rule_unary_subexpr_suffix2","sum":126116067},{"parent":"TRule_in_unary_casual_subexpr.TBlock1","rule":"TRule_in_unary_casual_subexpr.TBlock1.Alt1","sum":5708386},{"parent":"TRule_in_unary_casual_subexpr.TBlock1","rule":"TRule_in_unary_casual_subexpr.TBlock1.Alt2","sum":120407681},{"parent":"TRule_in_unary_casual_subexpr.TBlock1.TAlt1","rule":"TRule_in_unary_casual_subexpr.TBlock1.TAlt1.Rule_id_expr_in1","sum":5708386},{"parent":"TRule_in_unary_casual_subexpr.TBlock1.TAlt2","rule":"TRule_in_unary_casual_subexpr.TBlock1.TAlt2.Rule_in_atom_expr1","sum":120407681},{"parent":"TRule_in_unary_subexpr","rule":"TRule_in_unary_subexpr.Alt_in_unary_subexpr1","sum":126116067},{"parent":"TRule_in_unary_subexpr","rule":"TRule_in_unary_subexpr.Alt_in_unary_subexpr2","sum":52},{"parent":"TRule_in_unary_subexpr.TAlt1","rule":"TRule_in_unary
_subexpr.TAlt1.Rule_in_unary_casual_subexpr1","sum":126116067},{"parent":"TRule_in_unary_subexpr.TAlt2","rule":"TRule_in_unary_subexpr.TAlt2.Rule_json_api_expr1","sum":52},{"parent":"TRule_inline_action","rule":"TRule_inline_action.Rule_define_action_or_subquery_body2","sum":13863484},{"parent":"TRule_inline_action","rule":"TRule_inline_action.Token1","sum":13863484},{"parent":"TRule_inline_action","rule":"TRule_inline_action.Token3","sum":13863484},{"parent":"TRule_inline_action","rule":"TRule_inline_action.Token4","sum":13863484},{"parent":"TRule_integer","rule":"TRule_integer.Token1","sum":2807280781},{"parent":"TRule_integer_or_bind","rule":"TRule_integer_or_bind.Alt_integer_or_bind1","sum":55719701},{"parent":"TRule_integer_or_bind","rule":"TRule_integer_or_bind.Alt_integer_or_bind2","sum":48703},{"parent":"TRule_integer_or_bind.TAlt1","rule":"TRule_integer_or_bind.TAlt1.Rule_integer1","sum":55719701},{"parent":"TRule_integer_or_bind.TAlt2","rule":"TRule_integer_or_bind.TAlt2.Rule_bind_parameter1","sum":48703},{"parent":"TRule_into_simple_table_ref","rule":"TRule_into_simple_table_ref.Block2","sum":25326},{"parent":"TRule_into_simple_table_ref","rule":"TRule_into_simple_table_ref.Rule_simple_table_ref1","sum":208912996},{"parent":"TRule_into_simple_table_ref.TBlock2","rule":"TRule_into_simple_table_ref.TBlock2.Rule_pure_column_list3","sum":25326},{"parent":"TRule_into_simple_table_ref.TBlock2","rule":"TRule_into_simple_table_ref.TBlock2.Token1","sum":25326},{"parent":"TRule_into_simple_table_ref.TBlock2","rule":"TRule_into_simple_table_ref.TBlock2.Token2","sum":25326},{"parent":"TRule_into_table_stmt","rule":"TRule_into_table_stmt.Block1","sum":208912996},{"parent":"TRule_into_table_stmt","rule":"TRule_into_table_stmt.Rule_into_simple_table_ref3","sum":208912996},{"parent":"TRule_into_table_stmt","rule":"TRule_into_table_stmt.Rule_into_values_source4","sum":208912996},{"parent":"TRule_into_table_stmt","rule":"TRule_into_table_stmt.Token2","sum":208912996},{"par
ent":"TRule_into_table_stmt.TBlock1","rule":"TRule_into_table_stmt.TBlock1.Alt1","sum":208802107},{"parent":"TRule_into_table_stmt.TBlock1","rule":"TRule_into_table_stmt.TBlock1.Alt5","sum":110888},{"parent":"TRule_into_table_stmt.TBlock1","rule":"TRule_into_table_stmt.TBlock1.Alt6","sum":1},{"parent":"TRule_into_table_stmt.TBlock1.TAlt1","rule":"TRule_into_table_stmt.TBlock1.TAlt1.Token1","sum":208802107},{"parent":"TRule_into_table_stmt.TBlock1.TAlt5","rule":"TRule_into_table_stmt.TBlock1.TAlt5.Token1","sum":110888},{"parent":"TRule_into_table_stmt.TBlock1.TAlt6","rule":"TRule_into_table_stmt.TBlock1.TAlt6.Token1","sum":1},{"parent":"TRule_into_values_source","rule":"TRule_into_values_source.Alt_into_values_source1","sum":208912996},{"parent":"TRule_into_values_source.TAlt1","rule":"TRule_into_values_source.TAlt1.Block1","sum":4505304},{"parent":"TRule_into_values_source.TAlt1","rule":"TRule_into_values_source.TAlt1.Rule_values_source2","sum":208912996},{"parent":"TRule_into_values_source.TAlt1.TBlock1","rule":"TRule_into_values_source.TAlt1.TBlock1.Rule_pure_column_list1","sum":4505304},{"parent":"TRule_invoke_expr","rule":"TRule_invoke_expr.Block2","sum":2763524266},{"parent":"TRule_invoke_expr","rule":"TRule_invoke_expr.Rule_invoke_expr_tail4","sum":2877697076},{"parent":"TRule_invoke_expr","rule":"TRule_invoke_expr.Token1","sum":2877697076},{"parent":"TRule_invoke_expr","rule":"TRule_invoke_expr.Token3","sum":2877697076},{"parent":"TRule_invoke_expr.TBlock2","rule":"TRule_invoke_expr.TBlock2.Alt1","sum":2704512878},{"parent":"TRule_invoke_expr.TBlock2","rule":"TRule_invoke_expr.TBlock2.Alt2","sum":59011388},{"parent":"TRule_invoke_expr.TBlock2.TAlt1","rule":"TRule_invoke_expr.TBlock2.TAlt1.Block3","sum":9644223},{"parent":"TRule_invoke_expr.TBlock2.TAlt1","rule":"TRule_invoke_expr.TBlock2.TAlt1.Rule_named_expr_list2","sum":2704512878},{"parent":"TRule_invoke_expr.TBlock2.TAlt1","rule":"TRule_invoke_expr.TBlock2.TAlt1.Rule_opt_set_quantifier1","sum":2704512878}
,{"parent":"TRule_invoke_expr.TBlock2.TAlt1.TBlock3","rule":"TRule_invoke_expr.TBlock2.TAlt1.TBlock3.Token1","sum":9644223},{"parent":"TRule_invoke_expr.TBlock2.TAlt2","rule":"TRule_invoke_expr.TBlock2.TAlt2.Token1","sum":59011388},{"parent":"TRule_invoke_expr_tail","rule":"TRule_invoke_expr_tail.Block1","sum":7198049},{"parent":"TRule_invoke_expr_tail","rule":"TRule_invoke_expr_tail.Block2","sum":45306351},{"parent":"TRule_invoke_expr_tail.TBlock1","rule":"TRule_invoke_expr_tail.TBlock1.Alt1","sum":7198049},{"parent":"TRule_invoke_expr_tail.TBlock1.TAlt1","rule":"TRule_invoke_expr_tail.TBlock1.TAlt1.Rule_null_treatment1","sum":7198049},{"parent":"TRule_invoke_expr_tail.TBlock2","rule":"TRule_invoke_expr_tail.TBlock2.Rule_window_name_or_specification2","sum":45306351},{"parent":"TRule_invoke_expr_tail.TBlock2","rule":"TRule_invoke_expr_tail.TBlock2.Token1","sum":45306351},{"parent":"TRule_join_constraint","rule":"TRule_join_constraint.Alt_join_constraint1","sum":173071411},{"parent":"TRule_join_constraint","rule":"TRule_join_constraint.Alt_join_constraint2","sum":32459519},{"parent":"TRule_join_constraint.TAlt1","rule":"TRule_join_constraint.TAlt1.Rule_expr2","sum":173071411},{"parent":"TRule_join_constraint.TAlt1","rule":"TRule_join_constraint.TAlt1.Token1","sum":173071411},{"parent":"TRule_join_constraint.TAlt2","rule":"TRule_join_constraint.TAlt2.Rule_pure_column_or_named_list2","sum":32459519},{"parent":"TRule_join_constraint.TAlt2","rule":"TRule_join_constraint.TAlt2.Token1","sum":32459519},{"parent":"TRule_join_op","rule":"TRule_join_op.Alt_join_op1","sum":28045},{"parent":"TRule_join_op","rule":"TRule_join_op.Alt_join_op2","sum":208587089},{"parent":"TRule_join_op.TAlt1","rule":"TRule_join_op.TAlt1.Token1","sum":28045},{"parent":"TRule_join_op.TAlt2","rule":"TRule_join_op.TAlt2.Block2","sum":208587089},{"parent":"TRule_join_op.TAlt2","rule":"TRule_join_op.TAlt2.Token3","sum":208587089},{"parent":"TRule_join_op.TAlt2.TBlock2","rule":"TRule_join_op.TAlt2.TBlock
2.Alt1","sum":174822377},{"parent":"TRule_join_op.TAlt2.TBlock2","rule":"TRule_join_op.TAlt2.TBlock2.Alt2","sum":30708553},{"parent":"TRule_join_op.TAlt2.TBlock2","rule":"TRule_join_op.TAlt2.TBlock2.Alt3","sum":3056159},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.Block1","sum":126746215},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.Block2","sum":294988},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.Alt1","sum":118839517},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.Alt2","sum":1698319},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.Alt3","sum":285634},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.Alt4","sum":5922745},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt1.Block2","sum":19678134},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt1","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt1.Token1","sum":118839517},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt1.TBlock2","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt1.TBlock2.Token1","sum":19678134},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt2","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt2.Block2","sum":519406},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt2","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt2.Token1","sum":1698319},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt2.TBlock2","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt2.TBlock2.Token1","sum":519406},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt3","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt3.Token1","sum":285634},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBloc
k1.TAlt4","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock1.TAlt4.Token1","sum":5922745},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock2","rule":"TRule_join_op.TAlt2.TBlock2.TAlt1.TBlock2.Token1","sum":294988},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt2","rule":"TRule_join_op.TAlt2.TBlock2.TAlt2.Token1","sum":30708553},{"parent":"TRule_join_op.TAlt2.TBlock2.TAlt3","rule":"TRule_join_op.TAlt2.TBlock2.TAlt3.Token1","sum":3056159},{"parent":"TRule_join_source","rule":"TRule_join_source.Block1","sum":614714},{"parent":"TRule_join_source","rule":"TRule_join_source.Block3","sum":145402073},{"parent":"TRule_join_source","rule":"TRule_join_source.Rule_flatten_source2","sum":843950101},{"parent":"TRule_join_source.TBlock1","rule":"TRule_join_source.TBlock1.Token1","sum":614714},{"parent":"TRule_join_source.TBlock3","rule":"TRule_join_source.TBlock3.Block2","sum":14033269},{"parent":"TRule_join_source.TBlock3","rule":"TRule_join_source.TBlock3.Block4","sum":205530930},{"parent":"TRule_join_source.TBlock3","rule":"TRule_join_source.TBlock3.Rule_flatten_source3","sum":208615134},{"parent":"TRule_join_source.TBlock3","rule":"TRule_join_source.TBlock3.Rule_join_op1","sum":208615134},{"parent":"TRule_join_source.TBlock3.TBlock2","rule":"TRule_join_source.TBlock3.TBlock2.Token1","sum":14033269},{"parent":"TRule_join_source.TBlock3.TBlock4","rule":"TRule_join_source.TBlock3.TBlock4.Rule_join_constraint1","sum":205530930},{"parent":"TRule_json_api_expr","rule":"TRule_json_api_expr.Alt_json_api_expr1","sum":5842441},{"parent":"TRule_json_api_expr","rule":"TRule_json_api_expr.Alt_json_api_expr2","sum":188220},{"parent":"TRule_json_api_expr","rule":"TRule_json_api_expr.Alt_json_api_expr3","sum":317445},{"parent":"TRule_json_api_expr.TAlt1","rule":"TRule_json_api_expr.TAlt1.Rule_json_value1","sum":5842441},{"parent":"TRule_json_api_expr.TAlt2","rule":"TRule_json_api_expr.TAlt2.Rule_json_exists1","sum":188220},{"parent":"TRule_json_api_expr.TAlt3","rule":"TRule_json_api_expr.TAlt3.R
ule_json_query1","sum":317445},{"parent":"TRule_json_case_handler","rule":"TRule_json_case_handler.Alt_json_case_handler1","sum":6928},{"parent":"TRule_json_case_handler","rule":"TRule_json_case_handler.Alt_json_case_handler2","sum":14319},{"parent":"TRule_json_case_handler","rule":"TRule_json_case_handler.Alt_json_case_handler3","sum":200803},{"parent":"TRule_json_case_handler.TAlt1","rule":"TRule_json_case_handler.TAlt1.Token1","sum":6928},{"parent":"TRule_json_case_handler.TAlt2","rule":"TRule_json_case_handler.TAlt2.Token1","sum":14319},{"parent":"TRule_json_case_handler.TAlt3","rule":"TRule_json_case_handler.TAlt3.Rule_expr2","sum":200803},{"parent":"TRule_json_case_handler.TAlt3","rule":"TRule_json_case_handler.TAlt3.Token1","sum":200803},{"parent":"TRule_json_common_args","rule":"TRule_json_common_args.Block4","sum":20600},{"parent":"TRule_json_common_args","rule":"TRule_json_common_args.Rule_expr1","sum":6348106},{"parent":"TRule_json_common_args","rule":"TRule_json_common_args.Rule_jsonpath_spec3","sum":6348106},{"parent":"TRule_json_common_args","rule":"TRule_json_common_args.Token2","sum":6348106},{"parent":"TRule_json_common_args.TBlock4","rule":"TRule_json_common_args.TBlock4.Rule_json_variables2","sum":20600},{"parent":"TRule_json_common_args.TBlock4","rule":"TRule_json_common_args.TBlock4.Token1","sum":20600},{"parent":"TRule_json_exists","rule":"TRule_json_exists.Block4","sum":90},{"parent":"TRule_json_exists","rule":"TRule_json_exists.Rule_json_common_args3","sum":188220},{"parent":"TRule_json_exists","rule":"TRule_json_exists.Token1","sum":188220},{"parent":"TRule_json_exists","rule":"TRule_json_exists.Token2","sum":188220},{"parent":"TRule_json_exists","rule":"TRule_json_exists.Token5","sum":188220},{"parent":"TRule_json_exists.TBlock4","rule":"TRule_json_exists.TBlock4.Rule_json_exists_handler1","sum":90},{"parent":"TRule_json_exists_handler","rule":"TRule_json_exists_handler.Token1","sum":90},{"parent":"TRule_json_exists_handler","rule":"TRule_j
son_exists_handler.Token2","sum":90},{"parent":"TRule_json_exists_handler","rule":"TRule_json_exists_handler.Token3","sum":90},{"parent":"TRule_json_query","rule":"TRule_json_query.Block4","sum":190887},{"parent":"TRule_json_query","rule":"TRule_json_query.Block5","sum":86},{"parent":"TRule_json_query","rule":"TRule_json_query.Block6","sum":4124},{"parent":"TRule_json_query","rule":"TRule_json_query.Rule_json_common_args3","sum":317445},{"parent":"TRule_json_query","rule":"TRule_json_query.Token1","sum":317445},{"parent":"TRule_json_query","rule":"TRule_json_query.Token2","sum":317445},{"parent":"TRule_json_query","rule":"TRule_json_query.Token7","sum":317445},{"parent":"TRule_json_query.TBlock4","rule":"TRule_json_query.TBlock4.Rule_json_query_wrapper1","sum":190887},{"parent":"TRule_json_query.TBlock4","rule":"TRule_json_query.TBlock4.Token2","sum":190887},{"parent":"TRule_json_query.TBlock5","rule":"TRule_json_query.TBlock5.Rule_json_query_handler1","sum":86},{"parent":"TRule_json_query.TBlock5","rule":"TRule_json_query.TBlock5.Token2","sum":86},{"parent":"TRule_json_query.TBlock5","rule":"TRule_json_query.TBlock5.Token3","sum":86},{"parent":"TRule_json_query.TBlock6","rule":"TRule_json_query.TBlock6.Rule_json_query_handler1","sum":4124},{"parent":"TRule_json_query.TBlock6","rule":"TRule_json_query.TBlock6.Token2","sum":4124},{"parent":"TRule_json_query.TBlock6","rule":"TRule_json_query.TBlock6.Token3","sum":4124},{"parent":"TRule_json_query_handler","rule":"TRule_json_query_handler.Alt_json_query_handler1","sum":1675},{"parent":"TRule_json_query_handler","rule":"TRule_json_query_handler.Alt_json_query_handler2","sum":26},{"parent":"TRule_json_query_handler","rule":"TRule_json_query_handler.Alt_json_query_handler3","sum":2290},{"parent":"TRule_json_query_handler","rule":"TRule_json_query_handler.Alt_json_query_handler4","sum":219},{"parent":"TRule_json_query_handler.TAlt1","rule":"TRule_json_query_handler.TAlt1.Token1","sum":1675},{"parent":"TRule_json_query_hand
ler.TAlt2","rule":"TRule_json_query_handler.TAlt2.Token1","sum":26},{"parent":"TRule_json_query_handler.TAlt3","rule":"TRule_json_query_handler.TAlt3.Token1","sum":2290},{"parent":"TRule_json_query_handler.TAlt3","rule":"TRule_json_query_handler.TAlt3.Token2","sum":2290},{"parent":"TRule_json_query_handler.TAlt4","rule":"TRule_json_query_handler.TAlt4.Token1","sum":219},{"parent":"TRule_json_query_handler.TAlt4","rule":"TRule_json_query_handler.TAlt4.Token2","sum":219},{"parent":"TRule_json_query_wrapper","rule":"TRule_json_query_wrapper.Alt_json_query_wrapper1","sum":45},{"parent":"TRule_json_query_wrapper","rule":"TRule_json_query_wrapper.Alt_json_query_wrapper2","sum":190842},{"parent":"TRule_json_query_wrapper.TAlt1","rule":"TRule_json_query_wrapper.TAlt1.Block2","sum":10},{"parent":"TRule_json_query_wrapper.TAlt1","rule":"TRule_json_query_wrapper.TAlt1.Token1","sum":45},{"parent":"TRule_json_query_wrapper.TAlt1.TBlock2","rule":"TRule_json_query_wrapper.TAlt1.TBlock2.Token1","sum":10},{"parent":"TRule_json_query_wrapper.TAlt2","rule":"TRule_json_query_wrapper.TAlt2.Block2","sum":166555},{"parent":"TRule_json_query_wrapper.TAlt2","rule":"TRule_json_query_wrapper.TAlt2.Block3","sum":32119},{"parent":"TRule_json_query_wrapper.TAlt2","rule":"TRule_json_query_wrapper.TAlt2.Token1","sum":190842},{"parent":"TRule_json_query_wrapper.TAlt2.TBlock2","rule":"TRule_json_query_wrapper.TAlt2.TBlock2.Token1","sum":166555},{"parent":"TRule_json_query_wrapper.TAlt2.TBlock3","rule":"TRule_json_query_wrapper.TAlt2.TBlock3.Token1","sum":32119},{"parent":"TRule_json_value","rule":"TRule_json_value.Block4","sum":1332511},{"parent":"TRule_json_value","rule":"TRule_json_value.Block5","sum":202578},{"parent":"TRule_json_value","rule":"TRule_json_value.Rule_json_common_args3","sum":5842441},{"parent":"TRule_json_value","rule":"TRule_json_value.Token1","sum":5842441},{"parent":"TRule_json_value","rule":"TRule_json_value.Token2","sum":5842441},{"parent":"TRule_json_value","rule":"TRule_jso
n_value.Token6","sum":5842441},{"parent":"TRule_json_value.TBlock4","rule":"TRule_json_value.TBlock4.Rule_type_name_simple2","sum":1332511},{"parent":"TRule_json_value.TBlock4","rule":"TRule_json_value.TBlock4.Token1","sum":1332511},{"parent":"TRule_json_value.TBlock5","rule":"TRule_json_value.TBlock5.Rule_json_case_handler1","sum":222050},{"parent":"TRule_json_value.TBlock5","rule":"TRule_json_value.TBlock5.Token2","sum":222050},{"parent":"TRule_json_value.TBlock5","rule":"TRule_json_value.TBlock5.Token3","sum":222050},{"parent":"TRule_json_variable","rule":"TRule_json_variable.Rule_expr1","sum":20607},{"parent":"TRule_json_variable","rule":"TRule_json_variable.Rule_json_variable_name3","sum":20607},{"parent":"TRule_json_variable","rule":"TRule_json_variable.Token2","sum":20607},{"parent":"TRule_json_variable_name","rule":"TRule_json_variable_name.Alt_json_variable_name1","sum":18347},{"parent":"TRule_json_variable_name","rule":"TRule_json_variable_name.Alt_json_variable_name2","sum":2260},{"parent":"TRule_json_variable_name.TAlt1","rule":"TRule_json_variable_name.TAlt1.Rule_id_expr1","sum":18347},{"parent":"TRule_json_variable_name.TAlt2","rule":"TRule_json_variable_name.TAlt2.Token1","sum":2260},{"parent":"TRule_json_variables","rule":"TRule_json_variables.Block2","sum":7},{"parent":"TRule_json_variables","rule":"TRule_json_variables.Rule_json_variable1","sum":20600},{"parent":"TRule_json_variables.TBlock2","rule":"TRule_json_variables.TBlock2.Rule_json_variable2","sum":7},{"parent":"TRule_json_variables.TBlock2","rule":"TRule_json_variables.TBlock2.Token1","sum":7},{"parent":"TRule_jsonpath_spec","rule":"TRule_jsonpath_spec.Token1","sum":6348106},{"parent":"TRule_key_expr","rule":"TRule_key_expr.Rule_expr2","sum":173319845},{"parent":"TRule_key_expr","rule":"TRule_key_expr.Token1","sum":173319845},{"parent":"TRule_key_expr","rule":"TRule_key_expr.Token3","sum":173319845},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword1","sum":481209679},{"parent":"TR
ule_keyword","rule":"TRule_keyword.Alt_keyword2","sum":33508736},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword3","sum":505061},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword4","sum":9146576},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword5","sum":777362},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword6","sum":23580},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword7","sum":3895250},{"parent":"TRule_keyword","rule":"TRule_keyword.Alt_keyword8","sum":2514003},{"parent":"TRule_keyword.TAlt1","rule":"TRule_keyword.TAlt1.Rule_keyword_compat1","sum":481209679},{"parent":"TRule_keyword.TAlt2","rule":"TRule_keyword.TAlt2.Rule_keyword_expr_uncompat1","sum":33508736},{"parent":"TRule_keyword.TAlt3","rule":"TRule_keyword.TAlt3.Rule_keyword_table_uncompat1","sum":505061},{"parent":"TRule_keyword.TAlt4","rule":"TRule_keyword.TAlt4.Rule_keyword_select_uncompat1","sum":9146576},{"parent":"TRule_keyword.TAlt5","rule":"TRule_keyword.TAlt5.Rule_keyword_alter_uncompat1","sum":777362},{"parent":"TRule_keyword.TAlt6","rule":"TRule_keyword.TAlt6.Rule_keyword_in_uncompat1","sum":23580},{"parent":"TRule_keyword.TAlt7","rule":"TRule_keyword.TAlt7.Rule_keyword_window_uncompat1","sum":3895250},{"parent":"TRule_keyword.TAlt8","rule":"TRule_keyword.TAlt8.Rule_keyword_hint_uncompat1","sum":2514003},{"parent":"TRule_keyword_alter_uncompat","rule":"TRule_keyword_alter_uncompat.Token1","sum":1208149},{"parent":"TRule_keyword_as_compat","rule":"TRule_keyword_as_compat.Token1","sum":2610},{"parent":"TRule_keyword_compat","rule":"TRule_keyword_compat.Token1","sum":801805760},{"parent":"TRule_keyword_expr_uncompat","rule":"TRule_keyword_expr_uncompat.Token1","sum":33779538},{"parent":"TRule_keyword_hint_uncompat","rule":"TRule_keyword_hint_uncompat.Token1","sum":2696077},{"parent":"TRule_keyword_in_uncompat","rule":"TRule_keyword_in_uncompat.Token1","sum":23584},{"parent":"TRule_keyword_select_uncompat","rule":"TRule_keyword_select_uncompat.T
oken1","sum":9166915},{"parent":"TRule_keyword_table_uncompat","rule":"TRule_keyword_table_uncompat.Token1","sum":505061},{"parent":"TRule_keyword_window_uncompat","rule":"TRule_keyword_window_uncompat.Token1","sum":39085697},{"parent":"TRule_lambda","rule":"TRule_lambda.Block2","sum":208848249},{"parent":"TRule_lambda","rule":"TRule_lambda.Rule_smart_parenthesis1","sum":512340006},{"parent":"TRule_lambda.TBlock2","rule":"TRule_lambda.TBlock2.Block2","sum":208848249},{"parent":"TRule_lambda.TBlock2","rule":"TRule_lambda.TBlock2.Token1","sum":208848249},{"parent":"TRule_lambda.TBlock2.TBlock2","rule":"TRule_lambda.TBlock2.TBlock2.Alt1","sum":42668471},{"parent":"TRule_lambda.TBlock2.TBlock2","rule":"TRule_lambda.TBlock2.TBlock2.Alt2","sum":166179778},{"parent":"TRule_lambda.TBlock2.TBlock2.TAlt1","rule":"TRule_lambda.TBlock2.TBlock2.TAlt1.Rule_expr2","sum":42668471},{"parent":"TRule_lambda.TBlock2.TBlock2.TAlt1","rule":"TRule_lambda.TBlock2.TBlock2.TAlt1.Token1","sum":42668471},{"parent":"TRule_lambda.TBlock2.TBlock2.TAlt1","rule":"TRule_lambda.TBlock2.TBlock2.TAlt1.Token3","sum":42668471},{"parent":"TRule_lambda.TBlock2.TBlock2.TAlt2","rule":"TRule_lambda.TBlock2.TBlock2.TAlt2.Rule_lambda_body2","sum":166179778},{"parent":"TRule_lambda.TBlock2.TBlock2.TAlt2","rule":"TRule_lambda.TBlock2.TBlock2.TAlt2.Token1","sum":166179778},{"parent":"TRule_lambda.TBlock2.TBlock2.TAlt2","rule":"TRule_lambda.TBlock2.TBlock2.TAlt2.Token3","sum":166179778},{"parent":"TRule_lambda_body","rule":"TRule_lambda_body.Block1","sum":160},{"parent":"TRule_lambda_body","rule":"TRule_lambda_body.Block2","sum":34755805},{"parent":"TRule_lambda_body","rule":"TRule_lambda_body.Block5","sum":87613937},{"parent":"TRule_lambda_body","rule":"TRule_lambda_body.Rule_expr4","sum":166179778},{"parent":"TRule_lambda_body","rule":"TRule_lambda_body.Token3","sum":166179778},{"parent":"TRule_lambda_body.TBlock1","rule":"TRule_lambda_body.TBlock1.Token1","sum":160},{"parent":"TRule_lambda_body.TBlock2","rule":"
TRule_lambda_body.TBlock2.Block2","sum":63704082},{"parent":"TRule_lambda_body.TBlock2","rule":"TRule_lambda_body.TBlock2.Rule_lambda_stmt1","sum":63704082},{"parent":"TRule_lambda_body.TBlock2.TBlock2","rule":"TRule_lambda_body.TBlock2.TBlock2.Token1","sum":63705246},{"parent":"TRule_lambda_body.TBlock5","rule":"TRule_lambda_body.TBlock5.Token1","sum":87622552},{"parent":"TRule_lambda_stmt","rule":"TRule_lambda_stmt.Alt_lambda_stmt1","sum":63701461},{"parent":"TRule_lambda_stmt","rule":"TRule_lambda_stmt.Alt_lambda_stmt2","sum":2621},{"parent":"TRule_lambda_stmt.TAlt1","rule":"TRule_lambda_stmt.TAlt1.Rule_named_nodes_stmt1","sum":63701461},{"parent":"TRule_lambda_stmt.TAlt2","rule":"TRule_lambda_stmt.TAlt2.Rule_import_stmt1","sum":2621},{"parent":"TRule_list_literal","rule":"TRule_list_literal.Block2","sum":35332851},{"parent":"TRule_list_literal","rule":"TRule_list_literal.Block3","sum":2294453},{"parent":"TRule_list_literal","rule":"TRule_list_literal.Token1","sum":41076352},{"parent":"TRule_list_literal","rule":"TRule_list_literal.Token4","sum":41076352},{"parent":"TRule_list_literal.TBlock2","rule":"TRule_list_literal.TBlock2.Rule_expr_list1","sum":35332851},{"parent":"TRule_list_literal.TBlock3","rule":"TRule_list_literal.TBlock3.Token1","sum":2294453},{"parent":"TRule_literal_value","rule":"TRule_literal_value.Alt_literal_value1","sum":2751561079},{"parent":"TRule_literal_value","rule":"TRule_literal_value.Alt_literal_value10","sum":2},{"parent":"TRule_literal_value","rule":"TRule_literal_value.Alt_literal_value2","sum":109092927},{"parent":"TRule_literal_value","rule":"TRule_literal_value.Alt_literal_value3","sum":2122067898},{"parent":"TRule_literal_value","rule":"TRule_literal_value.Alt_literal_value5","sum":79437041},{"parent":"TRule_literal_value","rule":"TRule_literal_value.Alt_literal_value9","sum":104231904},{"parent":"TRule_literal_value.TAlt1","rule":"TRule_literal_value.TAlt1.Rule_integer1","sum":2751561079},{"parent":"TRule_literal_value.TAlt10","
rule":"TRule_literal_value.TAlt10.Token1","sum":2},{"parent":"TRule_literal_value.TAlt2","rule":"TRule_literal_value.TAlt2.Rule_real1","sum":109092927},{"parent":"TRule_literal_value.TAlt3","rule":"TRule_literal_value.TAlt3.Token1","sum":2122067898},{"parent":"TRule_literal_value.TAlt5","rule":"TRule_literal_value.TAlt5.Token1","sum":79437041},{"parent":"TRule_literal_value.TAlt9","rule":"TRule_literal_value.TAlt9.Rule_bool_value1","sum":104231904},{"parent":"TRule_match_op","rule":"TRule_match_op.Token1","sum":52200219},{"parent":"TRule_module_path","rule":"TRule_module_path.Block3","sum":361562},{"parent":"TRule_module_path","rule":"TRule_module_path.Rule_an_id2","sum":16573208},{"parent":"TRule_module_path.TBlock3","rule":"TRule_module_path.TBlock3.Rule_an_id2","sum":1077738},{"parent":"TRule_module_path.TBlock3","rule":"TRule_module_path.TBlock3.Token1","sum":1077738},{"parent":"TRule_mul_subexpr","rule":"TRule_mul_subexpr.Block2","sum":115170120},{"parent":"TRule_mul_subexpr","rule":"TRule_mul_subexpr.Rule_con_subexpr1","sum":15786370404},{"parent":"TRule_mul_subexpr.TBlock2","rule":"TRule_mul_subexpr.TBlock2.Rule_con_subexpr2","sum":229345657},{"parent":"TRule_mul_subexpr.TBlock2","rule":"TRule_mul_subexpr.TBlock2.Token1","sum":229345657},{"parent":"TRule_named_bind_parameter","rule":"TRule_named_bind_parameter.Block2","sum":135389},{"parent":"TRule_named_bind_parameter","rule":"TRule_named_bind_parameter.Rule_bind_parameter1","sum":37888685},{"parent":"TRule_named_bind_parameter.TBlock2","rule":"TRule_named_bind_parameter.TBlock2.Rule_bind_parameter2","sum":135389},{"parent":"TRule_named_bind_parameter.TBlock2","rule":"TRule_named_bind_parameter.TBlock2.Token1","sum":135389},{"parent":"TRule_named_bind_parameter_list","rule":"TRule_named_bind_parameter_list.Block2","sum":7054906},{"parent":"TRule_named_bind_parameter_list","rule":"TRule_named_bind_parameter_list.Rule_named_bind_parameter1","sum":16573208},{"parent":"TRule_named_bind_parameter_list.TBlock2","r
ule":"TRule_named_bind_parameter_list.TBlock2.Rule_named_bind_parameter2","sum":21315477},{"parent":"TRule_named_bind_parameter_list.TBlock2","rule":"TRule_named_bind_parameter_list.TBlock2.Token1","sum":21315477},{"parent":"TRule_named_column","rule":"TRule_named_column.Block2","sum":7171352},{"parent":"TRule_named_column","rule":"TRule_named_column.Rule_column_name1","sum":21351438},{"parent":"TRule_named_column.TBlock2","rule":"TRule_named_column.TBlock2.Rule_an_id2","sum":7171352},{"parent":"TRule_named_column.TBlock2","rule":"TRule_named_column.TBlock2.Token1","sum":7171352},{"parent":"TRule_named_expr","rule":"TRule_named_expr.Block2","sum":156218746},{"parent":"TRule_named_expr","rule":"TRule_named_expr.Rule_expr1","sum":7199395472},{"parent":"TRule_named_expr.TBlock2","rule":"TRule_named_expr.TBlock2.Rule_an_id_or_type2","sum":156218746},{"parent":"TRule_named_expr.TBlock2","rule":"TRule_named_expr.TBlock2.Token1","sum":156218746},{"parent":"TRule_named_expr_list","rule":"TRule_named_expr_list.Block2","sum":1182917674},{"parent":"TRule_named_expr_list","rule":"TRule_named_expr_list.Rule_named_expr1","sum":3239140964},{"parent":"TRule_named_expr_list.TBlock2","rule":"TRule_named_expr_list.TBlock2.Rule_named_expr2","sum":3466442386},{"parent":"TRule_named_expr_list.TBlock2","rule":"TRule_named_expr_list.TBlock2.Token1","sum":3466442386},{"parent":"TRule_named_nodes_stmt","rule":"TRule_named_nodes_stmt.Block3","sum":1007079493},{"parent":"TRule_named_nodes_stmt","rule":"TRule_named_nodes_stmt.Rule_bind_parameter_list1","sum":1007079493},{"parent":"TRule_named_nodes_stmt","rule":"TRule_named_nodes_stmt.Token2","sum":1007079493},{"parent":"TRule_named_nodes_stmt.TBlock3","rule":"TRule_named_nodes_stmt.TBlock3.Alt1","sum":677142252},{"parent":"TRule_named_nodes_stmt.TBlock3","rule":"TRule_named_nodes_stmt.TBlock3.Alt2","sum":329937241},{"parent":"TRule_named_nodes_stmt.TBlock3.TAlt1","rule":"TRule_named_nodes_stmt.TBlock3.TAlt1.Rule_expr1","sum":677142252},{"paren
t":"TRule_named_nodes_stmt.TBlock3.TAlt2","rule":"TRule_named_nodes_stmt.TBlock3.TAlt2.Rule_subselect_stmt1","sum":329937241},{"parent":"TRule_named_single_source","rule":"TRule_named_single_source.Block2","sum":1},{"parent":"TRule_named_single_source","rule":"TRule_named_single_source.Block3","sum":442565115},{"parent":"TRule_named_single_source","rule":"TRule_named_single_source.Block4","sum":403893},{"parent":"TRule_named_single_source","rule":"TRule_named_single_source.Rule_single_source1","sum":1058157852},{"parent":"TRule_named_single_source.TBlock2","rule":"TRule_named_single_source.TBlock2.Rule_row_pattern_recognition_clause1","sum":1},{"parent":"TRule_named_single_source.TBlock3","rule":"TRule_named_single_source.TBlock3.Block1","sum":442565115},{"parent":"TRule_named_single_source.TBlock3","rule":"TRule_named_single_source.TBlock3.Block2","sum":159781},{"parent":"TRule_named_single_source.TBlock3.TBlock1","rule":"TRule_named_single_source.TBlock3.TBlock1.Alt1","sum":440444825},{"parent":"TRule_named_single_source.TBlock3.TBlock1","rule":"TRule_named_single_source.TBlock3.TBlock1.Alt2","sum":2120290},{"parent":"TRule_named_single_source.TBlock3.TBlock1.TAlt1","rule":"TRule_named_single_source.TBlock3.TBlock1.TAlt1.Rule_an_id2","sum":440444825},{"parent":"TRule_named_single_source.TBlock3.TBlock1.TAlt1","rule":"TRule_named_single_source.TBlock3.TBlock1.TAlt1.Token1","sum":440444825},{"parent":"TRule_named_single_source.TBlock3.TBlock1.TAlt2","rule":"TRule_named_single_source.TBlock3.TBlock1.TAlt2.Rule_an_id_as_compat1","sum":2120290},{"parent":"TRule_named_single_source.TBlock3.TBlock2","rule":"TRule_named_single_source.TBlock3.TBlock2.Rule_pure_column_list1","sum":159781},{"parent":"TRule_named_single_source.TBlock4","rule":"TRule_named_single_source.TBlock4.Alt1","sum":75224},{"parent":"TRule_named_single_source.TBlock4","rule":"TRule_named_single_source.TBlock4.Alt2","sum":328669},{"parent":"TRule_named_single_source.TBlock4.TAlt1","rule":"TRule_named_sin
gle_source.TBlock4.TAlt1.Rule_sample_clause1","sum":75224},{"parent":"TRule_named_single_source.TBlock4.TAlt2","rule":"TRule_named_single_source.TBlock4.TAlt2.Rule_tablesample_clause1","sum":328669},{"parent":"TRule_neq_subexpr","rule":"TRule_neq_subexpr.Block2","sum":9143475},{"parent":"TRule_neq_subexpr","rule":"TRule_neq_subexpr.Block3","sum":112095111},{"parent":"TRule_neq_subexpr","rule":"TRule_neq_subexpr.Rule_bit_subexpr1","sum":15451179088},{"parent":"TRule_neq_subexpr.TBlock2","rule":"TRule_neq_subexpr.TBlock2.Block1","sum":9212674},{"parent":"TRule_neq_subexpr.TBlock2","rule":"TRule_neq_subexpr.TBlock2.Rule_bit_subexpr2","sum":9212674},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.Alt1","sum":8101003},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.Alt2","sum":53387},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.Alt3","sum":469},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.Alt4","sum":23},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.Alt5","sum":757653},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.Alt6","sum":134987},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.Alt7","sum":165152},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt1","rule":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt1.Token1","sum":8101003},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt2","rule":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt2.Rule_shift_right1","sum":53387},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt3","rule":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt3.Token1","sum":469},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt4","rule":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt4.Rule_rot_right1","sum":23},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt5","rule":"TRule_neq_subexpr.TBlock2.TBlock1.TA
lt5.Token1","sum":757653},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt6","rule":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt6.Token1","sum":134987},{"parent":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt7","rule":"TRule_neq_subexpr.TBlock2.TBlock1.TAlt7.Token1","sum":165152},{"parent":"TRule_neq_subexpr.TBlock3","rule":"TRule_neq_subexpr.TBlock3.Alt1","sum":108321380},{"parent":"TRule_neq_subexpr.TBlock3","rule":"TRule_neq_subexpr.TBlock3.Alt2","sum":3773731},{"parent":"TRule_neq_subexpr.TBlock3.TAlt1","rule":"TRule_neq_subexpr.TBlock3.TAlt1.Rule_double_question1","sum":108321380},{"parent":"TRule_neq_subexpr.TBlock3.TAlt1","rule":"TRule_neq_subexpr.TBlock3.TAlt1.Rule_neq_subexpr2","sum":108321380},{"parent":"TRule_neq_subexpr.TBlock3.TAlt2","rule":"TRule_neq_subexpr.TBlock3.TAlt2.Block1","sum":3773731},{"parent":"TRule_neq_subexpr.TBlock3.TAlt2.TBlock1","rule":"TRule_neq_subexpr.TBlock3.TAlt2.TBlock1.Token1","sum":3785579},{"parent":"TRule_new_window_name","rule":"TRule_new_window_name.Rule_window_name1","sum":14160253},{"parent":"TRule_null_treatment","rule":"TRule_null_treatment.Alt_null_treatment1","sum":45},{"parent":"TRule_null_treatment","rule":"TRule_null_treatment.Alt_null_treatment2","sum":7198004},{"parent":"TRule_null_treatment.TAlt1","rule":"TRule_null_treatment.TAlt1.Token1","sum":45},{"parent":"TRule_null_treatment.TAlt1","rule":"TRule_null_treatment.TAlt1.Token2","sum":45},{"parent":"TRule_null_treatment.TAlt2","rule":"TRule_null_treatment.TAlt2.Token1","sum":7198004},{"parent":"TRule_null_treatment.TAlt2","rule":"TRule_null_treatment.TAlt2.Token2","sum":7198004},{"parent":"TRule_object_ref","rule":"TRule_object_ref.Block1","sum":5288874},{"parent":"TRule_object_ref","rule":"TRule_object_ref.Rule_id_or_at2","sum":133734499},{"parent":"TRule_object_ref.TBlock1","rule":"TRule_object_ref.TBlock1.Rule_cluster_expr1","sum":5288874},{"parent":"TRule_object_ref.TBlock1","rule":"TRule_object_ref.TBlock1.Token2","sum":5288874},{"parent":"TRule_opt_bind_parameter","
rule":"TRule_opt_bind_parameter.Block2","sum":27136},{"parent":"TRule_opt_bind_parameter","rule":"TRule_opt_bind_parameter.Rule_bind_parameter1","sum":21450156},{"parent":"TRule_opt_bind_parameter.TBlock2","rule":"TRule_opt_bind_parameter.TBlock2.Token1","sum":27136},{"parent":"TRule_opt_id_prefix","rule":"TRule_opt_id_prefix.Block1","sum":82957126},{"parent":"TRule_opt_id_prefix.TBlock1","rule":"TRule_opt_id_prefix.TBlock1.Rule_an_id1","sum":82957126},{"parent":"TRule_opt_id_prefix.TBlock1","rule":"TRule_opt_id_prefix.TBlock1.Token2","sum":82957126},{"parent":"TRule_opt_id_prefix_or_type","rule":"TRule_opt_id_prefix_or_type.Block1","sum":777053633},{"parent":"TRule_opt_id_prefix_or_type.TBlock1","rule":"TRule_opt_id_prefix_or_type.TBlock1.Rule_an_id_or_type1","sum":777053633},{"parent":"TRule_opt_id_prefix_or_type.TBlock1","rule":"TRule_opt_id_prefix_or_type.TBlock1.Token2","sum":777053633},{"parent":"TRule_opt_set_quantifier","rule":"TRule_opt_set_quantifier.Block1","sum":70348625},{"parent":"TRule_opt_set_quantifier.TBlock1","rule":"TRule_opt_set_quantifier.TBlock1.Token1","sum":70348625},{"parent":"TRule_or_subexpr","rule":"TRule_or_subexpr.Block2","sum":277164299},{"parent":"TRule_or_subexpr","rule":"TRule_or_subexpr.Rule_and_subexpr1","sum":13810410281},{"parent":"TRule_or_subexpr.TBlock2","rule":"TRule_or_subexpr.TBlock2.Rule_and_subexpr2","sum":471120559},{"parent":"TRule_or_subexpr.TBlock2","rule":"TRule_or_subexpr.TBlock2.Token1","sum":471120559},{"parent":"TRule_order_by_clause","rule":"TRule_order_by_clause.Rule_sort_specification_list3","sum":107666782},{"parent":"TRule_order_by_clause","rule":"TRule_order_by_clause.Token1","sum":107666782},{"parent":"TRule_order_by_clause","rule":"TRule_order_by_clause.Token2","sum":107666782},{"parent":"TRule_ordinary_grouping_set","rule":"TRule_ordinary_grouping_set.Rule_named_expr1","sum":301305027},{"parent":"TRule_ordinary_grouping_set_list","rule":"TRule_ordinary_grouping_set_list.Block2","sum":199418},{"parent":
"TRule_ordinary_grouping_set_list","rule":"TRule_ordinary_grouping_set_list.Rule_ordinary_grouping_set1","sum":339110},{"parent":"TRule_ordinary_grouping_set_list.TBlock2","rule":"TRule_ordinary_grouping_set_list.TBlock2.Rule_ordinary_grouping_set2","sum":432302},{"parent":"TRule_ordinary_grouping_set_list.TBlock2","rule":"TRule_ordinary_grouping_set_list.TBlock2.Token1","sum":432302},{"parent":"TRule_pragma_stmt","rule":"TRule_pragma_stmt.Block4","sum":791546480},{"parent":"TRule_pragma_stmt","rule":"TRule_pragma_stmt.Rule_an_id3","sum":896077672},{"parent":"TRule_pragma_stmt","rule":"TRule_pragma_stmt.Rule_opt_id_prefix_or_type2","sum":896077672},{"parent":"TRule_pragma_stmt","rule":"TRule_pragma_stmt.Token1","sum":896077672},{"parent":"TRule_pragma_stmt.TBlock4","rule":"TRule_pragma_stmt.TBlock4.Alt1","sum":743570382},{"parent":"TRule_pragma_stmt.TBlock4","rule":"TRule_pragma_stmt.TBlock4.Alt2","sum":47976098},{"parent":"TRule_pragma_stmt.TBlock4.TAlt1","rule":"TRule_pragma_stmt.TBlock4.TAlt1.Rule_pragma_value2","sum":743570382},{"parent":"TRule_pragma_stmt.TBlock4.TAlt1","rule":"TRule_pragma_stmt.TBlock4.TAlt1.Token1","sum":743570382},{"parent":"TRule_pragma_stmt.TBlock4.TAlt2","rule":"TRule_pragma_stmt.TBlock4.TAlt2.Block3","sum":17700602},{"parent":"TRule_pragma_stmt.TBlock4.TAlt2","rule":"TRule_pragma_stmt.TBlock4.TAlt2.Rule_pragma_value2","sum":47976098},{"parent":"TRule_pragma_stmt.TBlock4.TAlt2","rule":"TRule_pragma_stmt.TBlock4.TAlt2.Token1","sum":47976098},{"parent":"TRule_pragma_stmt.TBlock4.TAlt2","rule":"TRule_pragma_stmt.TBlock4.TAlt2.Token4","sum":47976098},{"parent":"TRule_pragma_stmt.TBlock4.TAlt2.TBlock3","rule":"TRule_pragma_stmt.TBlock4.TAlt2.TBlock3.Rule_pragma_value2","sum":18516762},{"parent":"TRule_pragma_stmt.TBlock4.TAlt2.TBlock3","rule":"TRule_pragma_stmt.TBlock4.TAlt2.TBlock3.Token1","sum":18516762},{"parent":"TRule_pragma_value","rule":"TRule_pragma_value.Alt_pragma_value2","sum":256177},{"parent":"TRule_pragma_value","rule":"TRule_pra
gma_value.Alt_pragma_value3","sum":797434933},{"parent":"TRule_pragma_value","rule":"TRule_pragma_value.Alt_pragma_value5","sum":12372132},{"parent":"TRule_pragma_value.TAlt2","rule":"TRule_pragma_value.TAlt2.Rule_id1","sum":256177},{"parent":"TRule_pragma_value.TAlt3","rule":"TRule_pragma_value.TAlt3.Token1","sum":797434933},{"parent":"TRule_pragma_value.TAlt5","rule":"TRule_pragma_value.TAlt5.Rule_bind_parameter1","sum":12372132},{"parent":"TRule_process_core","rule":"TRule_process_core.Block4","sum":1132},{"parent":"TRule_process_core","rule":"TRule_process_core.Block5","sum":1918423},{"parent":"TRule_process_core","rule":"TRule_process_core.Rule_named_single_source3","sum":4536125},{"parent":"TRule_process_core","rule":"TRule_process_core.Token1","sum":4536125},{"parent":"TRule_process_core.TBlock4","rule":"TRule_process_core.TBlock4.Rule_named_single_source2","sum":1133},{"parent":"TRule_process_core.TBlock4","rule":"TRule_process_core.TBlock4.Token1","sum":1133},{"parent":"TRule_process_core.TBlock5","rule":"TRule_process_core.TBlock5.Block3","sum":46},{"parent":"TRule_process_core.TBlock5","rule":"TRule_process_core.TBlock5.Block5","sum":4663},{"parent":"TRule_process_core.TBlock5","rule":"TRule_process_core.TBlock5.Block7","sum":34},{"parent":"TRule_process_core.TBlock5","rule":"TRule_process_core.TBlock5.Rule_using_call_expr2","sum":1918423},{"parent":"TRule_process_core.TBlock5","rule":"TRule_process_core.TBlock5.Token1","sum":1918423},{"parent":"TRule_process_core.TBlock5.TBlock3","rule":"TRule_process_core.TBlock5.TBlock3.Rule_an_id2","sum":46},{"parent":"TRule_process_core.TBlock5.TBlock3","rule":"TRule_process_core.TBlock5.TBlock3.Token1","sum":46},{"parent":"TRule_process_core.TBlock5.TBlock5","rule":"TRule_process_core.TBlock5.TBlock5.Rule_expr2","sum":4663},{"parent":"TRule_process_core.TBlock5.TBlock5","rule":"TRule_process_core.TBlock5.TBlock5.Token1","sum":4663},{"parent":"TRule_process_core.TBlock5.TBlock7","rule":"TRule_process_core.TBlock5.TBl
ock7.Rule_order_by_clause2","sum":34},{"parent":"TRule_process_core.TBlock5.TBlock7","rule":"TRule_process_core.TBlock5.TBlock7.Token1","sum":34},{"parent":"TRule_pure_column_list","rule":"TRule_pure_column_list.Block3","sum":3950756},{"parent":"TRule_pure_column_list","rule":"TRule_pure_column_list.Rule_an_id2","sum":4690411},{"parent":"TRule_pure_column_list","rule":"TRule_pure_column_list.Token1","sum":4690411},{"parent":"TRule_pure_column_list","rule":"TRule_pure_column_list.Token4","sum":4690411},{"parent":"TRule_pure_column_list.TBlock3","rule":"TRule_pure_column_list.TBlock3.Rule_an_id2","sum":35153173},{"parent":"TRule_pure_column_list.TBlock3","rule":"TRule_pure_column_list.TBlock3.Token1","sum":35153173},{"parent":"TRule_pure_column_or_named","rule":"TRule_pure_column_or_named.Alt_pure_column_or_named1","sum":6594704},{"parent":"TRule_pure_column_or_named","rule":"TRule_pure_column_or_named.Alt_pure_column_or_named2","sum":425145265},{"parent":"TRule_pure_column_or_named.TAlt1","rule":"TRule_pure_column_or_named.TAlt1.Rule_bind_parameter1","sum":6594704},{"parent":"TRule_pure_column_or_named.TAlt2","rule":"TRule_pure_column_or_named.TAlt2.Rule_an_id1","sum":425145265},{"parent":"TRule_pure_column_or_named_list","rule":"TRule_pure_column_or_named_list.Block3","sum":11687130},{"parent":"TRule_pure_column_or_named_list","rule":"TRule_pure_column_or_named_list.Rule_pure_column_or_named2","sum":32459519},{"parent":"TRule_pure_column_or_named_list","rule":"TRule_pure_column_or_named_list.Token1","sum":32459519},{"parent":"TRule_pure_column_or_named_list","rule":"TRule_pure_column_or_named_list.Token4","sum":32459519},{"parent":"TRule_pure_column_or_named_list.TBlock3","rule":"TRule_pure_column_or_named_list.TBlock3.Rule_pure_column_or_named2","sum":21732741},{"parent":"TRule_pure_column_or_named_list.TBlock3","rule":"TRule_pure_column_or_named_list.TBlock3.Token1","sum":21732741},{"parent":"TRule_real","rule":"TRule_real.Token1","sum":109092927},{"parent":"TRule
_reduce_core","rule":"TRule_reduce_core.Block11","sum":8912},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Block13","sum":26285},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Block3","sum":119295},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Block4","sum":302146},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Block8","sum":313583},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Rule_column_list6","sum":848767},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Rule_named_single_source2","sum":848767},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Rule_using_call_expr9","sum":848767},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Token1","sum":848767},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Token5","sum":848767},{"parent":"TRule_reduce_core","rule":"TRule_reduce_core.Token7","sum":848767},{"parent":"TRule_reduce_core.TBlock11","rule":"TRule_reduce_core.TBlock11.Rule_expr2","sum":8912},{"parent":"TRule_reduce_core.TBlock11","rule":"TRule_reduce_core.TBlock11.Token1","sum":8912},{"parent":"TRule_reduce_core.TBlock13","rule":"TRule_reduce_core.TBlock13.Rule_order_by_clause2","sum":26285},{"parent":"TRule_reduce_core.TBlock13","rule":"TRule_reduce_core.TBlock13.Token1","sum":26285},{"parent":"TRule_reduce_core.TBlock3","rule":"TRule_reduce_core.TBlock3.Rule_named_single_source2","sum":206592},{"parent":"TRule_reduce_core.TBlock3","rule":"TRule_reduce_core.TBlock3.Token1","sum":206592},{"parent":"TRule_reduce_core.TBlock4","rule":"TRule_reduce_core.TBlock4.Rule_sort_specification_list2","sum":302146},{"parent":"TRule_reduce_core.TBlock4","rule":"TRule_reduce_core.TBlock4.Token1","sum":302146},{"parent":"TRule_reduce_core.TBlock8","rule":"TRule_reduce_core.TBlock8.Token1","sum":313583},{"parent":"TRule_repeatable_clause","rule":"TRule_repeatable_clause.Rule_expr3","sum":9275},{"parent":"TRule_repeatable_clause","rule":"TRule_repeatable_clause.Token1","sum":9275},{"parent":"TRule_repeatab
le_clause","rule":"TRule_repeatable_clause.Token2","sum":9275},{"parent":"TRule_repeatable_clause","rule":"TRule_repeatable_clause.Token4","sum":9275},{"parent":"TRule_result_column","rule":"TRule_result_column.Alt_result_column1","sum":250248825},{"parent":"TRule_result_column","rule":"TRule_result_column.Alt_result_column2","sum":3447285439},{"parent":"TRule_result_column.TAlt1","rule":"TRule_result_column.TAlt1.Rule_opt_id_prefix1","sum":250248825},{"parent":"TRule_result_column.TAlt1","rule":"TRule_result_column.TAlt1.Token2","sum":250248825},{"parent":"TRule_result_column.TAlt2","rule":"TRule_result_column.TAlt2.Block2","sum":1987445288},{"parent":"TRule_result_column.TAlt2","rule":"TRule_result_column.TAlt2.Rule_expr1","sum":3447285439},{"parent":"TRule_result_column.TAlt2.TBlock2","rule":"TRule_result_column.TAlt2.TBlock2.Alt1","sum":1986579042},{"parent":"TRule_result_column.TAlt2.TBlock2","rule":"TRule_result_column.TAlt2.TBlock2.Alt2","sum":866246},{"parent":"TRule_result_column.TAlt2.TBlock2.TAlt1","rule":"TRule_result_column.TAlt2.TBlock2.TAlt1.Rule_an_id_or_type2","sum":1986579042},{"parent":"TRule_result_column.TAlt2.TBlock2.TAlt1","rule":"TRule_result_column.TAlt2.TBlock2.TAlt1.Token1","sum":1986579042},{"parent":"TRule_result_column.TAlt2.TBlock2.TAlt2","rule":"TRule_result_column.TAlt2.TBlock2.TAlt2.Rule_an_id_as_compat1","sum":866246},{"parent":"TRule_rollup_list","rule":"TRule_rollup_list.Rule_ordinary_grouping_set_list3","sum":62439},{"parent":"TRule_rollup_list","rule":"TRule_rollup_list.Token1","sum":62439},{"parent":"TRule_rollup_list","rule":"TRule_rollup_list.Token2","sum":62439},{"parent":"TRule_rollup_list","rule":"TRule_rollup_list.Token4","sum":62439},{"parent":"TRule_rot_right","rule":"TRule_rot_right.Token1","sum":23},{"parent":"TRule_rot_right","rule":"TRule_rot_right.Token2","sum":23},{"parent":"TRule_rot_right","rule":"TRule_rot_right.Token3","sum":23},{"parent":"TRule_row_pattern","rule":"TRule_row_pattern.Rule_row_pattern_term1","
sum":2},{"parent":"TRule_row_pattern_common_syntax","rule":"TRule_row_pattern_common_syntax.Rule_row_pattern5","sum":1},{"parent":"TRule_row_pattern_common_syntax","rule":"TRule_row_pattern_common_syntax.Rule_row_pattern_definition_list9","sum":1},{"parent":"TRule_row_pattern_common_syntax","rule":"TRule_row_pattern_common_syntax.Token3","sum":1},{"parent":"TRule_row_pattern_common_syntax","rule":"TRule_row_pattern_common_syntax.Token4","sum":1},{"parent":"TRule_row_pattern_common_syntax","rule":"TRule_row_pattern_common_syntax.Token6","sum":1},{"parent":"TRule_row_pattern_common_syntax","rule":"TRule_row_pattern_common_syntax.Token8","sum":1},{"parent":"TRule_row_pattern_definition","rule":"TRule_row_pattern_definition.Rule_row_pattern_definition_search_condition3","sum":1},{"parent":"TRule_row_pattern_definition","rule":"TRule_row_pattern_definition.Rule_row_pattern_definition_variable_name1","sum":1},{"parent":"TRule_row_pattern_definition","rule":"TRule_row_pattern_definition.Token2","sum":1},{"parent":"TRule_row_pattern_definition_list","rule":"TRule_row_pattern_definition_list.Rule_row_pattern_definition1","sum":1},{"parent":"TRule_row_pattern_definition_search_condition","rule":"TRule_row_pattern_definition_search_condition.Rule_search_condition1","sum":1},{"parent":"TRule_row_pattern_definition_variable_name","rule":"TRule_row_pattern_definition_variable_name.Rule_row_pattern_variable_name1","sum":1},{"parent":"TRule_row_pattern_factor","rule":"TRule_row_pattern_factor.Block2","sum":1},{"parent":"TRule_row_pattern_factor","rule":"TRule_row_pattern_factor.Rule_row_pattern_primary1","sum":2},{"parent":"TRule_row_pattern_factor.TBlock2","rule":"TRule_row_pattern_factor.TBlock2.Rule_row_pattern_quantifier1","sum":1},{"parent":"TRule_row_pattern_measure_definition","rule":"TRule_row_pattern_measure_definition.Rule_an_id3","sum":3},{"parent":"TRule_row_pattern_measure_definition","rule":"TRule_row_pattern_measure_definition.Rule_expr1","sum":3},{"parent":"TRule_ro
w_pattern_measure_definition","rule":"TRule_row_pattern_measure_definition.Token2","sum":3},{"parent":"TRule_row_pattern_measure_list","rule":"TRule_row_pattern_measure_list.Block2","sum":1},{"parent":"TRule_row_pattern_measure_list","rule":"TRule_row_pattern_measure_list.Rule_row_pattern_measure_definition1","sum":1},{"parent":"TRule_row_pattern_measure_list.TBlock2","rule":"TRule_row_pattern_measure_list.TBlock2.Rule_row_pattern_measure_definition2","sum":2},{"parent":"TRule_row_pattern_measure_list.TBlock2","rule":"TRule_row_pattern_measure_list.TBlock2.Token1","sum":2},{"parent":"TRule_row_pattern_measures","rule":"TRule_row_pattern_measures.Rule_row_pattern_measure_list2","sum":1},{"parent":"TRule_row_pattern_measures","rule":"TRule_row_pattern_measures.Token1","sum":1},{"parent":"TRule_row_pattern_primary","rule":"TRule_row_pattern_primary.Alt_row_pattern_primary1","sum":1},{"parent":"TRule_row_pattern_primary","rule":"TRule_row_pattern_primary.Alt_row_pattern_primary4","sum":1},{"parent":"TRule_row_pattern_primary.TAlt1","rule":"TRule_row_pattern_primary.TAlt1.Rule_row_pattern_primary_variable_name1","sum":1},{"parent":"TRule_row_pattern_primary.TAlt4","rule":"TRule_row_pattern_primary.TAlt4.Block2","sum":1},{"parent":"TRule_row_pattern_primary.TAlt4","rule":"TRule_row_pattern_primary.TAlt4.Token1","sum":1},{"parent":"TRule_row_pattern_primary.TAlt4","rule":"TRule_row_pattern_primary.TAlt4.Token3","sum":1},{"parent":"TRule_row_pattern_primary.TAlt4.TBlock2","rule":"TRule_row_pattern_primary.TAlt4.TBlock2.Rule_row_pattern1","sum":1},{"parent":"TRule_row_pattern_primary_variable_name","rule":"TRule_row_pattern_primary_variable_name.Rule_row_pattern_variable_name1","sum":1},{"parent":"TRule_row_pattern_quantifier","rule":"TRule_row_pattern_quantifier.Alt_row_pattern_quantifier5","sum":1},{"parent":"TRule_row_pattern_quantifier.TAlt5","rule":"TRule_row_pattern_quantifier.TAlt5.Rule_integer2","sum":1},{"parent":"TRule_row_pattern_quantifier.TAlt5","rule":"TRule_ro
w_pattern_quantifier.TAlt5.Token1","sum":1},{"parent":"TRule_row_pattern_quantifier.TAlt5","rule":"TRule_row_pattern_quantifier.TAlt5.Token3","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Block3","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Block4","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Block5","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Block6","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Rule_row_pattern_common_syntax7","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Token1","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Token2","sum":1},{"parent":"TRule_row_pattern_recognition_clause","rule":"TRule_row_pattern_recognition_clause.Token8","sum":1},{"parent":"TRule_row_pattern_recognition_clause.TBlock3","rule":"TRule_row_pattern_recognition_clause.TBlock3.Rule_window_partition_clause1","sum":1},{"parent":"TRule_row_pattern_recognition_clause.TBlock4","rule":"TRule_row_pattern_recognition_clause.TBlock4.Rule_order_by_clause1","sum":1},{"parent":"TRule_row_pattern_recognition_clause.TBlock5","rule":"TRule_row_pattern_recognition_clause.TBlock5.Rule_row_pattern_measures1","sum":1},{"parent":"TRule_row_pattern_recognition_clause.TBlock6","rule":"TRule_row_pattern_recognition_clause.TBlock6.Rule_row_pattern_rows_per_match1","sum":1},{"parent":"TRule_row_pattern_rows_per_match","rule":"TRule_row_pattern_rows_per_match.Alt_row_pattern_rows_per_match1","sum":1},{"parent":"TRule_row_pattern_rows_per_match.TAlt1","rule":"TRule_row_pattern_rows_per_match.TAlt1.Token1","sum":1},{"parent":"TRule_row_pattern_rows_per_match.TAlt1","rule":"TRule_row_pattern_rows_per_match.TAlt1.Tok
en2","sum":1},{"parent":"TRule_row_pattern_rows_per_match.TAlt1","rule":"TRule_row_pattern_rows_per_match.TAlt1.Token3","sum":1},{"parent":"TRule_row_pattern_rows_per_match.TAlt1","rule":"TRule_row_pattern_rows_per_match.TAlt1.Token4","sum":1},{"parent":"TRule_row_pattern_term","rule":"TRule_row_pattern_term.Block1","sum":2},{"parent":"TRule_row_pattern_term.TBlock1","rule":"TRule_row_pattern_term.TBlock1.Rule_row_pattern_factor1","sum":2},{"parent":"TRule_row_pattern_variable_name","rule":"TRule_row_pattern_variable_name.Rule_identifier1","sum":2},{"parent":"TRule_sample_clause","rule":"TRule_sample_clause.Rule_expr2","sum":75224},{"parent":"TRule_sample_clause","rule":"TRule_sample_clause.Token1","sum":75224},{"parent":"TRule_sampling_mode","rule":"TRule_sampling_mode.Token1","sum":328669},{"parent":"TRule_search_condition","rule":"TRule_search_condition.Rule_expr1","sum":1},{"parent":"TRule_select_core","rule":"TRule_select_core.Block1","sum":10672782},{"parent":"TRule_select_core","rule":"TRule_select_core.Block10","sum":340845955},{"parent":"TRule_select_core","rule":"TRule_select_core.Block11","sum":137888913},{"parent":"TRule_select_core","rule":"TRule_select_core.Block12","sum":12107514},{"parent":"TRule_select_core","rule":"TRule_select_core.Block13","sum":13585207},{"parent":"TRule_select_core","rule":"TRule_select_core.Block14","sum":88637099},{"parent":"TRule_select_core","rule":"TRule_select_core.Block3","sum":117},{"parent":"TRule_select_core","rule":"TRule_select_core.Block6","sum":529664522},{"parent":"TRule_select_core","rule":"TRule_select_core.Block7","sum":93678926},{"parent":"TRule_select_core","rule":"TRule_select_core.Block8","sum":24883415},{"parent":"TRule_select_core","rule":"TRule_select_core.Block9","sum":833277319},{"parent":"TRule_select_core","rule":"TRule_select_core.Rule_opt_set_quantifier4","sum":904378975},{"parent":"TRule_select_core","rule":"TRule_select_core.Rule_result_column5","sum":904378975},{"parent":"TRule_select_core","ru
le":"TRule_select_core.Token2","sum":904378975},{"parent":"TRule_select_core.TBlock1","rule":"TRule_select_core.TBlock1.Rule_join_source2","sum":10672782},{"parent":"TRule_select_core.TBlock1","rule":"TRule_select_core.TBlock1.Token1","sum":10672782},{"parent":"TRule_select_core.TBlock10","rule":"TRule_select_core.TBlock10.Rule_expr2","sum":340845955},{"parent":"TRule_select_core.TBlock10","rule":"TRule_select_core.TBlock10.Token1","sum":340845955},{"parent":"TRule_select_core.TBlock11","rule":"TRule_select_core.TBlock11.Rule_group_by_clause1","sum":137888913},{"parent":"TRule_select_core.TBlock12","rule":"TRule_select_core.TBlock12.Rule_expr2","sum":12107514},{"parent":"TRule_select_core.TBlock12","rule":"TRule_select_core.TBlock12.Token1","sum":12107514},{"parent":"TRule_select_core.TBlock13","rule":"TRule_select_core.TBlock13.Rule_window_clause1","sum":13585207},{"parent":"TRule_select_core.TBlock14","rule":"TRule_select_core.TBlock14.Rule_ext_order_by_clause1","sum":88637099},{"parent":"TRule_select_core.TBlock3","rule":"TRule_select_core.TBlock3.Token1","sum":117},{"parent":"TRule_select_core.TBlock6","rule":"TRule_select_core.TBlock6.Rule_result_column2","sum":2793155289},{"parent":"TRule_select_core.TBlock6","rule":"TRule_select_core.TBlock6.Token1","sum":2793155289},{"parent":"TRule_select_core.TBlock7","rule":"TRule_select_core.TBlock7.Token1","sum":93678926},{"parent":"TRule_select_core.TBlock8","rule":"TRule_select_core.TBlock8.Block2","sum":14},{"parent":"TRule_select_core.TBlock8","rule":"TRule_select_core.TBlock8.Rule_without_column_list3","sum":24883415},{"parent":"TRule_select_core.TBlock8","rule":"TRule_select_core.TBlock8.Token1","sum":24883415},{"parent":"TRule_select_core.TBlock8.TBlock2","rule":"TRule_select_core.TBlock8.TBlock2.Token1","sum":14},{"parent":"TRule_select_core.TBlock8.TBlock2","rule":"TRule_select_core.TBlock8.TBlock2.Token2","sum":14},{"parent":"TRule_select_core.TBlock9","rule":"TRule_select_core.TBlock9.Rule_join_source2","sum"
:833277319},{"parent":"TRule_select_core.TBlock9","rule":"TRule_select_core.TBlock9.Token1","sum":833277319},{"parent":"TRule_select_kind","rule":"TRule_select_kind.Block1","sum":921844},{"parent":"TRule_select_kind","rule":"TRule_select_kind.Block2","sum":909763867},{"parent":"TRule_select_kind","rule":"TRule_select_kind.Block3","sum":2885395},{"parent":"TRule_select_kind.TBlock1","rule":"TRule_select_kind.TBlock1.Token1","sum":921844},{"parent":"TRule_select_kind.TBlock2","rule":"TRule_select_kind.TBlock2.Alt1","sum":4536125},{"parent":"TRule_select_kind.TBlock2","rule":"TRule_select_kind.TBlock2.Alt2","sum":848767},{"parent":"TRule_select_kind.TBlock2","rule":"TRule_select_kind.TBlock2.Alt3","sum":904378975},{"parent":"TRule_select_kind.TBlock2.TAlt1","rule":"TRule_select_kind.TBlock2.TAlt1.Rule_process_core1","sum":4536125},{"parent":"TRule_select_kind.TBlock2.TAlt2","rule":"TRule_select_kind.TBlock2.TAlt2.Rule_reduce_core1","sum":848767},{"parent":"TRule_select_kind.TBlock2.TAlt3","rule":"TRule_select_kind.TBlock2.TAlt3.Rule_select_core1","sum":904378975},{"parent":"TRule_select_kind.TBlock3","rule":"TRule_select_kind.TBlock3.Rule_pure_column_or_named3","sum":2885395},{"parent":"TRule_select_kind.TBlock3","rule":"TRule_select_kind.TBlock3.Token1","sum":2885395},{"parent":"TRule_select_kind.TBlock3","rule":"TRule_select_kind.TBlock3.Token2","sum":2885395},{"parent":"TRule_select_kind_parenthesis","rule":"TRule_select_kind_parenthesis.Alt_select_kind_parenthesis1","sum":779150787},{"parent":"TRule_select_kind_parenthesis","rule":"TRule_select_kind_parenthesis.Alt_select_kind_parenthesis2","sum":7976140},{"parent":"TRule_select_kind_parenthesis.TAlt1","rule":"TRule_select_kind_parenthesis.TAlt1.Rule_select_kind_partial1","sum":779150787},{"parent":"TRule_select_kind_parenthesis.TAlt2","rule":"TRule_select_kind_parenthesis.TAlt2.Rule_select_kind_partial2","sum":7976140},{"parent":"TRule_select_kind_parenthesis.TAlt2","rule":"TRule_select_kind_parenthesis.TAlt2.Toke
n1","sum":7976140},{"parent":"TRule_select_kind_parenthesis.TAlt2","rule":"TRule_select_kind_parenthesis.TAlt2.Token3","sum":7976140},{"parent":"TRule_select_kind_partial","rule":"TRule_select_kind_partial.Block2","sum":30691478},{"parent":"TRule_select_kind_partial","rule":"TRule_select_kind_partial.Rule_select_kind1","sum":909763867},{"parent":"TRule_select_kind_partial.TBlock2","rule":"TRule_select_kind_partial.TBlock2.Block3","sum":4603257},{"parent":"TRule_select_kind_partial.TBlock2","rule":"TRule_select_kind_partial.TBlock2.Rule_expr2","sum":30691478},{"parent":"TRule_select_kind_partial.TBlock2","rule":"TRule_select_kind_partial.TBlock2.Token1","sum":30691478},{"parent":"TRule_select_kind_partial.TBlock2.TBlock3","rule":"TRule_select_kind_partial.TBlock2.TBlock3.Rule_expr2","sum":4603257},{"parent":"TRule_select_kind_partial.TBlock2.TBlock3","rule":"TRule_select_kind_partial.TBlock2.TBlock3.Token1","sum":4603257},{"parent":"TRule_select_op","rule":"TRule_select_op.Alt_select_op1","sum":63894618},{"parent":"TRule_select_op.TAlt1","rule":"TRule_select_op.TAlt1.Block2","sum":62983928},{"parent":"TRule_select_op.TAlt1","rule":"TRule_select_op.TAlt1.Token1","sum":63894618},{"parent":"TRule_select_op.TAlt1.TBlock2","rule":"TRule_select_op.TAlt1.TBlock2.Token1","sum":62983928},{"parent":"TRule_select_stmt","rule":"TRule_select_stmt.Block2","sum":33275900},{"parent":"TRule_select_stmt","rule":"TRule_select_stmt.Rule_select_kind_parenthesis1","sum":723232309},{"parent":"TRule_select_stmt.TBlock2","rule":"TRule_select_stmt.TBlock2.Rule_select_kind_parenthesis2","sum":56935244},{"parent":"TRule_select_stmt.TBlock2","rule":"TRule_select_stmt.TBlock2.Rule_select_op1","sum":56935244},{"parent":"TRule_select_unparenthesized_stmt","rule":"TRule_select_unparenthesized_stmt.Block2","sum":4296981},{"parent":"TRule_select_unparenthesized_stmt","rule":"TRule_select_unparenthesized_stmt.Rule_select_kind_partial1","sum":122636940},{"parent":"TRule_select_unparenthesized_stmt.TBloc
k2","rule":"TRule_select_unparenthesized_stmt.TBlock2.Rule_select_kind_parenthesis2","sum":6959374},{"parent":"TRule_select_unparenthesized_stmt.TBlock2","rule":"TRule_select_unparenthesized_stmt.TBlock2.Rule_select_op1","sum":6959374},{"parent":"TRule_shift_right","rule":"TRule_shift_right.Token1","sum":53387},{"parent":"TRule_shift_right","rule":"TRule_shift_right.Token2","sum":53387},{"parent":"TRule_simple_table_ref","rule":"TRule_simple_table_ref.Block2","sum":142994339},{"parent":"TRule_simple_table_ref","rule":"TRule_simple_table_ref.Rule_simple_table_ref_core1","sum":211479641},{"parent":"TRule_simple_table_ref.TBlock2","rule":"TRule_simple_table_ref.TBlock2.Rule_table_hints1","sum":142994339},{"parent":"TRule_simple_table_ref_core","rule":"TRule_simple_table_ref_core.Alt_simple_table_ref_core1","sum":133734499},{"parent":"TRule_simple_table_ref_core","rule":"TRule_simple_table_ref_core.Alt_simple_table_ref_core2","sum":77745142},{"parent":"TRule_simple_table_ref_core.TAlt1","rule":"TRule_simple_table_ref_core.TAlt1.Rule_object_ref1","sum":133734499},{"parent":"TRule_simple_table_ref_core.TAlt2","rule":"TRule_simple_table_ref_core.TAlt2.Block1","sum":71492},{"parent":"TRule_simple_table_ref_core.TAlt2","rule":"TRule_simple_table_ref_core.TAlt2.Rule_bind_parameter2","sum":77745142},{"parent":"TRule_simple_table_ref_core.TAlt2.TBlock1","rule":"TRule_simple_table_ref_core.TAlt2.TBlock1.Token1","sum":71492},{"parent":"TRule_single_source","rule":"TRule_single_source.Alt_single_source1","sum":953241090},{"parent":"TRule_single_source","rule":"TRule_single_source.Alt_single_source2","sum":104748390},{"parent":"TRule_single_source","rule":"TRule_single_source.Alt_single_source3","sum":168372},{"parent":"TRule_single_source.TAlt1","rule":"TRule_single_source.TAlt1.Rule_table_ref1","sum":953241090},{"parent":"TRule_single_source.TAlt2","rule":"TRule_single_source.TAlt2.Rule_select_stmt2","sum":104748390},{"parent":"TRule_single_source.TAlt2","rule":"TRule_single_sour
ce.TAlt2.Token1","sum":104748390},{"parent":"TRule_single_source.TAlt2","rule":"TRule_single_source.TAlt2.Token3","sum":104748390},{"parent":"TRule_single_source.TAlt3","rule":"TRule_single_source.TAlt3.Rule_values_stmt2","sum":168372},{"parent":"TRule_single_source.TAlt3","rule":"TRule_single_source.TAlt3.Token1","sum":168372},{"parent":"TRule_single_source.TAlt3","rule":"TRule_single_source.TAlt3.Token3","sum":168372},{"parent":"TRule_smart_parenthesis","rule":"TRule_smart_parenthesis.Block2","sum":508513096},{"parent":"TRule_smart_parenthesis","rule":"TRule_smart_parenthesis.Block3","sum":2640116},{"parent":"TRule_smart_parenthesis","rule":"TRule_smart_parenthesis.Token1","sum":512340006},{"parent":"TRule_smart_parenthesis","rule":"TRule_smart_parenthesis.Token4","sum":512340006},{"parent":"TRule_smart_parenthesis.TBlock2","rule":"TRule_smart_parenthesis.TBlock2.Rule_named_expr_list1","sum":508513096},{"parent":"TRule_smart_parenthesis.TBlock3","rule":"TRule_smart_parenthesis.TBlock3.Token1","sum":2640116},{"parent":"TRule_sort_specification","rule":"TRule_sort_specification.Block2","sum":33193476},{"parent":"TRule_sort_specification","rule":"TRule_sort_specification.Rule_expr1","sum":181604904},{"parent":"TRule_sort_specification.TBlock2","rule":"TRule_sort_specification.TBlock2.Token1","sum":33193476},{"parent":"TRule_sort_specification_list","rule":"TRule_sort_specification_list.Block2","sum":39858879},{"parent":"TRule_sort_specification_list","rule":"TRule_sort_specification_list.Rule_sort_specification1","sum":107968928},{"parent":"TRule_sort_specification_list.TBlock2","rule":"TRule_sort_specification_list.TBlock2.Rule_sort_specification2","sum":73635976},{"parent":"TRule_sort_specification_list.TBlock2","rule":"TRule_sort_specification_list.TBlock2.Token1","sum":73635976},{"parent":"TRule_sql_query","rule":"TRule_sql_query.Alt_sql_query1","sum":320130391},{"parent":"TRule_sql_query.TAlt1","rule":"TRule_sql_query.TAlt1.Rule_sql_stmt_list1","sum":320130391},
{"parent":"TRule_sql_stmt","rule":"TRule_sql_stmt.Block1","sum":53},{"parent":"TRule_sql_stmt","rule":"TRule_sql_stmt.Rule_sql_stmt_core2","sum":2720354628},{"parent":"TRule_sql_stmt.TBlock1","rule":"TRule_sql_stmt.TBlock1.Token1","sum":53},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core1","sum":896077672},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core12","sum":156358531},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core13","sum":16570587},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core17","sum":4706728},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core18","sum":22267674},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core19","sum":8558779},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core2","sum":193539980},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core20","sum":7000888},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core21","sum":321},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core3","sum":943378032},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core4","sum":127},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core5","sum":2566518},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core6","sum":329427984},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core7","sum":208912996},{"parent":"TRule_sql_stmt_core","rule":"TRule_sql_stmt_core.Alt_sql_stmt_core8","sum":13017921},{"parent":"TRule_sql_stmt_core.TAlt1","rule":"TRule_sql_stmt_core.TAlt1.Rule_pragma_stmt1","sum":896077672},{"parent":"TRule_sql_stmt_core.TAlt12","rule":"TRule_sql_stmt_core.TAlt12.Rule_declare_stmt1","sum":156358531},{"parent":"TRule_sql_stmt_core.TAlt13","rule":"TRule_sql_stmt_core.TAlt13.Rule_import_stmt1","sum":16
570587},{"parent":"TRule_sql_stmt_core.TAlt17","rule":"TRule_sql_stmt_core.TAlt17.Rule_do_stmt1","sum":4706728},{"parent":"TRule_sql_stmt_core.TAlt18","rule":"TRule_sql_stmt_core.TAlt18.Rule_define_action_or_subquery_stmt1","sum":22267674},{"parent":"TRule_sql_stmt_core.TAlt19","rule":"TRule_sql_stmt_core.TAlt19.Rule_if_stmt1","sum":8558779},{"parent":"TRule_sql_stmt_core.TAlt2","rule":"TRule_sql_stmt_core.TAlt2.Rule_select_stmt1","sum":193539980},{"parent":"TRule_sql_stmt_core.TAlt20","rule":"TRule_sql_stmt_core.TAlt20.Rule_for_stmt1","sum":7000888},{"parent":"TRule_sql_stmt_core.TAlt21","rule":"TRule_sql_stmt_core.TAlt21.Rule_values_stmt1","sum":321},{"parent":"TRule_sql_stmt_core.TAlt3","rule":"TRule_sql_stmt_core.TAlt3.Rule_named_nodes_stmt1","sum":943378032},{"parent":"TRule_sql_stmt_core.TAlt4","rule":"TRule_sql_stmt_core.TAlt4.Rule_create_table_stmt1","sum":127},{"parent":"TRule_sql_stmt_core.TAlt5","rule":"TRule_sql_stmt_core.TAlt5.Rule_drop_table_stmt1","sum":2566518},{"parent":"TRule_sql_stmt_core.TAlt6","rule":"TRule_sql_stmt_core.TAlt6.Rule_use_stmt1","sum":329427984},{"parent":"TRule_sql_stmt_core.TAlt7","rule":"TRule_sql_stmt_core.TAlt7.Rule_into_table_stmt1","sum":208912996},{"parent":"TRule_sql_stmt_core.TAlt8","rule":"TRule_sql_stmt_core.TAlt8.Rule_commit_stmt1","sum":13017921},{"parent":"TRule_sql_stmt_list","rule":"TRule_sql_stmt_list.Block1","sum":437},{"parent":"TRule_sql_stmt_list","rule":"TRule_sql_stmt_list.Block3","sum":314643305},{"parent":"TRule_sql_stmt_list","rule":"TRule_sql_stmt_list.Block4","sum":195133009},{"parent":"TRule_sql_stmt_list","rule":"TRule_sql_stmt_list.Rule_sql_stmt2","sum":320130391},{"parent":"TRule_sql_stmt_list","rule":"TRule_sql_stmt_list.Token5","sum":320130391},{"parent":"TRule_sql_stmt_list.TBlock1","rule":"TRule_sql_stmt_list.TBlock1.Token1","sum":460},{"parent":"TRule_sql_stmt_list.TBlock3","rule":"TRule_sql_stmt_list.TBlock3.Block1","sum":2400224237},{"parent":"TRule_sql_stmt_list.TBlock3","rule":"TRule_sql_st
mt_list.TBlock3.Rule_sql_stmt2","sum":2400224237},{"parent":"TRule_sql_stmt_list.TBlock3.TBlock1","rule":"TRule_sql_stmt_list.TBlock3.TBlock1.Token1","sum":2409292915},{"parent":"TRule_sql_stmt_list.TBlock4","rule":"TRule_sql_stmt_list.TBlock4.Token1","sum":197820473},{"parent":"TRule_struct_arg","rule":"TRule_struct_arg.Rule_type_name_or_bind3","sum":114016881},{"parent":"TRule_struct_arg","rule":"TRule_struct_arg.Rule_type_name_tag1","sum":114016881},{"parent":"TRule_struct_arg","rule":"TRule_struct_arg.Token2","sum":114016881},{"parent":"TRule_struct_arg_positional","rule":"TRule_struct_arg_positional.Alt_struct_arg_positional1","sum":175},{"parent":"TRule_struct_arg_positional.TAlt1","rule":"TRule_struct_arg_positional.TAlt1.Block3","sum":146},{"parent":"TRule_struct_arg_positional.TAlt1","rule":"TRule_struct_arg_positional.TAlt1.Rule_type_name_or_bind2","sum":175},{"parent":"TRule_struct_arg_positional.TAlt1","rule":"TRule_struct_arg_positional.TAlt1.Rule_type_name_tag1","sum":175},{"parent":"TRule_struct_arg_positional.TAlt1.TBlock3","rule":"TRule_struct_arg_positional.TAlt1.TBlock3.Token2","sum":146},{"parent":"TRule_struct_literal","rule":"TRule_struct_literal.Block2","sum":14467055},{"parent":"TRule_struct_literal","rule":"TRule_struct_literal.Block3","sum":2534097},{"parent":"TRule_struct_literal","rule":"TRule_struct_literal.Token1","sum":14597994},{"parent":"TRule_struct_literal","rule":"TRule_struct_literal.Token4","sum":14597994},{"parent":"TRule_struct_literal.TBlock2","rule":"TRule_struct_literal.TBlock2.Rule_expr_struct_list1","sum":14467055},{"parent":"TRule_struct_literal.TBlock3","rule":"TRule_struct_literal.TBlock3.Token1","sum":2534097},{"parent":"TRule_subselect_stmt","rule":"TRule_subselect_stmt.Block1","sum":329937241},{"parent":"TRule_subselect_stmt.TBlock1","rule":"TRule_subselect_stmt.TBlock1.Alt1","sum":207300301},{"parent":"TRule_subselect_stmt.TBlock1","rule":"TRule_subselect_stmt.TBlock1.Alt2","sum":122636940},{"parent":"TRule_subsele
ct_stmt.TBlock1.TAlt1","rule":"TRule_subselect_stmt.TBlock1.TAlt1.Rule_select_stmt2","sum":207300301},{"parent":"TRule_subselect_stmt.TBlock1.TAlt1","rule":"TRule_subselect_stmt.TBlock1.TAlt1.Token1","sum":207300301},{"parent":"TRule_subselect_stmt.TBlock1.TAlt1","rule":"TRule_subselect_stmt.TBlock1.TAlt1.Token3","sum":207300301},{"parent":"TRule_subselect_stmt.TBlock1.TAlt2","rule":"TRule_subselect_stmt.TBlock1.TAlt2.Rule_select_unparenthesized_stmt1","sum":122636940},{"parent":"TRule_table_arg","rule":"TRule_table_arg.Block1","sum":116310},{"parent":"TRule_table_arg","rule":"TRule_table_arg.Block3","sum":264688},{"parent":"TRule_table_arg","rule":"TRule_table_arg.Rule_named_expr2","sum":192507095},{"parent":"TRule_table_arg.TBlock1","rule":"TRule_table_arg.TBlock1.Token1","sum":116310},{"parent":"TRule_table_arg.TBlock3","rule":"TRule_table_arg.TBlock3.Rule_view_name2","sum":264688},{"parent":"TRule_table_arg.TBlock3","rule":"TRule_table_arg.TBlock3.Token1","sum":264688},{"parent":"TRule_table_constraint","rule":"TRule_table_constraint.Alt_table_constraint2","sum":126},{"parent":"TRule_table_constraint","rule":"TRule_table_constraint.Alt_table_constraint3","sum":107},{"parent":"TRule_table_constraint.TAlt2","rule":"TRule_table_constraint.TAlt2.Block5","sum":105},{"parent":"TRule_table_constraint.TAlt2","rule":"TRule_table_constraint.TAlt2.Rule_an_id4","sum":126},{"parent":"TRule_table_constraint.TAlt2","rule":"TRule_table_constraint.TAlt2.Token1","sum":126},{"parent":"TRule_table_constraint.TAlt2","rule":"TRule_table_constraint.TAlt2.Token2","sum":126},{"parent":"TRule_table_constraint.TAlt2","rule":"TRule_table_constraint.TAlt2.Token3","sum":126},{"parent":"TRule_table_constraint.TAlt2","rule":"TRule_table_constraint.TAlt2.Token6","sum":126},{"parent":"TRule_table_constraint.TAlt2.TBlock5","rule":"TRule_table_constraint.TAlt2.TBlock5.Rule_an_id2","sum":264},{"parent":"TRule_table_constraint.TAlt2.TBlock5","rule":"TRule_table_constraint.TAlt2.TBlock5.Token1","sum"
:264},{"parent":"TRule_table_constraint.TAlt3","rule":"TRule_table_constraint.TAlt3.Rule_column_order_by_specification4","sum":107},{"parent":"TRule_table_constraint.TAlt3","rule":"TRule_table_constraint.TAlt3.Token1","sum":107},{"parent":"TRule_table_constraint.TAlt3","rule":"TRule_table_constraint.TAlt3.Token2","sum":107},{"parent":"TRule_table_constraint.TAlt3","rule":"TRule_table_constraint.TAlt3.Token3","sum":107},{"parent":"TRule_table_constraint.TAlt3","rule":"TRule_table_constraint.TAlt3.Token6","sum":107},{"parent":"TRule_table_hint","rule":"TRule_table_hint.Alt_table_hint1","sum":154334901},{"parent":"TRule_table_hint","rule":"TRule_table_hint.Alt_table_hint2","sum":18944153},{"parent":"TRule_table_hint","rule":"TRule_table_hint.Alt_table_hint3","sum":12},{"parent":"TRule_table_hint.TAlt1","rule":"TRule_table_hint.TAlt1.Block2","sum":10710264},{"parent":"TRule_table_hint.TAlt1","rule":"TRule_table_hint.TAlt1.Rule_an_id_hint1","sum":154334901},{"parent":"TRule_table_hint.TAlt1.TBlock2","rule":"TRule_table_hint.TAlt1.TBlock2.Block2","sum":10710264},{"parent":"TRule_table_hint.TAlt1.TBlock2","rule":"TRule_table_hint.TAlt1.TBlock2.Token1","sum":10710264},{"parent":"TRule_table_hint.TAlt1.TBlock2.TBlock2","rule":"TRule_table_hint.TAlt1.TBlock2.TBlock2.Alt1","sum":10710264},{"parent":"TRule_table_hint.TAlt1.TBlock2.TBlock2.TAlt1","rule":"TRule_table_hint.TAlt1.TBlock2.TBlock2.TAlt1.Rule_type_name_tag1","sum":10710264},{"parent":"TRule_table_hint.TAlt2","rule":"TRule_table_hint.TAlt2.Block2","sum":1660},{"parent":"TRule_table_hint.TAlt2","rule":"TRule_table_hint.TAlt2.Rule_type_name_or_bind3","sum":18944153},{"parent":"TRule_table_hint.TAlt2","rule":"TRule_table_hint.TAlt2.Token1","sum":18944153},{"parent":"TRule_table_hint.TAlt2.TBlock2","rule":"TRule_table_hint.TAlt2.TBlock2.Token1","sum":1660},{"parent":"TRule_table_hint.TAlt3","rule":"TRule_table_hint.TAlt3.Block4","sum":12},{"parent":"TRule_table_hint.TAlt3","rule":"TRule_table_hint.TAlt3.Token1","sum":12},{
"parent":"TRule_table_hint.TAlt3","rule":"TRule_table_hint.TAlt3.Token3","sum":12},{"parent":"TRule_table_hint.TAlt3","rule":"TRule_table_hint.TAlt3.Token6","sum":12},{"parent":"TRule_table_hint.TAlt3.TBlock4","rule":"TRule_table_hint.TAlt3.TBlock4.Block2","sum":7},{"parent":"TRule_table_hint.TAlt3.TBlock4","rule":"TRule_table_hint.TAlt3.TBlock4.Rule_struct_arg_positional1","sum":12},{"parent":"TRule_table_hint.TAlt3.TBlock4.TBlock2","rule":"TRule_table_hint.TAlt3.TBlock4.TBlock2.Rule_struct_arg_positional2","sum":163},{"parent":"TRule_table_hint.TAlt3.TBlock4.TBlock2","rule":"TRule_table_hint.TAlt3.TBlock4.TBlock2.Token1","sum":163},{"parent":"TRule_table_hints","rule":"TRule_table_hints.Block2","sum":162657466},{"parent":"TRule_table_hints","rule":"TRule_table_hints.Token1","sum":162657466},{"parent":"TRule_table_hints.TBlock2","rule":"TRule_table_hints.TBlock2.Alt1","sum":152165766},{"parent":"TRule_table_hints.TBlock2","rule":"TRule_table_hints.TBlock2.Alt2","sum":10491700},{"parent":"TRule_table_hints.TBlock2.TAlt1","rule":"TRule_table_hints.TBlock2.TAlt1.Rule_table_hint1","sum":152165766},{"parent":"TRule_table_hints.TBlock2.TAlt2","rule":"TRule_table_hints.TBlock2.TAlt2.Block3","sum":10221799},{"parent":"TRule_table_hints.TBlock2.TAlt2","rule":"TRule_table_hints.TBlock2.TAlt2.Rule_table_hint2","sum":10491700},{"parent":"TRule_table_hints.TBlock2.TAlt2","rule":"TRule_table_hints.TBlock2.TAlt2.Token1","sum":10491700},{"parent":"TRule_table_hints.TBlock2.TAlt2","rule":"TRule_table_hints.TBlock2.TAlt2.Token4","sum":10491700},{"parent":"TRule_table_hints.TBlock2.TAlt2.TBlock3","rule":"TRule_table_hints.TBlock2.TAlt2.TBlock3.Rule_table_hint2","sum":10621600},{"parent":"TRule_table_hints.TBlock2.TAlt2.TBlock3","rule":"TRule_table_hints.TBlock2.TAlt2.TBlock3.Token1","sum":10621600},{"parent":"TRule_table_key","rule":"TRule_table_key.Block2","sum":864520},{"parent":"TRule_table_key","rule":"TRule_table_key.Rule_id_table_or_type1","sum":330801595},{"parent":"TRule_tabl
e_key.TBlock2","rule":"TRule_table_key.TBlock2.Rule_view_name2","sum":864520},{"parent":"TRule_table_key.TBlock2","rule":"TRule_table_key.TBlock2.Token1","sum":864520},{"parent":"TRule_table_ref","rule":"TRule_table_ref.Block1","sum":39945456},{"parent":"TRule_table_ref","rule":"TRule_table_ref.Block2","sum":6030087},{"parent":"TRule_table_ref","rule":"TRule_table_ref.Block3","sum":953241090},{"parent":"TRule_table_ref","rule":"TRule_table_ref.Block4","sum":19663127},{"parent":"TRule_table_ref.TBlock1","rule":"TRule_table_ref.TBlock1.Rule_cluster_expr1","sum":39945456},{"parent":"TRule_table_ref.TBlock1","rule":"TRule_table_ref.TBlock1.Token2","sum":39945456},{"parent":"TRule_table_ref.TBlock2","rule":"TRule_table_ref.TBlock2.Token1","sum":6030087},{"parent":"TRule_table_ref.TBlock3","rule":"TRule_table_ref.TBlock3.Alt1","sum":330801595},{"parent":"TRule_table_ref.TBlock3","rule":"TRule_table_ref.TBlock3.Alt2","sum":96266390},{"parent":"TRule_table_ref.TBlock3","rule":"TRule_table_ref.TBlock3.Alt3","sum":526173105},{"parent":"TRule_table_ref.TBlock3.TAlt1","rule":"TRule_table_ref.TBlock3.TAlt1.Rule_table_key1","sum":330801595},{"parent":"TRule_table_ref.TBlock3.TAlt2","rule":"TRule_table_ref.TBlock3.TAlt2.Block3","sum":96266365},{"parent":"TRule_table_ref.TBlock3.TAlt2","rule":"TRule_table_ref.TBlock3.TAlt2.Rule_an_id_expr1","sum":96266390},{"parent":"TRule_table_ref.TBlock3.TAlt2","rule":"TRule_table_ref.TBlock3.TAlt2.Token2","sum":96266390},{"parent":"TRule_table_ref.TBlock3.TAlt2","rule":"TRule_table_ref.TBlock3.TAlt2.Token4","sum":96266390},{"parent":"TRule_table_ref.TBlock3.TAlt2.TBlock3","rule":"TRule_table_ref.TBlock3.TAlt2.TBlock3.Block2","sum":49696578},{"parent":"TRule_table_ref.TBlock3.TAlt2.TBlock3","rule":"TRule_table_ref.TBlock3.TAlt2.TBlock3.Block3","sum":356338},{"parent":"TRule_table_ref.TBlock3.TAlt2.TBlock3","rule":"TRule_table_ref.TBlock3.TAlt2.TBlock3.Rule_table_arg1","sum":96266365},{"parent":"TRule_table_ref.TBlock3.TAlt2.TBlock3.TBlock2","rul
e":"TRule_table_ref.TBlock3.TAlt2.TBlock3.TBlock2.Rule_table_arg2","sum":96240730},{"parent":"TRule_table_ref.TBlock3.TAlt2.TBlock3.TBlock2","rule":"TRule_table_ref.TBlock3.TAlt2.TBlock3.TBlock2.Token1","sum":96240730},{"parent":"TRule_table_ref.TBlock3.TAlt2.TBlock3.TBlock3","rule":"TRule_table_ref.TBlock3.TAlt2.TBlock3.TBlock3.Token1","sum":356338},{"parent":"TRule_table_ref.TBlock3.TAlt3","rule":"TRule_table_ref.TBlock3.TAlt3.Block2","sum":22272712},{"parent":"TRule_table_ref.TBlock3.TAlt3","rule":"TRule_table_ref.TBlock3.TAlt3.Block3","sum":177045},{"parent":"TRule_table_ref.TBlock3.TAlt3","rule":"TRule_table_ref.TBlock3.TAlt3.Rule_bind_parameter1","sum":526173105},{"parent":"TRule_table_ref.TBlock3.TAlt3.TBlock2","rule":"TRule_table_ref.TBlock3.TAlt3.TBlock2.Block2","sum":15906646},{"parent":"TRule_table_ref.TBlock3.TAlt3.TBlock2","rule":"TRule_table_ref.TBlock3.TAlt3.TBlock2.Token1","sum":22272712},{"parent":"TRule_table_ref.TBlock3.TAlt3.TBlock2","rule":"TRule_table_ref.TBlock3.TAlt3.TBlock2.Token3","sum":22272712},{"parent":"TRule_table_ref.TBlock3.TAlt3.TBlock2.TBlock2","rule":"TRule_table_ref.TBlock3.TAlt3.TBlock2.TBlock2.Rule_expr_list1","sum":15906646},{"parent":"TRule_table_ref.TBlock3.TAlt3.TBlock3","rule":"TRule_table_ref.TBlock3.TAlt3.TBlock3.Rule_view_name2","sum":177045},{"parent":"TRule_table_ref.TBlock3.TAlt3.TBlock3","rule":"TRule_table_ref.TBlock3.TAlt3.TBlock3.Token1","sum":177045},{"parent":"TRule_table_ref.TBlock4","rule":"TRule_table_ref.TBlock4.Rule_table_hints1","sum":19663127},{"parent":"TRule_tablesample_clause","rule":"TRule_tablesample_clause.Block6","sum":9275},{"parent":"TRule_tablesample_clause","rule":"TRule_tablesample_clause.Rule_expr4","sum":328669},{"parent":"TRule_tablesample_clause","rule":"TRule_tablesample_clause.Rule_sampling_mode2","sum":328669},{"parent":"TRule_tablesample_clause","rule":"TRule_tablesample_clause.Token1","sum":328669},{"parent":"TRule_tablesample_clause","rule":"TRule_tablesample_clause.Token3","sum":32
8669},{"parent":"TRule_tablesample_clause","rule":"TRule_tablesample_clause.Token5","sum":328669},{"parent":"TRule_tablesample_clause.TBlock6","rule":"TRule_tablesample_clause.TBlock6.Rule_repeatable_clause1","sum":9275},{"parent":"TRule_type_id","rule":"TRule_type_id.Token1","sum":11667116},{"parent":"TRule_type_name","rule":"TRule_type_name.Alt_type_name1","sum":89350499},{"parent":"TRule_type_name","rule":"TRule_type_name.Alt_type_name2","sum":703427460},{"parent":"TRule_type_name.TAlt1","rule":"TRule_type_name.TAlt1.Rule_type_name_composite1","sum":89350499},{"parent":"TRule_type_name.TAlt2","rule":"TRule_type_name.TAlt2.Block1","sum":703427460},{"parent":"TRule_type_name.TAlt2","rule":"TRule_type_name.TAlt2.Block2","sum":73589678},{"parent":"TRule_type_name.TAlt2.TBlock1","rule":"TRule_type_name.TAlt2.TBlock1.Alt1","sum":27884202},{"parent":"TRule_type_name.TAlt2.TBlock1","rule":"TRule_type_name.TAlt2.TBlock1.Alt2","sum":675543258},{"parent":"TRule_type_name.TAlt2.TBlock1.TAlt1","rule":"TRule_type_name.TAlt2.TBlock1.TAlt1.Rule_type_name_decimal1","sum":27884202},{"parent":"TRule_type_name.TAlt2.TBlock1.TAlt2","rule":"TRule_type_name.TAlt2.TBlock1.TAlt2.Rule_type_name_simple1","sum":675543258},{"parent":"TRule_type_name.TAlt2.TBlock2","rule":"TRule_type_name.TAlt2.TBlock2.Token1","sum":73590713},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Block4","sum":9759575},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Block5","sum":222641},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Block6","sum":10457},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Rule_type_name_or_bind9","sum":10086496},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Token1","sum":10086496},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Token10","sum":10086496},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Token2","sum":10086496},{"parent"
:"TRule_type_name_callable","rule":"TRule_type_name_callable.Token3","sum":10086496},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Token7","sum":10086496},{"parent":"TRule_type_name_callable","rule":"TRule_type_name_callable.Token8","sum":10086496},{"parent":"TRule_type_name_callable.TBlock4","rule":"TRule_type_name_callable.TBlock4.Rule_callable_arg_list1","sum":9759575},{"parent":"TRule_type_name_callable.TBlock5","rule":"TRule_type_name_callable.TBlock5.Token1","sum":222641},{"parent":"TRule_type_name_callable.TBlock6","rule":"TRule_type_name_callable.TBlock6.Rule_callable_arg_list2","sum":10457},{"parent":"TRule_type_name_callable.TBlock6","rule":"TRule_type_name_callable.TBlock6.Token1","sum":10457},{"parent":"TRule_type_name_callable.TBlock6","rule":"TRule_type_name_callable.TBlock6.Token3","sum":10457},{"parent":"TRule_type_name_composite","rule":"TRule_type_name_composite.Block1","sum":106726447},{"parent":"TRule_type_name_composite","rule":"TRule_type_name_composite.Block2","sum":4244791},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt1","sum":33257209},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt10","sum":20320},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt11","sum":456243},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt12","sum":7804},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt13","sum":10086496},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt2","sum":2156159},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt3","sum":26673461},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt4","sum":136152},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt
5","sum":25759007},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt6","sum":1520066},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt8","sum":6640454},{"parent":"TRule_type_name_composite.TBlock1","rule":"TRule_type_name_composite.TBlock1.Alt9","sum":13076},{"parent":"TRule_type_name_composite.TBlock1.TAlt1","rule":"TRule_type_name_composite.TBlock1.TAlt1.Rule_type_name_optional1","sum":33257209},{"parent":"TRule_type_name_composite.TBlock1.TAlt10","rule":"TRule_type_name_composite.TBlock1.TAlt10.Rule_type_name_enum1","sum":20320},{"parent":"TRule_type_name_composite.TBlock1.TAlt11","rule":"TRule_type_name_composite.TBlock1.TAlt11.Rule_type_name_resource1","sum":456243},{"parent":"TRule_type_name_composite.TBlock1.TAlt12","rule":"TRule_type_name_composite.TBlock1.TAlt12.Rule_type_name_tagged1","sum":7804},{"parent":"TRule_type_name_composite.TBlock1.TAlt13","rule":"TRule_type_name_composite.TBlock1.TAlt13.Rule_type_name_callable1","sum":10086496},{"parent":"TRule_type_name_composite.TBlock1.TAlt2","rule":"TRule_type_name_composite.TBlock1.TAlt2.Rule_type_name_tuple1","sum":2156159},{"parent":"TRule_type_name_composite.TBlock1.TAlt3","rule":"TRule_type_name_composite.TBlock1.TAlt3.Rule_type_name_struct1","sum":26673461},{"parent":"TRule_type_name_composite.TBlock1.TAlt4","rule":"TRule_type_name_composite.TBlock1.TAlt4.Rule_type_name_variant1","sum":136152},{"parent":"TRule_type_name_composite.TBlock1.TAlt5","rule":"TRule_type_name_composite.TBlock1.TAlt5.Rule_type_name_list1","sum":25759007},{"parent":"TRule_type_name_composite.TBlock1.TAlt6","rule":"TRule_type_name_composite.TBlock1.TAlt6.Rule_type_name_stream1","sum":1520066},{"parent":"TRule_type_name_composite.TBlock1.TAlt8","rule":"TRule_type_name_composite.TBlock1.TAlt8.Rule_type_name_dict1","sum":6640454},{"parent":"TRule_type_name_composite.TBlock1.TAlt9","rule":"TRule_type_name_composite.TBlock1.TAlt9.Rule_type_name_set1","
sum":13076},{"parent":"TRule_type_name_composite.TBlock2","rule":"TRule_type_name_composite.TBlock2.Token1","sum":4244794},{"parent":"TRule_type_name_decimal","rule":"TRule_type_name_decimal.Rule_integer_or_bind3","sum":27884202},{"parent":"TRule_type_name_decimal","rule":"TRule_type_name_decimal.Rule_integer_or_bind5","sum":27884202},{"parent":"TRule_type_name_decimal","rule":"TRule_type_name_decimal.Token1","sum":27884202},{"parent":"TRule_type_name_decimal","rule":"TRule_type_name_decimal.Token2","sum":27884202},{"parent":"TRule_type_name_decimal","rule":"TRule_type_name_decimal.Token4","sum":27884202},{"parent":"TRule_type_name_decimal","rule":"TRule_type_name_decimal.Token6","sum":27884202},{"parent":"TRule_type_name_dict","rule":"TRule_type_name_dict.Rule_type_name_or_bind3","sum":6640454},{"parent":"TRule_type_name_dict","rule":"TRule_type_name_dict.Rule_type_name_or_bind5","sum":6640454},{"parent":"TRule_type_name_dict","rule":"TRule_type_name_dict.Token1","sum":6640454},{"parent":"TRule_type_name_dict","rule":"TRule_type_name_dict.Token2","sum":6640454},{"parent":"TRule_type_name_dict","rule":"TRule_type_name_dict.Token4","sum":6640454},{"parent":"TRule_type_name_dict","rule":"TRule_type_name_dict.Token6","sum":6640454},{"parent":"TRule_type_name_enum","rule":"TRule_type_name_enum.Block4","sum":20072},{"parent":"TRule_type_name_enum","rule":"TRule_type_name_enum.Block5","sum":378},{"parent":"TRule_type_name_enum","rule":"TRule_type_name_enum.Rule_type_name_tag3","sum":20320},{"parent":"TRule_type_name_enum","rule":"TRule_type_name_enum.Token1","sum":20320},{"parent":"TRule_type_name_enum","rule":"TRule_type_name_enum.Token2","sum":20320},{"parent":"TRule_type_name_enum","rule":"TRule_type_name_enum.Token6","sum":20320},{"parent":"TRule_type_name_enum.TBlock4","rule":"TRule_type_name_enum.TBlock4.Rule_type_name_tag2","sum":43728},{"parent":"TRule_type_name_enum.TBlock4","rule":"TRule_type_name_enum.TBlock4.Token1","sum":43728},{"parent":"TRule_type_name_enum
.TBlock5","rule":"TRule_type_name_enum.TBlock5.Token1","sum":378},{"parent":"TRule_type_name_list","rule":"TRule_type_name_list.Rule_type_name_or_bind3","sum":25759007},{"parent":"TRule_type_name_list","rule":"TRule_type_name_list.Token1","sum":25759007},{"parent":"TRule_type_name_list","rule":"TRule_type_name_list.Token2","sum":25759007},{"parent":"TRule_type_name_list","rule":"TRule_type_name_list.Token4","sum":25759007},{"parent":"TRule_type_name_optional","rule":"TRule_type_name_optional.Rule_type_name_or_bind3","sum":33257209},{"parent":"TRule_type_name_optional","rule":"TRule_type_name_optional.Token1","sum":33257209},{"parent":"TRule_type_name_optional","rule":"TRule_type_name_optional.Token2","sum":33257209},{"parent":"TRule_type_name_optional","rule":"TRule_type_name_optional.Token4","sum":33257209},{"parent":"TRule_type_name_or_bind","rule":"TRule_type_name_or_bind.Alt_type_name_or_bind1","sum":636419428},{"parent":"TRule_type_name_or_bind","rule":"TRule_type_name_or_bind.Alt_type_name_or_bind2","sum":2669561},{"parent":"TRule_type_name_or_bind.TAlt1","rule":"TRule_type_name_or_bind.TAlt1.Rule_type_name1","sum":636419428},{"parent":"TRule_type_name_or_bind.TAlt2","rule":"TRule_type_name_or_bind.TAlt2.Rule_bind_parameter1","sum":2669561},{"parent":"TRule_type_name_resource","rule":"TRule_type_name_resource.Rule_type_name_tag3","sum":456243},{"parent":"TRule_type_name_resource","rule":"TRule_type_name_resource.Token1","sum":456243},{"parent":"TRule_type_name_resource","rule":"TRule_type_name_resource.Token2","sum":456243},{"parent":"TRule_type_name_resource","rule":"TRule_type_name_resource.Token4","sum":456243},{"parent":"TRule_type_name_set","rule":"TRule_type_name_set.Rule_type_name_or_bind3","sum":13076},{"parent":"TRule_type_name_set","rule":"TRule_type_name_set.Token1","sum":13076},{"parent":"TRule_type_name_set","rule":"TRule_type_name_set.Token2","sum":13076},{"parent":"TRule_type_name_set","rule":"TRule_type_name_set.Token4","sum":13076},{"parent":"
TRule_type_name_simple","rule":"TRule_type_name_simple.Rule_an_id_pure1","sum":677385895},{"parent":"TRule_type_name_stream","rule":"TRule_type_name_stream.Rule_type_name_or_bind3","sum":1520066},{"parent":"TRule_type_name_stream","rule":"TRule_type_name_stream.Token1","sum":1520066},{"parent":"TRule_type_name_stream","rule":"TRule_type_name_stream.Token2","sum":1520066},{"parent":"TRule_type_name_stream","rule":"TRule_type_name_stream.Token4","sum":1520066},{"parent":"TRule_type_name_struct","rule":"TRule_type_name_struct.Block2","sum":26673461},{"parent":"TRule_type_name_struct","rule":"TRule_type_name_struct.Token1","sum":26673461},{"parent":"TRule_type_name_struct.TBlock2","rule":"TRule_type_name_struct.TBlock2.Alt1","sum":26670569},{"parent":"TRule_type_name_struct.TBlock2","rule":"TRule_type_name_struct.TBlock2.Alt2","sum":2892},{"parent":"TRule_type_name_struct.TBlock2.TAlt1","rule":"TRule_type_name_struct.TBlock2.TAlt1.Block2","sum":26670558},{"parent":"TRule_type_name_struct.TBlock2.TAlt1","rule":"TRule_type_name_struct.TBlock2.TAlt1.Token1","sum":26670569},{"parent":"TRule_type_name_struct.TBlock2.TAlt1","rule":"TRule_type_name_struct.TBlock2.TAlt1.Token3","sum":26670569},{"parent":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2","rule":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.Block2","sum":20356595},{"parent":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2","rule":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.Block3","sum":2053764},{"parent":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2","rule":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.Rule_struct_arg1","sum":26670558},{"parent":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.TBlock2","rule":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.TBlock2.Rule_struct_arg2","sum":87346323},{"parent":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.TBlock2","rule":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.TBlock2.Token1","sum":87346323},{"parent":"TRule_type_name_struct.TBlock2.TAlt1.TBlock2.TBlock3","rule":"TRule
_type_name_struct.TBlock2.TAlt1.TBlock2.TBlock3.Token1","sum":2053764},{"parent":"TRule_type_name_struct.TBlock2.TAlt2","rule":"TRule_type_name_struct.TBlock2.TAlt2.Token1","sum":2892},{"parent":"TRule_type_name_tag","rule":"TRule_type_name_tag.Alt_type_name_tag1","sum":104746815},{"parent":"TRule_type_name_tag","rule":"TRule_type_name_tag.Alt_type_name_tag2","sum":18252530},{"parent":"TRule_type_name_tag","rule":"TRule_type_name_tag.Alt_type_name_tag3","sum":2612869},{"parent":"TRule_type_name_tag.TAlt1","rule":"TRule_type_name_tag.TAlt1.Rule_id1","sum":104746815},{"parent":"TRule_type_name_tag.TAlt2","rule":"TRule_type_name_tag.TAlt2.Token1","sum":18252530},{"parent":"TRule_type_name_tag.TAlt3","rule":"TRule_type_name_tag.TAlt3.Rule_bind_parameter1","sum":2612869},{"parent":"TRule_type_name_tagged","rule":"TRule_type_name_tagged.Rule_type_name_or_bind3","sum":7804},{"parent":"TRule_type_name_tagged","rule":"TRule_type_name_tagged.Rule_type_name_tag5","sum":7804},{"parent":"TRule_type_name_tagged","rule":"TRule_type_name_tagged.Token1","sum":7804},{"parent":"TRule_type_name_tagged","rule":"TRule_type_name_tagged.Token2","sum":7804},{"parent":"TRule_type_name_tagged","rule":"TRule_type_name_tagged.Token4","sum":7804},{"parent":"TRule_type_name_tagged","rule":"TRule_type_name_tagged.Token6","sum":7804},{"parent":"TRule_type_name_tuple","rule":"TRule_type_name_tuple.Block2","sum":2156159},{"parent":"TRule_type_name_tuple","rule":"TRule_type_name_tuple.Token1","sum":2156159},{"parent":"TRule_type_name_tuple.TBlock2","rule":"TRule_type_name_tuple.TBlock2.Alt1","sum":2156149},{"parent":"TRule_type_name_tuple.TBlock2","rule":"TRule_type_name_tuple.TBlock2.Alt2","sum":10},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1","rule":"TRule_type_name_tuple.TBlock2.TAlt1.Block2","sum":2156149},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1","rule":"TRule_type_name_tuple.TBlock2.TAlt1.Token1","sum":2156149},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1","rule":"TRule_type_name_tupl
e.TBlock2.TAlt1.Token3","sum":2156149},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2","rule":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.Block2","sum":2154633},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2","rule":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.Block3","sum":63598},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2","rule":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.Rule_type_name_or_bind1","sum":2156149},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.TBlock2","rule":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.TBlock2.Rule_type_name_or_bind2","sum":3336978},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.TBlock2","rule":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.TBlock2.Token1","sum":3336978},{"parent":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.TBlock3","rule":"TRule_type_name_tuple.TBlock2.TAlt1.TBlock2.TBlock3.Token1","sum":63598},{"parent":"TRule_type_name_tuple.TBlock2.TAlt2","rule":"TRule_type_name_tuple.TBlock2.TAlt2.Token1","sum":10},{"parent":"TRule_type_name_variant","rule":"TRule_type_name_variant.Block4","sum":121499},{"parent":"TRule_type_name_variant","rule":"TRule_type_name_variant.Block5","sum":99},{"parent":"TRule_type_name_variant","rule":"TRule_type_name_variant.Rule_variant_arg3","sum":136152},{"parent":"TRule_type_name_variant","rule":"TRule_type_name_variant.Token1","sum":136152},{"parent":"TRule_type_name_variant","rule":"TRule_type_name_variant.Token2","sum":136152},{"parent":"TRule_type_name_variant","rule":"TRule_type_name_variant.Token6","sum":136152},{"parent":"TRule_type_name_variant.TBlock4","rule":"TRule_type_name_variant.TBlock4.Rule_variant_arg2","sum":231876},{"parent":"TRule_type_name_variant.TBlock4","rule":"TRule_type_name_variant.TBlock4.Token1","sum":231876},{"parent":"TRule_type_name_variant.TBlock5","rule":"TRule_type_name_variant.TBlock5.Token1","sum":99},{"parent":"TRule_unary_casual_subexpr","rule":"TRule_unary_casual_subexpr.Block1","sum":16009368007},{"parent":"TRu
le_unary_casual_subexpr","rule":"TRule_unary_casual_subexpr.Rule_unary_subexpr_suffix2","sum":16009368007},{"parent":"TRule_unary_casual_subexpr.TBlock1","rule":"TRule_unary_casual_subexpr.TBlock1.Alt1","sum":7177628946},{"parent":"TRule_unary_casual_subexpr.TBlock1","rule":"TRule_unary_casual_subexpr.TBlock1.Alt2","sum":8831739061},{"parent":"TRule_unary_casual_subexpr.TBlock1.TAlt1","rule":"TRule_unary_casual_subexpr.TBlock1.TAlt1.Rule_id_expr1","sum":7177628946},{"parent":"TRule_unary_casual_subexpr.TBlock1.TAlt2","rule":"TRule_unary_casual_subexpr.TBlock1.TAlt2.Rule_atom_expr1","sum":8831739061},{"parent":"TRule_unary_op","rule":"TRule_unary_op.Token1","sum":87792338},{"parent":"TRule_unary_subexpr","rule":"TRule_unary_subexpr.Alt_unary_subexpr1","sum":16009368007},{"parent":"TRule_unary_subexpr","rule":"TRule_unary_subexpr.Alt_unary_subexpr2","sum":6348054},{"parent":"TRule_unary_subexpr.TAlt1","rule":"TRule_unary_subexpr.TAlt1.Rule_unary_casual_subexpr1","sum":16009368007},{"parent":"TRule_unary_subexpr.TAlt2","rule":"TRule_unary_subexpr.TAlt2.Rule_json_api_expr1","sum":6348054},{"parent":"TRule_unary_subexpr_suffix","rule":"TRule_unary_subexpr_suffix.Block1","sum":4771836263},{"parent":"TRule_unary_subexpr_suffix.TBlock1","rule":"TRule_unary_subexpr_suffix.TBlock1.Block1","sum":5026228634},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.Alt1","sum":173319845},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.Alt2","sum":2874929886},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.Alt3","sum":1977978903},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt1","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt1.Rule_key_expr1","sum":173319845},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt2","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt2.Rule_invoke_expr1","sum"
:2874929886},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.Block2","sum":1977978903},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.Token1","sum":1977978903},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.Alt1","sum":3152320},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.Alt2","sum":40423819},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.Alt3","sum":1934402764},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.TAlt1","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.TAlt1.Rule_bind_parameter1","sum":3152320},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.TAlt2","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.TAlt2.Token1","sum":40423819},{"parent":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.TAlt3","rule":"TRule_unary_subexpr_suffix.TBlock1.TBlock1.TAlt3.TBlock2.TAlt3.Rule_an_id_or_type1","sum":1934402764},{"parent":"TRule_use_stmt","rule":"TRule_use_stmt.Rule_cluster_expr2","sum":329427984},{"parent":"TRule_use_stmt","rule":"TRule_use_stmt.Token1","sum":329427984},{"parent":"TRule_using_call_expr","rule":"TRule_using_call_expr.Block1","sum":2767190},{"parent":"TRule_using_call_expr","rule":"TRule_using_call_expr.Rule_invoke_expr2","sum":2767190},{"parent":"TRule_using_call_expr.TBlock1","rule":"TRule_using_call_expr.TBlock1.Alt1","sum":1520665},{"parent":"TRule_using_call_expr.TBlock1","rule":"TRule_using_call_expr.TBlock1.Alt3","sum":1246525},{"parent":"TRule_using_call_expr.TBlock1.TAlt1","rule":"TRule_using_call_expr.TBlock1.TAlt1.Rule_an_id_or_type1","sum":1520665},{"parent":"TRul
e_using_call_expr.TBlock1.TAlt1","rule":"TRule_using_call_expr.TBlock1.TAlt1.Rule_an_id_or_type3","sum":1520665},{"parent":"TRule_using_call_expr.TBlock1.TAlt1","rule":"TRule_using_call_expr.TBlock1.TAlt1.Token2","sum":1520665},{"parent":"TRule_using_call_expr.TBlock1.TAlt3","rule":"TRule_using_call_expr.TBlock1.TAlt3.Rule_bind_parameter1","sum":1246525},{"parent":"TRule_value_constructor","rule":"TRule_value_constructor.Alt_value_constructor1","sum":32135},{"parent":"TRule_value_constructor","rule":"TRule_value_constructor.Alt_value_constructor2","sum":50918},{"parent":"TRule_value_constructor","rule":"TRule_value_constructor.Alt_value_constructor3","sum":80586},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Rule_expr3","sum":32135},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Rule_expr5","sum":32135},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Rule_expr7","sum":32135},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Token1","sum":32135},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Token2","sum":32135},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Token4","sum":32135},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Token6","sum":32135},{"parent":"TRule_value_constructor.TAlt1","rule":"TRule_value_constructor.TAlt1.Token8","sum":32135},{"parent":"TRule_value_constructor.TAlt2","rule":"TRule_value_constructor.TAlt2.Rule_expr3","sum":50918},{"parent":"TRule_value_constructor.TAlt2","rule":"TRule_value_constructor.TAlt2.Rule_expr5","sum":50918},{"parent":"TRule_value_constructor.TAlt2","rule":"TRule_value_constructor.TAlt2.Token1","sum":50918},{"parent":"TRule_value_constructor.TAlt2","rule":"TRule_value_constructor.TAlt2.Token2","sum":50918},{"parent":"TRule_value_constructor.TAlt2","rule":"TRule_value_constructor.TAlt2.Token4","sum":50
918},{"parent":"TRule_value_constructor.TAlt2","rule":"TRule_value_constructor.TAlt2.Token6","sum":50918},{"parent":"TRule_value_constructor.TAlt3","rule":"TRule_value_constructor.TAlt3.Rule_expr3","sum":80586},{"parent":"TRule_value_constructor.TAlt3","rule":"TRule_value_constructor.TAlt3.Rule_expr5","sum":80586},{"parent":"TRule_value_constructor.TAlt3","rule":"TRule_value_constructor.TAlt3.Token1","sum":80586},{"parent":"TRule_value_constructor.TAlt3","rule":"TRule_value_constructor.TAlt3.Token2","sum":80586},{"parent":"TRule_value_constructor.TAlt3","rule":"TRule_value_constructor.TAlt3.Token4","sum":80586},{"parent":"TRule_value_constructor.TAlt3","rule":"TRule_value_constructor.TAlt3.Token6","sum":80586},{"parent":"TRule_values_source","rule":"TRule_values_source.Alt_values_source1","sum":1085217},{"parent":"TRule_values_source","rule":"TRule_values_source.Alt_values_source2","sum":207827779},{"parent":"TRule_values_source.TAlt1","rule":"TRule_values_source.TAlt1.Rule_values_stmt1","sum":1085217},{"parent":"TRule_values_source.TAlt2","rule":"TRule_values_source.TAlt2.Rule_select_stmt1","sum":207827779},{"parent":"TRule_values_source_row","rule":"TRule_values_source_row.Rule_expr_list2","sum":9055982},{"parent":"TRule_values_source_row","rule":"TRule_values_source_row.Token1","sum":9055982},{"parent":"TRule_values_source_row","rule":"TRule_values_source_row.Token3","sum":9055982},{"parent":"TRule_values_source_row_list","rule":"TRule_values_source_row_list.Block2","sum":486988},{"parent":"TRule_values_source_row_list","rule":"TRule_values_source_row_list.Rule_values_source_row1","sum":1253910},{"parent":"TRule_values_source_row_list.TBlock2","rule":"TRule_values_source_row_list.TBlock2.Rule_values_source_row2","sum":7802072},{"parent":"TRule_values_source_row_list.TBlock2","rule":"TRule_values_source_row_list.TBlock2.Token1","sum":7802072},{"parent":"TRule_values_stmt","rule":"TRule_values_stmt.Rule_values_source_row_list2","sum":1253910},{"parent":"TRule_value
s_stmt","rule":"TRule_values_stmt.Token1","sum":1253910},{"parent":"TRule_variant_arg","rule":"TRule_variant_arg.Block1","sum":356799},{"parent":"TRule_variant_arg","rule":"TRule_variant_arg.Rule_type_name_or_bind2","sum":18347086},{"parent":"TRule_variant_arg.TBlock1","rule":"TRule_variant_arg.TBlock1.Rule_type_name_tag1","sum":356799},{"parent":"TRule_variant_arg.TBlock1","rule":"TRule_variant_arg.TBlock1.Token2","sum":356799},{"parent":"TRule_view_name","rule":"TRule_view_name.Alt_view_name1","sum":1306253},{"parent":"TRule_view_name.TAlt1","rule":"TRule_view_name.TAlt1.Rule_an_id1","sum":1306253},{"parent":"TRule_when_expr","rule":"TRule_when_expr.Rule_expr2","sum":158241873},{"parent":"TRule_when_expr","rule":"TRule_when_expr.Rule_expr4","sum":158241873},{"parent":"TRule_when_expr","rule":"TRule_when_expr.Token1","sum":158241873},{"parent":"TRule_when_expr","rule":"TRule_when_expr.Token3","sum":158241873},{"parent":"TRule_window_clause","rule":"TRule_window_clause.Rule_window_definition_list2","sum":13585207},{"parent":"TRule_window_clause","rule":"TRule_window_clause.Token1","sum":13585207},{"parent":"TRule_window_definition","rule":"TRule_window_definition.Rule_new_window_name1","sum":14160253},{"parent":"TRule_window_definition","rule":"TRule_window_definition.Rule_window_specification3","sum":14160253},{"parent":"TRule_window_definition","rule":"TRule_window_definition.Token2","sum":14160253},{"parent":"TRule_window_definition_list","rule":"TRule_window_definition_list.Block2","sum":434934},{"parent":"TRule_window_definition_list","rule":"TRule_window_definition_list.Rule_window_definition1","sum":13585207},{"parent":"TRule_window_definition_list.TBlock2","rule":"TRule_window_definition_list.TBlock2.Rule_window_definition2","sum":575046},{"parent":"TRule_window_definition_list.TBlock2","rule":"TRule_window_definition_list.TBlock2.Token1","sum":575046},{"parent":"TRule_window_frame_between","rule":"TRule_window_frame_between.Rule_window_frame_bound2","sum":2
316737},{"parent":"TRule_window_frame_between","rule":"TRule_window_frame_between.Rule_window_frame_bound4","sum":2316737},{"parent":"TRule_window_frame_between","rule":"TRule_window_frame_between.Token1","sum":2316737},{"parent":"TRule_window_frame_between","rule":"TRule_window_frame_between.Token3","sum":2316737},{"parent":"TRule_window_frame_bound","rule":"TRule_window_frame_bound.Alt_window_frame_bound1","sum":1324737},{"parent":"TRule_window_frame_bound","rule":"TRule_window_frame_bound.Alt_window_frame_bound2","sum":3366628},{"parent":"TRule_window_frame_bound.TAlt1","rule":"TRule_window_frame_bound.TAlt1.Token1","sum":1324737},{"parent":"TRule_window_frame_bound.TAlt1","rule":"TRule_window_frame_bound.TAlt1.Token2","sum":1324737},{"parent":"TRule_window_frame_bound.TAlt2","rule":"TRule_window_frame_bound.TAlt2.Block1","sum":3366628},{"parent":"TRule_window_frame_bound.TAlt2","rule":"TRule_window_frame_bound.TAlt2.Token2","sum":3366628},{"parent":"TRule_window_frame_bound.TAlt2.TBlock1","rule":"TRule_window_frame_bound.TAlt2.TBlock1.Alt1","sum":1114210},{"parent":"TRule_window_frame_bound.TAlt2.TBlock1","rule":"TRule_window_frame_bound.TAlt2.TBlock1.Alt2","sum":2252418},{"parent":"TRule_window_frame_bound.TAlt2.TBlock1.TAlt1","rule":"TRule_window_frame_bound.TAlt2.TBlock1.TAlt1.Rule_expr1","sum":1114210},{"parent":"TRule_window_frame_bound.TAlt2.TBlock1.TAlt2","rule":"TRule_window_frame_bound.TAlt2.TBlock1.TAlt2.Token1","sum":2252418},{"parent":"TRule_window_frame_clause","rule":"TRule_window_frame_clause.Rule_window_frame_extent2","sum":2374628},{"parent":"TRule_window_frame_clause","rule":"TRule_window_frame_clause.Rule_window_frame_units1","sum":2374628},{"parent":"TRule_window_frame_extent","rule":"TRule_window_frame_extent.Alt_window_frame_extent1","sum":57891},{"parent":"TRule_window_frame_extent","rule":"TRule_window_frame_extent.Alt_window_frame_extent2","sum":2316737},{"parent":"TRule_window_frame_extent.TAlt1","rule":"TRule_window_frame_extent.TAlt1.
Rule_window_frame_bound1","sum":57891},{"parent":"TRule_window_frame_extent.TAlt2","rule":"TRule_window_frame_extent.TAlt2.Rule_window_frame_between1","sum":2316737},{"parent":"TRule_window_frame_units","rule":"TRule_window_frame_units.Token1","sum":2374628},{"parent":"TRule_window_name","rule":"TRule_window_name.Rule_an_id_window1","sum":46640092},{"parent":"TRule_window_name_or_specification","rule":"TRule_window_name_or_specification.Alt_window_name_or_specification1","sum":32479839},{"parent":"TRule_window_name_or_specification","rule":"TRule_window_name_or_specification.Alt_window_name_or_specification2","sum":12826512},{"parent":"TRule_window_name_or_specification.TAlt1","rule":"TRule_window_name_or_specification.TAlt1.Rule_window_name1","sum":32479839},{"parent":"TRule_window_name_or_specification.TAlt2","rule":"TRule_window_name_or_specification.TAlt2.Rule_window_specification1","sum":12826512},{"parent":"TRule_window_order_clause","rule":"TRule_window_order_clause.Rule_order_by_clause1","sum":19003363},{"parent":"TRule_window_partition_clause","rule":"TRule_window_partition_clause.Block2","sum":51717},{"parent":"TRule_window_partition_clause","rule":"TRule_window_partition_clause.Rule_named_expr_list4","sum":21791276},{"parent":"TRule_window_partition_clause","rule":"TRule_window_partition_clause.Token1","sum":21791276},{"parent":"TRule_window_partition_clause","rule":"TRule_window_partition_clause.Token3","sum":21791276},{"parent":"TRule_window_partition_clause.TBlock2","rule":"TRule_window_partition_clause.TBlock2.Token1","sum":51717},{"parent":"TRule_window_specification","rule":"TRule_window_specification.Rule_window_specification_details2","sum":26986765},{"parent":"TRule_window_specification","rule":"TRule_window_specification.Token1","sum":26986765},{"parent":"TRule_window_specification","rule":"TRule_window_specification.Token3","sum":26986765},{"parent":"TRule_window_specification_details","rule":"TRule_window_specification_details.Block2","sum":21
791275},{"parent":"TRule_window_specification_details","rule":"TRule_window_specification_details.Block3","sum":19003363},{"parent":"TRule_window_specification_details","rule":"TRule_window_specification_details.Block4","sum":2374628},{"parent":"TRule_window_specification_details.TBlock2","rule":"TRule_window_specification_details.TBlock2.Rule_window_partition_clause1","sum":21791275},{"parent":"TRule_window_specification_details.TBlock3","rule":"TRule_window_specification_details.TBlock3.Rule_window_order_clause1","sum":19003363},{"parent":"TRule_window_specification_details.TBlock4","rule":"TRule_window_specification_details.TBlock4.Rule_window_frame_clause1","sum":2374628},{"parent":"TRule_without_column_list","rule":"TRule_without_column_list.Block2","sum":9203014},{"parent":"TRule_without_column_list","rule":"TRule_without_column_list.Block3","sum":1311173},{"parent":"TRule_without_column_list","rule":"TRule_without_column_list.Rule_without_column_name1","sum":24883415},{"parent":"TRule_without_column_list.TBlock2","rule":"TRule_without_column_list.TBlock2.Rule_without_column_name2","sum":29234776},{"parent":"TRule_without_column_list.TBlock2","rule":"TRule_without_column_list.TBlock2.Token1","sum":29234776},{"parent":"TRule_without_column_list.TBlock3","rule":"TRule_without_column_list.TBlock3.Token1","sum":1311173},{"parent":"TRule_without_column_name","rule":"TRule_without_column_name.Alt_without_column_name1","sum":30839645},{"parent":"TRule_without_column_name","rule":"TRule_without_column_name.Alt_without_column_name2","sum":23278546},{"parent":"TRule_without_column_name.TAlt1","rule":"TRule_without_column_name.TAlt1.Rule_an_id1","sum":30839645},{"parent":"TRule_without_column_name.TAlt1","rule":"TRule_without_column_name.TAlt1.Rule_an_id3","sum":30839645},{"parent":"TRule_without_column_name.TAlt1","rule":"TRule_without_column_name.TAlt1.Token2","sum":30839645},{"parent":"TRule_without_column_name.TAlt2","rule":"TRule_without_column_name.TAlt2.Rule_an_id
_without1","sum":23278546},{"parent":"TRule_xor_subexpr","rule":"TRule_xor_subexpr.Block2","sum":1095537315},{"parent":"TRule_xor_subexpr","rule":"TRule_xor_subexpr.Rule_eq_subexpr1","sum":14281560370},{"parent":"TRule_xor_subexpr.TBlock2","rule":"TRule_xor_subexpr.TBlock2.Rule_cond_expr1","sum":1095537315},{"parent":"TSQLv1ParserAST","rule":"TSQLv1ParserAST.Rule_sql_query","sum":320130391},{"parent":"TYPE","rule":"BIGINT","sum":7045},{"parent":"TYPE","rule":"BOOL","sum":109153},{"parent":"TYPE","rule":"BYTEs","sum":1},{"parent":"TYPE","rule":"BigInt","sum":210},{"parent":"TYPE","rule":"Bool","sum":9299135},{"parent":"TYPE","rule":"Bytes","sum":421334},{"parent":"TYPE","rule":"DATE","sum":1252991},{"parent":"TYPE","rule":"DATETIME","sum":66736},{"parent":"TYPE","rule":"DATETime","sum":2},{"parent":"TYPE","rule":"DATEtIME","sum":1},{"parent":"TYPE","rule":"DATEtime","sum":34},{"parent":"TYPE","rule":"DAte","sum":9272},{"parent":"TYPE","rule":"DAteTime","sum":54},{"parent":"TYPE","rule":"DAtetime","sum":665},{"parent":"TYPE","rule":"DOUBLE","sum":1351180},{"parent":"TYPE","rule":"DOUBLe","sum":1},{"parent":"TYPE","rule":"DOUBle","sum":1},{"parent":"TYPE","rule":"DOUble","sum":3},{"parent":"TYPE","rule":"DOuble","sum":2143},{"parent":"TYPE","rule":"DaTeTime","sum":1},{"parent":"TYPE","rule":"Date","sum":20625922},{"parent":"TYPE","rule":"Date32","sum":60},{"parent":"TYPE","rule":"DateTIME","sum":19},{"parent":"TYPE","rule":"DateTIme","sum":121},{"parent":"TYPE","rule":"DateTime","sum":4994749},{"parent":"TYPE","rule":"DateTime64","sum":8},{"parent":"TYPE","rule":"DatetIME","sum":35},{"parent":"TYPE","rule":"Datetime","sum":3253222},{"parent":"TYPE","rule":"Datetime64","sum":37},{"parent":"TYPE","rule":"DoubLe","sum":13},{"parent":"TYPE","rule":"Double","sum":30104093},{"parent":"TYPE","rule":"EmptyDict","sum":28},{"parent":"TYPE","rule":"EmptyList","sum":28},{"parent":"TYPE","rule":"FLOAT","sum":191924},{"parent":"TYPE","rule":"FLoat","sum":3426},{"parent":"TYPE","rule
":"FlOAT","sum":15},{"parent":"TYPE","rule":"FloaT","sum":320},{"parent":"TYPE","rule":"Float","sum":7080417},{"parent":"TYPE","rule":"Generic","sum":3},{"parent":"TYPE","rule":"INT","sum":422033},{"parent":"TYPE","rule":"INT16","sum":9481},{"parent":"TYPE","rule":"INT32","sum":140114},{"parent":"TYPE","rule":"INT64","sum":4144555},{"parent":"TYPE","rule":"INT8","sum":1823},{"parent":"TYPE","rule":"INTEGER","sum":73414},{"parent":"TYPE","rule":"INTERVAL","sum":10667},{"parent":"TYPE","rule":"INt16","sum":1},{"parent":"TYPE","rule":"INt32","sum":1172},{"parent":"TYPE","rule":"INt64","sum":38323},{"parent":"TYPE","rule":"InT32","sum":1},{"parent":"TYPE","rule":"Int","sum":523129},{"parent":"TYPE","rule":"Int16","sum":906516},{"parent":"TYPE","rule":"Int32","sum":13797078},{"parent":"TYPE","rule":"Int64","sum":47392259},{"parent":"TYPE","rule":"Int8","sum":1004426},{"parent":"TYPE","rule":"Integer","sum":468078},{"parent":"TYPE","rule":"Interval","sum":204594},{"parent":"TYPE","rule":"Interval64","sum":34},{"parent":"TYPE","rule":"JSON","sum":1735332},{"parent":"TYPE","rule":"JSONDocument","sum":255},{"parent":"TYPE","rule":"JSOn","sum":1},{"parent":"TYPE","rule":"JSon","sum":2},{"parent":"TYPE","rule":"Json","sum":5481257},{"parent":"TYPE","rule":"JsonDocument","sum":17007},{"parent":"TYPE","rule":"Jsondocument","sum":25},{"parent":"TYPE","rule":"PgBool","sum":20},{"parent":"TYPE","rule":"PgBox","sum":2},{"parent":"TYPE","rule":"PgByteA","sum":16},{"parent":"TYPE","rule":"PgCString","sum":19},{"parent":"TYPE","rule":"PgDate","sum":85},{"parent":"TYPE","rule":"PgFloat4","sum":31},{"parent":"TYPE","rule":"PgFloat8","sum":31},{"parent":"TYPE","rule":"PgInt","sum":1},{"parent":"TYPE","rule":"PgInt2","sum":97},{"parent":"TYPE","rule":"PgInt4","sum":37},{"parent":"TYPE","rule":"PgInt8","sum":32},{"parent":"TYPE","rule":"PgInterval","sum":238},{"parent":"TYPE","rule":"PgMoney","sum":2},{"parent":"TYPE","rule":"PgName","sum":2},{"parent":"TYPE","rule":"PgNumeric","sum":10},{"
parent":"TYPE","rule":"PgPoint","sum":1278},{"parent":"TYPE","rule":"PgPolygon","sum":638},{"parent":"TYPE","rule":"PgText","sum":419},{"parent":"TYPE","rule":"PgTimestamp","sum":1875},{"parent":"TYPE","rule":"PgVarChar","sum":1},{"parent":"TYPE","rule":"PgVarchar","sum":231},{"parent":"TYPE","rule":"STRING","sum":2678975},{"parent":"TYPE","rule":"STRINg","sum":3},{"parent":"TYPE","rule":"STRing","sum":3},{"parent":"TYPE","rule":"STring","sum":1330},{"parent":"TYPE","rule":"StrINg","sum":45},{"parent":"TYPE","rule":"StrinG","sum":27},{"parent":"TYPE","rule":"String","sum":335154604},{"parent":"TYPE","rule":"TEXT","sum":20612},{"parent":"TYPE","rule":"TIMESTAMP","sum":315869},{"parent":"TYPE","rule":"TINYINT","sum":1},{"parent":"TYPE","rule":"TZDate","sum":30},{"parent":"TYPE","rule":"TZDateTime","sum":1},{"parent":"TYPE","rule":"TZDatetime","sum":25},{"parent":"TYPE","rule":"TZtimestamp","sum":1},{"parent":"TYPE","rule":"Text","sum":195837},{"parent":"TYPE","rule":"TimeStamp","sum":295779},{"parent":"TYPE","rule":"Timestamp","sum":5985852},{"parent":"TYPE","rule":"Timestamp64","sum":486},{"parent":"TYPE","rule":"TzDATE","sum":5},{"parent":"TYPE","rule":"TzDate","sum":48165},{"parent":"TYPE","rule":"TzDateTime","sum":43754},{"parent":"TYPE","rule":"TzDatetime","sum":672850},{"parent":"TYPE","rule":"TzTimeStamp","sum":6},{"parent":"TYPE","rule":"TzTimestamp","sum":8078},{"parent":"TYPE","rule":"Tzdate","sum":5},{"parent":"TYPE","rule":"Tzdatetime","sum":4},{"parent":"TYPE","rule":"UINT16","sum":1235},{"parent":"TYPE","rule":"UINT32","sum":430136},{"parent":"TYPE","rule":"UINT64","sum":590451},{"parent":"TYPE","rule":"UINT8","sum":160},{"parent":"TYPE","rule":"UINt32","sum":44},{"parent":"TYPE","rule":"UINt64","sum":416},{"parent":"TYPE","rule":"UINt8","sum":24},{"parent":"TYPE","rule":"UInt16","sum":129858},{"parent":"TYPE","rule":"UInt32","sum":7328251},{"parent":"TYPE","rule":"UInt64","sum":9460318},{"parent":"TYPE","rule":"UInt8","sum":153012},{"parent":"TYPE","rul
e":"UNIT","sum":756},{"parent":"TYPE","rule":"UTF8","sum":267638},{"parent":"TYPE","rule":"UTf8","sum":143},{"parent":"TYPE","rule":"UUID","sum":1241689},{"parent":"TYPE","rule":"UiNt32","sum":4},{"parent":"TYPE","rule":"Uint16","sum":333955},{"parent":"TYPE","rule":"Uint32","sum":18506534},{"parent":"TYPE","rule":"Uint64","sum":32752609},{"parent":"TYPE","rule":"Uint8","sum":2331285},{"parent":"TYPE","rule":"Unit","sum":1150},{"parent":"TYPE","rule":"Utf8","sum":14355669},{"parent":"TYPE","rule":"Uuid","sum":43569},{"parent":"TYPE","rule":"VARCHAR","sum":517176},{"parent":"TYPE","rule":"Varchar","sum":2},{"parent":"TYPE","rule":"Void","sum":11134},{"parent":"TYPE","rule":"XML","sum":21},{"parent":"TYPE","rule":"YSON","sum":226769},{"parent":"TYPE","rule":"YSon","sum":357},{"parent":"TYPE","rule":"Yson","sum":17637234},{"parent":"TYPE","rule":"_PgMoney","sum":3},{"parent":"TYPE","rule":"bigint","sum":8796},{"parent":"TYPE","rule":"bool","sum":936903},{"parent":"TYPE","rule":"bytes","sum":35237},{"parent":"TYPE","rule":"dATE","sum":1},{"parent":"TYPE","rule":"daTE","sum":5},{"parent":"TYPE","rule":"date","sum":32392415},{"parent":"TYPE","rule":"date32","sum":41},{"parent":"TYPE","rule":"dateTIME","sum":8},{"parent":"TYPE","rule":"dateTime","sum":43345},{"parent":"TYPE","rule":"datetime","sum":7110634},{"parent":"TYPE","rule":"datetime64","sum":61},{"parent":"TYPE","rule":"double","sum":6938427},{"parent":"TYPE","rule":"emptyList","sum":1},{"parent":"TYPE","rule":"float","sum":4720867},{"parent":"TYPE","rule":"generic","sum":2},{"parent":"TYPE","rule":"iNT","sum":1},{"parent":"TYPE","rule":"iNT64","sum":27},{"parent":"TYPE","rule":"inT64","sum":260},{"parent":"TYPE","rule":"int","sum":1119552},{"parent":"TYPE","rule":"int16","sum":79269},{"parent":"TYPE","rule":"int32","sum":3208024},{"parent":"TYPE","rule":"int64","sum":8444124},{"parent":"TYPE","rule":"int8","sum":42080},{"parent":"TYPE","rule":"integer","sum":135629},{"parent":"TYPE","rule":"interval","sum":400514}
,{"parent":"TYPE","rule":"json","sum":1061388},{"parent":"TYPE","rule":"json_document","sum":1},{"parent":"TYPE","rule":"jsondocument","sum":1},{"parent":"TYPE","rule":"pgDate","sum":22},{"parent":"TYPE","rule":"pg_name","sum":2},{"parent":"TYPE","rule":"pgbigint","sum":1},{"parent":"TYPE","rule":"pgbool","sum":18},{"parent":"TYPE","rule":"pgdate","sum":14},{"parent":"TYPE","rule":"pgfloat8","sum":2},{"parent":"TYPE","rule":"pgint","sum":366},{"parent":"TYPE","rule":"pgint2","sum":6},{"parent":"TYPE","rule":"pgint4","sum":41},{"parent":"TYPE","rule":"pgint8","sum":1},{"parent":"TYPE","rule":"pginteger","sum":1},{"parent":"TYPE","rule":"pginterval","sum":371},{"parent":"TYPE","rule":"pgnumeric","sum":5},{"parent":"TYPE","rule":"pgoid","sum":1},{"parent":"TYPE","rule":"pgtext","sum":76},{"parent":"TYPE","rule":"pgtimestamp","sum":20},{"parent":"TYPE","rule":"sTRING","sum":16},{"parent":"TYPE","rule":"smallint","sum":1},{"parent":"TYPE","rule":"strINg","sum":14},{"parent":"TYPE","rule":"striNg","sum":1},{"parent":"TYPE","rule":"strinG","sum":73},{"parent":"TYPE","rule":"string","sum":45728030},{"parent":"TYPE","rule":"text","sum":4565258},{"parent":"TYPE","rule":"timeStamp","sum":94},{"parent":"TYPE","rule":"timestamp","sum":21894834},{"parent":"TYPE","rule":"timestamp64","sum":12},{"parent":"TYPE","rule":"tinyint","sum":3},{"parent":"TYPE","rule":"tzDate","sum":1},{"parent":"TYPE","rule":"tzDateTime","sum":3},{"parent":"TYPE","rule":"tzDatetime","sum":30},{"parent":"TYPE","rule":"tzTimestamp","sum":709},{"parent":"TYPE","rule":"tzdate","sum":19},{"parent":"TYPE","rule":"tzdatetime","sum":67},{"parent":"TYPE","rule":"tzdatetime64","sum":2},{"parent":"TYPE","rule":"tztimestamp","sum":32},{"parent":"TYPE","rule":"tztimestamp64","sum":3},{"parent":"TYPE","rule":"uINT32","sum":6},{"parent":"TYPE","rule":"uInt32","sum":12269},{"parent":"TYPE","rule":"uInt64","sum":2528},{"parent":"TYPE","rule":"uInt8","sum":52},{"parent":"TYPE","rule":"uint16","sum":27129},{"parent":"TYPE",
"rule":"uint32","sum":5308657},{"parent":"TYPE","rule":"uint64","sum":8644573},{"parent":"TYPE","rule":"uint8","sum":159561},{"parent":"TYPE","rule":"unit","sum":693399},{"parent":"TYPE","rule":"utf8","sum":4587294},{"parent":"TYPE","rule":"uuid","sum":2980729},{"parent":"TYPE","rule":"varchar","sum":58836},{"parent":"TYPE","rule":"void","sum":1},{"parent":"TYPE","rule":"xml","sum":32381},{"parent":"TYPE","rule":"yaml","sum":144},{"parent":"TYPE","rule":"yson","sum":1126740}]
diff --git a/yql/essentials/data/language/sql_functions.json b/yql/essentials/data/language/sql_functions.json
new file mode 100644
index 0000000000..db6ce06075
--- /dev/null
+++ b/yql/essentials/data/language/sql_functions.json
@@ -0,0 +1 @@
+[{"name":"Abs","kind":"Normal"},{"name":"AdaptiveDistanceHistogram","kind":"Agg"},{"name":"AdaptiveDistanceHistogramCDF","kind":"Agg"},{"name":"AdaptiveWardHistogram","kind":"Agg"},{"name":"AdaptiveWardHistogramCDF","kind":"Agg"},{"name":"AdaptiveWeightHistogram","kind":"Agg"},{"name":"AdaptiveWeightHistogramCDF","kind":"Agg"},{"name":"AddMember","kind":"Normal"},{"name":"AddTimezone","kind":"Normal"},{"name":"AggList","kind":"Agg"},{"name":"AggListDistinct","kind":"Agg"},{"name":"AggregateFlatten","kind":"Normal"},{"name":"AggregateTransformInput","kind":"Normal"},{"name":"AggregateTransformOutput","kind":"Normal"},{"name":"AsAtom","kind":"Normal"},{"name":"AsDict","kind":"Normal"},{"name":"AsDictStrict","kind":"Normal"},{"name":"AsEnum","kind":"Normal"},{"name":"AsList","kind":"Normal"},{"name":"AsListStrict","kind":"Normal"},{"name":"AsSet","kind":"Normal"},{"name":"AsSetStrict","kind":"Normal"},{"name":"AsTagged","kind":"Normal"},{"name":"AsTuple","kind":"Normal"},{"name":"AsVariant","kind":"Normal"},{"name":"AssumeNonStrict","kind":"Normal"},{"name":"AssumeStrict","kind":"Normal"},{"name":"AtomCode","kind":"Normal"},{"name":"Avg","kind":"Agg"},{"name":"AvgIf","kind":"Agg"},{"name":"BitAnd","kind":"Agg"},{"name":"BitOr","kind":"Agg"},{"name":"BitXor","kind":"Agg"},{"name":"BlockWardHistogram","kind":"Agg"},{"name":"BlockWardHistogramCDF","kind":"Agg"},{"name":"BlockWeightHistogram","kind":"Agg"},{"name":"BlockWeightHistogramCDF","kind":"Agg"},{"name":"BoolAnd","kind":"Agg"},{"name":"BoolOr","kind":"Agg"},{"name":"BoolXor","kind":"Agg"},{"name":"Bottom","kind":"Agg"},{"name":"BottomBy","kind":"Agg"},{"name":"ByteAt","kind":"Normal"},{"name":"Callable","kind":"Normal"},{"name":"CallableArgument","kind":"Normal"},{"name":"CallableArgumentType","kind":"Normal"},{"name":"CallableResultType","kind":"Normal"},{"name":"CallableType","kind":"Normal"},{"name":"CallableTypeComponents","kind":"Normal"},{"name":"CallableTypeHandle","kind":"Normal"},{"name":"ChooseMembers","
kind":"Normal"},{"name":"ClearBit","kind":"Normal"},{"name":"Coalesce","kind":"Normal"},{"name":"CombineMembers","kind":"Normal"},{"name":"Corr","kind":"Agg"},{"name":"Correlation","kind":"Agg"},{"name":"Count","kind":"Agg"},{"name":"CountDistinctEstimate","kind":"Agg"},{"name":"CountIf","kind":"Agg"},{"name":"Covar","kind":"Agg"},{"name":"CovarP","kind":"Agg"},{"name":"CovarPop","kind":"Agg"},{"name":"CovarS","kind":"Agg"},{"name":"CovarSamp","kind":"Agg"},{"name":"Covariance","kind":"Agg"},{"name":"CovariancePopulation","kind":"Agg"},{"name":"CovarianceSample","kind":"Agg"},{"name":"CumeDist","kind":"Window"},{"name":"CurrentAuthenticatedUser","kind":"Normal"},{"name":"CurrentOperationId","kind":"Normal"},{"name":"CurrentOperationSharedId","kind":"Normal"},{"name":"CurrentTzDate","kind":"Normal"},{"name":"CurrentTzDatetime","kind":"Normal"},{"name":"CurrentTzTimestamp","kind":"Normal"},{"name":"CurrentUtcDate","kind":"Normal"},{"name":"CurrentUtcDatetime","kind":"Normal"},{"name":"CurrentUtcTimestamp","kind":"Normal"},{"name":"DataType","kind":"Normal"},{"name":"DataTypeComponents","kind":"Normal"},{"name":"DataTypeHandle","kind":"Normal"},{"name":"DenseRank","kind":"Window"},{"name":"DictAggregate","kind":"Normal"},{"name":"DictContains","kind":"Normal"},{"name":"DictCreate","kind":"Normal"},{"name":"DictHasItems","kind":"Normal"},{"name":"DictItems","kind":"Normal"},{"name":"DictKeyType","kind":"Normal"},{"name":"DictKeys","kind":"Normal"},{"name":"DictLength","kind":"Normal"},{"name":"DictLookup","kind":"Normal"},{"name":"DictPayloadType","kind":"Normal"},{"name":"DictPayloads","kind":"Normal"},{"name":"DictType","kind":"Normal"},{"name":"DictTypeComponents","kind":"Normal"},{"name":"DictTypeHandle","kind":"Normal"},{"name":"DynamicVariant","kind":"Normal"},{"name":"EmptyDict","kind":"Normal"},{"name":"EmptyDictTypehandle","kind":"Normal"},{"name":"EmptyList","kind":"Normal"},{"name":"EmptyListTypeHandle","kind":"Normal"},{"name":"EndsWith","kind":"Normal"},{"n
ame":"Ensure","kind":"Normal"},{"name":"EnsureConvertibleTo","kind":"Normal"},{"name":"EnsureType","kind":"Normal"},{"name":"Enum","kind":"Normal"},{"name":"EvaluateAtom","kind":"Normal"},{"name":"EvaluateCode","kind":"Normal"},{"name":"EvaluateExpr","kind":"Normal"},{"name":"EvaluateType","kind":"Normal"},{"name":"FileContent","kind":"Normal"},{"name":"FilePath","kind":"Normal"},{"name":"Files","kind":"Normal"},{"name":"Find","kind":"Normal"},{"name":"First","kind":"MatchRec"},{"name":"FirstValue","kind":"Window"},{"name":"FlattenMembers","kind":"Normal"},{"name":"FlipBit","kind":"Normal"},{"name":"FolderPath","kind":"Normal"},{"name":"ForceRemoveMember","kind":"Normal"},{"name":"ForceRemoveMembers","kind":"Normal"},{"name":"ForceRenameMembers","kind":"Normal"},{"name":"ForceSpreadMembers","kind":"Normal"},{"name":"FormatCode","kind":"Normal"},{"name":"FormatType","kind":"Normal"},{"name":"FormatTypeDeffPretty","kind":"Normal"},{"name":"FormatTypeDiff","kind":"Normal"},{"name":"FromBytes","kind":"Normal"},{"name":"FromPg","kind":"Normal"},{"name":"FromYsonSimpleType","kind":"Normal"},{"name":"FuncCode","kind":"Normal"},{"name":"GatherMembers","kind":"Normal"},{"name":"GenericType","kind":"Normal"},{"name":"Grouping","kind":"AggKey"},{"name":"HLL","kind":"Agg"},{"name":"Histogram","kind":"Agg"},{"name":"HistogramCDF","kind":"Agg"},{"name":"HopEnd","kind":"Agg"},{"name":"HopStart","kind":"Agg"},{"name":"HyperLogLog","kind":"Agg"},{"name":"If","kind":"Normal"},{"name":"IfStrict","kind":"Normal"},{"name":"IndexOf","kind":"Normal"},{"name":"InstanceOf","kind":"Normal"},{"name":"JoinTableRow","kind":"Normal"},{"name":"Just","kind":"Normal"},{"name":"Lag","kind":"Window"},{"name":"LambdaArgumentsCount","kind":"Normal"},{"name":"LambdaCode","kind":"Normal"},{"name":"LambdaOptionalArgumentsCount","kind":"Normal"},{"name":"Last","kind":"MatchRec"},{"name":"LastValue","kind":"Window"},{"name":"Lead","kind":"Window"},{"name":"Length","kind":"Normal"},{"name":"Likely","kind":"N
ormal"},{"name":"LinearHistogram","kind":"Agg"},{"name":"LinearHistogramCDF","kind":"Agg"},{"name":"ListAggregate","kind":"Normal"},{"name":"ListAll","kind":"Normal"},{"name":"ListAny","kind":"Normal"},{"name":"ListAvg","kind":"Normal"},{"name":"ListCode","kind":"Normal"},{"name":"ListCollect","kind":"Normal"},{"name":"ListConcat","kind":"Normal"},{"name":"ListCreate","kind":"Normal"},{"name":"ListEnumerate","kind":"Normal"},{"name":"ListExtend","kind":"Normal"},{"name":"ListExtendStrict","kind":"Normal"},{"name":"ListExtract","kind":"Normal"},{"name":"ListFilter","kind":"Normal"},{"name":"ListFlatMap","kind":"Normal"},{"name":"ListFlatten","kind":"Normal"},{"name":"ListFold","kind":"Normal"},{"name":"ListFold1","kind":"Normal"},{"name":"ListFold1Map","kind":"Normal"},{"name":"ListFoldMap","kind":"Normal"},{"name":"ListFromRange","kind":"Normal"},{"name":"ListFromTuple","kind":"Normal"},{"name":"ListHas","kind":"Normal"},{"name":"ListHasItems","kind":"Normal"},{"name":"ListHead","kind":"Normal"},{"name":"ListItemType","kind":"Normal"},{"name":"ListLast","kind":"Normal"},{"name":"ListLength","kind":"Normal"},{"name":"ListMap","kind":"Normal"},{"name":"ListMax","kind":"Normal"},{"name":"ListMin","kind":"Normal"},{"name":"ListNotNull","kind":"Normal"},{"name":"ListReplicate","kind":"Normal"},{"name":"ListReverse","kind":"Normal"},{"name":"ListSample","kind":"Normal"},{"name":"ListSampleN","kind":"Normal"},{"name":"ListShuffle","kind":"Normal"},{"name":"ListSkip","kind":"Normal"},{"name":"ListSkipWhile","kind":"Normal"},{"name":"ListSkipWhileInclusive","kind":"Normal"},{"name":"ListSort","kind":"Normal"},{"name":"ListSortAsc","kind":"Normal"},{"name":"ListSortDesc","kind":"Normal"},{"name":"ListSum","kind":"Normal"},{"name":"ListTake","kind":"Normal"},{"name":"ListTakeWhile","kind":"Normal"},{"name":"ListTakeWhileInclusive","kind":"Normal"},{"name":"ListToTuple","kind":"Normal"},{"name":"ListTop","kind":"Normal"},{"name":"ListTopAsc","kind":"Normal"},{"name":"ListTopDes
c","kind":"Normal"},{"name":"ListTopSort","kind":"Normal"},{"name":"ListTopSortAsc","kind":"Normal"},{"name":"ListTopSortDesc","kind":"Normal"},{"name":"ListType","kind":"Normal"},{"name":"ListTypeHandle","kind":"Normal"},{"name":"ListUnionAll","kind":"Normal"},{"name":"ListUniq","kind":"Normal"},{"name":"ListUniqStable","kind":"Normal"},{"name":"ListZip","kind":"Normal"},{"name":"ListZipAll","kind":"Normal"},{"name":"LogHistogram","kind":"Agg"},{"name":"LogHistogramCDF","kind":"Agg"},{"name":"LogarithmicHistogram","kind":"Agg"},{"name":"LogarithmicHistogramCDF","kind":"Agg"},{"name":"Max","kind":"Agg"},{"name":"MaxBy","kind":"Agg"},{"name":"MaxOf","kind":"Normal"},{"name":"Median","kind":"Agg"},{"name":"Min","kind":"Agg"},{"name":"MinBy","kind":"Agg"},{"name":"MinOf","kind":"Normal"},{"name":"Mode","kind":"Agg"},{"name":"NTile","kind":"Window"},{"name":"Nanvl","kind":"Normal"},{"name":"Nothing","kind":"Normal"},{"name":"NthValue","kind":"Window"},{"name":"NullTypeHandle","kind":"Normal"},{"name":"Nvl","kind":"Normal"},{"name":"Opaque","kind":"Normal"},{"name":"OptionalItemType","kind":"Normal"},{"name":"OptionalType","kind":"Normal"},{"name":"OptionalTypeHandle","kind":"Normal"},{"name":"ParseFile","kind":"Normal"},{"name":"ParseType","kind":"Normal"},{"name":"ParseTypeHandle","kind":"Normal"},{"name":"PercentRank","kind":"Window"},{"name":"Percentile","kind":"Agg"},{"name":"PgAnd","kind":"Normal"},{"name":"PgArray","kind":"Normal"},{"name":"PgCall","kind":"Normal"},{"name":"PgCast","kind":"Normal"},{"name":"PgConst","kind":"Normal"},{"name":"PgNot","kind":"Normal"},{"name":"PgOp","kind":"Normal"},{"name":"PgOr","kind":"Normal"},{"name":"PgRangeCall","kind":"Normal"},{"name":"PgType","kind":"Normal"},{"name":"PgTypeHandle","kind":"Normal"},{"name":"PgTypeName","kind":"Normal"},{"name":"Pickle","kind":"Normal"},{"name":"PopulationStdDev","kind":"Agg"},{"name":"PopulationVariance","kind":"Agg"},{"name":"QuoteCode","kind":"Normal"},{"name":"RFind","kind":"Normal"},{"n
ame":"Random","kind":"Normal"},{"name":"RandomNumber","kind":"Normal"},{"name":"RandomUuid","kind":"Normal"},{"name":"Rank","kind":"Window"},{"name":"RemoveMember","kind":"Normal"},{"name":"RemoveMembers","kind":"Normal"},{"name":"RemoveTimezone","kind":"Normal"},{"name":"RenameMembers","kind":"Normal"},{"name":"ReplaceMember","kind":"Normal"},{"name":"ReprCode","kind":"Normal"},{"name":"ResourceType","kind":"Normal"},{"name":"ResourceTypeHandle","kind":"Normal"},{"name":"ResourceTypeTag","kind":"Normal"},{"name":"RowNumber","kind":"Window"},{"name":"SecureParam","kind":"Normal"},{"name":"SessionStart","kind":"Agg"},{"name":"SessionState","kind":"Agg"},{"name":"SessionWindow","kind":"Partition"},{"name":"SetBit","kind":"Normal"},{"name":"SetCreate","kind":"Normal"},{"name":"SetDifference","kind":"Normal"},{"name":"SetIncludes","kind":"Normal"},{"name":"SetIntersection","kind":"Normal"},{"name":"SetIsDisjoint","kind":"Normal"},{"name":"SetSymmetricDifference","kind":"Normal"},{"name":"SetUnion","kind":"Normal"},{"name":"Some","kind":"Agg"},{"name":"SpreadMembers","kind":"Normal"},{"name":"StablePickle","kind":"Normal"},{"name":"StartsWith","kind":"Normal"},{"name":"StaticFold","kind":"Normal"},{"name":"StaticFold1","kind":"Normal"},{"name":"StaticMap","kind":"Normal"},{"name":"StaticZip","kind":"Normal"},{"name":"StdDev","kind":"Agg"},{"name":"StdDevP","kind":"Agg"},{"name":"StdDevPop","kind":"Agg"},{"name":"StdDevPopulation","kind":"Agg"},{"name":"StdDevS","kind":"Agg"},{"name":"StdDevSamp","kind":"Agg"},{"name":"StdDevSample","kind":"Agg"},{"name":"StreamType","kind":"Normal"},{"name":"StreamTypeHandle","kind":"Normal"},{"name":"StructDifference","kind":"Normal"},{"name":"StructIntersection","kind":"Normal"},{"name":"StructMemberType","kind":"Normal"},{"name":"StructMembers","kind":"Normal"},{"name":"StructSymmetricDifference","kind":"Normal"},{"name":"StructTypeComponents","kind":"Normal"},{"name":"StructTypeHandle","kind":"Normal"},{"name":"StructUnion","kind":"N
ormal"},{"name":"SubqueryAssumeOrderBy","kind":"Normal"},{"name":"SubqueryExtend","kind":"Normal"},{"name":"SubqueryExtendFor","kind":"Normal"},{"name":"SubqueryMerge","kind":"Normal"},{"name":"SubqueryMergeFor","kind":"Normal"},{"name":"SubqueryOrderBy","kind":"Normal"},{"name":"SubqueryUnionAll","kind":"Normal"},{"name":"SubqueryUnionAllFor","kind":"Normal"},{"name":"SubqueryUnionMerge","kind":"Normal"},{"name":"SubqueryUnionMergeFor","kind":"Normal"},{"name":"Substring","kind":"Normal"},{"name":"Sum","kind":"Agg"},{"name":"SumIf","kind":"Agg"},{"name":"SystemMetadata","kind":"Normal"},{"name":"TablePath","kind":"Normal"},{"name":"TableRecordIndex","kind":"Normal"},{"name":"TableRow","kind":"Normal"},{"name":"TableRows","kind":"Produce"},{"name":"TaggedType","kind":"Normal"},{"name":"TaggedTypeComponents","kind":"Normal"},{"name":"TaggedTypeHandle","kind":"Normal"},{"name":"TestBit","kind":"Normal"},{"name":"ToBytes","kind":"Normal"},{"name":"ToDict","kind":"Normal"},{"name":"ToHashedDict","kind":"Normal"},{"name":"ToHashedMultiDict","kind":"Normal"},{"name":"ToMultiDict","kind":"Normal"},{"name":"ToPg","kind":"Normal"},{"name":"ToSet","kind":"Normal"},{"name":"ToSortedDict","kind":"Normal"},{"name":"ToSortedMultiDict","kind":"Normal"},{"name":"Top","kind":"Agg"},{"name":"TopBy","kind":"Agg"},{"name":"TopFreq","kind":"Agg"},{"name":"TryMember","kind":"Normal"},{"name":"TupleElementType","kind":"Normal"},{"name":"TupleType","kind":"Normal"},{"name":"TupleTypeComponents","kind":"Normal"},{"name":"TupleTypeHandle","kind":"Normal"},{"name":"TypeHandle","kind":"Normal"},{"name":"TypeKind","kind":"Normal"},{"name":"TypeOf","kind":"Normal"},{"name":"UDAF","kind":"Agg"},{"name":"UnitType","kind":"Normal"},{"name":"Unpickle","kind":"Normal"},{"name":"Untag","kind":"Normal"},{"name":"Unwrap","kind":"Normal"},{"name":"VarP","kind":"Agg"},{"name":"VarPop","kind":"Agg"},{"name":"VarS","kind":"Agg"},{"name":"VarSamp","kind":"Agg"},{"name":"Variance","kind":"Agg"},{"name":"Varia
ncePopulation","kind":"Agg"},{"name":"VarianceSample","kind":"Agg"},{"name":"Variant","kind":"Normal"},{"name":"VariantItem","kind":"Normal"},{"name":"VariantType","kind":"Normal"},{"name":"VariantTypeHandle","kind":"Normal"},{"name":"VariantUnderlyingType","kind":"Normal"},{"name":"Version","kind":"Normal"},{"name":"Void","kind":"Normal"},{"name":"VoidType","kind":"Normal"},{"name":"VoidTypeHandle","kind":"Normal"},{"name":"Way","kind":"Normal"},{"name":"WeakField","kind":"Normal"},{"name":"WorldCode","kind":"Normal"}]
diff --git a/yql/essentials/data/language/types.json b/yql/essentials/data/language/types.json
index d57d94e47b..c6c045b97a 100644
--- a/yql/essentials/data/language/types.json
+++ b/yql/essentials/data/language/types.json
@@ -1 +1 @@
-{"Bool":{"kind":"Data"},"Date":{"kind":"Data"},"Date32":{"kind":"Data"},"Datetime":{"kind":"Data"},"Datetime64":{"kind":"Data"},"Double":{"kind":"Data"},"DyNumber":{"kind":"Data"},"EmptyDict":{"kind":"EmptyDict"},"EmptyList":{"kind":"EmptyList"},"Float":{"kind":"Data"},"Generic":{"kind":"Generic"},"Int16":{"kind":"Data"},"Int32":{"kind":"Data"},"Int64":{"kind":"Data"},"Int8":{"kind":"Data"},"Interval":{"kind":"Data"},"Interval64":{"kind":"Data"},"Json":{"kind":"Data"},"JsonDocument":{"kind":"Data"},"String":{"kind":"Data"},"Timestamp":{"kind":"Data"},"Timestamp64":{"kind":"Data"},"TzDate":{"kind":"Data"},"TzDate32":{"kind":"Data"},"TzDatetime":{"kind":"Data"},"TzDatetime64":{"kind":"Data"},"TzTimestamp":{"kind":"Data"},"TzTimestamp64":{"kind":"Data"},"Uint16":{"kind":"Data"},"Uint32":{"kind":"Data"},"Uint64":{"kind":"Data"},"Uint8":{"kind":"Data"},"Unit":{"kind":"Unit"},"Utf8":{"kind":"Data"},"Uuid":{"kind":"Data"},"Void":{"kind":"Void"},"Yson":{"kind":"Data"},"_pgaclitem":{"kind":"Pg"},"_pgbit":{"kind":"Pg"},"_pgbool":{"kind":"Pg"},"_pgbox":{"kind":"Pg"},"_pgbpchar":{"kind":"Pg"},"_pgbytea":{"kind":"Pg"},"_pgchar":{"kind":"Pg"},"_pgcid":{"kind":"Pg"},"_pgcidr":{"kind":"Pg"},"_pgcircle":{"kind":"Pg"},"_pgcstring":{"kind":"Pg"},"_pgdate":{"kind":"Pg"},"_pgdatemultirange":{"kind":"Pg"},"_pgdaterange":{"kind":"Pg"},"_pgfloat4":{"kind":"Pg"},"_pgfloat8":{"kind":"Pg"},"_pggtsvector":{"kind":"Pg"},"_pginet":{"kind":"Pg"},"_pgint2":{"kind":"Pg"},"_pgint2vector":{"kind":"Pg"},"_pgint4":{"kind":"Pg"},"_pgint4multirange":{"kind":"Pg"},"_pgint4range":{"kind":"Pg"},"_pgint8":{"kind":"Pg"},"_pgint8multirange":{"kind":"Pg"},"_pgint8range":{"kind":"Pg"},"_pginterval":{"kind":"Pg"},"_pgjson":{"kind":"Pg"},"_pgjsonb":{"kind":"Pg"},"_pgjsonpath":{"kind":"Pg"},"_pgline":{"kind":"Pg"},"_pglseg":{"kind":"Pg"},"_pgmacaddr":{"kind":"Pg"},"_pgmacaddr8":{"kind":"Pg"},"_pgmoney":{"kind":"Pg"},"_pgname":{"kind":"Pg"},"_pgnumeric":{"kind":"Pg"},"_pgnummultirange":{"kind":"Pg"},"_pgnumrange":{
"kind":"Pg"},"_pgoid":{"kind":"Pg"},"_pgoidvector":{"kind":"Pg"},"_pgpath":{"kind":"Pg"},"_pgpg_attribute":{"kind":"Pg"},"_pgpg_class":{"kind":"Pg"},"_pgpg_lsn":{"kind":"Pg"},"_pgpg_proc":{"kind":"Pg"},"_pgpg_snapshot":{"kind":"Pg"},"_pgpg_type":{"kind":"Pg"},"_pgpoint":{"kind":"Pg"},"_pgpolygon":{"kind":"Pg"},"_pgrecord":{"kind":"Pg"},"_pgrefcursor":{"kind":"Pg"},"_pgregclass":{"kind":"Pg"},"_pgregcollation":{"kind":"Pg"},"_pgregconfig":{"kind":"Pg"},"_pgregdictionary":{"kind":"Pg"},"_pgregnamespace":{"kind":"Pg"},"_pgregoper":{"kind":"Pg"},"_pgregoperator":{"kind":"Pg"},"_pgregproc":{"kind":"Pg"},"_pgregprocedure":{"kind":"Pg"},"_pgregrole":{"kind":"Pg"},"_pgregtype":{"kind":"Pg"},"_pgtext":{"kind":"Pg"},"_pgtid":{"kind":"Pg"},"_pgtime":{"kind":"Pg"},"_pgtimestamp":{"kind":"Pg"},"_pgtimestamptz":{"kind":"Pg"},"_pgtimetz":{"kind":"Pg"},"_pgtsmultirange":{"kind":"Pg"},"_pgtsquery":{"kind":"Pg"},"_pgtsrange":{"kind":"Pg"},"_pgtstzmultirange":{"kind":"Pg"},"_pgtstzrange":{"kind":"Pg"},"_pgtsvector":{"kind":"Pg"},"_pgtxid_snapshot":{"kind":"Pg"},"_pguuid":{"kind":"Pg"},"_pgvarbit":{"kind":"Pg"},"_pgvarchar":{"kind":"Pg"},"_pgxid":{"kind":"Pg"},"_pgxid8":{"kind":"Pg"},"_pgxml":{"kind":"Pg"},"pgaclitem":{"kind":"Pg"},"pgany":{"kind":"Pg"},"pganyarray":{"kind":"Pg"},"pganycompatible":{"kind":"Pg"},"pganycompatiblearray":{"kind":"Pg"},"pganycompatiblemultirange":{"kind":"Pg"},"pganycompatiblenonarray":{"kind":"Pg"},"pganycompatiblerange":{"kind":"Pg"},"pganyelement":{"kind":"Pg"},"pganyenum":{"kind":"Pg"},"pganymultirange":{"kind":"Pg"},"pganynonarray":{"kind":"Pg"},"pganyrange":{"kind":"Pg"},"pgbit":{"kind":"Pg"},"pgbool":{"kind":"Pg"},"pgbox":{"kind":"Pg"},"pgbpchar":{"kind":"Pg"},"pgbytea":{"kind":"Pg"},"pgchar":{"kind":"Pg"},"pgcid":{"kind":"Pg"},"pgcidr":{"kind":"Pg"},"pgcircle":{"kind":"Pg"},"pgcstring":{"kind":"Pg"},"pgdate":{"kind":"Pg"},"pgdatemultirange":{"kind":"Pg"},"pgdaterange":{"kind":"Pg"},"pgevent_trigger":{"kind":"Pg"},"pgfdw_handler":{"kind":"Pg"},"pgflo
at4":{"kind":"Pg"},"pgfloat8":{"kind":"Pg"},"pggtsvector":{"kind":"Pg"},"pgindex_am_handler":{"kind":"Pg"},"pginet":{"kind":"Pg"},"pgint2":{"kind":"Pg"},"pgint2vector":{"kind":"Pg"},"pgint4":{"kind":"Pg"},"pgint4multirange":{"kind":"Pg"},"pgint4range":{"kind":"Pg"},"pgint8":{"kind":"Pg"},"pgint8multirange":{"kind":"Pg"},"pgint8range":{"kind":"Pg"},"pginternal":{"kind":"Pg"},"pginterval":{"kind":"Pg"},"pgjson":{"kind":"Pg"},"pgjsonb":{"kind":"Pg"},"pgjsonpath":{"kind":"Pg"},"pglanguage_handler":{"kind":"Pg"},"pgline":{"kind":"Pg"},"pglseg":{"kind":"Pg"},"pgmacaddr":{"kind":"Pg"},"pgmacaddr8":{"kind":"Pg"},"pgmoney":{"kind":"Pg"},"pgname":{"kind":"Pg"},"pgnumeric":{"kind":"Pg"},"pgnummultirange":{"kind":"Pg"},"pgnumrange":{"kind":"Pg"},"pgoid":{"kind":"Pg"},"pgoidvector":{"kind":"Pg"},"pgpath":{"kind":"Pg"},"pgpg_attribute":{"kind":"Pg"},"pgpg_brin_bloom_summary":{"kind":"Pg"},"pgpg_brin_minmax_multi_summary":{"kind":"Pg"},"pgpg_class":{"kind":"Pg"},"pgpg_ddl_command":{"kind":"Pg"},"pgpg_dependencies":{"kind":"Pg"},"pgpg_lsn":{"kind":"Pg"},"pgpg_mcv_list":{"kind":"Pg"},"pgpg_ndistinct":{"kind":"Pg"},"pgpg_node_tree":{"kind":"Pg"},"pgpg_proc":{"kind":"Pg"},"pgpg_snapshot":{"kind":"Pg"},"pgpg_type":{"kind":"Pg"},"pgpoint":{"kind":"Pg"},"pgpolygon":{"kind":"Pg"},"pgrecord":{"kind":"Pg"},"pgrefcursor":{"kind":"Pg"},"pgregclass":{"kind":"Pg"},"pgregcollation":{"kind":"Pg"},"pgregconfig":{"kind":"Pg"},"pgregdictionary":{"kind":"Pg"},"pgregnamespace":{"kind":"Pg"},"pgregoper":{"kind":"Pg"},"pgregoperator":{"kind":"Pg"},"pgregproc":{"kind":"Pg"},"pgregprocedure":{"kind":"Pg"},"pgregrole":{"kind":"Pg"},"pgregtype":{"kind":"Pg"},"pgtable_am_handler":{"kind":"Pg"},"pgtext":{"kind":"Pg"},"pgtid":{"kind":"Pg"},"pgtime":{"kind":"Pg"},"pgtimestamp":{"kind":"Pg"},"pgtimestamptz":{"kind":"Pg"},"pgtimetz":{"kind":"Pg"},"pgtrigger":{"kind":"Pg"},"pgtsm_handler":{"kind":"Pg"},"pgtsmultirange":{"kind":"Pg"},"pgtsquery":{"kind":"Pg"},"pgtsrange":{"kind":"Pg"},"pgtstzmultirange":{"kind":"Pg
"},"pgtstzrange":{"kind":"Pg"},"pgtsvector":{"kind":"Pg"},"pgtxid_snapshot":{"kind":"Pg"},"pgunknown":{"kind":"Pg"},"pguuid":{"kind":"Pg"},"pgvarbit":{"kind":"Pg"},"pgvarchar":{"kind":"Pg"},"pgvoid":{"kind":"Pg"},"pgxid":{"kind":"Pg"},"pgxid8":{"kind":"Pg"},"pgxml":{"kind":"Pg"}}
+[{"name":"Bool","kind":"Data"},{"name":"Date","kind":"Data"},{"name":"Date32","kind":"Data"},{"name":"Datetime","kind":"Data"},{"name":"Datetime64","kind":"Data"},{"name":"Double","kind":"Data"},{"name":"DyNumber","kind":"Data"},{"name":"EmptyDict","kind":"EmptyDict"},{"name":"EmptyList","kind":"EmptyList"},{"name":"Float","kind":"Data"},{"name":"Generic","kind":"Generic"},{"name":"Int16","kind":"Data"},{"name":"Int32","kind":"Data"},{"name":"Int64","kind":"Data"},{"name":"Int8","kind":"Data"},{"name":"Interval","kind":"Data"},{"name":"Interval64","kind":"Data"},{"name":"Json","kind":"Data"},{"name":"JsonDocument","kind":"Data"},{"name":"String","kind":"Data"},{"name":"Timestamp","kind":"Data"},{"name":"Timestamp64","kind":"Data"},{"name":"TzDate","kind":"Data"},{"name":"TzDate32","kind":"Data"},{"name":"TzDatetime","kind":"Data"},{"name":"TzDatetime64","kind":"Data"},{"name":"TzTimestamp","kind":"Data"},{"name":"TzTimestamp64","kind":"Data"},{"name":"Uint16","kind":"Data"},{"name":"Uint32","kind":"Data"},{"name":"Uint64","kind":"Data"},{"name":"Uint8","kind":"Data"},{"name":"Unit","kind":"Unit"},{"name":"Utf8","kind":"Data"},{"name":"Uuid","kind":"Data"},{"name":"Void","kind":"Void"},{"name":"Yson","kind":"Data"},{"name":"_pgaclitem","kind":"Pg"},{"name":"_pgbit","kind":"Pg"},{"name":"_pgbool","kind":"Pg"},{"name":"_pgbox","kind":"Pg"},{"name":"_pgbpchar","kind":"Pg"},{"name":"_pgbytea","kind":"Pg"},{"name":"_pgchar","kind":"Pg"},{"name":"_pgcid","kind":"Pg"},{"name":"_pgcidr","kind":"Pg"},{"name":"_pgcircle","kind":"Pg"},{"name":"_pgcstring","kind":"Pg"},{"name":"_pgdate","kind":"Pg"},{"name":"_pgdatemultirange","kind":"Pg"},{"name":"_pgdaterange","kind":"Pg"},{"name":"_pgfloat4","kind":"Pg"},{"name":"_pgfloat8","kind":"Pg"},{"name":"_pggtsvector","kind":"Pg"},{"name":"_pginet","kind":"Pg"},{"name":"_pgint2","kind":"Pg"},{"name":"_pgint2vector","kind":"Pg"},{"name":"_pgint4","kind":"Pg"},{"name":"_pgint4multirange","kind":"Pg"},{"name":"_pgint4range","kind":"Pg"}
,{"name":"_pgint8","kind":"Pg"},{"name":"_pgint8multirange","kind":"Pg"},{"name":"_pgint8range","kind":"Pg"},{"name":"_pginterval","kind":"Pg"},{"name":"_pgjson","kind":"Pg"},{"name":"_pgjsonb","kind":"Pg"},{"name":"_pgjsonpath","kind":"Pg"},{"name":"_pgline","kind":"Pg"},{"name":"_pglseg","kind":"Pg"},{"name":"_pgmacaddr","kind":"Pg"},{"name":"_pgmacaddr8","kind":"Pg"},{"name":"_pgmoney","kind":"Pg"},{"name":"_pgname","kind":"Pg"},{"name":"_pgnumeric","kind":"Pg"},{"name":"_pgnummultirange","kind":"Pg"},{"name":"_pgnumrange","kind":"Pg"},{"name":"_pgoid","kind":"Pg"},{"name":"_pgoidvector","kind":"Pg"},{"name":"_pgpath","kind":"Pg"},{"name":"_pgpg_attribute","kind":"Pg"},{"name":"_pgpg_class","kind":"Pg"},{"name":"_pgpg_lsn","kind":"Pg"},{"name":"_pgpg_proc","kind":"Pg"},{"name":"_pgpg_snapshot","kind":"Pg"},{"name":"_pgpg_type","kind":"Pg"},{"name":"_pgpoint","kind":"Pg"},{"name":"_pgpolygon","kind":"Pg"},{"name":"_pgrecord","kind":"Pg"},{"name":"_pgrefcursor","kind":"Pg"},{"name":"_pgregclass","kind":"Pg"},{"name":"_pgregcollation","kind":"Pg"},{"name":"_pgregconfig","kind":"Pg"},{"name":"_pgregdictionary","kind":"Pg"},{"name":"_pgregnamespace","kind":"Pg"},{"name":"_pgregoper","kind":"Pg"},{"name":"_pgregoperator","kind":"Pg"},{"name":"_pgregproc","kind":"Pg"},{"name":"_pgregprocedure","kind":"Pg"},{"name":"_pgregrole","kind":"Pg"},{"name":"_pgregtype","kind":"Pg"},{"name":"_pgtext","kind":"Pg"},{"name":"_pgtid","kind":"Pg"},{"name":"_pgtime","kind":"Pg"},{"name":"_pgtimestamp","kind":"Pg"},{"name":"_pgtimestamptz","kind":"Pg"},{"name":"_pgtimetz","kind":"Pg"},{"name":"_pgtsmultirange","kind":"Pg"},{"name":"_pgtsquery","kind":"Pg"},{"name":"_pgtsrange","kind":"Pg"},{"name":"_pgtstzmultirange","kind":"Pg"},{"name":"_pgtstzrange","kind":"Pg"},{"name":"_pgtsvector","kind":"Pg"},{"name":"_pgtxid_snapshot","kind":"Pg"},{"name":"_pguuid","kind":"Pg"},{"name":"_pgvarbit","kind":"Pg"},{"name":"_pgvarchar","kind":"Pg"},{"name":"_pgxid","kind":"Pg"},{"name":"_pgxid8","kin
d":"Pg"},{"name":"_pgxml","kind":"Pg"},{"name":"pgaclitem","kind":"Pg"},{"name":"pgany","kind":"Pg"},{"name":"pganyarray","kind":"Pg"},{"name":"pganycompatible","kind":"Pg"},{"name":"pganycompatiblearray","kind":"Pg"},{"name":"pganycompatiblemultirange","kind":"Pg"},{"name":"pganycompatiblenonarray","kind":"Pg"},{"name":"pganycompatiblerange","kind":"Pg"},{"name":"pganyelement","kind":"Pg"},{"name":"pganyenum","kind":"Pg"},{"name":"pganymultirange","kind":"Pg"},{"name":"pganynonarray","kind":"Pg"},{"name":"pganyrange","kind":"Pg"},{"name":"pgbit","kind":"Pg"},{"name":"pgbool","kind":"Pg"},{"name":"pgbox","kind":"Pg"},{"name":"pgbpchar","kind":"Pg"},{"name":"pgbytea","kind":"Pg"},{"name":"pgchar","kind":"Pg"},{"name":"pgcid","kind":"Pg"},{"name":"pgcidr","kind":"Pg"},{"name":"pgcircle","kind":"Pg"},{"name":"pgcstring","kind":"Pg"},{"name":"pgdate","kind":"Pg"},{"name":"pgdatemultirange","kind":"Pg"},{"name":"pgdaterange","kind":"Pg"},{"name":"pgevent_trigger","kind":"Pg"},{"name":"pgfdw_handler","kind":"Pg"},{"name":"pgfloat4","kind":"Pg"},{"name":"pgfloat8","kind":"Pg"},{"name":"pggtsvector","kind":"Pg"},{"name":"pgindex_am_handler","kind":"Pg"},{"name":"pginet","kind":"Pg"},{"name":"pgint2","kind":"Pg"},{"name":"pgint2vector","kind":"Pg"},{"name":"pgint4","kind":"Pg"},{"name":"pgint4multirange","kind":"Pg"},{"name":"pgint4range","kind":"Pg"},{"name":"pgint8","kind":"Pg"},{"name":"pgint8multirange","kind":"Pg"},{"name":"pgint8range","kind":"Pg"},{"name":"pginternal","kind":"Pg"},{"name":"pginterval","kind":"Pg"},{"name":"pgjson","kind":"Pg"},{"name":"pgjsonb","kind":"Pg"},{"name":"pgjsonpath","kind":"Pg"},{"name":"pglanguage_handler","kind":"Pg"},{"name":"pgline","kind":"Pg"},{"name":"pglseg","kind":"Pg"},{"name":"pgmacaddr","kind":"Pg"},{"name":"pgmacaddr8","kind":"Pg"},{"name":"pgmoney","kind":"Pg"},{"name":"pgname","kind":"Pg"},{"name":"pgnumeric","kind":"Pg"},{"name":"pgnummultirange","kind":"Pg"},{"name":"pgnumrange","kind":"Pg"},{"name":"pgoid","kind":"Pg"},{"
name":"pgoidvector","kind":"Pg"},{"name":"pgpath","kind":"Pg"},{"name":"pgpg_attribute","kind":"Pg"},{"name":"pgpg_brin_bloom_summary","kind":"Pg"},{"name":"pgpg_brin_minmax_multi_summary","kind":"Pg"},{"name":"pgpg_class","kind":"Pg"},{"name":"pgpg_ddl_command","kind":"Pg"},{"name":"pgpg_dependencies","kind":"Pg"},{"name":"pgpg_lsn","kind":"Pg"},{"name":"pgpg_mcv_list","kind":"Pg"},{"name":"pgpg_ndistinct","kind":"Pg"},{"name":"pgpg_node_tree","kind":"Pg"},{"name":"pgpg_proc","kind":"Pg"},{"name":"pgpg_snapshot","kind":"Pg"},{"name":"pgpg_type","kind":"Pg"},{"name":"pgpoint","kind":"Pg"},{"name":"pgpolygon","kind":"Pg"},{"name":"pgrecord","kind":"Pg"},{"name":"pgrefcursor","kind":"Pg"},{"name":"pgregclass","kind":"Pg"},{"name":"pgregcollation","kind":"Pg"},{"name":"pgregconfig","kind":"Pg"},{"name":"pgregdictionary","kind":"Pg"},{"name":"pgregnamespace","kind":"Pg"},{"name":"pgregoper","kind":"Pg"},{"name":"pgregoperator","kind":"Pg"},{"name":"pgregproc","kind":"Pg"},{"name":"pgregprocedure","kind":"Pg"},{"name":"pgregrole","kind":"Pg"},{"name":"pgregtype","kind":"Pg"},{"name":"pgtable_am_handler","kind":"Pg"},{"name":"pgtext","kind":"Pg"},{"name":"pgtid","kind":"Pg"},{"name":"pgtime","kind":"Pg"},{"name":"pgtimestamp","kind":"Pg"},{"name":"pgtimestamptz","kind":"Pg"},{"name":"pgtimetz","kind":"Pg"},{"name":"pgtrigger","kind":"Pg"},{"name":"pgtsm_handler","kind":"Pg"},{"name":"pgtsmultirange","kind":"Pg"},{"name":"pgtsquery","kind":"Pg"},{"name":"pgtsrange","kind":"Pg"},{"name":"pgtstzmultirange","kind":"Pg"},{"name":"pgtstzrange","kind":"Pg"},{"name":"pgtsvector","kind":"Pg"},{"name":"pgtxid_snapshot","kind":"Pg"},{"name":"pgunknown","kind":"Pg"},{"name":"pguuid","kind":"Pg"},{"name":"pgvarbit","kind":"Pg"},{"name":"pgvarchar","kind":"Pg"},{"name":"pgvoid","kind":"Pg"},{"name":"pgxid","kind":"Pg"},{"name":"pgxid8","kind":"Pg"},{"name":"pgxml","kind":"Pg"}]
diff --git a/yql/essentials/data/language/udfs_basic.json b/yql/essentials/data/language/udfs_basic.json
new file mode 100644
index 0000000000..c9039913de
--- /dev/null
+++ b/yql/essentials/data/language/udfs_basic.json
@@ -0,0 +1 @@
+{"DateTime":[{"name":"Convert"},{"name":"EndOf"},{"name":"EndOfDay"},{"name":"EndOfMonth"},{"name":"EndOfQuarter"},{"name":"EndOfWeek"},{"name":"EndOfYear"},{"name":"Format"},{"name":"FromMicroseconds"},{"name":"FromMicroseconds64"},{"name":"FromMilliseconds"},{"name":"FromMilliseconds64"},{"name":"FromSeconds"},{"name":"FromSeconds64"},{"name":"GetDayOfMonth"},{"name":"GetDayOfWeek"},{"name":"GetDayOfWeekName"},{"name":"GetDayOfYear"},{"name":"GetHour"},{"name":"GetMicrosecondOfSecond"},{"name":"GetMillisecondOfSecond"},{"name":"GetMinute"},{"name":"GetMonth"},{"name":"GetMonthName"},{"name":"GetSecond"},{"name":"GetTimezoneId"},{"name":"GetTimezoneName"},{"name":"GetWeekOfYear"},{"name":"GetWeekOfYearIso8601"},{"name":"GetYear"},{"name":"Interval64FromDays"},{"name":"Interval64FromHours"},{"name":"Interval64FromMicroseconds"},{"name":"Interval64FromMilliseconds"},{"name":"Interval64FromMinutes"},{"name":"Interval64FromSeconds"},{"name":"IntervalFromDays"},{"name":"IntervalFromHours"},{"name":"IntervalFromMicroseconds"},{"name":"IntervalFromMilliseconds"},{"name":"IntervalFromMinutes"},{"name":"IntervalFromSeconds"},{"name":"MakeDate"},{"name":"MakeDate32"},{"name":"MakeDatetime"},{"name":"MakeDatetime64"},{"name":"MakeTimestamp"},{"name":"MakeTimestamp64"},{"name":"MakeTzDate"},{"name":"MakeTzDate32"},{"name":"MakeTzDatetime"},{"name":"MakeTzDatetime64"},{"name":"MakeTzTimestamp"},{"name":"MakeTzTimestamp64"},{"name":"Parse"},{"name":"Parse64"},{"name":"ParseHttp"},{"name":"ParseIso8601"},{"name":"ParseRfc822"},{"name":"ParseX509"},{"name":"ShiftMonths"},{"name":"ShiftQuarters"},{"name":"ShiftYears"},{"name":"Split"},{"name":"StartOf"},{"name":"StartOfDay"},{"name":"StartOfMonth"},{"name":"StartOfQuarter"},{"name":"StartOfWeek"},{"name":"StartOfYear"},{"name":"TimeOfDay"},{"name":"ToDays"},{"name":"ToHours"},{"name":"ToMicroseconds"},{"name":"ToMilliseconds"},{"name":"ToMinutes"},{"name":"ToSeconds"},{"name":"Update"}],"Digest":[{"name":"Argon2"},{"name":"Blake2B
"},{"name":"CityHash"},{"name":"CityHash128"},{"name":"Crc32c"},{"name":"Crc64"},{"name":"FarmHashFingerprint"},{"name":"FarmHashFingerprint128"},{"name":"FarmHashFingerprint2"},{"name":"FarmHashFingerprint32"},{"name":"FarmHashFingerprint64"},{"name":"Fnv32"},{"name":"Fnv64"},{"name":"HighwayHash"},{"name":"IntHash64"},{"name":"Md5HalfMix"},{"name":"Md5Hex"},{"name":"Md5Raw"},{"name":"MurMurHash"},{"name":"MurMurHash2A"},{"name":"MurMurHash2A32"},{"name":"MurMurHash32"},{"name":"NumericHash"},{"name":"Sha1"},{"name":"Sha256"},{"name":"SipHash"},{"name":"SuperFastHash"},{"name":"XXH3"},{"name":"XXH3_128"}],"Hyperscan":[{"name":"BacktrackingGrep"},{"name":"BacktrackingMatch"},{"name":"Capture"},{"name":"Grep"},{"name":"Match"},{"name":"MultiGrep"},{"name":"MultiMatch"},{"name":"Replace"}],"Ip":[{"name":"ConvertToIPv6"},{"name":"FromString"},{"name":"GetSubnet"},{"name":"GetSubnetByMask"},{"name":"IsEmbeddedIPv4"},{"name":"IsIPv4"},{"name":"IsIPv6"},{"name":"SubnetFromString"},{"name":"SubnetMatch"},{"name":"SubnetToString"},{"name":"ToFixedIPv6String"},{"name":"ToString"}],"Json":[{"name":"GetField"}],"Math":[{"name":"Abs"},{"name":"Acos"},{"name":"Asin"},{"name":"Asinh"},{"name":"Atan"},{"name":"Atan2"},{"name":"Cbrt"},{"name":"Ceil"},{"name":"Cos"},{"name":"Cosh"},{"name":"E"},{"name":"Eps"},{"name":"Erf"},{"name":"ErfInv"},{"name":"ErfcInv"},{"name":"Exp"},{"name":"Exp2"},{"name":"Fabs"},{"name":"Floor"},{"name":"Fmod"},{"name":"FuzzyEquals"},{"name":"Hypot"},{"name":"IsFinite"},{"name":"IsInf"},{"name":"IsNaN"},{"name":"Ldexp"},{"name":"Lgamma"},{"name":"Log"},{"name":"Log10"},{"name":"Log2"},{"name":"Mod"},{"name":"NearbyInt"},{"name":"Pi"},{"name":"Pow"},{"name":"Rem"},{"name":"Remainder"},{"name":"Rint"},{"name":"Round"},{"name":"RoundDownward"},{"name":"RoundToNearest"},{"name":"RoundTowardZero"},{"name":"RoundUpward"},{"name":"Sigmoid"},{"name":"Sin"},{"name":"Sinh"},{"name":"Sqrt"},{"name":"Tan"},{"name":"Tanh"},{"name":"Tgamma"},{"name":"Trunc"}],"Pire":[{
"name":"Capture"},{"name":"Grep"},{"name":"Match"},{"name":"MultiGrep"},{"name":"MultiMatch"},{"name":"Replace"}],"Re2":[{"name":"Capture"},{"name":"Count"},{"name":"Escape"},{"name":"FindAndConsume"},{"name":"Grep"},{"name":"Match"},{"name":"Options"},{"name":"PatternFromLike"},{"name":"Replace"}],"Re2posix":[{"name":"Capture"},{"name":"Count"},{"name":"Escape"},{"name":"FindAndConsume"},{"name":"Grep"},{"name":"Match"},{"name":"Options"},{"name":"PatternFromLike"},{"name":"Replace"}],"String":[{"name":"AsciiToLower"},{"name":"AsciiToTitle"},{"name":"AsciiToUpper"},{"name":"Base32Decode"},{"name":"Base32Encode"},{"name":"Base32StrictDecode"},{"name":"Base64Decode"},{"name":"Base64Encode"},{"name":"Base64EncodeUrl"},{"name":"Base64StrictDecode"},{"name":"Bin"},{"name":"BinText"},{"name":"CgiEscape"},{"name":"CgiUnescape"},{"name":"Collapse"},{"name":"CollapseText"},{"name":"Contains"},{"name":"DecodeHtml"},{"name":"EncodeHtml"},{"name":"EndsWith"},{"name":"EndsWithIgnoreCase"},{"name":"EscapeC"},{"name":"FromByteList"},{"name":"HasPrefix"},{"name":"HasPrefixIgnoreCase"},{"name":"HasSuffix"},{"name":"HasSuffixIgnoreCase"},{"name":"Hex"},{"name":"HexDecode"},{"name":"HexEncode"},{"name":"HexText"},{"name":"HumanReadableBytes"},{"name":"HumanReadableDuration"},{"name":"HumanReadableQuantity"},{"name":"IsAscii"},{"name":"IsAsciiAlnum"},{"name":"IsAsciiAlpha"},{"name":"IsAsciiDigit"},{"name":"IsAsciiHex"},{"name":"IsAsciiLower"},{"name":"IsAsciiSpace"},{"name":"IsAsciiUpper"},{"name":"JoinFromList"},{"name":"LeftPad"},{"name":"LevensteinDistance"},{"name":"Prec"},{"name":"RemoveAll"},{"name":"RemoveFirst"},{"name":"RemoveLast"},{"name":"ReplaceAll"},{"name":"ReplaceFirst"},{"name":"ReplaceLast"},{"name":"RightPad"},{"name":"SBin"},{"name":"SHex"},{"name":"SplitToList"},{"name":"StartsWith"},{"name":"StartsWithIgnoreCase"},{"name":"Strip"},{"name":"ToByteList"},{"name":"UnescapeC"}],"Unicode":[{"name":"Find"},{"name":"Fold"},{"name":"FromCodePointList"},{"name":"GetLength
"},{"name":"IsAlnum"},{"name":"IsAlpha"},{"name":"IsAscii"},{"name":"IsDigit"},{"name":"IsHex"},{"name":"IsLower"},{"name":"IsSpace"},{"name":"IsUnicodeSet"},{"name":"IsUpper"},{"name":"IsUtf"},{"name":"JoinFromList"},{"name":"LevensteinDistance"},{"name":"Normalize"},{"name":"NormalizeNFC"},{"name":"NormalizeNFD"},{"name":"NormalizeNFKC"},{"name":"NormalizeNFKD"},{"name":"RFind"},{"name":"RemoveAll"},{"name":"RemoveFirst"},{"name":"RemoveLast"},{"name":"ReplaceAll"},{"name":"ReplaceFirst"},{"name":"ReplaceLast"},{"name":"Reverse"},{"name":"SplitToList"},{"name":"Strip"},{"name":"Substring"},{"name":"ToCodePointList"},{"name":"ToLower"},{"name":"ToTitle"},{"name":"ToUint64"},{"name":"ToUpper"},{"name":"Translit"},{"name":"TryToUint64"}],"Url":[{"name":"BuildQueryString"},{"name":"CanBePunycodeHostName"},{"name":"CutQueryStringAndFragment"},{"name":"CutScheme"},{"name":"CutWWW"},{"name":"CutWWW2"},{"name":"Decode"},{"name":"Encode"},{"name":"ForceHostNameToPunycode"},{"name":"ForcePunycodeToHostName"},{"name":"GetCGIParam"},{"name":"GetDomain"},{"name":"GetDomainLevel"},{"name":"GetFragment"},{"name":"GetHost"},{"name":"GetHostPort"},{"name":"GetOwner"},{"name":"GetPath"},{"name":"GetPort"},{"name":"GetScheme"},{"name":"GetSchemeHost"},{"name":"GetSchemeHostPort"},{"name":"GetSignificantDomain"},{"name":"GetTLD"},{"name":"GetTail"},{"name":"HostNameToPunycode"},{"name":"IsAllowedByRobotsTxt"},{"name":"IsKnownTLD"},{"name":"IsWellKnownTLD"},{"name":"Normalize"},{"name":"NormalizeWithDefaultHttpScheme"},{"name":"Parse"},{"name":"PunycodeToHostName"},{"name":"QueryStringToDict"},{"name":"QueryStringToList"}],"Yson":[{"name":"Attributes"},{"name":"Contains"},{"name":"ConvertTo"},{"name":"ConvertToBool"},{"name":"ConvertToBoolDict"},{"name":"ConvertToBoolList"},{"name":"ConvertToDict"},{"name":"ConvertToDouble"},{"name":"ConvertToDoubleDict"},{"name":"ConvertToDoubleList"},{"name":"ConvertToInt64"},{"name":"ConvertToInt64Dict"},{"name":"ConvertToInt64List"},{"name":"Conve
rtToList"},{"name":"ConvertToString"},{"name":"ConvertToStringDict"},{"name":"ConvertToStringList"},{"name":"ConvertToUint64"},{"name":"ConvertToUint64Dict"},{"name":"ConvertToUint64List"},{"name":"Equals"},{"name":"From"},{"name":"GetHash"},{"name":"GetLength"},{"name":"IsBool"},{"name":"IsDict"},{"name":"IsDouble"},{"name":"IsEntity"},{"name":"IsInt64"},{"name":"IsList"},{"name":"IsString"},{"name":"IsUint64"},{"name":"Lookup"},{"name":"LookupBool"},{"name":"LookupDict"},{"name":"LookupDouble"},{"name":"LookupInt64"},{"name":"LookupList"},{"name":"LookupString"},{"name":"LookupUint64"},{"name":"Options"},{"name":"Parse"},{"name":"ParseJson"},{"name":"ParseJsonDecodeUtf8"},{"name":"Serialize"},{"name":"SerializeJson"},{"name":"SerializePretty"},{"name":"SerializeText"},{"name":"WithAttributes"},{"name":"YPath"},{"name":"YPathBool"},{"name":"YPathDict"},{"name":"YPathDouble"},{"name":"YPathInt64"},{"name":"YPathList"},{"name":"YPathString"},{"name":"YPathUint64"}],"Compress":[{"name":"BZip2"},{"name":"Brotli"},{"name":"Gzip"},{"name":"Lz4"},{"name":"Lzf"},{"name":"Lzma"},{"name":"Lzo"},{"name":"Lzq"},{"name":"Snappy"},{"name":"Zlib"},{"name":"Zstd"}],"Decompress":[{"name":"BZip2"},{"name":"Brotli"},{"name":"Gzip"},{"name":"Lz4"},{"name":"Lzf"},{"name":"Lzma"},{"name":"Lzo"},{"name":"Lzq"},{"name":"Snappy"},{"name":"Xz"},{"name":"Zlib"},{"name":"Zstd"}],"Protobuf":[{"name":"Parse"},{"name":"Serialize"},{"name":"TryParse"}],"Streaming":[{"name":"Process"},{"name":"ProcessInline"}],"TryDecompress":[{"name":"BZip2"},{"name":"Brotli"},{"name":"Gzip"},{"name":"Lz4"},{"name":"Lzf"},{"name":"Lzma"},{"name":"Lzo"},{"name":"Lzq"},{"name":"Snappy"},{"name":"Xz"},{"name":"Zlib"},{"name":"Zstd"}]}
diff --git a/yql/essentials/data/language/update_functions.sh b/yql/essentials/data/language/update_functions.sh
new file mode 100755
index 0000000000..dbe8f7b5be
--- /dev/null
+++ b/yql/essentials/data/language/update_functions.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+set -eu
+ya make ../../tools/sql_functions_dump
+../../tools/sql_functions_dump/sql_functions_dump | jq -c > sql_functions.json
diff --git a/yql/essentials/docs/ru/syntax/expressions.md b/yql/essentials/docs/ru/syntax/expressions.md
index e0e424a38e..76930efd31 100644
--- a/yql/essentials/docs/ru/syntax/expressions.md
+++ b/yql/essentials/docs/ru/syntax/expressions.md
@@ -168,15 +168,15 @@ WHERE value IS NOT NULL;
## IS \[NOT\] DISTINCT FROM {#is-distinct-from}
-Сравнение двух значений. В отличие от обычных [операторов сравнения](#comparison-operators), нуллы считаются равными друг другу.
+Сравнение двух значений. В отличие от обычных [операторов сравнения](#comparison-operators), `NULL`-значения считаются равными друг другу.
Сравнение осуществляется по следующим правилам:
-1. операторы `IS DISTINCT FROM`/`IS NOT DISTINCT FROM` определены для тех и только для тех аргументов, для которых определены операторы `!=` и `=`;
-2. результат `IS NOT DISTINCT FROM` равен логическому отрицанию результата `IS DISTINCT FROM` для данных аргументов;
-3. если результат оператора `==` не равен нуллу для некоторых аргументов, то он совпадает с результатом оператора `IS NOT DISTINCT FROM` для тех же аргументов;
-4. если оба аргумента являются незаполненными `Optional`ми или `NULL`ами, то значение `IS NOT DISTINCT FROM` равно `True`
-5. результат `IS NOT DISTINCT FROM` от незаполненного `Optional` или `NULL` и заполненного `Optional` или не-`Optional` значения равен `False`.
+1. Операторы `IS DISTINCT FROM`/`IS NOT DISTINCT FROM` определены для тех и только для тех аргументов, для которых определены операторы `!=` и `=`.
+2. Результат `IS NOT DISTINCT FROM` равен логическому отрицанию результата `IS DISTINCT FROM` для данных аргументов.
+3. Если результат оператора `==` не равен `NULL` для некоторых аргументов, то он совпадает с результатом оператора `IS NOT DISTINCT FROM` для тех же аргументов.
+4. Если оба аргумента являются незаполненными `Optional` или `NULL`, то значение `IS NOT DISTINCT FROM` равно `True`.
+5. Результат `IS NOT DISTINCT FROM` от незаполненного `Optional` или `NULL` и заполненного `Optional` или не-`Optional` значения равен `False`.
Для значений композитных типов эти правила применяются рекурсивно.
@@ -435,13 +435,13 @@ $x, $y = AsTuple($y, $x); -- swap значений выражений
Семантика табличного выражения зависит от контекста в котором оно используется. В YQL табличные выражения могут применяться в следующих контекстах:
-* табличный контекст - после [FROM](select/from.md).
+* табличный контекст &mdash; после [FROM](select/from.md).
Здесь табличные выражения работают как ожидается – например `$input = SELECT a, b, c FROM T; SELECT * FROM $input` вернет таблицу с тремя колонками.
Табличный контекст также возникает после [UNION ALL](select/index.md#unionall), [JOIN](join.md#join), [PROCESS](process.md#process), [REDUCE](reduce.md#reduce);
-* векторный контекст - после [IN](#in). В этом контексте табличное выражение обязано содержать ровно одну колонку (имя этой колонки никак не влияет на результат выражения).
+* векторный контекст &mdash; после [IN](#in). В этом контексте табличное выражение обязано содержать ровно одну колонку (имя этой колонки никак не влияет на результат выражения).
Табличное выражение в векторном контексте типизируется как список (тип элемента списка при этом совпадает с типом колонки). Пример: `SELECT * FROM T WHERE key IN (SELECT k FROM T1)`;
@@ -456,7 +456,7 @@ $input = SELECT 1 AS key, 2 AS value;
$process = PROCESS $input;
SELECT FormatType(TypeOf($process)); -- $process используется в скалярном контексте,
- -- но результат SELECT при этом - List<Struct<'key':Int32,'value':Int32>>
+ -- но результат SELECT при этом — List<Struct<'key':Int32,'value':Int32>>
SELECT $process[0].key; -- вернет 1
@@ -475,7 +475,7 @@ SELECT * FROM $table LEFT JOIN $dict USING(key);
END DEFINE;
SELECT * FROM $merge_dict("Input", $dict); -- $dict здесь используется в скалярном контексте.
- -- ошибка - в скалярном контексте ожидается ровно одна колонка
+ -- ошибка — в скалярном контексте ожидается ровно одна колонка
```
@@ -491,7 +491,7 @@ SELECT * FROM $table LEFT JOIN $dict() USING(key); -- использование
-- (вызов шаблона подзапроса) в табличном контексте
END DEFINE;
-SELECT * FROM $merge_dict("Input", $dict); -- $dict - шаблон позапроса (не табличное выражение)
+SELECT * FROM $merge_dict("Input", $dict); -- $dict — шаблон подзапроса (не табличное выражение)
-- передаваемый в качестве аргумента табличного выражения
```
diff --git a/yql/essentials/minikql/aligned_page_pool.cpp b/yql/essentials/minikql/aligned_page_pool.cpp
index bcbb3edd11..e061c08574 100644
--- a/yql/essentials/minikql/aligned_page_pool.cpp
+++ b/yql/essentials/minikql/aligned_page_pool.cpp
@@ -97,12 +97,10 @@ public:
private:
size_t PushPage(void* addr) {
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
- if (Y_UNLIKELY(IsDefaultAllocator)) {
+ if (Y_UNLIKELY(TAlignedPagePool::IsDefaultAllocatorUsed())) {
FreePage(addr);
return GetPageSize();
}
-#endif
++Count;
Pages.Enqueue(addr);
@@ -141,10 +139,7 @@ public:
}
void* DoMmap(size_t size) {
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
- // No memory maps allowed while using default allocator
- Y_DEBUG_ABORT_UNLESS(!IsDefaultAllocator);
-#endif
+ Y_DEBUG_ABORT_UNLESS(!TAlignedPagePoolImpl<T>::IsDefaultAllocatorUsed(), "No memory maps allowed while using default allocator");
void* res = T::Mmap(size);
TotalMmappedBytes += size;
@@ -356,12 +351,10 @@ TAlignedPagePoolImpl<T>::~TAlignedPagePoolImpl() {
for (auto it = ActiveBlocks.cbegin(); ActiveBlocks.cend() != it; ActiveBlocks.erase(it++)) {
activeBlocksSize += it->second;
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
- if (Y_UNLIKELY(IsDefaultAllocator)) {
+ if (Y_UNLIKELY(IsDefaultAllocatorUsed())) {
ReturnBlock(it->first, it->second);
return;
}
-#endif
Free(it->first, it->second);
}
@@ -456,9 +449,7 @@ void* TAlignedPagePoolImpl<T>::GetPage() {
throw TMemoryLimitExceededException();
}
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
- if (Y_LIKELY(!IsDefaultAllocator)) {
-#endif
+ if (Y_LIKELY(!IsDefaultAllocatorUsed())) {
if (const auto ptr = TGlobalPools<T, false>::Instance().Get(0).GetPage()) {
TotalAllocated += POOL_PAGE_SIZE;
if (AllocNotifyCallback) {
@@ -475,20 +466,14 @@ void* TAlignedPagePoolImpl<T>::GetPage() {
}
++PageMissCount;
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
}
-#endif
void* res;
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
- if (Y_UNLIKELY(IsDefaultAllocator)) {
+ if (Y_UNLIKELY(IsDefaultAllocatorUsed())) {
res = GetBlock(POOL_PAGE_SIZE);
} else {
-#endif
res = Alloc(POOL_PAGE_SIZE);
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
}
-#endif
AllPages.emplace(res);
return res;
@@ -496,13 +481,11 @@ void* TAlignedPagePoolImpl<T>::GetPage() {
template<typename T>
void TAlignedPagePoolImpl<T>::ReturnPage(void* addr) noexcept {
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
- if (Y_UNLIKELY(IsDefaultAllocator)) {
+ if (Y_UNLIKELY(IsDefaultAllocatorUsed())) {
ReturnBlock(addr, POOL_PAGE_SIZE);
AllPages.erase(addr);
return;
}
-#endif
Y_DEBUG_ABORT_UNLESS(AllPages.find(addr) != AllPages.end());
FreePages.emplace(addr);
@@ -512,8 +495,7 @@ template<typename T>
void* TAlignedPagePoolImpl<T>::GetBlock(size_t size) {
Y_DEBUG_ABORT_UNLESS(size >= POOL_PAGE_SIZE);
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
- if (Y_UNLIKELY(IsDefaultAllocator)) {
+ if (Y_UNLIKELY(IsDefaultAllocatorUsed())) {
OffloadAlloc(size);
auto ret = malloc(size);
if (!ret) {
@@ -522,7 +504,6 @@ void* TAlignedPagePoolImpl<T>::GetBlock(size_t size) {
return ret;
}
-#endif
if (size == POOL_PAGE_SIZE) {
return GetPage();
@@ -537,14 +518,12 @@ template<typename T>
void TAlignedPagePoolImpl<T>::ReturnBlock(void* ptr, size_t size) noexcept {
Y_DEBUG_ABORT_UNLESS(size >= POOL_PAGE_SIZE);
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
- if (Y_UNLIKELY(IsDefaultAllocator)) {
+ if (Y_UNLIKELY(IsDefaultAllocatorUsed())) {
OffloadFree(size);
free(ptr);
UpdateMemoryYellowZone();
return;
}
-#endif
if (size == POOL_PAGE_SIZE) {
ReturnPage(ptr);
diff --git a/yql/essentials/minikql/aligned_page_pool.h b/yql/essentials/minikql/aligned_page_pool.h
index 511b99b4d7..bc570528c9 100644
--- a/yql/essentials/minikql/aligned_page_pool.h
+++ b/yql/essentials/minikql/aligned_page_pool.h
@@ -240,6 +240,10 @@ public:
#if defined(ALLOW_DEFAULT_ALLOCATOR)
static bool IsDefaultAllocatorUsed();
+#else
+ static consteval bool IsDefaultAllocatorUsed() {
+ return false;
+ }
#endif
protected:
diff --git a/yql/essentials/minikql/comp_nodes/benchmark/block_coalesce/bench.cpp b/yql/essentials/minikql/comp_nodes/benchmark/block_coalesce/bench.cpp
index aa1ea57703..544811424a 100644
--- a/yql/essentials/minikql/comp_nodes/benchmark/block_coalesce/bench.cpp
+++ b/yql/essentials/minikql/comp_nodes/benchmark/block_coalesce/bench.cpp
@@ -95,3 +95,6 @@ static void CustomArguments(benchmark::internal::Benchmark* b) {
BENCHMARK(NKikimr::NMiniKQL::BenchmarkFixedSizeCoalesce<ui8>)->Unit(benchmark::kMillisecond)->Apply(CustomArguments);
BENCHMARK(NKikimr::NMiniKQL::BenchmarkFixedSizeCoalesce<ui16>)->Unit(benchmark::kMillisecond)->Apply(CustomArguments);
BENCHMARK(NKikimr::NMiniKQL::BenchmarkFixedSizeCoalesce<ui32>)->Unit(benchmark::kMillisecond)->Apply(CustomArguments);
+BENCHMARK(NKikimr::NMiniKQL::BenchmarkFixedSizeCoalesce<ui64>)->Unit(benchmark::kMillisecond)->Apply(CustomArguments);
+BENCHMARK(NKikimr::NMiniKQL::BenchmarkFixedSizeCoalesce<float>)->Unit(benchmark::kMillisecond)->Apply(CustomArguments);
+BENCHMARK(NKikimr::NMiniKQL::BenchmarkFixedSizeCoalesce<double>)->Unit(benchmark::kMillisecond)->Apply(CustomArguments);
diff --git a/yql/essentials/minikql/comp_nodes/mkql_block_coalesce.cpp b/yql/essentials/minikql/comp_nodes/mkql_block_coalesce.cpp
index ae190e777e..76ed0dce8a 100644
--- a/yql/essentials/minikql/comp_nodes/mkql_block_coalesce.cpp
+++ b/yql/essentials/minikql/comp_nodes/mkql_block_coalesce.cpp
@@ -78,30 +78,39 @@ bool DispatchBlendingCoalesce(const arrow::Datum& left, const arrow::Datum& righ
auto typeId = typeData.GetTypeId();
switch (NYql::NUdf::GetDataSlot(typeId)) {
- case NYql::NUdf::EDataSlot::Int8:
- DispatchCoalesceImpl<i8>(left, right, out, /*outIsOptional=*/rightIsOptional, pool);
- return true;
case NYql::NUdf::EDataSlot::Bool:
+ case NYql::NUdf::EDataSlot::Int8:
case NYql::NUdf::EDataSlot::Uint8:
DispatchCoalesceImpl<ui8>(left, right, out, /*outIsOptional=*/rightIsOptional, pool);
return true;
case NYql::NUdf::EDataSlot::Int16:
- DispatchCoalesceImpl<i16>(left, right, out, /*outIsOptional=*/rightIsOptional, pool);
- return true;
case NYql::NUdf::EDataSlot::Uint16:
+ case NYql::NUdf::EDataSlot::Date:
DispatchCoalesceImpl<ui16>(left, right, out, /*outIsOptional=*/rightIsOptional, pool);
return true;
case NYql::NUdf::EDataSlot::Int32:
- DispatchCoalesceImpl<i32>(left, right, out, /*outIsOptional=*/rightIsOptional, pool);
- return true;
case NYql::NUdf::EDataSlot::Uint32:
+ case NYql::NUdf::EDataSlot::Date32:
+ case NYql::NUdf::EDataSlot::Datetime:
DispatchCoalesceImpl<ui32>(left, right, out, /*outIsOptional=*/rightIsOptional, pool);
return true;
case NYql::NUdf::EDataSlot::Int64:
case NYql::NUdf::EDataSlot::Uint64:
+ case NYql::NUdf::EDataSlot::Datetime64:
+ case NYql::NUdf::EDataSlot::Timestamp64:
+ case NYql::NUdf::EDataSlot::Interval64:
+ case NYql::NUdf::EDataSlot::Interval:
+ case NYql::NUdf::EDataSlot::Timestamp:
+ DispatchCoalesceImpl<ui64>(left, right, out, /*outIsOptional=*/rightIsOptional, pool);
+ return true;
case NYql::NUdf::EDataSlot::Double:
+ static_assert(sizeof(NUdf::TDataType<double>::TLayout) == sizeof(NUdf::TDataType<ui64>::TLayout));
+ DispatchCoalesceImpl<ui64>(left, right, out, /*outIsOptional=*/rightIsOptional, pool);
+ return true;
case NYql::NUdf::EDataSlot::Float:
- // TODO(YQL-19645): Support other numeric types.
+ static_assert(sizeof(NUdf::TDataType<float>::TLayout) == sizeof(NUdf::TDataType<ui32>::TLayout));
+ DispatchCoalesceImpl<ui32>(left, right, out, /*outIsOptional=*/rightIsOptional, pool);
+ return true;
default:
// Fallback to general builder/reader pipeline.
return false;
diff --git a/yql/essentials/minikql/comp_nodes/mkql_unwrap.cpp b/yql/essentials/minikql/comp_nodes/mkql_unwrap.cpp
index 307609aa08..75d0f4ba58 100644
--- a/yql/essentials/minikql/comp_nodes/mkql_unwrap.cpp
+++ b/yql/essentials/minikql/comp_nodes/mkql_unwrap.cpp
@@ -20,6 +20,10 @@ public:
}
NUdf::TUnboxedValuePod DoCalculate(TComputationContext& compCtx) const {
+ return DoCalculateImpl(compCtx).Release();
+ }
+
+ NUdf::TUnboxedValue DoCalculateImpl(TComputationContext& compCtx) const {
auto value = Optional()->GetValue(compCtx);
if (value) {
return value.GetOptionalValue();
diff --git a/yql/essentials/minikql/comp_nodes/mkql_weakmember.cpp b/yql/essentials/minikql/comp_nodes/mkql_weakmember.cpp
index 2d53545910..1c058b21a3 100644
--- a/yql/essentials/minikql/comp_nodes/mkql_weakmember.cpp
+++ b/yql/essentials/minikql/comp_nodes/mkql_weakmember.cpp
@@ -26,6 +26,11 @@ public:
}
NUdf::TUnboxedValuePod DoCalculate(TComputationContext& ctx) const {
+ auto result = DoCalculateImpl(ctx);
+ return result.Release();
+ }
+
+ NUdf::TUnboxedValue DoCalculateImpl(TComputationContext& ctx) const {
if (const auto& restDict = RestDict->GetValue(ctx)) {
if (const auto& tryMember = restDict.Lookup(MemberName)) {
return SimpleValueFromYson(SchemeType, tryMember.AsStringRef());
@@ -46,7 +51,7 @@ public:
stringStream.DoWrite(ref.Data(), size);
return stringStream.Value();
} else if (SchemeType == NUdf::EDataSlot::String) {
- return tryMember.Release();
+ return tryMember;
} else {
return {};
}
diff --git a/yql/essentials/minikql/comp_nodes/ut/mkql_block_coalesce_ut.cpp b/yql/essentials/minikql/comp_nodes/ut/mkql_block_coalesce_ut.cpp
index 1a6a044bcf..4f00f15633 100644
--- a/yql/essentials/minikql/comp_nodes/ut/mkql_block_coalesce_ut.cpp
+++ b/yql/essentials/minikql/comp_nodes/ut/mkql_block_coalesce_ut.cpp
@@ -21,24 +21,60 @@ namespace {
#define UNIT_TEST_WITH_INTEGER(TestName) \
template <typename TTestType> \
void TestName##Execute(NUnitTest::TTestContext& ut_context Y_DECLARE_UNUSED); \
- Y_UNIT_TEST(TestName##i8) { \
+ Y_UNIT_TEST(TestName##_i8) { \
TestName##Execute<i8>(ut_context); \
} \
- Y_UNIT_TEST(TestName##ui8) { \
+ Y_UNIT_TEST(TestName##_ui8) { \
TestName##Execute<ui8>(ut_context); \
} \
- Y_UNIT_TEST(TestName##i16) { \
+ Y_UNIT_TEST(TestName##_i16) { \
TestName##Execute<i16>(ut_context); \
} \
- Y_UNIT_TEST(TestName##ui16) { \
+ Y_UNIT_TEST(TestName##_ui16) { \
TestName##Execute<ui16>(ut_context); \
} \
- Y_UNIT_TEST(TestName##i32) { \
+ Y_UNIT_TEST(TestName##_i32) { \
TestName##Execute<i32>(ut_context); \
} \
- Y_UNIT_TEST(TestName##ui32) { \
+ Y_UNIT_TEST(TestName##_ui32) { \
TestName##Execute<ui32>(ut_context); \
} \
+ Y_UNIT_TEST(TestName##_i64) { \
+ TestName##Execute<i64>(ut_context); \
+ } \
+ Y_UNIT_TEST(TestName##_ui64) { \
+ TestName##Execute<ui64>(ut_context); \
+ } \
+ Y_UNIT_TEST(TestName##_float) { \
+ TestName##Execute<float>(ut_context); \
+ } \
+ Y_UNIT_TEST(TestName##_double) { \
+ TestName##Execute<double>(ut_context); \
+ } \
+ Y_UNIT_TEST(TestName##_TDate) { \
+ TestName##Execute<NYql::NUdf::TDate>(ut_context); \
+ } \
+ Y_UNIT_TEST(TestName##_TDatetime) { \
+ TestName##Execute<NYql::NUdf::TDatetime>(ut_context); \
+ } \
+ Y_UNIT_TEST(TestName##_TTimestamp) { \
+ TestName##Execute<NYql::NUdf::TTimestamp>(ut_context); \
+ } \
+ Y_UNIT_TEST(TestName##_TInterval) { \
+ TestName##Execute<NYql::NUdf::TInterval>(ut_context); \
+ } \
+ Y_UNIT_TEST(TestName##_TDate32) { \
+ TestName##Execute<NYql::NUdf::TDate32>(ut_context); \
+ } \
+ Y_UNIT_TEST(TestName##_TDatetime64) { \
+ TestName##Execute<NYql::NUdf::TDatetime64>(ut_context); \
+ } \
+ Y_UNIT_TEST(TestName##_TTimestamp64) { \
+ TestName##Execute<NYql::NUdf::TTimestamp64>(ut_context); \
+ } \
+ Y_UNIT_TEST(TestName##_TInterval64) { \
+ TestName##Execute<NYql::NUdf::TInterval64>(ut_context); \
+ } \
\
template <typename TTestType> \
void TestName##Execute(NUnitTest::TTestContext& ut_context Y_DECLARE_UNUSED)
@@ -76,8 +112,17 @@ enum class ERightOperandType {
OPTIONAL_SCALAR
};
+template <typename T>
+using InputOptionalVector =
+ std::vector<TMaybe<typename NUdf::TDataType<T>::TLayout>>;
+
template <typename T, ERightOperandType rightType = ERightOperandType::ARRAY>
-void TestBlockCoalesceForVector(std::vector<TMaybe<T>> left, std::vector<TMaybe<T>> right, std::vector<TMaybe<T>> expected, size_t leftOffset, size_t rightOffset) {
+void TestBlockCoalesceForVector(InputOptionalVector<T> left,
+ InputOptionalVector<T> right,
+ InputOptionalVector<T> expected,
+ size_t leftOffset,
+ size_t rightOffset) {
+ using TLayout = typename NUdf::TDataType<T>::TLayout;
TSetup<false> setup;
NYql::TExprContext exprCtx;
auto* type = setup.PgmBuilder->NewDataType(NUdf::TDataType<T>::Id);
@@ -101,12 +146,12 @@ void TestBlockCoalesceForVector(std::vector<TMaybe<T>> left, std::vector<TMaybe<
arrow::Datum rightOperand;
if constexpr (rightType == ERightOperandType::SCALAR) {
- rightOperand = MakeScalarDatum<T>(right[0].GetRef());
+ rightOperand = MakeScalarDatum<TLayout>(right[0].GetRef());
} else if constexpr (rightType == ERightOperandType::OPTIONAL_SCALAR) {
if (right[0]) {
- rightOperand = MakeScalarDatum<T>(right[0].GetRef());
+ rightOperand = MakeScalarDatum<TLayout>(right[0].GetRef());
} else {
- rightOperand = MakeScalarDatum<T>(0);
+ rightOperand = MakeScalarDatum<TLayout>(0);
rightOperand.scalar()->is_valid = false;
}
} else {
@@ -133,7 +178,9 @@ void TestBlockCoalesceForVector(std::vector<TMaybe<T>> left, std::vector<TMaybe<
}
template <typename T, ERightOperandType rightType = ERightOperandType::ARRAY>
-void TestBlockCoalesce(std::vector<TMaybe<T>> left, std::vector<TMaybe<T>> right, std::vector<TMaybe<T>> expected) {
+void TestBlockCoalesce(InputOptionalVector<T> left,
+ InputOptionalVector<T> right,
+ InputOptionalVector<T> expected) {
// First test different offsets.
for (size_t leftOffset = 0; leftOffset < 10; leftOffset++) {
for (size_t rightOffset = 0; rightOffset < 10; rightOffset++) {
@@ -247,16 +294,16 @@ Y_UNIT_TEST(CoalesceGraphTest) {
}
UNIT_TEST_WITH_INTEGER(KernelRightIsNotNullArray) {
- auto max = std::numeric_limits<TTestType>::max();
- auto min = std::numeric_limits<TTestType>::min();
+ auto max = std::numeric_limits<typename NUdf::TDataType<TTestType>::TLayout>::max();
+ auto min = std::numeric_limits<typename NUdf::TDataType<TTestType>::TLayout>::min();
TestBlockCoalesce<TTestType, ERightOperandType::ARRAY>({Nothing(), 2, 3, Nothing(), 5, 6, 7, max, 9, Nothing(), 11, 12, 13, Nothing(), Nothing(), Nothing(), min, Nothing(), 19, 20},
{101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120},
{101, 2, 3, 104, 5, 6, 7, max, 9, 110, 11, 12, 13, 114, 115, 116, min, 118, 19, 20});
}
UNIT_TEST_WITH_INTEGER(KernelRightIsScalar) {
- auto max = std::numeric_limits<TTestType>::max();
- auto min = std::numeric_limits<TTestType>::min();
+ auto max = std::numeric_limits<typename NUdf::TDataType<TTestType>::TLayout>::max();
+ auto min = std::numeric_limits<typename NUdf::TDataType<TTestType>::TLayout>::min();
TestBlockCoalesce<TTestType, ERightOperandType::SCALAR>({Nothing(), 2, 3, Nothing(), 5, 6, 7, max, 9, Nothing(), 11, 12, 13, Nothing(), Nothing(), Nothing(), min, Nothing(), 19, 20},
{77},
@@ -264,8 +311,8 @@ UNIT_TEST_WITH_INTEGER(KernelRightIsScalar) {
}
UNIT_TEST_WITH_INTEGER(KernelRightIsOptionalArray) {
- auto max = std::numeric_limits<TTestType>::max();
- auto min = std::numeric_limits<TTestType>::min();
+ auto max = std::numeric_limits<typename NUdf::TDataType<TTestType>::TLayout>::max();
+ auto min = std::numeric_limits<typename NUdf::TDataType<TTestType>::TLayout>::min();
TestBlockCoalesce<TTestType, ERightOperandType::OPTIONAL_ARRAY>({Nothing(), 2, 3, Nothing(), 5, 6, 7, max, 9, Nothing(), 11, 12, 13, Nothing(), Nothing(), Nothing(), min, Nothing(), 19, 20},
{Nothing(), 102, Nothing(), 104, Nothing(), 106, 107, 108, 109, 110, 111, 112, 113, 114, Nothing(), 116, 117, 118, Nothing(), 120},
@@ -273,8 +320,8 @@ UNIT_TEST_WITH_INTEGER(KernelRightIsOptionalArray) {
}
UNIT_TEST_WITH_INTEGER(KernelRightIsOptionalInvalidScalar) {
- auto max = std::numeric_limits<TTestType>::max();
- auto min = std::numeric_limits<TTestType>::min();
+ auto max = std::numeric_limits<typename NUdf::TDataType<TTestType>::TLayout>::max();
+ auto min = std::numeric_limits<typename NUdf::TDataType<TTestType>::TLayout>::min();
TestBlockCoalesce<TTestType, ERightOperandType::OPTIONAL_SCALAR>({Nothing(), 2, 3, Nothing(), 5, 6, 7, max, 9, Nothing(), 11, 12, 13, Nothing(), Nothing(), Nothing(), min, Nothing(), 19, 20},
{Nothing()},
@@ -282,8 +329,8 @@ UNIT_TEST_WITH_INTEGER(KernelRightIsOptionalInvalidScalar) {
}
UNIT_TEST_WITH_INTEGER(KernelRightIsOptionalValidScalar) {
- auto max = std::numeric_limits<TTestType>::max();
- auto min = std::numeric_limits<TTestType>::min();
+ auto max = std::numeric_limits<typename NUdf::TDataType<TTestType>::TLayout>::max();
+ auto min = std::numeric_limits<typename NUdf::TDataType<TTestType>::TLayout>::min();
TestBlockCoalesce<TTestType, ERightOperandType::OPTIONAL_SCALAR>({Nothing(), 2, 3, Nothing(), 5, 6, 7, max, 9, Nothing(), 11, 12, 13, Nothing(), Nothing(), Nothing(), min, Nothing(), 19, 20},
{77},
diff --git a/yql/essentials/minikql/computation/mkql_method_address_helper.h b/yql/essentials/minikql/computation/mkql_method_address_helper.h
index 058e409897..35b6a66a90 100644
--- a/yql/essentials/minikql/computation/mkql_method_address_helper.h
+++ b/yql/essentials/minikql/computation/mkql_method_address_helper.h
@@ -1,3 +1,5 @@
+#pragma once
+
#include <yql/essentials/public/udf/udf_value.h>
#if defined(_msan_enabled_) && defined(__linux__)
@@ -151,14 +153,23 @@ inline uintptr_t GetMethodPtr() {
}
#else // SHOULD_WRAP_ALL_UNBOXED_VALUES_FOR_CODEGEN
+namespace NInternal {
+template <typename Method>
+inline uintptr_t GetMethodPtr(Method method) {
+ uintptr_t ptr;
+ std::memcpy(&ptr, &method, sizeof(uintptr_t));
+ return ptr;
+}
+} // namespace NInternal
+
template <MethodPointer auto func>
inline uintptr_t GetMethodPtr() {
- return GetMethodPtr(func);
+ return NInternal::GetMethodPtr(func);
}
template <FunctionPointer auto func>
inline uintptr_t GetMethodPtr() {
- return GetMethodPtr(func);
+ return NInternal::GetMethodPtr(func);
}
#endif // SHOULD_WRAP_ALL_UNBOXED_VALUES_FOR_CODEGEN
diff --git a/yql/essentials/minikql/mkql_alloc.cpp b/yql/essentials/minikql/mkql_alloc.cpp
index 299e691f16..27a963380e 100644
--- a/yql/essentials/minikql/mkql_alloc.cpp
+++ b/yql/essentials/minikql/mkql_alloc.cpp
@@ -27,9 +27,15 @@ void TAllocState::TListEntry::Unlink() noexcept {
TAllocState::TAllocState(const TSourceLocation& location, const NKikimr::TAlignedPagePoolCounters &counters, bool supportsSizedAllocators)
: TAlignedPagePool(location, counters)
+#ifndef NDEBUG
+ , DefaultMemInfo(MakeIntrusive<TMemoryUsageInfo>("default"))
+#endif
, SupportsSizedAllocators(supportsSizedAllocators)
, CurrentPAllocList(&GlobalPAllocList)
{
+#ifndef NDEBUG
+ ActiveMemInfo.emplace(DefaultMemInfo.Get(), DefaultMemInfo);
+#endif
GetRoot()->InitLinks();
OffloadedBlocksRoot.InitLinks();
GlobalPAllocList.InitLinks();
@@ -51,17 +57,13 @@ void TAllocState::CleanupPAllocList(TListEntry* root) {
void TAllocState::CleanupArrowList(TListEntry* root) {
for (auto curr = root->Right; curr != root; ) {
auto next = curr->Right;
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
if (Y_UNLIKELY(TAllocState::IsDefaultAllocatorUsed())) {
free(curr);
} else {
-#endif
auto size = ((TMkqlArrowHeader*)curr)->Size;
auto fullSize = size + sizeof(TMkqlArrowHeader);
ReleaseAlignedPage(curr, fullSize);
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
}
-#endif
curr = next;
}
@@ -298,15 +300,11 @@ void* MKQLArrowAllocateOnArena(ui64 size) {
}
void* MKQLArrowAllocate(ui64 size) {
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
if (Y_LIKELY(!TAllocState::IsDefaultAllocatorUsed())) {
-#endif
if (size <= ArrowSizeForArena) {
return MKQLArrowAllocateOnArena(size);
}
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
}
-#endif
TAllocState* state = TlsAllocState;
Y_ENSURE(state);
@@ -316,18 +314,14 @@ void* MKQLArrowAllocate(ui64 size) {
}
void* ptr;
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
if (Y_UNLIKELY(TAllocState::IsDefaultAllocatorUsed())) {
ptr = malloc(fullSize);
if (!ptr) {
throw TMemoryLimitExceededException();
}
} else {
-#endif
ptr = GetAlignedPage(fullSize);
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
}
-#endif
auto* header = (TMkqlArrowHeader*)ptr;
header->Offset = 0;
@@ -372,15 +366,11 @@ void MKQLArrowFreeOnArena(const void* ptr) {
}
void MKQLArrowFree(const void* mem, ui64 size) {
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
if (Y_LIKELY(!TAllocState::IsDefaultAllocatorUsed())) {
-#endif
if (size <= ArrowSizeForArena) {
return MKQLArrowFreeOnArena(mem);
}
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
}
-#endif
auto fullSize = size + sizeof(TMkqlArrowHeader);
auto header = ((TMkqlArrowHeader*)mem) - 1;
@@ -396,12 +386,11 @@ void MKQLArrowFree(const void* mem, ui64 size) {
Y_ENSURE(size == header->Size);
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
if (Y_UNLIKELY(TAllocState::IsDefaultAllocatorUsed())) {
free(header);
return;
}
-#endif
+
ReleaseAlignedPage(header, fullSize);
}
@@ -412,9 +401,7 @@ void MKQLArrowUntrack(const void* mem, ui64 size) {
return;
}
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
if (Y_LIKELY(!TAllocState::IsDefaultAllocatorUsed())) {
-#endif
if (size <= ArrowSizeForArena) {
auto* page = (TMkqlArrowHeader*)TAllocState::GetPageStart(mem);
@@ -431,9 +418,7 @@ void MKQLArrowUntrack(const void* mem, ui64 size) {
return;
}
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
}
-#endif
auto it = state->ArrowBuffers.find(mem);
if (it == state->ArrowBuffers.end()) {
diff --git a/yql/essentials/minikql/mkql_alloc.h b/yql/essentials/minikql/mkql_alloc.h
index 24bbbb8e9e..94a7747106 100644
--- a/yql/essentials/minikql/mkql_alloc.h
+++ b/yql/essentials/minikql/mkql_alloc.h
@@ -13,6 +13,7 @@
#include <unordered_map>
#include <atomic>
#include <memory>
+#include <source_location>
namespace NKikimr {
@@ -43,6 +44,25 @@ static_assert(sizeof(TAllocPageHeader) % MKQL_ALIGNMENT == 0, "Incorrect size of
struct TMkqlArrowHeader;
+#ifndef NDEBUG
+using TAllocLocation = std::source_location;
+#else
+struct TAllocLocation
+{
+ const char* file_name() const {
+ return "";
+ }
+
+ std::uint_least32_t line() const {
+ return 0;
+ }
+
+ static TAllocLocation current() {
+ return {};
+ }
+};
+#endif
+
struct TAllocState : public TAlignedPagePool
{
struct TListEntry {
@@ -57,31 +77,11 @@ struct TAllocState : public TAlignedPagePool
};
#ifndef NDEBUG
+ TIntrusivePtr<TMemoryUsageInfo> DefaultMemInfo;
std::unordered_map<TMemoryUsageInfo*, TIntrusivePtr<TMemoryUsageInfo>> ActiveMemInfo;
#endif
bool SupportsSizedAllocators = false;
- void* LargeAlloc(size_t size) {
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
- if (Y_UNLIKELY(IsDefaultAllocatorUsed())) {
- return malloc(size);
- }
-#endif
-
- return Alloc(size);
- }
-
- void LargeFree(void* ptr, size_t size) noexcept {
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
- if (Y_UNLIKELY(IsDefaultAllocatorUsed())) {
- free(ptr);
- return;
- }
-#endif
-
- Free(ptr, size);
- }
-
using TCurrentPages = std::array<TAllocPageHeader*, (TMemorySubPoolIdx)EMemorySubPool::Count>;
static TAllocPageHeader EmptyPageHeader;
@@ -307,10 +307,12 @@ private:
void* MKQLAllocSlow(size_t sz, TAllocState* state, const EMemorySubPool mPool);
-inline void* MKQLAllocFastDeprecated(size_t sz, TAllocState* state, const EMemorySubPool mPool) {
+inline void* MKQLAllocFastDeprecated(size_t sz, TAllocState* state, const EMemorySubPool mPool, const TAllocLocation& location = TAllocLocation::current()) {
+#ifdef NDEBUG
+ Y_UNUSED(location);
+#endif
Y_DEBUG_ABORT_UNLESS(state);
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
if (Y_UNLIKELY(TAllocState::IsDefaultAllocatorUsed())) {
auto ret = (TAllocState::TListEntry*)malloc(sizeof(TAllocState::TListEntry) + sz);
if (!ret) {
@@ -318,28 +320,37 @@ inline void* MKQLAllocFastDeprecated(size_t sz, TAllocState* state, const EMemor
}
ret->Link(&state->OffloadedBlocksRoot);
+#ifndef NDEBUG
+ state->DefaultMemInfo->Take(ret + 1, sz, { location.file_name(), (int)location.line() });
+#endif
return ret + 1;
}
-#endif
auto currPage = state->CurrentPages[(TMemorySubPoolIdx)mPool];
if (Y_LIKELY(currPage->Offset + sz <= currPage->Capacity)) {
void* ret = (char*)currPage + currPage->Offset;
currPage->Offset = AlignUp(currPage->Offset + sz, MKQL_ALIGNMENT);
++currPage->UseCount;
+#ifndef NDEBUG
+ state->DefaultMemInfo->Take(ret, sz, { location.file_name(), (int)location.line() });
+#endif
return ret;
}
- return MKQLAllocSlow(sz, state, mPool);
+ auto ret = MKQLAllocSlow(sz, state, mPool);
+#ifndef NDEBUG
+ state->DefaultMemInfo->Take(ret, sz, { location.file_name(), (int)location.line() });
+#endif
+ return ret;
}
-inline void* MKQLAllocFastWithSize(size_t sz, TAllocState* state, const EMemorySubPool mPool) {
+inline void* MKQLAllocFastWithSize(size_t sz, TAllocState* state, const EMemorySubPool mPool, const TAllocLocation& location = TAllocLocation::current()) {
+#ifdef NDEBUG
+ Y_UNUSED(location);
+#endif
Y_DEBUG_ABORT_UNLESS(state);
- bool useMalloc = state->SupportsSizedAllocators && sz > MaxPageUserData;
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
- useMalloc = useMalloc || TAllocState::IsDefaultAllocatorUsed();
-#endif
+ bool useMalloc = (state->SupportsSizedAllocators && sz > MaxPageUserData) || TAllocState::IsDefaultAllocatorUsed();
if (Y_UNLIKELY(useMalloc)) {
state->OffloadAlloc(sizeof(TAllocState::TListEntry) + sz);
@@ -349,6 +360,9 @@ inline void* MKQLAllocFastWithSize(size_t sz, TAllocState* state, const EMemoryS
}
ret->Link(&state->OffloadedBlocksRoot);
+#ifndef NDEBUG
+ state->DefaultMemInfo->Take(ret + 1, sz, { location.file_name(), (int)location.line() });
+#endif
return ret + 1;
}
@@ -357,10 +371,17 @@ inline void* MKQLAllocFastWithSize(size_t sz, TAllocState* state, const EMemoryS
void* ret = (char*)currPage + currPage->Offset;
currPage->Offset = AlignUp(currPage->Offset + sz, MKQL_ALIGNMENT);
++currPage->UseCount;
+#ifndef NDEBUG
+ state->DefaultMemInfo->Take(ret, sz, { location.file_name(), (int)location.line() });
+#endif
return ret;
}
- return MKQLAllocSlow(sz, state, mPool);
+ auto ret = MKQLAllocSlow(sz, state, mPool);
+#ifndef NDEBUG
+ state->DefaultMemInfo->Take(ret, sz, { location.file_name(), (int)location.line() });
+#endif
+ return ret;
}
void MKQLFreeSlow(TAllocPageHeader* header, TAllocState *state, const EMemorySubPool mPool) noexcept;
@@ -370,7 +391,10 @@ inline void MKQLFreeDeprecated(const void* mem, const EMemorySubPool mPool) noex
return;
}
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
+#ifndef NDEBUG
+ TlsAllocState->DefaultMemInfo->Return(mem);
+#endif
+
if (Y_UNLIKELY(TAllocState::IsDefaultAllocatorUsed())) {
TAllocState *state = TlsAllocState;
Y_DEBUG_ABORT_UNLESS(state);
@@ -380,7 +404,6 @@ inline void MKQLFreeDeprecated(const void* mem, const EMemorySubPool mPool) noex
free(entry);
return;
}
-#endif
TAllocPageHeader* header = (TAllocPageHeader*)TAllocState::GetPageStart(mem);
Y_DEBUG_ABORT_UNLESS(header->MyAlloc == TlsAllocState, "%s", (TStringBuilder() << "wrong allocator was used; "
@@ -398,12 +421,12 @@ inline void MKQLFreeFastWithSize(const void* mem, size_t sz, TAllocState* state,
}
Y_DEBUG_ABORT_UNLESS(state);
-
- bool useFree = state->SupportsSizedAllocators && sz > MaxPageUserData;
-#if defined(ALLOW_DEFAULT_ALLOCATOR)
- useFree = useFree || TAllocState::IsDefaultAllocatorUsed();
+#ifndef NDEBUG
+ state->DefaultMemInfo->Return(mem, sz);
#endif
+ bool useFree = (state->SupportsSizedAllocators && sz > MaxPageUserData) || TAllocState::IsDefaultAllocatorUsed();
+
if (Y_UNLIKELY(useFree)) {
auto entry = (TAllocState::TListEntry*)(mem) - 1;
entry->Unlink();
@@ -423,12 +446,12 @@ inline void MKQLFreeFastWithSize(const void* mem, size_t sz, TAllocState* state,
MKQLFreeSlow(header, state, mPool);
}
-inline void* MKQLAllocDeprecated(size_t sz, const EMemorySubPool mPool) {
- return MKQLAllocFastDeprecated(sz, TlsAllocState, mPool);
+inline void* MKQLAllocDeprecated(size_t sz, const EMemorySubPool mPool, const TAllocLocation& location = TAllocLocation::current()) {
+ return MKQLAllocFastDeprecated(sz, TlsAllocState, mPool, location);
}
-inline void* MKQLAllocWithSize(size_t sz, const EMemorySubPool mPool) {
- return MKQLAllocFastWithSize(sz, TlsAllocState, mPool);
+inline void* MKQLAllocWithSize(size_t sz, const EMemorySubPool mPool, const TAllocLocation& location = TAllocLocation::current()) {
+ return MKQLAllocFastWithSize(sz, TlsAllocState, mPool, location);
}
inline void MKQLFreeWithSize(const void* mem, size_t sz, const EMemorySubPool mPool) noexcept {
@@ -478,6 +501,14 @@ struct TWithMiniKQLAlloc {
};
template <typename T, typename... Args>
+T* AllocateOn(const TAllocLocation& location, TAllocState* state, Args&&... args)
+{
+ void* addr = MKQLAllocFastWithSize(sizeof(T), state, T::MemoryPool, location);
+ return ::new(addr) T(std::forward<Args>(args)...);
+ static_assert(std::is_base_of<TWithMiniKQLAlloc<T::MemoryPool>, T>::value, "Class must inherit TWithMiniKQLAlloc.");
+}
+
+template <typename T, typename... Args>
T* AllocateOn(TAllocState* state, Args&&... args)
{
void* addr = MKQLAllocFastWithSize(sizeof(T), state, T::MemoryPool);
diff --git a/yql/essentials/minikql/mkql_string_util_ut.cpp b/yql/essentials/minikql/mkql_string_util_ut.cpp
index 9826ee0ee1..f0d5545ab7 100644
--- a/yql/essentials/minikql/mkql_string_util_ut.cpp
+++ b/yql/essentials/minikql/mkql_string_util_ut.cpp
@@ -9,10 +9,10 @@ using namespace NKikimr::NMiniKQL;
Y_UNIT_TEST_SUITE(TMiniKQLStringUtils) {
Y_UNIT_TEST(SubstringWithLargeOffset) {
TScopedAlloc alloc(__LOCATION__);
- const auto big = MakeStringNotFilled(NUdf::TUnboxedValuePod::OffsetLimit << 1U);
- const auto sub0 = SubString(big, 1U, 42U);
- const auto sub1 = SubString(big, NUdf::TUnboxedValuePod::OffsetLimit - 1U, 42U);
- const auto sub2 = SubString(big, NUdf::TUnboxedValuePod::OffsetLimit, 42U);
+ const auto big = MakeStringNotFilled(/*size=*/NUdf::TUnboxedValuePod::OffsetLimit << 1U);
+ const auto sub0 = NUdf::TUnboxedValue(SubString(big, 1U, 42U));
+ const auto sub1 = NUdf::TUnboxedValue(SubString(big, NUdf::TUnboxedValuePod::OffsetLimit - 1U, 42U));
+ const auto sub2 = NUdf::TUnboxedValue(SubString(big, NUdf::TUnboxedValuePod::OffsetLimit, 42U));
UNIT_ASSERT(sub0.AsStringValue().Data() == sub1.AsStringValue().Data());
UNIT_ASSERT(sub1.AsStringValue().Data() != sub2.AsStringValue().Data());
diff --git a/yql/essentials/minikql/mkql_type_builder.cpp b/yql/essentials/minikql/mkql_type_builder.cpp
index c9d6e363b4..41502b144a 100644
--- a/yql/essentials/minikql/mkql_type_builder.cpp
+++ b/yql/essentials/minikql/mkql_type_builder.cpp
@@ -2820,7 +2820,6 @@ TType* TTypeBuilder::ValidateBlockStructType(const TStructType* structType) cons
MKQL_ENSURE(isScalar, "Block length column should be scalar");
MKQL_ENSURE(AS_TYPE(TDataType, itemType)->GetSchemeType() == NUdf::TDataType<ui64>::Id, "Expected Uint64");
- MKQL_ENSURE(!hasBlockLengthColumn, "Block struct must contain only one block length column");
hasBlockLengthColumn = true;
} else {
outStructItems.emplace_back(structType->GetMemberName(i), itemType);
diff --git a/yql/essentials/providers/common/config/yql_dispatch.cpp b/yql/essentials/providers/common/config/yql_dispatch.cpp
index 536efac6d9..5a8b2573b1 100644
--- a/yql/essentials/providers/common/config/yql_dispatch.cpp
+++ b/yql/essentials/providers/common/config/yql_dispatch.cpp
@@ -164,6 +164,12 @@ void TSettingDispatcher::Restore() {
}
}
+void TSettingDispatcher::Enumerate(std::function<void(std::string_view)> callback) {
+ for (const auto& name : Names) {
+ callback(name);
+ }
+}
+
TSettingDispatcher::TErrorCallback TSettingDispatcher::GetDefaultErrorCallback() {
return [] (const TString& msg, bool isError) -> bool {
if (isError) {
diff --git a/yql/essentials/providers/common/config/yql_dispatch.h b/yql/essentials/providers/common/config/yql_dispatch.h
index 9c068c9334..0b7a9077dd 100644
--- a/yql/essentials/providers/common/config/yql_dispatch.h
+++ b/yql/essentials/providers/common/config/yql_dispatch.h
@@ -15,6 +15,7 @@
#include <util/generic/strbuf.h>
#include <util/generic/hash.h>
#include <util/generic/hash_set.h>
+#include <util/generic/set.h>
#include <util/generic/yexception.h>
#include <util/generic/vector.h>
#include <util/generic/guid.h>
@@ -344,6 +345,11 @@ public:
if (!Handlers.insert({NormalizeName(name), handler}).second) {
ythrow yexception() << "Duplicate configuration setting name " << name.Quote();
}
+
+ if (!name.StartsWith('_')) {
+ Names.insert(name);
+ }
+
return *handler;
}
@@ -383,10 +389,12 @@ public:
void Restore();
static TErrorCallback GetDefaultErrorCallback();
static TErrorCallback GetErrorCallback(TPositionHandle pos, TExprContext& ctx);
+ void Enumerate(std::function<void(std::string_view)> callback);
protected:
THashSet<TString> ValidClusters;
THashMap<TString, TSettingHandler::TPtr> Handlers;
+ TSet<TString> Names;
};
} // namespace NCommon
diff --git a/yql/essentials/providers/common/mkql/yql_provider_mkql.cpp b/yql/essentials/providers/common/mkql/yql_provider_mkql.cpp
index 983bf85542..abe2f9c1e9 100644
--- a/yql/essentials/providers/common/mkql/yql_provider_mkql.cpp
+++ b/yql/essentials/providers/common/mkql/yql_provider_mkql.cpp
@@ -448,7 +448,9 @@ TMkqlCommonCallableCompiler::TShared::TShared() {
{"FromFlow", &TProgramBuilder::FromFlow},
{"WideToBlocks", &TProgramBuilder::WideToBlocks},
+ {"ListToBlocks", &TProgramBuilder::ListToBlocks},
{"WideFromBlocks", &TProgramBuilder::WideFromBlocks},
+ {"ListFromBlocks", &TProgramBuilder::ListFromBlocks},
{"AsScalar", &TProgramBuilder::AsScalar},
{"Just", &TProgramBuilder::NewOptional},
diff --git a/yql/essentials/providers/common/proto/gateways_config.proto b/yql/essentials/providers/common/proto/gateways_config.proto
index 972e4303e0..b2d02ab309 100644
--- a/yql/essentials/providers/common/proto/gateways_config.proto
+++ b/yql/essentials/providers/common/proto/gateways_config.proto
@@ -598,6 +598,7 @@ enum EGenericDataSourceKind {
MONGO_DB = 10;
REDIS = 11;
PROMETHEUS = 12;
+ ICEBERG = 13;
}
// EGenericProtocol generalizes various kinds of network protocols supported by different databases.
@@ -671,6 +672,57 @@ message TMongoDbDataSourceOptions {
optional EUnsupportedTypeDisplayMode unsupported_type_display_mode = 3;
}
+// TIcebergCatalog represents settings specific to an Iceberg catalog
+message TIcebergCatalog {
+ // Hadoop Iceberg Catalog which is built on top of a storage
+ message THadoop {
+ }
+
+ // Hive Iceberg Catalog which is based on a Hive Metastore
+ message THive {
+ // Location of a hive metastore
+ // e.g., thrift://host:9083/
+ optional string uri = 1;
+ }
+
+ oneof payload {
+ THadoop hadoop = 1;
+ THive hive = 2;
+ }
+}
+
+// TIcebergWarehouse represents settings specific to iceberg warehouse
+message TIcebergWarehouse {
+ // Iceberg data located in a S3 storage
+ message TS3 {
+ // Data location in a storage
+ // e.g., s3a://iceberg-bucket/storage
+ optional string uri = 1;
+
+ // Endpoint to access a storage
+ // e.g., https://storage.yandexcloud.net
+ optional string endpoint = 2;
+
+ // Region where a storage is located
+ // e.g., ru-central1
+ optional string region = 3;
+ }
+
+ oneof payload {
+ TS3 s3 = 1;
+ }
+}
+
+// TIcebergDataSourceOptions represents settings specific
+// to an Iceberg data source
+message TIcebergDataSourceOptions {
+ // Iceberg catalog
+ optional TIcebergCatalog catalog = 1;
+
+ // Iceberg warehouse
+ optional TIcebergWarehouse warehouse = 2;
+}
+
// TGenericDataSourceInstance helps to identify the instance of a data source to redirect request to.
message TGenericDataSourceInstance {
// Data source kind
@@ -696,6 +748,7 @@ message TGenericDataSourceInstance {
TOracleDataSourceOptions oracle_options = 11;
TLoggingDataSourceOptions logging_options = 12;
TMongoDbDataSourceOptions mongodb_options = 13;
+ TIcebergDataSourceOptions iceberg_options = 14;
}
}
diff --git a/yql/essentials/providers/common/provider/yql_provider.cpp b/yql/essentials/providers/common/provider/yql_provider.cpp
index 7f3ef39e9a..f5fdda4052 100644
--- a/yql/essentials/providers/common/provider/yql_provider.cpp
+++ b/yql/essentials/providers/common/provider/yql_provider.cpp
@@ -1084,7 +1084,7 @@ bool FillUsedFilesImpl(
return childrenOk;
}
-static void GetToken(const TString& string, TString& out, const TTypeAnnotationContext& type) {
+void GetToken(const TString& string, TString& out, const TTypeAnnotationContext& type) {
auto separator = string.find(":");
const auto p0 = string.substr(0, separator);
if (p0 == "api") {
diff --git a/yql/essentials/providers/common/provider/yql_provider.h b/yql/essentials/providers/common/provider/yql_provider.h
index efddf213ff..4410f1cbc2 100644
--- a/yql/essentials/providers/common/provider/yql_provider.h
+++ b/yql/essentials/providers/common/provider/yql_provider.h
@@ -216,6 +216,8 @@ void TransformerStatsToYson(const TString& name, const IGraphTransformer::TStati
TString TransformerStatsToYson(const IGraphTransformer::TStatistics& stats, NYson::EYsonFormat format
= NYson::EYsonFormat::Pretty);
+void GetToken(const TString& string, TString& out, const TTypeAnnotationContext& type);
+
void FillSecureParams(const TExprNode::TPtr& node, const TTypeAnnotationContext& types, THashMap<TString, TString>& secureParams);
bool FillUsedFiles(const TExprNode& node, TUserDataTable& files, const TTypeAnnotationContext& types, TExprContext& ctx, const TUserDataTable& crutches = {});
diff --git a/yql/essentials/public/fastcheck/format.cpp b/yql/essentials/public/fastcheck/format.cpp
index 26a1bce2ca..dac43f0ffa 100644
--- a/yql/essentials/public/fastcheck/format.cpp
+++ b/yql/essentials/public/fastcheck/format.cpp
@@ -4,6 +4,7 @@
#include <yql/essentials/sql/v1/lexer/antlr4_ansi/lexer.h>
#include <yql/essentials/sql/v1/proto_parser/antlr4/proto_parser.h>
#include <yql/essentials/sql/v1/proto_parser/antlr4_ansi/proto_parser.h>
+#include <yql/essentials/core/issue/yql_issue.h>
#include <util/string/builder.h>
namespace NYql {
@@ -76,7 +77,7 @@ private:
auto formatter = NSQLFormat::MakeSqlFormatter(lexers, parsers, settings);
TString formattedQuery;
res.Success = formatter->Format(request.Program, formattedQuery, res.Issues);
- if (res.Success && formattedQuery != NormalizeEOL(request.Program)) {
+ if (res.Success && NormalizeEOL(formattedQuery) != NormalizeEOL(request.Program)) {
res.Success = false;
TPosition origPos(0, 1, request.File);
TTextWalker origWalker(origPos, true);
@@ -104,8 +105,10 @@ private:
origSample.erase(origSample.size() - 1);
}
- res.Issues.AddIssue(TIssue(origPos, TStringBuilder() <<
- "Format mismatch, expected:\n" << formattedSample << "\nbut got:\n" << origSample));
+ auto issue = TIssue(origPos, TStringBuilder() <<
+ "Format mismatch, expected:\n" << formattedSample << "\nbut got:\n" << origSample);
+ issue.SetCode(EYqlIssueCode::TIssuesIds_EIssueCode_WARNING, ESeverity::TSeverityIds_ESeverityId_S_WARNING);
+ res.Issues.AddIssue(issue);
}
return res;
diff --git a/yql/essentials/public/fastcheck/linter_ut.cpp b/yql/essentials/public/fastcheck/linter_ut.cpp
index 2c5c0bbd0a..c0315d20cf 100644
--- a/yql/essentials/public/fastcheck/linter_ut.cpp
+++ b/yql/essentials/public/fastcheck/linter_ut.cpp
@@ -115,6 +115,19 @@ Y_UNIT_TEST_SUITE(TLinterTests) {
UNIT_ASSERT_VALUES_EQUAL(res.Checks[0].Issues.Size(), 0);
}
+ Y_UNIT_TEST(GoodFormatYqlWithWinEOLInComment) {
+ TChecksRequest request;
+ request.Program = "--\r\nSELECT\n 1\n;\n\nSELECT\n 2\n;\n";
+ request.Syntax = ESyntax::YQL;
+ request.Filters.ConstructInPlace();
+ request.Filters->push_back(TCheckFilter{.CheckNameGlob = "format"});
+ auto res = RunChecks(request);
+ UNIT_ASSERT_VALUES_EQUAL(res.Checks.size(), 1);
+ UNIT_ASSERT_VALUES_EQUAL(res.Checks[0].CheckName, "format");
+ UNIT_ASSERT_C(res.Checks[0].Success, res.Checks[0].Issues.ToString());
+ UNIT_ASSERT_VALUES_EQUAL(res.Checks[0].Issues.Size(), 0);
+ }
+
Y_UNIT_TEST(UnparsedFormatYql) {
TChecksRequest request;
request.Program = "select1\n";
diff --git a/yql/essentials/public/fastcheck/ya.make b/yql/essentials/public/fastcheck/ya.make
index 477b0dcee3..02488500a8 100644
--- a/yql/essentials/public/fastcheck/ya.make
+++ b/yql/essentials/public/fastcheck/ya.make
@@ -13,6 +13,7 @@ PEERDIR(
yql/essentials/ast
yql/essentials/core/services/mounts
yql/essentials/core/user_data
+ yql/essentials/core/issue/protos
yql/essentials/public/udf/service/exception_policy
yql/essentials/sql
yql/essentials/sql/pg
diff --git a/yql/essentials/public/udf/arrow/bit_util.h b/yql/essentials/public/udf/arrow/bit_util.h
index 091a47bbcc..d8911d4a88 100644
--- a/yql/essentials/public/udf/arrow/bit_util.h
+++ b/yql/essentials/public/udf/arrow/bit_util.h
@@ -143,6 +143,18 @@ Y_FORCE_INLINE ui32 ReplicateEachBitFourTimes(ui8 b) {
return x;
}
+// Replicate each bit of an 8-bit value eight times.
+// Example: 0b01010101 -> 0b0000000011111111000000001111111100000000111111110000000011111111.
+Y_FORCE_INLINE ui64 ReplicateEachBitEightTimes(ui8 x) {
+ ui64 expanded = x;
+ expanded = (expanded * 0x8040201008040201ULL);
+ expanded &= 0x8080808080808080ULL;
+ expanded >>= 7;
+ expanded *= 0xFF;
+ expanded = NYql::SwapBytes(expanded);
+ return expanded;
+}
+
// BitToByteExpand - Expands the individual bits of an 8-bit input x into an array of 8 elements of type TType.
// Each output element corresponds to one bit from the original value, expanded (via specialized routines) to fill the entire TType
// Example: BitToByteExpand<ui8>(0b10101010) yields REVERSE({0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00}).
@@ -153,12 +165,7 @@ Y_FORCE_INLINE std::array<TType, 8> BitToByteExpand(ui8 x);
template <>
Y_FORCE_INLINE std::array<ui8, 8> BitToByteExpand(ui8 x) {
std::array<ui8, 8> result;
- ui64 expanded = x;
- expanded = (expanded * 0x8040201008040201ULL);
- expanded &= 0x8080808080808080ULL;
- expanded >>= 7;
- expanded *= 0xFF;
- expanded = NYql::SwapBytes(expanded);
+ ui64 expanded = ReplicateEachBitEightTimes(x);
memcpy(&result[0], &expanded, sizeof(expanded));
return result;
}
@@ -186,5 +193,18 @@ Y_FORCE_INLINE std::array<ui32, 8> BitToByteExpand(ui8 x) {
return output;
}
+
+template <>
+Y_FORCE_INLINE std::array<ui64, 8> BitToByteExpand(ui8 x) {
+ std::array<ui8, 8> input = BitToByteExpand<ui8>(x);
+ std::array<ui64, 8> output{};
+
+ for (size_t i = 0; i < 8; ++i) {
+ output[i] = ReplicateEachBitEightTimes(input[i]);
+ }
+
+ return output;
}
+
+} // namespace NUdf
}
diff --git a/yql/essentials/public/udf/arrow/ut/bit_util_ut.cpp b/yql/essentials/public/udf/arrow/ut/bit_util_ut.cpp
index 4af399c8de..dd95030c1b 100644
--- a/yql/essentials/public/udf/arrow/ut/bit_util_ut.cpp
+++ b/yql/essentials/public/udf/arrow/ut/bit_util_ut.cpp
@@ -80,6 +80,25 @@ Y_UNIT_TEST(ReplicateEachBitFourTimes) {
UNIT_ASSERT_EQUAL(ReplicateEachBitFourTimes(0x80), 0xF0000000);
}
+Y_UNIT_TEST(ReplicateEachBitEightTimes) {
+ // Test case 1: All zeros
+ UNIT_ASSERT_EQUAL(ReplicateEachBitEightTimes(0x00), 0x00000000);
+
+ // Test case 2: All ones
+ UNIT_ASSERT_EQUAL(ReplicateEachBitEightTimes(0xFF), 0xFFFFFFFFFFFFFFFF);
+
+ // Test case 3: Alternating bits
+ UNIT_ASSERT_EQUAL(ReplicateEachBitEightTimes(0x55), 0x00FF00FF00FF00FF);
+ UNIT_ASSERT_EQUAL(ReplicateEachBitEightTimes(0xAA), 0xFF00FF00FF00FF00);
+
+ // Test case 4: Random pattern
+ UNIT_ASSERT_EQUAL(ReplicateEachBitEightTimes(0x3C), 0x0000FFFFFFFF0000);
+
+ // Test case 5: Single bit set
+ UNIT_ASSERT_EQUAL(ReplicateEachBitEightTimes(0x01), 0x00000000000000FF);
+ UNIT_ASSERT_EQUAL(ReplicateEachBitEightTimes(0x80), 0xFF00000000000000);
+}
+
Y_UNIT_TEST(BitToByteExpand) {
auto testBody = [](auto n) {
using T = decltype(n);
@@ -125,6 +144,7 @@ Y_UNIT_TEST(BitToByteExpand) {
testBody(ui8());
testBody(ui16());
testBody(ui32());
+ testBody(ui64());
}
} // Y_UNIT_TEST_SUITE(BitExpanding)
diff --git a/yql/essentials/sql/v1/SQLv1.g.in b/yql/essentials/sql/v1/SQLv1.g.in
index dbec682803..6ba9135831 100644
--- a/yql/essentials/sql/v1/SQLv1.g.in
+++ b/yql/essentials/sql/v1/SQLv1.g.in
@@ -1100,7 +1100,7 @@ alter_sequence_action:
| INCREMENT BY? integer
;
-show_create_table_stmt: SHOW CREATE TABLE simple_table_ref;
+show_create_table_stmt: SHOW CREATE (TABLE | VIEW) simple_table_ref;
// Special rules that allow to use certain keywords as identifiers.
identifier: ID_PLAIN | ID_QUOTED;
diff --git a/yql/essentials/sql/v1/SQLv1Antlr4.g.in b/yql/essentials/sql/v1/SQLv1Antlr4.g.in
index fb92a68f9a..66be65e2ae 100644
--- a/yql/essentials/sql/v1/SQLv1Antlr4.g.in
+++ b/yql/essentials/sql/v1/SQLv1Antlr4.g.in
@@ -1100,7 +1100,7 @@ alter_sequence_action:
| INCREMENT BY? integer
;
-show_create_table_stmt: SHOW CREATE TABLE simple_table_ref;
+show_create_table_stmt: SHOW CREATE (TABLE | VIEW) simple_table_ref;
// Special rules that allow to use certain keywords as identifiers.
identifier: ID_PLAIN | ID_QUOTED;
@@ -1775,9 +1775,7 @@ bool_value: (TRUE | FALSE);
real: REAL;
integer: DIGITS | INTEGER_VALUE;
-//
-// Lexer
-//
+//! section:punctuation
EQUALS: '=';
EQUALS2: '==';
@@ -1823,6 +1821,8 @@ fragment QUOTE_SINGLE: '\'';
fragment BACKTICK: '`';
fragment DOUBLE_COMMAT: '@@';
+//! section:letter
+
// http://www.antlr.org/wiki/pages/viewpage.action?pageId=1782
fragment A:('a'|'A');
fragment B:('b'|'B');
@@ -1851,6 +1851,8 @@ fragment X:('x'|'X');
fragment Y:('y'|'Y');
fragment Z:('z'|'Z');
+//! section:keyword
+
ABORT: A B O R T;
ACTION: A C T I O N;
ADD: A D D;
@@ -2144,13 +2146,7 @@ WRAPPER: W R A P P E R;
//WRITE: W R I T E;
XOR: X O R;
-// YQL Default Lexer:
-// GRAMMAR_STRING_CORE_SINGLE = ~(QUOTE_SINGLE | BACKSLASH) | (BACKSLASH .)
-// GRAMMAR_STRING_CORE_DOUBLE = ~(QUOTE_DOUBLE | BACKSLASH) | (BACKSLASH .)
-
-// ANSI Lexer:
-// GRAMMAR_STRING_CORE_SINGLE = ~QUOTE_SINGLE | (QUOTE_SINGLE QUOTE_SINGLE)
-// GRAMMAR_STRING_CORE_DOUBLE = ~QUOTE_DOUBLE | (QUOTE_DOUBLE QUOTE_DOUBLE)
+//! section:other
fragment STRING_CORE_SINGLE: @GRAMMAR_STRING_CORE_SINGLE@;
fragment STRING_CORE_DOUBLE: @GRAMMAR_STRING_CORE_DOUBLE@;
@@ -2163,7 +2159,7 @@ STRING_VALUE: ((STRING_SINGLE | STRING_DOUBLE | STRING_MULTILINE) (S | U | Y | J
ID_PLAIN: ('a'..'z' | 'A'..'Z' | '_') ('a'..'z' | 'A'..'Z' | '_' | DIGIT)*;
-fragment ID_QUOTED_CORE: '\\'. | '``' | ~('`' | '\\');
+fragment ID_QUOTED_CORE: '\\' . | '``' | ~('`' | '\\');
ID_QUOTED: BACKTICK ID_QUOTED_CORE* BACKTICK;
fragment DIGIT: '0'..'9';
@@ -2177,23 +2173,18 @@ DIGITS: DECDIGITS | HEXDIGITS | OCTDIGITS | BINDIGITS;
// not all combinations of P/U with L/S/T/I/B/N are actually valid - this is resolved in sql.cpp
INTEGER_VALUE: DIGITS ((P | U)? (L | S | T | I | B | N)?);
-fragment FLOAT_EXP : E (PLUS | MINUS)? DECDIGITS ;
+fragment FLOAT_EXP: E (PLUS | MINUS)? DECDIGITS;
REAL:
(
DECDIGITS DOT DIGIT* FLOAT_EXP?
| DECDIGITS FLOAT_EXP
// | DOT DECDIGITS FLOAT_EXP? // Conflicts with tuple element access through DOT
- ) (F | P (F ('4'|'8') | N)?)?
+ ) (F | P (F ('4' | '8') | N)?)?
;
BLOB: X QUOTE_SINGLE HEXDIGIT+ QUOTE_SINGLE;
-// YQL Default Lexer:
-// GRAMMAR_MULTILINE_COMMENT_CORE = .
-// ANSI Lexer:
-// GRAMMAR_MULTILINE_COMMENT_CORE = MULTILINE_COMMENT | .
-
fragment MULTILINE_COMMENT: '/*' ( @GRAMMAR_MULTILINE_COMMENT_CORE@ )*? '*/';
-fragment LINE_COMMENT: '--' ~('\n'|'\r')* ('\r' '\n'? | '\n' | EOF);
-WS: (' '|'\r'|'\t'|'\u000C'|'\n')->channel(HIDDEN);
-COMMENT: (MULTILINE_COMMENT|LINE_COMMENT)->channel(HIDDEN);
+fragment LINE_COMMENT: '--' ~('\n' | '\r')* ('\r' '\n'? | '\n' | EOF);
+WS: (' ' | '\r' | '\t' | '\u000C' | '\n') -> channel(HIDDEN);
+COMMENT: (MULTILINE_COMMENT | LINE_COMMENT) -> channel(HIDDEN);
diff --git a/yql/essentials/sql/v1/builtin.cpp b/yql/essentials/sql/v1/builtin.cpp
index 6396eab0c8..d1b5e36c8c 100644
--- a/yql/essentials/sql/v1/builtin.cpp
+++ b/yql/essentials/sql/v1/builtin.cpp
@@ -2701,15 +2701,29 @@ enum EAggrFuncTypeCallback {
};
struct TCoreFuncInfo {
- TString Name;
+ std::string_view Name;
ui32 MinArgs;
ui32 MaxArgs;
};
using TAggrFuncFactoryCallback = std::function<INode::TPtr(TPosition pos, const TVector<TNodePtr>& args, EAggregateMode aggMode, bool isFactory)>;
-using TAggrFuncFactoryCallbackMap = std::unordered_map<TString, TAggrFuncFactoryCallback, THash<TString>>;
+
+struct TAggrFuncFactoryInfo {
+ std::string_view CanonicalSqlName;
+ std::string_view Kind;
+ TAggrFuncFactoryCallback Callback;
+};
+
+using TAggrFuncFactoryCallbackMap = std::unordered_map<TString, TAggrFuncFactoryInfo, THash<TString>>;
using TBuiltinFactoryCallback = std::function<TNodePtr(TPosition pos, const TVector<TNodePtr>& args)>;
-using TBuiltinFactoryCallbackMap = std::unordered_map<TString, TBuiltinFactoryCallback, THash<TString>>;
+
+struct TBuiltinFuncInfo {
+ std::string_view CanonicalSqlName;
+ std::string_view Kind;
+ TBuiltinFactoryCallback Callback;
+};
+
+using TBuiltinFactoryCallbackMap = std::unordered_map<TString, TBuiltinFuncInfo, THash<TString>>;
using TCoreFuncMap = std::unordered_map<TString, TCoreFuncInfo, THash<TString>>;
TAggrFuncFactoryCallback BuildAggrFuncFactoryCallback(
@@ -2880,315 +2894,315 @@ struct TBuiltinFuncData {
TBuiltinFactoryCallbackMap MakeBuiltinFuncs() {
TBuiltinFactoryCallbackMap builtinFuncs = {
// Branching
- {"if", BuildSimpleBuiltinFactoryCallback<TYqlIf<false>>()},
- {"ifstrict", BuildSimpleBuiltinFactoryCallback<TYqlIf<true>>() },
+ {"if", {"If", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlIf<false>>()}},
+ {"ifstrict", {"IfStrict", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlIf<true>>() }},
// String builtins
- {"len", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Size", 1, 1)},
- {"length", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Size", 1, 1)},
- {"charlength", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Size", 1, 1)},
- {"characterlength", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Size", 1, 1)},
- {"substring", BuildNamedBuiltinFactoryCallback<TYqlSubstring>("Substring")},
- {"find", BuildNamedBuiltinFactoryCallback<TYqlSubstring>("Find")},
- {"rfind", BuildNamedBuiltinFactoryCallback<TYqlSubstring>("RFind")},
- {"byteat", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ByteAt", 2, 2) },
- {"startswith", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StartsWith", 2, 2)},
- {"endswith", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EndsWith", 2, 2)},
+ {"len", {"Length", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Size", 1, 1)}},
+ {"length", {"Length", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Size", 1, 1)}},
+ {"charlength", {"Length", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Size", 1, 1)}},
+ {"characterlength", {"Length", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Size", 1, 1)}},
+ {"substring", {"Substring", "Normal", BuildNamedBuiltinFactoryCallback<TYqlSubstring>("Substring")}},
+ {"find", {"Find", "Normal", BuildNamedBuiltinFactoryCallback<TYqlSubstring>("Find")}},
+ {"rfind", {"RFind", "Normal", BuildNamedBuiltinFactoryCallback<TYqlSubstring>("RFind")}},
+ {"byteat", {"ByteAt", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ByteAt", 2, 2)}},
+ {"startswith", {"StartsWith", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StartsWith", 2, 2)}},
+ {"endswith", {"EndsWith", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EndsWith", 2, 2)}},
// Numeric builtins
- {"abs", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Abs", 1, 1) },
- {"tobytes", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ToBytes", 1, 1) },
- {"frombytes", BuildSimpleBuiltinFactoryCallback<TFromBytes>() },
+            {"abs", {"Abs", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Abs", 1, 1)}},
+            {"tobytes", {"ToBytes", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ToBytes", 1, 1)}},
+ {"frombytes", {"FromBytes", "Normal", BuildSimpleBuiltinFactoryCallback<TFromBytes>()}},
// Compare builtins
- {"minof", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Min", 1, -1)},
- {"maxof", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Max", 1, -1)},
- {"greatest", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Max", 1, -1)},
- {"least", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Min", 1, -1)},
- {"in", BuildSimpleBuiltinFactoryCallback<TYqlIn>()},
+ {"minof", {"MinOf", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Min", 1, -1)}},
+ {"maxof", {"MaxOf", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Max", 1, -1)}},
+ {"greatest", {"MaxOf", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Max", 1, -1)}},
+ {"least", {"MinOf", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Min", 1, -1)}},
+ {"in", {"", "", BuildSimpleBuiltinFactoryCallback<TYqlIn>()}},
// List builtins
- {"aslist", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AsListMayWarn", 0, -1)},
- {"asliststrict", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AsListStrict", 0, -1) },
- {"listlength", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Length", 1, 1)},
- {"listhasitems", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("HasItems", 1, 1)},
- {"listextend", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListExtend", 0, -1)},
- {"listextendstrict", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListExtendStrict", 0, -1)},
- {"listunionall", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListUnionAll", 0, -1) },
- {"listzip", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListZip", -1, -1)},
- {"listzipall", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListZipAll", -1, -1)},
- {"listenumerate", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListEnumerate", 1, 3)},
- {"listreverse", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListReverse", 1, 1)},
- {"listskip", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListSkip", 2, 2)},
- {"listtake", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTake", 2, 2)},
- {"listhead", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListHead", 1, 1)},
- {"listlast", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListLast", 1, 1)},
- {"listsort", BuildBoolBuiltinFactoryCallback<TListSortBuiltin>(true)},
- {"listsortasc", BuildBoolBuiltinFactoryCallback<TListSortBuiltin>(true)},
- {"listsortdesc", BuildBoolBuiltinFactoryCallback<TListSortBuiltin>(false)},
- {"listmap", BuildBoolBuiltinFactoryCallback<TListMapBuiltin>(false)},
- {"listflatmap", BuildBoolBuiltinFactoryCallback<TListMapBuiltin>(true)},
- {"listfilter", BuildNamedBuiltinFactoryCallback<TListFilterBuiltin>("ListFilter")},
- {"listany", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListAny", 1, 1)},
- {"listall", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListAll", 1, 1)},
- {"listhas", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListHas", 2, 2)},
- {"listmax", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListMax", 1, 1)},
- {"listmin", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListMin", 1, 1)},
- {"listsum", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListSum", 1, 1)},
- {"listfold", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListFold", 3, 3)},
- {"listfold1", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListFold1", 3, 3)},
- {"listfoldmap", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListFoldMap", 3, 3)},
- {"listfold1map", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListFold1Map", 3, 3)},
- {"listavg", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListAvg", 1, 1)},
- {"listconcat", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListConcat", 1, 2)},
- {"listextract", BuildSimpleBuiltinFactoryCallback<TListExtractBuiltin>()},
- {"listuniq", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListUniq", 1, 1)},
- {"listuniqstable", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListUniqStable", 1, 1)},
- {"listcreate", BuildSimpleBuiltinFactoryCallback<TListCreateBuiltin>()},
- {"listfromrange", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListFromRange", 2, 3) },
- {"listreplicate", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Replicate", 2, 2) },
- {"listtakewhile", BuildNamedBuiltinFactoryCallback<TListFilterBuiltin>("ListTakeWhile") },
- {"listskipwhile", BuildNamedBuiltinFactoryCallback<TListFilterBuiltin>("ListSkipWhile") },
- {"listtakewhileinclusive", BuildNamedBuiltinFactoryCallback<TListFilterBuiltin>("ListTakeWhileInclusive") },
- {"listskipwhileinclusive", BuildNamedBuiltinFactoryCallback<TListFilterBuiltin>("ListSkipWhileInclusive") },
- {"listcollect", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListCollect", 1, 1) },
- {"listnotnull", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListNotNull", 1, 1)},
- {"listflatten", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListFlatten", 1, 1)},
- {"listtop", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTop", 2, 3)},
- {"listtopasc", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTopAsc", 2, 3)},
- {"listtopdesc", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTopDesc", 2, 3)},
- {"listtopsort", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTopSort", 2, 3)},
- {"listtopsortasc", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTopSortAsc", 2, 3)},
- {"listtopsortdesc", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTopSortDesc", 2, 3)},
- {"listsample", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListSample", 2, 3)},
- {"listsamplen", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListSampleN", 2, 3)},
- {"listshuffle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListShuffle", 1, 2)},
+ {"aslist", {"AsList", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AsListMayWarn", 0, -1)}},
+ {"asliststrict", {"AsListStrict", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AsListStrict", 0, -1) }},
+ {"listlength", {"ListLength", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Length", 1, 1)}},
+ {"listhasitems", {"ListHasItems", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("HasItems", 1, 1)}},
+ {"listextend", {"ListExtend", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListExtend", 0, -1)}},
+ {"listextendstrict", {"ListExtendStrict", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListExtendStrict", 0, -1)}},
+ {"listunionall", {"ListUnionAll", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListUnionAll", 0, -1)}},
+ {"listzip", {"ListZip", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListZip", -1, -1)}},
+ {"listzipall", {"ListZipAll", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListZipAll", -1, -1)}},
+ {"listenumerate", {"ListEnumerate", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListEnumerate", 1, 3)}},
+ {"listreverse", {"ListReverse", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListReverse", 1, 1)}},
+ {"listskip", {"ListSkip", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListSkip", 2, 2)}},
+ {"listtake", {"ListTake", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTake", 2, 2)}},
+ {"listhead", {"ListHead", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListHead", 1, 1)}},
+ {"listlast", {"ListLast", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListLast", 1, 1)}},
+ {"listsort", {"ListSort", "Normal", BuildBoolBuiltinFactoryCallback<TListSortBuiltin>(true)}},
+ {"listsortasc", {"ListSortAsc", "Normal", BuildBoolBuiltinFactoryCallback<TListSortBuiltin>(true)}},
+ {"listsortdesc", {"ListSortDesc", "Normal", BuildBoolBuiltinFactoryCallback<TListSortBuiltin>(false)}},
+ {"listmap", {"ListMap", "Normal", BuildBoolBuiltinFactoryCallback<TListMapBuiltin>(false)}},
+ {"listflatmap", {"ListFlatMap", "Normal", BuildBoolBuiltinFactoryCallback<TListMapBuiltin>(true)}},
+ {"listfilter", {"ListFilter", "Normal", BuildNamedBuiltinFactoryCallback<TListFilterBuiltin>("ListFilter")}},
+ {"listany", {"ListAny", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListAny", 1, 1)}},
+ {"listall", {"ListAll", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListAll", 1, 1)}},
+ {"listhas", {"ListHas", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListHas", 2, 2)}},
+ {"listmax", {"ListMax", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListMax", 1, 1)}},
+ {"listmin", {"ListMin", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListMin", 1, 1)}},
+ {"listsum", {"ListSum", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListSum", 1, 1)}},
+ {"listfold", {"ListFold", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListFold", 3, 3)}},
+ {"listfold1", {"ListFold1", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListFold1", 3, 3)}},
+ {"listfoldmap", {"ListFoldMap", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListFoldMap", 3, 3)}},
+ {"listfold1map", {"ListFold1Map", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListFold1Map", 3, 3)}},
+ {"listavg", {"ListAvg", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListAvg", 1, 1)}},
+ {"listconcat", {"ListConcat", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListConcat", 1, 2)}},
+ {"listextract", {"ListExtract", "Normal", BuildSimpleBuiltinFactoryCallback<TListExtractBuiltin>()}},
+ {"listuniq", {"ListUniq", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListUniq", 1, 1)}},
+ {"listuniqstable", {"ListUniqStable", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListUniqStable", 1, 1)}},
+ {"listcreate", {"ListCreate", "Normal", BuildSimpleBuiltinFactoryCallback<TListCreateBuiltin>()}},
+ {"listfromrange", {"ListFromRange", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListFromRange", 2, 3)}},
+ {"listreplicate", {"ListReplicate", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Replicate", 2, 2)}},
+ {"listtakewhile", {"ListTakeWhile", "Normal", BuildNamedBuiltinFactoryCallback<TListFilterBuiltin>("ListTakeWhile")}},
+ {"listskipwhile", {"ListSkipWhile", "Normal", BuildNamedBuiltinFactoryCallback<TListFilterBuiltin>("ListSkipWhile")}},
+ {"listtakewhileinclusive", {"ListTakeWhileInclusive", "Normal", BuildNamedBuiltinFactoryCallback<TListFilterBuiltin>("ListTakeWhileInclusive")}},
+ {"listskipwhileinclusive", {"ListSkipWhileInclusive", "Normal", BuildNamedBuiltinFactoryCallback<TListFilterBuiltin>("ListSkipWhileInclusive")}},
+ {"listcollect", {"ListCollect", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListCollect", 1, 1)}},
+ {"listnotnull", {"ListNotNull", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListNotNull", 1, 1)}},
+ {"listflatten", {"ListFlatten", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListFlatten", 1, 1)}},
+ {"listtop", {"ListTop", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTop", 2, 3)}},
+ {"listtopasc", {"ListTopAsc", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTopAsc", 2, 3)}},
+ {"listtopdesc", {"ListTopDesc", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTopDesc", 2, 3)}},
+ {"listtopsort", {"ListTopSort", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTopSort", 2, 3)}},
+ {"listtopsortasc", {"ListTopSortAsc", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTopSortAsc", 2, 3)}},
+ {"listtopsortdesc", {"ListTopSortDesc", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTopSortDesc", 2, 3)}},
+ {"listsample", {"ListSample", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListSample", 2, 3)}},
+ {"listsamplen", {"ListSampleN", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListSampleN", 2, 3)}},
+ {"listshuffle", {"ListShuffle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListShuffle", 1, 2)}},
// Dict builtins
- {"dictlength", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Length", 1, 1)},
- {"dicthasitems", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("HasItems", 1, 1)},
- {"dictcreate", BuildSimpleBuiltinFactoryCallback<TDictCreateBuiltin>()},
- {"setcreate", BuildSimpleBuiltinFactoryCallback<TSetCreateBuiltin>()},
- {"asdict", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AsDictMayWarn", 0, -1)},
- {"asdictstrict", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AsDictStrict", 0, -1)},
- {"asset", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AsSetMayWarn", 0, -1)},
- {"assetstrict", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AsSetStrict", 0, -1)},
- {"todict", BuildNamedBuiltinFactoryCallback<TYqlToDict<false, false>>("One")},
- {"tomultidict", BuildNamedBuiltinFactoryCallback<TYqlToDict<false, false>>("Many")},
- {"tosorteddict", BuildNamedBuiltinFactoryCallback<TYqlToDict<true, false>>("One")},
- {"tosortedmultidict", BuildNamedBuiltinFactoryCallback<TYqlToDict<true, false>>("Many")},
- {"tohasheddict", BuildNamedBuiltinFactoryCallback<TYqlToDict<false, true>>("One")},
- {"tohashedmultidict", BuildNamedBuiltinFactoryCallback<TYqlToDict<false, true>>("Many")},
- {"dictkeys", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictKeys", 1, 1) },
- {"dictpayloads", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictPayloads", 1, 1) },
- {"dictitems", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictItems", 1, 1) },
- {"dictlookup", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Lookup", 2, 2) },
- {"dictcontains", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Contains", 2, 2) },
+ {"dictlength", {"DictLength", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Length", 1, 1)}},
+ {"dicthasitems", {"DictHasItems", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("HasItems", 1, 1)}},
+ {"dictcreate", {"DictCreate", "Normal", BuildSimpleBuiltinFactoryCallback<TDictCreateBuiltin>()}},
+ {"setcreate", {"SetCreate", "Normal", BuildSimpleBuiltinFactoryCallback<TSetCreateBuiltin>()}},
+ {"asdict", {"AsDict", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AsDictMayWarn", 0, -1)}},
+ {"asdictstrict", {"AsDictStrict", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AsDictStrict", 0, -1)}},
+ {"asset", {"AsSet", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AsSetMayWarn", 0, -1)}},
+ {"assetstrict", {"AsSetStrict", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AsSetStrict", 0, -1)}},
+ {"todict", {"ToDict", "Normal", BuildNamedBuiltinFactoryCallback<TYqlToDict<false, false>>("One")}},
+ {"tomultidict", {"ToMultiDict", "Normal", BuildNamedBuiltinFactoryCallback<TYqlToDict<false, false>>("Many")}},
+ {"tosorteddict", {"ToSortedDict", "Normal", BuildNamedBuiltinFactoryCallback<TYqlToDict<true, false>>("One")}},
+ {"tosortedmultidict", {"ToSortedMultiDict", "Normal", BuildNamedBuiltinFactoryCallback<TYqlToDict<true, false>>("Many")}},
+ {"tohasheddict", {"ToHashedDict", "Normal", BuildNamedBuiltinFactoryCallback<TYqlToDict<false, true>>("One")}},
+ {"tohashedmultidict", {"ToHashedMultiDict", "Normal", BuildNamedBuiltinFactoryCallback<TYqlToDict<false, true>>("Many")}},
+ {"dictkeys", {"DictKeys", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictKeys", 1, 1)}},
+ {"dictpayloads", {"DictPayloads", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictPayloads", 1, 1)}},
+ {"dictitems", {"DictItems", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictItems", 1, 1)}},
+            {"dictlookup", {"DictLookup", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Lookup", 2, 2)}},
+ {"dictcontains", {"DictContains", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Contains", 2, 2)}},
// Atom builtins
- {"asatom", BuildSimpleBuiltinFactoryCallback<TYqlAsAtom>()},
- {"secureparam", BuildNamedBuiltinFactoryCallback<TYqlAtom>("SecureParam")},
-
- {"void", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Void", 0, 0)},
- {"emptylist", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EmptyList", 0, 0)},
- {"emptydict", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EmptyDict", 0, 0)},
- {"callable", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Callable", 2, 2)},
- {"way", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Way", 1, 1) },
- {"dynamicvariant", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DynamicVariant", 3, 3) },
- {"variant", BuildSimpleBuiltinFactoryCallback<TYqlVariant>() },
- {"enum", BuildSimpleBuiltinFactoryCallback<TYqlEnum>() },
- {"asvariant", BuildSimpleBuiltinFactoryCallback<TYqlAsVariant>() },
- {"asenum", BuildSimpleBuiltinFactoryCallback<TYqlAsEnum>() },
- {"astagged", BuildSimpleBuiltinFactoryCallback<TYqlAsTagged>() },
- {"untag", BuildSimpleBuiltinFactoryCallback<TYqlUntag>() },
- {"parsetype", BuildSimpleBuiltinFactoryCallback<TYqlParseType>() },
- {"ensuretype", BuildSimpleBuiltinFactoryCallback<TYqlTypeAssert<true>>() },
- {"ensureconvertibleto", BuildSimpleBuiltinFactoryCallback<TYqlTypeAssert<false>>() },
- {"ensure", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Ensure", 2, 3) },
- {"evaluateexpr", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EvaluateExpr", 1, 1) },
- {"evaluateatom", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EvaluateAtom", 1, 1) },
- {"evaluatetype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EvaluateType", 1, 1) },
- {"unwrap", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Unwrap", 1, 2) },
- {"just", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Just", 1, 1) },
- {"nothing", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Nothing", 1, 1) },
- {"formattype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("FormatType", 1, 1) },
- {"formattypediff", BuildNamedBuiltinFactoryCallback<TFormatTypeDiff<false>>("FormatTypeDiff") },
- {"formattypediffpretty", BuildNamedBuiltinFactoryCallback<TFormatTypeDiff<true>>("FormatTypeDiffPretty") },
- {"pgtype", BuildSimpleBuiltinFactoryCallback<TYqlPgType>() },
- {"pgconst", BuildSimpleBuiltinFactoryCallback<TYqlPgConst>() },
- {"pgop", BuildSimpleBuiltinFactoryCallback<TYqlPgOp>() },
- {"pgcall", BuildSimpleBuiltinFactoryCallback<TYqlPgCall<false>>() },
- {"pgrangecall", BuildSimpleBuiltinFactoryCallback<TYqlPgCall<true>>() },
- {"pgcast", BuildSimpleBuiltinFactoryCallback<TYqlPgCast>() },
- {"frompg", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("FromPg", 1, 1) },
- {"topg", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ToPg", 1, 1) },
- {"pgor", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("PgOr", 2, 2) },
- {"pgand", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("PgAnd", 2, 2) },
- {"pgnot", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("PgNot", 1, 1) },
- {"pgarray", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("PgArray", 1, -1) },
- {"typeof", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TypeOf", 1, 1) },
- {"instanceof", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("InstanceOf", 1, 1) },
- {"datatype", BuildSimpleBuiltinFactoryCallback<TYqlDataType>() },
- {"optionaltype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("OptionalType", 1, 1) },
- {"listtype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListType", 1, 1) },
- {"streamtype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StreamType", 1, 1) },
- {"dicttype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictType", 2, 2) },
- {"tupletype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TupleType", 0, -1) },
- {"generictype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("GenericType", 0, 0) },
- {"unittype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("UnitType", 0, 0) },
- {"voidtype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("VoidType", 0, 0) },
- {"resourcetype", BuildSimpleBuiltinFactoryCallback<TYqlResourceType>() },
- {"taggedtype", BuildSimpleBuiltinFactoryCallback<TYqlTaggedType>() },
- {"varianttype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("VariantType", 1, 1) },
- {"callabletype", BuildSimpleBuiltinFactoryCallback<TYqlCallableType>() },
- {"optionalitemtype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("OptionalItemType", 1, 1) },
- {"listitemtype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListItemType", 1, 1) },
- {"streamitemtype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StreamItemType", 1, 1) },
- {"dictkeytype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictKeyType", 1, 1) },
- {"dictpayloadtype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictPayloadType", 1, 1) },
- {"tupleelementtype", BuildSimpleBuiltinFactoryCallback<TYqlTupleElementType>() },
- {"structmembertype", BuildSimpleBuiltinFactoryCallback<TYqlStructMemberType>() },
- {"callableresulttype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("CallableResultType", 1, 1) },
- {"callableargumenttype", BuildSimpleBuiltinFactoryCallback<TYqlCallableArgumentType>() },
- {"variantunderlyingtype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("VariantUnderlyingType", 1, 1) },
- {"variantitem", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("SqlVariantItem", 1, 1) },
- {"fromysonsimpletype", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("FromYsonSimpleType", 2, 2) },
- {"currentutcdate", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(0, "CurrentUtcDate", 0, -1) },
- {"currentutcdatetime", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(0, "CurrentUtcDatetime", 0, -1) },
- {"currentutctimestamp", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(0, "CurrentUtcTimestamp", 0, -1) },
- { "currenttzdate", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(1, "CurrentTzDate", 1, -1) },
- { "currenttzdatetime", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(1, "CurrentTzDatetime", 1, -1) },
- { "currenttztimestamp", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(1, "CurrentTzTimestamp", 1, -1) },
- {"currentoperationid", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("CurrentOperationId", 0, 0) },
- {"currentoperationsharedid", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("CurrentOperationSharedId", 0, 0) },
- {"currentauthenticateduser", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("CurrentAuthenticatedUser", 0, 0) },
- {"addtimezone", BuildSimpleBuiltinFactoryCallback<TYqlAddTimezone>() },
- {"removetimezone", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("RemoveTimezone", 1, 1) },
- {"pickle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Pickle", 1, 1) },
- {"stablepickle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StablePickle", 1, 1) },
- {"unpickle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Unpickle", 2, 2) },
-
- {"typehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TypeHandle", 1, 1) },
- {"parsetypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ParseTypeHandle", 1, 1) },
- {"typekind", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TypeKind", 1, 1) },
- {"datatypecomponents", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DataTypeComponents", 1, 1) },
- {"datatypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DataTypeHandle", 1, 1) },
- {"optionaltypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("OptionalTypeHandle", 1, 1) },
- {"listtypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTypeHandle", 1, 1) },
- {"streamtypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StreamTypeHandle", 1, 1) },
- {"tupletypecomponents", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TupleTypeComponents", 1, 1) },
- {"tupletypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TupleTypeHandle", 1, 1) },
- {"structtypecomponents", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StructTypeComponents", 1, 1) },
- {"structtypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StructTypeHandle", 1, 1) },
- {"dicttypecomponents", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictTypeComponents", 1, 1) },
- {"dicttypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictTypeHandle", 2, 2) },
- {"resourcetypetag", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ResourceTypeTag", 1, 1) },
- {"resourcetypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ResourceTypeHandle", 1, 1) },
- {"taggedtypecomponents", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TaggedTypeComponents", 1, 1) },
- {"taggedtypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TaggedTypeHandle", 2, 2) },
- {"varianttypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("VariantTypeHandle", 1, 1) },
- {"voidtypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("VoidTypeHandle", 0, 0) },
- {"nulltypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("NullTypeHandle", 0, 0) },
- {"emptylisttypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EmptyListTypeHandle", 0, 0) },
- {"emptydicttypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EmptyDictTypeHandle", 0, 0) },
- {"callabletypecomponents", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("CallableTypeComponents", 1, 1) },
- {"callableargument", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("CallableArgument", 1, 3) },
- {"callabletypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("CallableTypeHandle", 2, 4) },
- {"pgtypename", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("PgTypeName", 1, 1) },
- {"pgtypehandle", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("PgTypeHandle", 1, 1) },
- {"formatcode", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("FormatCode", 1, 1) },
- {"worldcode", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("WorldCode", 0, 0) },
- {"atomcode", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AtomCode", 1, 1) },
- {"listcode", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListCode", 0, -1) },
- {"funccode", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("FuncCode", 1, -1) },
- {"lambdacode", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("LambdaCode", 1, 2) },
- {"evaluatecode", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EvaluateCode", 1, 1) },
- {"reprcode", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ReprCode", 1, 1) },
- {"quotecode", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("QuoteCode", 1, 1) },
- {"lambdaargumentscount", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("LambdaArgumentsCount", 1, 1) },
- {"lambdaoptionalargumentscount", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("LambdaOptionalArgumentsCount", 1, 1) },
- {"subqueryextend", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("SubqueryExtend", 1, -1) },
- {"subqueryunionall", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("SubqueryUnionAll", 1, -1) },
- {"subquerymerge", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("SubqueryMerge", 1, -1) },
- {"subqueryunionmerge", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("SubqueryUnionMerge", 1, -1) },
- {"subqueryextendfor", BuildSimpleBuiltinFactoryCallback<TYqlSubqueryFor<SubqueryExtendFor>>() },
- {"subqueryunionallfor", BuildSimpleBuiltinFactoryCallback<TYqlSubqueryFor<SubqueryUnionAllFor>>() },
- {"subquerymergefor", BuildSimpleBuiltinFactoryCallback<TYqlSubqueryFor<SubqueryMergeFor>>() },
- {"subqueryunionmergefor", BuildSimpleBuiltinFactoryCallback<TYqlSubqueryFor<SubqueryUnionMergeFor>>() },
- {"subqueryorderby", BuildSimpleBuiltinFactoryCallback<TYqlSubqueryOrderBy<SubqueryOrderBy>>() },
- {"subqueryassumeorderby", BuildSimpleBuiltinFactoryCallback<TYqlSubqueryOrderBy<SubqueryAssumeOrderBy>>() },
+ {"asatom", {"AsAtom", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlAsAtom>()}},
+ {"secureparam", {"SecureParam", "Normal", BuildNamedBuiltinFactoryCallback<TYqlAtom>("SecureParam")}},
+
+ {"void", {"Void", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Void", 0, 0)}},
+ {"emptylist", {"EmptyList", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EmptyList", 0, 0)}},
+ {"emptydict", {"EmptyDict", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EmptyDict", 0, 0)}},
+ {"callable", {"Callable", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Callable", 2, 2)}},
+ {"way", {"Way", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Way", 1, 1)}},
+ {"dynamicvariant", {"DynamicVariant", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DynamicVariant", 3, 3)}},
+ {"variant", {"Variant", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlVariant>()}},
+ {"enum", {"Enum", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlEnum>()}},
+ {"asvariant", {"AsVariant", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlAsVariant>()}},
+ {"asenum", {"AsEnum", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlAsEnum>()}},
+ {"astagged", {"AsTagged", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlAsTagged>()}},
+ {"untag", {"Untag", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlUntag>()}},
+ {"parsetype", {"ParseType", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlParseType>()}},
+ {"ensuretype", {"EnsureType", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlTypeAssert<true>>()}},
+ {"ensureconvertibleto", {"EnsureConvertibleTo", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlTypeAssert<false>>()}},
+ {"ensure", {"Ensure", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Ensure", 2, 3)}},
+ {"evaluateexpr", {"EvaluateExpr", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EvaluateExpr", 1, 1)}},
+ {"evaluateatom", {"EvaluateAtom", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EvaluateAtom", 1, 1)}},
+ {"evaluatetype", {"EvaluateType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EvaluateType", 1, 1)}},
+ {"unwrap", {"Unwrap", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Unwrap", 1, 2)}},
+ {"just", {"Just", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Just", 1, 1)}},
+ {"nothing", {"Nothing", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Nothing", 1, 1)}},
+ {"formattype", {"FormatType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("FormatType", 1, 1)}},
+ {"formattypediff", {"FormatTypeDiff", "Normal", BuildNamedBuiltinFactoryCallback<TFormatTypeDiff<false>>("FormatTypeDiff")}},
+ {"formattypediffpretty", {"FormatTypeDiffPretty", "Normal", BuildNamedBuiltinFactoryCallback<TFormatTypeDiff<true>>("FormatTypeDiffPretty")}},
+ {"pgtype", {"PgType", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlPgType>()}},
+ {"pgconst", {"PgConst", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlPgConst>()}},
+ {"pgop", {"PgOp", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlPgOp>()}},
+ {"pgcall", {"PgCall", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlPgCall<false>>()}},
+ {"pgrangecall", {"PgRangeCall", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlPgCall<true>>()}},
+ {"pgcast", {"PgCast", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlPgCast>()}},
+ {"frompg", {"FromPg", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("FromPg", 1, 1)}},
+ {"topg", {"ToPg", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ToPg", 1, 1)}},
+ {"pgor", {"PgOr", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("PgOr", 2, 2)}},
+ {"pgand", {"PgAnd", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("PgAnd", 2, 2)}},
+ {"pgnot", {"PgNot", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("PgNot", 1, 1)}},
+ {"pgarray", {"PgArray", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("PgArray", 1, -1)}},
+ {"typeof", {"TypeOf", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TypeOf", 1, 1)}},
+ {"instanceof", {"InstanceOf", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("InstanceOf", 1, 1)}},
+ {"datatype", {"DataType", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlDataType>()}},
+ {"optionaltype", {"OptionalType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("OptionalType", 1, 1)}},
+ {"listtype", {"ListType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListType", 1, 1)}},
+ {"streamtype", {"StreamType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StreamType", 1, 1)}},
+ {"dicttype", {"DictType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictType", 2, 2)}},
+ {"tupletype", {"TupleType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TupleType", 0, -1)}},
+ {"generictype", {"GenericType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("GenericType", 0, 0)}},
+ {"unittype", {"UnitType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("UnitType", 0, 0)}},
+ {"voidtype", {"VoidType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("VoidType", 0, 0)}},
+ {"resourcetype", {"ResourceType", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlResourceType>()}},
+ {"taggedtype", {"TaggedType", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlTaggedType>()}},
+ {"varianttype", {"VariantType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("VariantType", 1, 1)}},
+ {"callabletype", {"CallableType", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlCallableType>()}},
+ {"optionalitemtype", {"OptionalItemType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("OptionalItemType", 1, 1)}},
+ {"listitemtype", {"ListItemType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListItemType", 1, 1)}},
+ {"streamitemtype", {"StreamItemType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StreamItemType", 1, 1)}},
+ {"dictkeytype", {"DictKeyType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictKeyType", 1, 1)}},
+ {"dictpayloadtype", {"DictPayloadType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictPayloadType", 1, 1)}},
+ {"tupleelementtype", {"TupleElementType", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlTupleElementType>()}},
+ {"structmembertype", {"StructMemberType", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlStructMemberType>()}},
+ {"callableresulttype", {"CallableResultType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("CallableResultType", 1, 1)}},
+ {"callableargumenttype", {"CallableArgumentType", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlCallableArgumentType>()}},
+ {"variantunderlyingtype", {"VariantUnderlyingType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("VariantUnderlyingType", 1, 1)}},
+ {"variantitem", {"VariantItem", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("SqlVariantItem", 1, 1)}},
+ {"fromysonsimpletype", {"FromYsonSimpleType", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("FromYsonSimpleType", 2, 2)}},
+ {"currentutcdate", {"CurrentUtcDate", "Normal", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(0, "CurrentUtcDate", 0, -1)}},
+ {"currentutcdatetime", {"CurrentUtcDatetime", "Normal", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(0, "CurrentUtcDatetime", 0, -1)}},
+ {"currentutctimestamp", {"CurrentUtcTimestamp", "Normal", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(0, "CurrentUtcTimestamp", 0, -1)}},
+ {"currenttzdate", {"CurrentTzDate", "Normal", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(1, "CurrentTzDate", 1, -1)}},
+ {"currenttzdatetime", {"CurrentTzDatetime", "Normal", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(1, "CurrentTzDatetime", 1, -1)}},
+ {"currenttztimestamp", {"CurrentTzTimestamp", "Normal", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(1, "CurrentTzTimestamp", 1, -1)}},
+ {"currentoperationid", {"CurrentOperationId", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("CurrentOperationId", 0, 0)}},
+ {"currentoperationsharedid", {"CurrentOperationSharedId", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("CurrentOperationSharedId", 0, 0)}},
+ {"currentauthenticateduser", {"CurrentAuthenticatedUser", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("CurrentAuthenticatedUser", 0, 0)}},
+ {"addtimezone", {"AddTimezone", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlAddTimezone>()}},
+ {"removetimezone", {"RemoveTimezone", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("RemoveTimezone", 1, 1)}},
+ {"pickle", {"Pickle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Pickle", 1, 1)}},
+ {"stablepickle", {"StablePickle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StablePickle", 1, 1)}},
+ {"unpickle", {"Unpickle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Unpickle", 2, 2)}},
+
+ {"typehandle", {"TypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TypeHandle", 1, 1)}},
+ {"parsetypehandle", {"ParseTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ParseTypeHandle", 1, 1)}},
+ {"typekind", {"TypeKind", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TypeKind", 1, 1)}},
+ {"datatypecomponents", {"DataTypeComponents", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DataTypeComponents", 1, 1)}},
+ {"datatypehandle", {"DataTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DataTypeHandle", 1, 1)}},
+ {"optionaltypehandle", {"OptionalTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("OptionalTypeHandle", 1, 1)}},
+ {"listtypehandle", {"ListTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListTypeHandle", 1, 1)}},
+ {"streamtypehandle", {"StreamTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StreamTypeHandle", 1, 1)}},
+ {"tupletypecomponents", {"TupleTypeComponents", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TupleTypeComponents", 1, 1)}},
+ {"tupletypehandle", {"TupleTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TupleTypeHandle", 1, 1)}},
+ {"structtypecomponents", {"StructTypeComponents", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StructTypeComponents", 1, 1)}},
+ {"structtypehandle", {"StructTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StructTypeHandle", 1, 1)}},
+ {"dicttypecomponents", {"DictTypeComponents", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictTypeComponents", 1, 1)}},
+ {"dicttypehandle", {"DictTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("DictTypeHandle", 2, 2)}},
+ {"resourcetypetag", {"ResourceTypeTag", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ResourceTypeTag", 1, 1)}},
+ {"resourcetypehandle", {"ResourceTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ResourceTypeHandle", 1, 1)}},
+ {"taggedtypecomponents", {"TaggedTypeComponents", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TaggedTypeComponents", 1, 1)}},
+ {"taggedtypehandle", {"TaggedTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("TaggedTypeHandle", 2, 2)}},
+ {"varianttypehandle", {"VariantTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("VariantTypeHandle", 1, 1)}},
+ {"voidtypehandle", {"VoidTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("VoidTypeHandle", 0, 0)}},
+ {"nulltypehandle", {"NullTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("NullTypeHandle", 0, 0)}},
+ {"emptylisttypehandle", {"EmptyListTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EmptyListTypeHandle", 0, 0)}},
+ {"emptydicttypehandle", {"EmptyDictTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EmptyDictTypeHandle", 0, 0)}},
+ {"callabletypecomponents", {"CallableTypeComponents", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("CallableTypeComponents", 1, 1)}},
+ {"callableargument", {"CallableArgument", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("CallableArgument", 1, 3)}},
+ {"callabletypehandle", {"CallableTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("CallableTypeHandle", 2, 4)}},
+ {"pgtypename", {"PgTypeName", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("PgTypeName", 1, 1)}},
+ {"pgtypehandle", {"PgTypeHandle", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("PgTypeHandle", 1, 1)}},
+ {"formatcode", {"FormatCode", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("FormatCode", 1, 1)}},
+ {"worldcode", {"WorldCode", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("WorldCode", 0, 0)}},
+ {"atomcode", {"AtomCode", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AtomCode", 1, 1)}},
+ {"listcode", {"ListCode", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ListCode", 0, -1)}},
+ {"funccode", {"FuncCode", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("FuncCode", 1, -1)}},
+ {"lambdacode", {"LambdaCode", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("LambdaCode", 1, 2)}},
+ {"evaluatecode", {"EvaluateCode", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("EvaluateCode", 1, 1)}},
+ {"reprcode", {"ReprCode", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("ReprCode", 1, 1)}},
+ {"quotecode", {"QuoteCode", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("QuoteCode", 1, 1)}},
+ {"lambdaargumentscount", {"LambdaArgumentsCount", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("LambdaArgumentsCount", 1, 1)}},
+ {"lambdaoptionalargumentscount", {"LambdaOptionalArgumentsCount", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("LambdaOptionalArgumentsCount", 1, 1)}},
+ {"subqueryextend", {"SubqueryExtend", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("SubqueryExtend", 1, -1)}},
+ {"subqueryunionall", {"SubqueryUnionAll", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("SubqueryUnionAll", 1, -1)}},
+ {"subquerymerge", {"SubqueryMerge", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("SubqueryMerge", 1, -1)}},
+ {"subqueryunionmerge", {"SubqueryUnionMerge", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("SubqueryUnionMerge", 1, -1)}},
+ {"subqueryextendfor", {"SubqueryExtendFor", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlSubqueryFor<SubqueryExtendFor>>()}},
+ {"subqueryunionallfor", {"SubqueryUnionAllFor", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlSubqueryFor<SubqueryUnionAllFor>>()}},
+ {"subquerymergefor", {"SubqueryMergeFor", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlSubqueryFor<SubqueryMergeFor>>()}},
+ {"subqueryunionmergefor", {"SubqueryUnionMergeFor", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlSubqueryFor<SubqueryUnionMergeFor>>()}},
+ {"subqueryorderby", {"SubqueryOrderBy", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlSubqueryOrderBy<SubqueryOrderBy>>()}},
+ {"subqueryassumeorderby", {"SubqueryAssumeOrderBy", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlSubqueryOrderBy<SubqueryAssumeOrderBy>>()}},
// Tuple builtins
- {"astuple", BuildSimpleBuiltinFactoryCallback<TTupleNode>()},
+ {"astuple", {"AsTuple", "Normal", BuildSimpleBuiltinFactoryCallback<TTupleNode>()}},
// Struct builtins
- {"trymember", BuildNamedBuiltinFactoryCallback<TTryMember>("TryMember")},
- {"addmember", BuildNamedBuiltinFactoryCallback<TAddMember>("AddMember")},
- {"replacemember", BuildNamedBuiltinFactoryCallback<TAddMember>("ReplaceMember")},
- {"removemember", BuildNamedBuiltinFactoryCallback<TRemoveMember>("RemoveMember")},
- {"forceremovemember", BuildNamedBuiltinFactoryCallback<TRemoveMember>("ForceRemoveMember")},
- {"combinemembers", BuildNamedBuiltinFactoryCallback<TCombineMembers>("FlattenMembers")},
- {"flattenmembers", BuildNamedBuiltinFactoryCallback<TFlattenMembers>("FlattenMembers")},
- {"staticmap", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StaticMap", 2, 2) },
- {"staticzip", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StaticZip", 1, -1) },
- {"structunion", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StructUnion", 2, 3)},
- {"structintersection", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StructIntersection", 2, 3)},
- {"structdifference", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StructDifference", 2, 2)},
- {"structsymmetricdifference", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StructSymmetricDifference", 2, 2)},
- {"staticfold", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StaticFold", 3, 3)},
- {"staticfold1", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StaticFold1", 3, 3)},
+ {"trymember", {"TryMember", "Normal", BuildNamedBuiltinFactoryCallback<TTryMember>("TryMember")}},
+ {"addmember", {"AddMember", "Normal", BuildNamedBuiltinFactoryCallback<TAddMember>("AddMember")}},
+ {"replacemember", {"ReplaceMember", "Normal", BuildNamedBuiltinFactoryCallback<TAddMember>("ReplaceMember")}},
+ {"removemember", {"RemoveMember", "Normal", BuildNamedBuiltinFactoryCallback<TRemoveMember>("RemoveMember")}},
+ {"forceremovemember", {"ForceRemoveMember", "Normal", BuildNamedBuiltinFactoryCallback<TRemoveMember>("ForceRemoveMember")}},
+ {"combinemembers", {"CombineMembers", "Normal", BuildNamedBuiltinFactoryCallback<TCombineMembers>("FlattenMembers")}},
+ {"flattenmembers", {"FlattenMembers", "Normal", BuildNamedBuiltinFactoryCallback<TFlattenMembers>("FlattenMembers")}},
+ {"staticmap", {"StaticMap", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StaticMap", 2, 2)}},
+ {"staticzip", {"StaticZip", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StaticZip", 1, -1)}},
+ {"structunion", {"StructUnion", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StructUnion", 2, 3)}},
+ {"structintersection", {"StructIntersection", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StructIntersection", 2, 3)}},
+ {"structdifference", {"StructDifference", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StructDifference", 2, 2)}},
+ {"structsymmetricdifference", {"StructSymmetricDifference", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StructSymmetricDifference", 2, 2)}},
+ {"staticfold", {"StaticFold", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StaticFold", 3, 3)}},
+ {"staticfold1", {"StaticFold1", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("StaticFold1", 3, 3)}},
// File builtins
- {"filepath", BuildNamedBuiltinFactoryCallback<TFileYqlAtom>("FilePath")},
- {"filecontent", BuildNamedBuiltinFactoryCallback<TFileYqlAtom>("FileContent")},
- {"folderpath", BuildNamedBuiltinFactoryCallback<TFileYqlAtom>("FolderPath") },
- {"files", BuildNamedBuiltinFactoryCallback<TFileYqlAtom>("Files")},
- {"parsefile", BuildSimpleBuiltinFactoryCallback<TYqlParseFileOp>()},
+ {"filepath", {"FilePath", "Normal", BuildNamedBuiltinFactoryCallback<TFileYqlAtom>("FilePath")}},
+ {"filecontent", {"FileContent", "Normal", BuildNamedBuiltinFactoryCallback<TFileYqlAtom>("FileContent")}},
+ {"folderpath", {"FolderPath", "Normal", BuildNamedBuiltinFactoryCallback<TFileYqlAtom>("FolderPath")}},
+ {"files", {"Files", "Normal", BuildNamedBuiltinFactoryCallback<TFileYqlAtom>("Files")}},
+ {"parsefile", {"ParseFile", "Normal", BuildSimpleBuiltinFactoryCallback<TYqlParseFileOp>()}},
// Misc builtins
- {"coalesce", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Coalesce", 1, -1)},
- {"nvl", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Coalesce", 1, -1) },
- {"nanvl", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Nanvl", 2, 2) },
- {"likely", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Likely", 1, -1)},
- {"assumestrict", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AssumeStrict", 1, 1)},
- {"assumenonstrict", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AssumeNonStrict", 1, 1)},
- {"random", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(0, "Random", 1, -1)},
- {"randomnumber", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(0, "RandomNumber", 1, -1)},
- {"randomuuid", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(0, "RandomUuid", 1, -1) },
- {"tablepath", BuildNamedBuiltinFactoryCallback<TCallDirectRow>("TablePath") },
- {"tablerecordindex", BuildNamedBuiltinFactoryCallback<TCallDirectRow>("TableRecord") },
- {"tablerow", BuildSimpleBuiltinFactoryCallback<TTableRow<false>>() },
- {"jointablerow", BuildSimpleBuiltinFactoryCallback<TTableRow<true>>() },
- {"tablerows", BuildSimpleBuiltinFactoryCallback<TTableRows>() },
- {"weakfield", BuildSimpleBuiltinFactoryCallback<TWeakFieldOp>()},
- {"version", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Version", 0, 0)},
-
- {"systemmetadata", BuildNamedArgcBuiltinFactoryCallback<TCallDirectRow>("SystemMetadata", 1, -1)},
+ {"coalesce", {"Coalesce", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Coalesce", 1, -1)}},
+ {"nvl", {"Nvl", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Coalesce", 1, -1)}},
+ {"nanvl", {"Nanvl", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Nanvl", 2, 2)}},
+ {"likely", {"Likely", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Likely", 1, -1)}},
+ {"assumestrict", {"AssumeStrict", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AssumeStrict", 1, 1)}},
+ {"assumenonstrict", {"AssumeNonStrict", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("AssumeNonStrict", 1, 1)}},
+ {"random", {"Random", "Normal", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(0, "Random", 1, -1)}},
+ {"randomnumber", {"RandomNumber", "Normal", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(0, "RandomNumber", 1, -1)}},
+ {"randomuuid", {"RandomUuid", "Normal", BuildNamedDepsArgcBuiltinFactoryCallback<TCallNodeDepArgs>(0, "RandomUuid", 1, -1)}},
+ {"tablepath", {"TablePath", "Normal", BuildNamedBuiltinFactoryCallback<TCallDirectRow>("TablePath")}},
+ {"tablerecordindex", {"TableRecordIndex", "Normal", BuildNamedBuiltinFactoryCallback<TCallDirectRow>("TableRecord")}},
+ {"tablerow", {"TableRow", "Normal", BuildSimpleBuiltinFactoryCallback<TTableRow<false>>()}},
+ {"jointablerow", {"JoinTableRow", "Normal", BuildSimpleBuiltinFactoryCallback<TTableRow<true>>()}},
+ {"tablerows", {"TableRows", "Produce", BuildSimpleBuiltinFactoryCallback<TTableRows>()}},
+ {"weakfield", {"WeakField", "Normal", BuildSimpleBuiltinFactoryCallback<TWeakFieldOp>()}},
+ {"version", {"Version", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallNodeImpl>("Version", 0, 0)}},
+
+ {"systemmetadata", {"SystemMetadata", "Normal", BuildNamedArgcBuiltinFactoryCallback<TCallDirectRow>("SystemMetadata", 1, -1)}},
// Hint builtins
- {"grouping", BuildSimpleBuiltinFactoryCallback<TGroupingNode>()},
+ {"grouping", {"Grouping", "AggKey", BuildSimpleBuiltinFactoryCallback<TGroupingNode>()}},
// Window funcitons
- {"rownumber", BuildNamedArgcBuiltinFactoryCallback<TWinRowNumber>("RowNumber", 0, 0)},
- {"rank", BuildNamedArgcBuiltinFactoryCallback<TWinRank>("Rank", 0, 1)},
- {"denserank", BuildNamedArgcBuiltinFactoryCallback<TWinRank>("DenseRank", 0, 1)},
- {"lead", BuildNamedArgcBuiltinFactoryCallback<TWinLeadLag>("Lead", 1, 2)},
- {"lag", BuildNamedArgcBuiltinFactoryCallback<TWinLeadLag>("Lag", 1, 2)},
- {"percentrank", BuildNamedArgcBuiltinFactoryCallback<TWinRank>("PercentRank", 0, 1)},
- {"cumedist", BuildNamedArgcBuiltinFactoryCallback<TWinCumeDist>("CumeDist", 0, 0)},
- {"ntile", BuildNamedArgcBuiltinFactoryCallback<TWinNTile>("NTile", 1, 1)},
+ {"rownumber", {"RowNumber", "Window", BuildNamedArgcBuiltinFactoryCallback<TWinRowNumber>("RowNumber", 0, 0)}},
+ {"rank", {"Rank", "Window", BuildNamedArgcBuiltinFactoryCallback<TWinRank>("Rank", 0, 1)}},
+ {"denserank", {"DenseRank", "Window", BuildNamedArgcBuiltinFactoryCallback<TWinRank>("DenseRank", 0, 1)}},
+ {"lead", {"Lead", "Window", BuildNamedArgcBuiltinFactoryCallback<TWinLeadLag>("Lead", 1, 2)}},
+ {"lag", {"Lag", "Window", BuildNamedArgcBuiltinFactoryCallback<TWinLeadLag>("Lag", 1, 2)}},
+ {"percentrank", {"PercentRank", "Window", BuildNamedArgcBuiltinFactoryCallback<TWinRank>("PercentRank", 0, 1)}},
+ {"cumedist", {"CumeDist", "Window", BuildNamedArgcBuiltinFactoryCallback<TWinCumeDist>("CumeDist", 0, 0)}},
+ {"ntile", {"NTile", "Window", BuildNamedArgcBuiltinFactoryCallback<TWinNTile>("NTile", 1, 1)}},
// Session window
- {"sessionwindow", BuildSimpleBuiltinFactoryCallback<TSessionWindow>()},
- {"sessionstart", BuildSimpleBuiltinFactoryCallback<TSessionStart<true>>()},
- {"sessionstate", BuildSimpleBuiltinFactoryCallback<TSessionStart<false>>()},
+ {"sessionwindow", {"SessionWindow", "Partition", BuildSimpleBuiltinFactoryCallback<TSessionWindow>()}},
+ {"sessionstart", {"SessionStart", "Agg", BuildSimpleBuiltinFactoryCallback<TSessionStart<true>>()}},
+ {"sessionstate", {"SessionState", "Agg", BuildSimpleBuiltinFactoryCallback<TSessionStart<false>>()}},
// New hopping
- {"hoppingwindow", BuildSimpleBuiltinFactoryCallback<THoppingWindow>()},
+ {"hoppingwindow", {"", "", BuildSimpleBuiltinFactoryCallback<THoppingWindow>()}},
// Hopping intervals time functions
- {"hopstart", BuildSimpleBuiltinFactoryCallback<THoppingTime<true>>()},
- {"hopend", BuildSimpleBuiltinFactoryCallback<THoppingTime<false>>()},
+ {"hopstart", {"HopStart", "Agg", BuildSimpleBuiltinFactoryCallback<THoppingTime<true>>()}},
+ {"hopend", {"HopEnd", "Agg", BuildSimpleBuiltinFactoryCallback<THoppingTime<false>>()}}
};
return builtinFuncs;
}
@@ -3197,118 +3211,118 @@ struct TBuiltinFuncData {
constexpr auto OverWindow = EAggregateMode::OverWindow;
TAggrFuncFactoryCallbackMap aggrFuncs = {
- {"min", BuildAggrFuncFactoryCallback("Min", "min_traits_factory")},
- {"max", BuildAggrFuncFactoryCallback("Max", "max_traits_factory")},
-
- {"minby", BuildAggrFuncFactoryCallback("MinBy", "min_by_traits_factory", KEY_PAYLOAD)},
- {"maxby", BuildAggrFuncFactoryCallback("MaxBy", "max_by_traits_factory", KEY_PAYLOAD)},
-
- {"sum", BuildAggrFuncFactoryCallback("Sum", "sum_traits_factory")},
- {"sumif", BuildAggrFuncFactoryCallback("SumIf", "sum_if_traits_factory", PAYLOAD_PREDICATE) },
-
- {"checked_sum", BuildAggrFuncFactoryCallback("CheckedSum", "checked_sum_traits_factory")},
- {"checked_sumif", BuildAggrFuncFactoryCallback("CheckedSumIf", "checked_sum_if_traits_factory", PAYLOAD_PREDICATE) },
-
- {"some", BuildAggrFuncFactoryCallback("Some", "some_traits_factory")},
- {"somevalue", BuildAggrFuncFactoryCallback("SomeValue", "some_traits_factory")},
-
- {"count", BuildAggrFuncFactoryCallback("Count", "count_traits_factory", COUNT)},
- {"countif", BuildAggrFuncFactoryCallback("CountIf", "count_if_traits_factory")},
-
- {"every", BuildAggrFuncFactoryCallback("Every", "and_traits_factory")},
- {"booland", BuildAggrFuncFactoryCallback("BoolAnd", "and_traits_factory")},
- {"boolor", BuildAggrFuncFactoryCallback("BoolOr", "or_traits_factory")},
- {"boolxor", BuildAggrFuncFactoryCallback("BoolXor", "xor_traits_factory")},
-
- {"bitand", BuildAggrFuncFactoryCallback("BitAnd", "bit_and_traits_factory")},
- {"bitor", BuildAggrFuncFactoryCallback("BitOr", "bit_or_traits_factory")},
- {"bitxor", BuildAggrFuncFactoryCallback("BitXor", "bit_xor_traits_factory")},
-
- {"avg", BuildAggrFuncFactoryCallback("Avg", "avg_traits_factory")},
- {"avgif", BuildAggrFuncFactoryCallback("AvgIf", "avg_if_traits_factory", PAYLOAD_PREDICATE) },
-
- {"agglist", BuildAggrFuncFactoryCallback("AggregateList", "list2_traits_factory", LIST)},
- {"aggrlist", BuildAggrFuncFactoryCallback("AggregateList", "list2_traits_factory", LIST)},
- {"aggregatelist", BuildAggrFuncFactoryCallback("AggregateList", "list2_traits_factory", LIST)},
- {"agglistdistinct", BuildAggrFuncFactoryCallback("AggregateListDistinct", "set_traits_factory", LIST)},
- {"aggrlistdistinct", BuildAggrFuncFactoryCallback("AggregateListDistinct", "set_traits_factory", LIST)},
- {"aggregatelistdistinct", BuildAggrFuncFactoryCallback("AggregateListDistinct", "set_traits_factory", LIST)},
-
- {"median", BuildAggrFuncFactoryCallback("Median", "percentile_traits_factory", PERCENTILE)},
- {"percentile", BuildAggrFuncFactoryCallback("Percentile", "percentile_traits_factory", PERCENTILE)},
-
- {"mode", BuildAggrFuncFactoryCallback("Mode", "topfreq_traits_factory", TOPFREQ) },
- {"topfreq", BuildAggrFuncFactoryCallback("TopFreq", "topfreq_traits_factory", TOPFREQ) },
-
- {"top", BuildAggrFuncFactoryCallback("Top", "top_traits_factory", TOP)},
- {"bottom", BuildAggrFuncFactoryCallback("Bottom", "bottom_traits_factory", TOP)},
- {"topby", BuildAggrFuncFactoryCallback("TopBy", "top_by_traits_factory", TOP_BY)},
- {"bottomby", BuildAggrFuncFactoryCallback("BottomBy", "bottom_by_traits_factory", TOP_BY)},
-
- {"histogram", BuildAggrFuncFactoryCallback("AdaptiveWardHistogram", "histogram_adaptive_ward_traits_factory", HISTOGRAM, "Histogram")},
- {"histogramcdf", BuildAggrFuncFactoryCallback("AdaptiveWardHistogramCDF", "histogram_cdf_adaptive_ward_traits_factory", HISTOGRAM, "HistogramCDF")},
- {"adaptivewardhistogram", BuildAggrFuncFactoryCallback("AdaptiveWardHistogram", "histogram_adaptive_ward_traits_factory", HISTOGRAM)},
- {"adaptivewardhistogramcdf", BuildAggrFuncFactoryCallback("AdaptiveWardHistogramCDF", "histogram_cdf_adaptive_ward_traits_factory", HISTOGRAM)},
- {"adaptiveweighthistogram", BuildAggrFuncFactoryCallback("AdaptiveWeightHistogram", "histogram_adaptive_weight_traits_factory", HISTOGRAM)},
- {"adaptiveweighthistogramcdf", BuildAggrFuncFactoryCallback("AdaptiveWeightHistogramCDF", "histogram_cdf_adaptive_weight_traits_factory", HISTOGRAM)},
- {"adaptivedistancehistogram", BuildAggrFuncFactoryCallback("AdaptiveDistanceHistogram", "histogram_adaptive_distance_traits_factory", HISTOGRAM)},
- {"adaptivedistancehistogramcdf", BuildAggrFuncFactoryCallback("AdaptiveDistanceHistogramCDF", "histogram_cdf_adaptive_distance_traits_factory", HISTOGRAM)},
- {"blockwardhistogram", BuildAggrFuncFactoryCallback("BlockWardHistogram", "histogram_block_ward_traits_factory", HISTOGRAM)},
- {"blockwardhistogramcdf", BuildAggrFuncFactoryCallback("BlockWardHistogramCDF", "histogram_cdf_block_ward_traits_factory", HISTOGRAM)},
- {"blockweighthistogram", BuildAggrFuncFactoryCallback("BlockWeightHistogram", "histogram_block_weight_traits_factory", HISTOGRAM)},
- {"blockweighthistogramcdf", BuildAggrFuncFactoryCallback("BlockWeightHistogramCDF", "histogram_cdf_block_weight_traits_factory", HISTOGRAM)},
- {"linearhistogram", BuildAggrFuncFactoryCallback("LinearHistogram", "histogram_linear_traits_factory", LINEAR_HISTOGRAM)},
- {"linearhistogramcdf", BuildAggrFuncFactoryCallback("LinearHistogramCDF", "histogram_cdf_linear_traits_factory", LINEAR_HISTOGRAM)},
- {"logarithmichistogram", BuildAggrFuncFactoryCallback("LogarithmicHistogram", "histogram_logarithmic_traits_factory", LINEAR_HISTOGRAM)},
- {"logarithmichistogramcdf", BuildAggrFuncFactoryCallback("LogarithmicHistogramCDF", "histogram_cdf_logarithmic_traits_factory", LINEAR_HISTOGRAM)},
- {"loghistogram", BuildAggrFuncFactoryCallback("LogarithmicHistogram", "histogram_logarithmic_traits_factory", LINEAR_HISTOGRAM, "LogHistogram")},
- {"loghistogramcdf", BuildAggrFuncFactoryCallback("LogarithmicHistogramCDF", "histogram_cdf_logarithmic_traits_factory", LINEAR_HISTOGRAM, "LogHistogramCDF")},
-
- {"hyperloglog", BuildAggrFuncFactoryCallback("HyperLogLog", "hyperloglog_traits_factory", COUNT_DISTINCT_ESTIMATE)},
- {"hll", BuildAggrFuncFactoryCallback("HyperLogLog", "hyperloglog_traits_factory", COUNT_DISTINCT_ESTIMATE, "HLL")},
- {"countdistinctestimate", BuildAggrFuncFactoryCallback("HyperLogLog", "hyperloglog_traits_factory", COUNT_DISTINCT_ESTIMATE, "CountDistinctEstimate")},
-
- {"variance", BuildAggrFuncFactoryCallback("Variance", "variance_0_1_traits_factory")},
- {"stddev", BuildAggrFuncFactoryCallback("StdDev", "variance_1_1_traits_factory")},
- {"populationvariance", BuildAggrFuncFactoryCallback("VariancePopulation", "variance_0_0_traits_factory")},
- {"variancepopulation", BuildAggrFuncFactoryCallback("VariancePopulation", "variance_0_0_traits_factory")},
- {"populationstddev", BuildAggrFuncFactoryCallback("StdDevPopulation", "variance_1_0_traits_factory")},
- {"stddevpopulation", BuildAggrFuncFactoryCallback("StdDevPopulation", "variance_1_0_traits_factory")},
- {"varpop", BuildAggrFuncFactoryCallback("VariancePopulation", "variance_0_0_traits_factory")},
- {"stddevpop", BuildAggrFuncFactoryCallback("StdDevPopulation", "variance_1_0_traits_factory")},
- {"varp", BuildAggrFuncFactoryCallback("VariancePopulation", "variance_0_0_traits_factory")},
- {"stddevp", BuildAggrFuncFactoryCallback("StdDevPopulation", "variance_1_0_traits_factory")},
- {"variancesample", BuildAggrFuncFactoryCallback("VarianceSample", "variance_0_1_traits_factory")},
- {"stddevsample", BuildAggrFuncFactoryCallback("StdDevSample", "variance_1_1_traits_factory")},
- {"varsamp", BuildAggrFuncFactoryCallback("VarianceSample", "variance_0_1_traits_factory")},
- {"stddevsamp", BuildAggrFuncFactoryCallback("StdDevSample", "variance_1_1_traits_factory")},
- {"vars", BuildAggrFuncFactoryCallback("VarianceSample", "variance_0_1_traits_factory")},
- {"stddevs", BuildAggrFuncFactoryCallback("StdDevSample", "variance_1_1_traits_factory")},
-
- {"correlation", BuildAggrFuncFactoryCallback("Correlation", "correlation_traits_factory", TWO_ARGS)},
- {"corr", BuildAggrFuncFactoryCallback("Correlation", "correlation_traits_factory", TWO_ARGS, "Corr")},
- {"covariance", BuildAggrFuncFactoryCallback("CovarianceSample", "covariance_sample_traits_factory", TWO_ARGS, "Covariance")},
- {"covariancesample", BuildAggrFuncFactoryCallback("CovarianceSample", "covariance_sample_traits_factory", TWO_ARGS)},
- {"covarsamp", BuildAggrFuncFactoryCallback("CovarianceSample", "covariance_sample_traits_factory", TWO_ARGS, "CovarSamp")},
- {"covar", BuildAggrFuncFactoryCallback("CovarianceSample", "covariance_sample_traits_factory", TWO_ARGS, "Covar")},
- {"covars", BuildAggrFuncFactoryCallback("CovarianceSample", "covariance_sample_traits_factory", TWO_ARGS, "CovarS")},
- {"covariancepopulation", BuildAggrFuncFactoryCallback("CovariancePopulation", "covariance_population_traits_factory", TWO_ARGS)},
- {"covarpop", BuildAggrFuncFactoryCallback("CovariancePopulation", "covariance_population_traits_factory", TWO_ARGS, "CovarPop")},
- {"covarp", BuildAggrFuncFactoryCallback("CovariancePopulation", "covariance_population_traits_factory", TWO_ARGS, "CovarP")},
-
- {"udaf", BuildAggrFuncFactoryCallback("UDAF", "udaf_traits_factory", UDAF)},
+ {"min", {"Min", "Agg", BuildAggrFuncFactoryCallback("Min", "min_traits_factory")}},
+ {"max", {"Max", "Agg", BuildAggrFuncFactoryCallback("Max", "max_traits_factory")}},
+
+ {"minby", {"MinBy", "Agg", BuildAggrFuncFactoryCallback("MinBy", "min_by_traits_factory", KEY_PAYLOAD)}},
+ {"maxby", {"MaxBy", "Agg", BuildAggrFuncFactoryCallback("MaxBy", "max_by_traits_factory", KEY_PAYLOAD)}},
+
+ {"sum", {"Sum", "Agg", BuildAggrFuncFactoryCallback("Sum", "sum_traits_factory")}},
+ {"sumif", {"SumIf", "Agg", BuildAggrFuncFactoryCallback("SumIf", "sum_if_traits_factory", PAYLOAD_PREDICATE)}},
+
+ {"checked_sum", {"", "", BuildAggrFuncFactoryCallback("CheckedSum", "checked_sum_traits_factory")}},
+ {"checked_sumif", {"", "", BuildAggrFuncFactoryCallback("CheckedSumIf", "checked_sum_if_traits_factory", PAYLOAD_PREDICATE)}},
+
+ {"some", {"Some", "Agg", BuildAggrFuncFactoryCallback("Some", "some_traits_factory")}},
+ {"somevalue", {"", "", BuildAggrFuncFactoryCallback("SomeValue", "some_traits_factory")}},
+
+ {"count", {"Count", "Agg", BuildAggrFuncFactoryCallback("Count", "count_traits_factory", COUNT)}},
+ {"countif", {"CountIf", "Agg", BuildAggrFuncFactoryCallback("CountIf", "count_if_traits_factory")}},
+
+ {"every", {"", "", BuildAggrFuncFactoryCallback("Every", "and_traits_factory")}},
+ {"booland", {"BoolAnd", "Agg", BuildAggrFuncFactoryCallback("BoolAnd", "and_traits_factory")}},
+ {"boolor", {"BoolOr", "Agg", BuildAggrFuncFactoryCallback("BoolOr", "or_traits_factory")}},
+ {"boolxor", {"BoolXor", "Agg", BuildAggrFuncFactoryCallback("BoolXor", "xor_traits_factory")}},
+
+ {"bitand", {"BitAnd", "Agg", BuildAggrFuncFactoryCallback("BitAnd", "bit_and_traits_factory")}},
+ {"bitor", {"BitOr", "Agg", BuildAggrFuncFactoryCallback("BitOr", "bit_or_traits_factory")}},
+ {"bitxor", {"BitXor", "Agg", BuildAggrFuncFactoryCallback("BitXor", "bit_xor_traits_factory")}},
+
+ {"avg", {"Avg", "Agg", BuildAggrFuncFactoryCallback("Avg", "avg_traits_factory")}},
+ {"avgif", {"AvgIf", "Agg", BuildAggrFuncFactoryCallback("AvgIf", "avg_if_traits_factory", PAYLOAD_PREDICATE)}},
+
+ {"agglist", {"AggList", "Agg", BuildAggrFuncFactoryCallback("AggregateList", "list2_traits_factory", LIST)}},
+ {"aggrlist", {"AggList", "Agg", BuildAggrFuncFactoryCallback("AggregateList", "list2_traits_factory", LIST)}},
+ {"aggregatelist", {"AggList", "Agg", BuildAggrFuncFactoryCallback("AggregateList", "list2_traits_factory", LIST)}},
+ {"agglistdistinct", {"AggListDistinct", "Agg", BuildAggrFuncFactoryCallback("AggregateListDistinct", "set_traits_factory", LIST)}},
+ {"aggrlistdistinct", {"AggListDistinct", "Agg", BuildAggrFuncFactoryCallback("AggregateListDistinct", "set_traits_factory", LIST)}},
+ {"aggregatelistdistinct", {"AggListDistinct", "Agg", BuildAggrFuncFactoryCallback("AggregateListDistinct", "set_traits_factory", LIST)}},
+
+ {"median", {"Median", "Agg", BuildAggrFuncFactoryCallback("Median", "percentile_traits_factory", PERCENTILE)}},
+ {"percentile", {"Percentile", "Agg", BuildAggrFuncFactoryCallback("Percentile", "percentile_traits_factory", PERCENTILE)}},
+
+ {"mode", {"Mode", "Agg", BuildAggrFuncFactoryCallback("Mode", "topfreq_traits_factory", TOPFREQ)}},
+ {"topfreq", {"TopFreq", "Agg", BuildAggrFuncFactoryCallback("TopFreq", "topfreq_traits_factory", TOPFREQ)}},
+
+ {"top", {"Top", "Agg", BuildAggrFuncFactoryCallback("Top", "top_traits_factory", TOP)}},
+ {"bottom", {"Bottom", "Agg", BuildAggrFuncFactoryCallback("Bottom", "bottom_traits_factory", TOP)}},
+ {"topby", {"TopBy", "Agg", BuildAggrFuncFactoryCallback("TopBy", "top_by_traits_factory", TOP_BY)}},
+ {"bottomby", {"BottomBy", "Agg", BuildAggrFuncFactoryCallback("BottomBy", "bottom_by_traits_factory", TOP_BY)}},
+
+ {"histogram", {"Histogram", "Agg", BuildAggrFuncFactoryCallback("AdaptiveWardHistogram", "histogram_adaptive_ward_traits_factory", HISTOGRAM, "Histogram")}},
+ {"histogramcdf", {"HistogramCDF", "Agg", BuildAggrFuncFactoryCallback("AdaptiveWardHistogramCDF", "histogram_cdf_adaptive_ward_traits_factory", HISTOGRAM, "HistogramCDF")}},
+ {"adaptivewardhistogram", {"AdaptiveWardHistogram", "Agg", BuildAggrFuncFactoryCallback("AdaptiveWardHistogram", "histogram_adaptive_ward_traits_factory", HISTOGRAM)}},
+ {"adaptivewardhistogramcdf", {"AdaptiveWardHistogramCDF", "Agg", BuildAggrFuncFactoryCallback("AdaptiveWardHistogramCDF", "histogram_cdf_adaptive_ward_traits_factory", HISTOGRAM)}},
+ {"adaptiveweighthistogram", {"AdaptiveWeightHistogram", "Agg", BuildAggrFuncFactoryCallback("AdaptiveWeightHistogram", "histogram_adaptive_weight_traits_factory", HISTOGRAM)}},
+ {"adaptiveweighthistogramcdf", {"AdaptiveWeightHistogramCDF", "Agg", BuildAggrFuncFactoryCallback("AdaptiveWeightHistogramCDF", "histogram_cdf_adaptive_weight_traits_factory", HISTOGRAM)}},
+ {"adaptivedistancehistogram", {"AdaptiveDistanceHistogram", "Agg", BuildAggrFuncFactoryCallback("AdaptiveDistanceHistogram", "histogram_adaptive_distance_traits_factory", HISTOGRAM)}},
+ {"adaptivedistancehistogramcdf", {"AdaptiveDistanceHistogramCDF", "Agg", BuildAggrFuncFactoryCallback("AdaptiveDistanceHistogramCDF", "histogram_cdf_adaptive_distance_traits_factory", HISTOGRAM)}},
+ {"blockwardhistogram", {"BlockWardHistogram", "Agg", BuildAggrFuncFactoryCallback("BlockWardHistogram", "histogram_block_ward_traits_factory", HISTOGRAM)}},
+ {"blockwardhistogramcdf", {"BlockWardHistogramCDF", "Agg", BuildAggrFuncFactoryCallback("BlockWardHistogramCDF", "histogram_cdf_block_ward_traits_factory", HISTOGRAM)}},
+ {"blockweighthistogram", {"BlockWeightHistogram", "Agg", BuildAggrFuncFactoryCallback("BlockWeightHistogram", "histogram_block_weight_traits_factory", HISTOGRAM)}},
+ {"blockweighthistogramcdf", {"BlockWeightHistogramCDF", "Agg", BuildAggrFuncFactoryCallback("BlockWeightHistogramCDF", "histogram_cdf_block_weight_traits_factory", HISTOGRAM)}},
+ {"linearhistogram", {"LinearHistogram", "Agg", BuildAggrFuncFactoryCallback("LinearHistogram", "histogram_linear_traits_factory", LINEAR_HISTOGRAM)}},
+ {"linearhistogramcdf", {"LinearHistogramCDF", "Agg", BuildAggrFuncFactoryCallback("LinearHistogramCDF", "histogram_cdf_linear_traits_factory", LINEAR_HISTOGRAM)}},
+ {"logarithmichistogram", {"LogarithmicHistogram", "Agg", BuildAggrFuncFactoryCallback("LogarithmicHistogram", "histogram_logarithmic_traits_factory", LINEAR_HISTOGRAM)}},
+ {"logarithmichistogramcdf", {"LogarithmicHistogramCDF", "Agg", BuildAggrFuncFactoryCallback("LogarithmicHistogramCDF", "histogram_cdf_logarithmic_traits_factory", LINEAR_HISTOGRAM)}},
+ {"loghistogram", {"LogHistogram", "Agg", BuildAggrFuncFactoryCallback("LogarithmicHistogram", "histogram_logarithmic_traits_factory", LINEAR_HISTOGRAM, "LogHistogram")}},
+ {"loghistogramcdf", {"LogHistogramCDF", "Agg", BuildAggrFuncFactoryCallback("LogarithmicHistogramCDF", "histogram_cdf_logarithmic_traits_factory", LINEAR_HISTOGRAM, "LogHistogramCDF")}},
+
+ {"hyperloglog", {"HyperLogLog", "Agg", BuildAggrFuncFactoryCallback("HyperLogLog", "hyperloglog_traits_factory", COUNT_DISTINCT_ESTIMATE)}},
+ {"hll", {"HLL", "Agg", BuildAggrFuncFactoryCallback("HyperLogLog", "hyperloglog_traits_factory", COUNT_DISTINCT_ESTIMATE, "HLL")}},
+ {"countdistinctestimate", {"CountDistinctEstimate", "Agg", BuildAggrFuncFactoryCallback("HyperLogLog", "hyperloglog_traits_factory", COUNT_DISTINCT_ESTIMATE, "CountDistinctEstimate")}},
+
+ {"variance", {"Variance", "Agg", BuildAggrFuncFactoryCallback("Variance", "variance_0_1_traits_factory")}},
+ {"stddev", {"StdDev", "Agg", BuildAggrFuncFactoryCallback("StdDev", "variance_1_1_traits_factory")}},
+ {"populationvariance", {"PopulationVariance", "Agg", BuildAggrFuncFactoryCallback("VariancePopulation", "variance_0_0_traits_factory")}},
+ {"variancepopulation", {"VariancePopulation", "Agg", BuildAggrFuncFactoryCallback("VariancePopulation", "variance_0_0_traits_factory")}},
+ {"populationstddev", {"PopulationStdDev", "Agg", BuildAggrFuncFactoryCallback("StdDevPopulation", "variance_1_0_traits_factory")}},
+ {"stddevpopulation", {"StdDevPopulation", "Agg", BuildAggrFuncFactoryCallback("StdDevPopulation", "variance_1_0_traits_factory")}},
+ {"varpop", {"VarPop", "Agg", BuildAggrFuncFactoryCallback("VariancePopulation", "variance_0_0_traits_factory")}},
+ {"stddevpop", {"StdDevPop", "Agg", BuildAggrFuncFactoryCallback("StdDevPopulation", "variance_1_0_traits_factory")}},
+ {"varp", {"VarP", "Agg", BuildAggrFuncFactoryCallback("VariancePopulation", "variance_0_0_traits_factory")}},
+ {"stddevp", {"StdDevP", "Agg", BuildAggrFuncFactoryCallback("StdDevPopulation", "variance_1_0_traits_factory")}},
+ {"variancesample", {"VarianceSample", "Agg", BuildAggrFuncFactoryCallback("VarianceSample", "variance_0_1_traits_factory")}},
+ {"stddevsample", {"StdDevSample", "Agg", BuildAggrFuncFactoryCallback("StdDevSample", "variance_1_1_traits_factory")}},
+ {"varsamp", {"VarSamp", "Agg", BuildAggrFuncFactoryCallback("VarianceSample", "variance_0_1_traits_factory")}},
+ {"stddevsamp", {"StdDevSamp", "Agg", BuildAggrFuncFactoryCallback("StdDevSample", "variance_1_1_traits_factory")}},
+ {"vars", {"VarS", "Agg", BuildAggrFuncFactoryCallback("VarianceSample", "variance_0_1_traits_factory")}},
+ {"stddevs", {"StdDevS", "Agg", BuildAggrFuncFactoryCallback("StdDevSample", "variance_1_1_traits_factory")}},
+
+ {"correlation", {"Correlation", "Agg", BuildAggrFuncFactoryCallback("Correlation", "correlation_traits_factory", TWO_ARGS)}},
+ {"corr", {"Corr", "Agg", BuildAggrFuncFactoryCallback("Correlation", "correlation_traits_factory", TWO_ARGS, "Corr")}},
+ {"covariance", {"Covariance", "Agg", BuildAggrFuncFactoryCallback("CovarianceSample", "covariance_sample_traits_factory", TWO_ARGS, "Covariance")}},
+ {"covariancesample", {"CovarianceSample", "Agg", BuildAggrFuncFactoryCallback("CovarianceSample", "covariance_sample_traits_factory", TWO_ARGS)}},
+ {"covarsamp", {"CovarSamp", "Agg", BuildAggrFuncFactoryCallback("CovarianceSample", "covariance_sample_traits_factory", TWO_ARGS, "CovarSamp")}},
+ {"covar", {"Covar", "Agg", BuildAggrFuncFactoryCallback("CovarianceSample", "covariance_sample_traits_factory", TWO_ARGS, "Covar")}},
+ {"covars", {"CovarS", "Agg", BuildAggrFuncFactoryCallback("CovarianceSample", "covariance_sample_traits_factory", TWO_ARGS, "CovarS")}},
+ {"covariancepopulation", {"CovariancePopulation", "Agg", BuildAggrFuncFactoryCallback("CovariancePopulation", "covariance_population_traits_factory", TWO_ARGS)}},
+ {"covarpop", {"CovarPop", "Agg", BuildAggrFuncFactoryCallback("CovariancePopulation", "covariance_population_traits_factory", TWO_ARGS, "CovarPop")}},
+ {"covarp", {"CovarP", "Agg", BuildAggrFuncFactoryCallback("CovariancePopulation", "covariance_population_traits_factory", TWO_ARGS, "CovarP")}},
+
+ {"udaf", {"UDAF", "Agg", BuildAggrFuncFactoryCallback("UDAF", "udaf_traits_factory", UDAF)}},
// Window functions
- {"firstvalue", BuildAggrFuncFactoryCallback("FirstValue", "first_value_traits_factory", {OverWindow})},
- {"lastvalue", BuildAggrFuncFactoryCallback("LastValue", "last_value_traits_factory", {OverWindow})},
- {"nthvalue", BuildAggrFuncFactoryCallback("NthValue", "nth_value_traits_factory", {OverWindow}, NTH_VALUE)},
- {"firstvalueignorenulls", BuildAggrFuncFactoryCallback("FirstValueIgnoreNulls", "first_value_ignore_nulls_traits_factory", {OverWindow})},
- {"lastvalueignorenulls", BuildAggrFuncFactoryCallback("LastValueIgnoreNulls", "last_value_ignore_nulls_traits_factory", {OverWindow})},
- {"nthvalueignorenulls", BuildAggrFuncFactoryCallback("NthValueIgnoreNulls", "nth_value_ignore_nulls_traits_factory", {OverWindow}, NTH_VALUE)},
+ {"firstvalue", {"FirstValue", "Window", BuildAggrFuncFactoryCallback("FirstValue", "first_value_traits_factory", {OverWindow})}},
+ {"lastvalue", {"LastValue", "Window", BuildAggrFuncFactoryCallback("LastValue", "last_value_traits_factory", {OverWindow})}},
+ {"nthvalue", {"NthValue", "Window", BuildAggrFuncFactoryCallback("NthValue", "nth_value_traits_factory", {OverWindow}, NTH_VALUE)}},
+ {"firstvalueignorenulls", {"", "", BuildAggrFuncFactoryCallback("FirstValueIgnoreNulls", "first_value_ignore_nulls_traits_factory", {OverWindow})}},
+ {"lastvalueignorenulls", {"", "", BuildAggrFuncFactoryCallback("LastValueIgnoreNulls", "last_value_ignore_nulls_traits_factory", {OverWindow})}},
+ {"nthvalueignorenulls", {"", "", BuildAggrFuncFactoryCallback("NthValueIgnoreNulls", "nth_value_ignore_nulls_traits_factory", {OverWindow}, NTH_VALUE)}},
// MatchRecognize navigation functions
- {"first", BuildAggrFuncFactoryCallback("First", "first_traits_factory")},
- {"last", BuildAggrFuncFactoryCallback("Last", "last_traits_factory")},
+ {"first", {"First", "MatchRec", BuildAggrFuncFactoryCallback("First", "first_traits_factory")}},
+ {"last", {"Last", "MatchRec", BuildAggrFuncFactoryCallback("Last", "last_traits_factory")}}
};
return aggrFuncs;
}
@@ -3682,7 +3696,7 @@ TNodePtr BuildBuiltinFunc(TContext& ctx, TPosition pos, TString name, const TVec
if ("first" == aggNormalizedName || "last" == aggNormalizedName) {
return new TInvalidBuiltin(pos, "Cannot use FIRST and LAST outside the MATCH_RECOGNIZE context");
}
- return (*aggrCallback).second(pos, args, aggMode, true);
+ return (*aggrCallback).second.Callback(pos, args, aggMode, true);
}
}
@@ -3703,7 +3717,7 @@ TNodePtr BuildBuiltinFunc(TContext& ctx, TPosition pos, TString name, const TVec
if (aggrCallback != aggrFuncs.end()) {
switch (ctx.GetColumnReferenceState()) {
case EColumnRefState::MatchRecognizeMeasures: {
- auto result = (*aggrCallback).second(pos, args, aggMode, false);
+ auto result = (*aggrCallback).second.Callback(pos, args, aggMode, false);
return BuildMatchRecognizeVarAccess(pos, std::move(result));
}
case EColumnRefState::MatchRecognizeDefine:
@@ -3712,7 +3726,7 @@ TNodePtr BuildBuiltinFunc(TContext& ctx, TPosition pos, TString name, const TVec
if ("first" == normalizedName || "last" == normalizedName) {
return new TInvalidBuiltin(pos, "Cannot use FIRST and LAST outside the MATCH_RECOGNIZE context");
}
- return (*aggrCallback).second(pos, args, aggMode, false);
+ return (*aggrCallback).second.Callback(pos, args, aggMode, false);
}
}
if (aggMode == EAggregateMode::Distinct || aggMode == EAggregateMode::OverWindowDistinct) {
@@ -3721,7 +3735,7 @@ TNodePtr BuildBuiltinFunc(TContext& ctx, TPosition pos, TString name, const TVec
auto builtinCallback = builtinFuncs.find(normalizedName);
if (builtinCallback != builtinFuncs.end()) {
- return (*builtinCallback).second(pos, args);
+ return (*builtinCallback).second.Callback(pos, args);
} else if (normalizedName == "udf") {
if (mustUseNamed && *mustUseNamed) {
*mustUseNamed = false;
@@ -3919,4 +3933,32 @@ TNodePtr BuildBuiltinFunc(TContext& ctx, TPosition pos, TString name, const TVec
TDeferredAtom(typeConfig, ctx), nullptr, nullptr);
}
+void EnumerateBuiltins(const std::function<void(std::string_view name, std::string_view kind)>& callback) {
+ const TBuiltinFuncData* funcData = Singleton<TBuiltinFuncData>();
+ const TBuiltinFactoryCallbackMap& builtinFuncs = funcData->BuiltinFuncs;
+ const TAggrFuncFactoryCallbackMap& aggrFuncs = funcData->AggrFuncs;
+ const TCoreFuncMap& coreFuncs = funcData->CoreFuncs;
+
+ std::map<std::string_view, std::string_view> map;
+ for (const auto& x : builtinFuncs) {
+ if (!x.second.CanonicalSqlName.empty()) {
+ map.emplace(x.second.CanonicalSqlName, x.second.Kind);
+ }
+ }
+
+ for (const auto& x : aggrFuncs) {
+ if (!x.second.CanonicalSqlName.empty()) {
+ map.emplace(x.second.CanonicalSqlName, x.second.Kind);
+ }
+ }
+
+ for (const auto& x : coreFuncs) {
+ map.emplace(x.second.Name, "Normal");
+ }
+
+ for (const auto& x : map) {
+ callback(x.first, x.second);
+ }
+}
+
} // namespace NSQLTranslationV1
diff --git a/yql/essentials/sql/v1/complete/antlr4/c3i.h b/yql/essentials/sql/v1/complete/antlr4/c3i.h
new file mode 100644
index 0000000000..26c7186805
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/antlr4/c3i.h
@@ -0,0 +1,43 @@
+#pragma once
+
+#include "defs.h"
+
+#include <util/generic/fwd.h>
+#include <util/generic/string.h>
+#include <util/generic/vector.h>
+
+#include <unordered_set>
+
+namespace NSQLComplete {
+
+ // std::vector is used to prevent copying a C3 output
+ struct TSuggestedToken {
+ TTokenId Number;
+ std::vector<TTokenId> Following;
+ };
+
+ struct TMatchedRule {
+ TRuleId Index;
+ TParserCallStack ParserCallStack;
+ };
+
+ struct TC3Candidates {
+ TVector<TSuggestedToken> Tokens;
+ TVector<TMatchedRule> Rules;
+ };
+
+ class IC3Engine {
+ public:
+ using TPtr = THolder<IC3Engine>;
+
+ // std::unordered_set is used to prevent copying into c3 core
+ struct TConfig {
+ std::unordered_set<TTokenId> IgnoredTokens;
+ std::unordered_set<TRuleId> PreferredRules;
+ };
+
+ virtual TC3Candidates Complete(TStringBuf prefix) = 0;
+ virtual ~IC3Engine() = default;
+ };
+
+} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/c3_engine.h b/yql/essentials/sql/v1/complete/antlr4/c3t.h
index 907339ab1f..750da64229 100644
--- a/yql/essentials/sql/v1/complete/c3_engine.h
+++ b/yql/essentials/sql/v1/complete/antlr4/c3t.h
@@ -1,48 +1,20 @@
#pragma once
-#include "sql_antlr4.h"
-#include "string_util.h"
+#include "c3i.h"
+
+#include <yql/essentials/sql/v1/complete/text/word.h>
#include <contrib/libs/antlr4_cpp_runtime/src/ANTLRInputStream.h>
#include <contrib/libs/antlr4_cpp_runtime/src/BufferedTokenStream.h>
#include <contrib/libs/antlr4_cpp_runtime/src/Vocabulary.h>
#include <contrib/libs/antlr4-c3/src/CodeCompletionCore.hpp>
+#include <util/generic/fwd.h>
#include <util/generic/string.h>
#include <util/generic/vector.h>
-#include <unordered_set>
-
namespace NSQLComplete {
- struct TSuggestedToken {
- TTokenId Number;
- };
-
- struct TMatchedRule {
- TRuleId Index;
- };
-
- struct TC3Candidates {
- TVector<TSuggestedToken> Tokens;
- TVector<TMatchedRule> Rules;
- };
-
- class IC3Engine {
- public:
- using TPtr = THolder<IC3Engine>;
-
- // std::unordered_set is used to prevent copying into c3 core
- struct TConfig {
- std::unordered_set<TTokenId> IgnoredTokens;
- std::unordered_set<TRuleId> PreferredRules;
- };
-
- virtual TC3Candidates Complete(TStringBuf queryPrefix) = 0;
- virtual const antlr4::dfa::Vocabulary& GetVocabulary() const = 0;
- virtual ~IC3Engine() = default;
- };
-
template <class Lexer, class Parser>
struct TAntlrGrammar {
using TLexer = Lexer;
@@ -68,29 +40,24 @@ namespace NSQLComplete {
CompletionCore.preferredRules = std::move(config.PreferredRules);
}
- TC3Candidates Complete(TStringBuf queryPrefix) override {
- Assign(queryPrefix);
- const auto caretTokenIndex = CaretTokenIndex(queryPrefix);
+ TC3Candidates Complete(TStringBuf prefix) override {
+ Assign(prefix);
+ const auto caretTokenIndex = CaretTokenIndex(prefix);
auto candidates = CompletionCore.collectCandidates(caretTokenIndex);
return Converted(std::move(candidates));
}
- const antlr4::dfa::Vocabulary& GetVocabulary() const override {
- return Lexer.getVocabulary();
- }
-
private:
- void Assign(TStringBuf queryPrefix) {
- Chars.load(queryPrefix.Data(), queryPrefix.Size(), /* lenient = */ false);
+ void Assign(TStringBuf prefix) {
+ Chars.load(prefix.Data(), prefix.Size(), /* lenient = */ false);
Lexer.reset();
Tokens.setTokenSource(&Lexer);
-
Tokens.fill();
}
- size_t CaretTokenIndex(TStringBuf queryPrefix) {
+ size_t CaretTokenIndex(TStringBuf prefix) {
const auto tokensCount = Tokens.size();
- if (2 <= tokensCount && !LastWord(queryPrefix).Empty()) {
+ if (2 <= tokensCount && !LastWord(prefix).Empty()) {
return tokensCount - 2;
}
return tokensCount - 1;
@@ -98,11 +65,12 @@ namespace NSQLComplete {
static TC3Candidates Converted(c3::CandidatesCollection candidates) {
TC3Candidates converted;
- for (const auto& [token, _] : candidates.tokens) {
- converted.Tokens.emplace_back(token);
+ for (auto& [token, following] : candidates.tokens) {
+ converted.Tokens.emplace_back(token, std::move(following));
}
- for (const auto& [rule, _] : candidates.rules) {
- converted.Rules.emplace_back(rule);
+ for (auto& [rule, data] : candidates.rules) {
+ converted.Rules.emplace_back(rule, std::move(data.ruleList));
+ converted.Rules.back().ParserCallStack.emplace_back(rule);
}
return converted;
}
diff --git a/yql/essentials/sql/v1/complete/antlr4/defs.h b/yql/essentials/sql/v1/complete/antlr4/defs.h
new file mode 100644
index 0000000000..7f2991a8a7
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/antlr4/defs.h
@@ -0,0 +1,18 @@
+#pragma once
+
+#include <contrib/libs/antlr4_cpp_runtime/src/Token.h>
+
+#include <cstddef>
+#include <vector>
+
+namespace NSQLComplete {
+
+ using TTokenId = size_t;
+ using TRuleId = size_t;
+
+ constexpr TTokenId TOKEN_EOF = antlr4::Token::EOF;
+
+ // std::vector is used to prevent copying a C3 output
+ using TParserCallStack = std::vector<TRuleId>;
+
+} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/antlr4/ya.make b/yql/essentials/sql/v1/complete/antlr4/ya.make
new file mode 100644
index 0000000000..3614560617
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/antlr4/ya.make
@@ -0,0 +1,9 @@
+LIBRARY()
+
+PEERDIR(
+ contrib/libs/antlr4_cpp_runtime
+ contrib/libs/antlr4-c3
+ yql/essentials/sql/v1/complete/text
+)
+
+END()
diff --git a/yql/essentials/sql/v1/complete/bench/main.cpp b/yql/essentials/sql/v1/complete/bench/main.cpp
new file mode 100644
index 0000000000..ace37c43e3
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/bench/main.cpp
@@ -0,0 +1,40 @@
+#include <benchmark/benchmark.h>
+
+#include <yql/essentials/sql/v1/complete/name/static/name_service.h>
+#include <yql/essentials/sql/v1/complete/name/static/ranking.h>
+#include <yql/essentials/sql/v1/complete/sql_complete.h>
+
+#include <yql/essentials/sql/v1/lexer/antlr4_pure/lexer.h>
+#include <yql/essentials/sql/v1/lexer/antlr4_pure_ansi/lexer.h>
+
+#include <util/generic/xrange.h>
+#include <util/system/compiler.h>
+
+namespace NSQLComplete {
+
+ NSQLComplete::TLexerSupplier MakePureLexerSupplier() {
+ NSQLTranslationV1::TLexers lexers;
+ lexers.Antlr4Pure = NSQLTranslationV1::MakeAntlr4PureLexerFactory();
+ lexers.Antlr4PureAnsi = NSQLTranslationV1::MakeAntlr4PureAnsiLexerFactory();
+ return [lexers = std::move(lexers)](bool ansi) {
+ return NSQLTranslationV1::MakeLexer(
+ lexers, ansi, /* antlr4 = */ true,
+ NSQLTranslationV1::ELexerFlavor::Pure);
+ };
+ }
+
+ void BenchmarkComplete(benchmark::State& state) {
+ auto names = NSQLComplete::MakeDefaultNameSet();
+ auto ranking = NSQLComplete::MakeDefaultRanking();
+ auto service = MakeStaticNameService(std::move(names), std::move(ranking));
+ auto engine = MakeSqlCompletionEngine(MakePureLexerSupplier(), std::move(service));
+
+ for (const auto _ : state) {
+ auto completion = engine->Complete({"SELECT "});
+ benchmark::DoNotOptimize(completion);
+ }
+ }
+
+} // namespace NSQLComplete
+
+BENCHMARK(NSQLComplete::BenchmarkComplete);
diff --git a/yql/essentials/sql/v1/complete/bench/ya.make b/yql/essentials/sql/v1/complete/bench/ya.make
new file mode 100644
index 0000000000..aec2f394c8
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/bench/ya.make
@@ -0,0 +1,13 @@
+G_BENCHMARK()
+
+SRCS(
+ main.cpp
+)
+
+PEERDIR(
+ yql/essentials/sql/v1/complete
+ yql/essentials/sql/v1/complete/name/static
+ yql/essentials/sql/v1/lexer
+)
+
+END()
diff --git a/yql/essentials/sql/v1/complete/name/name_service.h b/yql/essentials/sql/v1/complete/name/name_service.h
index a4998fd8ff..d3cf153341 100644
--- a/yql/essentials/sql/v1/complete/name/name_service.h
+++ b/yql/essentials/sql/v1/complete/name/name_service.h
@@ -17,18 +17,24 @@ namespace NSQLComplete {
using TConstraints = std::monostate;
};
+ struct TFunctionName: TIndentifier {
+ using TConstraints = std::monostate;
+ };
+
using TGenericName = std::variant<
- TTypeName>;
+ TTypeName,
+ TFunctionName>;
struct TNameRequest {
struct {
std::optional<TTypeName::TConstraints> TypeName;
+ std::optional<TFunctionName::TConstraints> Function;
} Constraints;
TString Prefix = "";
size_t Limit = 128;
bool IsEmpty() const {
- return !Constraints.TypeName;
+ return !Constraints.TypeName && !Constraints.Function;
}
};
diff --git a/yql/essentials/sql/v1/complete/name/static/default_name_set.cpp b/yql/essentials/sql/v1/complete/name/static/default_name_set.cpp
deleted file mode 100644
index 06c55a1d27..0000000000
--- a/yql/essentials/sql/v1/complete/name/static/default_name_set.cpp
+++ /dev/null
@@ -1,46 +0,0 @@
-#include "name_service.h"
-
-namespace NSQLComplete {
-
- // TODO(YQL-19747): Use some name registry
- NameSet MakeDefaultNameSet() {
- return {
- .Types = {
- "Bool",
- "Int8",
- "Uint8",
- "Int16",
- "Uint16",
- "Int32",
- "Uint32",
- "Int64",
- "Uint64",
- "Float",
- "Double",
- "String",
- "Utf8",
- "Yson",
- "Json",
- "Uuid",
- "JsonDocument",
- "Date",
- "Datetime",
- "Timestamp",
- "Interval",
- "TzDate",
- "TzDatetime",
- "TzTimestamp",
- "Date32",
- "Datetime64",
- "Timestamp64",
- "Interval64",
- "TzDate32",
- "TzDatetime64",
- "TzTimestamp64",
- "Decimal",
- "DyNumber",
- },
- };
- }
-
-} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/name/static/frequency.cpp b/yql/essentials/sql/v1/complete/name/static/frequency.cpp
new file mode 100644
index 0000000000..d9c8ba9652
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/name/static/frequency.cpp
@@ -0,0 +1,87 @@
+#include "frequency.h"
+
+#include <library/cpp/json/json_reader.h>
+#include <library/cpp/resource/resource.h>
+
+#include <util/charset/utf8.h>
+
+namespace NSQLComplete {
+
+ constexpr struct {
+ struct {
+ const char* Parent = "parent";
+ const char* Rule = "rule";
+ const char* Sum = "sum";
+ } Key;
+ struct {
+ const char* Type = "TYPE";
+ const char* Func = "FUNC";
+ const char* Module = "MODULE";
+ const char* ModuleFunc = "MODULE_FUNC";
+ } Parent;
+ } Json;
+
+ struct TFrequencyItem {
+ TString Parent;
+ TString Rule;
+ size_t Sum;
+
+ static TFrequencyItem ParseJsonMap(NJson::TJsonValue::TMapType&& json) {
+ return {
+ .Parent = json.at(Json.Key.Parent).GetStringSafe(),
+ .Rule = json.at(Json.Key.Rule).GetStringSafe(),
+ .Sum = json.at(Json.Key.Sum).GetUIntegerSafe(),
+ };
+ }
+
+ static TVector<TFrequencyItem> ParseListFromJsonArray(NJson::TJsonValue::TArray& json) {
+ TVector<TFrequencyItem> items;
+ items.reserve(json.size());
+ for (auto& element : json) {
+ auto item = TFrequencyItem::ParseJsonMap(std::move(element.GetMapSafe()));
+ items.emplace_back(std::move(item));
+ }
+ return items;
+ }
+
+ static TVector<TFrequencyItem> ParseListFromJsonText(const TStringBuf text) {
+ NJson::TJsonValue json = NJson::ReadJsonFastTree(text);
+ return ParseListFromJsonArray(json.GetArraySafe());
+ }
+ };
+
+ TFrequencyData Convert(TVector<TFrequencyItem> items) {
+ TFrequencyData data;
+ for (auto& item : items) {
+ if (item.Parent == Json.Parent.Type ||
+ item.Parent == Json.Parent.Func ||
+ item.Parent == Json.Parent.ModuleFunc ||
+ item.Parent == Json.Parent.Module) {
+ item.Rule = ToLowerUTF8(item.Rule);
+ }
+
+ if (item.Parent == Json.Parent.Type) {
+ data.Types[item.Rule] += item.Sum;
+ } else if (item.Parent == Json.Parent.Func ||
+ item.Parent == Json.Parent.ModuleFunc) {
+ data.Functions[item.Rule] += item.Sum;
+ } else if (item.Parent == Json.Parent.Module) {
+ // Ignore, unsupported: Modules
+ } else {
+ // Ignore, unsupported: Parser Call Stacks
+ }
+ }
+ return data;
+ }
+
+ TFrequencyData ParseJsonFrequencyData(const TStringBuf text) {
+ return Convert(TFrequencyItem::ParseListFromJsonText(text));
+ }
+
+ TFrequencyData LoadFrequencyData() {
+ TString text;
+ Y_ENSURE(NResource::FindExact("rules_corr_basic.json", &text));
+ return ParseJsonFrequencyData(text);
+ }
+
+} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/name/static/frequency.h b/yql/essentials/sql/v1/complete/name/static/frequency.h
new file mode 100644
index 0000000000..3d128f824b
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/name/static/frequency.h
@@ -0,0 +1,17 @@
+#pragma once
+
+#include <util/generic/string.h>
+#include <util/generic/hash.h>
+
+namespace NSQLComplete {
+
+ struct TFrequencyData {
+ THashMap<TString, size_t> Types;
+ THashMap<TString, size_t> Functions;
+ };
+
+ TFrequencyData ParseJsonFrequencyData(const TStringBuf text);
+
+ TFrequencyData LoadFrequencyData();
+
+} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/name/static/frequency_ut.cpp b/yql/essentials/sql/v1/complete/name/static/frequency_ut.cpp
new file mode 100644
index 0000000000..dd6ee2cfbb
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/name/static/frequency_ut.cpp
@@ -0,0 +1,37 @@
+#include "frequency.h"
+
+#include <library/cpp/testing/unittest/registar.h>
+
+using namespace NSQLComplete;
+
+Y_UNIT_TEST_SUITE(FrequencyTests) {
+
+ Y_UNIT_TEST(FrequencyDataJson) {
+ TFrequencyData actual = ParseJsonFrequencyData(R"([
+ {"parent":"FUNC","rule":"ABC","sum":1},
+ {"parent":"TYPE","rule":"BIGINT","sum":7101},
+ {"parent":"MODULE_FUNC","rule":"Compress::BZip2","sum":2},
+ {"parent":"MODULE","rule":"re2","sum":3094},
+ {"parent":"TRule_action_or_subquery_args","rule":"TRule_action_or_subquery_args.Block2","sum":4874480}
+ ])");
+
+ TFrequencyData expected = {
+ .Types = {
+ {"bigint", 7101},
+ },
+ .Functions = {
+ {"abc", 1},
+ {"compress::bzip2", 2},
+ },
+ };
+
+ UNIT_ASSERT_VALUES_EQUAL(actual.Types, expected.Types);
+ UNIT_ASSERT_VALUES_EQUAL(actual.Functions, expected.Functions);
+ }
+
+ Y_UNIT_TEST(FrequencyDataResource) {
+ TFrequencyData data = LoadFrequencyData();
+ Y_UNUSED(data);
+ }
+
+} // Y_UNIT_TEST_SUITE(FrequencyTests)
diff --git a/yql/essentials/sql/v1/complete/name/static/json_name_set.cpp b/yql/essentials/sql/v1/complete/name/static/json_name_set.cpp
new file mode 100644
index 0000000000..29c303b310
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/name/static/json_name_set.cpp
@@ -0,0 +1,58 @@
+#include "name_service.h"
+
+#include <library/cpp/json/json_reader.h>
+#include <library/cpp/resource/resource.h>
+
+namespace NSQLComplete {
+
+ NJson::TJsonValue LoadJsonResource(const TStringBuf filename) {
+ TString text;
+ Y_ENSURE(NResource::FindExact(filename, &text));
+ return NJson::ReadJsonFastTree(text);
+ }
+
+ template <class T, class U>
+ T Merge(T lhs, U rhs) {
+ std::copy(std::begin(rhs), std::end(rhs), std::back_inserter(lhs));
+ return lhs;
+ }
+
+ TVector<TString> ParseNames(NJson::TJsonValue::TArray& json) {
+ TVector<TString> keys;
+ keys.reserve(json.size());
+ for (auto& item : json) {
+ keys.emplace_back(item.GetMapSafe().at("name").GetStringSafe());
+ }
+ return keys;
+ }
+
+ TVector<TString> ParseTypes(NJson::TJsonValue json) {
+ return ParseNames(json.GetArraySafe());
+ }
+
+ TVector<TString> ParseFunctions(NJson::TJsonValue json) {
+ return ParseNames(json.GetArraySafe());
+ }
+
+ TVector<TString> ParseUfs(NJson::TJsonValue json) {
+ TVector<TString> names;
+ for (auto& [module, v] : json.GetMapSafe()) {
+ auto functions = ParseNames(v.GetArraySafe());
+ for (auto& function : functions) {
+ function.prepend("::").prepend(module);
+ }
+ std::copy(std::begin(functions), std::end(functions), std::back_inserter(names));
+ }
+ return names;
+ }
+
+ NameSet MakeDefaultNameSet() {
+ return {
+ .Types = ParseTypes(LoadJsonResource("types.json")),
+ .Functions = Merge(
+ ParseFunctions(LoadJsonResource("sql_functions.json")),
+ ParseUfs(LoadJsonResource("udfs_basic.json"))),
+ };
+ }
+
+} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/name/static/name_service.cpp b/yql/essentials/sql/v1/complete/name/static/name_service.cpp
index 4f0706a4ea..fdb1dd4eae 100644
--- a/yql/essentials/sql/v1/complete/name/static/name_service.cpp
+++ b/yql/essentials/sql/v1/complete/name/static/name_service.cpp
@@ -1,18 +1,30 @@
#include "name_service.h"
+#include "ranking.h"
+
namespace NSQLComplete {
+ bool NoCaseCompare(const TString& lhs, const TString& rhs) {
+ return std::lexicographical_compare(
+ std::begin(lhs), std::end(lhs),
+ std::begin(rhs), std::end(rhs),
+ [](const char lhs, const char rhs) {
+ return ToLower(lhs) < ToLower(rhs);
+ });
+ }
+
+ auto NoCaseCompareLimit(size_t size) {
+ return [size](const TString& lhs, const TString& rhs) -> bool {
+ return strncasecmp(lhs.data(), rhs.data(), size) < 0;
+ };
+ }
+
const TVector<TStringBuf> FilteredByPrefix(
const TString& prefix,
const TVector<TString>& sorted Y_LIFETIME_BOUND) {
auto [first, last] = EqualRange(
- std::begin(sorted),
- std::end(sorted),
- prefix,
- [&](const TString& lhs, const TString& rhs) {
- return strncasecmp(lhs.data(), rhs.data(), prefix.size()) < 0;
- });
-
+ std::begin(sorted), std::end(sorted),
+ prefix, NoCaseCompareLimit(prefix.size()));
return TVector<TStringBuf>(first, last);
}
@@ -23,66 +35,47 @@ namespace NSQLComplete {
}
}
- size_t KindWeight(const TGenericName& name) {
- return std::visit([](const auto& name) {
- using T = std::decay_t<decltype(name)>;
- if constexpr (std::is_same_v<T, TTypeName>) {
- return 1;
- }
- }, name);
- }
-
- const TStringBuf ContentView(const TGenericName& name Y_LIFETIME_BOUND) {
- return std::visit([](const auto& name) -> TStringBuf {
- using T = std::decay_t<decltype(name)>;
- if constexpr (std::is_base_of_v<TIndentifier, T>) {
- return name.Indentifier;
- }
- }, name);
- }
-
- void Sort(TVector<TGenericName>& names) {
- Sort(names, [](const TGenericName& lhs, const TGenericName& rhs) {
- const auto lhs_weight = KindWeight(lhs);
- const auto lhs_content = ContentView(lhs);
-
- const auto rhs_weight = KindWeight(rhs);
- const auto rhs_content = ContentView(rhs);
-
- return std::tie(lhs_weight, lhs_content) <
- std::tie(rhs_weight, rhs_content);
- });
- }
-
class TStaticNameService: public INameService {
public:
- explicit TStaticNameService(NameSet names)
+ explicit TStaticNameService(NameSet names, IRanking::TPtr ranking)
: NameSet_(std::move(names))
+ , Ranking_(std::move(ranking))
{
- Sort(NameSet_.Types);
+ Sort(NameSet_.Types, NoCaseCompare);
+ Sort(NameSet_.Functions, NoCaseCompare);
}
TFuture<TNameResponse> Lookup(TNameRequest request) override {
TNameResponse response;
if (request.Constraints.TypeName) {
- AppendAs<TTypeName>(response.RankedNames,
- FilteredByPrefix(request.Prefix, NameSet_.Types));
+ AppendAs<TTypeName>(
+ response.RankedNames,
+ FilteredByPrefix(request.Prefix, NameSet_.Types));
}
- Sort(response.RankedNames);
+ if (request.Constraints.Function) {
+ AppendAs<TFunctionName>(
+ response.RankedNames,
+ FilteredByPrefix(request.Prefix, NameSet_.Functions));
+ }
- response.RankedNames.crop(request.Limit);
+ Ranking_->CropToSortedPrefix(response.RankedNames, request.Limit);
return NThreading::MakeFuture(std::move(response));
}
private:
NameSet NameSet_;
+ IRanking::TPtr Ranking_;
};
- INameService::TPtr MakeStaticNameService(NameSet names) {
- return INameService::TPtr(new TStaticNameService(std::move(names)));
+ INameService::TPtr MakeStaticNameService() {
+ return MakeStaticNameService(MakeDefaultNameSet(), MakeDefaultRanking());
+ }
+
+ INameService::TPtr MakeStaticNameService(NameSet names, IRanking::TPtr ranking) {
+ return INameService::TPtr(new TStaticNameService(std::move(names), std::move(ranking)));
}
} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/name/static/name_service.h b/yql/essentials/sql/v1/complete/name/static/name_service.h
index f9d8ec0000..a5c90465c8 100644
--- a/yql/essentials/sql/v1/complete/name/static/name_service.h
+++ b/yql/essentials/sql/v1/complete/name/static/name_service.h
@@ -1,15 +1,20 @@
#pragma once
+#include "ranking.h"
+
#include <yql/essentials/sql/v1/complete/name/name_service.h>
namespace NSQLComplete {
struct NameSet {
TVector<TString> Types;
+ TVector<TString> Functions;
};
NameSet MakeDefaultNameSet();
- INameService::TPtr MakeStaticNameService(NameSet names);
+ INameService::TPtr MakeStaticNameService();
+
+ INameService::TPtr MakeStaticNameService(NameSet names, IRanking::TPtr ranking);
} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/name/static/ranking.cpp b/yql/essentials/sql/v1/complete/name/static/ranking.cpp
new file mode 100644
index 0000000000..45e6e2b2fa
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/name/static/ranking.cpp
@@ -0,0 +1,102 @@
+#include "ranking.h"
+
+#include "frequency.h"
+
+#include <yql/essentials/sql/v1/complete/name/name_service.h>
+
+#include <util/charset/utf8.h>
+
+namespace NSQLComplete {
+
+ class TRanking: public IRanking {
+ private:
+ struct TRow {
+ TGenericName Name;
+ size_t Weight;
+ };
+
+ public:
+ TRanking(TFrequencyData frequency)
+ : Frequency_(std::move(frequency))
+ {
+ }
+
+ void CropToSortedPrefix(TVector<TGenericName>& names, size_t limit) override {
+ limit = std::min(limit, names.size());
+
+ TVector<TRow> rows;
+ rows.reserve(names.size());
+ for (TGenericName& name : names) {
+ size_t weight = Weight(name);
+ rows.emplace_back(std::move(name), weight);
+ }
+
+ ::PartialSort(
+ std::begin(rows), std::begin(rows) + limit, std::end(rows),
+ [this](const TRow& lhs, const TRow& rhs) {
+ const size_t lhs_weight = ReversedWeight(lhs.Weight);
+ const auto lhs_content = ContentView(lhs.Name);
+
+ const size_t rhs_weight = ReversedWeight(rhs.Weight);
+ const auto rhs_content = ContentView(rhs.Name);
+
+ return std::tie(lhs_weight, lhs_content) <
+ std::tie(rhs_weight, rhs_content);
+ });
+
+ names.crop(limit);
+ rows.crop(limit);
+
+ for (size_t i = 0; i < limit; ++i) {
+ names[i] = std::move(rows[i].Name);
+ }
+ }
+
+ private:
+ size_t Weight(const TGenericName& name) const {
+ return std::visit([this](const auto& name) -> size_t {
+ using T = std::decay_t<decltype(name)>;
+
+ auto identifier = ToLowerUTF8(ContentView(name));
+
+ if constexpr (std::is_same_v<T, TFunctionName>) {
+ if (auto weight = Frequency_.Functions.FindPtr(identifier)) {
+ return *weight;
+ }
+ }
+
+ if constexpr (std::is_same_v<T, TTypeName>) {
+ if (auto weight = Frequency_.Types.FindPtr(identifier)) {
+ return *weight;
+ }
+ }
+
+ return 0;
+ }, name);
+ }
+
+ static size_t ReversedWeight(size_t weight) {
+ return std::numeric_limits<size_t>::max() - weight;
+ }
+
+ const TStringBuf ContentView(const TGenericName& name Y_LIFETIME_BOUND) const {
+ return std::visit([](const auto& name) -> TStringBuf {
+ using T = std::decay_t<decltype(name)>;
+ if constexpr (std::is_base_of_v<TIndentifier, T>) {
+ return name.Indentifier;
+ }
+ }, name);
+ }
+
+ TFrequencyData Frequency_;
+ };
+
+ IRanking::TPtr MakeDefaultRanking() {
+ return IRanking::TPtr(new TRanking(LoadFrequencyData()));
+ }
+
+ IRanking::TPtr MakeDefaultRanking(TFrequencyData frequency) {
+ return IRanking::TPtr(new TRanking(std::move(frequency)));
+ }
+
+} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/name/static/ranking.h b/yql/essentials/sql/v1/complete/name/static/ranking.h
new file mode 100644
index 0000000000..e24607eded
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/name/static/ranking.h
@@ -0,0 +1,23 @@
+#pragma once
+
+#include "frequency.h"
+
+#include <yql/essentials/sql/v1/complete/name/name_service.h>
+
+#include <util/generic/hash.h>
+
+namespace NSQLComplete {
+
+ class IRanking {
+ public:
+ using TPtr = THolder<IRanking>;
+
+ virtual void CropToSortedPrefix(TVector<TGenericName>& names, size_t limit) = 0;
+ virtual ~IRanking() = default;
+ };
+
+ IRanking::TPtr MakeDefaultRanking();
+
+ IRanking::TPtr MakeDefaultRanking(TFrequencyData frequency);
+
+} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/name/static/ranking_ut.cpp b/yql/essentials/sql/v1/complete/name/static/ranking_ut.cpp
new file mode 100644
index 0000000000..fdd3659336
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/name/static/ranking_ut.cpp
@@ -0,0 +1,14 @@
+#include "ranking.h"
+
+#include <library/cpp/testing/unittest/registar.h>
+
+using namespace NSQLComplete;
+
+Y_UNIT_TEST_SUITE(RankingTests) {
+
+ Y_UNIT_TEST(FrequencyDataIsParsable) {
+ TFrequencyData data = LoadFrequencyData();
+ Y_UNUSED(data);
+ }
+
+} // Y_UNIT_TEST_SUITE(RankingTests)
diff --git a/yql/essentials/sql/v1/complete/name/static/ut/ya.make b/yql/essentials/sql/v1/complete/name/static/ut/ya.make
new file mode 100644
index 0000000000..60963b761b
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/name/static/ut/ya.make
@@ -0,0 +1,7 @@
+UNITTEST_FOR(yql/essentials/sql/v1/complete/name/static)
+
+SRCS(
+ frequency_ut.cpp
+)
+
+END()
diff --git a/yql/essentials/sql/v1/complete/name/static/ya.make b/yql/essentials/sql/v1/complete/name/static/ya.make
index bdf97e2412..639371447a 100644
--- a/yql/essentials/sql/v1/complete/name/static/ya.make
+++ b/yql/essentials/sql/v1/complete/name/static/ya.make
@@ -1,12 +1,26 @@
LIBRARY()
SRCS(
- default_name_set.cpp
+ frequency.cpp
+ json_name_set.cpp
name_service.cpp
+ ranking.cpp
)
PEERDIR(
yql/essentials/sql/v1/complete/name
+ yql/essentials/sql/v1/complete/text
+)
+
+RESOURCE(
+ yql/essentials/data/language/types.json types.json
+ yql/essentials/data/language/sql_functions.json sql_functions.json
+ yql/essentials/data/language/udfs_basic.json udfs_basic.json
+ yql/essentials/data/language/rules_corr_basic.json rules_corr_basic.json
)
END()
+
+RECURSE_FOR_TESTS(
+ ut
+)
diff --git a/yql/essentials/sql/v1/complete/sql_antlr4.cpp b/yql/essentials/sql/v1/complete/sql_antlr4.cpp
deleted file mode 100644
index 724032d612..0000000000
--- a/yql/essentials/sql/v1/complete/sql_antlr4.cpp
+++ /dev/null
@@ -1,126 +0,0 @@
-#include "sql_antlr4.h"
-
-#include <yql/essentials/sql/v1/format/sql_format.h>
-
-#include <yql/essentials/parser/antlr_ast/gen/v1_antlr4/SQLv1Antlr4Lexer.h>
-#include <yql/essentials/parser/antlr_ast/gen/v1_antlr4/SQLv1Antlr4Parser.h>
-#include <yql/essentials/parser/antlr_ast/gen/v1_ansi_antlr4/SQLv1Antlr4Lexer.h>
-#include <yql/essentials/parser/antlr_ast/gen/v1_ansi_antlr4/SQLv1Antlr4Parser.h>
-
-#define RULE_(mode, name) NALA##mode##Antlr4::SQLv1Antlr4Parser::Rule##name
-
-#define RULE(name) RULE_(Default, name)
-
-#define STATIC_ASSERT_RULE_ID_MODE_INDEPENDENT(name) \
- static_assert(RULE_(Default, name) == RULE_(Ansi, name))
-
-namespace NSQLComplete {
-
- class TSqlGrammar: public ISqlGrammar {
- public:
- TSqlGrammar(bool isAnsiLexer)
- : Vocabulary(GetVocabulary(isAnsiLexer))
- , AllTokens(ComputeAllTokens())
- , KeywordTokens(ComputeKeywordTokens())
- {
- }
-
- const antlr4::dfa::Vocabulary& GetVocabulary() const override {
- return *Vocabulary;
- }
-
- const std::unordered_set<TTokenId>& GetAllTokens() const override {
- return AllTokens;
- }
-
- const std::unordered_set<TTokenId>& GetKeywordTokens() const override {
- return KeywordTokens;
- }
-
- const TVector<TRuleId>& GetKeywordRules() const override {
- static const TVector<TRuleId> KeywordRules = {
- RULE(Keyword),
- RULE(Keyword_expr_uncompat),
- RULE(Keyword_table_uncompat),
- RULE(Keyword_select_uncompat),
- RULE(Keyword_alter_uncompat),
- RULE(Keyword_in_uncompat),
- RULE(Keyword_window_uncompat),
- RULE(Keyword_hint_uncompat),
- RULE(Keyword_as_compat),
- RULE(Keyword_compat),
- };
-
- STATIC_ASSERT_RULE_ID_MODE_INDEPENDENT(Keyword);
- STATIC_ASSERT_RULE_ID_MODE_INDEPENDENT(Keyword_expr_uncompat);
- STATIC_ASSERT_RULE_ID_MODE_INDEPENDENT(Keyword_table_uncompat);
- STATIC_ASSERT_RULE_ID_MODE_INDEPENDENT(Keyword_select_uncompat);
- STATIC_ASSERT_RULE_ID_MODE_INDEPENDENT(Keyword_alter_uncompat);
- STATIC_ASSERT_RULE_ID_MODE_INDEPENDENT(Keyword_in_uncompat);
- STATIC_ASSERT_RULE_ID_MODE_INDEPENDENT(Keyword_window_uncompat);
- STATIC_ASSERT_RULE_ID_MODE_INDEPENDENT(Keyword_hint_uncompat);
- STATIC_ASSERT_RULE_ID_MODE_INDEPENDENT(Keyword_as_compat);
- STATIC_ASSERT_RULE_ID_MODE_INDEPENDENT(Keyword_compat);
-
- return KeywordRules;
- }
-
- const TVector<TRuleId>& GetTypeNameRules() const override {
- static const TVector<TRuleId> TypeNameRules = {
- RULE(Type_name_simple),
- };
-
- STATIC_ASSERT_RULE_ID_MODE_INDEPENDENT(Type_name_simple);
-
- return TypeNameRules;
- }
-
- private:
- static const antlr4::dfa::Vocabulary* GetVocabulary(bool isAnsiLexer) {
- if (isAnsiLexer) { // Taking a reference is okay as vocabulary storage is static
- return &NALAAnsiAntlr4::SQLv1Antlr4Parser(nullptr).getVocabulary();
- }
- return &NALADefaultAntlr4::SQLv1Antlr4Parser(nullptr).getVocabulary();
- }
-
- std::unordered_set<TTokenId> ComputeAllTokens() {
- const auto& vocabulary = GetVocabulary();
-
- std::unordered_set<TTokenId> allTokens;
-
- for (size_t type = 1; type <= vocabulary.getMaxTokenType(); ++type) {
- allTokens.emplace(type);
- }
-
- return allTokens;
- }
-
- std::unordered_set<TTokenId> ComputeKeywordTokens() {
- const auto& vocabulary = GetVocabulary();
- const auto keywords = NSQLFormat::GetKeywords();
-
- auto keywordTokens = GetAllTokens();
- std::erase_if(keywordTokens, [&](TTokenId token) {
- return !keywords.contains(vocabulary.getSymbolicName(token));
- });
- keywordTokens.erase(TOKEN_EOF);
-
- return keywordTokens;
- }
-
- const antlr4::dfa::Vocabulary* Vocabulary;
- const std::unordered_set<TTokenId> AllTokens;
- const std::unordered_set<TTokenId> KeywordTokens;
- };
-
- const ISqlGrammar& GetSqlGrammar(bool isAnsiLexer) {
- const static TSqlGrammar DefaultSqlGrammar(/* isAnsiLexer = */ false);
- const static TSqlGrammar AnsiSqlGrammar(/* isAnsiLexer = */ true);
-
- if (isAnsiLexer) {
- return AnsiSqlGrammar;
- }
- return DefaultSqlGrammar;
- }
-
-} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/sql_complete.cpp b/yql/essentials/sql/v1/complete/sql_complete.cpp
index 53cb4ada42..74ddbc0415 100644
--- a/yql/essentials/sql/v1/complete/sql_complete.cpp
+++ b/yql/essentials/sql/v1/complete/sql_complete.cpp
@@ -1,9 +1,8 @@
#include "sql_complete.h"
-#include "sql_context.h"
-#include "string_util.h"
-
+#include <yql/essentials/sql/v1/complete/text/word.h>
#include <yql/essentials/sql/v1/complete/name/static/name_service.h>
+#include <yql/essentials/sql/v1/complete/syntax/local.h>
// FIXME(YQL-19747): unwanted dependency on a lexer implementation
#include <yql/essentials/sql/v1/lexer/antlr4_pure/lexer.h>
@@ -19,10 +18,9 @@ namespace NSQLComplete {
explicit TSqlCompletionEngine(
TLexerSupplier lexer,
INameService::TPtr names,
- ISqlCompletionEngine::TConfiguration configuration
- )
+ ISqlCompletionEngine::TConfiguration configuration)
: Configuration(std::move(configuration))
- , ContextInference(MakeSqlContextInference(lexer))
+ , SyntaxAnalysis(MakeLocalSyntaxAnalysis(lexer))
, Names(std::move(names))
{
}
@@ -31,7 +29,7 @@ namespace NSQLComplete {
auto prefix = input.Text.Head(input.CursorPosition);
auto completedToken = GetCompletedToken(prefix);
- auto context = ContextInference->Analyze(input);
+ auto context = SyntaxAnalysis->Analyze(input);
TVector<TCandidate> candidates;
EnrichWithKeywords(candidates, std::move(context.Keywords), completedToken);
@@ -67,7 +65,7 @@ namespace NSQLComplete {
void EnrichWithNames(
TVector<TCandidate>& candidates,
- const TCompletionContext& context,
+ const TLocalSyntaxContext& context,
const TCompletedToken& prefix) {
if (candidates.size() == Configuration.Limit) {
return;
@@ -82,6 +80,10 @@ namespace NSQLComplete {
request.Constraints.TypeName = TTypeName::TConstraints();
}
+ if (context.IsFunctionName) {
+ request.Constraints.Function = TFunctionName::TConstraints();
+ }
+
if (request.IsEmpty()) {
return;
}
@@ -96,9 +98,13 @@ namespace NSQLComplete {
for (auto& name : names) {
candidates.emplace_back(std::visit([](auto&& name) -> TCandidate {
using T = std::decay_t<decltype(name)>;
- if constexpr (std::is_base_of_v<TIndentifier, T>) {
+ if constexpr (std::is_base_of_v<TTypeName, T>) {
return {ECandidateKind::TypeName, std::move(name.Indentifier)};
}
+ if constexpr (std::is_base_of_v<TFunctionName, T>) {
+ name.Indentifier += "(";
+ return {ECandidateKind::FunctionName, std::move(name.Indentifier)};
+ }
}, std::move(name)));
}
}
@@ -112,7 +118,7 @@ namespace NSQLComplete {
}
TConfiguration Configuration;
- ISqlContextInference::TPtr ContextInference;
+ ILocalSyntaxAnalysis::TPtr SyntaxAnalysis;
INameService::TPtr Names;
};
@@ -122,10 +128,12 @@ namespace NSQLComplete {
lexers.Antlr4Pure = NSQLTranslationV1::MakeAntlr4PureLexerFactory();
lexers.Antlr4PureAnsi = NSQLTranslationV1::MakeAntlr4PureAnsiLexerFactory();
- INameService::TPtr names = MakeStaticNameService(MakeDefaultNameSet());
+ INameService::TPtr names = MakeStaticNameService(MakeDefaultNameSet(), MakeDefaultRanking());
return MakeSqlCompletionEngine([lexers = std::move(lexers)](bool ansi) {
- return NSQLTranslationV1::MakeLexer(lexers, ansi, /* antlr4 = */ true, /* pure = */ true);
+ return NSQLTranslationV1::MakeLexer(
+ lexers, ansi, /* antlr4 = */ true,
+ NSQLTranslationV1::ELexerFlavor::Pure);
}, std::move(names));
}
@@ -134,7 +142,7 @@ namespace NSQLComplete {
INameService::TPtr names,
ISqlCompletionEngine::TConfiguration configuration) {
return ISqlCompletionEngine::TPtr(
- new TSqlCompletionEngine(lexer, std::move(names), std::move(configuration)));
+ new TSqlCompletionEngine(lexer, std::move(names), std::move(configuration)));
}
} // namespace NSQLComplete
@@ -148,6 +156,9 @@ void Out<NSQLComplete::ECandidateKind>(IOutputStream& out, NSQLComplete::ECandid
case NSQLComplete::ECandidateKind::TypeName:
out << "TypeName";
break;
+ case NSQLComplete::ECandidateKind::FunctionName:
+ out << "FunctionName";
+ break;
}
}
diff --git a/yql/essentials/sql/v1/complete/sql_complete.h b/yql/essentials/sql/v1/complete/sql_complete.h
index feee2b47dd..b8a970efd8 100644
--- a/yql/essentials/sql/v1/complete/sql_complete.h
+++ b/yql/essentials/sql/v1/complete/sql_complete.h
@@ -21,6 +21,7 @@ namespace NSQLComplete {
enum class ECandidateKind {
Keyword,
TypeName,
+ FunctionName,
};
struct TCandidate {
diff --git a/yql/essentials/sql/v1/complete/sql_complete_ut.cpp b/yql/essentials/sql/v1/complete/sql_complete_ut.cpp
index 4fb6dfea58..ade78e81a7 100644
--- a/yql/essentials/sql/v1/complete/sql_complete_ut.cpp
+++ b/yql/essentials/sql/v1/complete/sql_complete_ut.cpp
@@ -1,7 +1,9 @@
#include "sql_complete.h"
#include <yql/essentials/sql/v1/complete/name/fallback/name_service.h>
+#include <yql/essentials/sql/v1/complete/name/static/frequency.h>
#include <yql/essentials/sql/v1/complete/name/static/name_service.h>
+#include <yql/essentials/sql/v1/complete/name/static/ranking.h>
#include <yql/essentials/sql/v1/lexer/lexer.h>
#include <yql/essentials/sql/v1/lexer/antlr4_pure/lexer.h>
@@ -35,6 +37,7 @@ public:
};
Y_UNIT_TEST_SUITE(SqlCompleteTests) {
+ using ECandidateKind::FunctionName;
using ECandidateKind::Keyword;
using ECandidateKind::TypeName;
@@ -43,16 +46,21 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
lexers.Antlr4Pure = NSQLTranslationV1::MakeAntlr4PureLexerFactory();
lexers.Antlr4PureAnsi = NSQLTranslationV1::MakeAntlr4PureAnsiLexerFactory();
return [lexers = std::move(lexers)](bool ansi) {
- return NSQLTranslationV1::MakeLexer(lexers, ansi, /* antlr4 = */ true, /* pure = */ true);
+ return NSQLTranslationV1::MakeLexer(
+ lexers, ansi, /* antlr4 = */ true,
+ NSQLTranslationV1::ELexerFlavor::Pure);
};
}
ISqlCompletionEngine::TPtr MakeSqlCompletionEngineUT() {
TLexerSupplier lexer = MakePureLexerSupplier();
- INameService::TPtr names = MakeStaticNameService({
+ NameSet names = {
.Types = {"Uint64"},
- });
- return MakeSqlCompletionEngine(std::move(lexer), std::move(names));
+ .Functions = {"StartsWith"},
+ };
+ auto ranking = MakeDefaultRanking({});
+ INameService::TPtr service = MakeStaticNameService(std::move(names), std::move(ranking));
+ return MakeSqlCompletionEngine(std::move(lexer), std::move(service));
}
TVector<TCandidate> Complete(ISqlCompletionEngine::TPtr& engine, TStringBuf prefix) {
@@ -69,7 +77,7 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
{Keyword, "CREATE"},
{Keyword, "DECLARE"},
{Keyword, "DEFINE"},
- {Keyword, "DELETE"},
+ {Keyword, "DELETE FROM"},
{Keyword, "DISCARD"},
{Keyword, "DO"},
{Keyword, "DROP"},
@@ -91,7 +99,7 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
{Keyword, "REVOKE"},
{Keyword, "ROLLBACK"},
{Keyword, "SELECT"},
- {Keyword, "SHOW"},
+ {Keyword, "SHOW CREATE"},
{Keyword, "UPDATE"},
{Keyword, "UPSERT"},
{Keyword, "USE"},
@@ -109,13 +117,13 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
Y_UNIT_TEST(Alter) {
TVector<TCandidate> expected = {
- {Keyword, "ASYNC"},
- {Keyword, "BACKUP"},
+ {Keyword, "ASYNC REPLICATION"},
+ {Keyword, "BACKUP COLLECTION"},
{Keyword, "DATABASE"},
{Keyword, "EXTERNAL"},
{Keyword, "GROUP"},
{Keyword, "OBJECT"},
- {Keyword, "RESOURCE"},
+ {Keyword, "RESOURCE POOL"},
{Keyword, "SEQUENCE"},
{Keyword, "TABLE"},
{Keyword, "TABLESTORE"},
@@ -130,17 +138,17 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
Y_UNIT_TEST(Create) {
TVector<TCandidate> expected = {
- {Keyword, "ASYNC"},
- {Keyword, "BACKUP"},
+ {Keyword, "ASYNC REPLICATION"},
+ {Keyword, "BACKUP COLLECTION"},
{Keyword, "EXTERNAL"},
{Keyword, "GROUP"},
{Keyword, "OBJECT"},
- {Keyword, "OR"},
- {Keyword, "RESOURCE"},
+ {Keyword, "OR REPLACE"},
+ {Keyword, "RESOURCE POOL"},
{Keyword, "TABLE"},
{Keyword, "TABLESTORE"},
- {Keyword, "TEMP"},
- {Keyword, "TEMPORARY"},
+ {Keyword, "TEMP TABLE"},
+ {Keyword, "TEMPORARY TABLE"},
{Keyword, "TOPIC"},
{Keyword, "TRANSFER"},
{Keyword, "USER"},
@@ -162,12 +170,12 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
Y_UNIT_TEST(Drop) {
TVector<TCandidate> expected = {
- {Keyword, "ASYNC"},
- {Keyword, "BACKUP"},
+ {Keyword, "ASYNC REPLICATION"},
+ {Keyword, "BACKUP COLLECTION"},
{Keyword, "EXTERNAL"},
{Keyword, "GROUP"},
{Keyword, "OBJECT"},
- {Keyword, "RESOURCE"},
+ {Keyword, "RESOURCE POOL"},
{Keyword, "TABLE"},
{Keyword, "TABLESTORE"},
{Keyword, "TOPIC"},
@@ -190,7 +198,7 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
{Keyword, "CREATE"},
{Keyword, "DECLARE"},
{Keyword, "DEFINE"},
- {Keyword, "DELETE"},
+ {Keyword, "DELETE FROM"},
{Keyword, "DISCARD"},
{Keyword, "DO"},
{Keyword, "DROP"},
@@ -205,14 +213,14 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
{Keyword, "PARALLEL"},
{Keyword, "PRAGMA"},
{Keyword, "PROCESS"},
- {Keyword, "QUERY"},
+ {Keyword, "QUERY PLAN"},
{Keyword, "REDUCE"},
{Keyword, "REPLACE"},
{Keyword, "RESTORE"},
{Keyword, "REVOKE"},
{Keyword, "ROLLBACK"},
{Keyword, "SELECT"},
- {Keyword, "SHOW"},
+ {Keyword, "SHOW CREATE"},
{Keyword, "UPDATE"},
{Keyword, "UPSERT"},
{Keyword, "USE"},
@@ -226,21 +234,21 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
Y_UNIT_TEST(Grant) {
TVector<TCandidate> expected = {
{Keyword, "ALL"},
- {Keyword, "ALTER"},
+ {Keyword, "ALTER SCHEMA"},
{Keyword, "CONNECT"},
{Keyword, "CREATE"},
- {Keyword, "DESCRIBE"},
+ {Keyword, "DESCRIBE SCHEMA"},
{Keyword, "DROP"},
- {Keyword, "ERASE"},
+ {Keyword, "ERASE ROW"},
{Keyword, "FULL"},
{Keyword, "GRANT"},
{Keyword, "INSERT"},
{Keyword, "LIST"},
{Keyword, "MANAGE"},
{Keyword, "MODIFY"},
- {Keyword, "REMOVE"},
+ {Keyword, "REMOVE SCHEMA"},
{Keyword, "SELECT"},
- {Keyword, "UPDATE"},
+ {Keyword, "UPDATE ROW"},
{Keyword, "USE"},
};
@@ -261,18 +269,6 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
Y_UNIT_TEST(Pragma) {
TVector<TCandidate> expected = {
{Keyword, "ANSI"},
- {Keyword, "CALLABLE"},
- {Keyword, "DICT"},
- {Keyword, "ENUM"},
- {Keyword, "FLOW"},
- {Keyword, "LIST"},
- {Keyword, "OPTIONAL"},
- {Keyword, "RESOURCE"},
- {Keyword, "SET"},
- {Keyword, "STRUCT"},
- {Keyword, "TAGGED"},
- {Keyword, "TUPLE"},
- {Keyword, "VARIANT"},
};
auto engine = MakeSqlCompletionEngineUT();
@@ -282,41 +278,79 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
Y_UNIT_TEST(Select) {
TVector<TCandidate> expected = {
{Keyword, "ALL"},
- {Keyword, "BITCAST"},
+ {Keyword, "BITCAST("},
{Keyword, "CALLABLE"},
{Keyword, "CASE"},
- {Keyword, "CAST"},
+ {Keyword, "CAST("},
{Keyword, "CURRENT_DATE"},
{Keyword, "CURRENT_TIME"},
{Keyword, "CURRENT_TIMESTAMP"},
- {Keyword, "DICT"},
+ {Keyword, "DICT<"},
{Keyword, "DISTINCT"},
{Keyword, "EMPTY_ACTION"},
{Keyword, "ENUM"},
- {Keyword, "EXISTS"},
+ {Keyword, "EXISTS("},
{Keyword, "FALSE"},
- {Keyword, "FLOW"},
- {Keyword, "JSON_EXISTS"},
- {Keyword, "JSON_QUERY"},
- {Keyword, "JSON_VALUE"},
- {Keyword, "LIST"},
+ {Keyword, "FLOW<"},
+ {Keyword, "JSON_EXISTS("},
+ {Keyword, "JSON_QUERY("},
+ {Keyword, "JSON_VALUE("},
+ {Keyword, "LIST<"},
{Keyword, "NOT"},
{Keyword, "NULL"},
- {Keyword, "OPTIONAL"},
- {Keyword, "RESOURCE"},
- {Keyword, "SET"},
+ {Keyword, "OPTIONAL<"},
+ {Keyword, "RESOURCE<"},
+ {Keyword, "SET<"},
{Keyword, "STREAM"},
{Keyword, "STRUCT"},
- {Keyword, "TAGGED"},
+ {Keyword, "TAGGED<"},
{Keyword, "TRUE"},
{Keyword, "TUPLE"},
{Keyword, "VARIANT"},
+ {FunctionName, "StartsWith("},
};
auto engine = MakeSqlCompletionEngineUT();
UNIT_ASSERT_VALUES_EQUAL(Complete(engine, {"SELECT "}), expected);
}
+ Y_UNIT_TEST(SelectWhere) {
+ TVector<TCandidate> expected = {
+ {Keyword, "BITCAST("},
+ {Keyword, "CALLABLE"},
+ {Keyword, "CASE"},
+ {Keyword, "CAST("},
+ {Keyword, "CURRENT_DATE"},
+ {Keyword, "CURRENT_TIME"},
+ {Keyword, "CURRENT_TIMESTAMP"},
+ {Keyword, "DICT<"},
+ {Keyword, "EMPTY_ACTION"},
+ {Keyword, "ENUM"},
+ {Keyword, "EXISTS("},
+ {Keyword, "FALSE"},
+ {Keyword, "FLOW<"},
+ {Keyword, "JSON_EXISTS("},
+ {Keyword, "JSON_QUERY("},
+ {Keyword, "JSON_VALUE("},
+ {Keyword, "LIST<"},
+ {Keyword, "NOT"},
+ {Keyword, "NULL"},
+ {Keyword, "OPTIONAL<"},
+ {Keyword, "RESOURCE<"},
+ {Keyword, "SET<"},
+ {Keyword, "STREAM<"},
+ {Keyword, "STRUCT"},
+ {Keyword, "TAGGED<"},
+ {Keyword, "TRUE"},
+ {Keyword, "TUPLE"},
+ {Keyword, "VARIANT"},
+ {FunctionName, "StartsWith("},
+ };
+
+ auto engine = MakeSqlCompletionEngineUT();
+ UNIT_ASSERT_VALUES_EQUAL(Complete(engine, {"SELECT * FROM a WHERE "}), expected);
+ }
+
Y_UNIT_TEST(Upsert) {
TVector<TCandidate> expected = {
{Keyword, "INTO"},
@@ -329,20 +363,20 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
Y_UNIT_TEST(TypeName) {
TVector<TCandidate> expected = {
- {Keyword, "CALLABLE"},
- {Keyword, "DECIMAL"},
- {Keyword, "DICT"},
- {Keyword, "ENUM"},
- {Keyword, "FLOW"},
- {Keyword, "LIST"},
- {Keyword, "OPTIONAL"},
- {Keyword, "RESOURCE"},
- {Keyword, "SET"},
- {Keyword, "STREAM"},
+ {Keyword, "CALLABLE<("},
+ {Keyword, "DECIMAL("},
+ {Keyword, "DICT<"},
+ {Keyword, "ENUM<"},
+ {Keyword, "FLOW<"},
+ {Keyword, "LIST<"},
+ {Keyword, "OPTIONAL<"},
+ {Keyword, "RESOURCE<"},
+ {Keyword, "SET<"},
+ {Keyword, "STREAM<"},
{Keyword, "STRUCT"},
- {Keyword, "TAGGED"},
+ {Keyword, "TAGGED<"},
{Keyword, "TUPLE"},
- {Keyword, "VARIANT"},
+ {Keyword, "VARIANT<"},
{TypeName, "Uint64"},
};
@@ -360,7 +394,7 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
Y_UNIT_TEST(WordBreak) {
auto engine = MakeSqlCompletionEngineUT();
- UNIT_ASSERT_VALUES_EQUAL(Complete(engine, {"SELECT ("}).size(), 28);
+ UNIT_ASSERT_VALUES_EQUAL(Complete(engine, {"SELECT ("}).size(), 29);
UNIT_ASSERT_VALUES_EQUAL(Complete(engine, {"SELECT (1)"}).size(), 30);
UNIT_ASSERT_VALUES_EQUAL(Complete(engine, {"SELECT 1;"}).size(), 35);
}
@@ -396,23 +430,24 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
Y_UNIT_TEST(InvalidStatementsRecovery) {
auto engine = MakeSqlCompletionEngineUT();
- UNIT_ASSERT_VALUES_EQUAL(Complete(engine, "select select; ").size(), 35);
- UNIT_ASSERT_VALUES_EQUAL(Complete(engine, "select select;").size(), 35);
+ UNIT_ASSERT_GE(Complete(engine, "select select; ").size(), 35);
+ UNIT_ASSERT_GE(Complete(engine, "select select;").size(), 35);
UNIT_ASSERT_VALUES_EQUAL_C(Complete(engine, "!;").size(), 0, "Lexer failing");
}
- Y_UNIT_TEST(DefaultNameSet) {
+ Y_UNIT_TEST(DefaultNameService) {
auto set = MakeDefaultNameSet();
- auto service = MakeStaticNameService(std::move(set));
+ auto service = MakeStaticNameService(std::move(set), MakeDefaultRanking());
auto engine = MakeSqlCompletionEngine(MakePureLexerSupplier(), std::move(service));
{
TVector<TCandidate> expected = {
- {TypeName, "Uint16"},
- {TypeName, "Uint32"},
{TypeName, "Uint64"},
- {TypeName, "Uint8"},
+ {TypeName, "Uint32"},
{TypeName, "Utf8"},
{TypeName, "Uuid"},
+ {TypeName, "Uint8"},
+ {TypeName, "Unit"},
+ {TypeName, "Uint16"},
};
UNIT_ASSERT_VALUES_EQUAL(Complete(engine, {"SELECT OPTIONAL<U"}), expected);
}
@@ -439,13 +474,62 @@ Y_UNIT_TEST_SUITE(SqlCompleteTests) {
auto silent = MakeHolder<TSilentNameService>();
auto primary = MakeDeadlinedNameService(std::move(silent), TDuration::MilliSeconds(1));
- auto standby = MakeStaticNameService(MakeDefaultNameSet());
+ auto standby = MakeStaticNameService(MakeDefaultNameSet(), MakeDefaultRanking({}));
auto fallback = MakeFallbackNameService(std::move(primary), std::move(standby));
auto engine = MakeSqlCompletionEngine(MakePureLexerSupplier(), std::move(fallback));
- UNIT_ASSERT_VALUES_EQUAL(Complete(engine, {"SELECT CAST (1 AS U"}).size(), 6);
- UNIT_ASSERT_VALUES_EQUAL(Complete(engine, {"SELECT CAST (1 AS "}).size(), 47);
+ UNIT_ASSERT_GE(Complete(engine, {"SELECT CAST (1 AS U"}).size(), 6);
+ UNIT_ASSERT_GE(Complete(engine, {"SELECT CAST (1 AS "}).size(), 47);
+ UNIT_ASSERT_GE(Complete(engine, {"SELECT "}).size(), 55);
+ }
+
+ Y_UNIT_TEST(Ranking) {
+ TFrequencyData frequency = {
+ .Types = {
+ {"int32", 128},
+ {"int64", 64},
+ {"interval", 32},
+ {"interval64", 32},
+ },
+ .Functions = {
+ {"min", 128},
+ {"max", 64},
+ {"maxof", 64},
+ {"minby", 32},
+ {"maxby", 32},
+ },
+ };
+ auto service = MakeStaticNameService(MakeDefaultNameSet(), MakeDefaultRanking(frequency));
+ auto engine = MakeSqlCompletionEngine(MakePureLexerSupplier(), std::move(service));
+ {
+ TVector<TCandidate> expected = {
+ {TypeName, "Int32"},
+ {TypeName, "Int64"},
+ {TypeName, "Interval"},
+ {TypeName, "Interval64"},
+ {TypeName, "Int16"},
+ {TypeName, "Int8"},
+ };
+ UNIT_ASSERT_VALUES_EQUAL(Complete(engine, {"SELECT OPTIONAL<I"}), expected);
+ }
+ {
+ TVector<TCandidate> expectedPrefix = {
+ {FunctionName, "Min("},
+ {FunctionName, "Max("},
+ {FunctionName, "MaxOf("},
+ {FunctionName, "MaxBy("},
+ {FunctionName, "MinBy("},
+ {FunctionName, "Math::Abs("},
+ {FunctionName, "Math::Acos("},
+ {FunctionName, "Math::Asin("},
+ };
+
+ auto actualPrefix = Complete(engine, {"SELECT m"});
+ actualPrefix.crop(expectedPrefix.size());
+
+ UNIT_ASSERT_VALUES_EQUAL(actualPrefix, expectedPrefix);
+ }
}
} // Y_UNIT_TEST_SUITE(SqlCompleteTests)
diff --git a/yql/essentials/sql/v1/complete/sql_context.h b/yql/essentials/sql/v1/complete/sql_context.h
deleted file mode 100644
index 5f370bafe3..0000000000
--- a/yql/essentials/sql/v1/complete/sql_context.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#pragma once
-
-#include "sql_complete.h"
-
-#include <yql/essentials/sql/v1/lexer/lexer.h>
-
-#include <util/generic/string.h>
-
-namespace NSQLComplete {
-
- struct TCompletionContext {
- TVector<TString> Keywords;
- bool IsTypeName;
- };
-
- class ISqlContextInference {
- public:
- using TPtr = THolder<ISqlContextInference>;
-
- virtual TCompletionContext Analyze(TCompletionInput input) = 0;
- virtual ~ISqlContextInference() = default;
- };
-
- ISqlContextInference::TPtr MakeSqlContextInference(TLexerSupplier lexer);
-
-} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/string_util.h b/yql/essentials/sql/v1/complete/string_util.h
index bafc578d82..f069176460 100644
--- a/yql/essentials/sql/v1/complete/string_util.h
+++ b/yql/essentials/sql/v1/complete/string_util.h
@@ -1,17 +1,4 @@
#pragma once
-#include <util/charset/unidata.h>
-
-#include <string_view>
-
-namespace NSQLComplete {
-
- static const char WordBreakCharacters[] = " \t\v\f\a\b\r\n`~!@#$%^&*-=+[](){}\\|;:'\".,<>/?";
-
- bool IsWordBoundary(char ch);
-
- size_t LastWordIndex(TStringBuf text);
-
- TStringBuf LastWord(TStringBuf text);
-
-} // namespace NSQLComplete
+// TODO(YQL-19747): Migrate YDB CLI to yql/essentials/sql/v1/complete/text/word.h
+#include <yql/essentials/sql/v1/complete/text/word.h>
diff --git a/yql/essentials/sql/v1/complete/sql_syntax.cpp b/yql/essentials/sql/v1/complete/syntax/ansi.cpp
index ba5a08d371..7ca153e44c 100644
--- a/yql/essentials/sql/v1/complete/sql_syntax.cpp
+++ b/yql/essentials/sql/v1/complete/syntax/ansi.cpp
@@ -1,4 +1,4 @@
-#include "sql_syntax.h"
+#include "ansi.h"
#include <yql/essentials/public/issue/yql_issue.h>
#include <yql/essentials/sql/settings/translation_settings.h>
diff --git a/yql/essentials/sql/v1/complete/sql_syntax.h b/yql/essentials/sql/v1/complete/syntax/ansi.h
index f03cbc9fb9..f03cbc9fb9 100644
--- a/yql/essentials/sql/v1/complete/sql_syntax.h
+++ b/yql/essentials/sql/v1/complete/syntax/ansi.h
diff --git a/yql/essentials/sql/v1/complete/syntax/grammar.cpp b/yql/essentials/sql/v1/complete/syntax/grammar.cpp
new file mode 100644
index 0000000000..b4f64630f7
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/syntax/grammar.cpp
@@ -0,0 +1,86 @@
+#include "grammar.h"
+
+#include <yql/essentials/sql/v1/reflect/sql_reflect.h>
+
+namespace NSQLComplete {
+
+ class TSqlGrammar: public ISqlGrammar {
+ public:
+ TSqlGrammar(const NSQLReflect::TLexerGrammar& grammar)
+ : Vocabulary(GetVocabularyP())
+ , AllTokens(ComputeAllTokens())
+ , KeywordTokens(ComputeKeywordTokens(grammar))
+ , PunctuationTokens(ComputePunctuationTokens(grammar))
+ {
+ }
+
+ const antlr4::dfa::Vocabulary& GetVocabulary() const override {
+ return *Vocabulary;
+ }
+
+ const std::unordered_set<TTokenId>& GetAllTokens() const override {
+ return AllTokens;
+ }
+
+ const std::unordered_set<TTokenId>& GetKeywordTokens() const override {
+ return KeywordTokens;
+ }
+
+ const std::unordered_set<TTokenId>& GetPunctuationTokens() const override {
+ return PunctuationTokens;
+ }
+
+ private:
+ static const antlr4::dfa::Vocabulary* GetVocabularyP() {
+ return &NALADefaultAntlr4::SQLv1Antlr4Parser(nullptr).getVocabulary();
+ }
+
+ std::unordered_set<TTokenId> ComputeAllTokens() {
+ const auto& vocabulary = GetVocabulary();
+
+ std::unordered_set<TTokenId> allTokens;
+
+ for (size_t type = 1; type <= vocabulary.getMaxTokenType(); ++type) {
+ allTokens.emplace(type);
+ }
+
+ return allTokens;
+ }
+
+ std::unordered_set<TTokenId> ComputeKeywordTokens(
+ const NSQLReflect::TLexerGrammar& grammar) {
+ const auto& vocabulary = GetVocabulary();
+
+ auto keywordTokens = GetAllTokens();
+ std::erase_if(keywordTokens, [&](TTokenId token) {
+ return !grammar.KeywordNames.contains(vocabulary.getSymbolicName(token));
+ });
+ keywordTokens.erase(TOKEN_EOF);
+
+ return keywordTokens;
+ }
+
+ std::unordered_set<TTokenId> ComputePunctuationTokens(
+ const NSQLReflect::TLexerGrammar& grammar) {
+ const auto& vocabulary = GetVocabulary();
+
+ auto punctuationTokens = GetAllTokens();
+ std::erase_if(punctuationTokens, [&](TTokenId token) {
+ return !grammar.PunctuationNames.contains(vocabulary.getSymbolicName(token));
+ });
+
+ return punctuationTokens;
+ }
+
+ const antlr4::dfa::Vocabulary* Vocabulary;
+ const std::unordered_set<TTokenId> AllTokens;
+ const std::unordered_set<TTokenId> KeywordTokens;
+ const std::unordered_set<TTokenId> PunctuationTokens;
+ };
+
+ const ISqlGrammar& GetSqlGrammar() {
+ const static TSqlGrammar DefaultSqlGrammar(NSQLReflect::LoadLexerGrammar());
+ return DefaultSqlGrammar;
+ }
+
+} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/sql_antlr4.h b/yql/essentials/sql/v1/complete/syntax/grammar.h
index 1cf6424cc8..a349bd4a3d 100644
--- a/yql/essentials/sql/v1/complete/sql_antlr4.h
+++ b/yql/essentials/sql/v1/complete/syntax/grammar.h
@@ -1,29 +1,30 @@
#pragma once
-#include <contrib/libs/antlr4_cpp_runtime/src/Token.h>
-#include <contrib/libs/antlr4_cpp_runtime/src/Vocabulary.h>
+#include <yql/essentials/sql/v1/complete/antlr4/defs.h>
-#include <util/generic/vector.h>
+#include <contrib/libs/antlr4_cpp_runtime/src/Vocabulary.h>
#include <unordered_set>
-namespace NSQLComplete {
+#ifdef TOKEN_QUERY // Conflict with the winnt.h
+ #undef TOKEN_QUERY
+#endif
+#include <yql/essentials/parser/antlr_ast/gen/v1_antlr4/SQLv1Antlr4Parser.h>
- using TTokenId = size_t;
- using TRuleId = size_t;
+#define RULE_(mode, name) NALA##mode##Antlr4::SQLv1Antlr4Parser::Rule##name
+#define RULE(name) RULE_(Default, name)
- constexpr TTokenId TOKEN_EOF = antlr4::Token::EOF;
+namespace NSQLComplete {
class ISqlGrammar {
public:
virtual const antlr4::dfa::Vocabulary& GetVocabulary() const = 0;
virtual const std::unordered_set<TTokenId>& GetAllTokens() const = 0;
virtual const std::unordered_set<TTokenId>& GetKeywordTokens() const = 0;
- virtual const TVector<TRuleId>& GetKeywordRules() const = 0;
- virtual const TVector<TRuleId>& GetTypeNameRules() const = 0;
+ virtual const std::unordered_set<TTokenId>& GetPunctuationTokens() const = 0;
virtual ~ISqlGrammar() = default;
};
- const ISqlGrammar& GetSqlGrammar(bool isAnsiLexer);
+ const ISqlGrammar& GetSqlGrammar();
} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/syntax/grammar_ut.cpp b/yql/essentials/sql/v1/complete/syntax/grammar_ut.cpp
new file mode 100644
index 0000000000..50fb6b4305
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/syntax/grammar_ut.cpp
@@ -0,0 +1,37 @@
+#include <yql/essentials/parser/antlr_ast/gen/v1_antlr4/SQLv1Antlr4Parser.h>
+#include <yql/essentials/parser/antlr_ast/gen/v1_ansi_antlr4/SQLv1Antlr4Parser.h>
+
+#include <library/cpp/testing/unittest/registar.h>
+
+Y_UNIT_TEST_SUITE(RuleTests) {
+ THolder<antlr4::Parser> GetDummyParser(bool ansi) {
+ if (ansi) {
+ return MakeHolder<NALAAnsiAntlr4::SQLv1Antlr4Parser>(nullptr);
+ }
+ return MakeHolder<NALADefaultAntlr4::SQLv1Antlr4Parser>(nullptr);
+ }
+
+ Y_UNIT_TEST(RuleIndexModeIndependent) {
+ auto defaultRules = GetDummyParser(/* ansi = */ false)->getRuleIndexMap();
+ auto ansiRules = GetDummyParser(/* ansi = */ true)->getRuleIndexMap();
+
+ UNIT_ASSERT_EQUAL(defaultRules, ansiRules);
+ }
+
+ Y_UNIT_TEST(TokenTypeModeIndependent) {
+ auto defaultVocab = GetDummyParser(/* ansi = */ false)->getVocabulary();
+ auto ansiVocab = GetDummyParser(/* ansi = */ true)->getVocabulary();
+
+ UNIT_ASSERT_VALUES_EQUAL(defaultVocab.getMaxTokenType(), ansiVocab.getMaxTokenType());
+
+ for (size_t type = 0; type <= defaultVocab.getMaxTokenType(); ++type) {
+ UNIT_ASSERT_VALUES_EQUAL(
+ defaultVocab.getSymbolicName(type), ansiVocab.getSymbolicName(type));
+ UNIT_ASSERT_VALUES_EQUAL(
+ defaultVocab.getDisplayName(type), ansiVocab.getDisplayName(type));
+ UNIT_ASSERT_VALUES_EQUAL(
+ defaultVocab.getLiteralName(type), ansiVocab.getLiteralName(type));
+ }
+ }
+
+} // Y_UNIT_TEST_SUITE(RuleTests)
diff --git a/yql/essentials/sql/v1/complete/sql_context.cpp b/yql/essentials/sql/v1/complete/syntax/local.cpp
index 4cc809479b..cac43e5a32 100644
--- a/yql/essentials/sql/v1/complete/sql_context.cpp
+++ b/yql/essentials/sql/v1/complete/syntax/local.cpp
@@ -1,7 +1,11 @@
-#include "sql_context.h"
+#include "local.h"
-#include "c3_engine.h"
-#include "sql_syntax.h"
+#include "ansi.h"
+#include "parser_call_stack.h"
+#include "grammar.h"
+
+#include <yql/essentials/sql/v1/complete/antlr4/c3i.h>
+#include <yql/essentials/sql/v1/complete/antlr4/c3t.h>
#include <yql/essentials/core/issue/yql_issue.h>
@@ -9,7 +13,7 @@
#include <util/stream/output.h>
#ifdef TOKEN_QUERY // Conflict with the winnt.h
-#undef TOKEN_QUERY
+ #undef TOKEN_QUERY
#endif
#include <yql/essentials/parser/antlr_ast/gen/v1_antlr4/SQLv1Antlr4Lexer.h>
#include <yql/essentials/parser/antlr_ast/gen/v1_antlr4/SQLv1Antlr4Parser.h>
@@ -19,7 +23,7 @@
namespace NSQLComplete {
template <bool IsAnsiLexer>
- class TSpecializedSqlContextInference: public ISqlContextInference {
+ class TSpecializedLocalSyntaxAnalysis: public ILocalSyntaxAnalysis {
private:
using TDefaultYQLGrammar = TAntlrGrammar<
NALADefaultAntlr4::SQLv1Antlr4Lexer,
@@ -35,14 +39,14 @@ namespace NSQLComplete {
TDefaultYQLGrammar>;
public:
- explicit TSpecializedSqlContextInference(TLexerSupplier lexer)
- : Grammar(&GetSqlGrammar(IsAnsiLexer))
+ explicit TSpecializedLocalSyntaxAnalysis(TLexerSupplier lexer)
+ : Grammar(&GetSqlGrammar())
, Lexer_(lexer(/* ansi = */ IsAnsiLexer))
, C3(ComputeC3Config())
{
}
- TCompletionContext Analyze(TCompletionInput input) override {
+ TLocalSyntaxContext Analyze(TCompletionInput input) override {
TStringBuf prefix;
if (!GetC3Prefix(input, &prefix)) {
return {};
@@ -52,6 +56,7 @@ namespace NSQLComplete {
return {
.Keywords = SiftedKeywords(candidates),
.IsTypeName = IsTypeNameMatched(candidates),
+ .IsFunctionName = IsFunctionNameMatched(candidates),
};
}
@@ -68,21 +73,14 @@ namespace NSQLComplete {
for (auto keywordToken : Grammar->GetKeywordTokens()) {
ignoredTokens.erase(keywordToken);
}
+ for (auto punctuationToken : Grammar->GetPunctuationTokens()) {
+ ignoredTokens.erase(punctuationToken);
+ }
return ignoredTokens;
}
std::unordered_set<TRuleId> ComputePreferredRules() {
- const auto& keywordRules = Grammar->GetKeywordRules();
- const auto& typeNameRules = Grammar->GetTypeNameRules();
-
- std::unordered_set<TRuleId> preferredRules;
-
- // Excludes tokens obtained from keyword rules
- preferredRules.insert(std::begin(keywordRules), std::end(keywordRules));
-
- preferredRules.insert(std::begin(typeNameRules), std::end(typeNameRules));
-
- return preferredRules;
+ return GetC3PreferredRules();
}
bool GetC3Prefix(TCompletionInput input, TStringBuf* prefix) {
@@ -112,17 +110,37 @@ namespace NSQLComplete {
TVector<TString> keywords;
for (const auto& token : candidates.Tokens) {
if (keywordTokens.contains(token.Number)) {
- keywords.emplace_back(vocabulary.getDisplayName(token.Number));
+ keywords.emplace_back(Display(vocabulary, token.Number));
+ for (auto following : token.Following) {
+ if (keywordTokens.contains(following)) {
+ keywords.back() += " ";
+ }
+ keywords.back() += Display(vocabulary, following);
+ }
}
}
return keywords;
}
+ std::string Display(const antlr4::dfa::Vocabulary& vocabulary, TTokenId tokenType) {
+ auto name = vocabulary.getDisplayName(tokenType);
+ if (2 <= name.length() && name.starts_with('\'') && name.ends_with('\'')) {
+ name.erase(static_cast<std::string::size_type>(0), 1);
+ name.pop_back();
+ }
+ return name;
+ }
+
bool IsTypeNameMatched(const TC3Candidates& candidates) {
- const auto& typeNameRules = Grammar->GetTypeNameRules();
- return FindIf(candidates.Rules, [&](const TMatchedRule& rule) {
- return Find(typeNameRules, rule.Index) != std::end(typeNameRules);
- }) != std::end(candidates.Rules);
+ return AnyOf(candidates.Rules, [&](const TMatchedRule& rule) {
+ return IsLikelyTypeStack(rule.ParserCallStack);
+ });
+ }
+
+ bool IsFunctionNameMatched(const TC3Candidates& candidates) {
+ return AnyOf(candidates.Rules, [&](const TMatchedRule& rule) {
+ return IsLikelyFunctionStack(rule.ParserCallStack);
+ });
}
const ISqlGrammar* Grammar;
@@ -130,34 +148,34 @@ namespace NSQLComplete {
TC3Engine<G> C3;
};
- class TSqlContextInference: public ISqlContextInference {
+ class TLocalSyntaxAnalysis: public ILocalSyntaxAnalysis {
public:
- explicit TSqlContextInference(TLexerSupplier lexer)
+ explicit TLocalSyntaxAnalysis(TLexerSupplier lexer)
: DefaultEngine(lexer)
, AnsiEngine(lexer)
{
}
- TCompletionContext Analyze(TCompletionInput input) override {
+ TLocalSyntaxContext Analyze(TCompletionInput input) override {
auto isAnsiLexer = IsAnsiQuery(TString(input.Text));
auto& engine = GetSpecializedEngine(isAnsiLexer);
return engine.Analyze(std::move(input));
}
private:
- ISqlContextInference& GetSpecializedEngine(bool isAnsiLexer) {
+ ILocalSyntaxAnalysis& GetSpecializedEngine(bool isAnsiLexer) {
if (isAnsiLexer) {
return AnsiEngine;
}
return DefaultEngine;
}
- TSpecializedSqlContextInference</* IsAnsiLexer = */ false> DefaultEngine;
- TSpecializedSqlContextInference</* IsAnsiLexer = */ true> AnsiEngine;
+ TSpecializedLocalSyntaxAnalysis</* IsAnsiLexer = */ false> DefaultEngine;
+ TSpecializedLocalSyntaxAnalysis</* IsAnsiLexer = */ true> AnsiEngine;
};
- ISqlContextInference::TPtr MakeSqlContextInference(TLexerSupplier lexer) {
- return TSqlContextInference::TPtr(new TSqlContextInference(lexer));
+ ILocalSyntaxAnalysis::TPtr MakeLocalSyntaxAnalysis(TLexerSupplier lexer) {
+ return TLocalSyntaxAnalysis::TPtr(new TLocalSyntaxAnalysis(lexer));
}
} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/syntax/local.h b/yql/essentials/sql/v1/complete/syntax/local.h
new file mode 100644
index 0000000000..79984d00e2
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/syntax/local.h
@@ -0,0 +1,28 @@
+#pragma once
+
+#include <yql/essentials/sql/v1/complete/sql_complete.h>
+
+#include <yql/essentials/sql/v1/lexer/lexer.h>
+
+#include <util/generic/string.h>
+#include <util/generic/vector.h>
+
+namespace NSQLComplete {
+
+ struct TLocalSyntaxContext {
+ TVector<TString> Keywords;
+ bool IsTypeName;
+ bool IsFunctionName;
+ };
+
+ class ILocalSyntaxAnalysis {
+ public:
+ using TPtr = THolder<ILocalSyntaxAnalysis>;
+
+ virtual TLocalSyntaxContext Analyze(TCompletionInput input) = 0;
+ virtual ~ILocalSyntaxAnalysis() = default;
+ };
+
+ ILocalSyntaxAnalysis::TPtr MakeLocalSyntaxAnalysis(TLexerSupplier lexer);
+
+} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/syntax/parser_call_stack.cpp b/yql/essentials/sql/v1/complete/syntax/parser_call_stack.cpp
new file mode 100644
index 0000000000..855d9af160
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/syntax/parser_call_stack.cpp
@@ -0,0 +1,65 @@
+#include "parser_call_stack.h"
+
+#include "grammar.h"
+
+#include <util/generic/vector.h>
+#include <util/generic/algorithm.h>
+#include <util/generic/yexception.h>
+
+namespace NSQLComplete {
+
+ const TVector<TRuleId> KeywordRules = {
+ RULE(Keyword),
+ RULE(Keyword_expr_uncompat),
+ RULE(Keyword_table_uncompat),
+ RULE(Keyword_select_uncompat),
+ RULE(Keyword_alter_uncompat),
+ RULE(Keyword_in_uncompat),
+ RULE(Keyword_window_uncompat),
+ RULE(Keyword_hint_uncompat),
+ RULE(Keyword_as_compat),
+ RULE(Keyword_compat),
+ };
+
+ const TVector<TRuleId> TypeNameRules = {
+ RULE(Type_name_simple),
+ };
+
+ const TVector<TRuleId> FunctionNameRules = {
+ RULE(Id_expr),
+ RULE(An_id_or_type),
+ RULE(Id_or_type),
+ };
+
+ bool EndsWith(const TParserCallStack& suffix, const TParserCallStack& stack) {
+ if (stack.size() < suffix.size()) {
+ return false;
+ }
+ const size_t prefixSize = stack.size() - suffix.size();
+ return Equal(std::begin(stack) + prefixSize, std::end(stack), std::begin(suffix));
+ }
+
+ bool ContainsRule(TRuleId rule, const TParserCallStack& stack) {
+ return Find(stack, rule) != std::end(stack);
+ }
+
+ bool IsLikelyTypeStack(const TParserCallStack& stack) {
+ return EndsWith({RULE(Type_name_simple)}, stack);
+ }
+
+ bool IsLikelyFunctionStack(const TParserCallStack& stack) {
+ return EndsWith({RULE(Unary_casual_subexpr), RULE(Id_expr)}, stack) ||
+ EndsWith({RULE(Unary_casual_subexpr),
+ RULE(Atom_expr),
+ RULE(An_id_or_type)}, stack);
+ }
+
+ std::unordered_set<TRuleId> GetC3PreferredRules() {
+ std::unordered_set<TRuleId> preferredRules;
+ preferredRules.insert(std::begin(KeywordRules), std::end(KeywordRules));
+ preferredRules.insert(std::begin(TypeNameRules), std::end(TypeNameRules));
+ preferredRules.insert(std::begin(FunctionNameRules), std::end(FunctionNameRules));
+ return preferredRules;
+ }
+
+} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/syntax/parser_call_stack.h b/yql/essentials/sql/v1/complete/syntax/parser_call_stack.h
new file mode 100644
index 0000000000..756586988d
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/syntax/parser_call_stack.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#include <yql/essentials/sql/v1/complete/antlr4/defs.h>
+
+namespace NSQLComplete {
+
+ bool IsLikelyTypeStack(const TParserCallStack& stack);
+
+ bool IsLikelyFunctionStack(const TParserCallStack& stack);
+
+ std::unordered_set<TRuleId> GetC3PreferredRules();
+
+} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/syntax/ut/ya.make b/yql/essentials/sql/v1/complete/syntax/ut/ya.make
new file mode 100644
index 0000000000..e070185af9
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/syntax/ut/ya.make
@@ -0,0 +1,7 @@
+UNITTEST_FOR(yql/essentials/sql/v1/complete/syntax)
+
+SRCS(
+ grammar_ut.cpp
+)
+
+END()
diff --git a/yql/essentials/sql/v1/complete/syntax/ya.make b/yql/essentials/sql/v1/complete/syntax/ya.make
new file mode 100644
index 0000000000..24fd94a952
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/syntax/ya.make
@@ -0,0 +1,30 @@
+LIBRARY()
+
+SRCS(
+ ansi.cpp
+ grammar.cpp
+ local.cpp
+ parser_call_stack.cpp
+)
+
+ADDINCL(
+ yql/essentials/sql/v1/complete
+)
+
+PEERDIR(
+ yql/essentials/core/issue
+
+ yql/essentials/parser/antlr_ast/gen/v1_ansi_antlr4
+ yql/essentials/parser/antlr_ast/gen/v1_antlr4
+ yql/essentials/parser/lexer_common
+
+ yql/essentials/sql/settings
+ yql/essentials/sql/v1/lexer
+ yql/essentials/sql/v1/reflect
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ ut
+)
diff --git a/yql/essentials/sql/v1/complete/text/ut/ya.make b/yql/essentials/sql/v1/complete/text/ut/ya.make
new file mode 100644
index 0000000000..3c023ccfb4
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/text/ut/ya.make
@@ -0,0 +1,7 @@
+UNITTEST_FOR(yql/essentials/sql/v1/complete/text)
+
+SRCS(
+ word_ut.cpp
+)
+
+END()
diff --git a/yql/essentials/sql/v1/complete/string_util.cpp b/yql/essentials/sql/v1/complete/text/word.cpp
index 12a6701065..0468f62b03 100644
--- a/yql/essentials/sql/v1/complete/string_util.cpp
+++ b/yql/essentials/sql/v1/complete/text/word.cpp
@@ -1,4 +1,4 @@
-#include "string_util.h"
+#include "word.h"
#include <util/generic/strbuf.h>
diff --git a/yql/essentials/sql/v1/complete/text/word.h b/yql/essentials/sql/v1/complete/text/word.h
new file mode 100644
index 0000000000..e56f023940
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/text/word.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#include <util/charset/unidata.h>
+
+namespace NSQLComplete {
+
+ static const char WordBreakCharacters[] = " \t\v\f\a\b\r\n`~!@#$%^&*-=+[](){}\\|;:'\".,<>/?";
+
+ bool IsWordBoundary(char ch);
+
+ size_t LastWordIndex(TStringBuf text);
+
+ TStringBuf LastWord(TStringBuf text);
+
+} // namespace NSQLComplete
diff --git a/yql/essentials/sql/v1/complete/string_util_ut.cpp b/yql/essentials/sql/v1/complete/text/word_ut.cpp
index ca3ed546a3..0eff931b1a 100644
--- a/yql/essentials/sql/v1/complete/string_util_ut.cpp
+++ b/yql/essentials/sql/v1/complete/text/word_ut.cpp
@@ -1,10 +1,10 @@
-#include "string_util.h"
+#include "word.h"
#include <library/cpp/testing/unittest/registar.h>
using namespace NSQLComplete;
-Y_UNIT_TEST_SUITE(StringUtilTest) {
+Y_UNIT_TEST_SUITE(WordTest) {
Y_UNIT_TEST(Blank) {
UNIT_ASSERT_VALUES_EQUAL(LastWord(""), "");
UNIT_ASSERT_VALUES_EQUAL(LastWord(" "), "");
@@ -18,4 +18,4 @@ Y_UNIT_TEST_SUITE(StringUtilTest) {
UNIT_ASSERT_VALUES_EQUAL(LastWord("two"), "two");
UNIT_ASSERT_VALUES_EQUAL(LastWord("one two"), "two");
}
-} // Y_UNIT_TEST_SUITE(StringUtilTest)
+} // Y_UNIT_TEST_SUITE(WordTest)
diff --git a/yql/essentials/sql/v1/complete/text/ya.make b/yql/essentials/sql/v1/complete/text/ya.make
new file mode 100644
index 0000000000..030e69172a
--- /dev/null
+++ b/yql/essentials/sql/v1/complete/text/ya.make
@@ -0,0 +1,11 @@
+LIBRARY()
+
+SRCS(
+ word.cpp
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ ut
+)
diff --git a/yql/essentials/sql/v1/complete/ut/ya.make b/yql/essentials/sql/v1/complete/ut/ya.make
index 7b1cde8bfb..0a5d13dec4 100644
--- a/yql/essentials/sql/v1/complete/ut/ya.make
+++ b/yql/essentials/sql/v1/complete/ut/ya.make
@@ -2,7 +2,6 @@ UNITTEST_FOR(yql/essentials/sql/v1/complete)
SRCS(
sql_complete_ut.cpp
- string_util_ut.cpp
)
PEERDIR(
diff --git a/yql/essentials/sql/v1/complete/ya.make b/yql/essentials/sql/v1/complete/ya.make
index b401bcb3fa..141b5c471d 100644
--- a/yql/essentials/sql/v1/complete/ya.make
+++ b/yql/essentials/sql/v1/complete/ya.make
@@ -1,30 +1,21 @@
LIBRARY()
SRCS(
- sql_antlr4.cpp
sql_complete.cpp
- sql_context.cpp
- sql_syntax.cpp
- string_util.cpp
)
PEERDIR(
- contrib/libs/antlr4_cpp_runtime
- contrib/libs/antlr4-c3
- yql/essentials/core/issue
- yql/essentials/sql/settings
- yql/essentials/sql/v1/format
yql/essentials/sql/v1/lexer
# FIXME(YQL-19747): unwanted dependency on a lexer implementation
yql/essentials/sql/v1/lexer/antlr4_pure
yql/essentials/sql/v1/lexer/antlr4_pure_ansi
- yql/essentials/parser/antlr_ast/gen/v1_ansi_antlr4
- yql/essentials/parser/antlr_ast/gen/v1_antlr4
-
+ yql/essentials/sql/v1/complete/antlr4
yql/essentials/sql/v1/complete/name
yql/essentials/sql/v1/complete/name/static
+ yql/essentials/sql/v1/complete/syntax
+ yql/essentials/sql/v1/complete/text
)
END()
diff --git a/yql/essentials/sql/v1/context.cpp b/yql/essentials/sql/v1/context.cpp
index 569ae375eb..de2668608e 100644
--- a/yql/essentials/sql/v1/context.cpp
+++ b/yql/essentials/sql/v1/context.cpp
@@ -658,4 +658,16 @@ void TTranslation::AltNotImplemented(const TString& ruleName, ui32 altCase, cons
Error() << ruleName << ": alternative is not implemented yet: " << AltDescription(node, altCase, descr);
}
+void EnumerateSqlFlags(std::function<void(std::string_view)> callback) {
+ for (const auto& x : CTX_PRAGMA_FIELDS) {
+ callback(x.first);
+ callback(TString("Disable") + x.first);
+ }
+
+ for (const auto& x : CTX_PRAGMA_MAYBE_FIELDS) {
+ callback(x.first);
+ callback(TString("Disable") + x.first);
+ }
+}
+
} // namespace NSQLTranslationV1
diff --git a/yql/essentials/sql/v1/context.h b/yql/essentials/sql/v1/context.h
index b911370b3e..17e86c77c3 100644
--- a/yql/essentials/sql/v1/context.h
+++ b/yql/essentials/sql/v1/context.h
@@ -471,4 +471,6 @@ namespace NSQLTranslationV1 {
protected:
TContext& Ctx;
};
+
+ void EnumerateSqlFlags(std::function<void(std::string_view)> callback);
} // namespace NSQLTranslationV1
diff --git a/yql/essentials/sql/v1/format/sql_format_ut.h b/yql/essentials/sql/v1/format/sql_format_ut.h
index 2747ce2871..8bb2af2793 100644
--- a/yql/essentials/sql/v1/format/sql_format_ut.h
+++ b/yql/essentials/sql/v1/format/sql_format_ut.h
@@ -153,6 +153,15 @@ Y_UNIT_TEST(ShowCreateTable) {
setup.Run(cases);
}
+Y_UNIT_TEST(ShowCreateView) {
+ TCases cases = {
+ {"use plato;show create view user;","USE plato;\n\nSHOW CREATE VIEW user;\n"},
+ };
+
+ TSetup setup;
+ setup.Run(cases);
+}
+
Y_UNIT_TEST(Use) {
TCases cases = {
{"use user;","USE user;\n"},
diff --git a/yql/essentials/sql/v1/lexer/lexer.cpp b/yql/essentials/sql/v1/lexer/lexer.cpp
index 5621cc65d7..88ced55ccf 100644
--- a/yql/essentials/sql/v1/lexer/lexer.cpp
+++ b/yql/essentials/sql/v1/lexer/lexer.cpp
@@ -11,6 +11,7 @@
#include <util/string/ascii.h>
#include <util/string/builder.h>
#include <util/string/strip.h>
+#include <util/string/join.h>
#if defined(_tsan_enabled_)
#include <util/system/mutex.h>
@@ -29,8 +30,8 @@ using NSQLTranslation::MakeDummyLexerFactory;
class TV1Lexer : public ILexer {
public:
- explicit TV1Lexer(const TLexers& lexers, bool ansi, bool antlr4, bool pure)
- : Factory(GetFactory(lexers, ansi, antlr4, pure))
+ explicit TV1Lexer(const TLexers& lexers, bool ansi, bool antlr4, ELexerFlavor flavor)
+ : Factory(GetFactory(lexers, ansi, antlr4, flavor))
{
}
@@ -42,52 +43,70 @@ public:
}
private:
- static NSQLTranslation::TLexerFactoryPtr GetFactory(const TLexers& lexers, bool ansi, bool antlr4, bool pure = false) {
- if (!ansi && !antlr4 && !pure) {
- if (lexers.Antlr3) {
- return lexers.Antlr3;
- }
- return MakeDummyLexerFactory("antlr3");
- } else if (ansi && !antlr4 && !pure) {
- if (lexers.Antlr3Ansi) {
- return lexers.Antlr3Ansi;
- }
- return MakeDummyLexerFactory("antlr3_ansi");
- } else if (!ansi && antlr4 && !pure) {
- if (lexers.Antlr4) {
- return lexers.Antlr4;
- }
- return MakeDummyLexerFactory("antlr4");
- } else if (ansi && antlr4 && !pure) {
- if (lexers.Antlr4Ansi) {
- return lexers.Antlr4Ansi;
- }
- return MakeDummyLexerFactory("antlr4_ansi");
- } else if (!ansi && antlr4 && pure) {
- if (lexers.Antlr4Pure) {
- return lexers.Antlr4Pure;
- }
- return MakeDummyLexerFactory("antlr4_pure");
- } else if (ansi && antlr4 && pure) {
- if (lexers.Antlr4PureAnsi) {
- return lexers.Antlr4PureAnsi;
- }
- return MakeDummyLexerFactory("antlr4_pure_ansi");
- } else if (!ansi && !antlr4 && pure) {
- return MakeDummyLexerFactory("antlr3_pure");
+ static NSQLTranslation::TLexerFactoryPtr GetFactory(const TLexers& lexers, bool ansi, bool antlr4, ELexerFlavor flavor) {
+ if (auto ptr = GetMaybeFactory(lexers, ansi, antlr4, flavor)) {
+ return ptr;
+ }
+ return MakeDummyLexerFactory(GetLexerName(ansi, antlr4, flavor));
+ }
+
+ static NSQLTranslation::TLexerFactoryPtr GetMaybeFactory(const TLexers& lexers, bool ansi, bool antlr4, ELexerFlavor flavor) {
+ if (!ansi && !antlr4 && flavor == ELexerFlavor::Default) {
+ return lexers.Antlr3;
+ } else if (ansi && !antlr4 && flavor == ELexerFlavor::Default) {
+ return lexers.Antlr3Ansi;
+ } else if (!ansi && antlr4 && flavor == ELexerFlavor::Default) {
+ return lexers.Antlr4;
+ } else if (ansi && antlr4 && flavor == ELexerFlavor::Default) {
+ return lexers.Antlr4Ansi;
+ } else if (!ansi && antlr4 && flavor == ELexerFlavor::Pure) {
+ return lexers.Antlr4Pure;
+ } else if (ansi && antlr4 && flavor == ELexerFlavor::Pure) {
+ return lexers.Antlr4PureAnsi;
+ } else if (!ansi && !antlr4 && flavor == ELexerFlavor::Regex) {
+ return lexers.Regex;
+ } else if (ansi && !antlr4 && flavor == ELexerFlavor::Regex) {
+ return lexers.RegexAnsi;
} else {
- return MakeDummyLexerFactory("antlr3_pure_ansi");
+ return nullptr;
}
}
+ static TString GetLexerName(bool ansi, bool antlr4, ELexerFlavor flavor) {
+        TVector<TStringBuf> parts;
+
+ if (antlr4) {
+ parts.emplace_back("antlr4");
+ } else if (!antlr4 && flavor != ELexerFlavor::Regex) {
+ parts.emplace_back("antlr3");
+ }
+
+ switch (flavor) {
+ case ELexerFlavor::Default: {
+ } break;
+ case ELexerFlavor::Pure: {
+ parts.emplace_back("pure");
+ } break;
+ case ELexerFlavor::Regex: {
+ parts.emplace_back("regex");
+ } break;
+ }
+
+ if (ansi) {
+ parts.emplace_back("ansi");
+ }
+
+ return JoinSeq("_", parts);
+ }
+
private:
NSQLTranslation::TLexerFactoryPtr Factory;
};
} // namespace
-NSQLTranslation::ILexer::TPtr MakeLexer(const TLexers& lexers, bool ansi, bool antlr4, bool pure) {
- return NSQLTranslation::ILexer::TPtr(new TV1Lexer(lexers, ansi, antlr4, pure));
+NSQLTranslation::ILexer::TPtr MakeLexer(const TLexers& lexers, bool ansi, bool antlr4, ELexerFlavor flavor) {
+ return NSQLTranslation::ILexer::TPtr(new TV1Lexer(lexers, ansi, antlr4, flavor));
}
bool IsProbablyKeyword(const NSQLTranslation::TParsedToken& token) {
diff --git a/yql/essentials/sql/v1/lexer/lexer.h b/yql/essentials/sql/v1/lexer/lexer.h
index 1cc8566fcf..226e8b6ed2 100644
--- a/yql/essentials/sql/v1/lexer/lexer.h
+++ b/yql/essentials/sql/v1/lexer/lexer.h
@@ -11,9 +11,18 @@ struct TLexers {
NSQLTranslation::TLexerFactoryPtr Antlr4Ansi;
NSQLTranslation::TLexerFactoryPtr Antlr4Pure;
NSQLTranslation::TLexerFactoryPtr Antlr4PureAnsi;
+ NSQLTranslation::TLexerFactoryPtr Regex;
+ NSQLTranslation::TLexerFactoryPtr RegexAnsi;
};
-NSQLTranslation::ILexer::TPtr MakeLexer(const TLexers& lexers, bool ansi, bool antlr4, bool pure = false);
+enum class ELexerFlavor {
+ Default,
+ Pure,
+ Regex,
+};
+
+NSQLTranslation::ILexer::TPtr MakeLexer(
+ const TLexers& lexers, bool ansi, bool antlr4, ELexerFlavor flavor = ELexerFlavor::Default);
// "Probably" because YQL keyword can be an identifier
// depending on a query context. For example
diff --git a/yql/essentials/sql/v1/lexer/lexer_ut.cpp b/yql/essentials/sql/v1/lexer/lexer_ut.cpp
index 3ad01f631b..549dc9d8fa 100644
--- a/yql/essentials/sql/v1/lexer/lexer_ut.cpp
+++ b/yql/essentials/sql/v1/lexer/lexer_ut.cpp
@@ -1,35 +1,60 @@
#include "lexer.h"
+#include "lexer_ut.h"
#include <yql/essentials/core/issue/yql_issue.h>
#include <yql/essentials/sql/settings/translation_settings.h>
#include <yql/essentials/sql/v1/lexer/antlr3/lexer.h>
+#include <yql/essentials/sql/v1/lexer/antlr3_ansi/lexer.h>
#include <yql/essentials/sql/v1/lexer/antlr4/lexer.h>
+#include <yql/essentials/sql/v1/lexer/antlr4_ansi/lexer.h>
#include <yql/essentials/sql/v1/lexer/antlr4_pure/lexer.h>
+#include <yql/essentials/sql/v1/lexer/antlr4_pure_ansi/lexer.h>
+#include <yql/essentials/sql/v1/lexer/regex/lexer.h>
#include <library/cpp/testing/unittest/registar.h>
+#include <util/string/ascii.h>
+#include <util/random/random.h>
+
+#define UNIT_ASSERT_TOKENIZED(LEXER, QUERY, TOKENS) \
+ do { \
+ auto tokens = Tokenized((LEXER), (QUERY)); \
+ UNIT_ASSERT_VALUES_EQUAL(tokens, (TOKENS)); \
+ } while (false)
+
using namespace NSQLTranslation;
using namespace NSQLTranslationV1;
-std::pair<TParsedTokenList, NYql::TIssues> Tokenize(ILexer::TPtr& lexer, TString queryUtf8) {
+TLexers Lexers = {
+ .Antlr3 = MakeAntlr3LexerFactory(),
+    .Antlr3Ansi = MakeAntlr3AnsiLexerFactory(),
+ .Antlr4 = MakeAntlr4LexerFactory(),
+ .Antlr4Ansi = MakeAntlr4AnsiLexerFactory(),
+ .Antlr4Pure = MakeAntlr4PureLexerFactory(),
+ .Antlr4PureAnsi = MakeAntlr4PureAnsiLexerFactory(),
+ .Regex = MakeRegexLexerFactory(/* ansi = */ false),
+ .RegexAnsi = MakeRegexLexerFactory(/* ansi = */ true),
+};
+
+std::pair<TParsedTokenList, NYql::TIssues> Tokenize(ILexer::TPtr& lexer, const TString& query) {
TParsedTokenList tokens;
NYql::TIssues issues;
- Tokenize(*lexer, queryUtf8, "", tokens, issues, SQL_MAX_PARSER_ERRORS);
+ Tokenize(*lexer, query, "", tokens, issues, SQL_MAX_PARSER_ERRORS);
return {tokens, issues};
}
-TVector<TString> GetIssueMessages(ILexer::TPtr& lexer, TString queryUtf8) {
+TVector<TString> GetIssueMessages(ILexer::TPtr& lexer, const TString& query) {
TVector<TString> messages;
- for (const auto& issue : Tokenize(lexer, queryUtf8).second) {
+ for (const auto& issue : Tokenize(lexer, query).second) {
messages.emplace_back(issue.ToString(/* oneLine = */ true));
}
return messages;
}
-TVector<TString> GetTokenViews(ILexer::TPtr& lexer, TString queryUtf8) {
+TVector<TString> GetTokenViews(ILexer::TPtr& lexer, const TString& query) {
TVector<TString> names;
- for (auto& token : Tokenize(lexer, queryUtf8).first) {
+ for (auto& token : Tokenize(lexer, query).first) {
TString view = std::move(token.Name);
if (view == "ID_PLAIN" || view == "STRING_VALUE") {
view.append(" (");
@@ -41,26 +66,92 @@ TVector<TString> GetTokenViews(ILexer::TPtr& lexer, TString queryUtf8) {
return names;
}
-void AssertEquivialent(const TParsedToken& lhs, const TParsedToken& rhs) {
- if (lhs.Name == "EOF" && rhs.Name == "EOF") {
- return;
+TString ToString(TParsedToken token) {
+ TString& string = token.Name;
+ if (token.Name != token.Content && token.Name != "EOF") {
+ string += "(";
+ string += token.Content;
+ string += ")";
}
+ return string;
+}
- UNIT_ASSERT_VALUES_EQUAL(lhs.Name, rhs.Name);
- UNIT_ASSERT_VALUES_EQUAL(lhs.Content, rhs.Content);
- UNIT_ASSERT_VALUES_EQUAL(lhs.Line, rhs.Line);
+TString Tokenized(ILexer::TPtr& lexer, const TString& query) {
+ TParsedTokenList tokens;
+ NYql::TIssues issues;
+ bool ok = Tokenize(*lexer, query, "Test", tokens, issues, SQL_MAX_PARSER_ERRORS);
+
+ TString out;
+ if (!ok) {
+ out = "[INVALID] ";
+ }
+
+ for (auto& token : tokens) {
+ out += ToString(std::move(token));
+ out += " ";
+ }
+ if (!out.empty()) {
+ out.pop_back();
+ }
+ return out;
}
-void AssertEquivialent(const TParsedTokenList& lhs, const TParsedTokenList& rhs) {
- UNIT_ASSERT_VALUES_EQUAL(lhs.size(), rhs.size());
- for (size_t i = 0; i < lhs.size(); ++i) {
- AssertEquivialent(lhs.at(i), rhs.at(i));
+TString RandomMultilineCommentLikeText(size_t maxSize) {
+ auto size = RandomNumber<size_t>(maxSize);
+ TString comment;
+ for (size_t i = 0; i < size; ++i) {
+ if (auto /* isOpen */ _ = RandomNumber<bool>()) {
+ comment += "/*";
+ } else {
+ comment += "*/";
+ }
+
+        for (size_t gap = RandomNumber<size_t>(2); gap > 0; --gap) {
+ comment += " ";
+ }
}
+ return comment;
}
Y_UNIT_TEST_SUITE(SQLv1Lexer) {
- Y_UNIT_TEST(AntlrVersionIndependent) {
- const TVector<TString> queriesUtf8 = {
+ Y_UNIT_TEST(UnsupportedIssues) {
+ NSQLTranslationV1::TLexers factories;
+
+ TVector<ILexer::TPtr> lexers;
+ for (auto ansi : {false, true}) {
+ for (auto antlr4 : {false, true}) {
+ for (auto flavor : {ELexerFlavor::Default, ELexerFlavor::Pure, ELexerFlavor::Regex}) {
+ lexers.emplace_back(MakeLexer(factories, ansi, antlr4, flavor));
+ }
+ }
+ }
+
+ TVector<TString> actual;
+ for (auto& lexer : lexers) {
+ auto issues = GetIssueMessages(lexer, "");
+ actual.emplace_back(std::move(issues.at(0)));
+ }
+
+ TVector<TString> expected = {
+ "<main>: Error: Lexer antlr3 is not supported",
+ "<main>: Error: Lexer antlr3_pure is not supported",
+ "<main>: Error: Lexer regex is not supported",
+ "<main>: Error: Lexer antlr4 is not supported",
+ "<main>: Error: Lexer antlr4_pure is not supported",
+ "<main>: Error: Lexer antlr4_regex is not supported",
+ "<main>: Error: Lexer antlr3_ansi is not supported",
+ "<main>: Error: Lexer antlr3_pure_ansi is not supported",
+ "<main>: Error: Lexer regex_ansi is not supported",
+ "<main>: Error: Lexer antlr4_ansi is not supported",
+ "<main>: Error: Lexer antlr4_pure_ansi is not supported",
+ "<main>: Error: Lexer antlr4_regex_ansi is not supported",
+ };
+
+ UNIT_ASSERT_VALUES_EQUAL(actual, expected);
+ }
+
+ Y_UNIT_TEST_ON_EACH_LEXER(AntlrAndFlavorIndependent) {
+ static const TVector<TString> queries = {
"",
" ",
"SELECT",
@@ -78,35 +169,31 @@ Y_UNIT_TEST_SUITE(SQLv1Lexer) {
"\"select\"select",
};
- NSQLTranslationV1::TLexers lexers;
- lexers.Antlr3 = NSQLTranslationV1::MakeAntlr3LexerFactory();
- lexers.Antlr4 = NSQLTranslationV1::MakeAntlr4LexerFactory();
- lexers.Antlr4Pure = NSQLTranslationV1::MakeAntlr4PureLexerFactory();
-
- auto lexer3 = MakeLexer(lexers, /* ansi = */ false, /* antlr4 = */ false);
- auto lexer4 = MakeLexer(lexers, /* ansi = */ false, /* antlr4 = */ true);
- auto lexer4p = MakeLexer(lexers, /* ansi = */ false, /* antlr4 = */ true, /* pure = */ true);
-
- for (const auto& query : queriesUtf8) {
- auto [tokens3, issues3] = Tokenize(lexer3, query);
- auto [tokens4, issues4] = Tokenize(lexer4, query);
- auto [tokens4p, issues4p] = Tokenize(lexer4p, query);
- AssertEquivialent(tokens3, tokens4);
- AssertEquivialent(tokens3, tokens4p);
- UNIT_ASSERT(issues3.Empty());
- UNIT_ASSERT(issues4.Empty());
- UNIT_ASSERT(issues4p.Empty());
+ static TVector<TString> expectations(queries.size());
+
+ if (ANSI) {
+ return;
+ }
+
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+
+ for (size_t i = 0; i < queries.size(); ++i) {
+ const auto& query = queries[i];
+ auto& expected = expectations[i];
+
+ if (expected.empty()) {
+ expected = Tokenized(lexer, query);
+ return;
+ }
+
+ UNIT_ASSERT_TOKENIZED(lexer, query, expected);
}
}
TVector<TString> InvalidQueries();
void TestInvalidTokensSkipped(bool antlr4, const TVector<TVector<TString>>& expected) {
- NSQLTranslationV1::TLexers lexers;
- lexers.Antlr3 = NSQLTranslationV1::MakeAntlr3LexerFactory();
- lexers.Antlr4 = NSQLTranslationV1::MakeAntlr4LexerFactory();
-
- auto lexer = MakeLexer(lexers, /* ansi = */ false, antlr4);
+ auto lexer = MakeLexer(Lexers, /* ansi = */ false, antlr4);
auto input = InvalidQueries();
UNIT_ASSERT_VALUES_EQUAL(input.size(), expected.size());
@@ -161,29 +248,26 @@ Y_UNIT_TEST_SUITE(SQLv1Lexer) {
}
Y_UNIT_TEST(IssuesCollected) {
- NSQLTranslationV1::TLexers lexers;
- lexers.Antlr3 = NSQLTranslationV1::MakeAntlr3LexerFactory();
- lexers.Antlr4 = NSQLTranslationV1::MakeAntlr4LexerFactory();
-
- auto lexer3 = MakeLexer(lexers, /* ansi = */ false, /* antlr4 = */ false);
- auto lexer4 = MakeLexer(lexers, /* ansi = */ false, /* antlr4 = */ true);
- auto lexer4p = MakeLexer(lexers, /* ansi = */ false, /* antlr4 = */ true, /* pure = */ true);
+ auto lexer3 = MakeLexer(Lexers, /* ansi = */ false, /* antlr4 = */ false);
+ auto lexer4 = MakeLexer(Lexers, /* ansi = */ false, /* antlr4 = */ true);
+ auto lexer4p = MakeLexer(Lexers, /* ansi = */ false, /* antlr4 = */ true, ELexerFlavor::Pure);
+ auto lexerR = MakeLexer(Lexers, /* ansi = */ false, /* antlr4 = */ false, ELexerFlavor::Regex);
for (const auto& query : InvalidQueries()) {
auto issues3 = GetIssueMessages(lexer3, query);
auto issues4 = GetIssueMessages(lexer4, query);
auto issues4p = GetIssueMessages(lexer4p, query);
+ auto issuesR = GetIssueMessages(lexerR, query);
UNIT_ASSERT(!issues3.empty());
UNIT_ASSERT(!issues4.empty());
UNIT_ASSERT(!issues4p.empty());
+ UNIT_ASSERT(!issuesR.empty());
}
}
Y_UNIT_TEST(IssueMessagesAntlr3) {
- NSQLTranslationV1::TLexers lexers;
- lexers.Antlr3 = NSQLTranslationV1::MakeAntlr3LexerFactory();
- auto lexer3 = MakeLexer(lexers, /* ansi = */ false, /* antlr4 = */ false);
+ auto lexer3 = MakeLexer(Lexers, /* ansi = */ false, /* antlr4 = */ false);
auto actual = GetIssueMessages(lexer3, "\xF0\x9F\x98\x8A SELECT * FR");
@@ -198,10 +282,7 @@ Y_UNIT_TEST_SUITE(SQLv1Lexer) {
}
Y_UNIT_TEST(IssueMessagesAntlr4) {
- NSQLTranslationV1::TLexers lexers;
- lexers.Antlr4 = NSQLTranslationV1::MakeAntlr4LexerFactory();
-
- auto lexer4 = MakeLexer(lexers, /* ansi = */ false, /* antlr4 = */ true);
+ auto lexer4 = MakeLexer(Lexers, /* ansi = */ false, /* antlr4 = */ true);
auto actual = GetIssueMessages(lexer4, "\xF0\x9F\x98\x8A SELECT * FR");
@@ -211,4 +292,165 @@ Y_UNIT_TEST_SUITE(SQLv1Lexer) {
UNIT_ASSERT_VALUES_EQUAL(actual, expected);
}
-}
+
+ Y_UNIT_TEST_ON_EACH_LEXER(Whitespace) {
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+ UNIT_ASSERT_TOKENIZED(lexer, "", "EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, " ", "WS( ) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, " ", "WS( ) WS( ) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "\n", "WS(\n) EOF");
+ }
+
+ Y_UNIT_TEST_ON_EACH_LEXER(Keyword) {
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+ UNIT_ASSERT_TOKENIZED(lexer, "SELECT", "SELECT EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "INSERT", "INSERT EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "FROM", "FROM EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "from", "FROM(from) EOF");
+ }
+
+ Y_UNIT_TEST_ON_EACH_LEXER(Punctuation) {
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+ UNIT_ASSERT_TOKENIZED(
+ lexer,
+ "* / + - <|",
+ "ASTERISK(*) WS( ) SLASH(/) WS( ) "
+ "PLUS(+) WS( ) MINUS(-) WS( ) STRUCT_OPEN(<|) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "SELECT*FROM", "SELECT ASTERISK(*) FROM EOF");
+ }
+
+ Y_UNIT_TEST_ON_EACH_LEXER(IdPlain) {
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+ UNIT_ASSERT_TOKENIZED(lexer, "variable my_table", "ID_PLAIN(variable) WS( ) ID_PLAIN(my_table) EOF");
+ }
+
+ Y_UNIT_TEST_ON_EACH_LEXER(IdQuoted) {
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+ UNIT_ASSERT_TOKENIZED(lexer, "``", "ID_QUOTED(``) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "` `", "ID_QUOTED(` `) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "`local/table`", "ID_QUOTED(`local/table`) EOF");
+ }
+
+ Y_UNIT_TEST_ON_EACH_LEXER(Number) {
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+ UNIT_ASSERT_TOKENIZED(lexer, "1", "DIGITS(1) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "123", "DIGITS(123) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "123u", "INTEGER_VALUE(123u) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "123ui", "INTEGER_VALUE(123ui) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "123.45", "REAL(123.45) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "123.45E10", "REAL(123.45E10) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "123.45E+10", "REAL(123.45E+10) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "1E+10", "REAL(1E+10) EOF");
+ }
+
+ Y_UNIT_TEST_ON_EACH_LEXER(SingleLineString) {
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+ UNIT_ASSERT_TOKENIZED(lexer, "\"\"", "STRING_VALUE(\"\") EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "\' \'", "STRING_VALUE(\' \') EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "\" \"", "STRING_VALUE(\" \") EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "\"test\"", "STRING_VALUE(\"test\") EOF");
+
+ if (!ANSI) {
+ UNIT_ASSERT_TOKENIZED(lexer, "\"\\\"\"", "STRING_VALUE(\"\\\"\") EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "\"\"\"\"", "STRING_VALUE(\"\") STRING_VALUE(\"\") EOF");
+ } else {
+ UNIT_ASSERT_TOKENIZED(lexer, "\"\\\"\"", "[INVALID] STRING_VALUE(\"\\\") EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "\"\"\"\"", "STRING_VALUE(\"\"\"\") EOF");
+ }
+ }
+
+ Y_UNIT_TEST_ON_EACH_LEXER(MultiLineString) {
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+ UNIT_ASSERT_TOKENIZED(lexer, "@@@@", "STRING_VALUE(@@@@) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "@@ @@@", "STRING_VALUE(@@ @@@) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "@@test@@", "STRING_VALUE(@@test@@) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "@@line1\nline2@@", "STRING_VALUE(@@line1\nline2@@) EOF");
+ }
+
+ Y_UNIT_TEST_ON_EACH_LEXER(SingleLineComment) {
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+ UNIT_ASSERT_TOKENIZED(lexer, "--yql", "COMMENT(--yql) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "-- yql ", "COMMENT(-- yql ) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "-- yql\nSELECT", "COMMENT(-- yql\n) SELECT EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "-- yql --", "COMMENT(-- yql --) EOF");
+ }
+
+ Y_UNIT_TEST_ON_EACH_LEXER(MultiLineComment) {
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+ UNIT_ASSERT_TOKENIZED(lexer, "/* yql */", "COMMENT(/* yql */) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "/* yql */ */", "COMMENT(/* yql */) WS( ) ASTERISK(*) SLASH(/) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "/* yql\n * yql\n */", "COMMENT(/* yql\n * yql\n */) EOF");
+ }
+
+ Y_UNIT_TEST_ON_EACH_LEXER(RecursiveMultiLineComment) {
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+ if (!ANSI) {
+ UNIT_ASSERT_TOKENIZED(lexer, "/* /* yql */", "COMMENT(/* /* yql */) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "/* /* yql */ */", "COMMENT(/* /* yql */) WS( ) ASTERISK(*) SLASH(/) EOF");
+ } else {
+ UNIT_ASSERT_TOKENIZED(lexer, "/* /* yql */", "COMMENT(/* /* yql */) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "/* yql */ */", "COMMENT(/* yql */) WS( ) ASTERISK(*) SLASH(/) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "/* /* /* yql */ */", "COMMENT(/* /* /* yql */ */) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "/* /* yql */ */ */", "COMMENT(/* /* yql */ */) WS( ) ASTERISK(*) SLASH(/) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "/* /* yql */ */", "COMMENT(/* /* yql */ */) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "/*/*/*/", "COMMENT(/*/*/) ASTERISK(*) SLASH(/) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "/*/**/*/*/*/", "COMMENT(/*/**/*/) ASTERISK(*) SLASH(/) ASTERISK(*) SLASH(/) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "/* /* */ a /* /* */", "COMMENT(/* /* */ a /* /* */) EOF");
+ }
+ }
+
+ Y_UNIT_TEST_ON_EACH_LEXER(RandomRecursiveMultiLineComment) {
+        if ((!ANTLR4 && FLAVOR != ELexerFlavor::Regex) || (ANTLR4 && FLAVOR != ELexerFlavor::Pure)) {
+ return;
+ }
+
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+ auto reference = MakeLexer(Lexers, ANSI, /* antlr4 = */ true, ELexerFlavor::Pure);
+
+ SetRandomSeed(100);
+ for (size_t i = 0; i < 512; ++i) {
+ auto input = RandomMultilineCommentLikeText(/* maxSize = */ 32);
+ TString actual = Tokenized(lexer, input);
+ TString expected = Tokenized(reference, input);
+
+ UNIT_ASSERT_VALUES_EQUAL_C(actual, expected, "Input: " << input);
+ }
+ }
+
+ Y_UNIT_TEST_ON_EACH_LEXER(SimpleQuery) {
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+ UNIT_ASSERT_TOKENIZED(lexer, "select 1", "SELECT(select) WS( ) DIGITS(1) EOF");
+ UNIT_ASSERT_TOKENIZED(lexer, "SELect 1", "SELECT(SELect) WS( ) DIGITS(1) EOF");
+ }
+
+ Y_UNIT_TEST_ON_EACH_LEXER(ComplexQuery) {
+ auto lexer = MakeLexer(Lexers, ANSI, ANTLR4, FLAVOR);
+
+ TString query =
+ "SELECT\n"
+ " 123467,\n"
+ " \"Hello, {name}!\",\n"
+ " (1 + (5U * 1 / 0)),\n"
+ " MIN(identifier),\n"
+ " Bool(field),\n"
+ " Math::Sin(var)\n"
+ "FROM `local/test/space/table`\n"
+ "JOIN test;";
+
+ TString expected =
+ "SELECT WS(\n) "
+ "WS( ) WS( ) DIGITS(123467) COMMA(,) WS(\n) "
+ "WS( ) WS( ) STRING_VALUE(\"Hello, {name}!\") COMMA(,) WS(\n) "
+ "WS( ) WS( ) LPAREN(() DIGITS(1) WS( ) PLUS(+) WS( ) LPAREN(() INTEGER_VALUE(5U) WS( ) "
+ "ASTERISK(*) WS( ) DIGITS(1) WS( ) SLASH(/) WS( ) DIGITS(0) RPAREN()) "
+ "RPAREN()) COMMA(,) WS(\n) "
+ "WS( ) WS( ) ID_PLAIN(MIN) LPAREN(() ID_PLAIN(identifier) RPAREN()) COMMA(,) WS(\n) "
+ "WS( ) WS( ) ID_PLAIN(Bool) LPAREN(() ID_PLAIN(field) RPAREN()) COMMA(,) WS(\n) "
+ "WS( ) WS( ) ID_PLAIN(Math) NAMESPACE(::) ID_PLAIN(Sin) LPAREN(() ID_PLAIN(var) RPAREN()) WS(\n) "
+ "FROM WS( ) ID_QUOTED(`local/test/space/table`) WS(\n) "
+ "JOIN WS( ) ID_PLAIN(test) SEMICOLON(;) EOF";
+
+ UNIT_ASSERT_TOKENIZED(lexer, query, expected);
+ }
+
+} // Y_UNIT_TEST_SUITE(SQLv1Lexer)
diff --git a/yql/essentials/sql/v1/lexer/lexer_ut.h b/yql/essentials/sql/v1/lexer/lexer_ut.h
new file mode 100644
index 0000000000..b4304eb707
--- /dev/null
+++ b/yql/essentials/sql/v1/lexer/lexer_ut.h
@@ -0,0 +1,37 @@
+#pragma once
+
+#include "lexer.h"
+
+#define LEXER_NAME_ANSI_false_ANTLR4_false_FLAVOR_Default "antlr3"
+#define LEXER_NAME_ANSI_false_ANTLR4_true_FLAVOR_Default "antlr4"
+#define LEXER_NAME_ANSI_true_ANTLR4_false_FLAVOR_Default "antlr3_ansi"
+#define LEXER_NAME_ANSI_true_ANTLR4_true_FLAVOR_Default "antlr4_ansi"
+#define LEXER_NAME_ANSI_false_ANTLR4_true_FLAVOR_Pure "antlr4_pure"
+#define LEXER_NAME_ANSI_true_ANTLR4_true_FLAVOR_Pure "antlr4_pure_ansi"
+#define LEXER_NAME_ANSI_false_ANTLR4_false_FLAVOR_Regex "regex"
+#define LEXER_NAME_ANSI_true_ANTLR4_false_FLAVOR_Regex "regex_ansi"
+
+#define Y_UNIT_TEST_ON_EACH_LEXER_ADD_TEST(N, ANSI, ANTLR4, FLAVOR) \
+ TCurrentTest::AddTest( \
+ #N "::" LEXER_NAME_ANSI_##ANSI##_ANTLR4_##ANTLR4##_FLAVOR_##FLAVOR, \
+ static_cast<void (*)(NUnitTest::TTestContext&)>(&N<ANSI, ANTLR4, ELexerFlavor::FLAVOR>), \
+ /* forceFork = */ false)
+
+#define Y_UNIT_TEST_ON_EACH_LEXER(N) \
+ template <bool ANSI, bool ANTLR4, ELexerFlavor FLAVOR> \
+ void N(NUnitTest::TTestContext&); \
+ struct TTestRegistration##N { \
+ TTestRegistration##N() { \
+ Y_UNIT_TEST_ON_EACH_LEXER_ADD_TEST(N, false, false, Default); \
+ Y_UNIT_TEST_ON_EACH_LEXER_ADD_TEST(N, false, true, Default); \
+ Y_UNIT_TEST_ON_EACH_LEXER_ADD_TEST(N, true, false, Default); \
+ Y_UNIT_TEST_ON_EACH_LEXER_ADD_TEST(N, true, true, Default); \
+ Y_UNIT_TEST_ON_EACH_LEXER_ADD_TEST(N, false, true, Pure); \
+ Y_UNIT_TEST_ON_EACH_LEXER_ADD_TEST(N, true, true, Pure); \
+ Y_UNIT_TEST_ON_EACH_LEXER_ADD_TEST(N, false, false, Regex); \
+ Y_UNIT_TEST_ON_EACH_LEXER_ADD_TEST(N, true, false, Regex); \
+ } \
+ }; \
+ static TTestRegistration##N testRegistration##N; \
+ template <bool ANSI, bool ANTLR4, ELexerFlavor FLAVOR> \
+ void N(NUnitTest::TTestContext&)
diff --git a/yql/essentials/sql/v1/lexer/regex/lexer.cpp b/yql/essentials/sql/v1/lexer/regex/lexer.cpp
new file mode 100644
index 0000000000..b8ca033b0c
--- /dev/null
+++ b/yql/essentials/sql/v1/lexer/regex/lexer.cpp
@@ -0,0 +1,254 @@
+#include "lexer.h"
+
+#include "regex.h"
+
+#include <contrib/libs/re2/re2/re2.h>
+
+#include <yql/essentials/core/issue/yql_issue.h>
+#include <yql/essentials/sql/v1/reflect/sql_reflect.h>
+
+#include <util/generic/algorithm.h>
+#include <util/generic/string.h>
+#include <util/string/subst.h>
+#include <util/string/ascii.h>
+
+namespace NSQLTranslationV1 {
+
+ using NSQLTranslation::TParsedToken;
+ using NSQLTranslation::TParsedTokenList;
+
+ class TRegexLexer: public NSQLTranslation::ILexer {
+ static constexpr const char* CommentTokenName = "COMMENT";
+
+ public:
+ TRegexLexer(
+ bool ansi,
+ NSQLReflect::TLexerGrammar grammar,
+ const TVector<std::tuple<TString, TString>>& RegexByOtherName)
+ : Grammar_(std::move(grammar))
+ , Ansi_(ansi)
+ {
+ for (const auto& [token, regex] : RegexByOtherName) {
+ if (token == CommentTokenName) {
+ CommentRegex_.Reset(new RE2(regex));
+ } else {
+ OtherRegexes_.emplace_back(token, new RE2(regex));
+ }
+ }
+ }
+
+ bool Tokenize(
+ const TString& query,
+ const TString& queryName,
+ const TTokenCallback& onNextToken,
+ NYql::TIssues& issues,
+ size_t maxErrors) override {
+ size_t errors = 0;
+ for (size_t pos = 0; pos < query.size();) {
+ TParsedToken matched = Match(TStringBuf(query, pos));
+
+ if (matched.Name.empty() && maxErrors == errors) {
+ break;
+ }
+
+ if (matched.Name.empty()) {
+ pos += 1;
+ errors += 1;
+ issues.AddIssue(NYql::TPosition(pos, 0, queryName), "no candidates");
+ continue;
+ }
+
+ pos += matched.Content.length();
+ onNextToken(std::move(matched));
+ }
+
+ onNextToken(TParsedToken{.Name = "EOF"});
+ return errors == 0;
+ }
+
+ private:
+ TParsedToken Match(const TStringBuf prefix) {
+ TParsedTokenList matches;
+
+ size_t keywordCount = MatchKeyword(prefix, matches);
+ MatchPunctuation(prefix, matches);
+ MatchRegex(prefix, matches);
+ MatchComment(prefix, matches);
+
+ if (matches.empty()) {
+ return {};
+ }
+
+ auto maxLength = MaxElementBy(matches, [](const TParsedToken& m) {
+ return m.Content.length();
+ })->Content.length();
+
+ auto max = FindIf(matches, [&](const TParsedToken& m) {
+ return m.Content.length() == maxLength;
+ });
+
+ auto isMatched = [&](const TStringBuf name) {
+ return std::end(matches) != FindIf(matches, [&](const auto& m) {
+ return m.Name == name;
+ });
+ };
+
+ size_t conflicts = CountIf(matches, [&](const TParsedToken& m) {
+ return m.Content.length() == max->Content.length();
+ });
+ conflicts -= 1;
+ Y_ENSURE(
+ conflicts == 0 ||
+ (conflicts == 1 && keywordCount != 0 && isMatched("ID_PLAIN")) ||
+ (conflicts == 1 && isMatched("DIGITS") && isMatched("INTEGER_VALUE")));
+
+ Y_ENSURE(!max->Content.empty());
+ return *max;
+ }
+
+        size_t MatchKeyword(const TStringBuf prefix, TParsedTokenList& matches) {
+ size_t count = 0;
+ for (const auto& keyword : Grammar_.KeywordNames) {
+ const TStringBuf content = prefix.substr(0, keyword.length());
+ if (AsciiEqualsIgnoreCase(content, keyword)) {
+ matches.emplace_back(keyword, TString(content));
+ count += 1;
+ }
+ }
+ return count;
+ }
+
+ size_t MatchPunctuation(const TStringBuf prefix, TParsedTokenList& matches) {
+ size_t count = 0;
+ for (const auto& name : Grammar_.PunctuationNames) {
+ const auto& content = Grammar_.BlockByName.at(name);
+ if (prefix.substr(0, content.length()) == content) {
+ matches.emplace_back(name, content);
+ count += 1;
+ }
+ }
+ return count;
+ }
+
+ size_t MatchRegex(const TStringBuf prefix, TParsedTokenList& matches) {
+ size_t count = 0;
+ for (const auto& [token, regex] : OtherRegexes_) {
+ if (const TStringBuf match = TryMatchRegex(prefix, *regex); !match.empty()) {
+ matches.emplace_back(token, TString(match));
+ count += 1;
+ }
+ }
+ return count;
+ }
+
+ const TStringBuf TryMatchRegex(const TStringBuf prefix, const RE2& regex) {
+ re2::StringPiece input(prefix.data(), prefix.size());
+ if (RE2::Consume(&input, regex)) {
+ return TStringBuf(prefix.data(), input.data());
+ }
+ return "";
+ }
+
+ size_t MatchComment(const TStringBuf prefix, TParsedTokenList& matches) {
+ const TStringBuf reContent = TryMatchRegex(prefix, *CommentRegex_);
+ if (reContent.empty()) {
+ return 0;
+ }
+
+ if (!(Ansi_ && prefix.StartsWith("/*"))) {
+ matches.emplace_back(CommentTokenName, TString(reContent));
+ return 1;
+ }
+
+ size_t ll1Length = MatchANSIMultilineComment(prefix);
+ const TStringBuf ll1Content = prefix.SubString(0, ll1Length);
+
+            Y_ENSURE(ll1Content.empty() || reContent <= ll1Content);
+            if (ll1Content.empty()) {
+ matches.emplace_back(CommentTokenName, TString(reContent));
+ return 1;
+ }
+
+ matches.emplace_back(CommentTokenName, TString(ll1Content));
+ return 1;
+ }
+
+ size_t MatchANSIMultilineComment(TStringBuf remaining) {
+ if (!remaining.StartsWith("/*")) {
+ return 0;
+ }
+
+ size_t skipped = 0;
+
+ remaining.Skip(2);
+ skipped += 2;
+
+ for (;;) {
+ if (remaining.StartsWith("*/")) {
+ remaining.Skip(2);
+ skipped += 2;
+ return skipped;
+ }
+
+ bool isSkipped = false;
+ if (remaining.StartsWith("/*")) {
+ size_t limit = remaining.rfind("*/");
+ if (limit == std::string::npos) {
+ return 0;
+ }
+
+ size_t len = MatchANSIMultilineComment(remaining.Head(limit));
+ remaining.Skip(len);
+ skipped += len;
+
+ isSkipped = len != 0;
+ }
+
+ if (isSkipped) {
+ continue;
+ }
+
+ if (remaining.size() == 0) {
+ return 0;
+ }
+
+ remaining.Skip(1);
+ skipped += 1;
+ }
+ }
+
+ NSQLReflect::TLexerGrammar Grammar_;
+ TVector<std::tuple<TString, THolder<RE2>>> OtherRegexes_;
+ THolder<RE2> CommentRegex_;
+ bool Ansi_;
+ };
+
+ namespace {
+
+ class TFactory final: public NSQLTranslation::ILexerFactory {
+ public:
+ explicit TFactory(bool ansi)
+ : Ansi_(ansi)
+ , Grammar_(NSQLReflect::LoadLexerGrammar())
+ , RegexByOtherName_(MakeRegexByOtherName(Grammar_, Ansi_))
+ {
+ }
+
+ NSQLTranslation::ILexer::TPtr MakeLexer() const override {
+ return NSQLTranslation::ILexer::TPtr(
+ new TRegexLexer(Ansi_, Grammar_, RegexByOtherName_));
+ }
+
+ private:
+ bool Ansi_;
+ NSQLReflect::TLexerGrammar Grammar_;
+ TVector<std::tuple<TString, TString>> RegexByOtherName_;
+ };
+
+ } // namespace
+
+ NSQLTranslation::TLexerFactoryPtr MakeRegexLexerFactory(bool ansi) {
+ return NSQLTranslation::TLexerFactoryPtr(new TFactory(ansi));
+ }
+
+} // namespace NSQLTranslationV1
diff --git a/yql/essentials/sql/v1/lexer/regex/lexer.h b/yql/essentials/sql/v1/lexer/regex/lexer.h
new file mode 100644
index 0000000000..e9968954e1
--- /dev/null
+++ b/yql/essentials/sql/v1/lexer/regex/lexer.h
@@ -0,0 +1,9 @@
+#pragma once
+
+#include <yql/essentials/parser/lexer_common/lexer.h>
+
+namespace NSQLTranslationV1 {
+
+ NSQLTranslation::TLexerFactoryPtr MakeRegexLexerFactory(bool ansi);
+
+} // namespace NSQLTranslationV1
diff --git a/yql/essentials/sql/v1/lexer/regex/lexer_ut.cpp b/yql/essentials/sql/v1/lexer/regex/lexer_ut.cpp
new file mode 100644
index 0000000000..ae0d018e42
--- /dev/null
+++ b/yql/essentials/sql/v1/lexer/regex/lexer_ut.cpp
@@ -0,0 +1,219 @@
+#include "lexer.h"
+
+#include <yql/essentials/public/issue/yql_issue.h>
+#include <yql/essentials/sql/settings/translation_settings.h>
+#include <yql/essentials/sql/v1/lexer/lexer.h>
+#include <yql/essentials/sql/v1/lexer/antlr4_pure_ansi/lexer.h>
+
+#include <library/cpp/testing/unittest/registar.h>
+
+#include <util/random/random.h>
+
+using namespace NSQLTranslationV1;
+using NSQLTranslation::SQL_MAX_PARSER_ERRORS;
+using NSQLTranslation::Tokenize;
+using NSQLTranslation::TParsedToken;
+using NSQLTranslation::TParsedTokenList;
+using NYql::TIssues;
+
+TLexers Lexers = {
+ .Antlr4PureAnsi = MakeAntlr4PureAnsiLexerFactory(),
+ .Regex = MakeRegexLexerFactory(/* ansi = */ false),
+ .RegexAnsi = MakeRegexLexerFactory(/* ansi = */ true),
+};
+
+auto PureAnsiLexer = MakeLexer(
+ Lexers, /* ansi = */ true, /* antlr4 = */ true, ELexerFlavor::Pure);
+
+auto DefaultLexer = MakeLexer(
+ Lexers, /* ansi = */ false, /* antlr4 = */ false, ELexerFlavor::Regex);
+
+auto AnsiLexer = MakeLexer(
+ Lexers, /* ansi = */ true, /* antlr4 = */ false, ELexerFlavor::Regex);
+
+TString ToString(TParsedToken token) {
+ TString& string = token.Name;
+ if (token.Name != token.Content && token.Name != "EOF") {
+ string += "(";
+ string += token.Content;
+ string += ")";
+ }
+ return string;
+}
+
+TString Tokenized(NSQLTranslation::ILexer& lexer, const TString& query) {
+ TParsedTokenList tokens;
+ TIssues issues;
+ bool ok = Tokenize(lexer, query, "Test", tokens, issues, SQL_MAX_PARSER_ERRORS);
+
+ TString out;
+ if (!ok) {
+ out = "[INVALID] ";
+ }
+
+ for (auto& token : tokens) {
+ out += ToString(std::move(token));
+ out += " ";
+ }
+ if (!out.empty()) {
+ out.pop_back();
+ }
+ return out;
+}
+
+TString RandomMultilineCommentLikeText(size_t maxSize) {
+ auto size = RandomNumber<size_t>(maxSize);
+ TString comment;
+ for (size_t i = 0; i < size; ++i) {
+ if (auto /* isOpen */ _ = RandomNumber<bool>()) {
+ comment += "/*";
+ } else {
+ comment += "*/";
+ }
+
+ for (int gap = RandomNumber<size_t>(2); gap > 0; --gap) {
+ comment += " ";
+ }
+ }
+ return comment;
+}
+
+void Check(TString input, TString expected, bool ansi) {
+ auto* lexer = DefaultLexer.Get();
+ if (ansi) {
+ lexer = AnsiLexer.Get();
+ }
+ UNIT_ASSERT_VALUES_EQUAL(Tokenized(*lexer, input), expected);
+}
+
+void Check(TString input, TString expected) {
+ Check(input, expected, /* ansi = */ false);
+ Check(input, expected, /* ansi = */ true);
+}
+
+Y_UNIT_TEST_SUITE(RegexLexerTests) {
+ Y_UNIT_TEST(Whitespace) {
+ Check("", "EOF");
+ Check(" ", "WS( ) EOF");
+ Check(" ", "WS( ) WS( ) EOF");
+ Check("\n", "WS(\n) EOF");
+ }
+
+    Y_UNIT_TEST(SingleLineComment) {
+ Check("--yql", "COMMENT(--yql) EOF");
+ Check("-- yql ", "COMMENT(-- yql ) EOF");
+ Check("-- yql\nSELECT", "COMMENT(-- yql\n) SELECT EOF");
+ Check("-- yql --", "COMMENT(-- yql --) EOF");
+ }
+
+ Y_UNIT_TEST(MultiLineComment) {
+ Check("/* yql */", "COMMENT(/* yql */) EOF");
+ Check("/* yql */ */", "COMMENT(/* yql */) WS( ) ASTERISK(*) SLASH(/) EOF");
+ Check("/* yql\n * yql\n */", "COMMENT(/* yql\n * yql\n */) EOF");
+ }
+
+ Y_UNIT_TEST(RecursiveMultiLineCommentDefault) {
+ Check("/* /* yql */", "COMMENT(/* /* yql */) EOF", /* ansi = */ false);
+ Check("/* /* yql */ */", "COMMENT(/* /* yql */) WS( ) ASTERISK(*) SLASH(/) EOF", /* ansi = */ false);
+ }
+
+ Y_UNIT_TEST(RecursiveMultiLineCommentAnsi) {
+ Check("/* /* yql */", "COMMENT(/* /* yql */) EOF", /* ansi = */ true);
+ Check("/* yql */ */", "COMMENT(/* yql */) WS( ) ASTERISK(*) SLASH(/) EOF", /* ansi = */ true);
+ Check("/* /* /* yql */ */", "COMMENT(/* /* /* yql */ */) EOF", /* ansi = */ true);
+ Check("/* /* yql */ */ */", "COMMENT(/* /* yql */ */) WS( ) ASTERISK(*) SLASH(/) EOF", /* ansi = */ true);
+ Check("/* /* yql */ */", "COMMENT(/* /* yql */ */) EOF", /* ansi = */ true);
+ Check("/*/*/*/", "COMMENT(/*/*/) ASTERISK(*) SLASH(/) EOF", /* ansi = */ true);
+ Check("/*/**/*/*/*/", "COMMENT(/*/**/*/) ASTERISK(*) SLASH(/) ASTERISK(*) SLASH(/) EOF", /* ansi = */ true);
+ Check("/* /* */ a /* /* */", "COMMENT(/* /* */ a /* /* */) EOF", /* ansi = */ true);
+ }
+
+    Y_UNIT_TEST(RecursiveMultiLineCommentAnsiReferenceComparison) {
+ SetRandomSeed(100);
+ for (size_t i = 0; i < 512; ++i) {
+ auto input = RandomMultilineCommentLikeText(/* maxSize = */ 128);
+ TString actual = Tokenized(*AnsiLexer, input);
+ TString expected = Tokenized(*PureAnsiLexer, input);
+ UNIT_ASSERT_VALUES_EQUAL_C(actual, expected, "Input: " << input);
+ }
+ }
+
+ Y_UNIT_TEST(Keyword) {
+ Check("SELECT", "SELECT EOF");
+ Check("INSERT", "INSERT EOF");
+ Check("FROM", "FROM EOF");
+ }
+
+ Y_UNIT_TEST(Punctuation) {
+ Check(
+ "* / + - <|",
+ "ASTERISK(*) WS( ) SLASH(/) WS( ) "
+ "PLUS(+) WS( ) MINUS(-) WS( ) STRUCT_OPEN(<|) EOF");
+ Check("SELECT*FROM", "SELECT ASTERISK(*) FROM EOF");
+ }
+
+ Y_UNIT_TEST(IdPlain) {
+ Check("variable my_table", "ID_PLAIN(variable) WS( ) ID_PLAIN(my_table) EOF");
+ }
+
+ Y_UNIT_TEST(IdQuoted) {
+ Check("``", "ID_QUOTED(``) EOF");
+ Check("` `", "ID_QUOTED(` `) EOF");
+ Check("` `", "ID_QUOTED(` `) EOF");
+ Check("`local/table`", "ID_QUOTED(`local/table`) EOF");
+ }
+
+    Y_UNIT_TEST(SingleLineString) {
+ Check("\"\"", "STRING_VALUE(\"\") EOF");
+ Check("\' \'", "STRING_VALUE(\' \') EOF");
+ Check("\" \"", "STRING_VALUE(\" \") EOF");
+ Check("\"test\"", "STRING_VALUE(\"test\") EOF");
+
+ Check("\"\\\"\"", "STRING_VALUE(\"\\\"\") EOF", /* ansi = */ false);
+ Check("\"\\\"\"", "[INVALID] STRING_VALUE(\"\\\") EOF", /* ansi = */ true);
+
+ Check("\"\"\"\"", "STRING_VALUE(\"\") STRING_VALUE(\"\") EOF", /* ansi = */ false);
+ Check("\"\"\"\"", "STRING_VALUE(\"\"\"\") EOF", /* ansi = */ true);
+ }
+
+ Y_UNIT_TEST(MultiLineString) {
+ Check("@@@@", "STRING_VALUE(@@@@) EOF");
+ Check("@@ @@@", "STRING_VALUE(@@ @@@) EOF");
+ Check("@@test@@", "STRING_VALUE(@@test@@) EOF");
+ Check("@@line1\nline2@@", "STRING_VALUE(@@line1\nline2@@) EOF");
+ }
+
+ Y_UNIT_TEST(Query) {
+ TString query =
+ "SELECT\n"
+ " 123467,\n"
+ " \"Hello, {name}!\",\n"
+ " (1 + (5 * 1 / 0)),\n"
+ " MIN(identifier),\n"
+ " Bool(field),\n"
+ " Math::Sin(var)\n"
+ "FROM `local/test/space/table`\n"
+ "JOIN test;";
+
+ TString expected =
+ "SELECT WS(\n) "
+ "WS( ) WS( ) INTEGER_VALUE(123467) COMMA(,) WS(\n) "
+ "WS( ) WS( ) STRING_VALUE(\"Hello, {name}!\") COMMA(,) WS(\n) "
+ "WS( ) WS( ) LPAREN(() INTEGER_VALUE(1) WS( ) PLUS(+) WS( ) LPAREN(() INTEGER_VALUE(5) WS( ) "
+ "ASTERISK(*) WS( ) INTEGER_VALUE(1) WS( ) SLASH(/) WS( ) INTEGER_VALUE(0) RPAREN()) "
+ "RPAREN()) COMMA(,) WS(\n) "
+ "WS( ) WS( ) ID_PLAIN(MIN) LPAREN(() ID_PLAIN(identifier) RPAREN()) COMMA(,) WS(\n) "
+ "WS( ) WS( ) ID_PLAIN(Bool) LPAREN(() ID_PLAIN(field) RPAREN()) COMMA(,) WS(\n) "
+ "WS( ) WS( ) ID_PLAIN(Math) NAMESPACE(::) ID_PLAIN(Sin) LPAREN(() ID_PLAIN(var) RPAREN()) WS(\n) "
+ "FROM WS( ) ID_QUOTED(`local/test/space/table`) WS(\n) "
+ "JOIN WS( ) ID_PLAIN(test) SEMICOLON(;) EOF";
+
+ Check(query, expected);
+ }
+
+ Y_UNIT_TEST(Invalid) {
+ Check("\"", "[INVALID] EOF");
+ Check("\" SELECT", "[INVALID] WS( ) SELECT EOF");
+ }
+
+} // Y_UNIT_TEST_SUITE(RegexLexerTests)
diff --git a/yql/essentials/sql/v1/lexer/regex/regex.cpp b/yql/essentials/sql/v1/lexer/regex/regex.cpp
new file mode 100644
index 0000000000..937d21572f
--- /dev/null
+++ b/yql/essentials/sql/v1/lexer/regex/regex.cpp
@@ -0,0 +1,240 @@
+#include "regex.h"
+
+#include <contrib/libs/re2/re2/re2.h>
+
+#include <util/generic/vector.h>
+
+#define SUBSTITUTION(name, mode) \
+ {#name, name##_##mode}
+
+#define SUBSTITUTIONS(mode) \
+ { \
+ #mode, { \
+ SUBSTITUTION(GRAMMAR_STRING_CORE_SINGLE, mode), \
+ SUBSTITUTION(GRAMMAR_STRING_CORE_DOUBLE, mode), \
+ SUBSTITUTION(GRAMMAR_MULTILINE_COMMENT_CORE, mode), \
+ } \
+ }
+
+namespace NSQLTranslationV1 {
+
+ class TLexerGrammarToRegexTranslator {
+ private:
+ struct TRewriteRule {
+ TString Repr;
+ std::function<void(TString&)> Apply;
+ };
+
+ using TRewriteRules = TVector<TRewriteRule>;
+
+ public:
+ explicit TLexerGrammarToRegexTranslator(const NSQLReflect::TLexerGrammar& grammar, bool ansi)
+ : Grammar_(&grammar)
+ , Mode_(ansi ? "ANSI" : "DEFAULT")
+ {
+ AddExternalRules(Inliners_);
+ AddFragmentRules(Inliners_);
+
+ AddLetterRules(Transformations_);
+ AddTransformationRules(Transformations_);
+
+ UnwrapQuotes_ = UnwrapQuotesRule();
+ AddSpaceCollapses(SpaceCollapses_);
+ UnwrapQuotedSpace_ = UnwrapQuotedSpaceRule();
+ }
+
+ TString ToRegex(const TStringBuf name) {
+ TString text = Grammar_->BlockByName.at(name);
+ Inline(text);
+ Transform(text);
+ Finalize(text);
+ return text;
+ }
+
+ private:
+ void Inline(TString& text) {
+ ApplyEachWhileChanging(text, Inliners_);
+ }
+
+ void AddExternalRules(TRewriteRules& rules) {
+ THashMap<TString, THashMap<TString, TString>> Substitutions = {
+ SUBSTITUTIONS(DEFAULT),
+ SUBSTITUTIONS(ANSI),
+ };
+
+ // ANSI mode MULTILINE_COMMENT is recursive
+ Substitutions["ANSI"]["GRAMMAR_MULTILINE_COMMENT_CORE"] =
+ Substitutions["DEFAULT"]["GRAMMAR_MULTILINE_COMMENT_CORE"];
+
+ for (const auto& [k, v] : Substitutions.at(Mode_)) {
+ rules.emplace_back(RegexRewriteRule("@" + k + "@", v));
+ }
+ }
+
+ void AddFragmentRules(TRewriteRules& rules) {
+ const THashSet<TString> PunctuationFragments = {
+ "BACKSLASH",
+ "QUOTE_DOUBLE",
+ "QUOTE_SINGLE",
+ "BACKTICK",
+ "DOUBLE_COMMAT",
+ };
+
+ for (const auto& [name, definition] : Grammar_->BlockByName) {
+ TString def = definition;
+ if (
+ Grammar_->PunctuationNames.contains(name) ||
+ PunctuationFragments.contains(name)) {
+ def = "'" + def + "'";
+ }
+ def = QuoteAntlrRewrite(std::move(def));
+
+ rules.emplace_back(RegexRewriteRule(
+ "(\\b" + name + "\\b)",
+ "(" + def + ")"));
+ }
+ }
+
+ void Transform(TString& text) {
+ ApplyEachWhileChanging(text, Transformations_);
+ }
+
+ void AddLetterRules(TRewriteRules& rules) {
+ for (char letter = 'A'; letter <= 'Z'; ++letter) {
+ TString lower(char(ToLower(letter)));
+ TString upper(char(ToUpper(letter)));
+ rules.emplace_back(RegexRewriteRule(
+ "([^'\\w\\[\\]]|^)" + upper + "([^'\\w\\[\\]]|$)",
+ "\\1[" + lower + upper + "]\\2"));
+ }
+ }
+
+ void AddTransformationRules(TRewriteRules& rules) {
+ rules.emplace_back(RegexRewriteRule(
+ R"(~\('(..?)' \| '(..?)'\))", R"([^\1\2])"));
+
+ rules.emplace_back(RegexRewriteRule(
+ R"(~\('(..?)'\))", R"([^\1])"));
+
+ rules.emplace_back(RegexRewriteRule(
+ R"(('..?')\.\.('..?'))", R"([\1-\2])"));
+
+ rules.emplace_back(RegexRewriteRule(
+ R"(\((.)\))", R"(\1)"));
+
+ rules.emplace_back(RegexRewriteRule(
+ R"(\((\[.{1,8}\])\))", R"(\1)"));
+
+ rules.emplace_back(RegexRewriteRule(
+ R"(\(('..?')\))", R"(\1)"));
+
+ rules.emplace_back(RegexRewriteRule(
+ R"( \.)", R"( (.|\\n))"));
+
+ rules.emplace_back(RegexRewriteRule(
+ R"(\bEOF\b)", R"($)"));
+
+ rules.emplace_back(RegexRewriteRule(
+ R"('\\u000C' \|)", ""));
+ }
+
+ void Finalize(TString& text) {
+ UnwrapQuotes_.Apply(text);
+ ApplyEachWhileChanging(text, SpaceCollapses_);
+ UnwrapQuotedSpace_.Apply(text);
+ }
+
+ void AddSpaceCollapses(TRewriteRules& rules) {
+ rules.emplace_back(RegexRewriteRule(R"(([^']|^) )", R"(\1)"));
+ rules.emplace_back(RegexRewriteRule(R"( ([^']|$))", R"(\1)"));
+ }
+
+ void ApplyEachOnce(TString& text, const TRewriteRules& rules) {
+ for (const auto& rule : rules) {
+ rule.Apply(text);
+ }
+ }
+
+ void ApplyEachWhileChanging(TString& text, const TRewriteRules& rules) {
+ constexpr size_t Limit = 16;
+
+ TString prev;
+ for (size_t i = 0; i < Limit + 1 && prev != text; ++i) {
+ prev = text;
+ ApplyEachOnce(text, rules);
+ Y_ENSURE(i != Limit);
+ }
+ }
+
+ TRewriteRule RegexRewriteRule(const TString& regex, TString rewrite) {
+ auto re2 = std::make_shared<RE2>(regex, RE2::Quiet);
+ Y_ENSURE(re2->ok(), re2->error() << " on regex '" << regex << "'");
+
+ TString error;
+ Y_ENSURE(
+ re2->CheckRewriteString(rewrite, &error),
+ error << " on rewrite '" << rewrite << "'");
+
+ return {
+ .Repr = regex + " -> " + rewrite,
+ .Apply = [re2, rewrite = std::move(rewrite)](TString& text) {
+ RE2::GlobalReplace(&text, *re2, rewrite);
+ },
+ };
+ }
+
+ TRewriteRule UnwrapQuotesRule() {
+ const TString regex = R"('([^ ][^ ]?)')";
+ auto re2 = std::make_shared<RE2>(regex, RE2::Quiet);
+ Y_ENSURE(re2->ok(), re2->error() << " on regex '" << regex << "'");
+
+ return {
+ .Repr = regex + " -> Quoted(\\1)",
+ .Apply = [re2](TString& text) {
+ TString content;
+ std::size_t i = 256;
+ while (RE2::PartialMatch(text, *re2, &content) && --i != 0) {
+ TString quoted = RE2::QuoteMeta(content);
+ for (size_t i = 0; i < 2 && quoted.StartsWith(R"(\\)"); ++i) {
+ quoted.erase(std::begin(quoted));
+ }
+ SubstGlobal(text, "'" + content + "'", quoted);
+ }
+ Y_ENSURE(i != 0);
+ },
+ };
+ }
+
+ TRewriteRule UnwrapQuotedSpaceRule() {
+ return RegexRewriteRule(R"(' ')", R"( )");
+ }
+
+ TString QuoteAntlrRewrite(TString rewrite) {
+ SubstGlobal(rewrite, R"(\)", R"(\\)");
+ SubstGlobal(rewrite, R"('\\')", R"('\\\\')");
+ return rewrite;
+ }
+
+ const NSQLReflect::TLexerGrammar* Grammar_;
+ const TStringBuf Mode_;
+
+ TRewriteRules Inliners_;
+
+ TRewriteRules Transformations_;
+
+ TRewriteRule UnwrapQuotes_;
+ TRewriteRules SpaceCollapses_;
+ TRewriteRule UnwrapQuotedSpace_;
+ };
+
+ TVector<std::tuple<TString, TString>> MakeRegexByOtherName(const NSQLReflect::TLexerGrammar& grammar, bool ansi) {
+ TLexerGrammarToRegexTranslator translator(grammar, ansi);
+
+ TVector<std::tuple<TString, TString>> regexes;
+ for (const auto& token : grammar.OtherNames) {
+ regexes.emplace_back(token, translator.ToRegex(token));
+ }
+ return regexes;
+ }
+
+} // namespace NSQLTranslationV1
diff --git a/yql/essentials/sql/v1/lexer/regex/regex.h b/yql/essentials/sql/v1/lexer/regex/regex.h
new file mode 100644
index 0000000000..1e9d92b653
--- /dev/null
+++ b/yql/essentials/sql/v1/lexer/regex/regex.h
@@ -0,0 +1,14 @@
+#pragma once
+
+#include <yql/essentials/sql/v1/reflect/sql_reflect.h>
+
+#include <util/generic/hash.h>
+
+namespace NSQLTranslationV1 {
+
+ // Makes regexes only for tokens from OtherNames,
+ // as keywords and punctuation are trivially matched.
+ TVector<std::tuple<TString, TString>> MakeRegexByOtherName(
+ const NSQLReflect::TLexerGrammar& grammar, bool ansi);
+
+} // namespace NSQLTranslationV1
diff --git a/yql/essentials/sql/v1/lexer/regex/regex_ut.cpp b/yql/essentials/sql/v1/lexer/regex/regex_ut.cpp
new file mode 100644
index 0000000000..47a94f53ed
--- /dev/null
+++ b/yql/essentials/sql/v1/lexer/regex/regex_ut.cpp
@@ -0,0 +1,90 @@
+#include "regex.h"
+
+#include <library/cpp/testing/unittest/registar.h>
+
+#include <contrib/libs/re2/re2/re2.h>
+
+using namespace NSQLTranslationV1;
+
+namespace {
+    THashMap<TString, TString> ToMap(TVector<std::tuple<TString, TString>> list) {
+        THashMap<TString, TString> map;
+        for (auto& [k, v] : list) { map.emplace(std::move(k), std::move(v)); }
+        return map;
+    }
+    auto grammar = NSQLReflect::LoadLexerGrammar();
+    auto defaultRegexes = ToMap(MakeRegexByOtherName(grammar, /* ansi = */ false));
+    auto ansiRegexes = ToMap(MakeRegexByOtherName(grammar, /* ansi = */ true));
+    void CheckRegex(bool ansi, const TStringBuf name, const TStringBuf expected) {
+        const TString regex = (ansi ? ansiRegexes : defaultRegexes).at(name);
+        const RE2 re2(regex);
+        Y_ENSURE(re2.ok(), re2.error());
+        UNIT_ASSERT_VALUES_EQUAL(regex, expected);
+    }
+} // namespace
+
+Y_UNIT_TEST_SUITE(SqlRegexTests) {
+ Y_UNIT_TEST(StringValue) {
+ CheckRegex(
+ /* ansi = */ false,
+ "STRING_VALUE",
+ R"(((((\'([^'\\]|(\\(.|\n)))*\'))|((\"([^"\\]|(\\(.|\n)))*\"))|((\@\@(.|\n)*?\@\@)+\@?))([sS]|[uU]|[yY]|[jJ]|[pP]([tT]|[bB]|[vV])?)?))");
+ }
+
+ Y_UNIT_TEST(AnsiStringValue) {
+ CheckRegex(
+ /* ansi = */ true,
+ "STRING_VALUE",
+ R"(((((\'([^']|(\'\'))*\'))|((\"([^"]|(\"\"))*\"))|((\@\@(.|\n)*?\@\@)+\@?))([sS]|[uU]|[yY]|[jJ]|[pP]([tT]|[bB]|[vV])?)?))");
+ }
+
+ Y_UNIT_TEST(IdPlain) {
+ CheckRegex(
+ /* ansi = */ false,
+ "ID_PLAIN",
+ R"(([a-z]|[A-Z]|_)([a-z]|[A-Z]|_|[0-9])*)");
+ }
+
+ Y_UNIT_TEST(IdQuoted) {
+ CheckRegex(
+ /* ansi = */ false,
+ "ID_QUOTED",
+ R"(\`(\\(.|\n)|\`\`|[^`\\])*\`)");
+ }
+
+ Y_UNIT_TEST(Digits) {
+ CheckRegex(
+ /* ansi = */ false,
+ "DIGITS",
+ R"(([0-9]+)|(0[xX]([0-9]|[a-f]|[A-F])+)|(0[oO][0-8]+)|(0[bB](0|1)+))");
+ }
+
+ Y_UNIT_TEST(Real) {
+ CheckRegex(
+ /* ansi = */ false,
+ "REAL",
+ R"((([0-9]+)\.[0-9]*([eE](\+|\-)?([0-9]+))?|([0-9]+)([eE](\+|\-)?([0-9]+)))([fF]|[pP]([fF](4|8)|[nN])?)?)");
+ }
+
+ Y_UNIT_TEST(Ws) {
+ CheckRegex(
+ /* ansi = */ false,
+ "WS",
+ R"(( |\r|\t|\n))");
+ }
+
+ Y_UNIT_TEST(Comment) {
+ CheckRegex(
+ /* ansi = */ false,
+ "COMMENT",
+ R"(((\/\*(.|\n)*?\*\/)|(\-\-[^\n\r]*(\r\n?|\n|$))))");
+ }
+
+ Y_UNIT_TEST(AnsiCommentSameAsDefault) {
+ // Because of recursive definition
+ UNIT_ASSERT_VALUES_EQUAL(
+ ansiRegexes.at("COMMENT"),
+ defaultRegexes.at("COMMENT"));
+ }
+
+} // Y_UNIT_TEST_SUITE(SqlRegexTests)
diff --git a/yql/essentials/sql/v1/lexer/regex/ut/ya.make b/yql/essentials/sql/v1/lexer/regex/ut/ya.make
new file mode 100644
index 0000000000..09eb74a3f6
--- /dev/null
+++ b/yql/essentials/sql/v1/lexer/regex/ut/ya.make
@@ -0,0 +1,13 @@
+UNITTEST_FOR(yql/essentials/sql/v1/lexer/regex)
+
+PEERDIR(
+ yql/essentials/sql/v1/lexer
+ yql/essentials/sql/v1/lexer/antlr4_pure_ansi
+)
+
+SRCS(
+ lexer_ut.cpp
+ regex_ut.cpp
+)
+
+END()
diff --git a/yql/essentials/sql/v1/lexer/regex/ya.make b/yql/essentials/sql/v1/lexer/regex/ya.make
new file mode 100644
index 0000000000..249dfbd11d
--- /dev/null
+++ b/yql/essentials/sql/v1/lexer/regex/ya.make
@@ -0,0 +1,39 @@
+LIBRARY()
+
+PEERDIR(
+ contrib/libs/re2
+ yql/essentials/public/issue
+ yql/essentials/parser/lexer_common
+ yql/essentials/sql/settings
+ yql/essentials/sql/v1/reflect
+)
+
+# TODO(vityaman): Extract to a single ya.make for reuse.
+
+SET(GRAMMAR_STRING_CORE_SINGLE_DEFAULT "~(QUOTE_SINGLE | BACKSLASH) | (BACKSLASH .)")
+SET(GRAMMAR_STRING_CORE_DOUBLE_DEFAULT "~(QUOTE_DOUBLE | BACKSLASH) | (BACKSLASH .)")
+SET(GRAMMAR_MULTILINE_COMMENT_CORE_DEFAULT "(.)")
+
+SET(GRAMMAR_STRING_CORE_SINGLE_ANSI "~QUOTE_SINGLE | (QUOTE_SINGLE QUOTE_SINGLE)")
+SET(GRAMMAR_STRING_CORE_DOUBLE_ANSI "~QUOTE_DOUBLE | (QUOTE_DOUBLE QUOTE_DOUBLE)")
+SET(GRAMMAR_MULTILINE_COMMENT_CORE_ANSI "MULTILINE_COMMENT | .")
+
+CFLAGS(
+ -DGRAMMAR_STRING_CORE_SINGLE_DEFAULT="\\\"${GRAMMAR_STRING_CORE_SINGLE_DEFAULT}\\\""
+ -DGRAMMAR_STRING_CORE_DOUBLE_DEFAULT="\\\"${GRAMMAR_STRING_CORE_DOUBLE_DEFAULT}\\\""
+ -DGRAMMAR_MULTILINE_COMMENT_CORE_DEFAULT="\\\"${GRAMMAR_MULTILINE_COMMENT_CORE_DEFAULT}\\\""
+ -DGRAMMAR_STRING_CORE_SINGLE_ANSI="\\\"${GRAMMAR_STRING_CORE_SINGLE_ANSI}\\\""
+ -DGRAMMAR_STRING_CORE_DOUBLE_ANSI="\\\"${GRAMMAR_STRING_CORE_DOUBLE_ANSI}\\\""
+ -DGRAMMAR_MULTILINE_COMMENT_CORE_ANSI="\\\"${GRAMMAR_MULTILINE_COMMENT_CORE_ANSI}\\\""
+)
+
+SRCS(
+ lexer.cpp
+ regex.cpp
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ ut
+)
diff --git a/yql/essentials/sql/v1/lexer/ut/ya.make b/yql/essentials/sql/v1/lexer/ut/ya.make
index c50c8cd727..87cb156cd9 100644
--- a/yql/essentials/sql/v1/lexer/ut/ya.make
+++ b/yql/essentials/sql/v1/lexer/ut/ya.make
@@ -4,8 +4,12 @@ PEERDIR(
yql/essentials/core/issue
yql/essentials/parser/lexer_common
yql/essentials/sql/v1/lexer/antlr3
+ yql/essentials/sql/v1/lexer/antlr3_ansi
yql/essentials/sql/v1/lexer/antlr4
+ yql/essentials/sql/v1/lexer/antlr4_ansi
yql/essentials/sql/v1/lexer/antlr4_pure
+ yql/essentials/sql/v1/lexer/antlr4_pure_ansi
+ yql/essentials/sql/v1/lexer/regex
)
SRCS(
diff --git a/yql/essentials/sql/v1/node.h b/yql/essentials/sql/v1/node.h
index dacb28d844..2b06ea7a78 100644
--- a/yql/essentials/sql/v1/node.h
+++ b/yql/essentials/sql/v1/node.h
@@ -1639,6 +1639,7 @@ namespace NSQLTranslationV1 {
return {};
}
+ void EnumerateBuiltins(const std::function<void(std::string_view name, std::string_view kind)>& callback);
bool Parseui32(TNodePtr from, ui32& to);
TNodePtr GroundWithExpr(const TNodePtr& ground, const TNodePtr& expr);
const TString* DeriveCommonSourceName(const TVector<TNodePtr> &nodes);
diff --git a/yql/essentials/sql/v1/query.cpp b/yql/essentials/sql/v1/query.cpp
index 8e88606478..fc18200b51 100644
--- a/yql/essentials/sql/v1/query.cpp
+++ b/yql/essentials/sql/v1/query.cpp
@@ -3677,9 +3677,10 @@ TNodePtr BuildAnalyze(TPosition pos, const TString& service, const TDeferredAtom
class TShowCreateNode final : public TAstListNode {
public:
- TShowCreateNode(TPosition pos, const TTableRef& tr, TScopedStatePtr scoped)
+ TShowCreateNode(TPosition pos, const TTableRef& tr, const TString& type, TScopedStatePtr scoped)
: TAstListNode(pos)
, Table(tr)
+ , Type(type)
, Scoped(scoped)
, FakeSource(BuildFakeSource(pos))
{
@@ -3691,9 +3692,9 @@ public:
if (!Table.Options->Init(ctx, src)) {
return false;
}
- Table.Options = L(Table.Options, Q(Y(Q("showCreateTable"))));
+ Table.Options = L(Table.Options, Q(Y(Q(Type))));
} else {
- Table.Options = Y(Q(Y(Q("showCreateTable"))));
+ Table.Options = Y(Q(Y(Q(Type))));
}
bool asRef = ctx.PragmaRefSelect;
@@ -3741,12 +3742,14 @@ public:
}
private:
TTableRef Table;
+ // showCreateTable, showCreateView, ...
+ TString Type;
TScopedStatePtr Scoped;
TSourcePtr FakeSource;
};
-TNodePtr BuildShowCreate(TPosition pos, const TTableRef& tr, TScopedStatePtr scoped) {
- return new TShowCreateNode(pos, tr, scoped);
+TNodePtr BuildShowCreate(TPosition pos, const TTableRef& tr, const TString& type, TScopedStatePtr scoped) {
+ return new TShowCreateNode(pos, tr, type, scoped);
}
class TBaseBackupCollectionNode
diff --git a/yql/essentials/sql/v1/reflect/sql_reflect.cpp b/yql/essentials/sql/v1/reflect/sql_reflect.cpp
new file mode 100644
index 0000000000..c0af06e0b4
--- /dev/null
+++ b/yql/essentials/sql/v1/reflect/sql_reflect.cpp
@@ -0,0 +1,173 @@
+#include "sql_reflect.h"
+
+#include <library/cpp/resource/resource.h>
+
+#include <util/string/split.h>
+#include <util/string/strip.h>
+
+namespace NSQLReflect {
+
+ const TStringBuf ReflectPrefix = "//!";
+ const TStringBuf SectionPrefix = "//! section:";
+ const TStringBuf SectionPunctuation = "//! section:punctuation";
+ const TStringBuf SectionLetter = "//! section:letter";
+ const TStringBuf SectionKeyword = "//! section:keyword";
+ const TStringBuf SectionOther = "//! section:other";
+ const TStringBuf FragmentPrefix = "fragment ";
+
+ TVector<TString> GetResourceLines(const TStringBuf key) {
+ TString text;
+ Y_ENSURE(NResource::FindExact(key, &text));
+
+ TVector<TString> lines;
+ Split(text, "\n", lines);
+ return lines;
+ }
+
+ void Format(TVector<TString>& lines) {
+ for (size_t i = 0; i < lines.size(); ++i) {
+ auto& line = lines[i];
+
+ StripInPlace(line);
+
+ if (line.StartsWith("//") || (line.Contains(':') && line.Contains(';'))) {
+ continue;
+ }
+
+ size_t j = i + 1;
+ do {
+ line += lines.at(j);
+ } while (!lines.at(j++).Contains(';'));
+
+ auto first = std::next(std::begin(lines), i + 1);
+ auto last = std::next(std::begin(lines), j);
+ lines.erase(first, last);
+ }
+
+ for (auto& line : lines) {
+ CollapseInPlace(line);
+ SubstGlobal(line, " ;", ";");
+ SubstGlobal(line, " :", ":");
+ SubstGlobal(line, " )", ")");
+ SubstGlobal(line, "( ", "(");
+ }
+ }
+
+ void Purify(TVector<TString>& lines) {
+ const auto [first, last] = std::ranges::remove_if(lines, [](const TString& line) {
+ return (line.StartsWith("//") && !line.StartsWith(ReflectPrefix)) || line.empty();
+ });
+ lines.erase(first, last);
+ }
+
+ THashMap<TStringBuf, TVector<TString>> GroupBySection(TVector<TString>&& lines) {
+ TVector<TStringBuf> sections = {
+ "",
+ SectionPunctuation,
+ SectionLetter,
+ SectionKeyword,
+ SectionOther,
+ };
+
+ size_t section = 0;
+
+ THashMap<TStringBuf, TVector<TString>> groups;
+ for (auto& line : lines) {
+ if (line.StartsWith(SectionPrefix)) {
+ Y_ENSURE(sections.at(section + 1) == line);
+ section += 1;
+ continue;
+ }
+
+ groups[sections.at(section)].emplace_back(std::move(line));
+ }
+
+ groups.erase("");
+ groups.erase(SectionLetter);
+
+ return groups;
+ }
+
+ std::tuple<TString, TString> ParseLexerRule(TString&& line) {
+ size_t colonPos = line.find(':');
+ size_t semiPos = line.rfind(';');
+
+ Y_ENSURE(
+ colonPos != TString::npos &&
+ semiPos != TString::npos &&
+ colonPos < semiPos);
+
+ TString block = line.substr(colonPos + 2, semiPos - colonPos - 2);
+ SubstGlobal(block, "\\\\", "\\");
+
+ TString name = std::move(line);
+ name.resize(colonPos);
+
+ return std::make_tuple(std::move(name), std::move(block));
+ }
+
+ void ParsePunctuationLine(TString&& line, TLexerGrammar& grammar) {
+ auto [name, block] = ParseLexerRule(std::move(line));
+ block = block.erase(std::begin(block));
+ block.pop_back();
+
+ SubstGlobal(block, "\\\'", "\'");
+
+ if (!name.StartsWith(FragmentPrefix)) {
+ grammar.PunctuationNames.emplace(name);
+ }
+
+ SubstGlobal(name, FragmentPrefix, "");
+ grammar.BlockByName.emplace(std::move(name), std::move(block));
+ }
+
+ void ParseKeywordLine(TString&& line, TLexerGrammar& grammar) {
+ auto [name, block] = ParseLexerRule(std::move(line));
+ SubstGlobal(block, "'", "");
+ SubstGlobal(block, " ", "");
+
+ Y_ENSURE(name == block || (name == "TSKIP" && block == "SKIP"));
+ grammar.KeywordNames.emplace(std::move(name));
+ }
+
+ void ParseOtherLine(TString&& line, TLexerGrammar& grammar) {
+ auto [name, block] = ParseLexerRule(std::move(line));
+
+ if (!name.StartsWith(FragmentPrefix)) {
+ grammar.OtherNames.emplace_back(name);
+ }
+
+ SubstGlobal(name, FragmentPrefix, "");
+ SubstGlobal(block, " -> channel(HIDDEN)", "");
+ grammar.BlockByName.emplace(std::move(name), std::move(block));
+ }
+
+ TLexerGrammar LoadLexerGrammar() {
+ TVector<TString> lines = GetResourceLines("SQLv1Antlr4.g.in");
+ Purify(lines);
+ Format(lines);
+ Purify(lines);
+
+ THashMap<TStringBuf, TVector<TString>> sections;
+ sections = GroupBySection(std::move(lines));
+
+ TLexerGrammar grammar;
+
+ for (auto& [section, lines] : sections) {
+ for (auto& line : lines) {
+ if (section == SectionPunctuation) {
+ ParsePunctuationLine(std::move(line), grammar);
+ } else if (section == SectionKeyword) {
+ ParseKeywordLine(std::move(line), grammar);
+ } else if (section == SectionOther) {
+ ParseOtherLine(std::move(line), grammar);
+ } else {
+ Y_ABORT("Unexpected section %s", section);
+ }
+ }
+ }
+
+ return grammar;
+ }
+
+} // namespace NSQLReflect
diff --git a/yql/essentials/sql/v1/reflect/sql_reflect.h b/yql/essentials/sql/v1/reflect/sql_reflect.h
new file mode 100644
index 0000000000..ca39870687
--- /dev/null
+++ b/yql/essentials/sql/v1/reflect/sql_reflect.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include <util/generic/string.h>
+#include <util/generic/hash.h>
+#include <util/generic/hash_set.h>
+#include <util/generic/vector.h>
+
+namespace NSQLReflect {
+
+ struct TLexerGrammar {
+ THashSet<TString> KeywordNames;
+ THashSet<TString> PunctuationNames;
+ TVector<TString> OtherNames;
+ THashMap<TString, TString> BlockByName;
+ };
+
+ TLexerGrammar LoadLexerGrammar();
+
+} // namespace NSQLReflect
diff --git a/yql/essentials/sql/v1/reflect/sql_reflect_ut.cpp b/yql/essentials/sql/v1/reflect/sql_reflect_ut.cpp
new file mode 100644
index 0000000000..7bef2879e5
--- /dev/null
+++ b/yql/essentials/sql/v1/reflect/sql_reflect_ut.cpp
@@ -0,0 +1,46 @@
+#include "sql_reflect.h"
+
+#include <library/cpp/testing/unittest/registar.h>
+
+using namespace NSQLReflect;
+
+namespace {
+ TLexerGrammar grammar = LoadLexerGrammar();
+} // namespace
+
+Y_UNIT_TEST_SUITE(SqlReflectTests) {
+ Y_UNIT_TEST(Keywords) {
+ UNIT_ASSERT_VALUES_EQUAL(grammar.KeywordNames.contains("SELECT"), true);
+ UNIT_ASSERT_VALUES_EQUAL(grammar.KeywordNames.contains("INSERT"), true);
+ UNIT_ASSERT_VALUES_EQUAL(grammar.KeywordNames.contains("WHERE"), true);
+ UNIT_ASSERT_VALUES_EQUAL(grammar.KeywordNames.contains("COMMIT"), true);
+ }
+
+ Y_UNIT_TEST(Punctuation) {
+ UNIT_ASSERT_VALUES_EQUAL(grammar.PunctuationNames.contains("LPAREN"), true);
+ UNIT_ASSERT_VALUES_EQUAL(grammar.BlockByName.at("LPAREN"), "(");
+
+ UNIT_ASSERT_VALUES_EQUAL(grammar.PunctuationNames.contains("MINUS"), true);
+ UNIT_ASSERT_VALUES_EQUAL(grammar.BlockByName.at("MINUS"), "-");
+
+ UNIT_ASSERT_VALUES_EQUAL(grammar.PunctuationNames.contains("NAMESPACE"), true);
+ UNIT_ASSERT_VALUES_EQUAL(grammar.BlockByName.at("NAMESPACE"), "::");
+ }
+
+ Y_UNIT_TEST(Other) {
+ UNIT_ASSERT_VALUES_EQUAL(grammar.OtherNames.contains("REAL"), true);
+ UNIT_ASSERT_VALUES_EQUAL(grammar.OtherNames.contains("STRING_VALUE"), true);
+ UNIT_ASSERT_VALUES_EQUAL(grammar.OtherNames.contains("STRING_MULTILINE"), false);
+
+ UNIT_ASSERT_VALUES_EQUAL(
+ grammar.BlockByName.at("FLOAT_EXP"),
+ "E (PLUS | MINUS)? DECDIGITS");
+ UNIT_ASSERT_VALUES_EQUAL(
+ grammar.BlockByName.at("STRING_MULTILINE"),
+ "(DOUBLE_COMMAT .*? DOUBLE_COMMAT)+ COMMAT?");
+ UNIT_ASSERT_VALUES_EQUAL(
+ grammar.BlockByName.at("REAL"),
+ "(DECDIGITS DOT DIGIT* FLOAT_EXP? | DECDIGITS FLOAT_EXP) (F | P (F ('4' | '8') | N)?)?");
+ }
+
+} // Y_UNIT_TEST_SUITE(SqlReflectTests)
diff --git a/yql/essentials/sql/v1/reflect/ut/ya.make b/yql/essentials/sql/v1/reflect/ut/ya.make
new file mode 100644
index 0000000000..ee52ff0837
--- /dev/null
+++ b/yql/essentials/sql/v1/reflect/ut/ya.make
@@ -0,0 +1,7 @@
+UNITTEST_FOR(yql/essentials/sql/v1/reflect)
+
+SRCS(
+ sql_reflect_ut.cpp
+)
+
+END()
diff --git a/yql/essentials/sql/v1/reflect/ya.make b/yql/essentials/sql/v1/reflect/ya.make
new file mode 100644
index 0000000000..5865654c86
--- /dev/null
+++ b/yql/essentials/sql/v1/reflect/ya.make
@@ -0,0 +1,13 @@
+LIBRARY()
+
+SRCS(
+ sql_reflect.cpp
+)
+
+RESOURCE(DONT_PARSE yql/essentials/sql/v1/SQLv1Antlr4.g.in SQLv1Antlr4.g.in)
+
+END()
+
+RECURSE_FOR_TESTS(
+ ut
+)
diff --git a/yql/essentials/sql/v1/select.cpp b/yql/essentials/sql/v1/select.cpp
index 832495bc3c..c4290f3268 100644
--- a/yql/essentials/sql/v1/select.cpp
+++ b/yql/essentials/sql/v1/select.cpp
@@ -2956,6 +2956,10 @@ public:
return Source->GetSessionWindowSpec();
}
+ IJoin* GetJoin() override {
+ return Source->GetJoin();
+ }
+
TNodePtr DoClone() const final {
return {};
}
diff --git a/yql/essentials/sql/v1/source.h b/yql/essentials/sql/v1/source.h
index 6eb040f2e4..3048b2d584 100644
--- a/yql/essentials/sql/v1/source.h
+++ b/yql/essentials/sql/v1/source.h
@@ -318,7 +318,7 @@ namespace NSQLTranslationV1 {
TNodePtr BuildWriteTable(TPosition pos, const TString& label, const TTableRef& table, EWriteColumnMode mode, TNodePtr options,
TScopedStatePtr scoped);
TNodePtr BuildAnalyze(TPosition pos, const TString& service, const TDeferredAtom& cluster, const TAnalyzeParams& params, TScopedStatePtr scoped);
- TNodePtr BuildShowCreate(TPosition pos, const TTableRef& table, TScopedStatePtr scoped);
+ TNodePtr BuildShowCreate(TPosition pos, const TTableRef& table, const TString& type, TScopedStatePtr scoped);
TNodePtr BuildAlterSequence(TPosition pos, const TString& service, const TDeferredAtom& cluster, const TString& id, const TSequenceParameters& params, TScopedStatePtr scoped);
TSourcePtr TryMakeSourceFromExpression(TPosition pos, TContext& ctx, const TString& currService, const TDeferredAtom& currCluster,
TNodePtr node, const TString& view = {});
diff --git a/yql/essentials/sql/v1/sql_query.cpp b/yql/essentials/sql/v1/sql_query.cpp
index b59ae88c4b..d6b6f96bef 100644
--- a/yql/essentials/sql/v1/sql_query.cpp
+++ b/yql/essentials/sql/v1/sql_query.cpp
@@ -191,7 +191,7 @@ static bool TransferSettingsEntry(std::map<TString, TNodePtr>& out,
ctx.Context().Error() << key.Name << " is not supported in ALTER";
return false;
}
-
+
if (!out.emplace(keyName, value).second) {
ctx.Context().Error() << "Duplicate transfer setting: " << key.Name;
}
@@ -1961,7 +1961,7 @@ bool TSqlQuery::Statement(TVector<TNodePtr>& blocks, const TRule_sql_stmt_core&
break;
}
case TRule_sql_stmt_core::kAltSqlStmtCore62: {
- // show_create_table_stmt: SHOW CREATE TABLE table_ref
+ // show_create_table_stmt: SHOW CREATE (TABLE | VIEW) table_ref
Ctx.BodyPart();
const auto& rule = core.GetAlt_sql_stmt_core62().GetRule_show_create_table_stmt1();
@@ -1969,8 +1969,16 @@ bool TSqlQuery::Statement(TVector<TNodePtr>& blocks, const TRule_sql_stmt_core&
if (!SimpleTableRefImpl(rule.GetRule_simple_table_ref4(), tr)) {
return false;
}
+ TString type;
+ if (auto typeToken = to_lower(rule.GetToken3().GetValue()); typeToken == "table") {
+ type = "showCreateTable";
+ } else if (typeToken == "view") {
+ type = "showCreateView";
+ } else {
+ YQL_ENSURE(false, "Unsupported SHOW CREATE statement type: " << typeToken);
+ }
- AddStatementToBlocks(blocks, BuildShowCreate(Ctx.Pos(), tr, Ctx.Scoped));
+ AddStatementToBlocks(blocks, BuildShowCreate(Ctx.Pos(), tr, type, Ctx.Scoped));
break;
}
case TRule_sql_stmt_core::ALT_NOT_SET:
@@ -2577,6 +2585,7 @@ void TSqlQuery::AlterTableDropChangefeed(const TRule_alter_table_drop_changefeed
params.DropChangefeeds.emplace_back(IdEx(node.GetRule_an_id3(), *this));
}
+/// @see EnumeratePragmas too
TNodePtr TSqlQuery::PragmaStatement(const TRule_pragma_stmt& stmt, bool& success) {
success = false;
const TString& prefix = OptIdPrefixAsStr(stmt.GetRule_opt_id_prefix_or_type2(), *this);
@@ -3521,14 +3530,20 @@ TNodePtr TSqlQuery::Build(const TRule_delete_stmt& stmt) {
TSourcePtr source = BuildTableSource(Ctx.Pos(), table);
+ const bool isBatch = stmt.HasBlock1();
TNodePtr options = nullptr;
+
if (stmt.HasBlock6()) {
+ if (isBatch) {
+ Ctx.Error(GetPos(stmt.GetToken2()))
+ << "BATCH DELETE is unsupported with RETURNING";
+ return nullptr;
+ }
+
options = ReturningList(stmt.GetBlock6().GetRule_returning_columns_list1());
options = options->Y(options);
}
- const bool isBatch = stmt.HasBlock1();
-
if (stmt.HasBlock5()) {
switch (stmt.GetBlock5().Alt_case()) {
case TRule_delete_stmt_TBlock5::kAlt1: {
@@ -3585,14 +3600,20 @@ TNodePtr TSqlQuery::Build(const TRule_update_stmt& stmt) {
return nullptr;
}
+ const bool isBatch = stmt.HasBlock1();
TNodePtr options = nullptr;
+
if (stmt.HasBlock5()) {
+ if (isBatch) {
+ Ctx.Error(GetPos(stmt.GetToken2()))
+ << "BATCH UPDATE is unsupported with RETURNING";
+ return nullptr;
+ }
+
options = ReturningList(stmt.GetBlock5().GetRule_returning_columns_list1());
options = options->Y(options);
}
- const bool isBatch = stmt.HasBlock1();
-
switch (stmt.GetBlock4().Alt_case()) {
case TRule_update_stmt_TBlock4::kAlt1: {
const auto& alt = stmt.GetBlock4().GetAlt1();
@@ -3925,4 +3946,60 @@ bool TSqlQuery::ParseTableStoreFeatures(std::map<TString, TDeferredAtom> & resul
return true;
}
+void EnumeratePragmas(std::function<void(std::string_view)> callback) {
+ callback("ClassicDivision");
+ callback("StrictJoinKeyTypes");
+ callback("DisableStrictJoinKeyTypes");
+ callback("CheckedOps");
+ callback("UnicodeLiterals");
+ callback("DisableUnicodeLiterals");
+ callback("WarnUntypedStringLiterals");
+ callback("DisableWarnUntypedStringLiterals");
+ callback("File");
+ callback("FileOption");
+ callback("Folder");
+ callback("Udf");
+ callback("Library");
+ callback("Package");
+ callback("PackageVersion");
+ callback("RefSelect");
+ callback("SampleSelect");
+ callback("AllowDotInAlias");
+ callback("OverrideLibrary");
+ callback("DirectRead");
+ callback("AutoCommit");
+ callback("UseTablePrefixForEach");
+ callback("PathPrefix");
+ callback("GroupByLimit");
+ callback("GroupByCubeLimit");
+ callback("SimpleColumns");
+ callback("DisableSimpleColumns");
+ callback("ResultRowsLimit");
+ callback("ResultSizeLimit");
+ callback("RuntimeLogLevel");
+ callback("Warning");
+ callback("Greetings");
+ callback("WarningMsg");
+ callback("ErrorMsg");
+ callback("AllowUnnamedColumns");
+ callback("WarnUnnamedColumns");
+ callback("DiscoveryMode");
+ callback("EnableSystemColumns");
+ callback("DqEngine");
+ callback("BlockEngine");
+ callback("JsonQueryReturnsJsonDocument");
+ callback("DisableJsonQueryReturnsJsonDocument");
+ callback("PositionalUnionAll");
+ callback("PqReadBy");
+ callback("DataWatermarks");
+ callback("FeatureR010");
+ callback("CostBasedOptimizer");
+ callback("Engine");
+ callback("yson.AutoConvert");
+ callback("yson.Strict");
+ callback("yson.DisableStrict");
+ callback("yson.CastToString");
+ callback("yson.DisableCastToString");
+}
+
} // namespace NSQLTranslationV1
diff --git a/yql/essentials/sql/v1/sql_query.h b/yql/essentials/sql/v1/sql_query.h
index ea5b917f8f..52cd3c402b 100644
--- a/yql/essentials/sql/v1/sql_query.h
+++ b/yql/essentials/sql/v1/sql_query.h
@@ -84,4 +84,6 @@ private:
const bool TopLevel;
};
+void EnumeratePragmas(std::function<void(std::string_view)> callback);
+
} // namespace NSQLTranslationV1
diff --git a/yql/essentials/sql/v1/sql_ut_common.h b/yql/essentials/sql/v1/sql_ut_common.h
index 36fe641ba6..564885f8c5 100644
--- a/yql/essentials/sql/v1/sql_ut_common.h
+++ b/yql/essentials/sql/v1/sql_ut_common.h
@@ -1475,6 +1475,12 @@ Y_UNIT_TEST_SUITE(SqlParsingOnly) {
UNIT_ASSERT_VALUES_EQUAL(1, elementStat["Write"]);
}
+ Y_UNIT_TEST(DeleteFromTableBatchReturning) {
+ NYql::TAstParseResult res = SqlToYql("batch delete from plato.Input returning *;", 10, "kikimr");
+ UNIT_ASSERT(!res.Root);
+ UNIT_ASSERT_NO_DIFF(Err2Str(res), "<main>:1:6: Error: BATCH DELETE is unsupported with RETURNING\n");
+ }
+
Y_UNIT_TEST(DeleteFromTableOnValues) {
NYql::TAstParseResult res = SqlToYql("delete from plato.Input on (key) values (1);",
10, "kikimr");
@@ -1559,6 +1565,12 @@ Y_UNIT_TEST_SUITE(SqlParsingOnly) {
UNIT_ASSERT_VALUES_EQUAL(1, elementStat["Write"]);
}
+ Y_UNIT_TEST(UpdateByValuesBatchReturning) {
+ NYql::TAstParseResult res = SqlToYql("batch update plato.Input set value = 'cool' where key = 200 returning key;", 10, "kikimr");
+ UNIT_ASSERT(!res.Root);
+ UNIT_ASSERT_NO_DIFF(Err2Str(res), "<main>:1:6: Error: BATCH UPDATE is unsupported with RETURNING\n");
+ }
+
Y_UNIT_TEST(UpdateByMultiValues) {
NYql::TAstParseResult res = SqlToYql("update plato.Input set (key, value, subkey) = ('2','ddd',':') where key = 200;", 10, "kikimr");
UNIT_ASSERT(res.Root);
@@ -3197,6 +3209,26 @@ Y_UNIT_TEST_SUITE(SqlParsingOnly) {
UNIT_ASSERT_VALUES_EQUAL(1, elementStat["showCreateTable"]);
}
+ Y_UNIT_TEST(ShowCreateView) {
+ NYql::TAstParseResult res = SqlToYql(R"(
+ USE plato;
+ SHOW CREATE VIEW user;
+ )");
+ UNIT_ASSERT(res.Root);
+
+ TVerifyLineFunc verifyLine = [](const TString& word, const TString& line) {
+ if (word == "Read") {
+ UNIT_ASSERT_STRING_CONTAINS(line, "showCreateView");
+ }
+ };
+
+ TWordCountHive elementStat = {{"Read"}, {"showCreateView"}};
+ VerifyProgram(res, elementStat, verifyLine);
+
+ UNIT_ASSERT_VALUES_EQUAL(elementStat["Read"], 1);
+ UNIT_ASSERT_VALUES_EQUAL(elementStat["showCreateView"], 1);
+ }
+
Y_UNIT_TEST(OptionalAliases) {
UNIT_ASSERT(SqlToYql("USE plato; SELECT foo FROM (SELECT key foo FROM Input);").IsOk());
UNIT_ASSERT(SqlToYql("USE plato; SELECT a.x FROM Input1 a JOIN Input2 b ON a.key = b.key;").IsOk());
diff --git a/yql/essentials/tests/s-expressions/minirun/part6/canondata/result.json b/yql/essentials/tests/s-expressions/minirun/part6/canondata/result.json
index 8b0a76c9b1..181e08dbb8 100644
--- a/yql/essentials/tests/s-expressions/minirun/part6/canondata/result.json
+++ b/yql/essentials/tests/s-expressions/minirun/part6/canondata/result.json
@@ -1,4 +1,18 @@
{
+ "test.test[Blocks-ListToBlocks-default.txt-Debug]": [
+ {
+ "checksum": "a75bba410aa5a961fb719529319ca282",
+ "size": 929,
+ "uri": "https://{canondata_backend}/1130705/45dbf61264fbf40322799d506e95dda3522ed97f/resource.tar.gz#test.test_Blocks-ListToBlocks-default.txt-Debug_/opt.yql"
+ }
+ ],
+ "test.test[Blocks-ListToBlocks-default.txt-Results]": [
+ {
+ "checksum": "86b7325d54c0a9d92f4a858c1328793d",
+ "size": 1794,
+ "uri": "https://{canondata_backend}/1130705/45dbf61264fbf40322799d506e95dda3522ed97f/resource.tar.gz#test.test_Blocks-ListToBlocks-default.txt-Results_/results.txt"
+ }
+ ],
"test.test[Builtins-AsOptionalType-default.txt-Debug]": [
{
"checksum": "147133ffe72e4619d67cefcf20918941",
diff --git a/yql/essentials/tests/s-expressions/minirun/part8/canondata/result.json b/yql/essentials/tests/s-expressions/minirun/part8/canondata/result.json
index 42fd52ccd1..2e73910948 100644
--- a/yql/essentials/tests/s-expressions/minirun/part8/canondata/result.json
+++ b/yql/essentials/tests/s-expressions/minirun/part8/canondata/result.json
@@ -41,6 +41,20 @@
"uri": "https://{canondata_backend}/1597364/1f3e7f25c6ddb50b091fa9aaedebacdf37917233/resource.tar.gz#test.test_Aggregation-InMemAggregateZero-default.txt-Results_/results.txt"
}
],
+ "test.test[Blocks-ListFromBlocks-default.txt-Debug]": [
+ {
+ "checksum": "e997dbfaff24e3885002450606790825",
+ "size": 928,
+ "uri": "https://{canondata_backend}/1937429/6dc717bd36879ce84e2fa1eb85b97eefce0733e9/resource.tar.gz#test.test_Blocks-ListFromBlocks-default.txt-Debug_/opt.yql"
+ }
+ ],
+ "test.test[Blocks-ListFromBlocks-default.txt-Results]": [
+ {
+ "checksum": "86b7325d54c0a9d92f4a858c1328793d",
+ "size": 1794,
+ "uri": "https://{canondata_backend}/1937429/6dc717bd36879ce84e2fa1eb85b97eefce0733e9/resource.tar.gz#test.test_Blocks-ListFromBlocks-default.txt-Results_/results.txt"
+ }
+ ],
"test.test[Builtins-ToIntegral-default.txt-Debug]": [
{
"checksum": "33f569baf5940bbd79fbf635f47cc363",
diff --git a/yql/essentials/tests/s-expressions/suites/Blocks/ListFromBlocks.yqls b/yql/essentials/tests/s-expressions/suites/Blocks/ListFromBlocks.yqls
new file mode 100644
index 0000000000..d84d220223
--- /dev/null
+++ b/yql/essentials/tests/s-expressions/suites/Blocks/ListFromBlocks.yqls
@@ -0,0 +1,23 @@
+(
+(let config (DataSource 'config))
+(let res_sink (DataSink 'result))
+
+(let row1 (AsStruct '('"key" (Int32 '1)) '('"subkey" (Int32 '"1001")) '('"value" (String '"AAA"))))
+(let row2 (AsStruct '('"key" (Int32 '2)) '('"subkey" (Int32 '"1002")) '('"value" (String '"AAB"))))
+(let row3 (AsStruct '('"key" (Int32 '3)) '('"subkey" (Int32 '"1003")) '('"value" (String '"AAC"))))
+(let row4 (AsStruct '('"key" (Int32 '4)) '('"subkey" (Int32 '"1004")) '('"value" (String '"AAD"))))
+(let row5 (AsStruct '('"key" (Int32 '5)) '('"subkey" (Int32 '"1005")) '('"value" (String '"AAE"))))
+(let table (AsList row1 row2 row3 row4 row5))
+
+(let expandLambda (lambda '(item) (Member item '"key") (Member item '"subkey") (Member item '"value")))
+(let wideBlockStream (WideToBlocks (FromFlow (ExpandMap (ToFlow table) expandLambda))))
+
+(let narrowLambda (lambda '(key subkey value blockLength) (AsStruct '('"key" key) '('"subkey" subkey) '('"value" value) '('"_yql_block_length" blockLength))))
+(let blockList (ForwardList (NarrowMap (ToFlow wideBlockStream) narrowLambda)))
+
+(let list (ListFromBlocks blockList))
+
+(let world (Write! world res_sink (Key) list '('('type))))
+(let world (Commit! world res_sink))
+(return world)
+)
diff --git a/yql/essentials/tests/s-expressions/suites/Blocks/ListToBlocks.yqls b/yql/essentials/tests/s-expressions/suites/Blocks/ListToBlocks.yqls
new file mode 100644
index 0000000000..ad596fcc38
--- /dev/null
+++ b/yql/essentials/tests/s-expressions/suites/Blocks/ListToBlocks.yqls
@@ -0,0 +1,23 @@
+(
+(let config (DataSource 'config))
+(let res_sink (DataSink 'result))
+
+(let row1 (AsStruct '('"key" (Int32 '1)) '('"subkey" (Int32 '"1001")) '('"value" (String '"AAA"))))
+(let row2 (AsStruct '('"key" (Int32 '2)) '('"subkey" (Int32 '"1002")) '('"value" (String '"AAB"))))
+(let row3 (AsStruct '('"key" (Int32 '3)) '('"subkey" (Int32 '"1003")) '('"value" (String '"AAC"))))
+(let row4 (AsStruct '('"key" (Int32 '4)) '('"subkey" (Int32 '"1004")) '('"value" (String '"AAD"))))
+(let row5 (AsStruct '('"key" (Int32 '5)) '('"subkey" (Int32 '"1005")) '('"value" (String '"AAE"))))
+(let table (AsList row1 row2 row3 row4 row5))
+
+(let blockList (ListToBlocks table))
+
+(let expandLambda (lambda '(item) (Member item '"key") (Member item '"subkey") (Member item '"value") (Member item '"_yql_block_length")))
+(let wideStream (WideFromBlocks (FromFlow (ExpandMap (ToFlow blockList) expandLambda))))
+
+(let narrowLambda (lambda '(key subkey value) (AsStruct '('"key" key) '('"subkey" subkey) '('"value" value))))
+(let list (ForwardList (NarrowMap (ToFlow wideStream) narrowLambda)))
+
+(let world (Write! world res_sink (Key) list '('('type))))
+(let world (Commit! world res_sink))
+(return world)
+)
diff --git a/yql/essentials/tests/sql/minirun/part7/canondata/result.json b/yql/essentials/tests/sql/minirun/part7/canondata/result.json
index af489ad7c1..e8c9926ae7 100644
--- a/yql/essentials/tests/sql/minirun/part7/canondata/result.json
+++ b/yql/essentials/tests/sql/minirun/part7/canondata/result.json
@@ -1285,5 +1285,19 @@
"size": 2401,
"uri": "https://{canondata_backend}/1817427/cd7fe4c1c700931e8c564489ae0d616c780dd82b/resource.tar.gz#test.test_window-win_func_percent_rank-default.txt-Results_/results.txt"
}
+ ],
+ "test.test[window-yql-19709-default.txt-Debug]": [
+ {
+ "checksum": "b4f5b1907698dfd26478f9b7345f7794",
+ "size": 1095,
+ "uri": "https://{canondata_backend}/1942525/74ba357282f32d148ae3efbd223bc512d04c025e/resource.tar.gz#test.test_window-yql-19709-default.txt-Debug_/opt.yql"
+ }
+ ],
+ "test.test[window-yql-19709-default.txt-Results]": [
+ {
+ "checksum": "0cc039edab483039733b1fe709ffe480",
+ "size": 2414,
+ "uri": "https://{canondata_backend}/1942525/74ba357282f32d148ae3efbd223bc512d04c025e/resource.tar.gz#test.test_window-yql-19709-default.txt-Results_/results.txt"
+ }
]
}
diff --git a/yql/essentials/tests/sql/sql2yql/canondata/result.json b/yql/essentials/tests/sql/sql2yql/canondata/result.json
index 6580d9eca7..bd605930b7 100644
--- a/yql/essentials/tests/sql/sql2yql/canondata/result.json
+++ b/yql/essentials/tests/sql/sql2yql/canondata/result.json
@@ -7405,6 +7405,13 @@
"uri": "https://{canondata_backend}/1942173/99e88108149e222741552e7e6cddef041d6a2846/resource.tar.gz#test_sql2yql.test_window-yql-18879_/sql.yql"
}
],
+ "test_sql2yql.test[window-yql-19709]": [
+ {
+ "checksum": "8d68ed158e1fd7d258c5beea512209ea",
+ "size": 2013,
+ "uri": "https://{canondata_backend}/1847551/ee61b63f66a264ad4afe7437ef7f90941caf4456/resource.tar.gz#test_sql2yql.test_window-yql-19709_/sql.yql"
+ }
+ ],
"test_sql_format.test[action-action_opt_args]": [
{
"uri": "file://test_sql_format.test_action-action_opt_args_/formatted.sql"
@@ -11240,6 +11247,11 @@
"uri": "file://test_sql_format.test_window-yql-18879_/formatted.sql"
}
],
+ "test_sql_format.test[window-yql-19709]": [
+ {
+ "uri": "file://test_sql_format.test_window-yql-19709_/formatted.sql"
+ }
+ ],
"test_sql_negative.test[action-no_columns_in_do-default.txt]": [
{
"checksum": "0fad6da8e4c5a2ab2c1e5a231ea430d1",
diff --git a/yql/essentials/tests/sql/sql2yql/canondata/test_sql_format.test_window-yql-19709_/formatted.sql b/yql/essentials/tests/sql/sql2yql/canondata/test_sql_format.test_window-yql-19709_/formatted.sql
new file mode 100644
index 0000000000..ff40767025
--- /dev/null
+++ b/yql/essentials/tests/sql/sql2yql/canondata/test_sql_format.test_window-yql-19709_/formatted.sql
@@ -0,0 +1,24 @@
+/* syntax version 1 */
+/* postgres can not */
+PRAGMA DistinctOverWindow;
+
+$input = AsList(
+ AsStruct(1 AS key, 1001 AS subkey, 'AAA' AS value),
+ AsStruct(150 AS key, 150 AS subkey, 'AAB' AS value),
+ AsStruct(3 AS key, 3003 AS subkey, 'AAC' AS value),
+ AsStruct(150 AS key, 150 AS subkey, 'AAD' AS value),
+ AsStruct(5 AS key, 5005 AS subkey, 'AAE' AS value),
+);
+
+SELECT
+ count(DISTINCT i1.key) OVER (
+ PARTITION BY
+ i1.subkey
+ ) AS cnt,
+FROM
+ AS_TABLE($input) AS i1
+CROSS JOIN
+ AS_TABLE($input) AS i2
+ORDER BY
+ cnt
+;
diff --git a/yql/essentials/tests/sql/suites/window/yql-19709.sql b/yql/essentials/tests/sql/suites/window/yql-19709.sql
new file mode 100644
index 0000000000..bbd71b1ac4
--- /dev/null
+++ b/yql/essentials/tests/sql/suites/window/yql-19709.sql
@@ -0,0 +1,18 @@
+/* syntax version 1 */
+/* postgres can not */
+
+PRAGMA DistinctOverWindow;
+
+$input = AsList(
+ AsStruct(1 AS key, 1001 AS subkey, "AAA" AS value),
+ AsStruct(150 AS key, 150 AS subkey, "AAB" AS value),
+ AsStruct(3 AS key, 3003 AS subkey, "AAC" AS value),
+ AsStruct(150 AS key, 150 AS subkey, "AAD" AS value),
+ AsStruct(5 AS key, 5005 AS subkey, "AAE" AS value),
+);
+
+SELECT
+ count(DISTINCT i1.key) OVER (PARTITION BY i1.subkey) AS cnt,
+FROM AS_TABLE($input) AS i1
+CROSS JOIN AS_TABLE($input) AS i2
+ORDER BY cnt;
diff --git a/yql/essentials/tools/sql2yql/sql2yql.cpp b/yql/essentials/tools/sql2yql/sql2yql.cpp
index 99fd528be2..f285ca89a5 100644
--- a/yql/essentials/tools/sql2yql/sql2yql.cpp
+++ b/yql/essentials/tools/sql2yql/sql2yql.cpp
@@ -169,8 +169,8 @@ bool TestLexers(
lexers.Antlr4Ansi = NSQLTranslationV1::MakeAntlr4AnsiLexerFactory();
lexers.Antlr4Pure = NSQLTranslationV1::MakeAntlr4PureLexerFactory();
lexers.Antlr4PureAnsi = NSQLTranslationV1::MakeAntlr4PureAnsiLexerFactory();
- auto lexerMain = NSQLTranslationV1::MakeLexer(lexers, settings.AnsiLexer, true, false);
- auto lexerPure = NSQLTranslationV1::MakeLexer(lexers, settings.AnsiLexer, true, true);
+ auto lexerMain = NSQLTranslationV1::MakeLexer(lexers, settings.AnsiLexer, true, NSQLTranslationV1::ELexerFlavor::Default);
+ auto lexerPure = NSQLTranslationV1::MakeLexer(lexers, settings.AnsiLexer, true, NSQLTranslationV1::ELexerFlavor::Pure);
TVector<NSQLTranslation::TParsedToken> mainTokens;
if (!lexerMain->Tokenize(query, "", [&](auto token) { mainTokens.push_back(token);}, issues, NSQLTranslation::SQL_MAX_PARSER_ERRORS)) {
Cerr << issues.ToString();
diff --git a/yql/essentials/tools/sql_functions_dump/sql_functions_dump.cpp b/yql/essentials/tools/sql_functions_dump/sql_functions_dump.cpp
new file mode 100644
index 0000000000..66dae62d63
--- /dev/null
+++ b/yql/essentials/tools/sql_functions_dump/sql_functions_dump.cpp
@@ -0,0 +1,40 @@
+#include <yql/essentials/sql/v1/node.h>
+#include <yql/essentials/utils/backtrace/backtrace.h>
+#include <library/cpp/json/writer/json.h>
+#include <util/generic/yexception.h>
+
+using namespace NYql;
+
+int Main(int argc, const char *argv[])
+{
+ Y_UNUSED(argc);
+ Y_UNUSED(argv);
+ NJsonWriter::TBuf json;
+ json.BeginList();
+ NSQLTranslationV1::EnumerateBuiltins([&](auto name, auto kind) {
+ json.BeginObject();
+ json.WriteKey("name");
+ json.WriteString(name);
+ json.WriteKey("kind");
+ json.WriteString(kind);
+ json.EndObject();
+ });
+
+ json.EndList();
+ Cout << json.Str() << Endl;
+
+ return 0;
+}
+
+int main(int argc, const char *argv[]) {
+ NYql::NBacktrace::RegisterKikimrFatalActions();
+ NYql::NBacktrace::EnableKikimrSymbolize();
+
+ try {
+ return Main(argc, argv);
+ }
+ catch (...) {
+ Cerr << CurrentExceptionMessage() << Endl;
+ return 1;
+ }
+}
diff --git a/yql/essentials/tools/sql_functions_dump/test/test.py b/yql/essentials/tools/sql_functions_dump/test/test.py
new file mode 100644
index 0000000000..ca4d1ad5b8
--- /dev/null
+++ b/yql/essentials/tools/sql_functions_dump/test/test.py
@@ -0,0 +1,20 @@
+import yatest.common
+import json
+import os
+
+DATA_PATH = yatest.common.source_path('yql/essentials/data/language')
+TOOL_PATH = yatest.common.binary_path('yql/essentials/tools/sql_functions_dump/sql_functions_dump')
+
+
+def test_functions_dump():
+ with open(os.path.join(DATA_PATH, "sql_functions.json")) as f:
+ func_from_file = json.load(f)
+ res = yatest.common.execute(
+ [TOOL_PATH],
+ check_exit_code=True,
+ wait=True
+ )
+ func_from_tool = json.loads(res.stdout)
+ assert func_from_tool == func_from_file, 'JSON_DIFFER\n' \
+ 'File:\n %(func_from_file)s\n\n' \
+ 'Tool:\n %(func_from_tool)s\n' % locals()
diff --git a/yql/essentials/tools/sql_functions_dump/test/ya.make b/yql/essentials/tools/sql_functions_dump/test/ya.make
new file mode 100644
index 0000000000..e9e5f0d6a4
--- /dev/null
+++ b/yql/essentials/tools/sql_functions_dump/test/ya.make
@@ -0,0 +1,15 @@
+PY3TEST()
+
+TEST_SRCS(
+ test.py
+)
+
+DEPENDS(
+ yql/essentials/tools/sql_functions_dump
+)
+
+DATA(
+ arcadia/yql/essentials/data/language
+)
+
+END()
diff --git a/yql/essentials/tools/sql_functions_dump/ya.make b/yql/essentials/tools/sql_functions_dump/ya.make
new file mode 100644
index 0000000000..73dda18547
--- /dev/null
+++ b/yql/essentials/tools/sql_functions_dump/ya.make
@@ -0,0 +1,20 @@
+PROGRAM()
+
+SRCS(
+ sql_functions_dump.cpp
+)
+
+PEERDIR(
+ yql/essentials/sql
+ yql/essentials/sql/v1
+ yql/essentials/utils/backtrace
+ yql/essentials/sql/pg_dummy
+ yql/essentials/public/udf/service/stub
+ library/cpp/json
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ test
+)
diff --git a/yql/essentials/tools/types_dump/types_dump.cpp b/yql/essentials/tools/types_dump/types_dump.cpp
index b57b0b043c..32117c20e8 100644
--- a/yql/essentials/tools/types_dump/types_dump.cpp
+++ b/yql/essentials/tools/types_dump/types_dump.cpp
@@ -11,10 +11,11 @@ int Main(int argc, const char *argv[])
Y_UNUSED(argc);
Y_UNUSED(argv);
NJsonWriter::TBuf json;
- json.BeginObject();
+ json.BeginList();
EnumerateSimpleTypes([&](auto name, auto kind) {
- json.WriteKey(name);
json.BeginObject();
+ json.WriteKey("name");
+ json.WriteString(name);
json.WriteKey("kind");
json.WriteString(kind);
json.EndObject();
@@ -25,19 +26,20 @@ int Main(int argc, const char *argv[])
});
Sort(pgNames);
for (const auto& name : pgNames) {
+ json.BeginObject();
+ json.WriteKey("name");
if (name.StartsWith('_')) {
- json.WriteKey("_pg" + name.substr(1));
+ json.WriteString("_pg" + name.substr(1));
} else {
- json.WriteKey("pg" + name);
+ json.WriteString("pg" + name);
}
- json.BeginObject();
json.WriteKey("kind");
json.WriteString("Pg");
json.EndObject();
}
- json.EndObject();
+ json.EndList();
Cout << json.Str() << Endl;
return 0;
diff --git a/yql/essentials/tools/ya.make b/yql/essentials/tools/ya.make
index 66981f04f2..fb830e742a 100644
--- a/yql/essentials/tools/ya.make
+++ b/yql/essentials/tools/ya.make
@@ -8,6 +8,7 @@ RECURSE(
purebench
sql2yql
sql_formatter
+ sql_functions_dump
types_dump
udf_dep_stub
udf_probe
diff --git a/yql/essentials/tools/yql_complete/ya.make b/yql/essentials/tools/yql_complete/ya.make
index d745f9142a..107e6ba562 100644
--- a/yql/essentials/tools/yql_complete/ya.make
+++ b/yql/essentials/tools/yql_complete/ya.make
@@ -5,6 +5,8 @@ PROGRAM()
PEERDIR(
library/cpp/getopt
yql/essentials/sql/v1/complete
+ yql/essentials/sql/v1/lexer/antlr4_pure
+ yql/essentials/sql/v1/lexer/antlr4_pure_ansi
)
SRCS(
diff --git a/yql/essentials/tools/yql_complete/yql_complete.cpp b/yql/essentials/tools/yql_complete/yql_complete.cpp
index 289573190b..320b9f1b48 100644
--- a/yql/essentials/tools/yql_complete/yql_complete.cpp
+++ b/yql/essentials/tools/yql_complete/yql_complete.cpp
@@ -1,14 +1,38 @@
#include <yql/essentials/sql/v1/complete/sql_complete.h>
+#include <yql/essentials/sql/v1/complete/name/static/frequency.h>
+#include <yql/essentials/sql/v1/complete/name/static/ranking.h>
+#include <yql/essentials/sql/v1/complete/name/static/name_service.h>
+
+#include <yql/essentials/sql/v1/lexer/antlr4_pure/lexer.h>
+#include <yql/essentials/sql/v1/lexer/antlr4_pure_ansi/lexer.h>
#include <library/cpp/getopt/last_getopt.h>
#include <util/stream/file.h>
+NSQLComplete::TFrequencyData LoadFrequencyDataFromFile(TString filepath) {
+ TString text = TUnbufferedFileInput(filepath).ReadAll();
+ return NSQLComplete::ParseJsonFrequencyData(text);
+}
+
+NSQLComplete::TLexerSupplier MakePureLexerSupplier() {
+ NSQLTranslationV1::TLexers lexers;
+ lexers.Antlr4Pure = NSQLTranslationV1::MakeAntlr4PureLexerFactory();
+ lexers.Antlr4PureAnsi = NSQLTranslationV1::MakeAntlr4PureAnsiLexerFactory();
+ return [lexers = std::move(lexers)](bool ansi) {
+ return NSQLTranslationV1::MakeLexer(
+ lexers, ansi, /* antlr4 = */ true,
+ NSQLTranslationV1::ELexerFlavor::Pure);
+ };
+}
+
int Run(int argc, char* argv[]) {
NLastGetopt::TOpts opts = NLastGetopt::TOpts::Default();
TString inFileName;
+ TString freqFileName;
TMaybe<ui64> pos;
opts.AddLongOption('i', "input", "input file").RequiredArgument("input").StoreResult(&inFileName);
+ opts.AddLongOption('f', "freq", "frequences file").StoreResult(&freqFileName);
opts.AddLongOption('p', "pos", "position").StoreResult(&pos);
opts.SetFreeArgsNum(0);
opts.AddHelpOption();
@@ -20,9 +44,21 @@ int Run(int argc, char* argv[]) {
inFile.Reset(new TUnbufferedFileInput(inFileName));
}
IInputStream& in = inFile ? *inFile.Get() : Cin;
-
auto queryString = in.ReadAll();
- auto engine = NSQLComplete::MakeSqlCompletionEngine();
+
+ NSQLComplete::IRanking::TPtr ranking;
+ if (freqFileName.empty()) {
+ ranking = NSQLComplete::MakeDefaultRanking();
+ } else {
+ auto freq = LoadFrequencyDataFromFile(freqFileName);
+ ranking = NSQLComplete::MakeDefaultRanking(std::move(freq));
+ }
+ auto engine = NSQLComplete::MakeSqlCompletionEngine(
+ MakePureLexerSupplier(),
+ NSQLComplete::MakeStaticNameService(
+ NSQLComplete::MakeDefaultNameSet(),
+ std::move(ranking)));
+
NSQLComplete::TCompletionInput input;
input.Text = queryString;
if (pos) {
diff --git a/yql/essentials/udfs/language/yql/test/canondata/result.json b/yql/essentials/udfs/language/yql/test/canondata/result.json
index 86fc5e7c75..0fb3474a34 100644
--- a/yql/essentials/udfs/language/yql/test/canondata/result.json
+++ b/yql/essentials/udfs/language/yql/test/canondata/result.json
@@ -14,6 +14,11 @@
"uri": "file://test.test_ExtractInFuncs_/results.txt"
}
],
+ "test.test[ExtractPragmas]": [
+ {
+ "uri": "file://test.test_ExtractPragmas_/results.txt"
+ }
+ ],
"test.test[ExtractTypes]": [
{
"uri": "file://test.test_ExtractTypes_/results.txt"
diff --git a/yql/essentials/udfs/language/yql/test/canondata/test.test_ExtractPragmas_/results.txt b/yql/essentials/udfs/language/yql/test/canondata/test.test_ExtractPragmas_/results.txt
new file mode 100644
index 0000000000..fe436fa6d4
--- /dev/null
+++ b/yql/essentials/udfs/language/yql/test/canondata/test.test_ExtractPragmas_/results.txt
@@ -0,0 +1,87 @@
+[
+ {
+ "Write" = [
+ {
+ "Type" = [
+ "ListType";
+ [
+ "StructType";
+ [
+ [
+ "q";
+ [
+ "DataType";
+ "String"
+ ]
+ ];
+ [
+ "column1";
+ [
+ "OptionalType";
+ [
+ "ListType";
+ [
+ "TupleType";
+ [
+ [
+ "DataType";
+ "String"
+ ];
+ [
+ "DataType";
+ "String"
+ ];
+ [
+ "DataType";
+ "Uint64"
+ ]
+ ]
+ ]
+ ]
+ ]
+ ]
+ ]
+ ]
+ ];
+ "Data" = [
+ [
+ "pragma dq.Foo";
+ [
+ [
+ [
+ "PRAGMA";
+ "dq.Foo";
+ "1"
+ ]
+ ]
+ ]
+ ];
+ [
+ "pragma warningmsg('foo')";
+ [
+ [
+ [
+ "PRAGMA";
+ "warningmsg";
+ "1"
+ ]
+ ]
+ ]
+ ];
+ [
+ "pragma yt.Bar";
+ [
+ [
+ [
+ "PRAGMA";
+ "yt.Bar";
+ "1"
+ ]
+ ]
+ ]
+ ]
+ ]
+ }
+ ]
+ }
+] \ No newline at end of file
diff --git a/yql/essentials/udfs/language/yql/test/cases/ExtractPragmas.sql b/yql/essentials/udfs/language/yql/test/cases/ExtractPragmas.sql
new file mode 100644
index 0000000000..f6caec4d6b
--- /dev/null
+++ b/yql/essentials/udfs/language/yql/test/cases/ExtractPragmas.sql
@@ -0,0 +1,8 @@
+SELECT
+ q,ListSort(ListFilter(YqlLang::RuleFreq(q),($x)->($x.0 in ("PRAGMA"))))
+FROM (VALUES
+ ("pragma warningmsg('foo')"),
+ ("pragma dq.Foo"),
+ ("pragma yt.Bar")
+) AS a(q)
+order by q
diff --git a/yql/essentials/udfs/language/yql/yql_language_udf.cpp b/yql/essentials/udfs/language/yql/yql_language_udf.cpp
index 2bcff38b6b..f5df852d81 100644
--- a/yql/essentials/udfs/language/yql/yql_language_udf.cpp
+++ b/yql/essentials/udfs/language/yql/yql_language_udf.cpp
@@ -45,6 +45,8 @@ public:
VisitUnaryCasualSubexpr(dynamic_cast<const TRule_in_unary_casual_subexpr&>(msg));
} else if (descr == TRule_type_name_simple::GetDescriptor()) {
VisitSimpleType(dynamic_cast<const TRule_type_name_simple&>(msg));
+ } else if (descr == TRule_pragma_stmt::GetDescriptor()) {
+ VisitPragmaStmt(dynamic_cast<const TRule_pragma_stmt&>(msg));
}
TStringBuf fullName = descr->full_name();
@@ -85,6 +87,12 @@ private:
}
}
+ void VisitPragmaStmt(const TRule_pragma_stmt& msg) {
+ const TString prefix = OptIdPrefixAsStr(msg.GetRule_opt_id_prefix_or_type2(), Translation);
+ const TString pragma(Id(msg.GetRule_an_id3(), Translation));
+ Freqs[std::make_pair("PRAGMA", prefix.empty() ? pragma : (prefix + "." + pragma))] += 1;
+ }
+
template<typename TUnaryCasualExprRule>
void VisitUnaryCasualSubexpr(const TUnaryCasualExprRule& msg) {
const auto& block = msg.GetBlock1();
diff --git a/yql/essentials/utils/method_index.h b/yql/essentials/utils/method_index.h
index 4668d558f3..04944049c2 100644
--- a/yql/essentials/utils/method_index.h
+++ b/yql/essentials/utils/method_index.h
@@ -15,11 +15,4 @@ inline size_t GetMethodIndex(Method method) {
return GetMethodPtrIndex(ptr);
}
-template<typename Method>
-inline uintptr_t GetMethodPtr(Method method) {
- uintptr_t ptr;
- std::memcpy(&ptr, &method, sizeof(uintptr_t));
- return ptr;
-}
-
}
diff --git a/yt/cpp/mapreduce/client/client.cpp b/yt/cpp/mapreduce/client/client.cpp
index 690580285a..18c7a3ad5d 100644
--- a/yt/cpp/mapreduce/client/client.cpp
+++ b/yt/cpp/mapreduce/client/client.cpp
@@ -9,6 +9,7 @@
#include "init.h"
#include "lock.h"
#include "operation.h"
+#include "partition_reader.h"
#include "retryful_writer.h"
#include "transaction.h"
#include "transaction_pinger.h"
@@ -420,6 +421,14 @@ TRawTableReaderPtr TClientBase::CreateRawReader(
return CreateClientReader(path, format, options).Get();
}
+TRawTableReaderPtr TClientBase::CreateRawTablePartitionReader(
+ const TString& cookie,
+ const TFormat& format,
+ const TTablePartitionReaderOptions& options)
+{
+ return NDetail::CreateTablePartitionReader(RawClient_, ClientRetryPolicy_->CreatePolicyForReaderRequest(), cookie, format, options);
+}
+
TRawTableWriterPtr TClientBase::CreateRawWriter(
const TRichYPath& path,
const TFormat& format,
@@ -883,6 +892,45 @@ THolder<TClientWriter> TClientBase::CreateClientWriter(
std::move(skiffOptions));
}
+::TIntrusivePtr<INodeReaderImpl> TClientBase::CreateNodeTablePartitionReader(
+ const TString& cookie,
+ const TTablePartitionReaderOptions& options)
+{
+ auto format = TFormat::YsonBinary();
+ ApplyFormatHints<TNode>(&format, options.FormatHints_);
+
+ return MakeIntrusive<TNodeTableReader>(CreateRawTablePartitionReader(cookie, format, options));
+}
+
+::TIntrusivePtr<IProtoReaderImpl> TClientBase::CreateProtoTablePartitionReader(
+ const TString& cookie,
+ const TTablePartitionReaderOptions& options,
+ const Message* prototype)
+{
+ auto descriptors = TVector<const ::google::protobuf::Descriptor*>{
+ prototype->GetDescriptor(),
+ };
+ auto format = TFormat::Protobuf(descriptors, Context_.Config->ProtobufFormatWithDescriptors);
+ return MakeIntrusive<TLenvalProtoTableReader>(
+ CreateRawTablePartitionReader(cookie, format, options),
+ std::move(descriptors));
+}
+
+::TIntrusivePtr<ISkiffRowReaderImpl> TClientBase::CreateSkiffRowTablePartitionReader(
+ const TString& cookie,
+ const TTablePartitionReaderOptions& options,
+ const ISkiffRowSkipperPtr& skipper,
+ const NSkiff::TSkiffSchemaPtr& schema)
+{
+ auto skiffOptions = TCreateSkiffSchemaOptions().HasRangeIndex(true);
+ auto resultSchema = NYT::NDetail::CreateSkiffSchema(TVector{schema}, skiffOptions);
+ return new TSkiffRowTableReader(
+ CreateRawTablePartitionReader(cookie, NYT::NDetail::CreateSkiffFormat(resultSchema), options),
+ resultSchema,
+ {skipper},
+ std::move(skiffOptions));
+}
+
::TIntrusivePtr<INodeWriterImpl> TClientBase::CreateNodeWriter(
const TRichYPath& path, const TTableWriterOptions& options)
{
@@ -1561,13 +1609,17 @@ TClientContext CreateClientContext(
context.Config = options.Config_ ? options.Config_ : TConfig::Get();
context.TvmOnly = options.TvmOnly_;
context.ProxyAddress = options.ProxyAddress_;
- context.ProxyUnixDomainSocket = options.ProxyUnixDomainSocket_;
+ context.UseProxyUnixDomainSocket = options.UseProxyUnixDomainSocket_;
if (options.UseTLS_) {
context.UseTLS = *options.UseTLS_;
}
- SetupClusterContext(context, serverName);
+ if (!options.UseProxyUnixDomainSocket_) {
+ SetupClusterContext(context, serverName);
+ } else {
+ context.ServerName = serverName;
+ }
if (context.Config->HttpProxyRole && context.Config->Hosts == DefaultHosts) {
context.Config->Hosts = "hosts?role=" + context.Config->HttpProxyRole;
diff --git a/yt/cpp/mapreduce/client/client.h b/yt/cpp/mapreduce/client/client.h
index ef0741044c..71d62965fc 100644
--- a/yt/cpp/mapreduce/client/client.h
+++ b/yt/cpp/mapreduce/client/client.h
@@ -130,6 +130,11 @@ public:
const TFormat& format,
const TTableReaderOptions& options) override;
+ TRawTableReaderPtr CreateRawTablePartitionReader(
+ const TString& cookie,
+ const TFormat& format,
+ const TTablePartitionReaderOptions& options) override;
+
TRawTableWriterPtr CreateRawWriter(
const TRichYPath& path,
const TFormat& format,
@@ -268,6 +273,21 @@ private:
const ISkiffRowSkipperPtr& skipper,
const NSkiff::TSkiffSchemaPtr& schema) override;
+ ::TIntrusivePtr<INodeReaderImpl> CreateNodeTablePartitionReader(
+ const TString& cookie,
+ const TTablePartitionReaderOptions& options) override;
+
+ ::TIntrusivePtr<IProtoReaderImpl> CreateProtoTablePartitionReader(
+ const TString& cookie,
+ const TTablePartitionReaderOptions& options,
+ const Message* prototype) override;
+
+ ::TIntrusivePtr<ISkiffRowReaderImpl> CreateSkiffRowTablePartitionReader(
+ const TString& cookie,
+ const TTablePartitionReaderOptions& options,
+ const ISkiffRowSkipperPtr& skipper,
+ const NSkiff::TSkiffSchemaPtr& schema) override;
+
::TIntrusivePtr<INodeWriterImpl> CreateNodeWriter(
const TRichYPath& path, const TTableWriterOptions& options) override;
diff --git a/yt/cpp/mapreduce/client/operation.cpp b/yt/cpp/mapreduce/client/operation.cpp
index 9a1511025a..553b1a0777 100644
--- a/yt/cpp/mapreduce/client/operation.cpp
+++ b/yt/cpp/mapreduce/client/operation.cpp
@@ -854,6 +854,9 @@ void BuildCommonOperationPart(
MergeNodes((*specNode)["annotations"], nirvanaContext.Annotations);
}
+ if (baseSpec.Alias_) {
+ (*specNode)["alias"] = *baseSpec.Alias_;
+ }
TString pool;
if (baseSpec.Pool_) {
pool = *baseSpec.Pool_;
diff --git a/yt/cpp/mapreduce/client/partition_reader.cpp b/yt/cpp/mapreduce/client/partition_reader.cpp
new file mode 100644
index 0000000000..1610a087cc
--- /dev/null
+++ b/yt/cpp/mapreduce/client/partition_reader.cpp
@@ -0,0 +1,66 @@
+#include "partition_reader.h"
+
+#include <yt/cpp/mapreduce/common/retry_request.h>
+
+#include <yt/cpp/mapreduce/interface/raw_client.h>
+
+namespace NYT::NDetail {
+
+////////////////////////////////////////////////////////////////////////////////
+
+class TPartitionTableReader
+ : public TRawTableReader
+{
+public:
+ TPartitionTableReader(std::unique_ptr<IInputStream> input)
+ : Input_(std::move(input))
+ { }
+
+ bool Retry(
+ const TMaybe<ui32>& /*rangeIndex*/,
+ const TMaybe<ui64>& /*rowIndex*/,
+ const std::exception_ptr& /*error*/) override
+ {
+ return false;
+ }
+
+ void ResetRetries() override
+ { }
+
+ bool HasRangeIndices() const override
+ {
+ return false;
+ }
+
+protected:
+ size_t DoRead(void* buf, size_t len) override
+ {
+ return Input_->Read(buf, len);
+ }
+
+private:
+ std::unique_ptr<IInputStream> Input_;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+TRawTableReaderPtr CreateTablePartitionReader(
+ const IRawClientPtr& rawClient,
+ const IRequestRetryPolicyPtr& retryPolicy,
+ const TString& cookie,
+ const TMaybe<TFormat>& format,
+ const TTablePartitionReaderOptions& options)
+{
+
+ auto stream = NDetail::RequestWithRetry<std::unique_ptr<IInputStream>>(
+ retryPolicy,
+ [&] (TMutationId /*mutationId*/) {
+ return rawClient->ReadTablePartition(cookie, format, options);
+ }
+ );
+ return MakeIntrusive<TPartitionTableReader>(std::move(stream));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+} // namespace NYT::NDetail
diff --git a/yt/cpp/mapreduce/client/partition_reader.h b/yt/cpp/mapreduce/client/partition_reader.h
new file mode 100644
index 0000000000..19b7258ac8
--- /dev/null
+++ b/yt/cpp/mapreduce/client/partition_reader.h
@@ -0,0 +1,18 @@
+#include <yt/cpp/mapreduce/common/fwd.h>
+
+#include <yt/cpp/mapreduce/interface/io.h>
+
+namespace NYT::NDetail {
+
+////////////////////////////////////////////////////////////////////////////////
+
+TRawTableReaderPtr CreateTablePartitionReader(
+ const IRawClientPtr& rawClient,
+ const IRequestRetryPolicyPtr& retryPolicy,
+ const TString& cookie,
+ const TMaybe<TFormat>& format,
+ const TTablePartitionReaderOptions& options);
+
+////////////////////////////////////////////////////////////////////////////////
+
+} // namespace NYT::NDetail
diff --git a/yt/cpp/mapreduce/client/ya.make b/yt/cpp/mapreduce/client/ya.make
index 6ac61f8f7e..c3f4c1876d 100644
--- a/yt/cpp/mapreduce/client/ya.make
+++ b/yt/cpp/mapreduce/client/ya.make
@@ -17,6 +17,7 @@ SRCS(
operation_preparer.cpp
operation_tracker.cpp
operation.cpp
+ partition_reader.cpp
prepare_operation.cpp
py_helpers.cpp
retry_heavy_write_request.cpp
diff --git a/yt/cpp/mapreduce/common/retry_lib.cpp b/yt/cpp/mapreduce/common/retry_lib.cpp
index 53216bd3f8..e898bfc381 100644
--- a/yt/cpp/mapreduce/common/retry_lib.cpp
+++ b/yt/cpp/mapreduce/common/retry_lib.cpp
@@ -227,6 +227,7 @@ static TMaybe<TDuration> TryGetBackoffDuration(const TErrorResponse& errorRespon
NSequoiaClient::SequoiaRetriableError,
NRpc::TransientFailure,
Canceled,
+ Timeout,
}) {
if (allCodes.contains(code)) {
return config->RetryInterval;
diff --git a/yt/cpp/mapreduce/http/context.cpp b/yt/cpp/mapreduce/http/context.cpp
index a2e59c0fd8..26873d248b 100644
--- a/yt/cpp/mapreduce/http/context.cpp
+++ b/yt/cpp/mapreduce/http/context.cpp
@@ -15,7 +15,7 @@ bool operator==(const TClientContext& lhs, const TClientContext& rhs)
lhs.TvmOnly == rhs.TvmOnly &&
lhs.ProxyAddress == rhs.ProxyAddress &&
lhs.RpcProxyRole == rhs.RpcProxyRole &&
- lhs.ProxyUnixDomainSocket == rhs.ProxyUnixDomainSocket;
+ lhs.UseProxyUnixDomainSocket == rhs.UseProxyUnixDomainSocket;
}
////////////////////////////////////////////////////////////////////////////////
diff --git a/yt/cpp/mapreduce/http/context.h b/yt/cpp/mapreduce/http/context.h
index 0601b495f1..85bed9a030 100644
--- a/yt/cpp/mapreduce/http/context.h
+++ b/yt/cpp/mapreduce/http/context.h
@@ -23,7 +23,7 @@ struct TClientContext
TConfigPtr Config = TConfig::Get();
TMaybe<TString> ProxyAddress;
TMaybe<TString> RpcProxyRole;
- TMaybe<TString> ProxyUnixDomainSocket;
+ bool UseProxyUnixDomainSocket = false;
};
bool operator==(const TClientContext& lhs, const TClientContext& rhs);
diff --git a/yt/cpp/mapreduce/http_client/raw_client.cpp b/yt/cpp/mapreduce/http_client/raw_client.cpp
index 734282c2cf..ab8fbf5821 100644
--- a/yt/cpp/mapreduce/http_client/raw_client.cpp
+++ b/yt/cpp/mapreduce/http_client/raw_client.cpp
@@ -755,6 +755,24 @@ std::unique_ptr<IInputStream> THttpRawClient::ReadTable(
return std::make_unique<NHttpClient::THttpResponseStream>(std::move(responseInfo));
}
+std::unique_ptr<IInputStream> THttpRawClient::ReadTablePartition(
+ const TString& cookie,
+ const TMaybe<TFormat>& format,
+ const TTablePartitionReaderOptions& options)
+{
+ TMutationId mutationId;
+ THttpHeader header("GET", "api/v4/read_table_partition", /*isApi*/ false);
+ header.SetOutputFormat(format);
+ header.SetResponseCompression(ToString(Context_.Config->AcceptEncoding));
+ auto params = NRawClient::SerializeParamsForReadTablePartition(cookie, options);
+ header.MergeParameters(params);
+
+ TRequestConfig config;
+ config.IsHeavy = true;
+ auto responseInfo = RequestWithoutRetry(Context_, mutationId, header, /*body*/ {}, config);
+ return std::make_unique<NHttpClient::THttpResponseStream>(std::move(responseInfo));
+}
+
std::unique_ptr<IInputStream> THttpRawClient::ReadBlobTable(
const TTransactionId& transactionId,
const TRichYPath& path,
diff --git a/yt/cpp/mapreduce/http_client/raw_client.h b/yt/cpp/mapreduce/http_client/raw_client.h
index d292688978..adcec39884 100644
--- a/yt/cpp/mapreduce/http_client/raw_client.h
+++ b/yt/cpp/mapreduce/http_client/raw_client.h
@@ -280,6 +280,11 @@ public:
const TMaybe<TFormat>& format,
const TTableReaderOptions& options = {}) override;
+ std::unique_ptr<IInputStream> ReadTablePartition(
+ const TString& cookie,
+ const TMaybe<TFormat>& format,
+ const TTablePartitionReaderOptions& options = {}) override;
+
std::unique_ptr<IInputStream> ReadBlobTable(
const TTransactionId& transactionId,
const TRichYPath& path,
diff --git a/yt/cpp/mapreduce/http_client/rpc_parameters_serialization.cpp b/yt/cpp/mapreduce/http_client/rpc_parameters_serialization.cpp
index 081845be5e..8487a1de8f 100644
--- a/yt/cpp/mapreduce/http_client/rpc_parameters_serialization.cpp
+++ b/yt/cpp/mapreduce/http_client/rpc_parameters_serialization.cpp
@@ -668,6 +668,13 @@ TNode SerializeParamsForReadTable(
return result;
}
+TNode SerializeParamsForReadTablePartition(const TString& cookie, const TTablePartitionReaderOptions& /*options*/)
+{
+ TNode node;
+ node["cookie"] = cookie;
+ return node;
+}
+
TNode SerializeParamsForReadBlobTable(
const TTransactionId& transactionId,
const TRichYPath& path,
@@ -815,6 +822,7 @@ TNode SerializeParamsForGetTablePartitions(
result["max_partition_count"] = *options.MaxPartitionCount_;
}
result["adjust_data_weight_per_partition"] = options.AdjustDataWeightPerPartition_;
+ result["enable_cookies"] = options.EnableCookies_;
return result;
}
diff --git a/yt/cpp/mapreduce/http_client/rpc_parameters_serialization.h b/yt/cpp/mapreduce/http_client/rpc_parameters_serialization.h
index 5dc2045bc5..90c8dd8e24 100644
--- a/yt/cpp/mapreduce/http_client/rpc_parameters_serialization.h
+++ b/yt/cpp/mapreduce/http_client/rpc_parameters_serialization.h
@@ -159,6 +159,10 @@ TNode SerializeParamsForReadTable(
const TTransactionId& transactionId,
const TTableReaderOptions& options);
+TNode SerializeParamsForReadTablePartition(
+ const TString& cookie,
+ const TTablePartitionReaderOptions& options);
+
TNode SerializeParamsForReadBlobTable(
const TTransactionId& transactionId,
const TRichYPath& path,
diff --git a/yt/cpp/mapreduce/interface/client_method_options.h b/yt/cpp/mapreduce/interface/client_method_options.h
index 866f900571..4bb2df112c 100644
--- a/yt/cpp/mapreduce/interface/client_method_options.h
+++ b/yt/cpp/mapreduce/interface/client_method_options.h
@@ -675,7 +675,7 @@ struct TTableReaderOptions
FLUENT_FIELD_DEFAULT(size_t, SizeLimit, 4 << 20);
///
- /// @brief Allows to fine tune format that is used for reading tables.
+ /// @brief Allows fine-tuning of the format used for reading tables.
///
/// Has no effect when used with raw-reader.
FLUENT_FIELD_OPTION(TFormatHints, FormatHints);
@@ -686,6 +686,20 @@ struct TTableReaderOptions
FLUENT_FIELD_DEFAULT(TControlAttributes, ControlAttributes, TControlAttributes());
};
+/// Options for @ref NYT::IClient::CreateTablePartitionReader
+struct TTablePartitionReaderOptions
+{
+ /// @cond Doxygen_Suppress
+ using TSelf = TTablePartitionReaderOptions;
+ /// @endcond
+
+ ///
+ /// @brief Allows fine-tuning of the format used for reading tables.
+ ///
+ /// Has no effect when used with raw-reader.
+ FLUENT_FIELD_OPTION(TFormatHints, FormatHints);
+};
+
/// Options for @ref NYT::IClient::CreateTableWriter
struct TTableWriterOptions
: public TIOOptions<TTableWriterOptions>
@@ -1119,9 +1133,9 @@ struct TCreateClientOptions
/// @brief Proxy Address to be used for connection
FLUENT_FIELD_OPTION(TString, ProxyAddress);
- /// @brief Specifies the Unix socket used for connection,
- /// typically when the RPC proxy is enabled within the job proxy.
- FLUENT_FIELD_OPTION(TString, ProxyUnixDomainSocket);
+ /// @brief Use unix domain socket for connection.
+ /// Typically you will need this option when the RPC proxy is enabled within the job proxy.
+ FLUENT_FIELD_DEFAULT(bool, UseProxyUnixDomainSocket, false);
};
///
@@ -1471,6 +1485,12 @@ struct TGetTablePartitionsOptions
///
/// |True| by default.
FLUENT_FIELD_DEFAULT(bool, AdjustDataWeightPerPartition, true);
+
+ ///
+ /// @brief Enable partition cookies in response.
+ ///
+ /// Partition cookies allow partitions to be read efficiently using the @ref NYT::IClientBase::CreateTablePartitionReader method.
+ FLUENT_FIELD_DEFAULT(bool, EnableCookies, false);
};
///
diff --git a/yt/cpp/mapreduce/interface/common.h b/yt/cpp/mapreduce/interface/common.h
index 9752e15822..82556fb08a 100644
--- a/yt/cpp/mapreduce/interface/common.h
+++ b/yt/cpp/mapreduce/interface/common.h
@@ -1248,6 +1248,9 @@ struct TMultiTablePartition
/// Aggregate statistics of all the table ranges in the partition.
TStatistics AggregateStatistics;
+
+ /// Partition cookie that can be passed to @ref NYT::IClientBase::CreateTablePartitionReader
+ TMaybe<TString> Cookie;
};
/// Table partitions from GetTablePartitions command.
diff --git a/yt/cpp/mapreduce/interface/io-inl.h b/yt/cpp/mapreduce/interface/io-inl.h
index 056910f785..ba53b874b9 100644
--- a/yt/cpp/mapreduce/interface/io-inl.h
+++ b/yt/cpp/mapreduce/interface/io-inl.h
@@ -2,6 +2,7 @@
#ifndef IO_INL_H_
#error "Direct inclusion of this file is not allowed, use io.h"
+#include "io.h" // For the sake of sane code completion.
#endif
#undef IO_INL_H_
@@ -632,6 +633,30 @@ inline TTableReaderPtr<T> IIOClient::CreateTableReader(
}
}
+template <>
+inline TTableReaderPtr<TNode> IIOClient::CreateTablePartitionReader<TNode>(
+ const TString& cookie, const TTablePartitionReaderOptions& options)
+{
+ return new TTableReader<TNode>(CreateNodeTablePartitionReader(cookie, options));
+}
+
+template <class T>
+inline TTableReaderPtr<T> IIOClient::CreateTablePartitionReader(
+    const TString& cookie, const TTablePartitionReaderOptions& options)
+{
+    if constexpr (TIsBaseOf<Message, T>::Value) {
+        T prototype;
+        return new TTableReader<T>(CreateProtoTablePartitionReader(cookie, options, &prototype));
+    } else if constexpr (TIsSkiffRow<T>::value) {
+        const auto& hints = options.FormatHints_ ? options.FormatHints_->SkiffRowHints_ : Nothing();
+        auto schema = GetSkiffSchema<T>(hints);
+        auto skipper = CreateSkiffSkipper<T>(hints);
+        return new TTableReader<T>(CreateSkiffRowTablePartitionReader(cookie, options, skipper, schema), hints);
+    } else {
+        static_assert(TDependentFalse<T>, "Unsupported type for table reader");
+    }
+}
+
////////////////////////////////////////////////////////////////////////////////
template <typename T>
diff --git a/yt/cpp/mapreduce/interface/io.h b/yt/cpp/mapreduce/interface/io.h
index becde0def2..aadbca3298 100644
--- a/yt/cpp/mapreduce/interface/io.h
+++ b/yt/cpp/mapreduce/interface/io.h
@@ -331,7 +331,7 @@ private:
///
/// @see @ref NYT::TTableReaderIterator
template <class T>
-TTableReaderIterator<T> begin(TTableReader<T>& reader)
+TTableReaderIterator<T> begin(TTableReader<T>& reader) // NOLINT
{
return TTableReaderIterator<T>(&reader);
}
@@ -340,7 +340,7 @@ TTableReaderIterator<T> begin(TTableReader<T>& reader)
///
/// @see @ref NYT::TTableReaderIterator
template <class T>
-TTableReaderIterator<T> end(TTableReader<T>&)
+TTableReaderIterator<T> end(TTableReader<T>&) // NOLINT
{
return TTableReaderIterator<T>(nullptr);
}
@@ -460,6 +460,30 @@ public:
const TTableWriterOptions& options = TTableWriterOptions()) = 0;
///
+ /// @brief Create raw reader of table partition
+ ///
+ /// Reader returns unparsed data in specified format.
+ ///
+ /// @param cookie Partition cookie received from @ref NYT::IClientBase::GetTablePartitions.
+ /// @param format Format description.
+ /// @param options Additional options.
+ virtual TRawTableReaderPtr CreateRawTablePartitionReader(
+ const TString& cookie,
+ const TFormat& format,
+ const TTablePartitionReaderOptions& options = {}) = 0;
+
+ ///
+ /// @brief Create reader of table partition
+ ///
+ /// @param cookie Partition cookie received from @ref NYT::IClientBase::GetTablePartitions.
+ /// @tparam T Row type: TNode, a protobuf Message, or a SkiffRow type.
+ /// @param options Additional options.
+ template <class T>
+ TTableReaderPtr<T> CreateTablePartitionReader(
+ const TString& cookie,
+ const TTablePartitionReaderOptions& options = {});
+
+ ///
/// @brief Create a reader for [blob table](https://docs.yandex-team.ru/docs/yt/description/storage/blobtables) at `path`.
///
/// @param path Blob table path.
@@ -497,6 +521,20 @@ private:
const ISkiffRowSkipperPtr& skipper,
const NSkiff::TSkiffSchemaPtr& schema) = 0;
+ virtual ::TIntrusivePtr<INodeReaderImpl> CreateNodeTablePartitionReader(
+ const TString& cookie, const TTablePartitionReaderOptions& options) = 0;
+
+ virtual ::TIntrusivePtr<IProtoReaderImpl> CreateProtoTablePartitionReader(
+ const TString& cookie,
+ const TTablePartitionReaderOptions& options,
+ const ::google::protobuf::Message* prototype) = 0;
+
+ virtual ::TIntrusivePtr<ISkiffRowReaderImpl> CreateSkiffRowTablePartitionReader(
+ const TString& cookie,
+ const TTablePartitionReaderOptions& options,
+ const ISkiffRowSkipperPtr& skipper,
+ const NSkiff::TSkiffSchemaPtr& schema) = 0;
+
virtual ::TIntrusivePtr<INodeWriterImpl> CreateNodeWriter(
const TRichYPath& path, const TTableWriterOptions& options) = 0;
diff --git a/yt/cpp/mapreduce/interface/operation.h b/yt/cpp/mapreduce/interface/operation.h
index 869c8f9c0b..50c694b5b0 100644
--- a/yt/cpp/mapreduce/interface/operation.h
+++ b/yt/cpp/mapreduce/interface/operation.h
@@ -523,6 +523,9 @@ struct TOperationSpecBase
/// If operation doesn't finish in time it will be aborted.
FLUENT_FIELD_OPTION(TDuration, TimeLimit);
+ /// @brief Alias that can be used to find the operation later.
+ FLUENT_FIELD_OPTION(TString, Alias);
+
/// @brief Title to be shown in web interface.
FLUENT_FIELD_OPTION(TString, Title);
diff --git a/yt/cpp/mapreduce/interface/raw_client.h b/yt/cpp/mapreduce/interface/raw_client.h
index 24f8de61b6..23819357db 100644
--- a/yt/cpp/mapreduce/interface/raw_client.h
+++ b/yt/cpp/mapreduce/interface/raw_client.h
@@ -282,6 +282,11 @@ public:
const TMaybe<TFormat>& format,
const TTableReaderOptions& options = {}) = 0;
+ virtual std::unique_ptr<IInputStream> ReadTablePartition(
+ const TString& cookie,
+ const TMaybe<TFormat>& format,
+ const TTablePartitionReaderOptions& options = {}) = 0;
+
virtual std::unique_ptr<IInputStream> ReadBlobTable(
const TTransactionId& transactionId,
const TRichYPath& path,
diff --git a/yt/cpp/mapreduce/interface/serialize.cpp b/yt/cpp/mapreduce/interface/serialize.cpp
index 5ea65b62f1..64f38e7d0a 100644
--- a/yt/cpp/mapreduce/interface/serialize.cpp
+++ b/yt/cpp/mapreduce/interface/serialize.cpp
@@ -537,6 +537,7 @@ void Deserialize(TMultiTablePartition& partition, const TNode& node)
const auto& nodeMap = node.AsMap();
DESERIALIZE_ITEM("table_ranges", partition.TableRanges);
DESERIALIZE_ITEM("aggregate_statistics", partition.AggregateStatistics);
+ DESERIALIZE_ITEM("cookie", partition.Cookie)
}
void Deserialize(TMultiTablePartitions& partitions, const TNode& node)
diff --git a/yt/python/yt/common.py b/yt/python/yt/common.py
index 26a9599e51..054c269883 100644
--- a/yt/python/yt/common.py
+++ b/yt/python/yt/common.py
@@ -375,6 +375,10 @@ class YtError(Exception):
"""Probably lock conflict in Sequoia tables."""
return self.contains_code(6002)
+ def is_backup_checkpoint_rejected(self):
+ """Backup checkpoint rejected."""
+ return self.contains_code(1733)
+
class YtResponseError(YtError):
"""Represents an error in YT response."""
diff --git a/yt/yql/providers/yt/codec/yt_codec.cpp b/yt/yql/providers/yt/codec/yt_codec.cpp
index 6e64136937..14e317df97 100644
--- a/yt/yql/providers/yt/codec/yt_codec.cpp
+++ b/yt/yql/providers/yt/codec/yt_codec.cpp
@@ -305,6 +305,13 @@ void TMkqlIOSpecs::InitDecoder(NCommon::TCodecContext& codecCtx,
}
}
+ if (InputBlockRepresentation_ == EBlockRepresentation::BlockStruct) {
+ if (auto pos = rowType->FindMemberIndex(BlockLengthColumnName)) {
+ virtualColumns.insert(*pos);
+ decoder.FillBlockStructSize = pos;
+ }
+ }
+
THashSet<ui32> usedPos;
for (ui32 index = 0; index < rowType->GetMembersCount(); ++index) {
auto name = rowType->GetMemberNameStr(index);
@@ -444,6 +451,7 @@ void TMkqlIOSpecs::InitInput(NCommon::TCodecContext& codecCtx,
TSpecInfo localSpecInfo;
TSpecInfo* specInfo = &localSpecInfo;
TString decoderRefName = TStringBuilder() << "_internal" << inputIndex;
+ bool newSpec = false;
if (inputSpecs[inputIndex].IsString()) {
auto refName = inputSpecs[inputIndex].AsString();
decoderRefName = refName;
@@ -453,9 +461,14 @@ void TMkqlIOSpecs::InitInput(NCommon::TCodecContext& codecCtx,
Y_ENSURE(inAttrs.HasKey(YqlIOSpecRegistry) && inAttrs[YqlIOSpecRegistry].HasKey(refName), "Bad input registry reference: " << refName);
specInfo = &specInfoRegistry[refName];
LoadSpecInfo(true, inAttrs[YqlIOSpecRegistry][refName], codecCtx, *specInfo);
+ newSpec = true;
}
} else {
LoadSpecInfo(true, inputSpecs[inputIndex], codecCtx, localSpecInfo);
+ newSpec = true;
+ }
+ if (InputBlockRepresentation_ == EBlockRepresentation::BlockStruct && newSpec) {
+ specInfo->Type = codecCtx.Builder.NewStructType(specInfo->Type, BlockLengthColumnName, TDataType::Create(NUdf::TDataType<ui64>::Id, codecCtx.Env));
}
TStructType* inStruct = AS_TYPE(TStructType, specInfo->Type);
diff --git a/yt/yql/providers/yt/codec/yt_codec.h b/yt/yql/providers/yt/codec/yt_codec.h
index 4e6ef543a5..ed30956479 100644
--- a/yt/yql/providers/yt/codec/yt_codec.h
+++ b/yt/yql/providers/yt/codec/yt_codec.h
@@ -31,6 +31,12 @@ public:
Y_DECLARE_FLAGS(TSystemFields, ESystemField);
+ enum class EBlockRepresentation {
+ None,
+ WideBlock,
+ BlockStruct,
+ };
+
struct TSpecInfo {
NKikimr::NMiniKQL::TType* Type = nullptr;
bool StrictSchema = true;
@@ -65,6 +71,7 @@ public:
TMaybe<ui32> FillSysColumnIndex;
TMaybe<ui32> FillSysColumnNum;
TMaybe<ui32> FillSysColumnKeySwitch;
+ TMaybe<ui32> FillBlockStructSize;
};
struct TEncoderSpec {
@@ -137,6 +144,10 @@ public:
IsTableContent_ = true;
}
+ void SetInputBlockRepresentation(EBlockRepresentation type) {
+ InputBlockRepresentation_ = type;
+ }
+
void SetTableOffsets(const TVector<ui64>& offsets);
void Clear();
@@ -156,6 +167,8 @@ public:
TString OptLLVM_;
TSystemFields SystemFields_;
+ EBlockRepresentation InputBlockRepresentation_ = EBlockRepresentation::None;
+
NKikimr::NMiniKQL::IStatsRegistry* JobStats_ = nullptr;
THashMap<TString, TDecoderSpec> Decoders;
TVector<const TDecoderSpec*> Inputs;
diff --git a/yt/yql/providers/yt/codec/yt_codec_io.cpp b/yt/yql/providers/yt/codec/yt_codec_io.cpp
index 0a6b31f4e7..a46ecc0f68 100644
--- a/yt/yql/providers/yt/codec/yt_codec_io.cpp
+++ b/yt/yql/providers/yt/codec/yt_codec_io.cpp
@@ -651,7 +651,7 @@ struct TMkqlReaderImpl::TDecoder {
KeySwitch_ = false;
}
- void Reset(bool hasRangeIndices, ui32 tableIndex, bool ignoreStreamTableIndex) {
+ virtual void Reset(bool hasRangeIndices, ui32 tableIndex, bool ignoreStreamTableIndex) {
HasRangeIndices_ = hasRangeIndices;
TableIndex_ = tableIndex;
AtStart_ = true;
@@ -1463,7 +1463,7 @@ public:
, Pool_(pool)
{
InputStream_ = std::make_unique<TInputBufArrowInputStream>(buf, pool);
- ResetColumnConverters();
+ HandleTableSwitch();
HandlesSysColumns_ = true;
}
@@ -1482,14 +1482,19 @@ public:
YQL_ENSURE(!Chunks_.empty());
}
+ bool isWideBlock = (Specs_.InputBlockRepresentation_ == TMkqlIOSpecs::EBlockRepresentation::WideBlock);
+
auto& decoder = *Specs_.Inputs[TableIndex_];
- Row_ = SpecsCache_.NewRow(TableIndex_, items, true);
+ Row_ = SpecsCache_.NewRow(TableIndex_, items, isWideBlock);
auto& [chunkRowIndex, chunkLen, chunk] = Chunks_.front();
for (size_t i = 0; i < decoder.StructSize; i++) {
+ if (i == decoder.FillBlockStructSize) {
+ continue;
+ }
items[i] = SpecsCache_.GetHolderFactory().CreateArrowBlock(std::move(chunk[i]));
}
- items[decoder.StructSize] = SpecsCache_.GetHolderFactory().CreateArrowBlock(arrow::Datum(static_cast<uint64_t>(chunkLen)));
+ items[BlockSizeStructIndex_] = SpecsCache_.GetHolderFactory().CreateArrowBlock(arrow::Datum(static_cast<uint64_t>(chunkLen)));
RowIndex_ = chunkRowIndex;
Chunks_.pop_front();
@@ -1505,17 +1510,17 @@ public:
}
StreamReader_ = ARROW_RESULT(streamReaderResult);
- auto oldTableIndex = TableIndex_;
if (!IgnoreStreamTableIndex) {
+ auto oldTableIndex = TableIndex_;
auto tableIdKey = StreamReader_->schema()->metadata()->Get("TableId");
if (tableIdKey.ok()) {
TableIndex_ = std::stoi(tableIdKey.ValueOrDie());
YQL_ENSURE(TableIndex_ < Specs_.Inputs.size());
}
- }
- if (TableIndex_ != oldTableIndex) {
- ResetColumnConverters();
+ if (TableIndex_ != oldTableIndex) {
+ HandleTableSwitch();
+ }
}
}
@@ -1523,6 +1528,8 @@ public:
ARROW_OK(StreamReader_->ReadNext(&batch));
if (!batch) {
if (InputStream_->EOSReached()) {
+ // Prepare for possible table switch
+ StreamReader_.reset();
return false;
}
@@ -1565,6 +1572,9 @@ public:
}
} else if (decoder.FillSysColumnIndex == inputFields[i].StructIndex) {
convertedColumn = ARROW_RESULT(arrow::MakeArrayFromScalar(arrow::UInt32Scalar(TableIndex_), batch->num_rows()));
+ } else if (decoder.FillBlockStructSize == inputFields[i].StructIndex) {
+ // Actual value will be specified later
+ convertedColumn = arrow::Datum(static_cast<uint64_t>(0));
} else if (inputFields[i].StructIndex == Max<ui32>()) {
// Input field won't appear in the result
continue;
@@ -1593,14 +1603,22 @@ public:
return true;
}
- void ResetColumnConverters() {
- auto& fields = Specs_.Inputs[TableIndex_]->FieldsVec;
+ void HandleTableSwitch() {
+ auto& decoder = Specs_.Inputs[TableIndex_];
+
ColumnConverters_.clear();
- ColumnConverters_.reserve(fields.size());
- for (auto& field: fields) {
+ ColumnConverters_.reserve(decoder->FieldsVec.size());
+ for (auto& field: decoder->FieldsVec) {
YQL_ENSURE(!field.Type->IsPg());
ColumnConverters_.emplace_back(MakeYtColumnConverter(field.Type, nullptr, *Pool_, Specs_.Inputs[TableIndex_]->NativeYtTypeFlags));
}
+
+ BlockSizeStructIndex_ = GetBlockSizeStructIndex(Specs_, TableIndex_);
+ }
+
+ void Reset(bool hasRangeIndices, ui32 tableIndex, bool ignoreStreamTableIndex) override {
+ TDecoder::Reset(hasRangeIndices, tableIndex, ignoreStreamTableIndex);
+ HandleTableSwitch();
}
private:
@@ -1610,6 +1628,8 @@ private:
TDeque<std::tuple<ui64, ui64, std::vector<arrow::Datum>>> Chunks_;
+ size_t BlockSizeStructIndex_ = 0;
+
const TMkqlIOSpecs& Specs_;
arrow::MemoryPool* Pool_;
};
@@ -2517,6 +2537,27 @@ void DecodeToYson(TMkqlIOCache& specsCache, size_t tableIndex, const NUdf::TUnbo
WriteRowItems(specsCache, tableIndex, items, {}, ysonOut);
}
+ui32 GetBlockSizeStructIndex(const TMkqlIOSpecs& specs, size_t tableIndex) {
+ auto& decoder = specs.Inputs[tableIndex];
+
+ ui32 blockSizeStructIndex = 0;
+ switch (specs.InputBlockRepresentation_) {
+ case TMkqlIOSpecs::EBlockRepresentation::WideBlock:
+ blockSizeStructIndex = decoder->StructSize;
+ break;
+
+ case TMkqlIOSpecs::EBlockRepresentation::BlockStruct:
+ YQL_ENSURE(decoder->FillBlockStructSize.Defined());
+ blockSizeStructIndex = *decoder->FillBlockStructSize;
+ break;
+
+ default:
+ YQL_ENSURE(false, "unknown block representation");
+ }
+
+ return blockSizeStructIndex;
+}
+
//////////////////////////////////////////////////////////////////////////////////////////////////////////
} // NYql
diff --git a/yt/yql/providers/yt/codec/yt_codec_io.h b/yt/yql/providers/yt/codec/yt_codec_io.h
index 47f8d09863..3a8c421295 100644
--- a/yt/yql/providers/yt/codec/yt_codec_io.h
+++ b/yt/yql/providers/yt/codec/yt_codec_io.h
@@ -164,4 +164,6 @@ void DecodeToYson(TMkqlIOCache& specsCache, size_t tableIndex, const NKikimr::NU
THolder<NCommon::IBlockReader> MakeBlockReader(NYT::TRawTableReader& source, size_t blockCount, size_t blockSize);
+ui32 GetBlockSizeStructIndex(const TMkqlIOSpecs& specs, size_t tableIndex);
+
} // NYql
diff --git a/yt/yql/providers/yt/common/yql_yt_settings.cpp b/yt/yql/providers/yt/common/yql_yt_settings.cpp
index e8f019ea16..27d6a032a1 100644
--- a/yt/yql/providers/yt/common/yql_yt_settings.cpp
+++ b/yt/yql/providers/yt/common/yql_yt_settings.cpp
@@ -366,6 +366,13 @@ TYtConfiguration::TYtConfiguration(TTypeAnnotationContext& typeCtx)
OperationSpec[cluster] = spec;
HybridDqExecution = false;
});
+ REGISTER_SETTING(*this, FmrOperationSpec)
+ .Parser([](const TString& v) { return NYT::NodeFromYsonString(v, ::NYson::EYsonType::Node); })
+ .Validator([] (const TString&, const NYT::TNode& value) {
+ if (!value.IsMap()) {
+ throw yexception() << "Expected yson map, but got " << value.GetType();
+ }
+ });
REGISTER_SETTING(*this, Annotations)
.Parser([](const TString& v) { return NYT::NodeFromYsonString(v); })
.Validator([] (const TString&, const NYT::TNode& value) {
diff --git a/yt/yql/providers/yt/common/yql_yt_settings.h b/yt/yql/providers/yt/common/yql_yt_settings.h
index e2dd916629..5a291b5dab 100644
--- a/yt/yql/providers/yt/common/yql_yt_settings.h
+++ b/yt/yql/providers/yt/common/yql_yt_settings.h
@@ -183,6 +183,7 @@ struct TYtSettings {
NCommon::TConfSetting<TString, true> DockerImage;
NCommon::TConfSetting<NYT::TNode, true> JobEnv;
NCommon::TConfSetting<NYT::TNode, true> OperationSpec;
+ NCommon::TConfSetting<NYT::TNode, true> FmrOperationSpec;
NCommon::TConfSetting<NYT::TNode, true> Annotations;
NCommon::TConfSetting<NYT::TNode, true> StartedBy;
NCommon::TConfSetting<NYT::TNode, true> Description;
diff --git a/yt/yql/providers/yt/comp_nodes/yql_mkql_block_table_content.cpp b/yt/yql/providers/yt/comp_nodes/yql_mkql_block_table_content.cpp
index d935da2004..d4b65187b4 100644
--- a/yt/yql/providers/yt/comp_nodes/yql_mkql_block_table_content.cpp
+++ b/yt/yql/providers/yt/comp_nodes/yql_mkql_block_table_content.cpp
@@ -1,5 +1,5 @@
#include "yql_mkql_block_table_content.h"
-#include "yql_mkql_file_block_stream.h"
+#include "yql_mkql_file_list.h"
#include <yql/essentials/minikql/computation/mkql_computation_node_impl.h>
#include <yql/essentials/minikql/mkql_node_cast.h>
@@ -20,19 +20,20 @@ class TYtBlockTableContentWrapper : public TMutableComputationNode<TYtBlockTable
typedef TMutableComputationNode<TYtBlockTableContentWrapper> TBaseComputation;
public:
TYtBlockTableContentWrapper(TComputationMutables& mutables, NCommon::TCodecContext& codecCtx,
- TVector<TString>&& files, const TString& inputSpec, TStructType* origStructType, bool decompress, std::optional<ui64> expectedRowCount)
+ TVector<TString>&& files, const TString& inputSpec, TType* listType, bool decompress, std::optional<ui64> expectedRowCount)
: TBaseComputation(mutables)
, Files_(std::move(files))
, Decompress_(decompress)
, ExpectedRowCount_(std::move(expectedRowCount))
{
Spec_.SetUseBlockInput();
+ Spec_.SetInputBlockRepresentation(TMkqlIOSpecs::EBlockRepresentation::BlockStruct);
Spec_.SetIsTableContent();
- Spec_.Init(codecCtx, inputSpec, {}, {}, origStructType, {}, TString());
+ Spec_.Init(codecCtx, inputSpec, {}, {}, AS_TYPE(TListType, listType)->GetItemType(), {}, TString());
}
NUdf::TUnboxedValuePod DoCalculate(TComputationContext& ctx) const {
- return ctx.HolderFactory.Create<TFileWideBlockStreamValue>(Spec_, ctx.HolderFactory, Files_, Decompress_, 4, 1_MB, ExpectedRowCount_);
+ return ctx.HolderFactory.Create<TFileListValue>(Spec_, ctx.HolderFactory, Files_, Decompress_, 4, 1_MB, ExpectedRowCount_);
}
private:
@@ -47,15 +48,14 @@ private:
IComputationNode* WrapYtBlockTableContent(NCommon::TCodecContext& codecCtx,
TComputationMutables& mutables, TCallable& callable, TStringBuf pathPrefix)
{
- MKQL_ENSURE(callable.GetInputsCount() == 6, "Expected 6 arguments");
+ MKQL_ENSURE(callable.GetInputsCount() == 5, "Expected 5 arguments");
TString uniqueId(AS_VALUE(TDataLiteral, callable.GetInput(0))->AsValue().AsStringRef());
- auto origStructType = AS_TYPE(TStructType, AS_VALUE(TTypeType, callable.GetInput(1)));
- const ui32 tablesCount = AS_VALUE(TDataLiteral, callable.GetInput(2))->AsValue().Get<ui32>();
- TString inputSpec(AS_VALUE(TDataLiteral, callable.GetInput(3))->AsValue().AsStringRef());
- const bool decompress = AS_VALUE(TDataLiteral, callable.GetInput(4))->AsValue().Get<bool>();
+ const ui32 tablesCount = AS_VALUE(TDataLiteral, callable.GetInput(1))->AsValue().Get<ui32>();
+ TString inputSpec(AS_VALUE(TDataLiteral, callable.GetInput(2))->AsValue().AsStringRef());
+ const bool decompress = AS_VALUE(TDataLiteral, callable.GetInput(3))->AsValue().Get<bool>();
std::optional<ui64> length;
- TTupleLiteral* lengthTuple = AS_VALUE(TTupleLiteral, callable.GetInput(5));
+ TTupleLiteral* lengthTuple = AS_VALUE(TTupleLiteral, callable.GetInput(4));
if (lengthTuple->GetValuesCount() > 0) {
MKQL_ENSURE(lengthTuple->GetValuesCount() == 1, "Expect 1 element in the length tuple");
length = AS_VALUE(TDataLiteral, lengthTuple->GetValue(0))->AsValue().Get<ui64>();
@@ -67,7 +67,7 @@ IComputationNode* WrapYtBlockTableContent(NCommon::TCodecContext& codecCtx,
}
return new TYtBlockTableContentWrapper(mutables, codecCtx, std::move(files), inputSpec,
- origStructType, decompress, length);
+ callable.GetType()->GetReturnType(), decompress, length);
}
} // NYql
diff --git a/yt/yql/providers/yt/comp_nodes/yql_mkql_file_input_state.cpp b/yt/yql/providers/yt/comp_nodes/yql_mkql_file_input_state.cpp
index d814246f76..ae32ee6bb5 100644
--- a/yt/yql/providers/yt/comp_nodes/yql_mkql_file_input_state.cpp
+++ b/yt/yql/providers/yt/comp_nodes/yql_mkql_file_input_state.cpp
@@ -57,7 +57,8 @@ bool TFileInputState::NextValue() {
MkqlReader_.Next();
if (Spec_->UseBlockInput_) {
- auto blockCountValue = CurrentValue_.GetElement(Spec_->Inputs[CurrentInput_]->StructSize);
+ auto blockSizeStructIndex = GetBlockSizeStructIndex(*Spec_, CurrentInput_);
+ auto blockCountValue = CurrentValue_.GetElement(blockSizeStructIndex);
CurrentRecord_ += GetBlockCount(blockCountValue);
} else {
++CurrentRecord_;
diff --git a/yt/yql/providers/yt/comp_nodes/yql_mkql_file_list.cpp b/yt/yql/providers/yt/comp_nodes/yql_mkql_file_list.cpp
index 7d720cbbd5..410abc6ca9 100644
--- a/yt/yql/providers/yt/comp_nodes/yql_mkql_file_list.cpp
+++ b/yt/yql/providers/yt/comp_nodes/yql_mkql_file_list.cpp
@@ -1,14 +1,16 @@
#include "yql_mkql_file_list.h"
-#include "yql_mkql_file_input_state.h"
+
+#include <yql/essentials/minikql/computation/mkql_block_impl.h>
namespace NYql {
using namespace NKikimr::NMiniKQL;
-TFileListValueBase::TIterator::TIterator(TMemoryUsageInfo* memInfo, THolder<IInputState>&& state, std::optional<ui64> length)
+TFileListValueBase::TIterator::TIterator(TMemoryUsageInfo* memInfo, const TMkqlIOSpecs& spec, THolder<TFileInputState>&& state, std::optional<ui64> length)
: TComputationValue(memInfo)
, State_(std::move(state))
, ExpectedLength_(std::move(length))
+ , Spec_(spec)
{
}
@@ -22,19 +24,25 @@ bool TFileListValueBase::TIterator::Next(NUdf::TUnboxedValue& value) {
return false;
}
+ value = State_->GetCurrent();
if (ExpectedLength_) {
MKQL_ENSURE(*ExpectedLength_ > 0, "Invalid file length. State: " << State_->DebugInfo());
- --(*ExpectedLength_);
+ if (Spec_.UseBlockInput_) {
+ auto blockSizeStructIndex = GetBlockSizeStructIndex(Spec_, State_->GetTableIndex());
+ auto blockCountValue = value.GetElement(blockSizeStructIndex);
+ (*ExpectedLength_) -= GetBlockCount(blockCountValue);
+ } else {
+ --(*ExpectedLength_);
+ }
}
- value = State_->GetCurrent();
return true;
}
NUdf::TUnboxedValue TFileListValueBase::GetListIterator() const {
- return NUdf::TUnboxedValuePod(new TIterator(GetMemInfo(), MakeState(), Length));
+ return NUdf::TUnboxedValuePod(new TIterator(GetMemInfo(), Spec, MakeState(), Length));
}
-THolder<IInputState> TFileListValue::MakeState() const {
+THolder<TFileInputState> TFileListValue::MakeState() const {
return MakeHolder<TFileInputState>(Spec, HolderFactory, MakeMkqlFileInputs(FilePaths, Decompress), BlockCount, BlockSize);
}
diff --git a/yt/yql/providers/yt/comp_nodes/yql_mkql_file_list.h b/yt/yql/providers/yt/comp_nodes/yql_mkql_file_list.h
index 912a083efe..aa0b8d184c 100644
--- a/yt/yql/providers/yt/comp_nodes/yql_mkql_file_list.h
+++ b/yt/yql/providers/yt/comp_nodes/yql_mkql_file_list.h
@@ -3,6 +3,7 @@
#include "yql_mkql_input_stream.h"
#include <yt/yql/providers/yt/codec/yt_codec.h>
+#include <yt/yql/providers/yt/comp_nodes/yql_mkql_file_input_state.h>
#include <yql/essentials/minikql/computation/mkql_computation_node.h>
#include <yql/essentials/minikql/computation/mkql_custom_list.h>
@@ -28,19 +29,21 @@ public:
protected:
class TIterator : public NKikimr::NMiniKQL::TComputationValue<TIterator> {
public:
- TIterator(NKikimr::NMiniKQL::TMemoryUsageInfo* memInfo, THolder<IInputState>&& state, std::optional<ui64> length);
+ TIterator(NKikimr::NMiniKQL::TMemoryUsageInfo* memInfo, const TMkqlIOSpecs& spec, THolder<TFileInputState>&& state, std::optional<ui64> length);
private:
bool Next(NUdf::TUnboxedValue& value) override;
bool AtStart_ = true;
- THolder<IInputState> State_;
+ THolder<TFileInputState> State_;
std::optional<ui64> ExpectedLength_;
+
+ const TMkqlIOSpecs& Spec_;
};
NUdf::TUnboxedValue GetListIterator() const override;
- virtual THolder<IInputState> MakeState() const = 0;
+ virtual THolder<TFileInputState> MakeState() const = 0;
protected:
const TMkqlIOSpecs& Spec;
@@ -66,7 +69,7 @@ public:
}
protected:
- THolder<IInputState> MakeState() const override;
+ THolder<TFileInputState> MakeState() const override;
private:
const TVector<TString> FilePaths;
diff --git a/yt/yql/providers/yt/fmr/coordinator/impl/ya.make b/yt/yql/providers/yt/fmr/coordinator/impl/ya.make
index 11d323d128..5a16fbe0ce 100644
--- a/yt/yql/providers/yt/fmr/coordinator/impl/ya.make
+++ b/yt/yql/providers/yt/fmr/coordinator/impl/ya.make
@@ -7,6 +7,7 @@ SRCS(
PEERDIR(
library/cpp/random_provider
library/cpp/threading/future
+ library/cpp/yson/node
yt/yql/providers/yt/fmr/coordinator/interface
yql/essentials/utils/log
yql/essentials/utils
diff --git a/yt/yql/providers/yt/fmr/coordinator/impl/yql_yt_coordinator_impl.cpp b/yt/yql/providers/yt/fmr/coordinator/impl/yql_yt_coordinator_impl.cpp
index 9c14f30403..1831e7f1ac 100644
--- a/yt/yql/providers/yt/fmr/coordinator/impl/yql_yt_coordinator_impl.cpp
+++ b/yt/yql/providers/yt/fmr/coordinator/impl/yql_yt_coordinator_impl.cpp
@@ -64,7 +64,12 @@ public:
TString taskId = GenerateId();
auto taskParams = MakeDefaultTaskParamsFromOperation(request.OperationParams);
- TTask::TPtr createdTask = MakeTask(request.TaskType, taskId, taskParams, request.SessionId, request.ClusterConnection);
+ TMaybe<NYT::TNode> jobSettings = Nothing();
+ auto fmrOperationSpec = request.FmrOperationSpec;
+ if (fmrOperationSpec && fmrOperationSpec->IsMap() && fmrOperationSpec->HasKey("job_settings")) {
+ jobSettings = (*fmrOperationSpec)["job_settings"];
+ }
+ TTask::TPtr createdTask = MakeTask(request.TaskType, taskId, taskParams, request.SessionId, request.ClusterConnection, jobSettings);
Tasks_[taskId] = TCoordinatorTaskInfo{.Task = createdTask, .TaskStatus = ETaskStatus::Accepted, .OperationId = operationId};
diff --git a/yt/yql/providers/yt/fmr/coordinator/impl/yql_yt_coordinator_impl.h b/yt/yql/providers/yt/fmr/coordinator/impl/yql_yt_coordinator_impl.h
index d8a526096a..b3c06dbe5c 100644
--- a/yt/yql/providers/yt/fmr/coordinator/impl/yql_yt_coordinator_impl.h
+++ b/yt/yql/providers/yt/fmr/coordinator/impl/yql_yt_coordinator_impl.h
@@ -1,6 +1,7 @@
#pragma once
#include <library/cpp/random_provider/random_provider.h>
+#include <library/cpp/yson/node/node.h>
#include <util/system/mutex.h>
#include <util/system/guard.h>
#include <util/generic/queue.h>
diff --git a/yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/ya.make b/yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/ya.make
index 62df195306..cd29edc85d 100644
--- a/yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/ya.make
+++ b/yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/ya.make
@@ -5,6 +5,7 @@ SRCS(
)
PEERDIR(
+ library/cpp/yson/node
yt/yql/providers/yt/fmr/coordinator/interface
yt/yql/providers/yt/fmr/proto
yt/yql/providers/yt/fmr/request_options/proto_helpers
diff --git a/yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/yql_yt_coordinator_proto_helpers.cpp b/yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/yql_yt_coordinator_proto_helpers.cpp
index fbb4a641a0..8c244c3f1a 100644
--- a/yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/yql_yt_coordinator_proto_helpers.cpp
+++ b/yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/yql_yt_coordinator_proto_helpers.cpp
@@ -1,4 +1,5 @@
#include "yql_yt_coordinator_proto_helpers.h"
+#include <library/cpp/yson/node/node_io.h>
namespace NYql::NFmr {
@@ -71,6 +72,9 @@ NProto::TStartOperationRequest StartOperationRequestToProto(const TStartOperatio
protoStartOperationRequest.SetNumRetries(startOperationRequest.NumRetries);
auto protoClusterConnection = ClusterConnectionToProto(startOperationRequest.ClusterConnection);
protoStartOperationRequest.MutableClusterConnection()->Swap(&protoClusterConnection);
+ if (startOperationRequest.FmrOperationSpec) {
+ protoStartOperationRequest.SetFmrOperationSpec(NYT::NodeToYsonString(*startOperationRequest.FmrOperationSpec));
+ }
return protoStartOperationRequest;
}
@@ -84,6 +88,9 @@ TStartOperationRequest StartOperationRequestFromProto(const NProto::TStartOperat
}
startOperationRequest.NumRetries = protoStartOperationRequest.GetNumRetries();
startOperationRequest.ClusterConnection = ClusterConnectionFromProto(protoStartOperationRequest.GetClusterConnection());
+ if (protoStartOperationRequest.HasFmrOperationSpec()) {
+ startOperationRequest.FmrOperationSpec = NYT::NodeFromYsonString(protoStartOperationRequest.GetFmrOperationSpec());
+ }
return startOperationRequest;
}
diff --git a/yt/yql/providers/yt/fmr/coordinator/interface/yql_yt_coordinator.h b/yt/yql/providers/yt/fmr/coordinator/interface/yql_yt_coordinator.h
index 15a06b2d59..15ecd6c97a 100644
--- a/yt/yql/providers/yt/fmr/coordinator/interface/yql_yt_coordinator.h
+++ b/yt/yql/providers/yt/fmr/coordinator/interface/yql_yt_coordinator.h
@@ -25,7 +25,8 @@ struct TStartOperationRequest {
TString SessionId;
TMaybe<TString> IdempotencyKey = Nothing();
ui32 NumRetries = 1; // Not supported yet
- TClusterConnection ClusterConnection = {};
+ TClusterConnection ClusterConnection = {}; // TODO - change to map
+ TMaybe<NYT::TNode> FmrOperationSpec = Nothing();
};
struct TStartOperationResponse {
diff --git a/yt/yql/providers/yt/fmr/job/impl/ut/yql_yt_job_ut.cpp b/yt/yql/providers/yt/fmr/job/impl/ut/yql_yt_job_ut.cpp
index ac3a9263c6..7b17dee907 100644
--- a/yt/yql/providers/yt/fmr/job/impl/ut/yql_yt_job_ut.cpp
+++ b/yt/yql/providers/yt/fmr/job/impl/ut/yql_yt_job_ut.cpp
@@ -156,7 +156,7 @@ Y_UNIT_TEST_SUITE(TaskRunTests) {
TYtTableRef output = TYtTableRef("test_cluster", "test_path");
TUploadTaskParams params = TUploadTaskParams(input, output);
- TTask::TPtr task = MakeTask(ETaskType::Upload, "test_task_id", params, "test_session_id");
+ TTask::TPtr task = MakeTask(ETaskType::Upload, "test_task_id", params, "test_session_id", TClusterConnection());
auto key = GetTableDataServiceKey(input.TableId, "test_part_id", 0);
tableDataServicePtr->Put(key, GetBinaryYson(TableContent_1));
ETaskStatus status = RunJob(task, tableDataServicePtr, ytService, cancelFlag).TaskStatus;
@@ -178,7 +178,7 @@ Y_UNIT_TEST_SUITE(TaskRunTests) {
TYtTableRef output = TYtTableRef("test_cluster", "test_path");
TUploadTaskParams params = TUploadTaskParams(input, output);
- TTask::TPtr task = MakeTask(ETaskType::Upload, "test_task_id", params, "test_session_id");
+ TTask::TPtr task = MakeTask(ETaskType::Upload, "test_task_id", params, "test_session_id", TClusterConnection());
// No tables in tableDataService
ETaskStatus status = RunJob(task, tableDataServicePtr, ytService, cancelFlag).TaskStatus;
@@ -205,7 +205,7 @@ Y_UNIT_TEST_SUITE(TaskRunTests) {
auto params = TMergeTaskParams(inputs, output);
auto tableDataServiceExpectedOutputKey = GetTableDataServiceKey(output.TableId, output.PartId, 0);
- TTask::TPtr task = MakeTask(ETaskType::Upload, "test_task_id", params, "test_session_id");
+ TTask::TPtr task = MakeTask(ETaskType::Merge, "test_task_id", params, "test_session_id", TClusterConnection());
auto key_1 = GetTableDataServiceKey(input_1.TableId, "test_part_id", 0);
auto key_3 = GetTableDataServiceKey(input_3.TableId, "test_part_id", 0);
@@ -239,7 +239,7 @@ Y_UNIT_TEST_SUITE(TaskRunTests) {
auto params = TMergeTaskParams(inputs, output);
auto tableDataServiceExpectedOutputKey = GetTableDataServiceKey(output.TableId, output.PartId, 0);
- TTask::TPtr task = MakeTask(ETaskType::Upload, "test_task_id", params, "test_session_id");
+ TTask::TPtr task = MakeTask(ETaskType::Merge, "test_task_id", params, "test_session_id", TClusterConnection());
auto key_1 = GetTableDataServiceKey(input_1.TableId, "test_part_id", 0);
auto key_3 = GetTableDataServiceKey(input_3.TableId, "test_part_id", 0);
diff --git a/yt/yql/providers/yt/fmr/job/impl/yql_yt_job_impl.cpp b/yt/yql/providers/yt/fmr/job/impl/yql_yt_job_impl.cpp
index aa3d0b5112..c8da1df480 100644
--- a/yt/yql/providers/yt/fmr/job/impl/yql_yt_job_impl.cpp
+++ b/yt/yql/providers/yt/fmr/job/impl/yql_yt_job_impl.cpp
@@ -16,15 +16,12 @@ namespace NYql::NFmr {
class TFmrJob: public IFmrJob {
public:
- TFmrJob(ITableDataService::TPtr tableDataService, IYtService::TPtr ytService, std::shared_ptr<std::atomic<bool>> cancelFlag, const TFmrJobSettings& settings)
+ TFmrJob(ITableDataService::TPtr tableDataService, IYtService::TPtr ytService, std::shared_ptr<std::atomic<bool>> cancelFlag, const TMaybe<TFmrJobSettings>& settings)
: TableDataService_(tableDataService), YtService_(ytService), CancelFlag_(cancelFlag), Settings_(settings)
{
}
- virtual std::variant<TError, TStatistics> Download(
- const TDownloadTaskParams& params,
- const TClusterConnection& clusterConnection
- ) override {
+ virtual std::variant<TError, TStatistics> Download(const TDownloadTaskParams& params, const TClusterConnection& clusterConnection) override {
try {
const auto ytTable = params.Input;
const auto cluster = params.Input.Cluster;
@@ -36,9 +33,9 @@ public:
YQL_CLOG(DEBUG, FastMapReduce) << "Downloading " << cluster << '.' << path;
auto ytTableReader = YtService_->MakeReader(ytTable, clusterConnection); // TODO - pass YtReader settings from Gateway
- auto tableDataServiceWriter = TFmrTableDataServiceWriter(tableId, partId, TableDataService_, Settings_.FmrTableDataServiceWriterSettings);
+ auto tableDataServiceWriter = TFmrTableDataServiceWriter(tableId, partId, TableDataService_, GetFmrTableDataServiceWriterSettings());
- ParseRecords(*ytTableReader, tableDataServiceWriter, Settings_.ParseRecordSettings.BlockCount, Settings_.ParseRecordSettings.BlockSize);
+ ParseRecords(*ytTableReader, tableDataServiceWriter, GetParseRecordSettings().BlockCount, GetParseRecordSettings().BlockSize);
tableDataServiceWriter.Flush();
TTableStats stats = tableDataServiceWriter.GetStats();
@@ -59,9 +56,9 @@ public:
YQL_CLOG(DEBUG, FastMapReduce) << "Uploading " << cluster << '.' << path;
- auto tableDataServiceReader = TFmrTableDataServiceReader(tableId, tableRanges, TableDataService_, Settings_.FmrTableDataServiceReaderSettings);
- auto ytTableWriter = YtService_->MakeWriter(ytTable, clusterConnection); // TODO - pass YtReader settings from Gateway
- ParseRecords(tableDataServiceReader, *ytTableWriter, Settings_.ParseRecordSettings.BlockCount, Settings_.ParseRecordSettings.BlockSize);
+ auto tableDataServiceReader = TFmrTableDataServiceReader(tableId, tableRanges, TableDataService_, GetFmrTableDataServiceReaderSettings());
+ auto ytTableWriter = YtService_->MakeWriter(ytTable, clusterConnection);
+ ParseRecords(tableDataServiceReader, *ytTableWriter, GetParseRecordSettings().BlockCount, GetParseRecordSettings().BlockSize);
ytTableWriter->Flush();
return TStatistics();
@@ -71,6 +68,7 @@ public:
}
virtual std::variant<TError, TStatistics> Merge(const TMergeTaskParams& params, const TClusterConnection& clusterConnection) override {
+ // TODO - unordered_map<ClusterConnection>
// расширить таск парамс. добавить туда мету
try {
const auto inputs = params.Input;
@@ -78,13 +76,13 @@ public:
YQL_CLOG(DEBUG, FastMapReduce) << "Merging " << inputs.size() << " inputs";
- auto tableDataServiceWriter = TFmrTableDataServiceWriter(output.TableId, output.PartId, TableDataService_, Settings_.FmrTableDataServiceWriterSettings);
+ auto tableDataServiceWriter = TFmrTableDataServiceWriter(output.TableId, output.PartId, TableDataService_, GetFmrTableDataServiceWriterSettings());
for (const auto& inputTableRef : inputs) {
if (CancelFlag_->load()) {
return TError("Canceled");
}
auto inputTableReader = GetTableInputStream(inputTableRef, clusterConnection);
- ParseRecords(*inputTableReader, tableDataServiceWriter, Settings_.ParseRecordSettings.BlockCount, Settings_.ParseRecordSettings.BlockSize);
+ ParseRecords(*inputTableReader, tableDataServiceWriter, GetParseRecordSettings().BlockCount, GetParseRecordSettings().BlockSize);
}
tableDataServiceWriter.Flush();
return TStatistics({{output, tableDataServiceWriter.GetStats()}});
@@ -101,17 +99,29 @@ private:
if (ytTable) {
return YtService_->MakeReader(*ytTable, clusterConnection); // TODO - pass YtReader settings from Gateway
} else if (fmrTable) {
- return MakeIntrusive<TFmrTableDataServiceReader>(fmrTable->TableId, fmrTable->TableRanges, TableDataService_, Settings_.FmrTableDataServiceReaderSettings);
+ return MakeIntrusive<TFmrTableDataServiceReader>(fmrTable->TableId, fmrTable->TableRanges, TableDataService_, GetFmrTableDataServiceReaderSettings());
} else {
ythrow yexception() << "Unsupported table type";
}
}
+ TParseRecordSettings GetParseRecordSettings() {
+ return Settings_ ? Settings_->ParseRecordSettings : TParseRecordSettings();
+ }
+
+ TFmrTableDataServiceReaderSettings GetFmrTableDataServiceReaderSettings() {
+ return Settings_ ? Settings_->FmrTableDataServiceReaderSettings : TFmrTableDataServiceReaderSettings();
+ }
+
+ TFmrTableDataServiceWriterSettings GetFmrTableDataServiceWriterSettings() {
+ return Settings_ ? Settings_->FmrTableDataServiceWriterSettings : TFmrTableDataServiceWriterSettings();
+ }
+
private:
ITableDataService::TPtr TableDataService_;
IYtService::TPtr YtService_;
std::shared_ptr<std::atomic<bool>> CancelFlag_;
- const TFmrJobSettings Settings_;
+ TMaybe<TFmrJobSettings> Settings_;
};
IFmrJob::TPtr MakeFmrJob(
@@ -128,9 +138,10 @@ TJobResult RunJob(
ITableDataService::TPtr tableDataService,
IYtService::TPtr ytService,
std::shared_ptr<std::atomic<bool>> cancelFlag,
- const TFmrJobSettings& settings
+ const TMaybe<TFmrJobSettings>& settings
) {
- IFmrJob::TPtr job = MakeFmrJob(tableDataService, ytService, cancelFlag, settings);
+ TFmrJobSettings jobSettings = settings ? *settings : GetJobSettingsFromTask(task);
+ IFmrJob::TPtr job = MakeFmrJob(tableDataService, ytService, cancelFlag, jobSettings);
auto processTask = [job, task] (auto&& taskParams) {
using T = std::decay_t<decltype(taskParams)>;
@@ -160,4 +171,36 @@ TJobResult RunJob(
return {ETaskStatus::Completed, *statistics};
};
+TFmrJobSettings GetJobSettingsFromTask(TTask::TPtr task) {
+ if (!task->JobSettings) {
+ return TFmrJobSettings();
+ }
+ auto jobSettings = *task->JobSettings;
+ YQL_ENSURE(jobSettings.IsMap());
+ TFmrJobSettings resultSettings{};
+ if (jobSettings.HasKey("parse_record_settings")) {
+ auto& parseRecordSettings = jobSettings["parse_record_settings"];
+ if (parseRecordSettings.HasKey("block_count")) {
+ resultSettings.ParseRecordSettings.BlockCount = parseRecordSettings["block_count"].AsInt64();
+ }
+ if (parseRecordSettings.HasKey("block_size")) {
+ resultSettings.ParseRecordSettings.BlockSize = parseRecordSettings["block_size"].AsInt64();
+ // TODO - support different formats (B, MB, ...)
+ }
+ }
+ if (jobSettings.HasKey("fmr_reader_settings")) {
+ auto& fmrReaderSettings = jobSettings["fmr_reader_settings"];
+ if (fmrReaderSettings.HasKey("read_ahead_chunks")) {
+ resultSettings.FmrTableDataServiceReaderSettings.ReadAheadChunks = fmrReaderSettings["read_ahead_chunks"].AsInt64();
+ }
+ }
+ if (jobSettings.HasKey("fmr_writer_settings")) {
+ auto& fmrWriterSettings = jobSettings["fmr_writer_settings"];
+ if (fmrWriterSettings.HasKey("chunk_size")) {
+ resultSettings.FmrTableDataServiceWriterSettings.ChunkSize = fmrWriterSettings["chunk_size"].AsInt64();
+ }
+ }
+ return resultSettings;
+}
+
} // namespace NYql
diff --git a/yt/yql/providers/yt/fmr/job/impl/yql_yt_job_impl.h b/yt/yql/providers/yt/fmr/job/impl/yql_yt_job_impl.h
index 7010a2eb96..cb21e95f9c 100644
--- a/yt/yql/providers/yt/fmr/job/impl/yql_yt_job_impl.h
+++ b/yt/yql/providers/yt/fmr/job/impl/yql_yt_job_impl.h
@@ -9,7 +9,7 @@ namespace NYql::NFmr {
struct TParseRecordSettings {
ui64 BlockCount = 1;
- ui64 BlockSize = 1024 * 1024; // 1Mb
+ ui64 BlockSize = 1024 * 1024;
};
struct TFmrJobSettings {
@@ -20,6 +20,8 @@ struct TFmrJobSettings {
IFmrJob::TPtr MakeFmrJob(ITableDataService::TPtr tableDataService, IYtService::TPtr ytService, std::shared_ptr<std::atomic<bool>> cancelFlag, const TFmrJobSettings& settings = {});
-TJobResult RunJob(TTask::TPtr task, ITableDataService::TPtr tableDataService, IYtService::TPtr ytService, std::shared_ptr<std::atomic<bool>> cancelFlag, const TFmrJobSettings& settings = {});
+TJobResult RunJob(TTask::TPtr task, ITableDataService::TPtr tableDataService, IYtService::TPtr ytService, std::shared_ptr<std::atomic<bool>> cancelFlag, const TMaybe<TFmrJobSettings>& settings = Nothing());
+
+TFmrJobSettings GetJobSettingsFromTask(TTask::TPtr task);
} // namespace NYql
diff --git a/yt/yql/providers/yt/fmr/job/impl/yql_yt_table_data_service_writer.h b/yt/yql/providers/yt/fmr/job/impl/yql_yt_table_data_service_writer.h
index a708b79543..2e27cd4f07 100644
--- a/yt/yql/providers/yt/fmr/job/impl/yql_yt_table_data_service_writer.h
+++ b/yt/yql/providers/yt/fmr/job/impl/yql_yt_table_data_service_writer.h
@@ -10,7 +10,7 @@
namespace NYql::NFmr {
struct TFmrTableDataServiceWriterSettings {
- ui64 ChunkSize = 1024 * 1024; // 1Mb
+ ui64 ChunkSize = 1024 * 1024;
};
class TFmrTableDataServiceWriter: public NYT::TRawTableWriter {
diff --git a/yt/yql/providers/yt/fmr/proto/coordinator.proto b/yt/yql/providers/yt/fmr/proto/coordinator.proto
index 99d0ca2361..a104cf26ea 100644
--- a/yt/yql/providers/yt/fmr/proto/coordinator.proto
+++ b/yt/yql/providers/yt/fmr/proto/coordinator.proto
@@ -22,6 +22,7 @@ message TStartOperationRequest {
optional string IdempotencyKey = 4;
uint32 NumRetries = 5;
TClusterConnection ClusterConnection = 6;
+ optional string FmrOperationSpec = 7;
}
message TStartOperationResponse {
diff --git a/yt/yql/providers/yt/fmr/proto/request_options.proto b/yt/yql/providers/yt/fmr/proto/request_options.proto
index a2580eb39f..4bd35eacaa 100644
--- a/yt/yql/providers/yt/fmr/proto/request_options.proto
+++ b/yt/yql/providers/yt/fmr/proto/request_options.proto
@@ -156,6 +156,7 @@ message TTask {
string SessionId = 4;
optional uint32 NumRetries = 5;
TClusterConnection ClusterConnection = 6;
+ optional string JobSettings = 7;
}
message TTaskState {
diff --git a/yt/yql/providers/yt/fmr/request_options/proto_helpers/yql_yt_request_proto_helpers.cpp b/yt/yql/providers/yt/fmr/request_options/proto_helpers/yql_yt_request_proto_helpers.cpp
index 0ce6dea92d..0860c00996 100644
--- a/yt/yql/providers/yt/fmr/request_options/proto_helpers/yql_yt_request_proto_helpers.cpp
+++ b/yt/yql/providers/yt/fmr/request_options/proto_helpers/yql_yt_request_proto_helpers.cpp
@@ -1,4 +1,5 @@
#include "yql_yt_request_proto_helpers.h"
+#include <library/cpp/yson/node/node_io.h>
namespace NYql::NFmr {
@@ -399,6 +400,9 @@ NProto::TTask TaskToProto(const TTask& task) {
protoTask.SetNumRetries(task.NumRetries);
auto clusterConnection = ClusterConnectionToProto(task.ClusterConnection);
protoTask.MutableClusterConnection()->Swap(&clusterConnection);
+ if (task.JobSettings) {
+ protoTask.SetJobSettings(NYT::NodeToYsonString(*task.JobSettings));
+ }
return protoTask;
}
@@ -410,6 +414,9 @@ TTask TaskFromProto(const NProto::TTask& protoTask) {
task.SessionId = protoTask.GetSessionId();
task.NumRetries = protoTask.GetNumRetries();
task.ClusterConnection = ClusterConnectionFromProto(protoTask.GetClusterConnection());
+ if (protoTask.HasJobSettings()) {
+ task.JobSettings = NYT::NodeFromYsonString(protoTask.GetJobSettings());
+ }
return task;
}
diff --git a/yt/yql/providers/yt/fmr/request_options/ya.make b/yt/yql/providers/yt/fmr/request_options/ya.make
index 9e330848c2..df82ec258f 100644
--- a/yt/yql/providers/yt/fmr/request_options/ya.make
+++ b/yt/yql/providers/yt/fmr/request_options/ya.make
@@ -5,6 +5,7 @@ SRCS(
)
PEERDIR(
+ library/cpp/yson/node
library/cpp/threading/future
)
diff --git a/yt/yql/providers/yt/fmr/request_options/yql_yt_request_options.cpp b/yt/yql/providers/yt/fmr/request_options/yql_yt_request_options.cpp
index 319cf20801..0dc3650855 100644
--- a/yt/yql/providers/yt/fmr/request_options/yql_yt_request_options.cpp
+++ b/yt/yql/providers/yt/fmr/request_options/yql_yt_request_options.cpp
@@ -2,8 +2,8 @@
namespace NYql::NFmr {
-TTask::TPtr MakeTask(ETaskType taskType, const TString& taskId, const TTaskParams& taskParams, const TString& sessionId, const TClusterConnection& clusterConnection) {
- return MakeIntrusive<TTask>(taskType, taskId, taskParams, sessionId, clusterConnection);
+TTask::TPtr MakeTask(ETaskType taskType, const TString& taskId, const TTaskParams& taskParams, const TString& sessionId, const TClusterConnection& clusterConnection, const TMaybe<NYT::TNode>& jobSettings) {
+ return MakeIntrusive<TTask>(taskType, taskId, taskParams, sessionId, clusterConnection, jobSettings);
}
TTaskState::TPtr MakeTaskState(ETaskStatus taskStatus, const TString& taskId, const TMaybe<TFmrError>& taskErrorMessage, const TStatistics& stats) {
diff --git a/yt/yql/providers/yt/fmr/request_options/yql_yt_request_options.h b/yt/yql/providers/yt/fmr/request_options/yql_yt_request_options.h
index 2c2e94a057..de18d91fa0 100644
--- a/yt/yql/providers/yt/fmr/request_options/yql_yt_request_options.h
+++ b/yt/yql/providers/yt/fmr/request_options/yql_yt_request_options.h
@@ -1,5 +1,6 @@
#pragma once
+#include <library/cpp/yson/node/node.h>
#include <util/digest/numeric.h>
#include <util/generic/maybe.h>
#include <util/generic/string.h>
@@ -167,8 +168,8 @@ struct TClusterConnection {
struct TTask: public TThrRefBase {
TTask() = default;
- TTask(ETaskType taskType, const TString& taskId, const TTaskParams& taskParams, const TString& sessionId, const TClusterConnection& clusterConnection, ui32 numRetries = 1)
- : TaskType(taskType), TaskId(taskId), TaskParams(taskParams), SessionId(sessionId), ClusterConnection(clusterConnection), NumRetries(numRetries)
+ TTask(ETaskType taskType, const TString& taskId, const TTaskParams& taskParams, const TString& sessionId, const TClusterConnection& clusterConnection, const TMaybe<NYT::TNode> & jobSettings = Nothing(), ui32 numRetries = 1)
+ : TaskType(taskType), TaskId(taskId), TaskParams(taskParams), SessionId(sessionId), ClusterConnection(clusterConnection), JobSettings(jobSettings), NumRetries(numRetries)
{
}
@@ -177,6 +178,7 @@ struct TTask: public TThrRefBase {
TTaskParams TaskParams = {};
TString SessionId;
TClusterConnection ClusterConnection = {};
+ TMaybe<NYT::TNode> JobSettings = {};
ui32 NumRetries; // Not supported yet
using TPtr = TIntrusivePtr<TTask>;
@@ -197,7 +199,7 @@ struct TTaskState: public TThrRefBase {
using TPtr = TIntrusivePtr<TTaskState>;
};
-TTask::TPtr MakeTask(ETaskType taskType, const TString& taskId, const TTaskParams& taskParams, const TString& sessionId, const TClusterConnection& clusterConnection = TClusterConnection{});
+TTask::TPtr MakeTask(ETaskType taskType, const TString& taskId, const TTaskParams& taskParams, const TString& sessionId, const TClusterConnection& clusterConnection = TClusterConnection{}, const TMaybe<NYT::TNode>& jobSettings = Nothing());
TTaskState::TPtr MakeTaskState(ETaskStatus taskStatus, const TString& taskId, const TMaybe<TFmrError>& taskErrorMessage = Nothing(), const TStatistics& stats = TStatistics());
diff --git a/yt/yql/providers/yt/fmr/yt_service/impl/yql_yt_yt_service_impl.cpp b/yt/yql/providers/yt/fmr/yt_service/impl/yql_yt_yt_service_impl.cpp
index c6e535fb71..d618766ede 100644
--- a/yt/yql/providers/yt/fmr/yt_service/impl/yql_yt_yt_service_impl.cpp
+++ b/yt/yql/providers/yt/fmr/yt_service/impl/yql_yt_yt_service_impl.cpp
@@ -28,13 +28,13 @@ public:
NYT::TRawTableWriterPtr MakeWriter(
const TYtTableRef& ytTable,
const TClusterConnection& clusterConnection,
- const TYtWriterSettings& writerSetttings
+ const TYtWriterSettings& /*writerSettings*/
) override {
auto client = CreateClient(clusterConnection);
auto transaction = client->AttachTransaction(GetGuid(clusterConnection.TransactionId));
- auto path = NYT::TRichYPath(NYT::AddPathPrefix(ytTable.Path, "//"));
- auto richPath = NYT::TRichYPath(path).Append(writerSetttings.AppendMode);
- return transaction->CreateRawWriter(richPath, NYT::TFormat::YsonBinary());
+ TString ytPath = NYT::AddPathPrefix(ytTable.Path, "//");
+ auto richPath = NYT::TRichYPath(ytPath).Append(true);
+ return transaction->CreateRawWriter(richPath, NYT::TFormat::YsonBinary()); // TODO - support writerOptions
}
private:
diff --git a/yt/yql/providers/yt/fmr/yt_service/interface/yql_yt_yt_service.h b/yt/yql/providers/yt/fmr/yt_service/interface/yql_yt_yt_service.h
index 1661c18895..7ed2a47fc0 100644
--- a/yt/yql/providers/yt/fmr/yt_service/interface/yql_yt_yt_service.h
+++ b/yt/yql/providers/yt/fmr/yt_service/interface/yql_yt_yt_service.h
@@ -11,7 +11,6 @@ struct TYtReaderSettings {
};
struct TYtWriterSettings {
- bool AppendMode = true;
};
class IYtService: public TThrRefBase {
diff --git a/yt/yql/providers/yt/gateway/file/yql_yt_file.cpp b/yt/yql/providers/yt/gateway/file/yql_yt_file.cpp
index 529e0806df..429880e66e 100644
--- a/yt/yql/providers/yt/gateway/file/yql_yt_file.cpp
+++ b/yt/yql/providers/yt/gateway/file/yql_yt_file.cpp
@@ -1593,7 +1593,7 @@ private:
}
TClusterConnectionResult GetClusterConnection(const TClusterConnectionOptions&& /*options*/) override {
- ythrow yexception() << "GetClusterConnection should not be called for file gateway";
+ return TClusterConnectionResult();
}
diff --git a/yt/yql/providers/yt/gateway/file/yql_yt_file_comp_nodes.cpp b/yt/yql/providers/yt/gateway/file/yql_yt_file_comp_nodes.cpp
index 65453c8137..889c06436f 100644
--- a/yt/yql/providers/yt/gateway/file/yql_yt_file_comp_nodes.cpp
+++ b/yt/yql/providers/yt/gateway/file/yql_yt_file_comp_nodes.cpp
@@ -102,7 +102,7 @@ public:
}
protected:
- THolder<IInputState> MakeState() const override {
+ THolder<TFileInputState> MakeState() const override {
return MakeHolder<TFileInputStateWithTableState>(Spec, HolderFactory, MakeTextYsonInputs(TablePaths_),
0u, 1_MB, TTableState(TableState_));
}
diff --git a/yt/yql/providers/yt/gateway/file/yql_yt_file_mkql_compiler.cpp b/yt/yql/providers/yt/gateway/file/yql_yt_file_mkql_compiler.cpp
index f1ffba0d20..2296595dc7 100644
--- a/yt/yql/providers/yt/gateway/file/yql_yt_file_mkql_compiler.cpp
+++ b/yt/yql/providers/yt/gateway/file/yql_yt_file_mkql_compiler.cpp
@@ -639,13 +639,7 @@ void RegisterYtFileMkqlCompilers(NCommon::TMkqlCallableCompilerBase& compiler) {
output.Ref(), itemsCount, ctx, true);
}
- return ctx.ProgramBuilder.WideToBlocks(ctx.ProgramBuilder.FromFlow(ctx.ProgramBuilder.ExpandMap(ctx.ProgramBuilder.ToFlow(values), [&](TRuntimeNode item) -> TRuntimeNode::TList {
- TRuntimeNode::TList result;
- for (auto& origItem : origItemStructType->GetItems()) {
- result.push_back(ctx.ProgramBuilder.Member(item, origItem->GetName()));
- }
- return result;
- })));
+ return ctx.ProgramBuilder.ListToBlocks(values);
});
compiler.AddCallable({TYtSort::CallableName(), TYtCopy::CallableName(), TYtMerge::CallableName()},
diff --git a/yt/yql/providers/yt/gateway/fmr/ya.make b/yt/yql/providers/yt/gateway/fmr/ya.make
index c424b95d0b..d00a1b6ee4 100644
--- a/yt/yql/providers/yt/gateway/fmr/ya.make
+++ b/yt/yql/providers/yt/gateway/fmr/ya.make
@@ -5,13 +5,16 @@ SRCS(
)
PEERDIR(
+ yql/essentials/providers/common/codec
yql/essentials/utils/log
- yt/cpp/mapreduce/client
+ yt/cpp/mapreduce/common
+ yt/cpp/mapreduce/interface
yt/yql/providers/yt/gateway/lib
yt/yql/providers/yt/gateway/native
yt/yql/providers/yt/expr_nodes
yt/yql/providers/yt/fmr/coordinator/interface
yt/yql/providers/yt/lib/config_clusters
+ yt/yql/providers/yt/lib/schema
yt/yql/providers/yt/provider
)
diff --git a/yt/yql/providers/yt/gateway/fmr/yql_yt_fmr.cpp b/yt/yql/providers/yt/gateway/fmr/yql_yt_fmr.cpp
index aab5bdb6dd..dd5160d07c 100644
--- a/yt/yql/providers/yt/gateway/fmr/yql_yt_fmr.cpp
+++ b/yt/yql/providers/yt/gateway/fmr/yql_yt_fmr.cpp
@@ -2,16 +2,20 @@
#include <thread>
+#include <yt/cpp/mapreduce/common/helpers.h>
#include <yt/cpp/mapreduce/interface/client.h>
#include <yt/yql/providers/yt/expr_nodes/yql_yt_expr_nodes.h>
#include <yt/yql/providers/yt/gateway/lib/yt_helpers.h>
#include <yt/yql/providers/yt/gateway/native/yql_yt_native.h>
+#include <yt/yql/providers/yt/lib/schema/schema.h>
#include <yt/yql/providers/yt/provider/yql_yt_helpers.h>
+#include <yql/essentials/providers/common/codec/yql_codec_type_flags.h>
#include <yql/essentials/utils/log/log.h>
#include <yql/essentials/utils/log/profile.h>
#include <util/generic/ptr.h>
+#include <util/string/split.h>
#include <util/thread/pool.h>
using namespace NThreading;
@@ -27,7 +31,7 @@ enum class ETablePresenceStatus {
Both
};
-struct TDownloadTableToFmrResult: public NCommon::TOperationResult {}; // Download Yt -> Fmr TableDataService
+struct TFmrOperationResult: public NCommon::TOperationResult {};
class TFmrYtGateway final: public TYtForwardingGatewayBase {
public:
@@ -41,7 +45,7 @@ public:
auto getOperationStatusesFunc = [&] {
while (!StopFmrGateway_) {
with_lock(SessionStates_->Mutex) {
- auto checkOperationStatuses = [&] <typename T> (std::unordered_map<TString, TPromise<T>>& operationStatuses, const TString& sessionId) {
+ auto checkOperationStatuses = [&] (std::unordered_map<TString, TPromise<TFmrOperationResult>>& operationStatuses, const TString& sessionId) {
for (auto& [operationId, promise]: operationStatuses) {
YQL_CLOG(TRACE, FastMapReduce) << "Sending get operation request to coordinator with operationId: " << operationId;
@@ -53,9 +57,15 @@ public:
with_lock(SessionStates_->Mutex) {
bool operationCompleted = getOperationStatus != EOperationStatus::Accepted && getOperationStatus != EOperationStatus::InProgress;
if (operationCompleted) {
- // operation finished, set value in future returned in Publish / Download
+ // operation finished, set value in future returned in DoMerge / DoUpload
bool hasCompletedSuccessfully = getOperationStatus == EOperationStatus::Completed;
- SendOperationCompletionSignal(promise, hasCompletedSuccessfully, operationErrorMessages);
+ if (hasCompletedSuccessfully) {
+ TFmrOperationResult fmrOperationResult{};
+ fmrOperationResult.SetSuccess();
+ promise.SetValue(fmrOperationResult);
+ } else {
+ promise.SetException(JoinRange(' ', operationErrorMessages.begin(), operationErrorMessages.end()));
+ }
YQL_CLOG(DEBUG, FastMapReduce) << "Sending delete operation request to coordinator with operationId: " << operationId;
auto deleteOperationFuture = Coordinator_->DeleteOperation({operationId});
deleteOperationFuture.Subscribe([&, sessionId, operationId] (const auto& deleteFuture) {
@@ -66,8 +76,7 @@ public:
YQL_ENSURE( SessionStates_->Sessions.contains(sessionId));
auto& sessionInfo = SessionStates_->Sessions[sessionId];
auto& operationStates = sessionInfo.OperationStates;
- operationStates.DownloadOperationStatuses.erase(operationId);
- operationStates.UploadOperationStatuses.erase(operationId);
+ operationStates.OperationStatuses.erase(operationId);
}
});
}
@@ -78,8 +87,7 @@ public:
for (auto [sessionId, sessionInfo]: SessionStates_->Sessions) {
auto& operationStates = sessionInfo.OperationStates;
- checkOperationStatuses(operationStates.DownloadOperationStatuses, sessionId);
- checkOperationStatuses(operationStates.UploadOperationStatuses, sessionId);
+ checkOperationStatuses(operationStates.OperationStatuses, sessionId);
}
}
Sleep(TimeToSleepBetweenGetOperationRequests_);
@@ -93,138 +101,73 @@ public:
GetOperationStatusesThread_.join();
}
- TFuture<TPublishResult> Publish(const TExprNode::TPtr& node, TExprContext& ctx, TPublishOptions&& options) final {
+ TFuture<TRunResult> Run(const TExprNode::TPtr& node, TExprContext& ctx, TRunOptions&& options) final {
YQL_LOG_CTX_SCOPE(TStringBuf("Gateway"), __FUNCTION__);
- if (!Coordinator_) {
- return Slave_->Publish(node, ctx, std::move(options));
- }
- auto publish = TYtPublish(node);
+ auto nodePos = ctx.GetPosition(node->Pos());
+ TYtOpBase opBase(node);
TString sessionId = options.SessionId();
- auto cluster = publish.DataSink().Cluster().StringValue();
- auto token = options.Config()->Auth.Get();
- TString transformedInputPath;
- TString userName = GetUsername(sessionId);
- for (auto out: publish.Input()) {
- auto outTable = GetOutTable(out).Cast<TYtOutTable>();
- TStringBuf inputPath = outTable.Name().Value();
- transformedInputPath = NYql::TransformPath(GetTablesTmpFolder(*options.Config()), inputPath, true, userName);
- break;
+ if (auto op = opBase.Maybe<TYtMerge>()) {
+ auto ytMerge = op.Cast();
+ std::vector<TYtTableRef> inputTables = GetMergeInputTables(ytMerge);
+ TYtTableRef outputTable = GetMergeOutputTable(ytMerge);
+ auto future = DoMerge(inputTables, outputTable, std::move(options));
+ return future.Apply([this, pos = nodePos, outputTable = std::move(outputTable), options = std::move(options)] (const TFuture<TFmrOperationResult>& f) {
+ try {
+ f.GetValue(); // rethrow error if any
+ TString sessionId = options.SessionId();
+ auto config = options.Config();
+ TString transformedOutputTableId = GetTransformedPath(outputTable.Path, sessionId, config);
+ TString fmrOutputTableId = outputTable.Cluster + "." + transformedOutputTableId;
+ SetTablePresenceStatus(fmrOutputTableId, sessionId, ETablePresenceStatus::OnlyInFmr);
+ TRunResult result;
+ result.OutTableStats.emplace_back(outputTable.Path, MakeIntrusive<TYtTableStatInfo>()); // TODO - add statistics?
+ result.OutTableStats.back().second->Id = "fmr_" + fmrOutputTableId;
+ result.SetSuccess();
+ return MakeFuture<TRunResult>(std::move(result));
+ } catch (...) {
+ return MakeFuture(ResultFromCurrentException<TRunResult>(pos));
+ }
+ });
+ } else {
+ return Slave_->Run(node, ctx, std::move(options));
}
+ }
- // TODO - handle several inputs in Publish, use ColumnGroups, Run Merge
-
- auto outputPath = publish.Publish().Name().StringValue();
- auto idempotencyKey = GenerateId();
+ TFuture<TPublishResult> Publish(const TExprNode::TPtr& node, TExprContext& ctx, TPublishOptions&& options) final {
+ TString sessionId = options.SessionId();
+ YQL_LOG_CTX_SCOPE(TStringBuf("Gateway"), __FUNCTION__);
+ auto nodePos = ctx.GetPosition(node->Pos());
+ auto publish = TYtPublish(node);
- auto fmrTableId = cluster + "." + outputPath;
+ auto cluster = publish.DataSink().Cluster().StringValue();
+ std::vector<TFmrTableRef> fmrTableIds;
+ auto config = options.Config();
- TFuture<TDownloadTableToFmrResult> downloadToFmrFuture;
- TFuture<void> downloadedSuccessfully;
+ std::vector<TFuture<TFmrOperationResult>> uploadFmrTablesToYtFutures;
- with_lock(SessionStates_->Mutex) {
- auto& tablePresenceStatuses = SessionStates_->Sessions[sessionId].TablePresenceStatuses;
+ for (auto out: publish.Input()) {
+ auto outTableWithCluster = GetOutTableWithCluster(out);
+ auto outTable = GetOutTable(out).Cast<TYtOutTable>();
+ TStringBuf inputPath = outTable.Name().Value();
+ TString transformedInputPath = GetTransformedPath(ToString(inputPath), sessionId, config);
+ auto outputBase = out.Operation().Cast<TYtOutputOpBase>().Ptr();
- if (!tablePresenceStatuses.contains(fmrTableId)) {
- TYtTableRef ytTable{.Path = transformedInputPath, .Cluster = cluster};
- TFmrTableRef fmrTable{.TableId = fmrTableId};
- tablePresenceStatuses[fmrTableId] = ETablePresenceStatus::Both;
- downloadToFmrFuture = DownloadToFmrTableDataSerivce(ytTable, fmrTable, sessionId, options.Config());
- downloadedSuccessfully = downloadToFmrFuture.Apply([downloadedSuccessfully] (auto& downloadFuture) {
- auto downloadResult = downloadFuture.GetValueSync();
- });
- } else {
- downloadedSuccessfully = MakeFuture();
- }
+ TFmrTableRef fmrTableRef = TFmrTableRef{outTableWithCluster.second + "." + transformedInputPath};
+ uploadFmrTablesToYtFutures.emplace_back(DoUpload(fmrTableRef, sessionId, config, outputBase, ctx));
}
- downloadedSuccessfully.Wait(); // blocking until download to fmr finishes
-
- TUploadOperationParams uploadOperationParams{
- .Input = TFmrTableRef{fmrTableId},
- .Output = TYtTableRef{outputPath, cluster}
- };
-
- auto clusterConnectionOptions = TClusterConnectionOptions(options.SessionId())
- .Cluster(cluster).Config(options.Config());
- auto clusterConnection = GetClusterConnection(std::move(clusterConnectionOptions));
- YQL_ENSURE(clusterConnection.Success());
-
- TStartOperationRequest uploadRequest{
- .TaskType = ETaskType::Upload,
- .OperationParams = uploadOperationParams,
- .SessionId = sessionId,
- .IdempotencyKey=idempotencyKey,
- .NumRetries=1,
- .ClusterConnection = TClusterConnection{
- .TransactionId = clusterConnection.TransactionId,
- .YtServerName = clusterConnection.YtServerName,
- .Token = clusterConnection.Token
- }
- };
-
- auto promise = NewPromise<TPublishResult>();
- auto future = promise.GetFuture();
- YQL_CLOG(DEBUG, FastMapReduce) << "Starting upload to yt table: " << cluster + "." + outputPath;
- auto uploadOperationResponseFuture = Coordinator_->StartOperation(uploadRequest);
- uploadOperationResponseFuture.Subscribe([this, promise = std::move(promise), sessionId] (const auto& uploadFuture) {
- TStartOperationResponse startOperationResponse = uploadFuture.GetValueSync();
- TString operationId = startOperationResponse.OperationId;
- with_lock(SessionStates_->Mutex) {
- YQL_ENSURE(SessionStates_->Sessions.contains(sessionId));
- auto& operationStates = SessionStates_->Sessions[sessionId].OperationStates;
- auto& uploadOperationStatuses = operationStates.UploadOperationStatuses;
- YQL_ENSURE(!uploadOperationStatuses.contains(operationId));
- uploadOperationStatuses[operationId] = promise;
- }
- });
- return future;
- }
-
- TFuture<TDownloadTableToFmrResult> DownloadToFmrTableDataSerivce(
- const TYtTableRef& ytTableRef, const TFmrTableRef& fmrTableRef, const TString& sessionId, TYtSettings::TConstPtr& config)
- {
- YQL_LOG_CTX_SCOPE(TStringBuf("Gateway"), __FUNCTION__);
- TString fmrTableId = fmrTableRef.TableId;
- TDownloadOperationParams downloadOperationParams{
- .Input = ytTableRef,
- .Output = {fmrTableId}
- };
+ auto outputPath = publish.Publish().Name().StringValue();
auto idempotencyKey = GenerateId();
- auto clusterConnectionOptions = TClusterConnectionOptions(sessionId)
- .Cluster(ytTableRef.Cluster).Config(config);
- auto clusterConnection = GetClusterConnection(std::move(clusterConnectionOptions));
- YQL_ENSURE(clusterConnection.Success());
- TStartOperationRequest downloadRequest{
- .TaskType = ETaskType::Download,
- .OperationParams = downloadOperationParams,
- .SessionId = sessionId,
- .IdempotencyKey = idempotencyKey,
- .NumRetries=1,
- .ClusterConnection = TClusterConnection{
- .TransactionId = clusterConnection.TransactionId,
- .YtServerName = clusterConnection.YtServerName,
- .Token = clusterConnection.Token
- }
- };
- YQL_CLOG(DEBUG, FastMapReduce) << "Starting download from yt table: " << fmrTableId;
-
- auto promise = NewPromise<TDownloadTableToFmrResult>();
- auto future = promise.GetFuture();
-
- auto downloadOperationResponseFuture = Coordinator_->StartOperation(downloadRequest);
- downloadOperationResponseFuture.Subscribe([this, promise = std::move(promise), sessionId] (const auto& downloadFuture) {
- TStartOperationResponse downloadOperationResponse = downloadFuture.GetValueSync();
- TString operationId = downloadOperationResponse.OperationId;
- with_lock(SessionStates_->Mutex) {
- auto& operationStates = SessionStates_->Sessions[sessionId].OperationStates;
- auto& downloadOperationStatuses = operationStates.DownloadOperationStatuses;
- YQL_ENSURE(!downloadOperationStatuses.contains(operationId));
- downloadOperationStatuses[operationId] = promise;
+ return WaitExceptionOrAll(uploadFmrTablesToYtFutures).Apply([&, pos = nodePos, curNode = std::move(node), options = std::move(options)] (const TFuture<void>& f) mutable {
+ try {
+ f.GetValue(); // rethrow error if any
+ return Slave_->Publish(curNode, ctx, std::move(options));
+ } catch (...) {
+ return MakeFuture(ResultFromCurrentException<TPublishResult>(pos));
}
});
- return future;
}
TClusterConnectionResult GetClusterConnection(const TClusterConnectionOptions&& options) override {
@@ -267,29 +210,233 @@ public:
YQL_ENSURE(sessions.contains(sessionId));
auto& operationStates = sessions[sessionId].OperationStates;
- auto cancelOperationsFunc = [&] <typename T> (std::unordered_map<TString, TPromise<T>>& operationStatuses) {
+ auto cancelOperationsFunc = [&] (std::unordered_map<TString, TPromise<TFmrOperationResult>>& operationStatuses) {
std::vector<TFuture<TDeleteOperationResponse>> cancelOperationsFutures;
for (auto& [operationId, promise]: operationStatuses) {
cancelOperationsFutures.emplace_back(Coordinator_->DeleteOperation({operationId}));
}
NThreading::WaitAll(cancelOperationsFutures).GetValueSync();
- for (auto& [operationId, promise]: operationStatuses) {
- SendOperationCompletionSignal(promise, false);
- }
};
- cancelOperationsFunc(operationStates.DownloadOperationStatuses);
- cancelOperationsFunc(operationStates.UploadOperationStatuses);
+ cancelOperationsFunc(operationStates.OperationStatuses);
}
Slave_->CleanupSession(std::move(options)).Wait();
return MakeFuture();
}
private:
+ TString GenerateId() {
+ return GetGuidAsString(RandomProvider_->GenGuid());
+ }
+
+ TString GetUsername(const TString& sessionId) {
+ with_lock(SessionStates_->Mutex) {
+ YQL_ENSURE(SessionStates_->Sessions.contains(sessionId));
+ auto& session = SessionStates_->Sessions[sessionId];
+ return session.UserName;
+ }
+ }
+
+ TString GetTransformedPath(const TString& path, const TString& sessionId, TYtSettings::TConstPtr& config) {
+ TString username = GetUsername(sessionId);
+ return NYql::TransformPath(GetTablesTmpFolder(*config), path, true, username);
+ }
+
+ void SetTablePresenceStatus(const TString& fmrTableId, const TString& sessionId, ETablePresenceStatus newStatus) {
+ with_lock(SessionStates_->Mutex) {
+ auto& tablePresenceStatuses = SessionStates_->Sessions[sessionId].TablePresenceStatuses;
+ tablePresenceStatuses[fmrTableId] = newStatus;
+ }
+ }
+
+ TMaybe<ETablePresenceStatus> GetTablePresenceStatus(const TString& fmrTableId, const TString& sessionId) {
+ with_lock(SessionStates_->Mutex) {
+ auto& tablePresenceStatuses = SessionStates_->Sessions[sessionId].TablePresenceStatuses;
+ if (!tablePresenceStatuses.contains(fmrTableId)) {
+ return Nothing();
+ }
+ return tablePresenceStatuses[fmrTableId];
+ }
+ }
+
+ std::vector<TYtTableRef> GetMergeInputTables(const TYtMerge& ytMerge) {
+ auto input = ytMerge.Maybe<TYtTransientOpBase>().Cast().Input();
+ std::vector<TYtTableRef> inputTables;
+ for (auto section: input.Cast<TYtSectionList>()) {
+ for (auto path: section.Paths()) {
+ TYtPathInfo pathInfo(path);
+ TYtTableRef ytTable{.Path = pathInfo.Table->Name, .Cluster = pathInfo.Table->Cluster};
+ inputTables.emplace_back(ytTable);
+ }
+ }
+ return inputTables;
+ }
+
+ TYtTableRef GetMergeOutputTable(const TYtMerge& ytMerge) {
+ auto output = ytMerge.Maybe<TYtOutputOpBase>().Cast().Output();
+ std::vector<TYtTableRef> outputTables;
+ for (auto table: output) {
+ TYtOutTableInfo tableInfo(table);
+ TString outTableName = tableInfo.Name;
+ if (outTableName.empty()) {
+ outTableName = TStringBuilder() << "tmp/" << GetGuidAsString(RandomProvider_->GenGuid());
+ }
+ outputTables.emplace_back(outTableName, tableInfo.Cluster);
+ }
+ YQL_ENSURE(outputTables.size() == 1);
+ return outputTables[0];
+ }
+
+ TString GetClusterFromMergeTables(const std::vector<TYtTableRef>& inputTables, TYtTableRef& outputTable) {
+ std::unordered_set<TString> clusters;
+ for (auto& [path, cluster]: inputTables) {
+ clusters.emplace(cluster);
+ }
+ YQL_ENSURE(clusters.size() == 1);
+ TString cluster = *clusters.begin();
+ if (outputTable.Cluster) {
+ YQL_ENSURE(outputTable.Cluster == cluster);
+ } else {
+ outputTable.Cluster = cluster;
+ }
+ return cluster;
+ }
+
+ TClusterConnection GetTablesClusterConnection(const TString& cluster, const TString& sessionId, TYtSettings::TConstPtr& config) {
+ auto clusterConnectionOptions = TClusterConnectionOptions(sessionId).Cluster(cluster).Config(config);
+ auto clusterConnection = GetClusterConnection(std::move(clusterConnectionOptions));
+ return TClusterConnection{
+ .TransactionId = clusterConnection.TransactionId,
+ .YtServerName = clusterConnection.YtServerName,
+ .Token = clusterConnection.Token
+ };
+ }
+
+ TFuture<TFmrOperationResult> GetRunningOperationFuture(const TStartOperationRequest& startOperationRequest, const TString& sessionId) {
+ auto promise = NewPromise<TFmrOperationResult>();
+ auto future = promise.GetFuture();
+ auto startOperationResponseFuture = Coordinator_->StartOperation(startOperationRequest);
+ startOperationResponseFuture.Subscribe([this, promise = std::move(promise), sessionId] (const auto& mergeFuture) {
+ TStartOperationResponse mergeOperationResponse = mergeFuture.GetValueSync();
+ TString operationId = mergeOperationResponse.OperationId;
+ with_lock(SessionStates_->Mutex) {
+ auto& operationStates = SessionStates_->Sessions[sessionId].OperationStates;
+ auto& operationStatuses = operationStates.OperationStatuses;
+ YQL_ENSURE(!operationStatuses.contains(operationId));
+ operationStatuses[operationId] = promise;
+ }
+ });
+ return future;
+ }
+
+ TFuture<TFmrOperationResult> DoUpload(const TFmrTableRef& fmrTableRef, const TString& sessionId, TYtSettings::TConstPtr& config, TExprNode::TPtr outputOpBase, TExprContext& ctx) {
+ YQL_LOG_CTX_ROOT_SESSION_SCOPE(sessionId);
+ std::vector<TString> ytTableInfo;
+ StringSplitter(fmrTableRef.TableId).SplitByString(".").AddTo(&ytTableInfo);
+ YQL_ENSURE(ytTableInfo.size() == 2);
+ TString outputCluster = ytTableInfo[0], outputPath = ytTableInfo[1];
+ auto tablePresenceStatus = GetTablePresenceStatus(fmrTableRef.TableId, sessionId);
+ if (!tablePresenceStatus || *tablePresenceStatus != ETablePresenceStatus::OnlyInFmr) {
+ YQL_CLOG(DEBUG, FastMapReduce) << " We assume table " << fmrTableRef.TableId << " should be present in yt, not uploading from fmr";
+ TFmrOperationResult fmrOperationResult = TFmrOperationResult();
+ fmrOperationResult.SetSuccess();
+ return MakeFuture(fmrOperationResult);
+ }
+
+ TUploadOperationParams uploadOperationParams{
+ .Input = fmrTableRef,
+ .Output = TYtTableRef{.Path = outputPath, .Cluster = outputCluster}
+ };
+
+ auto clusterConnection = GetTablesClusterConnection(outputCluster, sessionId, config);
+ TStartOperationRequest uploadRequest{
+ .TaskType = ETaskType::Upload,
+ .OperationParams = uploadOperationParams,
+ .SessionId = sessionId,
+ .IdempotencyKey = GenerateId(),
+ .NumRetries=1,
+ .ClusterConnection = clusterConnection,
+ .FmrOperationSpec = config->FmrOperationSpec.Get(outputCluster)
+ };
+
+ auto prepareOptions = TPrepareOptions(sessionId)
+ .Config(config);
+ auto prepareFuture = Slave_->Prepare(outputOpBase, ctx, std::move(prepareOptions));
+
+ return prepareFuture.Apply([this, uploadRequest = std::move(uploadRequest), sessionId = std::move(sessionId), fmrTableId = std::move(fmrTableRef.TableId)] (const TFuture<TRunResult>& f) {
+ try {
+ f.GetValue(); // rethrow error if any
+ YQL_LOG_CTX_ROOT_SESSION_SCOPE(sessionId);
+ YQL_CLOG(DEBUG, FastMapReduce) << "Starting upload from fmr to yt for table: " << fmrTableId;
+ return GetRunningOperationFuture(uploadRequest, sessionId).Apply([this, sessionId = std::move(sessionId), fmrTableId = std::move(fmrTableId)] (const TFuture<TFmrOperationResult>& f) {
+ try {
+ YQL_LOG_CTX_ROOT_SESSION_SCOPE(sessionId);
+ auto fmrUploadResult = f.GetValue();
+ SetTablePresenceStatus(fmrTableId, sessionId, ETablePresenceStatus::Both);
+ return f;
+ } catch (...) {
+ YQL_CLOG(ERROR, FastMapReduce) << CurrentExceptionMessage();
+ return MakeFuture(ResultFromCurrentException<TFmrOperationResult>());
+ }
+ });
+ } catch (...) {
+ YQL_CLOG(ERROR, FastMapReduce) << CurrentExceptionMessage();
+ return MakeFuture(ResultFromCurrentException<TFmrOperationResult>());
+ }
+ });
+ }
+
+ TFuture<TFmrOperationResult> DoMerge(const std::vector<TYtTableRef>& inputTables, TYtTableRef& outputTable, TRunOptions&& options) {
+ TString sessionId = options.SessionId();
+ YQL_LOG_CTX_ROOT_SESSION_SCOPE(sessionId);
+ YQL_LOG_CTX_SCOPE(TStringBuf("Gateway"), __FUNCTION__);
+ auto cluster = GetClusterFromMergeTables(inputTables, outputTable); // Can set outputTable.Cluster if empty
+
+ TString outputTableId = outputTable.Path, outputCluster = outputTable.Cluster;
+ TString transformedOutputTableId = GetTransformedPath(outputTableId, sessionId, options.Config());
+ TFmrTableRef fmrOutputTable{.TableId = outputCluster + "." + transformedOutputTableId};
+
+ std::vector<TOperationTableRef> mergeInputTables;
+ for (auto& ytTable: inputTables) {
+ TString fmrTableId = ytTable.Cluster + "." + ytTable.Path;
+ auto tablePresenceStatus = GetTablePresenceStatus(fmrTableId, sessionId);
+ if (!tablePresenceStatus) {
+ SetTablePresenceStatus(fmrTableId, sessionId, ETablePresenceStatus::OnlyInYt);
+ }
+
+ if (tablePresenceStatus && *tablePresenceStatus != ETablePresenceStatus::OnlyInYt) {
+ // table is in fmr, do not download
+ mergeInputTables.emplace_back(TFmrTableRef{.TableId = fmrTableId});
+ } else {
+ mergeInputTables.emplace_back(ytTable);
+ }
+ }
+
+ TMergeOperationParams mergeOperationParams{.Input = mergeInputTables,.Output = fmrOutputTable};
+ auto clusterConnection = GetTablesClusterConnection(cluster, sessionId, options.Config());
+ TStartOperationRequest mergeOperationRequest{
+ .TaskType = ETaskType::Merge,
+ .OperationParams = mergeOperationParams,
+ .SessionId = sessionId,
+ .IdempotencyKey = GenerateId(),
+ .NumRetries = 1,
+ .ClusterConnection = clusterConnection,
+ .FmrOperationSpec = options.Config()->FmrOperationSpec.Get(outputCluster)
+ };
+
+ std::vector<TString> inputPaths;
+ std::transform(inputTables.begin(),inputTables.end(), std::back_inserter(inputPaths), [](const TYtTableRef& ytTableRef){
+ return ytTableRef.Path;}
+ );
+
+ YQL_CLOG(DEBUG, FastMapReduce) << "Starting merge from yt tables: " << JoinRange(' ', inputPaths.begin(), inputPaths.end());
+ return GetRunningOperationFuture(mergeOperationRequest, sessionId);
+ }
+
+private:
struct TFmrGatewayOperationsState {
- std::unordered_map<TString, TPromise<TPublishResult>> UploadOperationStatuses = {}; // operationId -> promise which we set when operation completes
- std::unordered_map<TString, TPromise<TDownloadTableToFmrResult>> DownloadOperationStatuses = {};
+ std::unordered_map<TString, TPromise<TFmrOperationResult>> OperationStatuses = {}; // operationId -> promise which we set when operation completes
};
struct TSessionInfo {
@@ -309,31 +456,6 @@ private:
TDuration TimeToSleepBetweenGetOperationRequests_;
std::thread GetOperationStatusesThread_;
std::atomic<bool> StopFmrGateway_;
-
- TString GenerateId() {
- return GetGuidAsString(RandomProvider_->GenGuid());
- }
-
- template <std::derived_from<NCommon::TOperationResult> T>
- void SendOperationCompletionSignal(TPromise<T> promise, bool completedSuccessfully = false, const std::vector<TFmrError>& errorMessages = {}) {
- YQL_ENSURE(!promise.HasValue());
- T commonOperationResult{};
- if (completedSuccessfully) {
- commonOperationResult.SetSuccess();
- } else if (!errorMessages.empty()) {
- auto exception = yexception() << "Operation failed with errors: " << JoinSeq(" ", errorMessages);
- commonOperationResult.SetException(exception);
- }
- promise.SetValue(commonOperationResult);
- }
-
- TString GetUsername(const TString& sessionId) {
- with_lock(SessionStates_->Mutex) {
- YQL_ENSURE(SessionStates_->Sessions.contains(sessionId));
- auto& session = SessionStates_->Sessions[sessionId];
- return session.UserName;
- }
- }
};
} // namespace
diff --git a/yt/yql/providers/yt/gateway/lib/yt_helpers.cpp b/yt/yql/providers/yt/gateway/lib/yt_helpers.cpp
index 6ad53ec6d9..dd4ba61d7c 100644
--- a/yt/yql/providers/yt/gateway/lib/yt_helpers.cpp
+++ b/yt/yql/providers/yt/gateway/lib/yt_helpers.cpp
@@ -369,7 +369,7 @@ static bool IterateRows(NYT::ITransactionPtr tx,
} else {
auto format = specsCache.GetSpecs().MakeInputFormat(tableIndex);
auto rawReader = tx->CreateRawReader(path, format, readerOptions);
- TMkqlReaderImpl reader(*rawReader, 0, 4 << 10, tableIndex);
+ TMkqlReaderImpl reader(*rawReader, 0, 4 << 10, tableIndex, true);
reader.SetSpecs(specsCache.GetSpecs(), specsCache.GetHolderFactory());
for (reader.Next(); reader.IsValid(); reader.Next()) {
diff --git a/yt/yql/providers/yt/gateway/native/yql_yt_native.cpp b/yt/yql/providers/yt/gateway/native/yql_yt_native.cpp
index cb0e35fb78..4f476876b2 100644
--- a/yt/yql/providers/yt/gateway/native/yql_yt_native.cpp
+++ b/yt/yql/providers/yt/gateway/native/yql_yt_native.cpp
@@ -1060,15 +1060,6 @@ public:
}
const bool initial = NYql::HasSetting(publish.Settings().Ref(), EYtSettingType::Initial);
- std::unordered_map<EYtSettingType, TString> strOpts;
- for (const auto& setting : publish.Settings().Ref().Children()) {
- if (setting->ChildrenSize() == 2) {
- strOpts.emplace(FromString<EYtSettingType>(setting->Head().Content()), setting->Tail().Content());
- } else if (setting->ChildrenSize() == 1) {
- strOpts.emplace(FromString<EYtSettingType>(setting->Head().Content()), TString());;
- }
- }
-
YQL_CLOG(INFO, ProviderYt) << "Mode: " << mode << ", IsInitial: " << initial;
TSession::TPtr session = GetSession(options.SessionId());
@@ -1079,15 +1070,35 @@ public:
TVector<TSrcTable> src;
ui64 chunksCount = 0;
ui64 dataSize = 0;
- std::unordered_set<TString> columnGroups;
+ TSet<TString> srcColumnGroupAlts;
+ bool first = true;
+ const TStructExprType* itemType = nullptr;
for (auto out: publish.Input()) {
auto outTableWithCluster = GetOutTableWithCluster(out);
auto outTable = outTableWithCluster.first.Cast<TYtOutTable>();
src.emplace_back(outTable.Name().StringValue(), outTableWithCluster.second);
- if (auto columnGroupSetting = NYql::GetSetting(outTable.Settings().Ref(), EYtSettingType::ColumnGroups)) {
- columnGroups.emplace(columnGroupSetting->Tail().Content());
- } else {
- columnGroups.emplace();
+ if (first) {
+ itemType = GetSeqItemType(*outTable.Ref().GetTypeAnn()).Cast<TStructExprType>();
+ if (auto columnGroupSetting = NYql::GetSetting(outTable.Settings().Ref(), EYtSettingType::ColumnGroups)) {
+ srcColumnGroupAlts.emplace(columnGroupSetting->Tail().Content());
+ TString expanded;
+ if (ExpandDefaultColumnGroup(columnGroupSetting->Tail().Content(), *itemType, expanded)) {
+ srcColumnGroupAlts.insert(expanded);
+ }
+ }
+ first = false;
+ } else if (!srcColumnGroupAlts.empty()) {
+ if (auto columnGroupSetting = NYql::GetSetting(outTable.Settings().Ref(), EYtSettingType::ColumnGroups)) {
+ if (!srcColumnGroupAlts.contains(columnGroupSetting->Tail().Content())) {
+ TString expanded;
+ if (!ExpandDefaultColumnGroup(columnGroupSetting->Tail().Content(), *GetSeqItemType(*outTable.Ref().GetTypeAnn()).Cast<TStructExprType>(), expanded)
+ || !srcColumnGroupAlts.contains(expanded)) {
+ srcColumnGroupAlts.clear();
+ }
+ }
+ } else {
+ srcColumnGroupAlts.clear();
+ }
}
auto stat = TYtTableStatInfo(outTable.Stat());
chunksCount += stat.ChunkCount;
@@ -1099,7 +1110,38 @@ public:
if (src.size() > 10) {
YQL_CLOG(INFO, ProviderYt) << "...total input tables=" << src.size();
}
- TString srcColumnGroups = columnGroups.size() == 1 ? *columnGroups.cbegin() : TString();
+
+ bool forceMerge = false;
+ bool forceTransform = false;
+ std::unordered_map<EYtSettingType, TString> strOpts;
+ for (const auto& setting : publish.Settings().Ref().Children()) {
+ const auto settingType = FromString<EYtSettingType>(setting->Head().Content());
+ if (setting->ChildrenSize() == 2) {
+ TString value = TString{setting->Tail().Content()};
+ if (EYtSettingType::ColumnGroups == settingType) {
+ bool groupDiff = false;
+ if (srcColumnGroupAlts.empty()) {
+ groupDiff = true;
+ } else {
+ if (!srcColumnGroupAlts.contains(value)) {
+ TString expanded;
+ YQL_ENSURE(itemType);
+ if (ExpandDefaultColumnGroup(value, *itemType, expanded)) {
+ value = std::move(expanded);
+ groupDiff = !srcColumnGroupAlts.contains(value);
+ }
+ }
+ }
+ if (groupDiff) {
+ forceMerge = forceTransform = true;
+ YQL_CLOG(INFO, ProviderYt) << "Column groups diff forces merge";
+ }
+ }
+ strOpts.emplace(settingType, value);
+ } else if (setting->ChildrenSize() == 1) {
+ strOpts.emplace(settingType, TString());
+ }
+ }
bool combineChunks = false;
if (auto minChunkSize = options.Config()->MinPublishedAvgChunkSize.Get()) {
@@ -1111,6 +1153,7 @@ public:
YQL_CLOG(INFO, ProviderYt) << "Output: " << cluster << '.' << dst;
if (combineChunks) {
YQL_CLOG(INFO, ProviderYt) << "Use chunks combining";
+ forceMerge = true;
}
if (Services_.Config->GetLocalChainTest()) {
if (!src.empty()) {
@@ -1130,9 +1173,9 @@ public:
const ui32 dstEpoch = TEpochInfo::Parse(publish.Publish().Epoch().Ref()).GetOrElse(0);
auto execCtx = MakeExecCtx(std::move(options), session, cluster, node.Get(), &ctx);
- return session->Queue_->Async([execCtx, src = std::move(src), dst, dstEpoch, isAnonymous, mode, initial, srcColumnGroups, combineChunks, strOpts = std::move(strOpts)] () mutable {
+ return session->Queue_->Async([execCtx, src = std::move(src), dst, dstEpoch, isAnonymous, mode, initial, combineChunks, forceMerge, forceTransform, strOpts = std::move(strOpts)] () mutable {
YQL_LOG_CTX_ROOT_SESSION_SCOPE(execCtx->LogCtx_);
- return ExecPublish(execCtx, std::move(src), dst, dstEpoch, isAnonymous, mode, initial, srcColumnGroups, combineChunks, strOpts);
+ return ExecPublish(execCtx, std::move(src), dst, dstEpoch, isAnonymous, mode, initial, combineChunks, forceMerge, forceTransform, strOpts);
})
.Apply([nodePos] (const TFuture<void>& f) {
try {
@@ -2414,8 +2457,9 @@ private:
const bool isAnonymous,
EYtWriteMode mode,
const bool initial,
- const TString& srcColumnGroups,
const bool combineChunks,
+ bool forceMerge,
+ bool forceTransform,
const std::unordered_map<EYtSettingType, TString>& strOpts)
{
TString tmpFolder = GetTablesTmpFolder(*execCtx->Options_.Config());
@@ -2490,8 +2534,6 @@ private:
);
}
- bool forceMerge = combineChunks;
-
NYT::MergeNodes(yqlAttrs, GetUserAttributes(execCtx->GetEntryForCluster(src.back().Cluster)->Tx, src.back().Name, true));
NYT::MergeNodes(yqlAttrs, YqlOpOptionsToAttrs(execCtx->Session_->OperationOptions_));
if (EYtWriteMode::RenewKeepMeta == mode) {
@@ -2582,8 +2624,6 @@ private:
}
}
- bool forceTransform = false;
-
#define DEFINE_OPT(name, attr, transform) \
auto dst##name = isAnonymous \
? execCtx->Options_.Config()->Temporary##name.Get(cluster) \
@@ -2612,10 +2652,6 @@ private:
NYT::TNode columnGroupsSpec;
if (const auto it = strOpts.find(EYtSettingType::ColumnGroups); it != strOpts.cend() && execCtx->Options_.Config()->OptimizeFor.Get(cluster).GetOrElse(NYT::OF_LOOKUP_ATTR) != NYT::OF_LOOKUP_ATTR) {
columnGroupsSpec = NYT::NodeFromYsonString(it->second);
- if (it->second != srcColumnGroups) {
- forceMerge = forceTransform = true;
- YQL_CLOG(INFO, ProviderYt) << "Column groups diff forces merge, src=" << srcColumnGroups << ", dst=" << it->second;
- }
}
TFuture<void> res;
@@ -2656,7 +2692,7 @@ private:
input = TRichYPath(std::get<0>(*p)).TransactionId(std::get<1>(*p)).OriginalPath(NYT::AddPathPrefix(dstPath, NYT::TConfig::Get()->Prefix)).Columns(columns);
}
} else {
- input = TRichYPath(dstPath).Columns(columns);
+ input = TRichYPath(dstPath).Columns(columns);
}
mergeSpec.AddInput(input);
}
diff --git a/yt/yql/providers/yt/gateway/native/yql_yt_transform.cpp b/yt/yql/providers/yt/gateway/native/yql_yt_transform.cpp
index 26af2460bf..18e6204a58 100644
--- a/yt/yql/providers/yt/gateway/native/yql_yt_transform.cpp
+++ b/yt/yql/providers/yt/gateway/native/yql_yt_transform.cpp
@@ -82,11 +82,7 @@ TCallableVisitFunc TGatewayTransformer::operator()(TInternName internName) {
if (EPhase::Content == Phase_ || EPhase::All == Phase_) {
return [&, name, useBlocks](NMiniKQL::TCallable& callable, const TTypeEnvironment& env) {
- if (useBlocks) {
- YQL_ENSURE(callable.GetInputsCount() == 4, "Expected 4 args");
- } else {
- YQL_ENSURE(callable.GetInputsCount() == 3, "Expected 3 args");
- }
+ YQL_ENSURE(callable.GetInputsCount() == 3, "Expected 3 args");
const TString cluster = ExecCtx_.Cluster_;
const TString tmpFolder = GetTablesTmpFolder(*Settings_);
@@ -331,7 +327,6 @@ TCallableVisitFunc TGatewayTransformer::operator()(TInternName internName) {
callable.GetType()->GetReturnType());
if (useBlocks) {
call.Add(PgmBuilder_.NewDataLiteral<NUdf::EDataSlot::String>(uniqueId));
- call.Add(callable.GetInput(3)); // orig struct type
call.Add(PgmBuilder_.NewDataLiteral(tableList->GetItemsCount()));
call.Add(PgmBuilder_.NewDataLiteral<NUdf::EDataSlot::String>(NYT::NodeToYsonString(specNode)));
call.Add(PgmBuilder_.NewDataLiteral(ETableContentDeliveryMode::File == deliveryMode)); // use compression
diff --git a/yt/yql/providers/yt/job/yql_job_user.cpp b/yt/yql/providers/yt/job/yql_job_user.cpp
index f9e923ccff..0b355e6609 100644
--- a/yt/yql/providers/yt/job/yql_job_user.cpp
+++ b/yt/yql/providers/yt/job/yql_job_user.cpp
@@ -171,6 +171,7 @@ void TYqlUserJob::DoImpl(const TFile& inHandle, const TVector<TFile>& outHandles
}
if (UseBlockInput) {
MkqlIOSpecs->SetUseBlockInput();
+ MkqlIOSpecs->SetInputBlockRepresentation(TMkqlIOSpecs::EBlockRepresentation::WideBlock);
}
if (UseBlockOutput) {
MkqlIOSpecs->SetUseBlockOutput();
diff --git a/yt/yql/providers/yt/lib/expr_traits/yql_expr_traits.cpp b/yt/yql/providers/yt/lib/expr_traits/yql_expr_traits.cpp
index a55d9baf65..8d6b31a157 100644
--- a/yt/yql/providers/yt/lib/expr_traits/yql_expr_traits.cpp
+++ b/yt/yql/providers/yt/lib/expr_traits/yql_expr_traits.cpp
@@ -372,6 +372,7 @@ namespace NYql {
TStringBuf("Last"),
TStringBuf("ToDict"),
TStringBuf("SqueezeToDict"),
+ TStringBuf("BlockStorage"),
TStringBuf("Iterator"), // Why?
TStringBuf("Collect"),
TStringBuf("Length"),
diff --git a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_helper.cpp b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_helper.cpp
index b1836d0019..89758d7b2e 100644
--- a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_helper.cpp
+++ b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_helper.cpp
@@ -678,16 +678,6 @@ TCoLambda FallbackLambdaOutput(TCoLambda lambda, TExprContext& ctx) {
return lambda;
}
-TYtDSink GetDataSink(TExprBase input, TExprContext& ctx) {
- if (auto read = input.Maybe<TCoRight>().Input().Maybe<TYtReadTable>()) {
- return TYtDSink(ctx.RenameNode(read.Cast().DataSource().Ref(), "DataSink"));
- } else if (auto out = input.Maybe<TYtOutput>()) {
- return GetOutputOp(out.Cast()).DataSink();
- } else {
- YQL_ENSURE(false, "Unknown operation input");
- }
-}
-
TYtDSink MakeDataSink(TPositionHandle pos, TStringBuf cluster, TExprContext& ctx) {
return Build<TYtDSink>(ctx, pos)
.Category()
diff --git a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_helper.h b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_helper.h
index 7cd61ba28a..6cb1af8a2c 100644
--- a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_helper.h
+++ b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_helper.h
@@ -49,7 +49,6 @@ NNodes::TCoLambda FallbackLambdaInput(NNodes::TCoLambda lambda, TExprContext& ct
NNodes::TCoLambda FallbackLambdaOutput(NNodes::TCoLambda lambda, TExprContext& ctx);
-NNodes::TYtDSink GetDataSink(NNodes::TExprBase input, TExprContext& ctx);
NNodes::TYtDSink MakeDataSink(TPositionHandle pos, TStringBuf cluster, TExprContext& ctx);
NNodes::TYtDSource MakeDataSource(TPositionHandle pos, TStringBuf cluster, TExprContext& ctx);
diff --git a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_join.cpp b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_join.cpp
index 23dc3d2d67..a59d38fb87 100644
--- a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_join.cpp
+++ b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_join.cpp
@@ -50,7 +50,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::EquiJoin(TExprBase node
} else {
hasYtInput = true;
auto cluster = DeriveClusterFromInput(list, selectionMode);
- if (!UpdateUsedCluster(inputClusters[i], cluster, selectionMode)) {
+ if (!cluster || !UpdateUsedCluster(inputClusters[i], *cluster, selectionMode)) {
return node;
}
}
diff --git a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_map.cpp b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_map.cpp
index 373d8dbf65..efc9ce9916 100644
--- a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_map.cpp
+++ b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_map.cpp
@@ -64,7 +64,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::FlatMap(TExprBase node,
const ERuntimeClusterSelectionMode selectionMode =
State_->Configuration->RuntimeClusterSelection.Get().GetOrElse(DEFAULT_RUNTIME_CLUSTER_SELECTION);
auto cluster = DeriveClusterFromInput(input, selectionMode);
- if (!IsYtCompleteIsolatedLambda(flatMap.Lambda().Ref(), syncList, cluster, false, selectionMode)) {
+ if (!cluster || !IsYtCompleteIsolatedLambda(flatMap.Lambda().Ref(), syncList, *cluster, false, selectionMode)) {
return node;
}
@@ -120,7 +120,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::FlatMap(TExprBase node,
auto ytMap = Build<TYtMap>(ctx, node.Pos())
.World(ApplySyncListToWorld(GetWorld(input, {}, ctx).Ptr(), syncList, ctx))
- .DataSink(GetDataSink(input, ctx))
+ .DataSink(MakeDataSink(node.Pos(), *cluster, ctx))
.Input(ConvertInputTable(input, ctx))
.Output()
.Add(outTables)
@@ -160,7 +160,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::LMap(TExprBase node, TE
const ERuntimeClusterSelectionMode selectionMode =
State_->Configuration->RuntimeClusterSelection.Get().GetOrElse(DEFAULT_RUNTIME_CLUSTER_SELECTION);
auto cluster = DeriveClusterFromInput(lmap.Input(), selectionMode);
- if (!IsYtCompleteIsolatedLambda(lmap.Lambda().Ref(), syncList, cluster, false, selectionMode)) {
+ if (!cluster || !IsYtCompleteIsolatedLambda(lmap.Lambda().Ref(), syncList, *cluster, false, selectionMode)) {
return node;
}
@@ -197,7 +197,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::LMap(TExprBase node, TE
auto map = Build<TYtMap>(ctx, lmap.Pos())
.World(ApplySyncListToWorld(NPrivate::GetWorld(lmap.Input(), {}, ctx).Ptr(), syncList, ctx))
- .DataSink(NPrivate::GetDataSink(lmap.Input(), ctx))
+ .DataSink(MakeDataSink(lmap.Pos(), *cluster, ctx))
.Input(NPrivate::ConvertInputTable(lmap.Input(), ctx))
.Output()
.Add(outTables)
@@ -243,19 +243,22 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::CombineByKey(TExprBase
const ERuntimeClusterSelectionMode selectionMode =
State_->Configuration->RuntimeClusterSelection.Get().GetOrElse(DEFAULT_RUNTIME_CLUSTER_SELECTION);
auto cluster = DeriveClusterFromInput(input, selectionMode);
- if (!IsYtCompleteIsolatedLambda(combineByKey.PreMapLambda().Ref(), syncList, cluster, false, selectionMode)) {
+ if (!cluster) {
return node;
}
- if (!IsYtCompleteIsolatedLambda(combineByKey.KeySelectorLambda().Ref(), syncList, cluster, false, selectionMode)) {
+ if (!IsYtCompleteIsolatedLambda(combineByKey.PreMapLambda().Ref(), syncList, *cluster, false, selectionMode)) {
return node;
}
- if (!IsYtCompleteIsolatedLambda(combineByKey.InitHandlerLambda().Ref(), syncList, cluster, false, selectionMode)) {
+ if (!IsYtCompleteIsolatedLambda(combineByKey.KeySelectorLambda().Ref(), syncList, *cluster, false, selectionMode)) {
return node;
}
- if (!IsYtCompleteIsolatedLambda(combineByKey.UpdateHandlerLambda().Ref(), syncList, cluster, false, selectionMode)) {
+ if (!IsYtCompleteIsolatedLambda(combineByKey.InitHandlerLambda().Ref(), syncList, *cluster, false, selectionMode)) {
return node;
}
- if (!IsYtCompleteIsolatedLambda(combineByKey.FinishHandlerLambda().Ref(), syncList, cluster, false, selectionMode)) {
+ if (!IsYtCompleteIsolatedLambda(combineByKey.UpdateHandlerLambda().Ref(), syncList, *cluster, false, selectionMode)) {
+ return node;
+ }
+ if (!IsYtCompleteIsolatedLambda(combineByKey.FinishHandlerLambda().Ref(), syncList, *cluster, false, selectionMode)) {
return node;
}
@@ -394,7 +397,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::CombineByKey(TExprBase
return Build<TYtOutput>(ctx, combineByKey.Pos())
.Operation<TYtMap>()
.World(ApplySyncListToWorld(GetWorld(input, {}, ctx).Ptr(), syncList, ctx))
- .DataSink(MakeDataSink(combineByKey.Pos(), cluster, ctx))
+ .DataSink(MakeDataSink(combineByKey.Pos(), *cluster, ctx))
.Input(ConvertInputTable(input, ctx))
.Output()
.Add(combineOut.ToExprNode(ctx, combineByKey.Pos()).Cast<TYtOutTable>())
diff --git a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_misc.cpp b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_misc.cpp
index a28693fa81..3b65cedd9f 100644
--- a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_misc.cpp
+++ b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_misc.cpp
@@ -326,7 +326,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::TakeOrSkip(TExprBase no
const ERuntimeClusterSelectionMode selectionMode =
State_->Configuration->RuntimeClusterSelection.Get().GetOrElse(DEFAULT_RUNTIME_CLUSTER_SELECTION);
auto cluster = DeriveClusterFromInput(input, selectionMode);
- if (!IsYtCompleteIsolatedLambda(countBase.Count().Ref(), syncList, cluster, false, selectionMode)) {
+ if (!cluster || !IsYtCompleteIsolatedLambda(countBase.Count().Ref(), syncList, *cluster, false, selectionMode)) {
return node;
}
@@ -937,7 +937,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::UpdateDataSinkCluster(T
return node;
}
- TString cluster = DeriveClusterFromSectionList(op.Input(), selectionMode);
+ TString cluster = GetClusterFromSectionList(op.Input());
if (cluster == op.DataSink().Cluster().Value()) {
return node;
}
@@ -956,7 +956,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::UpdateDataSourceCluster
}
auto op = node.Cast<TYtReadTable>();
- TString cluster = DeriveClusterFromSectionList(op.Input(), ERuntimeClusterSelectionMode::Auto);
+ TString cluster = GetClusterFromSectionList(op.Input());
if (cluster == op.DataSource().Cluster().Value()) {
return node;
}
diff --git a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_partition.cpp b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_partition.cpp
index b2035b5749..11100b77fc 100644
--- a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_partition.cpp
+++ b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_partition.cpp
@@ -40,8 +40,9 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::PartitionByKey(TExprBas
const ERuntimeClusterSelectionMode selectionMode =
State_->Configuration->RuntimeClusterSelection.Get().GetOrElse(DEFAULT_RUNTIME_CLUSTER_SELECTION);
auto cluster = DeriveClusterFromInput(input, selectionMode);
- if (!IsYtCompleteIsolatedLambda(keySelectorLambda.Ref(), syncList, cluster, false, selectionMode)
- || !IsYtCompleteIsolatedLambda(handlerLambda.Ref(), syncList, cluster, false, selectionMode)) {
+ if (!cluster
+ || !IsYtCompleteIsolatedLambda(keySelectorLambda.Ref(), syncList, *cluster, false, selectionMode)
+ || !IsYtCompleteIsolatedLambda(handlerLambda.Ref(), syncList, *cluster, false, selectionMode)) {
return node;
}
@@ -99,7 +100,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::PartitionByKey(TExprBas
}
TCoLambda sortKeySelectorLambda = partByKey.SortKeySelectorLambda().Cast<TCoLambda>();
- if (!IsYtCompleteIsolatedLambda(sortKeySelectorLambda.Ref(), syncList, cluster, false, selectionMode)) {
+ if (!IsYtCompleteIsolatedLambda(sortKeySelectorLambda.Ref(), syncList, *cluster, false, selectionMode)) {
return node;
}
@@ -732,7 +733,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::PartitionByKey(TExprBas
if (canUseReduce) {
auto reduce = Build<TYtReduce>(ctx, node.Pos())
.World(ApplySyncListToWorld(GetWorld(input, {}, ctx).Ptr(), syncList, ctx))
- .DataSink(GetDataSink(input, ctx))
+ .DataSink(MakeDataSink(node.Pos(), *cluster, ctx))
.Input(ConvertInputTable(input, ctx))
.Output()
.Add(ConvertOutTables(node.Pos(), outItemType, ctx, State_, &partByKey.Ref().GetConstraintSet()))
@@ -783,7 +784,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::PartitionByKey(TExprBas
input = Build<TYtOutput>(ctx, node.Pos())
.Operation<TYtMap>()
.World(world)
- .DataSink(GetDataSink(input, ctx))
+ .DataSink(MakeDataSink(node.Pos(), *cluster, ctx))
.Input(ConvertInputTable(input, ctx, TConvertInputOpts().MakeUnordered(unordered)))
.Output()
.Add(ConvertOutTables(node.Pos(), mapOutputType ? mapOutputType : inputItemType, ctx, State_))
@@ -807,7 +808,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::PartitionByKey(TExprBas
input = Build<TYtOutput>(ctx, node.Pos())
.Operation<TYtMerge>()
.World(world)
- .DataSink(GetDataSink(input, ctx))
+ .DataSink(MakeDataSink(node.Pos(), *cluster, ctx))
.Input(ConvertInputTable(input, ctx, opts.MakeUnordered(unordered)))
.Output()
.Add(ConvertOutTables(node.Pos(), inputItemType, ctx, State_))
@@ -832,7 +833,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::PartitionByKey(TExprBas
input = Build<TYtOutput>(ctx, node.Pos())
.Operation<TYtMap>()
.World(world)
- .DataSink(GetDataSink(input, ctx))
+ .DataSink(MakeDataSink(node.Pos(), *cluster, ctx))
.Input(ConvertInputTable(input, ctx, TConvertInputOpts().MakeUnordered(unordered)))
.Output()
.Add(ConvertOutTables(node.Pos(), mapOutputType, ctx, State_))
@@ -861,7 +862,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::PartitionByKey(TExprBas
auto result = Build<TYtMap>(ctx, node.Pos())
.World(ApplySyncListToWorld(world.Ptr(), syncList, ctx))
- .DataSink(GetDataSink(input, ctx))
+ .DataSink(MakeDataSink(node.Pos(), *cluster, ctx))
.Input(ConvertInputTable(input, ctx, TConvertInputOpts().MakeUnordered(unordered)))
.Output()
.Add(ConvertOutTables(node.Pos(), outItemType, ctx, State_, &partByKey.Ref().GetConstraintSet()))
@@ -877,7 +878,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::PartitionByKey(TExprBas
}
auto mapReduce = Build<TYtMapReduce>(ctx, node.Pos())
.World(ApplySyncListToWorld(world.Ptr(), syncList, ctx))
- .DataSink(GetDataSink(input, ctx))
+ .DataSink(MakeDataSink(node.Pos(), *cluster, ctx))
.Input(ConvertInputTable(input, ctx, TConvertInputOpts().MakeUnordered(unordered)))
.Output()
.Add(ConvertOutTables(node.Pos(), outItemType, ctx, State_, &partByKey.Ref().GetConstraintSet()))
diff --git a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_sort.cpp b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_sort.cpp
index 3ba8f17156..51f8b826bb 100644
--- a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_sort.cpp
+++ b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_sort.cpp
@@ -76,7 +76,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::Sort(TExprBase node, TE
auto cluster = DeriveClusterFromInput(sort.Input(), selectionMode);
TSyncMap syncList;
- if (!IsYtCompleteIsolatedLambda(keySelectorLambda.Ref(), syncList, cluster, false, selectionMode)) {
+ if (!cluster || !IsYtCompleteIsolatedLambda(keySelectorLambda.Ref(), syncList, *cluster, false, selectionMode)) {
return node;
}
@@ -136,7 +136,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::Sort(TExprBase node, TE
sortInput = Build<TYtOutput>(ctx, node.Pos())
.Operation<TYtMap>()
.World(world)
- .DataSink(NPrivate::GetDataSink(sort.Input(), ctx))
+ .DataSink(MakeDataSink(node.Pos(), *cluster, ctx))
.Input(NPrivate::ConvertInputTable(sort.Input(), ctx, NPrivate::TConvertInputOpts().MakeUnordered(unordered)))
.Output()
.Add(mapOut.ToExprNode(ctx, node.Pos()).Cast<TYtOutTable>())
@@ -168,7 +168,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::Sort(TExprBase node, TE
sortInput = Build<TYtOutput>(ctx, node.Pos())
.Operation<TYtMerge>()
.World(world)
- .DataSink(NPrivate::GetDataSink(sort.Input(), ctx))
+ .DataSink(MakeDataSink(node.Pos(), *cluster, ctx))
.Input(NPrivate::ConvertInputTable(sort.Input(), ctx, opts.MakeUnordered(unordered)))
.Output()
.Add(mergeOut.ToExprNode(ctx, node.Pos()).Cast<TYtOutTable>())
@@ -219,7 +219,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::Sort(TExprBase node, TE
auto res = canUseMerge ?
TExprBase(Build<TYtMerge>(ctx, node.Pos())
.World(world)
- .DataSink(NPrivate::GetDataSink(sortInput, ctx))
+ .DataSink(MakeDataSink(node.Pos(), *cluster, ctx))
.Input(NPrivate::ConvertInputTable(sortInput, ctx, opts.ClearUnordered()))
.Output()
.Add(sortOut.ToExprNode(ctx, node.Pos()).Cast<TYtOutTable>())
@@ -234,7 +234,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::Sort(TExprBase node, TE
.Done()):
TExprBase(Build<TYtSort>(ctx, node.Pos())
.World(world)
- .DataSink(NPrivate::GetDataSink(sortInput, ctx))
+ .DataSink(MakeDataSink(node.Pos(), *cluster, ctx))
.Input(NPrivate::ConvertInputTable(sortInput, ctx, opts.MakeUnordered(unordered)))
.Output()
.Add(sortOut.ToExprNode(ctx, node.Pos()).Cast<TYtOutTable>())
@@ -475,6 +475,10 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::AssumeConstraints(TExpr
return assume;
}
+ const ERuntimeClusterSelectionMode selectionMode =
+ State_->Configuration->RuntimeClusterSelection.Get().GetOrElse(DEFAULT_RUNTIME_CLUSTER_SELECTION);
+ auto cluster = DeriveClusterFromInput(input, selectionMode);
+
auto sorted = assume.Ref().GetConstraint<TSortedConstraintNode>();
auto maybeOp = input.Maybe<TYtOutput>().Operation();
@@ -559,7 +563,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::AssumeConstraints(TExpr
return Build<TYtOutput>(ctx, assume.Pos())
.Operation<TYtMerge>()
.World(GetWorld(input, {}, ctx))
- .DataSink(GetDataSink(input, ctx))
+ .DataSink(MakeDataSink(assume.Pos(), *cluster, ctx))
.Input(ConvertInputTable(input, ctx, opts))
.Output()
.Add(outTable.ToExprNode(ctx, assume.Pos()).Cast<TYtOutTable>())
@@ -620,7 +624,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::AssumeConstraints(TExpr
return Build<TYtOutput>(ctx, assume.Pos())
.Operation<TYtMap>()
.World(GetWorld(input, {}, ctx))
- .DataSink(GetDataSink(input, ctx))
+ .DataSink(MakeDataSink(assume.Pos(), *cluster, ctx))
.Input(ConvertInputTable(input, ctx))
.Output()
.Add(outTable.ToExprNode(ctx, assume.Pos()).Cast<TYtOutTable>())
diff --git a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_write.cpp b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_write.cpp
index 0cd3a828fb..d64d795d94 100644
--- a/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_write.cpp
+++ b/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_write.cpp
@@ -342,9 +342,12 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::Write(TExprBase node, T
auto cluster = TString{write.DataSink().Cluster().Value()};
const auto selectionMode = State_->Configuration->RuntimeClusterSelection.Get().GetOrElse(DEFAULT_RUNTIME_CLUSTER_SELECTION);
const auto srcCluster = DeriveClusterFromInput(write.Content(), selectionMode);
- if (selectionMode == ERuntimeClusterSelectionMode::Disable && cluster != srcCluster) {
+ if (!srcCluster) {
+ return node;
+ }
+ if (selectionMode == ERuntimeClusterSelectionMode::Disable && cluster != *srcCluster) {
ctx.AddError(TIssue(ctx.GetPosition(node.Pos()), TStringBuilder()
- << "Result from cluster " << TString{srcCluster}.Quote()
+ << "Result from cluster " << srcCluster->Quote()
<< " cannot be written to a different destination cluster " << cluster.Quote()));
return {};
}
@@ -599,12 +602,12 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::ReplaceStatWriteTable(T
const ERuntimeClusterSelectionMode selectionMode =
State_->Configuration->RuntimeClusterSelection.Get().GetOrElse(DEFAULT_RUNTIME_CLUSTER_SELECTION);
+ TString cluster;
if (!IsYtProviderInput(input, false)) {
if (!EnsurePersistable(input.Ref(), ctx)) {
return {};
}
- TString cluster;
TSyncMap syncList;
if (!IsYtCompleteIsolatedLambda(input.Ref(), syncList, cluster, false, selectionMode)) {
return node;
@@ -660,10 +663,15 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::ReplaceStatWriteTable(T
auto section = read.Input().Item(0);
auto scheme = section.Ptr()->GetTypeAnn()->Cast<TListExprType>()->GetItemType();
+ auto srcCluster = DeriveClusterFromInput(input, selectionMode);
+ if (!srcCluster) {
+ return node;
+ }
+ cluster = *srcCluster;
auto path = CopyOrTrivialMap(section.Pos(),
GetWorld(input, {}, ctx),
- GetDataSink(input, ctx),
+ MakeDataSink(section.Pos(), cluster, ctx),
*scheme,
Build<TYtSection>(ctx, section.Pos())
.InitFrom(section)
@@ -693,7 +701,7 @@ TMaybeNode<TExprBase> TYtPhysicalOptProposalTransformer::ReplaceStatWriteTable(T
return Build<TYtStatOut>(ctx, write.Pos())
.World(GetWorld(input, {}, ctx))
- .DataSink(GetDataSink(input, ctx))
+ .DataSink(MakeDataSink(write.Pos(), cluster, ctx))
.Input(newInput.Cast())
.Table(table)
.ReplaceMask(write.ReplaceMask())
diff --git a/yt/yql/providers/yt/provider/yql_yt_block_input.cpp b/yt/yql/providers/yt/provider/yql_yt_block_input.cpp
index 5f0f3e0739..3731311542 100644
--- a/yt/yql/providers/yt/provider/yql_yt_block_input.cpp
+++ b/yt/yql/providers/yt/provider/yql_yt_block_input.cpp
@@ -68,46 +68,19 @@ private:
return EnsureWideFlowType(mapLambda.Cast().Args().Arg(0).Ref(), ctx);
}
- TMaybeNode<TExprBase> TryTransformTableContent(TExprBase node, TExprContext& ctx, const TGetParents& getParents) const {
+ TMaybeNode<TExprBase> TryTransformTableContent(TExprBase node, TExprContext& ctx) const {
auto tableContent = node.Cast<TYtTableContent>();
if (!NYql::HasSetting(tableContent.Settings().Ref(), EYtSettingType::BlockInputReady)) {
return tableContent;
}
- const TParentsMap* parentsMap = getParents();
- if (auto it = parentsMap->find(tableContent.Raw()); it != parentsMap->end() && it->second.size() > 1) {
- return tableContent;
- }
-
YQL_CLOG(INFO, ProviderYt) << "Rewrite YtTableContent with block input";
- auto inputStructType = GetSeqItemType(tableContent.Ref().GetTypeAnn())->Cast<TStructExprType>();
- auto asStructBuilder = Build<TCoAsStruct>(ctx, tableContent.Pos());
- TExprNode::TListType narrowMapArgs;
- for (auto& item : inputStructType->GetItems()) {
- auto arg = ctx.NewArgument(tableContent.Pos(), item->GetName());
- asStructBuilder.Add<TCoNameValueTuple>()
- .Name().Build(item->GetName())
- .Value(arg)
- .Build();
- narrowMapArgs.push_back(std::move(arg));
- }
-
auto settings = RemoveSetting(tableContent.Settings().Ref(), EYtSettingType::BlockInputReady, ctx);
- return Build<TCoForwardList>(ctx, tableContent.Pos())
- .Stream<TCoNarrowMap>()
- .Input<TCoToFlow>()
- .Input<TCoWideFromBlocks>()
- .Input<TYtBlockTableContent>()
- .Input(tableContent.Input())
- .Settings(settings)
- .Build()
- .Build()
- .Build()
- .Lambda()
- .Args(narrowMapArgs)
- .Body(asStructBuilder.Done())
- .Build()
+ return Build<TCoListFromBlocks>(ctx, tableContent.Pos())
+ .Input<TYtBlockTableContent>()
+ .Input(tableContent.Input())
+ .Settings(settings)
.Build()
.Done();
}
diff --git a/yt/yql/providers/yt/provider/yql_yt_datasink_exec.cpp b/yt/yql/providers/yt/provider/yql_yt_datasink_exec.cpp
index 2d7e0ce5b5..05e0ca5caa 100644
--- a/yt/yql/providers/yt/provider/yql_yt_datasink_exec.cpp
+++ b/yt/yql/providers/yt/provider/yql_yt_datasink_exec.cpp
@@ -606,12 +606,16 @@ private:
);
}
- TStatusCallbackPair HandleYtDqProcessWrite(const TExprNode::TPtr& input, TExprContext& ctx) {
+ TStatusCallbackPair HandleYtDqProcessWrite(const TExprNode::TPtr& input, TExprNode::TPtr& output, TExprContext& ctx) {
const TYtDqProcessWrite op(input);
const auto section = op.Output().Cast<TYtOutSection>();
Y_ENSURE(section.Size() == 1, "TYtDqProcessWrite expects 1 output table but got " << section.Size());
const TYtOutTable tmpTable = section.Item(0);
+ if (AssignRuntimeCluster(op, output, ctx)) {
+ return SyncRepeatWithRestart();
+ }
+
if (!input->HasResult()) {
if (!tmpTable.Name().Value().empty()) {
ctx.AddError(TIssue(ctx.GetPosition(section.Pos()), TStringBuilder() << "Incomplete execution of "
diff --git a/yt/yql/providers/yt/provider/yql_yt_datasink_type_ann.cpp b/yt/yql/providers/yt/provider/yql_yt_datasink_type_ann.cpp
index 1e93f1bf93..789b821fc4 100644
--- a/yt/yql/providers/yt/provider/yql_yt_datasink_type_ann.cpp
+++ b/yt/yql/providers/yt/provider/yql_yt_datasink_type_ann.cpp
@@ -441,19 +441,6 @@ private:
}
const bool initialWrite = NYql::HasSetting(settings, EYtSettingType::Initial);
const bool monotonicKeys = NYql::HasSetting(settings, EYtSettingType::MonotonicKeys);
- TString columnGroup;
- TSet<TString> columnGroupAlts;
- if (auto setting = NYql::GetSetting(settings, EYtSettingType::ColumnGroups)) {
- if (!ValidateColumnGroups(*setting, *itemType->Cast<TStructExprType>(), ctx)) {
- return TStatus::Error;
- }
- columnGroup = setting->Tail().Content();
- columnGroupAlts.insert(columnGroup);
- TString exandedSpec;
- if (ExpandDefaultColumnGroup(setting->Tail().Content(), *itemType->Cast<TStructExprType>(), exandedSpec)) {
- columnGroupAlts.insert(std::move(exandedSpec));
- }
- }
if (!initialWrite && mode != EYtWriteMode::Append) {
ctx.AddError(TIssue(pos, TStringBuilder() <<
@@ -587,8 +574,22 @@ private:
<< GetTypeDiff(*description.RowType, *itemType)));
return TStatus::Error;
}
+ }
- if (!columnGroupAlts.empty() && !AnyOf(columnGroupAlts, [&](const auto& grp) { return description.ColumnGroupSpecAlts.contains(grp); })) {
+ TString columnGroup;
+ TSet<TString> columnGroupAlts;
+ // Check and expand column groups _after_ type alignment (TryConvertTo)
+ if (auto setting = NYql::GetSetting(settings, EYtSettingType::ColumnGroups)) {
+ if (!ValidateColumnGroups(*setting, *itemType->Cast<TStructExprType>(), ctx)) {
+ return TStatus::Error;
+ }
+ columnGroup = setting->Tail().Content();
+ columnGroupAlts.insert(columnGroup);
+ TString exandedSpec;
+ if (ExpandDefaultColumnGroup(setting->Tail().Content(), *itemType->Cast<TStructExprType>(), exandedSpec)) {
+ columnGroupAlts.insert(std::move(exandedSpec));
+ }
+ if (checkLayout && !AnyOf(columnGroupAlts, [&](const auto& grp) { return description.ColumnGroupSpecAlts.contains(grp); })) {
ctx.AddError(TIssue(pos, TStringBuilder()
<< "Insert with different "
<< ToString(EYtSettingType::ColumnGroups).Quote()
diff --git a/yt/yql/providers/yt/provider/yql_yt_datasource_constraints.cpp b/yt/yql/providers/yt/provider/yql_yt_datasource_constraints.cpp
index fcad1669af..9e3bd1fbc2 100644
--- a/yt/yql/providers/yt/provider/yql_yt_datasource_constraints.cpp
+++ b/yt/yql/providers/yt/provider/yql_yt_datasource_constraints.cpp
@@ -189,42 +189,9 @@ public:
return TStatus::Ok;
}
- TStatus HandleBlockTableContent(TExprBase input, TExprContext& ctx) {
+ TStatus HandleBlockTableContent(TExprBase input, TExprContext& /*ctx*/) {
TYtBlockTableContent tableContent = input.Cast<TYtBlockTableContent>();
-
- auto listType = tableContent.Input().Maybe<TYtOutput>()
- ? tableContent.Input().Ref().GetTypeAnn()
- : tableContent.Input().Ref().GetTypeAnn()->Cast<TTupleExprType>()->GetItems().back();
- auto itemStructType = listType->Cast<TListExprType>()->GetItemType()->Cast<TStructExprType>();
-
- auto pathRename = [&](TPartOfConstraintBase::TPathType path) -> std::vector<TPartOfConstraintBase::TPathType> {
- YQL_ENSURE(!path.empty());
-
- auto fieldIndex = itemStructType->FindItem(path[0]);
- YQL_ENSURE(fieldIndex.Defined());
-
- path[0] = ctx.GetIndexAsString(*fieldIndex);
- return { path };
- };
-
- TConstraintSet wideConstraints;
- for (auto constraint : tableContent.Input().Ref().GetAllConstraints()) {
- if (auto empty = dynamic_cast<const TEmptyConstraintNode*>(constraint)) {
- wideConstraints.AddConstraint(ctx.MakeConstraint<TEmptyConstraintNode>());
- } else if (auto sorted = dynamic_cast<const TSortedConstraintNode*>(constraint)) {
- wideConstraints.AddConstraint(sorted->RenameFields(ctx, pathRename));
- } else if (auto chopped = dynamic_cast<const TChoppedConstraintNode*>(constraint)) {
- wideConstraints.AddConstraint(chopped->RenameFields(ctx, pathRename));
- } else if (auto unique = dynamic_cast<const TUniqueConstraintNode*>(constraint)) {
- wideConstraints.AddConstraint(unique->RenameFields(ctx, pathRename));
- } else if (auto distinct = dynamic_cast<const TDistinctConstraintNode*>(constraint)) {
- wideConstraints.AddConstraint(distinct->RenameFields(ctx, pathRename));
- } else {
- YQL_ENSURE(false, "unexpected constraint");
- }
- }
-
- input.Ptr()->SetConstraints(wideConstraints);
+ input.Ptr()->CopyConstraints(tableContent.Input().Ref());
return TStatus::Ok;
}
diff --git a/yt/yql/providers/yt/provider/yql_yt_datasource_type_ann.cpp b/yt/yql/providers/yt/provider/yql_yt_datasource_type_ann.cpp
index 69dec1f558..84c811cc81 100644
--- a/yt/yql/providers/yt/provider/yql_yt_datasource_type_ann.cpp
+++ b/yt/yql/providers/yt/provider/yql_yt_datasource_type_ann.cpp
@@ -898,14 +898,26 @@ public:
auto listType = tableContent.Input().Maybe<TYtOutput>()
? tableContent.Input().Ref().GetTypeAnn()
: tableContent.Input().Ref().GetTypeAnn()->Cast<TTupleExprType>()->GetItems().back();
- auto itemStructType = listType->Cast<TListExprType>()->GetItemType()->Cast<TStructExprType>();
+ auto tableStructType = listType->Cast<TListExprType>()->GetItemType()->Cast<TStructExprType>();
+
+ TVector<const TItemExprType*> outputStructItems;
+ for (auto item : tableStructType->GetItems()) {
+ auto itemType = item->GetItemType();
+ if (itemType->IsBlockOrScalar()) {
+ ctx.AddError(TIssue(ctx.GetPosition(input.Pos()), "Input type should not be a block or scalar"));
+ return IGraphTransformer::TStatus::Error;
+ }
+
+ if (!EnsureSupportedAsBlockType(input.Pos(), *itemType, ctx, *State_->Types)) {
+ return IGraphTransformer::TStatus::Error;
+ }
- TTypeAnnotationNode::TListType multiTypeItems;
- for (auto& item: itemStructType->GetItems()) {
- multiTypeItems.emplace_back(ctx.MakeType<TBlockExprType>(item->GetItemType()));
+ outputStructItems.push_back(ctx.MakeType<TItemExprType>(item->GetName(), ctx.MakeType<TBlockExprType>(itemType)));
}
- multiTypeItems.push_back(ctx.MakeType<TScalarExprType>(ctx.MakeType<TDataExprType>(EDataSlot::Uint64)));
- input.Ptr()->SetTypeAnn(ctx.MakeType<TStreamExprType>(ctx.MakeType<TMultiExprType>(multiTypeItems)));
+ outputStructItems.push_back(ctx.MakeType<TItemExprType>(BlockLengthColumnName, ctx.MakeType<TScalarExprType>(ctx.MakeType<TDataExprType>(EDataSlot::Uint64))));
+
+ auto outputStructType = ctx.MakeType<TStructExprType>(outputStructItems);
+ input.Ptr()->SetTypeAnn(ctx.MakeType<TListExprType>(outputStructType));
if (auto columnOrder = State_->Types->LookupColumnOrder(tableContent.Input().Ref())) {
return State_->Types->SetColumnOrder(input.Ref(), *columnOrder, ctx);
diff --git a/yt/yql/providers/yt/provider/yql_yt_helpers.cpp b/yt/yql/providers/yt/provider/yql_yt_helpers.cpp
index 60428c7f64..887b57d204 100644
--- a/yt/yql/providers/yt/provider/yql_yt_helpers.cpp
+++ b/yt/yql/providers/yt/provider/yql_yt_helpers.cpp
@@ -374,12 +374,14 @@ TExprNode::TPtr ToOutTable(TYtOutput output, TExprContext& ctx) {
.Done().Ptr();
}
-TString DeriveClusterFromSection(const NNodes::TYtSection& section, ERuntimeClusterSelectionMode mode) {
+TMaybe<TString> DeriveClusterFromSection(const NNodes::TYtSection& section, ERuntimeClusterSelectionMode mode) {
TString result;
for (const auto& path : section.Paths()) {
auto info = TYtTableBaseInfo::Parse(path.Table());
YQL_ENSURE(info->Cluster, "Unexpected TYtOutTable in input section");
- YQL_ENSURE(UpdateUsedCluster(result, info->Cluster, mode));
+ if (!UpdateUsedCluster(result, info->Cluster, mode)) {
+ return {};
+ }
}
return result;
}
@@ -387,22 +389,32 @@ TString DeriveClusterFromSection(const NNodes::TYtSection& section, ERuntimeClus
} // unnamed
TString GetClusterFromSection(const NNodes::TYtSection& section) {
- return DeriveClusterFromSection(section, ERuntimeClusterSelectionMode::Auto);
+ auto result = DeriveClusterFromSection(section, ERuntimeClusterSelectionMode::Auto);
+ YQL_ENSURE(result);
+ return *result;
}
TString GetClusterFromSectionList(const NNodes::TYtSectionList& sectionList) {
- return DeriveClusterFromSectionList(sectionList, ERuntimeClusterSelectionMode::Auto);
+ auto result = DeriveClusterFromSectionList(sectionList, ERuntimeClusterSelectionMode::Auto);
+ YQL_ENSURE(result);
+ return *result;
}
-TString DeriveClusterFromSectionList(const NNodes::TYtSectionList& sectionList, ERuntimeClusterSelectionMode mode) {
+TMaybe<TString> DeriveClusterFromSectionList(const NNodes::TYtSectionList& sectionList, ERuntimeClusterSelectionMode mode) {
TString result;
for (const auto& section : sectionList) {
- YQL_ENSURE(UpdateUsedCluster(result, DeriveClusterFromSection(section, mode), mode));
+ auto sectionCluster = DeriveClusterFromSection(section, mode);
+ if (!sectionCluster.Defined()) {
+ return {};
+ }
+ if (!UpdateUsedCluster(result, *sectionCluster, mode)) {
+ return {};
+ }
}
return result;
}
-TString DeriveClusterFromInput(const NNodes::TExprBase& input, ERuntimeClusterSelectionMode mode) {
+TMaybe<TString> DeriveClusterFromInput(const NNodes::TExprBase& input, ERuntimeClusterSelectionMode mode) {
if (auto read = input.Maybe<TCoRight>().Input().Maybe<TYtReadTable>()) {
return DeriveClusterFromSectionList(read.Cast().Input(), mode);
} else if (auto output = input.Maybe<TYtOutput>()) {
diff --git a/yt/yql/providers/yt/provider/yql_yt_helpers.h b/yt/yql/providers/yt/provider/yql_yt_helpers.h
index 218885159c..9eaa4b4aeb 100644
--- a/yt/yql/providers/yt/provider/yql_yt_helpers.h
+++ b/yt/yql/providers/yt/provider/yql_yt_helpers.h
@@ -21,10 +21,14 @@ namespace NYql {
constexpr TStringBuf YtUnspecifiedCluster = "$runtime";
+// Equivalent to Derive* with mode=Auto
TString GetClusterFromSection(const NNodes::TYtSection& section);
TString GetClusterFromSectionList(const NNodes::TYtSectionList& sectionList);
-TString DeriveClusterFromSectionList(const NNodes::TYtSectionList& sectionList, ERuntimeClusterSelectionMode mode);
-TString DeriveClusterFromInput(const NNodes::TExprBase& input, ERuntimeClusterSelectionMode mode);
+
+// Derive cluster according to mode. Will return empty optional for mode=Disable if input contains multiple clusters
+TMaybe<TString> DeriveClusterFromSectionList(const NNodes::TYtSectionList& sectionList, ERuntimeClusterSelectionMode mode);
+TMaybe<TString> DeriveClusterFromInput(const NNodes::TExprBase& input, ERuntimeClusterSelectionMode mode);
+
TString GetRuntimeCluster(const TExprNode& op, const TYtState::TPtr& state);
bool UpdateUsedCluster(TString& usedCluster, const TString& newCluster, ERuntimeClusterSelectionMode mode);
diff --git a/yt/yql/providers/yt/provider/yql_yt_logical_optimize.cpp b/yt/yql/providers/yt/provider/yql_yt_logical_optimize.cpp
index c3fb88db5a..5d78e98fae 100644
--- a/yt/yql/providers/yt/provider/yql_yt_logical_optimize.cpp
+++ b/yt/yql/providers/yt/provider/yql_yt_logical_optimize.cpp
@@ -332,6 +332,9 @@ protected:
const ERuntimeClusterSelectionMode selectionMode =
State_->Configuration->RuntimeClusterSelection.Get().GetOrElse(DEFAULT_RUNTIME_CLUSTER_SELECTION);
auto cluster = DeriveClusterFromInput(input, selectionMode);
+ if (!cluster) {
+ return node;
+ }
for (auto handler: aggregate.Handlers()) {
auto trait = handler.Trait();
@@ -346,12 +349,12 @@ protected:
t.FinishHandler(),
};
for (auto lambda : lambdas) {
- if (!IsYtCompleteIsolatedLambda(lambda.Ref(), syncList, cluster, false, selectionMode)) {
+ if (!IsYtCompleteIsolatedLambda(lambda.Ref(), syncList, *cluster, false, selectionMode)) {
return node;
}
}
} else if (trait.Ref().IsCallable("AggApply")) {
- if (!IsYtCompleteIsolatedLambda(*trait.Ref().Child(2), syncList, cluster, false, selectionMode)) {
+ if (!IsYtCompleteIsolatedLambda(*trait.Ref().Child(2), syncList, *cluster, false, selectionMode)) {
return node;
}
}
diff --git a/yt/yql/providers/yt/provider/yql_yt_mkql_compiler.cpp b/yt/yql/providers/yt/provider/yql_yt_mkql_compiler.cpp
index 0c3a1a208b..a6c34dc815 100644
--- a/yt/yql/providers/yt/provider/yql_yt_mkql_compiler.cpp
+++ b/yt/yql/providers/yt/provider/yql_yt_mkql_compiler.cpp
@@ -232,23 +232,13 @@ TRuntimeNode BuildTableContentCall(TStringBuf callName,
samplingTupleItems.push_back(ctx.ProgramBuilder.NewDataLiteral(isSystemSampling));
}
- TType* outType = nullptr;
if (useBlocks) {
- auto structType = AS_TYPE(TStructType, outItemType);
-
- std::vector<TType*> outputItems;
- outputItems.reserve(structType->GetMembersCount());
- for (size_t i = 0; i < structType->GetMembersCount(); i++) {
- outputItems.push_back(ctx.ProgramBuilder.NewBlockType(structType->GetMemberType(i), TBlockType::EShape::Many));
- }
- outputItems.push_back(ctx.ProgramBuilder.NewBlockType(ctx.ProgramBuilder.NewDataType(NUdf::TDataType<ui64>::Id), TBlockType::EShape::Scalar));
- outType = ctx.ProgramBuilder.NewStreamType(ctx.ProgramBuilder.NewMultiType(outputItems));
-
- } else {
- outType = ctx.ProgramBuilder.NewListType(outItemType);
+ outItemType = ctx.ProgramBuilder.BuildBlockStructType(AS_TYPE(TStructType, outItemType));
}
- TCallableBuilder call(ctx.ProgramBuilder.GetTypeEnvironment(), callName, outType);
+ auto outListType = ctx.ProgramBuilder.NewListType(outItemType);
+
+ TCallableBuilder call(ctx.ProgramBuilder.GetTypeEnvironment(), callName, outListType);
call.Add(ctx.ProgramBuilder.NewList(listTypeGroup, groups));
call.Add(ctx.ProgramBuilder.NewTuple(samplingTupleItems));
@@ -259,10 +249,6 @@ TRuntimeNode BuildTableContentCall(TStringBuf callName,
call.Add(ctx.ProgramBuilder.NewEmptyTuple());
}
- if (useBlocks) {
- call.Add(TRuntimeNode(outItemType, true));
- }
-
auto res = TRuntimeNode(call.Build(), false);
if (settings) {
@@ -505,8 +491,8 @@ void RegisterYtMkqlCompilers(NCommon::TMkqlCallableCompilerBase& compiler) {
[](const TExprNode& node, NCommon::TMkqlBuildContext& ctx) {
TYtBlockTableContent tableContent(&node);
if (node.GetConstraint<TEmptyConstraintNode>()) {
- const auto streamType = ctx.BuildType(node, *node.GetTypeAnn());
- return ctx.ProgramBuilder.EmptyIterator(streamType);
+ const auto itemType = ctx.BuildType(node, GetSeqItemType(*node.GetTypeAnn()));
+ return ctx.ProgramBuilder.NewEmptyList(itemType);
}
auto origItemStructType = (
diff --git a/yt/yql/tests/sql/suites/multicluster/map_force.cfg b/yt/yql/tests/sql/suites/multicluster/map_force.cfg
new file mode 100644
index 0000000000..0d0b05747b
--- /dev/null
+++ b/yt/yql/tests/sql/suites/multicluster/map_force.cfg
@@ -0,0 +1,4 @@
+providers yt
+res result.txt
+in plato.PInput input1.txt
+in banach.Unused input2.txt
diff --git a/yt/yql/tests/sql/suites/multicluster/map_force.sql b/yt/yql/tests/sql/suites/multicluster/map_force.sql
new file mode 100644
index 0000000000..6df51bcf08
--- /dev/null
+++ b/yt/yql/tests/sql/suites/multicluster/map_force.sql
@@ -0,0 +1,6 @@
+/* postgres can not */
+/* yt can not */
+pragma yt.RuntimeCluster='banach';
+pragma yt.RuntimeClusterSelection='force';
+
+select key || "0" as key0, subkey from plato.PInput where subkey != "3";
diff --git a/yt/yql/tests/sql/suites/multicluster/partition_by_key_force.cfg b/yt/yql/tests/sql/suites/multicluster/partition_by_key_force.cfg
new file mode 100644
index 0000000000..0d0b05747b
--- /dev/null
+++ b/yt/yql/tests/sql/suites/multicluster/partition_by_key_force.cfg
@@ -0,0 +1,4 @@
+providers yt
+res result.txt
+in plato.PInput input1.txt
+in banach.Unused input2.txt
diff --git a/yt/yql/tests/sql/suites/multicluster/partition_by_key_force.sql b/yt/yql/tests/sql/suites/multicluster/partition_by_key_force.sql
new file mode 100644
index 0000000000..67239c3f27
--- /dev/null
+++ b/yt/yql/tests/sql/suites/multicluster/partition_by_key_force.sql
@@ -0,0 +1,9 @@
+/* postgres can not */
+/* yt can not */
+pragma yt.RuntimeCluster='banach';
+pragma yt.RuntimeClusterSelection='force';
+
+select key, max(subkey) as msk from plato.PInput group by key;
+
+select max(key) as mk from plato.PInput;
+
diff --git a/yt/yql/tests/sql/suites/multicluster/sort_force.cfg b/yt/yql/tests/sql/suites/multicluster/sort_force.cfg
new file mode 100644
index 0000000000..0d0b05747b
--- /dev/null
+++ b/yt/yql/tests/sql/suites/multicluster/sort_force.cfg
@@ -0,0 +1,4 @@
+providers yt
+res result.txt
+in plato.PInput input1.txt
+in banach.Unused input2.txt
diff --git a/yt/yql/tests/sql/suites/multicluster/sort_force.sql b/yt/yql/tests/sql/suites/multicluster/sort_force.sql
new file mode 100644
index 0000000000..074905f3d1
--- /dev/null
+++ b/yt/yql/tests/sql/suites/multicluster/sort_force.sql
@@ -0,0 +1,8 @@
+/* postgres can not */
+/* yt can not */
+pragma yt.RuntimeCluster='banach';
+pragma yt.RuntimeClusterSelection='force';
+
+select * from plato.PInput
+order by key, subkey;
+
diff --git a/yt/yt/client/api/client.cpp b/yt/yt/client/api/client.cpp
index 8eff6f8d3f..b7e4e876d1 100644
--- a/yt/yt/client/api/client.cpp
+++ b/yt/yt/client/api/client.cpp
@@ -12,6 +12,7 @@ namespace NYT::NApi {
using namespace NConcurrency;
using namespace NYTree;
+using namespace NYson;
////////////////////////////////////////////////////////////////////////////////
@@ -19,43 +20,49 @@ static constexpr auto& Logger = ApiLogger;
////////////////////////////////////////////////////////////////////////////////
-std::optional<std::string> TClusterAwareClientBase::GetClusterName(bool fetchIfNull)
+TFuture<std::optional<std::string>> TClusterAwareClientBase::GetClusterName(bool fetchIfNull)
{
{
auto guard = ReaderGuard(SpinLock_);
if (ClusterName_) {
- return ClusterName_;
+ return MakeFuture(ClusterName_);
}
}
auto clusterName = GetConnection()->GetClusterName();
- if (fetchIfNull && !clusterName) {
- clusterName = FetchClusterNameFromMasterCache();
- }
-
- if (!clusterName) {
- return {};
+ if (clusterName) {
+ auto guard = WriterGuard(SpinLock_);
+ ClusterName_ = clusterName;
+ return MakeFuture(ClusterName_);
}
- auto guard = WriterGuard(SpinLock_);
- if (!ClusterName_) {
- ClusterName_ = clusterName;
+ if (!fetchIfNull) {
+ return MakeFuture<std::optional<std::string>>({});
}
- return ClusterName_;
+ return FetchClusterNameFromMasterCache().Apply(
+ BIND([this, this_ = MakeStrong(this)] (const std::optional<std::string>& clusterName) -> std::optional<std::string> {
+ auto guard = WriterGuard(SpinLock_);
+ ClusterName_ = clusterName;
+ return ClusterName_;
+ }));
}
-std::optional<std::string> TClusterAwareClientBase::FetchClusterNameFromMasterCache()
+TFuture<std::optional<std::string>> TClusterAwareClientBase::FetchClusterNameFromMasterCache()
{
TGetNodeOptions options;
options.ReadFrom = EMasterChannelKind::MasterCache;
- auto clusterNameYsonOrError = WaitFor(GetNode(ClusterNamePath, options));
- if (!clusterNameYsonOrError.IsOK()) {
- YT_LOG_WARNING(clusterNameYsonOrError, "Could not fetch cluster name from from master cache (Path: %v)",
- ClusterNamePath);
- return {};
- }
- return ConvertTo<std::string>(clusterNameYsonOrError.Value());
+
+ return GetNode(ClusterNamePath, options).Apply(
+ BIND([] (const TErrorOr<TYsonString>& clusterNameYsonOrError) -> std::optional<std::string> {
+ if (!clusterNameYsonOrError.IsOK()) {
+ YT_LOG_WARNING(clusterNameYsonOrError, "Could not fetch cluster name from from master cache (Path: %v)",
+ ClusterNamePath);
+ return {};
+ }
+
+ return ConvertTo<std::string>(clusterNameYsonOrError.Value());
+ }));
}
////////////////////////////////////////////////////////////////////////////////
diff --git a/yt/yt/client/api/client.h b/yt/yt/client/api/client.h
index d0ada67e69..823e37ad39 100644
--- a/yt/yt/client/api/client.h
+++ b/yt/yt/client/api/client.h
@@ -84,7 +84,7 @@ struct IClient
virtual const NChaosClient::IReplicationCardCachePtr& GetReplicationCardCache() = 0;
virtual const NTransactionClient::ITimestampProviderPtr& GetTimestampProvider() = 0;
- virtual std::optional<std::string> GetClusterName(bool fetchIfNull = true) = 0;
+ virtual TFuture<std::optional<std::string>> GetClusterName(bool fetchIfNull = true) = 0;
};
DEFINE_REFCOUNTED_TYPE(IClient)
@@ -103,13 +103,13 @@ public:
//! NB: Descendants of this class should be able to perform GetNode calls,
//! so this cannot be used directly in tablet transactions.
//! Use the transaction's parent client instead.
- std::optional<std::string> GetClusterName(bool fetchIfNull) override;
+ TFuture<std::optional<std::string>> GetClusterName(bool fetchIfNull) override;
private:
YT_DECLARE_SPIN_LOCK(NThreading::TReaderWriterSpinLock, SpinLock_);
std::optional<std::string> ClusterName_;
- std::optional<std::string> FetchClusterNameFromMasterCache();
+ TFuture<std::optional<std::string>> FetchClusterNameFromMasterCache();
};
////////////////////////////////////////////////////////////////////////////////
diff --git a/yt/yt/client/api/delegating_client.h b/yt/yt/client/api/delegating_client.h
index ca2e945220..f04c210842 100644
--- a/yt/yt/client/api/delegating_client.h
+++ b/yt/yt/client/api/delegating_client.h
@@ -23,7 +23,7 @@ public:
// IClientBase methods
DELEGATE_METHOD(IConnectionPtr, GetConnection, (), ())
- DELEGATE_METHOD(std::optional<std::string>, GetClusterName,
+ DELEGATE_METHOD(TFuture<std::optional<std::string>>, GetClusterName,
(bool fetchIfNull),
(fetchIfNull))
diff --git a/yt/yt/client/api/public.h b/yt/yt/client/api/public.h
index 36dbf9d637..7322d60830 100644
--- a/yt/yt/client/api/public.h
+++ b/yt/yt/client/api/public.h
@@ -180,6 +180,8 @@ DECLARE_REFCOUNTED_STRUCT(TDetailedProfilingInfo)
DECLARE_REFCOUNTED_STRUCT(TQueryFile)
+DECLARE_REFCOUNTED_STRUCT(TQuerySecret)
+
DECLARE_REFCOUNTED_STRUCT(TSchedulingOptions)
DECLARE_REFCOUNTED_CLASS(TJobInputReader)
diff --git a/yt/yt/client/api/query_tracker_client.cpp b/yt/yt/client/api/query_tracker_client.cpp
index f374fa39ee..7bab186e52 100644
--- a/yt/yt/client/api/query_tracker_client.cpp
+++ b/yt/yt/client/api/query_tracker_client.cpp
@@ -22,9 +22,19 @@ void TQueryFile::Register(TRegistrar registrar)
////////////////////////////////////////////////////////////////////////////////
+void TQuerySecret::Register(TRegistrar registrar)
+{
+ registrar.Parameter("id", &TThis::Id).NonEmpty();
+ registrar.Parameter("category", &TThis::Category).Optional(true);
+ registrar.Parameter("subcategory", &TThis::Subcategory).Optional(true);
+ registrar.Parameter("ypath", &TThis::YPath).NonEmpty();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
void Serialize(const TQuery& query, NYson::IYsonConsumer* consumer)
{
- static_assert(pfr::tuple_size<TQuery>::value == 16);
+ static_assert(pfr::tuple_size<TQuery>::value == 17);
BuildYsonFluently(consumer)
.BeginMap()
.OptionalItem("id", query.Id)
@@ -42,6 +52,7 @@ void Serialize(const TQuery& query, NYson::IYsonConsumer* consumer)
.OptionalItem("progress", query.Progress)
.OptionalItem("annotations", query.Annotations)
.OptionalItem("error", query.Error)
+ .OptionalItem("secrets", query.Secrets)
.DoIf(static_cast<bool>(query.OtherAttributes), [&] (TFluentMap fluent) {
for (const auto& [key, value] : query.OtherAttributes->ListPairs()) {
fluent.Item(key).Value(value);
diff --git a/yt/yt/client/api/query_tracker_client.h b/yt/yt/client/api/query_tracker_client.h
index 3aa78fb92a..3fce44fe10 100644
--- a/yt/yt/client/api/query_tracker_client.h
+++ b/yt/yt/client/api/query_tracker_client.h
@@ -34,6 +34,21 @@ struct TQueryFile
DEFINE_REFCOUNTED_TYPE(TQueryFile)
+struct TQuerySecret
+ : public NYTree::TYsonStruct
+{
+ TString Id;
+ TString Category;
+ TString Subcategory;
+ TString YPath;
+
+ REGISTER_YSON_STRUCT(TQuerySecret);
+
+ static void Register(TRegistrar registrar);
+};
+
+DEFINE_REFCOUNTED_TYPE(TQuerySecret)
+
struct TStartQueryOptions
: public TTimeoutOptions
, public TQueryTrackerOptions
@@ -44,6 +59,7 @@ struct TStartQueryOptions
std::vector<TQueryFilePtr> Files;
std::optional<TString> AccessControlObject; // COMPAT(mpereskokova)
std::optional<std::vector<TString>> AccessControlObjects;
+ std::vector<TQuerySecretPtr> Secrets;
};
struct TAbortQueryOptions
@@ -111,6 +127,7 @@ struct TQuery
std::optional<TError> Error;
NYson::TYsonString Annotations;
NYTree::IAttributeDictionaryPtr OtherAttributes;
+ std::optional<NYson::TYsonString> Secrets;
};
void Serialize(const TQuery& query, NYson::IYsonConsumer* consumer);
diff --git a/yt/yt/client/api/rpc_proxy/client_base.cpp b/yt/yt/client/api/rpc_proxy/client_base.cpp
index b6db71e2c5..359c814011 100644
--- a/yt/yt/client/api/rpc_proxy/client_base.cpp
+++ b/yt/yt/client/api/rpc_proxy/client_base.cpp
@@ -254,7 +254,7 @@ TFuture<TYsonString> TClientBase::GetNode(
// COMPAT(max42): after 22.3 is everywhere, drop legacy field.
if (options.Attributes) {
- ToProto(req->mutable_legacy_attributes()->mutable_keys(), options.Attributes.Keys);
+ ToProto(req->mutable_legacy_attributes()->mutable_keys(), options.Attributes.Keys());
ToProto(req->mutable_attributes(), options.Attributes);
} else {
req->mutable_legacy_attributes()->set_all(true);
@@ -290,7 +290,7 @@ TFuture<TYsonString> TClientBase::ListNode(
// COMPAT(max42): after 22.3 is everywhere, drop legacy field.
if (options.Attributes) {
- ToProto(req->mutable_legacy_attributes()->mutable_keys(), options.Attributes.Keys);
+ ToProto(req->mutable_legacy_attributes()->mutable_keys(), options.Attributes.Keys());
ToProto(req->mutable_attributes(), options.Attributes);
} else {
req->mutable_legacy_attributes()->set_all(true);
diff --git a/yt/yt/client/api/rpc_proxy/client_impl.cpp b/yt/yt/client/api/rpc_proxy/client_impl.cpp
index a5d3dc4249..9274e82811 100644
--- a/yt/yt/client/api/rpc_proxy/client_impl.cpp
+++ b/yt/yt/client/api/rpc_proxy/client_impl.cpp
@@ -1741,6 +1741,8 @@ TFuture<NApi::TMultiTablePartitions> TClient::PartitionTables(
req->set_enable_key_guarantee(options.EnableKeyGuarantee);
req->set_enable_cookies(options.EnableCookies);
+ req->set_use_new_slicing_implementation_in_ordered_pool(options.UseNewSlicingImplementationInOrderedPool);
+
ToProto(req->mutable_transactional_options(), options);
return req->Invoke().Apply(BIND([] (const TApiServiceProxy::TRspPartitionTablesPtr& rsp) {
@@ -2274,6 +2276,20 @@ TFuture<NQueryTrackerClient::TQueryId> TClient::StartQuery(
protoFile->set_type(static_cast<NProto::EContentType>(file->Type));
}
+ for (const auto& secret : options.Secrets) {
+ auto* protoSecret = req->add_secrets();
+ protoSecret->set_id(secret->Id);
+ if (!secret->Category.empty()) {
+ protoSecret->set_category(secret->Category);
+ }
+ if (!secret->Subcategory.empty()) {
+ protoSecret->set_subcategory(secret->Subcategory);
+ }
+ if (!secret->YPath.empty()) {
+ protoSecret->set_ypath(secret->YPath);
+ }
+ }
+
return req->Invoke().Apply(BIND([] (const TApiServiceProxy::TRspStartQueryPtr& rsp) {
return FromProto<NQueryTrackerClient::TQueryId>(rsp->query_id());
}));
diff --git a/yt/yt/client/api/table_client.h b/yt/yt/client/api/table_client.h
index c6faa288fa..bbff942011 100644
--- a/yt/yt/client/api/table_client.h
+++ b/yt/yt/client/api/table_client.h
@@ -319,6 +319,9 @@ struct TPartitionTablesOptions
//! Whether to return cookies that can be fed to CreateTablePartitionReader.
bool EnableCookies = false;
+
+ //! COMPAT(apollo1321): remove in 25.2 release.
+ bool UseNewSlicingImplementationInOrderedPool = true;
};
struct TReadTablePartitionOptions
diff --git a/yt/yt/client/chaos_client/replication_card.cpp b/yt/yt/client/chaos_client/replication_card.cpp
index 78b2be376c..9aa1561afe 100644
--- a/yt/yt/client/chaos_client/replication_card.cpp
+++ b/yt/yt/client/chaos_client/replication_card.cpp
@@ -743,19 +743,31 @@ TReplicationProgress BuildMaxProgress(
if (otherIt == otherEnd) {
cmpResult = -1;
- if (!upperKeySelected && CompareRows(progressIt->LowerKey, other.UpperKey) >= 0) {
- upperKeySelected = true;
- otherTimestamp = NullTimestamp;
- tryAppendSegment(other.UpperKey, progressTimestamp);
- continue;
+ if (!upperKeySelected) {
+ int upperKeyCmpResult = CompareRows(progressIt->LowerKey, other.UpperKey);
+ if (upperKeyCmpResult >= 0) {
+ upperKeySelected = true;
+ otherTimestamp = NullTimestamp;
+ if (upperKeyCmpResult > 0) {
+ // UpperKey is smaller than progressIt->LowerKey so there's a gap to fill with progressTimestamp.
+ tryAppendSegment(other.UpperKey, progressTimestamp);
+ continue;
+ }
+ }
}
} else if (progressIt == progressEnd) {
cmpResult = 1;
- if (!upperKeySelected && CompareRows(otherIt->LowerKey, progress.UpperKey) >= 0) {
- upperKeySelected = true;
- progressTimestamp = NullTimestamp;
- tryAppendSegment(progress.UpperKey, otherTimestamp);
- continue;
+ if (!upperKeySelected) {
+ int upperKeyCmpResult = CompareRows(otherIt->LowerKey, progress.UpperKey);
+ if (upperKeyCmpResult >= 0) {
+ upperKeySelected = true;
+ progressTimestamp = NullTimestamp;
+ if (upperKeyCmpResult > 0) {
+ // UpperKey is smaller than otherIt->LowerKey so there's a gap to fill with otherTimestamp.
+ tryAppendSegment(progress.UpperKey, otherTimestamp);
+ continue;
+ }
+ }
}
} else {
cmpResult = CompareRows(progressIt->LowerKey, otherIt->LowerKey);
@@ -902,4 +914,3 @@ THashMap<TReplicaId, TDuration> ComputeReplicasLag(const THashMap<TReplicaId, TR
////////////////////////////////////////////////////////////////////////////////
} // namespace NYT::NChaosClient
-
diff --git a/yt/yt/client/driver/distributed_table_commands.cpp b/yt/yt/client/driver/distributed_table_commands.cpp
index 4942408318..0f26f86cbf 100644
--- a/yt/yt/client/driver/distributed_table_commands.cpp
+++ b/yt/yt/client/driver/distributed_table_commands.cpp
@@ -46,9 +46,9 @@ void TStartDistributedWriteSessionCommand::DoExecute(ICommandContextPtr context)
auto sessionAndCookies = WaitFor(context->GetClient()->StartDistributedWriteSession(Path, Options))
.ValueOrThrow();
- signatureGenerator->Sign(sessionAndCookies.Session.Underlying());
+ signatureGenerator->Resign(sessionAndCookies.Session.Underlying());
for (const auto& cookie : sessionAndCookies.Cookies) {
- signatureGenerator->Sign(cookie.Underlying());
+ signatureGenerator->Resign(cookie.Underlying());
}
ProduceOutput(context, [sessionAndCookies = std::move(sessionAndCookies)] (IYsonConsumer* consumer) {
@@ -146,7 +146,7 @@ void TWriteTableFragmentCommand::DoExecute(ICommandContextPtr context)
auto writer = DynamicPointerCast<NApi::ITableFragmentWriter>(TableWriter);
auto signedWriteResult = writer->GetWriteFragmentResult();
- context->GetDriver()->GetSignatureGenerator()->Sign(signedWriteResult.Underlying());
+ context->GetDriver()->GetSignatureGenerator()->Resign(signedWriteResult.Underlying());
ProduceOutput(context, [result = std::move(signedWriteResult)] (IYsonConsumer* consumer) {
Serialize(
diff --git a/yt/yt/client/driver/driver.cpp b/yt/yt/client/driver/driver.cpp
index 11c96e1335..0fa6393238 100644
--- a/yt/yt/client/driver/driver.cpp
+++ b/yt/yt/client/driver/driver.cpp
@@ -537,8 +537,8 @@ private:
TClientCachePtr ClientCache_;
const IClientPtr RootClient_;
IProxyDiscoveryCachePtr ProxyDiscoveryCache_;
- ISignatureGeneratorPtr SignatureGenerator_;
- ISignatureValidatorPtr SignatureValidator_;
+ const ISignatureGeneratorPtr SignatureGenerator_;
+ const ISignatureValidatorPtr SignatureValidator_;
class TCommandContext;
using TCommandContextPtr = TIntrusivePtr<TCommandContext>;
diff --git a/yt/yt/client/driver/query_commands.cpp b/yt/yt/client/driver/query_commands.cpp
index 4b5ccdecea..7fb7ccc531 100644
--- a/yt/yt/client/driver/query_commands.cpp
+++ b/yt/yt/client/driver/query_commands.cpp
@@ -76,6 +76,13 @@ void TStartQueryCommand::Register(TRegistrar registrar)
return command->Options.AccessControlObjects;
})
.Optional(/*init*/ false);
+
+ registrar.ParameterWithUniversalAccessor<std::vector<TQuerySecretPtr>>(
+ "secrets",
+ [] (TThis* command) -> auto& {
+ return command->Options.Secrets;
+ })
+ .Optional(/*init*/ false);
}
void TStartQueryCommand::DoExecute(ICommandContextPtr context)
diff --git a/yt/yt/client/driver/table_commands.cpp b/yt/yt/client/driver/table_commands.cpp
index 6a2d4eb19c..e31b6e43f2 100644
--- a/yt/yt/client/driver/table_commands.cpp
+++ b/yt/yt/client/driver/table_commands.cpp
@@ -528,7 +528,7 @@ void TPartitionTablesCommand::DoExecute(ICommandContextPtr context)
for (auto& partition : partitions.Partitions) {
if (partition.Cookie) {
- context->GetDriver()->GetSignatureGenerator()->Sign(partition.Cookie.Underlying());
+ context->GetDriver()->GetSignatureGenerator()->Resign(partition.Cookie.Underlying());
}
}
diff --git a/yt/yt/client/federated/client.cpp b/yt/yt/client/federated/client.cpp
index ffce8ff06c..2a07474aaa 100644
--- a/yt/yt/client/federated/client.cpp
+++ b/yt/yt/client/federated/client.cpp
@@ -342,7 +342,7 @@ public:
return client->GetConnection();
}
- std::optional<std::string> GetClusterName(bool fetchIfNull) override
+ TFuture<std::optional<std::string>> GetClusterName(bool fetchIfNull) override
{
auto [client, _] = GetActiveClient();
return client->GetClusterName(fetchIfNull);
@@ -637,7 +637,7 @@ void TClient::CheckClustersHealth()
const auto& check = checks[index];
auto error = NConcurrency::WaitFor(check);
YT_LOG_DEBUG_UNLESS(error.IsOK(), error, "Cluster %Qv is marked as unhealthy",
- UnderlyingClients_[index]->Client->GetClusterName(/*fetchIfNull*/ false));
+ UnderlyingClients_[index]->Client->GetConnection()->GetClusterName());
UnderlyingClients_[index]->HasErrors = !error.IsOK()
&& !error.FindMatching(NSecurityClient::EErrorCode::AuthorizationError); // Ignore authorization errors.
}
diff --git a/yt/yt/client/federated/unittests/client_ut.cpp b/yt/yt/client/federated/unittests/client_ut.cpp
index 5caba8db1a..baa69dc45d 100644
--- a/yt/yt/client/federated/unittests/client_ut.cpp
+++ b/yt/yt/client/federated/unittests/client_ut.cpp
@@ -21,6 +21,7 @@ using namespace NYT::NApi;
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Return;
+using ::testing::ReturnRef;
using ::testing::StrictMock;
using TStrictMockClient = StrictMock<NApi::TMockClient>;
@@ -172,9 +173,14 @@ TEST(TFederatedClientTest, CheckHealth)
auto mockClientSas = New<TStrictMockClient>();
+ auto mockConnectionVla = New<TStrictMockConnection>();
+ std::optional<std::string> clusterName("vla-cluster");
+ EXPECT_CALL(*mockConnectionVla, GetClusterName())
+ .WillRepeatedly(ReturnRef(clusterName));
+
auto mockClientVla = New<TStrictMockClient>();
- EXPECT_CALL(*mockClientVla, GetClusterName(_))
- .Times(AnyNumber());
+ EXPECT_CALL(*mockClientVla, GetConnection())
+ .WillRepeatedly(Return(mockConnectionVla));
// To identify best (closest) cluster.
NYson::TYsonString listResult1(TStringBuf(R"(["a-rpc-proxy-a.sas.yp-c.yandex.net:9013"])"));
@@ -462,17 +468,25 @@ TEST(TFederatedClientTest, AttachTransaction)
auto mockConnectionSas = New<TStrictMockConnection>();
EXPECT_CALL(*mockConnectionSas, GetClusterTag())
.WillRepeatedly(Return(NObjectClient::TCellTag(123)));
+ std::optional<std::string> clusterNameSas = "cluster-sas";
+ EXPECT_CALL(*mockConnectionSas, GetClusterName())
+ .WillRepeatedly(ReturnRef(clusterNameSas));
EXPECT_CALL(*mockClientSas, GetConnection())
- .WillOnce(Return(mockConnectionSas));
+ .WillRepeatedly(Return(mockConnectionSas));
auto mockConnectionVla = New<TStrictMockConnection>();
EXPECT_CALL(*mockConnectionVla, GetClusterTag())
.WillRepeatedly(Return(NObjectClient::TCellTag(456)));
+ std::optional<std::string> clusterNameVla = "cluster-vla";
+ EXPECT_CALL(*mockConnectionVla, GetClusterName())
+ .WillRepeatedly(ReturnRef(clusterNameVla));
+ EXPECT_CALL(*mockClientVla, GetConnection())
+ .WillRepeatedly(Return(mockConnectionVla));
// Creation of federated client.
std::vector<IClientPtr> clients{mockClientSas, mockClientVla};
auto config = New<TFederationConfig>();
- config->ClusterHealthCheckPeriod = TDuration::Seconds(5);
+ config->ClusterHealthCheckPeriod = TDuration::Seconds(1);
auto federatedClient = CreateClient(clients, config);
// Wait initialization.
diff --git a/yt/yt/client/hedging/hedging.cpp b/yt/yt/client/hedging/hedging.cpp
index b6f7e294da..5ea0675af6 100644
--- a/yt/yt/client/hedging/hedging.cpp
+++ b/yt/yt/client/hedging/hedging.cpp
@@ -66,10 +66,10 @@ public:
return Executor_->GetClient(0)->GetConnection();
}
- std::optional<std::string> GetClusterName(bool fetchIfNull = true) override
+ TFuture<std::optional<std::string>> GetClusterName(bool fetchIfNull = true) override
{
Y_UNUSED(fetchIfNull);
- return {};
+ return MakeFuture<std::optional<std::string>>({});
}
RETRYABLE_METHOD(TFuture<TUnversionedLookupRowsResult>, LookupRows, (const TYPath&, NTableClient::TNameTablePtr, const TSharedRange<NTableClient::TUnversionedRow>&, const TLookupRowsOptions&));
diff --git a/yt/yt/client/queue_client/consumer_client.cpp b/yt/yt/client/queue_client/consumer_client.cpp
index d3d8a7cb22..cccac4b318 100644
--- a/yt/yt/client/queue_client/consumer_client.cpp
+++ b/yt/yt/client/queue_client/consumer_client.cpp
@@ -672,12 +672,13 @@ ISubConsumerClientPtr CreateSubConsumerClient(
{
auto queueCluster = queuePath.GetCluster();
if (!queueCluster && queueClusterClient) {
- if (auto queueClusterFromClient = queueClusterClient->GetClusterName()) {
+ // `CreateSubConsumerClient` function calls `WaitFor` already, it will be fixed later.
+ if (auto queueClusterFromClient = WaitFor(queueClusterClient->GetClusterName()).ValueOrThrow()) {
queueCluster = *queueClusterFromClient;
}
}
if (!queueCluster) {
- if (auto clientCluster = consumerClusterClient->GetClusterName()) {
+ if (auto clientCluster = WaitFor(consumerClusterClient->GetClusterName()).ValueOrThrow()) {
queueCluster = *clientCluster;
}
}
diff --git a/yt/yt/client/scheduler/public.h b/yt/yt/client/scheduler/public.h
index 74a89e058c..4e3c22af5c 100644
--- a/yt/yt/client/scheduler/public.h
+++ b/yt/yt/client/scheduler/public.h
@@ -161,6 +161,7 @@ DEFINE_ENUM(EAbortReason,
((AddressResolveFailed) ( 57))
((UnexpectedNodeJobPhase) ( 58))
((JobCountChangedByUserRequest) ( 59))
+ ((NbdErrors) ( 60))
((SchedulingFirst) (100))
((SchedulingTimeout) (101))
((SchedulingResourceOvercommit) (102))
diff --git a/yt/yt/client/signature/generator.cpp b/yt/yt/client/signature/generator.cpp
index 33a431a5ab..799b64e620 100644
--- a/yt/yt/client/signature/generator.cpp
+++ b/yt/yt/client/signature/generator.cpp
@@ -10,25 +10,38 @@ using namespace NYson;
////////////////////////////////////////////////////////////////////////////////
-TSignaturePtr ISignatureGenerator::Sign(std::string payload)
+TSignaturePtr ISignatureGenerator::Sign(std::string payload) const
{
auto signature = New<TSignature>();
signature->Payload_ = std::move(payload);
- Sign(signature);
+ Resign(signature);
return signature;
}
////////////////////////////////////////////////////////////////////////////////
+namespace {
+
struct TDummySignatureGenerator
: public ISignatureGenerator
{
- void Sign(const TSignaturePtr& signature) override
+ void Resign(const TSignaturePtr& /*signature*/) const final
+ { }
+};
+
+struct TAlwaysThrowingSignatureGenerator
+ : public ISignatureGenerator
+{
+ void Resign(const TSignaturePtr& /*signature*/) const final
{
- signature->Header_ = NYson::TYsonString("DummySignature"_sb);
+ THROW_ERROR_EXCEPTION("Signature generation is unsupported");
}
};
+} // namespace
+
+////////////////////////////////////////////////////////////////////////////////
+
ISignatureGeneratorPtr CreateDummySignatureGenerator()
{
return New<TDummySignatureGenerator>();
@@ -40,17 +53,6 @@ const ISignatureGeneratorPtr& GetDummySignatureGenerator()
return signatureGenerator;
}
-////////////////////////////////////////////////////////////////////////////////
-
-struct TAlwaysThrowingSignatureGenerator
- : public ISignatureGenerator
-{
- void Sign(const TSignaturePtr& /*signature*/) override
- {
- THROW_ERROR_EXCEPTION("Signature generation is unsupported");
- }
-};
-
ISignatureGeneratorPtr CreateAlwaysThrowingSignatureGenerator()
{
return New<TAlwaysThrowingSignatureGenerator>();
diff --git a/yt/yt/client/signature/generator.h b/yt/yt/client/signature/generator.h
index 771288af7f..93aeb9f409 100644
--- a/yt/yt/client/signature/generator.h
+++ b/yt/yt/client/signature/generator.h
@@ -13,9 +13,9 @@ struct ISignatureGenerator
{
//! Fills out the Signature_ and Header_ fields in a given TSignature
//! based on its payload.
- virtual void Sign(const TSignaturePtr& signature) = 0;
+ virtual void Resign(const TSignaturePtr& signature) const = 0;
- [[nodiscard]] TSignaturePtr Sign(std::string payload);
+ [[nodiscard]] TSignaturePtr Sign(std::string payload) const;
};
DEFINE_REFCOUNTED_TYPE(ISignatureGenerator)
diff --git a/yt/yt/client/signature/signature.cpp b/yt/yt/client/signature/signature.cpp
index b7b26d25ec..4e59711add 100644
--- a/yt/yt/client/signature/signature.cpp
+++ b/yt/yt/client/signature/signature.cpp
@@ -25,11 +25,9 @@ void Serialize(const TSignature& signature, IYsonConsumer* consumer)
{
consumer->OnBeginMap();
BuildYsonMapFragmentFluently(consumer)
- .Item("header").Value(signature.Header_.ToString())
+ .Item("header").Value((signature.Header_ ? signature.Header_.ToString() : ""))
.Item("payload").Value(signature.Payload_)
- .Item("signature").Value(TStringBuf(
- reinterpret_cast<const char*>(signature.Signature_.data()),
- signature.Signature_.size()));
+ .Item("signature").Value(signature.Signature_);
consumer->OnEndMap();
}
@@ -40,12 +38,7 @@ void Deserialize(TSignature& signature, INodePtr node)
auto mapNode = node->AsMap();
signature.Header_ = TYsonString(mapNode->GetChildValueOrThrow<TString>("header"));
signature.Payload_ = mapNode->GetChildValueOrThrow<std::string>("payload");
-
- auto signatureString = mapNode->GetChildValueOrThrow<std::string>("signature");
- auto signatureBytes = std::as_bytes(std::span(signatureString));
- signature.Signature_.resize(signatureBytes.size());
-
- std::copy(signatureBytes.begin(), signatureBytes.end(), signature.Signature_.begin());
+ signature.Signature_ = mapNode->GetChildValueOrThrow<std::string>("signature");
}
void Deserialize(TSignature& signature, TYsonPullParserCursor* cursor)
diff --git a/yt/yt/client/signature/signature.h b/yt/yt/client/signature/signature.h
index a9bbaf8660..bab9dcab38 100644
--- a/yt/yt/client/signature/signature.h
+++ b/yt/yt/client/signature/signature.h
@@ -25,16 +25,12 @@ public:
private:
NYson::TYsonString Header_;
std::string Payload_;
- std::vector<std::byte> Signature_;
+ std::string Signature_;
friend struct ISignatureGenerator;
- friend struct TDummySignatureGenerator;
- friend struct TAlwaysThrowingSignatureGenerator;
friend class TSignatureGenerator;
friend struct ISignatureValidator;
- friend struct TDummySignatureValidator;
- friend struct TAlwaysThrowingSignatureValidator;
friend class TSignatureValidator;
friend void Serialize(const TSignature& signature, NYson::IYsonConsumer* consumer);
diff --git a/yt/yt/client/signature/unittests/dummy_ut.cpp b/yt/yt/client/signature/unittests/dummy_ut.cpp
index 364717473e..7034efd6cb 100644
--- a/yt/yt/client/signature/unittests/dummy_ut.cpp
+++ b/yt/yt/client/signature/unittests/dummy_ut.cpp
@@ -17,26 +17,16 @@ using namespace NYson;
using namespace NYTree;
const auto YsonSignature = TYsonString(
- R"({"header"="DummySignature";"payload"="payload";"signature"="";})"_sb);
+ R"({"header"="DummySignature";"payload"="payload";"signature"="abacaba";})"_sb);
////////////////////////////////////////////////////////////////////////////////
TEST(TDummySignatureGeneratorTest, Generate)
{
auto generator = CreateDummySignatureGenerator();
- auto signature = generator->Sign("payload");
- EXPECT_EQ(ConvertToYsonString(signature, EYsonFormat::Text), YsonSignature);
- generator->Sign(signature);
- EXPECT_EQ(ConvertToYsonString(signature, EYsonFormat::Text), YsonSignature);
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-TEST(TDummySignatureValidatorTest, ValidateGood)
-{
auto signature = ConvertTo<TSignaturePtr>(YsonSignature);
- auto validator = CreateDummySignatureValidator();
- EXPECT_TRUE(validator->Validate(signature).Get().Value());
+    generator->Resign(signature);
+ EXPECT_EQ(ConvertToYsonString(signature, EYsonFormat::Text).ToString(), YsonSignature.ToString());
}
////////////////////////////////////////////////////////////////////////////////
@@ -54,7 +44,7 @@ TEST(TDummySignatureValidatorTest, GenerateValidate)
TEST(TAlwaysThrowingSignatureGeneratorTest, Generate)
{
auto generator = CreateAlwaysThrowingSignatureGenerator();
- EXPECT_THROW_WITH_SUBSTRING(generator->Sign(New<TSignature>()), "unsupported");
+ EXPECT_THROW_WITH_SUBSTRING(generator->Resign(New<TSignature>()), "unsupported");
}
////////////////////////////////////////////////////////////////////////////////
diff --git a/yt/yt/client/signature/validator.cpp b/yt/yt/client/signature/validator.cpp
index aebc42d488..e4a25a3bdd 100644
--- a/yt/yt/client/signature/validator.cpp
+++ b/yt/yt/client/signature/validator.cpp
@@ -8,32 +8,35 @@ namespace NYT::NSignature {
////////////////////////////////////////////////////////////////////////////////
+namespace {
+
struct TDummySignatureValidator
: public ISignatureValidator
{
- TFuture<bool> Validate(const TSignaturePtr& signature) override
+ TFuture<bool> Validate(const TSignaturePtr& /*signature*/) const final
{
- YT_VERIFY(signature->Header_.ToString() == "DummySignature");
return TrueFuture;
}
};
-ISignatureValidatorPtr CreateDummySignatureValidator()
-{
- return New<TDummySignatureValidator>();
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
struct TAlwaysThrowingSignatureValidator
: public ISignatureValidator
{
- TFuture<bool> Validate(const TSignaturePtr& /*signature*/) override
+ TFuture<bool> Validate(const TSignaturePtr& /*signature*/) const final
{
THROW_ERROR_EXCEPTION("Signature validation is unsupported");
}
};
+} // namespace
+
+////////////////////////////////////////////////////////////////////////////////
+
+ISignatureValidatorPtr CreateDummySignatureValidator()
+{
+ return New<TDummySignatureValidator>();
+}
+
ISignatureValidatorPtr CreateAlwaysThrowingSignatureValidator()
{
return New<TAlwaysThrowingSignatureValidator>();
diff --git a/yt/yt/client/signature/validator.h b/yt/yt/client/signature/validator.h
index 94425001d3..72777f9475 100644
--- a/yt/yt/client/signature/validator.h
+++ b/yt/yt/client/signature/validator.h
@@ -13,7 +13,7 @@ namespace NYT::NSignature {
struct ISignatureValidator
: public TRefCounted
{
- virtual TFuture<bool> Validate(const TSignaturePtr& signature) = 0;
+ virtual TFuture<bool> Validate(const TSignaturePtr& signature) const = 0;
};
DEFINE_REFCOUNTED_TYPE(ISignatureValidator)
diff --git a/yt/yt/client/tablet_client/config.cpp b/yt/yt/client/tablet_client/config.cpp
index 6e30181969..dc6a909d22 100644
--- a/yt/yt/client/tablet_client/config.cpp
+++ b/yt/yt/client/tablet_client/config.cpp
@@ -82,6 +82,12 @@ void TReplicatedTableOptions::Register(TRegistrar registrar)
.Optional();
registrar.Parameter("min_sync_replica_count", &TThis::MinSyncReplicaCount)
.Optional();
+ registrar.Parameter("max_sync_queue_replica_count", &TThis::MaxSyncQueueReplicaCount)
+ .Optional()
+ .DontSerializeDefault();
+ registrar.Parameter("min_sync_queue_replica_count", &TThis::MinSyncQueueReplicaCount)
+ .Optional()
+ .DontSerializeDefault();
registrar.Parameter("enable_replicated_table_tracker", &TThis::EnableReplicatedTableTracker)
.Default(false);
@@ -100,26 +106,60 @@ void TReplicatedTableOptions::Register(TRegistrar registrar)
.Default(TDuration::Minutes(5));
registrar.Postprocessor([] (TThis* config) {
- if (config->MaxSyncReplicaCount && config->MinSyncReplicaCount && *config->MinSyncReplicaCount > *config->MaxSyncReplicaCount) {
+ if (config->MaxSyncReplicaCount &&
+ config->MinSyncReplicaCount &&
+ config->MinSyncReplicaCount > config->MaxSyncReplicaCount)
+ {
THROW_ERROR_EXCEPTION("\"min_sync_replica_count\" must be less or equal to \"max_sync_replica_count\"");
}
+
+ if (config->MaxSyncQueueReplicaCount && config->MaxSyncQueueReplicaCount < 2) {
+            THROW_ERROR_EXCEPTION("\"max_sync_queue_replica_count\" cannot be less than 2, actual: %v",
+ config->MaxSyncQueueReplicaCount);
+ }
+
+ if (config->MaxSyncQueueReplicaCount &&
+ config->MinSyncQueueReplicaCount &&
+ config->MinSyncQueueReplicaCount > config->MaxSyncQueueReplicaCount)
+ {
+ THROW_ERROR_EXCEPTION("\"min_sync_queue_replica_count\" must be less or equal to \"max_sync_queue_replica_count\"");
+ }
});
}
-std::tuple<int, int> TReplicatedTableOptions::GetEffectiveMinMaxReplicaCount(int replicaCount) const
+std::tuple<int, int> TReplicatedTableOptions::GetEffectiveMinMaxReplicaCount(
+ ETableReplicaContentType contentType,
+ int replicaCount) const
{
- int maxSyncReplicas = 0;
- int minSyncReplicas = 0;
+ auto getResult = [&] (auto minSyncReplicaCount, auto maxSyncReplicaCount) {
+ int maxSyncReplicas = 0;
+ int minSyncReplicas = 0;
+
+ if (!maxSyncReplicaCount && !minSyncReplicaCount) {
+ maxSyncReplicas = 1;
+ } else {
+ maxSyncReplicas = maxSyncReplicaCount.value_or(replicaCount);
+ }
- if (!MaxSyncReplicaCount && !MinSyncReplicaCount) {
- maxSyncReplicas = 1;
- } else {
- maxSyncReplicas = MaxSyncReplicaCount.value_or(replicaCount);
- }
+ minSyncReplicas = minSyncReplicaCount.value_or(maxSyncReplicas);
- minSyncReplicas = MinSyncReplicaCount.value_or(maxSyncReplicas);
+ return std::tuple(minSyncReplicas, maxSyncReplicas);
+ };
- return std::tuple(minSyncReplicas, maxSyncReplicas);
+ if (contentType == ETableReplicaContentType::Queue) {
+ int minSyncReplicas;
+ int maxSyncReplicas;
+ if (MinSyncQueueReplicaCount || MaxSyncQueueReplicaCount) {
+ std::tie(minSyncReplicas, maxSyncReplicas) = getResult(MinSyncQueueReplicaCount, MaxSyncQueueReplicaCount);
+ } else {
+ std::tie(minSyncReplicas, maxSyncReplicas) = getResult(MinSyncReplicaCount, MaxSyncReplicaCount);
+ }
+ return std::tuple(
+ std::max(minSyncReplicas, 1),
+ std::max(maxSyncReplicas, 2));
+ } else {
+ return getResult(MinSyncReplicaCount, MaxSyncReplicaCount);
+ }
}
////////////////////////////////////////////////////////////////////////////////
diff --git a/yt/yt/client/tablet_client/config.h b/yt/yt/client/tablet_client/config.h
index 1e5a0531f4..eaf16aa37c 100644
--- a/yt/yt/client/tablet_client/config.h
+++ b/yt/yt/client/tablet_client/config.h
@@ -104,6 +104,9 @@ public:
std::optional<int> MaxSyncReplicaCount;
std::optional<int> MinSyncReplicaCount;
+ std::optional<int> MaxSyncQueueReplicaCount;
+ std::optional<int> MinSyncQueueReplicaCount;
+
TDuration SyncReplicaLagThreshold;
// TODO(akozhikhov): We probably do not need these in this per-table config.
@@ -113,7 +116,7 @@ public:
bool EnablePreloadStateCheck;
TDuration IncompletePreloadGracePeriod;
- std::tuple<int, int> GetEffectiveMinMaxReplicaCount(int replicaCount) const;
+ std::tuple<int, int> GetEffectiveMinMaxReplicaCount(ETableReplicaContentType contentType, int replicaCount) const;
REGISTER_YSON_STRUCT(TReplicatedTableOptions);
diff --git a/yt/yt/client/unittests/mock/client.h b/yt/yt/client/unittests/mock/client.h
index 254f08dd90..8a5f8b7bdf 100644
--- a/yt/yt/client/unittests/mock/client.h
+++ b/yt/yt/client/unittests/mock/client.h
@@ -38,7 +38,7 @@ public:
MOCK_METHOD(IConnectionPtr, GetConnection, (), (override));
- MOCK_METHOD(std::optional<std::string>, GetClusterName, (bool fetchIfNull), (override));
+ MOCK_METHOD(TFuture<std::optional<std::string>>, GetClusterName, (bool fetchIfNull), (override));
MOCK_METHOD(TFuture<ITransactionPtr>, StartTransaction, (
NTransactionClient::ETransactionType type,
diff --git a/yt/yt/client/unittests/replication_progress_ut.cpp b/yt/yt/client/unittests/replication_progress_ut.cpp
index 5ef0cf5265..41746f544c 100644
--- a/yt/yt/client/unittests/replication_progress_ut.cpp
+++ b/yt/yt/client/unittests/replication_progress_ut.cpp
@@ -735,8 +735,21 @@ INSTANTIATE_TEST_SUITE_P(
"{segments=[{lower_key=[1];timestamp=1073741824};{lower_key=[2];timestamp=3221225472}];"
"upper_key=[<type=max>#]}",
"{segments=[{lower_key=[];timestamp=1073741824};{lower_key=[2];timestamp=3221225472}];"
- "upper_key=[<type=max>#]}")
-
+ "upper_key=[<type=max>#]}"),
+ std::tuple(
+ "{segments=[{lower_key=[2];timestamp=1}];upper_key=[<type=max>#]}",
+ "{segments=[{lower_key=[1];timestamp=1}];upper_key=[2]}",
+ "{segments=[{lower_key=[1];timestamp=1}];upper_key=[<type=max>#]}"),
+ std::tuple(
+ "{segments=[{lower_key=[3];timestamp=1}];upper_key=[<type=max>#]}",
+ "{segments=[{lower_key=[1];timestamp=1}];upper_key=[2]}",
+ "{segments=[{lower_key=[1];timestamp=1};{lower_key=[2];timestamp=0};{lower_key=[3];timestamp=1}];"
+ "upper_key=[<type=max>#]}"),
+ std::tuple(
+ "{segments=[{lower_key=[1];timestamp=1};{lower_key=[2];timestamp=0};{lower_key=[3];timestamp=1}];"
+ "upper_key=[<type=max>#]}",
+ "{segments=[{lower_key=[2];timestamp=1}];upper_key=[3]}",
+ "{segments=[{lower_key=[1];timestamp=1}];upper_key=[<type=max>#]}")
));
////////////////////////////////////////////////////////////////////////////////
diff --git a/yt/yt/core/actions/invoker_util.h b/yt/yt/core/actions/invoker_util.h
index 0b020c3f6b..a5dd251fa7 100644
--- a/yt/yt/core/actions/invoker_util.h
+++ b/yt/yt/core/actions/invoker_util.h
@@ -1,12 +1,17 @@
#pragma once
+#include "invoker.h"
#include "public.h"
-#include <yt/yt/core/concurrency/scheduler_api.h>
+#include <yt/yt/core/actions/bind.h>
namespace NYT {
////////////////////////////////////////////////////////////////////////////////
+// Forward declaration
+IInvoker* GetCurrentInvoker();
+
+////////////////////////////////////////////////////////////////////////////////
//! Returns the synchronous-ish invoker that defers recurrent action invocation.
/*!
diff --git a/yt/yt/core/actions/unittests/future_ut.cpp b/yt/yt/core/actions/unittests/future_ut.cpp
index de1f661e1d..e27136c092 100644
--- a/yt/yt/core/actions/unittests/future_ut.cpp
+++ b/yt/yt/core/actions/unittests/future_ut.cpp
@@ -5,6 +5,7 @@
#include <yt/yt/core/actions/invoker_util.h>
#include <yt/yt/core/concurrency/action_queue.h>
+#include <yt/yt/core/concurrency/scheduler_api.h>
#include <yt/yt/core/misc/ref_counted_tracker.h>
#include <yt/yt/core/misc/mpsc_stack.h>
diff --git a/yt/yt/core/actions/unittests/invoker_ut.cpp b/yt/yt/core/actions/unittests/invoker_ut.cpp
index be43a0b3d7..8460a78471 100644
--- a/yt/yt/core/actions/unittests/invoker_ut.cpp
+++ b/yt/yt/core/actions/unittests/invoker_ut.cpp
@@ -6,6 +6,7 @@
#include <yt/yt/core/misc/finally.h>
#include <yt/yt/core/concurrency/action_queue.h>
+#include <yt/yt/core/concurrency/scheduler_api.h>
#include <yt/yt/core/concurrency/thread_pool.h>
#include <yt/yt/library/profiling/public.h>
diff --git a/yt/yt/core/concurrency/delayed_executor.cpp b/yt/yt/core/concurrency/delayed_executor.cpp
index 6b9758f13f..ae71138169 100644
--- a/yt/yt/core/concurrency/delayed_executor.cpp
+++ b/yt/yt/core/concurrency/delayed_executor.cpp
@@ -562,7 +562,7 @@ void TDelayedExecutor::Cancel(const TDelayedExecutorCookie& cookie)
void TDelayedExecutor::CancelAndClear(TDelayedExecutorCookie& cookie)
{
- NDetail::TDelayedExecutorImpl::Get()->Cancel(std::move(cookie));
+ NDetail::TDelayedExecutorImpl::Get()->Cancel(std::exchange(cookie, {}));
}
////////////////////////////////////////////////////////////////////////////////
diff --git a/yt/yt/core/concurrency/fair_share_action_queue.cpp b/yt/yt/core/concurrency/fair_share_action_queue.cpp
index ef00551028..e90347d81a 100644
--- a/yt/yt/core/concurrency/fair_share_action_queue.cpp
+++ b/yt/yt/core/concurrency/fair_share_action_queue.cpp
@@ -4,6 +4,7 @@
#include "profiling_helpers.h"
#include "system_invokers.h"
+#include <yt/yt/core/actions/bind.h>
#include <yt/yt/core/actions/invoker_util.h>
#include <yt/yt/core/actions/invoker_detail.h>
diff --git a/yt/yt/core/concurrency/fiber_scheduler_thread.cpp b/yt/yt/core/concurrency/fiber_scheduler_thread.cpp
index 228b9629d9..073e8d4007 100644
--- a/yt/yt/core/concurrency/fiber_scheduler_thread.cpp
+++ b/yt/yt/core/concurrency/fiber_scheduler_thread.cpp
@@ -8,6 +8,7 @@
#include <yt/yt/library/profiling/producer.h>
#include <yt/yt/core/actions/invoker_util.h>
+#include <yt/yt/core/concurrency/scheduler_api.h>
#include <yt/yt/core/misc/finally.h>
#include <yt/yt/core/misc/shutdown.h>
diff --git a/yt/yt/core/concurrency/thread_pool_detail.cpp b/yt/yt/core/concurrency/thread_pool_detail.cpp
index 36cd128bee..f0d9a23181 100644
--- a/yt/yt/core/concurrency/thread_pool_detail.cpp
+++ b/yt/yt/core/concurrency/thread_pool_detail.cpp
@@ -4,6 +4,7 @@
#include "private.h"
#include <yt/yt/core/actions/invoker_util.h>
+#include <yt/yt/core/concurrency/scheduler_api.h>
#include <algorithm>
diff --git a/yt/yt/core/concurrency/unittests/bounded_concurrency_invoker_ut.cpp b/yt/yt/core/concurrency/unittests/bounded_concurrency_invoker_ut.cpp
index 5a22f97430..163eee1f26 100644
--- a/yt/yt/core/concurrency/unittests/bounded_concurrency_invoker_ut.cpp
+++ b/yt/yt/core/concurrency/unittests/bounded_concurrency_invoker_ut.cpp
@@ -3,6 +3,7 @@
#include <yt/yt/core/actions/future.h>
#include <yt/yt/core/concurrency/action_queue.h>
+#include <yt/yt/core/concurrency/scheduler_api.h>
#include <yt/yt/core/logging/log.h>
diff --git a/yt/yt/core/concurrency/unittests/invoker_pool_ut.cpp b/yt/yt/core/concurrency/unittests/invoker_pool_ut.cpp
index 498f18ecc5..23fe1158ce 100644
--- a/yt/yt/core/concurrency/unittests/invoker_pool_ut.cpp
+++ b/yt/yt/core/concurrency/unittests/invoker_pool_ut.cpp
@@ -1,6 +1,7 @@
#include <yt/yt/core/test_framework/framework.h>
#include <yt/yt/core/concurrency/action_queue.h>
+#include <yt/yt/core/concurrency/scheduler_api.h>
#include <yt/yt/core/actions/invoker.h>
#include <yt/yt/core/actions/invoker_detail.h>
diff --git a/yt/yt/core/concurrency/unittests/scheduled_executor_ut.cpp b/yt/yt/core/concurrency/unittests/scheduled_executor_ut.cpp
index f0573463c6..6c52861a28 100644
--- a/yt/yt/core/concurrency/unittests/scheduled_executor_ut.cpp
+++ b/yt/yt/core/concurrency/unittests/scheduled_executor_ut.cpp
@@ -5,6 +5,7 @@
#include <yt/yt/core/concurrency/action_queue.h>
#include <yt/yt/core/concurrency/delayed_executor.h>
#include <yt/yt/core/concurrency/scheduled_executor.h>
+#include <yt/yt/core/concurrency/scheduler_api.h>
#include <atomic>
diff --git a/yt/yt/core/http/compression.cpp b/yt/yt/core/http/compression.cpp
index e719f54262..17e7a24872 100644
--- a/yt/yt/core/http/compression.cpp
+++ b/yt/yt/core/http/compression.cpp
@@ -5,6 +5,7 @@
#include <yt/yt/core/ytree/serialize.h>
#include <yt/yt/core/compression/dictionary_codec.h>
+#include <yt/yt/core/concurrency/scheduler_api.h>
#include <library/cpp/streams/brotli/brotli.h>
diff --git a/yt/yt/core/misc/error.cpp b/yt/yt/core/misc/error.cpp
index d316d8d579..6601358f75 100644
--- a/yt/yt/core/misc/error.cpp
+++ b/yt/yt/core/misc/error.cpp
@@ -2,6 +2,7 @@
#include "serialize.h"
#include <yt/yt/core/concurrency/fls.h>
+#include <yt/yt/core/concurrency/scheduler_api.h>
#include <yt/yt/core/net/local_address.h>
diff --git a/yt/yt/core/misc/fs.cpp b/yt/yt/core/misc/fs.cpp
index 17ebc4b0db..b14d33e6bd 100644
--- a/yt/yt/core/misc/fs.cpp
+++ b/yt/yt/core/misc/fs.cpp
@@ -6,6 +6,7 @@
#include <yt/yt/core/misc/proc.h>
#include <yt/yt/core/actions/invoker_util.h>
+#include <yt/yt/core/concurrency/scheduler_api.h>
#include <library/cpp/yt/system/handle_eintr.h>
#include <library/cpp/yt/system/exit.h>
diff --git a/yt/yt/core/misc/range_helpers-inl.h b/yt/yt/core/misc/range_helpers-inl.h
index 6ce1d498f1..aba43b32a6 100644
--- a/yt/yt/core/misc/range_helpers-inl.h
+++ b/yt/yt/core/misc/range_helpers-inl.h
@@ -9,26 +9,59 @@ namespace NDetail {
////////////////////////////////////////////////////////////////////////////////
+template <class TContainer>
+struct TAppendTo
+{ };
+
+template <class TContainer>
+ requires requires (TContainer container, typename TContainer::value_type value) {
+ container.push_back(value);
+ }
+struct TAppendTo<TContainer>
+{
+ template <class TValue>
+ static void Append(TContainer& container, TValue&& value)
+ {
+ container.push_back(std::forward<TValue>(value));
+ }
+};
+
+template <class TContainer>
+ requires requires (TContainer container, typename TContainer::value_type value) {
+ container.insert(value);
+ }
+struct TAppendTo<TContainer>
+{
+ template <class TValue>
+ static void Append(TContainer& container, TValue&& value)
+ {
+ container.insert(std::forward<TValue>(value));
+ }
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
template <class TContainer, std::ranges::input_range TRange>
struct TRangeTo
{ };
template <class TContainer, std::ranges::input_range TRange>
- requires requires (TContainer container, size_t size) {
- container.reserve(size);
- container.push_back(std::declval<typename TContainer::value_type>());
+ requires requires (TContainer container, typename TContainer::value_type value) {
+ TAppendTo<TContainer>::Append(container, value);
}
struct TRangeTo<TContainer, TRange>
{
static auto ToContainer(TRange&& range)
{
TContainer container;
- if constexpr (requires { std::ranges::size(range); }) {
+ if constexpr (requires { std::ranges::size(range); } &&
+ requires { container.reserve(std::declval<size_t>()); })
+ {
container.reserve(std::ranges::size(range));
}
for (auto&& element : range) {
- container.push_back(std::forward<decltype(element)>(element));
+ TAppendTo<TContainer>::Append(container, std::forward<decltype(element)>(element));
}
return container;
@@ -54,6 +87,28 @@ auto RangeTo(TRange&& range)
return NDetail::TRangeTo<TContainer, TRange>::ToContainer(std::forward<TRange>(range));
}
+template <class TContainer, std::ranges::input_range TRange, class TTransformFunction>
+auto TransformRangeTo(TRange&& range, TTransformFunction&& function)
+{
+ return RangeTo<TContainer>(std::ranges::views::transform(
+ std::forward<TRange>(range),
+ std::forward<TTransformFunction>(function)));
+}
+
+template <std::ranges::range TRange, class TOperation, class TProjection>
+auto FoldRange(TRange&& range, TOperation operation, TProjection projection)
+{
+ auto iter = range.begin();
+ if (iter == range.end()) {
+ return std::remove_cvref_t<decltype(std::invoke(projection, *iter))>{};
+ }
+ auto accumulator = std::invoke(projection, *iter);
+ for (++iter; iter != range.end(); ++iter) {
+ accumulator = std::invoke(operation, accumulator, std::invoke(projection, *iter));
+ }
+ return accumulator;
+}
+
////////////////////////////////////////////////////////////////////////////////
} // namespace NYT
diff --git a/yt/yt/core/misc/range_helpers.h b/yt/yt/core/misc/range_helpers.h
index 2de49738d2..5df2f63c3c 100644
--- a/yt/yt/core/misc/range_helpers.h
+++ b/yt/yt/core/misc/range_helpers.h
@@ -18,10 +18,18 @@ template <std::ranges::range... TRanges>
auto ZipMutable(TRanges&&... ranges);
//! Converts the provided range to the specified container.
-//! This is a simplified equivalent of std::ranges::to from range-v3.
+//! This is a simplified equivalent of std::ranges::to from range-v3.
template <class TContainer, std::ranges::input_range TRange>
auto RangeTo(TRange&& range);
+//! Shortcut for `RangeTo(std::ranges::views::transform)`.
+template <class TContainer, std::ranges::input_range TRange, class TTransformFunction>
+auto TransformRangeTo(TRange&& range, TTransformFunction&& function);
+
+//! An equivalent of std::ranges::fold_left from range-v3.
+template <std::ranges::range TRange, class TOperation, class TProjection = std::identity>
+auto FoldRange(TRange&& range, TOperation operation, TProjection projection = {});
+
////////////////////////////////////////////////////////////////////////////////
} // namespace NYT
diff --git a/yt/yt/core/misc/unittests/range_helpers_ut.cpp b/yt/yt/core/misc/unittests/range_helpers_ut.cpp
index 19a2031034..4c38374f75 100644
--- a/yt/yt/core/misc/unittests/range_helpers_ut.cpp
+++ b/yt/yt/core/misc/unittests/range_helpers_ut.cpp
@@ -50,5 +50,17 @@ TEST(TRangeHelpersTest, RangeToString)
////////////////////////////////////////////////////////////////////////////////
+TEST(TRangeHelpersTest, Fold)
+{
+ EXPECT_EQ(0, FoldRange(std::vector<int>{}, std::plus{}));
+ EXPECT_EQ(6, FoldRange(std::vector<int>{1, 2, 3}, std::plus{}));
+ EXPECT_EQ(5, FoldRange(
+ std::vector<std::vector<int>>{{1, 2}, {3, 4, 5}},
+ std::plus{},
+ std::ranges::ssize));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
} // namespace
} // namespace NYT
diff --git a/yt/yt/core/yson/protobuf_interop-inl.h b/yt/yt/core/yson/protobuf_interop-inl.h
index 6f7d630002..6f7d3585ad 100644
--- a/yt/yt/core/yson/protobuf_interop-inl.h
+++ b/yt/yt/core/yson/protobuf_interop-inl.h
@@ -4,6 +4,8 @@
#include "protobuf_interop.h"
#endif
+#include <library/cpp/yt/error/error.h>
+
namespace NYT::NYson {
////////////////////////////////////////////////////////////////////////////////
@@ -17,13 +19,6 @@ const TProtobufMessageType* ReflectProtobufMessageType()
////////////////////////////////////////////////////////////////////////////////
-namespace NDetail {
-
-////////////////////////////////////////////////////////////////////////////////
-
-template <CProtobufElement TElementType>
-consteval std::string_view GetProtobufElementTypeName();
-
#define MAP_PROTOBUF_ELEMENT_TYPE_NAME(elementType, name) \
template <> \
consteval std::string_view GetProtobufElementTypeName<elementType>() \
@@ -40,22 +35,26 @@ MAP_PROTOBUF_ELEMENT_TYPE_NAME(TProtobufAnyElement, "any")
#undef MAP_PROTOBUF_ELEMENT_TYPE_NAME
-std::string_view GetProtobufElementTypeName(const NYson::TProtobufElement& element);
-
////////////////////////////////////////////////////////////////////////////////
-} // namespace NDetail
-
-////////////////////////////////////////////////////////////////////////////////
+template <class... U>
+auto VisitProtobufElement(const TProtobufElement& element, U&&... visitorOverloads)
+{
+ TOverloaded visitor{std::forward<U>(visitorOverloads)...};
+ return Visit(element, [&] <CProtobufElement TElement> (const std::unique_ptr<TElement>& element) {
+ YT_VERIFY(element);
+ return visitor(*element);
+ });
+}
template <CProtobufElement TElementType>
-const TElementType& GetProtobufElementOrThrow(const NYson::TProtobufElement& element)
+const TElementType& GetProtobufElementOrThrow(const TProtobufElement& element)
{
const auto* result = std::get_if<std::unique_ptr<TElementType>>(&element);
THROW_ERROR_EXCEPTION_UNLESS(result,
"Expected protobuf element of type %Qv, but got of type %Qv",
- NDetail::GetProtobufElementTypeName<TElementType>(),
- NDetail::GetProtobufElementTypeName(element));
+ GetProtobufElementTypeName<TElementType>(),
+ GetProtobufElementTypeName(element));
return *result->get();
}
diff --git a/yt/yt/core/yson/protobuf_interop.cpp b/yt/yt/core/yson/protobuf_interop.cpp
index 0aa00e3f07..b49cae0c42 100644
--- a/yt/yt/core/yson/protobuf_interop.cpp
+++ b/yt/yt/core/yson/protobuf_interop.cpp
@@ -197,6 +197,74 @@ void WriteSchema(const TProtobufEnumType* enumType, IYsonConsumer* consumer);
////////////////////////////////////////////////////////////////////////////////
+NYson::TProtobufElementType GetProtobufElementType(const TProtobufElement& protobufElement)
+{
+ return VisitProtobufElement(protobufElement,
+ [] (const TProtobufMessageElement& /*element*/) {
+ return TProtobufElementType{FieldDescriptor::TYPE_MESSAGE};
+ },
+ [] (const TProtobufScalarElement& element) {
+ return element.Type;
+ },
+ [] (const TProtobufAttributeDictionaryElement& /*element*/) {
+ return TProtobufElementType{FieldDescriptor::TYPE_MESSAGE};
+ },
+ [] (const TProtobufRepeatedElement& element) {
+ return GetProtobufElementType(element.Element);
+ },
+ [] (const TProtobufMapElement& /*element*/) {
+ // NB! Map is interpreted directly as repeated message field.
+ return TProtobufElementType{FieldDescriptor::TYPE_MESSAGE};
+ },
+ [] (const TProtobufAnyElement& /*element*/) {
+ return TProtobufElementType{FieldDescriptor::TYPE_STRING};
+ }
+ );
+}
+
+NYTree::ENodeType GetNodeTypeByProtobufScalarElement(const TProtobufScalarElement& scalarElement)
+{
+ switch (scalarElement.Type.Underlying()) {
+ case FieldDescriptor::TYPE_INT64:
+ case FieldDescriptor::TYPE_INT32:
+ case FieldDescriptor::TYPE_SINT32:
+ case FieldDescriptor::TYPE_SINT64:
+ case FieldDescriptor::TYPE_SFIXED32:
+ case FieldDescriptor::TYPE_SFIXED64:
+ return NYTree::ENodeType::Int64;
+ case FieldDescriptor::TYPE_UINT64:
+ case FieldDescriptor::TYPE_FIXED64:
+ case FieldDescriptor::TYPE_UINT32:
+ case FieldDescriptor::TYPE_FIXED32:
+ return NYTree::ENodeType::Uint64;
+ case FieldDescriptor::TYPE_BOOL:
+ return NYTree::ENodeType::Boolean;
+ case FieldDescriptor::TYPE_STRING:
+ case FieldDescriptor::TYPE_BYTES:
+ return NYTree::ENodeType::String;
+ case FieldDescriptor::TYPE_ENUM:
+ switch (scalarElement.EnumStorageType) {
+ case EEnumYsonStorageType::String:
+ return NYTree::ENodeType::String;
+ case EEnumYsonStorageType::Int:
+ return NYTree::ENodeType::Int64;
+ }
+ YT_ABORT();
+ case FieldDescriptor::TYPE_DOUBLE:
+ case FieldDescriptor::TYPE_FLOAT:
+ return NYTree::ENodeType::Double;
+ case FieldDescriptor::TYPE_GROUP:
+ case FieldDescriptor::TYPE_MESSAGE:
+ // NB! Scalar element cannot be of type message.
+ break;
+ }
+
+ THROW_ERROR_EXCEPTION("Encountered non-scalar field type for scalar protobuf element")
+ << TErrorAttribute("fieldType", scalarElement.Type.Underlying());
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
class TProtobufTypeRegistry
{
public:
@@ -810,7 +878,8 @@ TProtobufElement TProtobufField::GetElement(bool insideRepeated) const
return std::make_unique<TProtobufScalarElement>(TProtobufScalarElement{
static_cast<TProtobufElementType>(GetType()),
GetEnumYsonStorageType(),
- EnumType_
+ GetEnumType(),
+ IsEnumValueCheckStrict(),
});
}
}
@@ -3022,10 +3091,6 @@ TProtobufElementResolveResult GetProtobufElementFromField(
////////////////////////////////////////////////////////////////////////////////
-namespace NDetail {
-
-////////////////////////////////////////////////////////////////////////////////
-
std::string_view GetProtobufElementTypeName(const NYson::TProtobufElement& element)
{
return Visit(element,
@@ -3034,10 +3099,6 @@ std::string_view GetProtobufElementTypeName(const NYson::TProtobufElement& eleme
});
}
-////////////////////////////////////////////////////////////////////////////////
-
-} // namespace NDetail
-
TProtobufElementResolveResult ResolveProtobufElementByYPath(
const TProtobufMessageType* rootType,
const NYPath::TYPathBuf path,
diff --git a/yt/yt/core/yson/protobuf_interop.h b/yt/yt/core/yson/protobuf_interop.h
index 7ad3bd9879..79b06b9ec4 100644
--- a/yt/yt/core/yson/protobuf_interop.h
+++ b/yt/yt/core/yson/protobuf_interop.h
@@ -84,6 +84,7 @@ struct TProtobufScalarElement
// Meaningful only when TYPE == TYPE_ENUM.
EEnumYsonStorageType EnumStorageType;
const TProtobufEnumType* EnumType;
+ bool StrictEnumChecks;
};
struct TProtobufAttributeDictionaryElement
@@ -126,7 +127,20 @@ TProtobufElementResolveResult ResolveProtobufElementByYPath(
////////////////////////////////////////////////////////////////////////////////
template <CProtobufElement TElementType>
-const TElementType& GetProtobufElementOrThrow(const NYson::TProtobufElement& element);
+consteval std::string_view GetProtobufElementTypeName();
+
+std::string_view GetProtobufElementTypeName(const TProtobufElement& element);
+
+// Version of `NYT::Visit` for `TProtobufElement` which skips `std::unique_ptr` wrappers.
+template <class... U>
+auto VisitProtobufElement(const TProtobufElement& element, U&&... visitorOverloads);
+
+TProtobufElementType GetProtobufElementType(const TProtobufElement& protobufElement);
+
+NYTree::ENodeType GetNodeTypeByProtobufScalarElement(const TProtobufScalarElement& scalarElement);
+
+template <CProtobufElement TElementType>
+const TElementType& GetProtobufElementOrThrow(const TProtobufElement& element);
////////////////////////////////////////////////////////////////////////////////
diff --git a/yt/yt/core/ytree/attribute_filter-inl.h b/yt/yt/core/ytree/attribute_filter-inl.h
index a319270ad1..34c1934284 100644
--- a/yt/yt/core/ytree/attribute_filter-inl.h
+++ b/yt/yt/core/ytree/attribute_filter-inl.h
@@ -10,7 +10,7 @@ namespace NYT::NYTree {
template <class T>
TAttributeFilter::TAttributeFilter(std::initializer_list<T> keys)
- : Keys({keys.begin(), keys.end()})
+ : Keys_({keys.begin(), keys.end()})
, Universal(false)
{ }
diff --git a/yt/yt/core/ytree/attribute_filter.cpp b/yt/yt/core/ytree/attribute_filter.cpp
index e6213d810e..3c5d217f37 100644
--- a/yt/yt/core/ytree/attribute_filter.cpp
+++ b/yt/yt/core/ytree/attribute_filter.cpp
@@ -197,18 +197,18 @@ std::unique_ptr<IHeterogenousFilterConsumer> CreateFilteringConsumerImpl(
////////////////////////////////////////////////////////////////////////////////
TAttributeFilter::TAttributeFilter(std::vector<IAttributeDictionary::TKey> keys, std::vector<TYPath> paths)
- : Keys(std::move(keys))
- , Paths(std::move(paths))
+ : Keys_(std::move(keys))
+ , Paths_(std::move(paths))
, Universal(false)
{ }
TAttributeFilter::TAttributeFilter(std::initializer_list<TString> keys)
- : Keys({keys.begin(), keys.end()})
+ : Keys_({keys.begin(), keys.end()})
, Universal(false)
{ }
TAttributeFilter::TAttributeFilter(const std::vector<TString>& keys)
- : Keys({keys.begin(), keys.end()})
+ : Keys_({keys.begin(), keys.end()})
, Universal(false)
{ }
@@ -219,14 +219,25 @@ TAttributeFilter::operator bool() const
void TAttributeFilter::ValidateKeysOnly(TStringBuf context) const
{
- if (!Paths.empty()) {
+ if (!Paths_.empty()) {
THROW_ERROR_EXCEPTION("Filtering attributes by path is not implemented for %v", context);
}
}
bool TAttributeFilter::IsEmpty() const
{
- return !Universal && Keys.empty() && Paths.empty();
+ return !Universal && Keys_.empty() && Paths_.empty();
+}
+
+void TAttributeFilter::AddKey(IAttributeDictionary::TKey key)
+{
+ Universal = false;
+ Keys_.emplace_back(std::move(key));
+}
+
+void TAttributeFilter::ReserveKeys(size_t capacity)
+{
+ Keys_.reserve(capacity);
}
bool TAttributeFilter::AdmitsKeySlow(TStringBuf key) const
@@ -234,18 +245,18 @@ bool TAttributeFilter::AdmitsKeySlow(TStringBuf key) const
if (!*this) {
return true;
}
- return std::find(Keys.begin(), Keys.end(), key) != Keys.end() ||
- std::find(Paths.begin(), Paths.end(), "/" + ToYPathLiteral(key)) != Paths.end();
+ return std::find(Keys_.begin(), Keys_.end(), key) != Keys_.end() ||
+ std::find(Paths_.begin(), Paths_.end(), "/" + ToYPathLiteral(key)) != Paths_.end();
}
TAttributeFilter::TKeyToFilter TAttributeFilter::Normalize() const
{
YT_VERIFY(*this);
- if (Paths.empty()) {
+ if (Paths_.empty()) {
// Fast path for key-only case.
TKeyToFilter result;
- result.reserve(Keys.size());
- for (const auto& key : Keys) {
+ result.reserve(Keys_.size());
+ for (const auto& key : Keys_) {
result[key] = std::nullopt;
}
return result;
@@ -254,12 +265,12 @@ TAttributeFilter::TKeyToFilter TAttributeFilter::Normalize() const
// As a first step, prepare a combined vector of paths: canonize all paths
// and transform all keys to paths of form /<ToYPathLiteral(key)> (which is
// already a canonical form).
- std::vector<TYPath> paths = Paths;
+ std::vector<TYPath> paths = Paths_;
for (auto& path : paths) {
NDetail::CanonizeAndValidatePath(path);
}
- paths.reserve(paths.size() + Keys.size());
- for (const auto& key : Keys) {
+ paths.reserve(paths.size() + Keys_.size());
+ for (const auto& key : Keys_) {
paths.emplace_back("/" + ToYPathLiteral(key));
}
@@ -414,15 +425,17 @@ void ToProto(NProto::TAttributeFilter* protoFilter, const TAttributeFilter& filt
{
YT_VERIFY(filter);
- ToProto(protoFilter->mutable_keys(), filter.Keys);
- ToProto(protoFilter->mutable_paths(), filter.Paths);
+ ToProto(protoFilter->mutable_keys(), filter.Keys());
+ ToProto(protoFilter->mutable_paths(), filter.Paths());
}
void FromProto(TAttributeFilter* filter, const NProto::TAttributeFilter& protoFilter)
{
- filter->Universal = false;
- FromProto(&filter->Keys, protoFilter.keys());
- FromProto(&filter->Paths, protoFilter.paths());
+ std::vector<IAttributeDictionary::TKey> keys;
+ std::vector<NYPath::TYPath> paths;
+ FromProto(&keys, protoFilter.keys());
+ FromProto(&paths, protoFilter.paths());
+ *filter = TAttributeFilter(std::move(keys), std::move(paths));
}
void Serialize(const TAttributeFilter& filter, IYsonConsumer* consumer)
@@ -430,8 +443,8 @@ void Serialize(const TAttributeFilter& filter, IYsonConsumer* consumer)
if (filter) {
BuildYsonFluently(consumer)
.BeginMap()
- .Item("keys").Value(filter.Keys)
- .Item("paths").Value(filter.Paths)
+ .Item("keys").Value(filter.Keys())
+ .Item("paths").Value(filter.Paths())
.EndMap();
} else {
BuildYsonFluently(consumer)
@@ -445,30 +458,29 @@ void Deserialize(TAttributeFilter& filter, const INodePtr& node)
case ENodeType::Map: {
auto mapNode = node->AsMap();
- filter.Universal = false;
- filter.Keys.clear();
+ std::vector<IAttributeDictionary::TKey> keys;
+ std::vector<TYPath> paths;
if (auto keysNode = mapNode->FindChild("keys")) {
- filter.Keys = ConvertTo<std::vector<IAttributeDictionary::TKey>>(keysNode);
+ keys = ConvertTo<std::vector<IAttributeDictionary::TKey>>(keysNode);
}
- filter.Paths.clear();
if (auto pathsNode = mapNode->FindChild("paths")) {
- filter.Paths = ConvertTo<std::vector<TYPath>>(pathsNode);
+ paths = ConvertTo<std::vector<TYPath>>(pathsNode);
}
+ filter = TAttributeFilter(std::move(keys), std::move(paths));
+
break;
}
case ENodeType::List: {
// Compatibility mode with HTTP clients that specify attribute keys as string lists.
- filter.Universal = false;
- filter.Keys = ConvertTo<std::vector<IAttributeDictionary::TKey>>(node);
- filter.Paths = {};
+
+ auto keys = ConvertTo<std::vector<IAttributeDictionary::TKey>>(node);
+ filter = TAttributeFilter(keys);
break;
}
case ENodeType::Entity: {
- filter.Universal = true;
- filter.Keys = {};
- filter.Paths = {};
+ filter = TAttributeFilter();
break;
}
default:
@@ -487,7 +499,7 @@ void FormatValue(
TStringBuf /*spec*/)
{
if (attributeFilter) {
- builder->AppendFormat("{Keys: %v, Paths: %v}", attributeFilter.Keys, attributeFilter.Paths);
+ builder->AppendFormat("{Keys: %v, Paths: %v}", attributeFilter.Keys(), attributeFilter.Paths());
} else {
builder->AppendString("(universal)");
}
@@ -512,8 +524,8 @@ void FormatValue(
auto limit = view.Limit;
if (attributeFilter) {
builder->AppendFormat("{Keys: %v, Paths: %v}",
- MakeShrunkFormattableView(attributeFilter.Keys, TDefaultFormatter{}, limit),
- MakeShrunkFormattableView(attributeFilter.Paths, TDefaultFormatter{}, limit));
+ MakeShrunkFormattableView(attributeFilter.Keys(), TDefaultFormatter{}, limit),
+ MakeShrunkFormattableView(attributeFilter.Paths(), TDefaultFormatter{}, limit));
} else {
builder->AppendString("(universal)");
}
diff --git a/yt/yt/core/ytree/attribute_filter.h b/yt/yt/core/ytree/attribute_filter.h
index 10b7fc6551..59ddc7a9ff 100644
--- a/yt/yt/core/ytree/attribute_filter.h
+++ b/yt/yt/core/ytree/attribute_filter.h
@@ -66,8 +66,8 @@ namespace NYT::NYTree {
struct TAttributeFilter
{
//! Whitelist of top-level keys to be returned.
- std::vector<IAttributeDictionary::TKey> Keys;
- std::vector<NYPath::TYPath> Paths;
+ DEFINE_BYREF_RO_PROPERTY(std::vector<IAttributeDictionary::TKey>, Keys);
+ DEFINE_BYREF_RO_PROPERTY(std::vector<NYPath::TYPath>, Paths);
//! If true, filter is universal, i.e. behavior depends on service's own policy;
//! in such case #Keys and #Paths are always empty.
@@ -100,6 +100,12 @@ struct TAttributeFilter
//! error message.
void ValidateKeysOnly(TStringBuf context = "this context") const;
+ //! Adds key. Makes attribute filter not universal if it was universal.
+ void AddKey(IAttributeDictionary::TKey key);
+
+ //! Reserve keys.
+ void ReserveKeys(size_t capacity);
+
//! Returns true if #key appears in Keys or "/#key" appears in Paths using linear search.
bool AdmitsKeySlow(IAttributeDictionary::TKeyView key) const;
diff --git a/yt/yt/core/ytree/serialize-inl.h b/yt/yt/core/ytree/serialize-inl.h
index 486d7ab3f6..f0f11c1e83 100644
--- a/yt/yt/core/ytree/serialize-inl.h
+++ b/yt/yt/core/ytree/serialize-inl.h
@@ -149,6 +149,18 @@ void DeserializeVector(T& value, INodePtr node)
}
template <class T>
+void DeserializeProtobufRepeated(T& value, INodePtr node)
+{
+ auto listNode = node->AsList();
+ auto size = listNode->GetChildCount();
+ value.Clear();
+ value.Reserve(size);
+ for (int i = 0; i < size; ++i) {
+ Deserialize(*value.Add(), listNode->GetChildOrThrow(i));
+ }
+}
+
+template <class T>
void DeserializeSet(T& value, INodePtr node)
{
auto listNode = node->AsList();
@@ -606,6 +618,20 @@ void Deserialize(TCompactVector<T, N>& value, INodePtr node)
NDetail::DeserializeVector(value, node);
}
+// RepeatedPtrField
+template <class T>
+void Deserialize(google::protobuf::RepeatedPtrField<T>& value, INodePtr node)
+{
+ NDetail::DeserializeProtobufRepeated(value, node);
+}
+
+// RepeatedField
+template <class T>
+void Deserialize(google::protobuf::RepeatedField<T>& value, INodePtr node)
+{
+ NDetail::DeserializeProtobufRepeated(value, node);
+}
+
// TErrorOr
template <class T>
void Deserialize(TErrorOr<T>& error, NYTree::INodePtr node)
diff --git a/yt/yt/core/ytree/serialize.h b/yt/yt/core/ytree/serialize.h
index 138c77e1d6..6748cb107b 100644
--- a/yt/yt/core/ytree/serialize.h
+++ b/yt/yt/core/ytree/serialize.h
@@ -253,6 +253,14 @@ void Deserialize(std::deque<T, A>& value, INodePtr node);
template <class T, size_t N>
void Deserialize(TCompactVector<T, N>& value, INodePtr node);
+// RepeatedPtrField
+template <class T>
+void Deserialize(google::protobuf::RepeatedPtrField<T>& items, INodePtr node);
+
+// RepeatedField
+template <class T>
+void Deserialize(google::protobuf::RepeatedField<T>& items, INodePtr node);
+
// TErrorOr
template <class T>
void Deserialize(TErrorOr<T>& error, INodePtr node);
diff --git a/yt/yt/core/ytree/unittests/serialize_ut.cpp b/yt/yt/core/ytree/unittests/serialize_ut.cpp
index 7df8cb866a..241e7bf86a 100644
--- a/yt/yt/core/ytree/unittests/serialize_ut.cpp
+++ b/yt/yt/core/ytree/unittests/serialize_ut.cpp
@@ -457,6 +457,30 @@ TEST(TYTreeSerializationTest, ProtobufKeepUnknown)
}
}
+TEST(TSerializationTest, ProtobufRepeatedField)
+{
+ google::protobuf::RepeatedField<i64> original;
+ original.Add(1);
+ original.Add(2);
+ original.Add(3);
+
+ auto node = ConvertToNode(original);
+ auto deserialized = ConvertTo<google::protobuf::RepeatedField<i64>>(node);
+ EXPECT_TRUE(std::ranges::equal(original, deserialized));
+}
+
+TEST(TSerializationTest, ProtobufRepeatedPtrField)
+{
+ google::protobuf::RepeatedPtrField<TString> original;
+ original.Add("one");
+ original.Add("two");
+ original.Add("three");
+
+ auto node = ConvertToNode(original);
+ auto deserialized = ConvertTo<google::protobuf::RepeatedPtrField<TString>>(node);
+ EXPECT_TRUE(std::ranges::equal(original, deserialized));
+}
+
////////////////////////////////////////////////////////////////////////////////
class TTestClass
diff --git a/yt/yt_proto/yt/client/api/rpc_proxy/proto/api_service.proto b/yt/yt_proto/yt/client/api/rpc_proxy/proto/api_service.proto
index d121f2e079..c1f922829e 100644
--- a/yt/yt_proto/yt/client/api/rpc_proxy/proto/api_service.proto
+++ b/yt/yt_proto/yt/client/api/rpc_proxy/proto/api_service.proto
@@ -2154,6 +2154,9 @@ message TReqPartitionTables
optional bool enable_cookies = 10;
+ // COMPAT(apollo1321): Remove in 25.2 release.
+ optional bool use_new_slicing_implementation_in_ordered_pool = 11 [default=true];
+
optional TTransactionalOptions transactional_options = 100;
}
@@ -3318,6 +3321,14 @@ message TReqStartQuery
repeated string items = 1;
}
+ message TSecret
+ {
+ optional string id = 1;
+ optional string category = 2;
+ optional string subcategory = 3;
+ optional string ypath = 4;
+ }
+
required string query_tracker_stage = 1;
required EQueryEngine engine = 2;
required string query = 3;
@@ -3327,6 +3338,7 @@ message TReqStartQuery
repeated TQueryFile files = 7;
optional string access_control_object = 8; // COMPAT(mpereskokova)
optional TAccessControlObjects access_control_objects = 9;
+ repeated TSecret secrets = 10;
}
message TRspStartQuery